WIP checks

Dominik Werder
2023-06-17 23:07:59 +02:00
parent 3cd74601a4
commit 44dd43240b
24 changed files with 492 additions and 368 deletions

View File

@@ -72,7 +72,7 @@ where
Ready(Ok(())) => {
let n = buf.filled().len();
self.buf.wadv(n)?;
trace!("recv bytes {}", n);
trace2!("recv bytes {}", n);
Ready(Ok(n))
}
Ready(Err(e)) => Ready(Err(e.into())),
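Note on the trace! to trace2! switch: the "recv bytes" message fires on every socket read, so it is presumably being demoted to a second, more verbose trace level to cut noise from the hot read path. The sketch below is only an assumption about how such a trace2! macro could be defined; the actual macro comes from netpod::log and is not shown in this diff.

// Hypothetical sketch, not the actual netpod::log implementation:
// an extra-verbose trace level that is compiled out by default so
// per-read logging costs nothing in normal builds.
#[cfg(feature = "trace2")]
macro_rules! trace2 {
    ($($arg:tt)*) => { tracing::trace!($($arg)*) };
}

#[cfg(not(feature = "trace2"))]
macro_rules! trace2 {
    ($($arg:tt)*) => {};
}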

View File

@@ -12,7 +12,7 @@ use items_2::channelevents::ChannelEvents;
use items_2::merger::Merger;
use items_2::streams::PlainEventStream;
use netpod::log::*;
-use netpod::ChConf;
+use netpod::ChannelTypeConfigGen;
use netpod::Cluster;
use query::api4::events::PlainEventsQuery;
use serde_json::Value as JsonValue;
@@ -20,7 +20,7 @@ use std::time::Instant;
pub async fn plain_events_json(
evq: &PlainEventsQuery,
-_chconf: &ChConf,
+ch_conf: &ChannelTypeConfigGen,
cluster: &Cluster,
) -> Result<JsonValue, Error> {
info!("plain_events_json evquery {:?}", evq);
@@ -28,7 +28,7 @@ pub async fn plain_events_json(
let deadline = Instant::now() + evq.timeout();
let mut tr = build_merged_event_transform(evq.transform())?;
// TODO make sure the empty container arrives over the network.
-let inps = open_tcp_streams::<_, ChannelEvents>(&evq, cluster).await?;
+let inps = open_tcp_streams::<_, ChannelEvents>(&evq, ch_conf, cluster).await?;
// TODO propagate also the max-buf-len for the first stage event reader.
// TODO use a mixture of count and byte-size as threshold.
let stream = Merger::new(inps, evq.merger_out_len_max());
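With the new signature, callers of plain_events_json have to resolve the channel's type configuration up front instead of passing an unused _chconf. A hedged sketch of an adjusted call site follows; resolve_ch_conf is a hypothetical stand-in for whatever lookup the HTTP layer actually performs, and the exact accessor names are assumptions.

// Hypothetical call site; resolve_ch_conf is NOT a real function in this
// codebase, it only stands in for the channel-config lookup.
let ch_conf: ChannelTypeConfigGen = resolve_ch_conf(evq.channel(), &cluster).await?;
let json = plain_events_json(&evq, &ch_conf, &cluster).await?;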

View File

@@ -17,6 +17,7 @@ use items_2::framable::EventQueryJsonStringFrame;
use items_2::framable::Framable;
use items_2::frame::make_term_frame;
use netpod::log::*;
+use netpod::ChannelTypeConfigGen;
use netpod::Cluster;
use netpod::Node;
use netpod::PerfOpts;
@@ -30,6 +31,7 @@ use tokio::net::TcpStream;
pub async fn x_processed_event_blobs_stream_from_node(
query: PlainEventsQuery,
+ch_conf: ChannelTypeConfigGen,
perf_opts: PerfOpts,
node: Node,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>, Error> {
@@ -38,9 +40,16 @@ pub async fn x_processed_event_blobs_stream_from_node(
let net = TcpStream::connect(addr.clone()).await?;
let qjs = serde_json::to_string(&query)?;
let (netin, mut netout) = net.into_split();
let item = sitem_data(EventQueryJsonStringFrame(qjs));
let buf = item.make_frame()?;
netout.write_all(&buf).await?;
+let s = serde_json::to_string(&ch_conf)?;
+let item = sitem_data(EventQueryJsonStringFrame(s));
+let buf = item.make_frame()?;
+netout.write_all(&buf).await?;
let buf = make_term_frame()?;
netout.write_all(&buf).await?;
netout.flush().await?;
@@ -53,7 +62,11 @@ pub async fn x_processed_event_blobs_stream_from_node(
pub type BoxedStream<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
-pub async fn open_tcp_streams<Q, T>(query: Q, cluster: &Cluster) -> Result<Vec<BoxedStream<T>>, Error>
+pub async fn open_tcp_streams<Q, T>(
+query: Q,
+ch_conf: &ChannelTypeConfigGen,
+cluster: &Cluster,
+) -> Result<Vec<BoxedStream<T>>, Error>
where
Q: Serialize,
// Group bounds in new trait
@@ -67,9 +80,16 @@ where
let net = TcpStream::connect(addr.clone()).await?;
let qjs = serde_json::to_string(&query)?;
let (netin, mut netout) = net.into_split();
let item = sitem_data(EventQueryJsonStringFrame(qjs));
let buf = item.make_frame()?;
netout.write_all(&buf).await?;
+let s = serde_json::to_string(ch_conf)?;
+let item = sitem_data(EventQueryJsonStringFrame(s));
+let buf = item.make_frame()?;
+netout.write_all(&buf).await?;
let buf = make_term_frame()?;
netout.write_all(&buf).await?;
netout.flush().await?;
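Both x_processed_event_blobs_stream_from_node and open_tcp_streams now write a second frame, carrying the serialized ChannelTypeConfigGen, between the query frame and the terminating frame, presumably so the receiving node gets the channel's type configuration pushed with the request instead of resolving it itself. Below is a minimal sketch of that frame order only; the length-prefixed encoding is a made-up stand-in, the real encoding is defined by make_frame and make_term_frame in items_2.

// Illustration of the handshake order only. The 4-byte little-endian
// length prefix and the zero-length terminator are assumptions for this
// sketch; the actual wire format comes from make_frame/make_term_frame.
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;

async fn send_request_frames(
    addr: &str,
    query_json: &str,   // serialized query (e.g. PlainEventsQuery)
    ch_conf_json: &str, // serialized ChannelTypeConfigGen
) -> std::io::Result<TcpStream> {
    let mut net = TcpStream::connect(addr).await?;
    for payload in [query_json, ch_conf_json] {
        net.write_all(&(payload.len() as u32).to_le_bytes()).await?;
        net.write_all(payload.as_bytes()).await?;
    }
    // Zero-length frame marks the end of the request header.
    net.write_all(&0u32.to_le_bytes()).await?;
    net.flush().await?;
    Ok(net)
}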

View File

@@ -21,7 +21,7 @@ use items_2::streams::PlainEventStream;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::BinnedRangeEnum;
-use netpod::ChConf;
+use netpod::ChannelTypeConfigGen;
use netpod::Cluster;
use query::api4::binned::BinnedQuery;
use query::api4::events::PlainEventsQuery;
@@ -38,12 +38,13 @@ async fn timebinnable_stream(
query: BinnedQuery,
range: NanoRange,
one_before_range: bool,
+ch_conf: &ChannelTypeConfigGen,
cluster: Cluster,
) -> Result<TimeBinnableStreamBox, Error> {
let evq = PlainEventsQuery::new(query.channel().clone(), range.clone()).for_time_weighted_scalar();
let mut tr = build_merged_event_transform(evq.transform())?;
-let inps = open_tcp_streams::<_, ChannelEvents>(&evq, &cluster).await?;
+let inps = open_tcp_streams::<_, ChannelEvents>(&evq, ch_conf, &cluster).await?;
// TODO propagate also the max-buf-len for the first stage event reader.
// TODO use a mixture of count and byte-size as threshold.
let stream = Merger::new(inps, query.merger_out_len_max());
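For reference, Merger consumes one already time-ordered stream per node and yields a single ordered stream. A minimal, self-contained sketch of that k-way merge idea follows; the real Merger additionally works on Sitemty<ChannelEvents> items and batches its output according to merger_out_len_max.

// Simplified illustration of the per-node merge; plain timestamps stand
// in for events, and each input Vec is assumed to be sorted ascending.
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn merge_sorted(inputs: Vec<Vec<u64>>) -> Vec<u64> {
    let mut iters: Vec<_> = inputs.into_iter().map(|v| v.into_iter()).collect();
    // Min-heap over (next timestamp, input index).
    let mut heap = BinaryHeap::new();
    for (i, it) in iters.iter_mut().enumerate() {
        if let Some(ts) = it.next() {
            heap.push(Reverse((ts, i)));
        }
    }
    let mut out = Vec::new();
    while let Some(Reverse((ts, i))) = heap.pop() {
        out.push(ts);
        if let Some(next) = iters[i].next() {
            heap.push(Reverse((next, i)));
        }
    }
    out
}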
@@ -67,6 +68,7 @@ async fn timebinnable_stream(
async fn timebinned_stream(
query: BinnedQuery,
binned_range: BinnedRangeEnum,
+ch_conf: &ChannelTypeConfigGen,
cluster: Cluster,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<Box<dyn TimeBinned>>> + Send>>, Error> {
let range = binned_range.binned_range_time().to_nano_range();
@@ -74,7 +76,7 @@ async fn timebinned_stream(
let do_time_weight = true;
let one_before_range = true;
-let stream = timebinnable_stream(query.clone(), range, one_before_range, cluster).await?;
+let stream = timebinnable_stream(query.clone(), range, one_before_range, ch_conf, cluster).await?;
let stream: Pin<Box<dyn TimeBinnableStreamTrait>> = stream.0;
let stream = Box::pin(stream);
// TODO rename TimeBinnedStream to make it more clear that it is the component which initiates the time binning.
@@ -97,11 +99,15 @@ fn timebinned_to_collectable(
stream
}
-pub async fn timebinned_json(query: BinnedQuery, _chconf: ChConf, cluster: Cluster) -> Result<JsonValue, Error> {
+pub async fn timebinned_json(
+query: BinnedQuery,
+ch_conf: &ChannelTypeConfigGen,
+cluster: Cluster,
+) -> Result<JsonValue, Error> {
let deadline = Instant::now().checked_add(query.timeout_value()).unwrap();
let binned_range = BinnedRangeEnum::covering_range(query.range().clone(), query.bin_count())?;
let collect_max = 10000;
-let stream = timebinned_stream(query.clone(), binned_range.clone(), cluster).await?;
+let stream = timebinned_stream(query.clone(), binned_range.clone(), ch_conf, cluster).await?;
let stream = timebinned_to_collectable(stream);
let collected = Collect::new(stream, deadline, collect_max, None, Some(binned_range));
let collected: BoxFuture<_> = Box::pin(collected);
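timebinned_json still drives the binned stream through Collect with a deadline and a collect_max cap. As a rough, self-contained illustration of that deadline-bounded collection pattern (not the actual Collect implementation, which also carries the binned range and produces a structured result):

// Sketch only: drain a stream until collect_max items were taken,
// the stream ends, or the deadline passes, whichever happens first.
use futures_util::{Stream, StreamExt};
use std::time::Instant;

async fn collect_until<S, T>(mut stream: S, deadline: Instant, collect_max: usize) -> Vec<T>
where
    S: Stream<Item = T> + Unpin,
{
    let mut out = Vec::new();
    while out.len() < collect_max {
        let remaining = deadline.saturating_duration_since(Instant::now());
        match tokio::time::timeout(remaining, stream.next()).await {
            Ok(Some(item)) => out.push(item),
            // Ok(None): stream ended; Err(_): deadline reached.
            _ => break,
        }
    }
    out
}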