Distinct sub-query type

This commit is contained in:
Dominik Werder
2023-06-22 21:10:58 +02:00
parent d9ac27cd75
commit 0260f4b4d6
20 changed files with 374 additions and 164 deletions

View File

@@ -14,21 +14,27 @@ use items_2::streams::PlainEventStream;
use netpod::log::*;
use netpod::ChannelTypeConfigGen;
use netpod::Cluster;
use query::api4::events::EventsSubQuery;
use query::api4::events::EventsSubQuerySelect;
use query::api4::events::EventsSubQuerySettings;
use query::api4::events::PlainEventsQuery;
use serde_json::Value as JsonValue;
use std::time::Instant;
pub async fn plain_events_json(
evq: &PlainEventsQuery,
ch_conf: &ChannelTypeConfigGen,
ch_conf: ChannelTypeConfigGen,
cluster: &Cluster,
) -> Result<JsonValue, Error> {
info!("plain_events_json evquery {:?}", evq);
let select = EventsSubQuerySelect::new(ch_conf, evq.range().clone(), evq.transform().clone());
let settings = EventsSubQuerySettings::from(evq);
let subq = EventsSubQuery::from_parts(select, settings);
// TODO remove magic constant
let deadline = Instant::now() + evq.timeout();
let mut tr = build_merged_event_transform(evq.transform())?;
// TODO make sure the empty container arrives over the network.
let inps = open_tcp_streams::<_, ChannelEvents>(&evq, ch_conf, cluster).await?;
let inps = open_tcp_streams::<ChannelEvents>(subq, cluster).await?;
// TODO propagate also the max-buf-len for the first stage event reader.
// TODO use a mixture of count and byte-size as threshold.
let stream = Merger::new(inps, evq.merger_out_len_max());

View File

@@ -21,36 +21,29 @@ use netpod::ChannelTypeConfigGen;
use netpod::Cluster;
use netpod::Node;
use netpod::PerfOpts;
use query::api4::events::PlainEventsQuery;
use query::api4::events::EventsSubQuery;
use query::api4::events::Frame1Parts;
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json::json;
use std::fmt;
use std::pin::Pin;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
pub fn make_node_command_frame<Q>(query: Q, ch_conf: &ChannelTypeConfigGen) -> Result<EventQueryJsonStringFrame, Error>
where
Q: Serialize,
{
let obj = json!({
"query": query,
"ch_conf":ch_conf,
});
/// Serializes an `EventsSubQuery` into the JSON command frame that is written
/// to a node as the first frame after connecting (see the TCP send path below,
/// where the frame is the first item written to `netout`).
pub fn make_node_command_frame(query: EventsSubQuery) -> Result<EventQueryJsonStringFrame, Error> {
// Wrap the query in the Frame1Parts envelope so the receiving node can
// decode it; replaces the older ad-hoc json!({"query":…, "ch_conf":…}) shape.
let obj = Frame1Parts::new(query);
let ret = serde_json::to_string(&obj)?;
Ok(EventQueryJsonStringFrame(ret))
}
pub async fn x_processed_event_blobs_stream_from_node(
query: PlainEventsQuery,
query: EventsSubQuery,
ch_conf: ChannelTypeConfigGen,
perf_opts: PerfOpts,
node: Node,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>, Error> {
let addr = format!("{}:{}", node.host, node.port_raw);
debug!("x_processed_event_blobs_stream_from_node to: {addr}",);
let frame1 = make_node_command_frame(&query, &ch_conf)?;
let frame1 = make_node_command_frame(query)?;
let net = TcpStream::connect(addr.clone()).await?;
let (netin, mut netout) = net.into_split();
let item = sitem_data(frame1);
@@ -68,18 +61,13 @@ pub async fn x_processed_event_blobs_stream_from_node(
pub type BoxedStream<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
pub async fn open_tcp_streams<Q, T>(
query: Q,
ch_conf: &ChannelTypeConfigGen,
cluster: &Cluster,
) -> Result<Vec<BoxedStream<T>>, Error>
pub async fn open_tcp_streams<T>(query: EventsSubQuery, cluster: &Cluster) -> Result<Vec<BoxedStream<T>>, Error>
where
Q: Serialize,
// Group bounds in new trait
T: FrameTypeInnerStatic + DeserializeOwned + Send + Unpin + fmt::Debug + 'static,
{
// TODO when unit tests established, change to async connect:
let frame1 = make_node_command_frame(&query, &ch_conf)?;
let frame1 = make_node_command_frame(query)?;
let mut streams = Vec::new();
for node in &cluster.nodes {
let addr = format!("{}:{}", node.host, node.port_raw);

View File

@@ -24,7 +24,9 @@ use netpod::BinnedRangeEnum;
use netpod::ChannelTypeConfigGen;
use netpod::Cluster;
use query::api4::binned::BinnedQuery;
use query::api4::events::PlainEventsQuery;
use query::api4::events::EventsSubQuery;
use query::api4::events::EventsSubQuerySelect;
use query::api4::events::EventsSubQuerySettings;
use serde_json::Value as JsonValue;
use std::pin::Pin;
use std::time::Instant;
@@ -38,13 +40,14 @@ async fn timebinnable_stream(
query: BinnedQuery,
range: NanoRange,
one_before_range: bool,
ch_conf: &ChannelTypeConfigGen,
ch_conf: ChannelTypeConfigGen,
cluster: Cluster,
) -> Result<TimeBinnableStreamBox, Error> {
let evq = PlainEventsQuery::new(query.channel().clone(), range.clone()).for_time_weighted_scalar();
let mut tr = build_merged_event_transform(evq.transform())?;
let inps = open_tcp_streams::<_, ChannelEvents>(&evq, ch_conf, &cluster).await?;
let select = EventsSubQuerySelect::new(ch_conf, range.clone().into(), query.transform().clone());
let settings = EventsSubQuerySettings::from(&query);
let subq = EventsSubQuery::from_parts(select, settings);
let mut tr = build_merged_event_transform(subq.transform())?;
let inps = open_tcp_streams::<ChannelEvents>(subq, &cluster).await?;
// TODO propagate also the max-buf-len for the first stage event reader.
// TODO use a mixture of count and byte-size as threshold.
let stream = Merger::new(inps, query.merger_out_len_max());
@@ -68,7 +71,7 @@ async fn timebinnable_stream(
async fn timebinned_stream(
query: BinnedQuery,
binned_range: BinnedRangeEnum,
ch_conf: &ChannelTypeConfigGen,
ch_conf: ChannelTypeConfigGen,
cluster: Cluster,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<Box<dyn TimeBinned>>> + Send>>, Error> {
let range = binned_range.binned_range_time().to_nano_range();
@@ -101,7 +104,7 @@ fn timebinned_to_collectable(
pub async fn timebinned_json(
query: BinnedQuery,
ch_conf: &ChannelTypeConfigGen,
ch_conf: ChannelTypeConfigGen,
cluster: Cluster,
) -> Result<JsonValue, Error> {
let deadline = Instant::now().checked_add(query.timeout_value()).unwrap();