This commit is contained in:
Dominik Werder
2024-09-09 17:04:20 +02:00
parent 490c1ed0a0
commit 741c1380c7
25 changed files with 638 additions and 85 deletions

View File

@@ -239,7 +239,7 @@ impl Stream for EventChunkerMultifile {
chunkers.push(Box::pin(chunker) as _);
}
}
let merged = Merger::new(chunkers, self.out_max_len);
let merged = Merger::new(chunkers, Some(self.out_max_len as u32));
let filtered = RangeFilter2::new(merged, self.range.clone(), self.expand);
self.evs = Some(Box::pin(filtered));
Ready(Some(Ok(StreamItem::Log(item))))

View File

@@ -99,7 +99,7 @@ impl Stream for MergedBlobsFromRemotes {
if c1 == self.tcp_establish_futs.len() {
let inps = self.nodein.iter_mut().map(|k| k.take().unwrap()).collect();
// TODO set out_max_len dynamically
let s1 = Merger::new(inps, 128);
let s1 = Merger::new(inps, Some(128));
self.merged = Some(Box::pin(s1));
}
continue 'outer;

View File

@@ -23,6 +23,10 @@ use netpod::NodeConfigCached;
use netpod::ReqCtx;
use nodenet::client::OpenBoxedBytesViaHttp;
use query::api4::binned::BinnedQuery;
use scyllaconn::bincache::ScyllaCacheReadProvider;
use scyllaconn::worker::ScyllaQueue;
use std::sync::Arc;
use streams::timebin::CacheReadProvider;
use tracing::Instrument;
use url::Url;
@@ -71,7 +75,7 @@ impl BinnedHandler {
if req.method() != Method::GET {
return Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(body_empty())?);
}
match binned(req, ctx, &shared_res.pgqueue, ncc).await {
match binned(req, ctx, &shared_res.pgqueue, shared_res.scyqueue.clone(), ncc).await {
Ok(ret) => Ok(ret),
Err(e) => match e {
Error::ChannelNotFound => {
@@ -91,7 +95,13 @@ impl BinnedHandler {
}
}
async fn binned(req: Requ, ctx: &ReqCtx, pgqueue: &PgQueue, ncc: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn binned(
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
scyqueue: Option<ScyllaQueue>,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let url = req_uri_to_url(req.uri()).map_err(|e| Error::BadQuery(e.to_string()))?;
if req
.uri()
@@ -101,7 +111,7 @@ async fn binned(req: Requ, ctx: &ReqCtx, pgqueue: &PgQueue, ncc: &NodeConfigCach
Err(Error::ServerError)?;
}
if accepts_json_or_all(&req.headers()) {
Ok(binned_json(url, req, ctx, pgqueue, ncc).await?)
Ok(binned_json(url, req, ctx, pgqueue, scyqueue, ncc).await?)
} else if accepts_octets(&req.headers()) {
Ok(error_response(
format!("binary binned data not yet available"),
@@ -118,6 +128,7 @@ async fn binned_json(
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
scyqueue: Option<ScyllaQueue>,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
debug!("{:?}", req);
@@ -143,8 +154,11 @@ async fn binned_json(
debug!("begin");
});
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let open_bytes = Box::pin(open_bytes);
let item = streams::timebinnedjson::timebinned_json(query, ch_conf, ctx, open_bytes)
let open_bytes = Arc::pin(open_bytes);
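// The Scylla-backed bin cache is optional: a provider is only constructed
// when the node has a Scylla queue configured, and the binned stream falls
// back to the uncached path when this stays None.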
let cache_read_provider = scyqueue
.map(|qu| ScyllaCacheReadProvider::new(qu))
.map(|x| Arc::new(x) as Arc<dyn CacheReadProvider>);
let item = streams::timebinnedjson::timebinned_json(query, ch_conf, ctx, open_bytes, cache_read_provider)
.instrument(span1)
.await
.map_err(|e| Error::BinnedStream(e))?;

View File

@@ -36,6 +36,7 @@ use netpod::APP_JSON_FRAMED;
use netpod::HEADER_NAME_REQUEST_ID;
use nodenet::client::OpenBoxedBytesViaHttp;
use query::api4::events::PlainEventsQuery;
use std::sync::Arc;
use streams::instrument::InstrumentStream;
use tracing::Instrument;
@@ -174,7 +175,8 @@ async fn plain_events_cbor_framed(
) -> Result<StreamResponse, Error> {
debug!("plain_events_cbor_framed {ch_conf:?} {req:?}");
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let stream = streams::plaineventscbor::plain_events_cbor_stream(&evq, ch_conf, ctx, Box::pin(open_bytes)).await?;
let open_bytes = Arc::pin(open_bytes);
let stream = streams::plaineventscbor::plain_events_cbor_stream(&evq, ch_conf, ctx, open_bytes).await?;
let stream = bytes_chunks_to_framed(stream);
let logspan = if evq.log_level() == "trace" {
trace!("enable trace for handler");
@@ -202,7 +204,8 @@ async fn plain_events_json_framed(
) -> Result<StreamResponse, Error> {
debug!("plain_events_json_framed {ch_conf:?} {req:?}");
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let stream = streams::plaineventsjson::plain_events_json_stream(&evq, ch_conf, ctx, Box::pin(open_bytes)).await?;
let open_bytes = Arc::pin(open_bytes);
let stream = streams::plaineventsjson::plain_events_json_stream(&evq, ch_conf, ctx, open_bytes).await?;
let stream = bytes_chunks_to_len_framed_str(stream);
let ret = response(StatusCode::OK)
.header(CONTENT_TYPE, APP_JSON_FRAMED)
@@ -224,9 +227,9 @@ async fn plain_events_json(
// TODO handle None case better and return 404
debug!("{self_name} chconf_from_events_quorum: {ch_conf:?}");
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let open_bytes = Arc::pin(open_bytes);
let item =
streams::plaineventsjson::plain_events_json(&evq, ch_conf, ctx, &ncc.node_config.cluster, Box::pin(open_bytes))
.await;
streams::plaineventsjson::plain_events_json(&evq, ch_conf, ctx, &ncc.node_config.cluster, open_bytes).await;
debug!("{self_name} returned {}", item.is_ok());
let item = match item {
Ok(item) => item,

View File

@@ -57,6 +57,7 @@ pub trait AppendEmptyBin {
fn append_empty_bin(&mut self, ts1: u64, ts2: u64);
}
// TODO rename to make it clear that this moves. Better to use drain-into or something similar.
pub trait AppendAllFrom {
fn append_all_from(&mut self, src: &mut Self);
}

View File

@@ -150,6 +150,17 @@ impl<NTY: ScalarOps> BinsDim0<NTY> {
}
true
}
// TODO make this part of a new bins trait, similar to the Events trait.
// TODO check for error?
pub fn drain_into(&mut self, dst: &mut Self, range: Range<usize>) {
dst.ts1s.extend(self.ts1s.drain(range.clone()));
dst.ts2s.extend(self.ts2s.drain(range.clone()));
dst.counts.extend(self.counts.drain(range.clone()));
dst.mins.extend(self.mins.drain(range.clone()));
dst.maxs.extend(self.maxs.drain(range.clone()));
dst.avgs.extend(self.avgs.drain(range.clone()));
}
}
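A minimal standalone sketch of the drain semantics introduced above, reduced to two of the columns (the real BinsDim0 keeps six index-aligned vectors):

use std::ops::Range;

// Simplified mirror of BinsDim0::drain_into: the requested range is moved
// into dst and removed from self, keeping all columns index-aligned.
struct Cols {
    ts1s: Vec<u64>,
    ts2s: Vec<u64>,
}

impl Cols {
    fn drain_into(&mut self, dst: &mut Self, range: Range<usize>) {
        dst.ts1s.extend(self.ts1s.drain(range.clone()));
        dst.ts2s.extend(self.ts2s.drain(range));
    }
}

fn main() {
    let mut a = Cols { ts1s: vec![0, 10, 20], ts2s: vec![10, 20, 30] };
    let mut b = Cols { ts1s: Vec::new(), ts2s: Vec::new() };
    a.drain_into(&mut b, 0..2);
    assert_eq!(a.ts1s, [20]);
    assert_eq!(b.ts1s, [0, 10]);
}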
impl<NTY> AsAnyRef for BinsDim0<NTY>
@@ -301,8 +312,39 @@ impl<NTY: ScalarOps> TimeBinnableType for BinsDim0<NTY> {
}
#[derive(Debug)]
pub struct BinsDim0TimeBinnerTy<STY> {
_t1: std::marker::PhantomData<STY>,
pub struct BinsDim0TimeBinnerTy<STY>
where
STY: ScalarOps,
{
ts1now: TsNano,
binrange: BinnedRange<TsNano>,
do_time_weight: bool,
emit_empty_bins: bool,
range_complete: bool,
buf: <Self as TimeBinnerTy>::Output,
out: <Self as TimeBinnerTy>::Output,
bins_ready_count: usize,
}
impl<STY> BinsDim0TimeBinnerTy<STY>
where
STY: ScalarOps,
{
pub fn new(binrange: BinnedRange<TsNano>, do_time_weight: bool, emit_empty_bins: bool) -> Self {
// let ts1now = TsNano::from_ns(binrange.bin_off * binrange.bin_len.ns());
// let ts2 = ts1.add_dt_nano(binrange.bin_len.to_dt_nano());
let buf = <Self as TimeBinnerTy>::Output::empty();
Self {
ts1now: TsNano::from_ns(binrange.full_range().beg()),
binrange,
do_time_weight,
emit_empty_bins,
range_complete: false,
buf,
out: <Self as TimeBinnerTy>::Output::empty(),
bins_ready_count: 0,
}
}
}
impl<STY> TimeBinnerTy for BinsDim0TimeBinnerTy<STY>
@@ -313,35 +355,36 @@ where
type Output = BinsDim0<STY>;
fn ingest(&mut self, item: &mut Self::Input) {
todo!()
// item.ts1s;
todo!("TimeBinnerTy::ingest")
}
fn set_range_complete(&mut self) {
todo!()
self.range_complete = true;
}
fn bins_ready_count(&self) -> usize {
todo!()
self.bins_ready_count
}
fn bins_ready(&mut self) -> Option<Self::Output> {
todo!()
todo!("TimeBinnerTy::bins_ready")
}
fn push_in_progress(&mut self, push_empty: bool) {
todo!()
todo!("TimeBinnerTy::push_in_progress")
}
fn cycle(&mut self) {
todo!()
todo!("TimeBinnerTy::cycle")
}
fn empty(&self) -> Option<Self::Output> {
todo!()
todo!("TimeBinnerTy::empty")
}
fn append_empty_until_end(&mut self) {
todo!()
todo!("TimeBinnerTy::append_empty_until_end")
}
}
@@ -354,7 +397,10 @@ impl<STY: ScalarOps> TimeBinnableTy for BinsDim0<STY> {
do_time_weight: bool,
emit_empty_bins: bool,
) -> Self::TimeBinner {
todo!()
match binrange {
BinnedRangeEnum::Time(binrange) => BinsDim0TimeBinnerTy::new(binrange, do_time_weight, emit_empty_bins),
BinnedRangeEnum::Pulse(_) => todo!("TimeBinnableTy for BinsDim0 Pulse"),
}
}
}

View File

@@ -101,14 +101,14 @@ impl<T> Merger<T>
where
T: Mergeable,
{
pub fn new(inps: Vec<MergeInp<T>>, out_max_len: usize) -> Self {
pub fn new(inps: Vec<MergeInp<T>>, out_max_len: Option<u32>) -> Self {
let n = inps.len();
Self {
inps: inps.into_iter().map(|x| Some(x)).collect(),
items: (0..n).into_iter().map(|_| None).collect(),
out: None,
do_clear_out: false,
out_max_len,
out_max_len: out_max_len.unwrap_or(1000) as usize,
range_complete: vec![false; n],
out_of_band_queue: VecDeque::new(),
log_queue: VecDeque::new(),

View File

@@ -86,7 +86,7 @@ fn items_merge_00() {
let v1 = ChannelEvents::Events(evs1);
let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
let mut merger = Merger::new(vec![stream0, stream1], 8);
let mut merger = Merger::new(vec![stream0, stream1], Some(8));
while let Some(item) = merger.next().await {
eprintln!("{item:?}");
}
@@ -109,7 +109,7 @@ fn items_merge_01() {
let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
let mut merger = Merger::new(vec![stream0, stream1, stream2], 8);
let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8));
let mut total_event_count = 0;
while let Some(item) = merger.next().await {
eprintln!("{item:?}");
@@ -144,7 +144,7 @@ fn items_merge_02() {
let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
let mut merger = Merger::new(vec![stream0, stream1, stream2], 8);
let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8));
let mut total_event_count = 0;
while let Some(item) = merger.next().await {
eprintln!("{item:?}");
@@ -187,7 +187,7 @@ fn merge_00() {
let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
let inp2 = futures_util::stream::iter(inp2);
let inp2 = Box::pin(inp2);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], 32);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(32));
// Expect an empty first item.
let item = merger.next().await;
@@ -230,7 +230,7 @@ fn merge_01() {
let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
let inp2 = futures_util::stream::iter(inp2);
let inp2 = Box::pin(inp2);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], 10);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10));
// Expect an empty first item.
let item = merger.next().await;
@@ -323,7 +323,7 @@ fn merge_02() {
let inp2: Vec<Sitemty<ChannelEvents>> = inp2_events_a;
let inp2 = futures_util::stream::iter(inp2);
let inp2 = Box::pin(inp2);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], 10);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10));
// Expect an empty first item.
let item = merger.next().await;
@@ -365,7 +365,7 @@ fn bin_00() {
let inp1 = futures_util::stream::iter(inp1);
let inp1 = Box::pin(inp1);
let inp2 = Box::pin(futures_util::stream::empty()) as _;
let stream = crate::merger::Merger::new(vec![inp1, inp2], 32);
let stream = crate::merger::Merger::new(vec![inp1, inp2], Some(32));
let range = NanoRange {
beg: SEC * 0,
end: SEC * 100,
@@ -413,7 +413,7 @@ fn bin_01() {
let inp1 = futures_util::stream::iter(inp1);
let inp1 = Box::pin(inp1);
let inp2 = Box::pin(futures_util::stream::empty()) as _;
let stream = crate::merger::Merger::new(vec![inp1, inp2], 32);
let stream = crate::merger::Merger::new(vec![inp1, inp2], Some(32));
// covering_range result is subject to adjustments; instead, choose bin edges manually
let range = NanoRange {
beg: TSBASE + SEC * 1,

View File

@@ -1877,6 +1877,10 @@ impl TsNano {
TsMs::from_ms_u64(self.ms())
}
pub const fn to_dt_nano(self) -> DtNano {
DtNano::from_ns(self.0)
}
pub const fn to_dt_ms(self) -> DtMs {
DtMs::from_ms_u64(self.ms())
}

View File

@@ -83,7 +83,7 @@ pub struct BinnedQuery {
#[serde(default, skip_serializing_if = "Option::is_none")]
disk_stats_every: Option<ByteSize>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub merger_out_len_max: Option<usize>,
pub merger_out_len_max: Option<u32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
test_do_wasm: Option<String>,
#[serde(default)]
@@ -159,8 +159,8 @@ impl BinnedQuery {
self.subgrids.as_ref().map(|x| x.as_slice())
}
pub fn merger_out_len_max(&self) -> usize {
self.merger_out_len_max.unwrap_or(1024)
pub fn merger_out_len_max(&self) -> Option<u32> {
self.merger_out_len_max
}
pub fn set_series_id(&mut self, series: u64) {

View File

@@ -59,7 +59,7 @@ pub struct PlainEventsQuery {
#[serde(default, skip_serializing_if = "Option::is_none")]
test_do_wasm: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
merger_out_len_max: Option<usize>,
merger_out_len_max: Option<u32>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
create_errors: Vec<String>,
#[serde(default)]
@@ -156,8 +156,8 @@ impl PlainEventsQuery {
&self.event_delay
}
pub fn merger_out_len_max(&self) -> usize {
self.merger_out_len_max.unwrap_or(1024)
pub fn merger_out_len_max(&self) -> Option<u32> {
self.merger_out_len_max
}
pub fn do_test_main_error(&self) -> bool {
@@ -417,6 +417,13 @@ pub struct EventsSubQuerySettings {
queue_len_disk_io: Option<usize>,
create_errors: Vec<String>,
use_rt: Option<RetentionTime>,
merger_out_len_max: Option<u32>,
}
impl EventsSubQuerySettings {
pub fn merger_out_len_max(&self) -> Option<u32> {
self.merger_out_len_max
}
}
impl Default for EventsSubQuerySettings {
@@ -431,6 +438,7 @@ impl Default for EventsSubQuerySettings {
queue_len_disk_io: None,
create_errors: Vec::new(),
use_rt: None,
merger_out_len_max: None,
}
}
}
@@ -448,6 +456,7 @@ impl From<&PlainEventsQuery> for EventsSubQuerySettings {
queue_len_disk_io: None,
create_errors: value.create_errors.clone(),
use_rt: value.use_rt(),
merger_out_len_max: value.merger_out_len_max(),
}
}
}
@@ -466,6 +475,7 @@ impl From<&BinnedQuery> for EventsSubQuerySettings {
queue_len_disk_io: None,
create_errors: Vec::new(),
use_rt: value.use_rt(),
merger_out_len_max: value.merger_out_len_max(),
}
}
}
@@ -484,6 +494,7 @@ impl From<&Api1Query> for EventsSubQuerySettings {
queue_len_disk_io: Some(disk_io_tune.read_queue_len),
create_errors: Vec::new(),
use_rt: None,
merger_out_len_max: None,
}
}
}
@@ -595,6 +606,10 @@ impl EventsSubQuery {
pub fn use_rt(&self) -> Option<RetentionTime> {
self.settings.use_rt.clone()
}
pub fn merger_out_len_max(&self) -> Option<u32> {
self.settings.merger_out_len_max()
}
}
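With these changes the cap travels unchanged from the HTTP query down to the merger, and the default is applied in exactly one place. A minimal sketch of the resulting contract (hypothetical mini-model; the real types are BinnedQuery/PlainEventsQuery, EventsSubQuerySettings and Merger):

// Hypothetical mini-model of the Option<u32> propagation introduced here.
#[derive(Default)]
struct Settings {
    merger_out_len_max: Option<u32>,
}

fn resolve_out_max_len(settings: &Settings) -> usize {
    // Mirrors Merger::new: the fallback of 1000 lives only here.
    settings.merger_out_len_max.unwrap_or(1000) as usize
}

fn main() {
    assert_eq!(resolve_out_max_len(&Settings { merger_out_len_max: Some(128) }), 128);
    assert_eq!(resolve_out_max_len(&Settings::default()), 1000);
}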
#[derive(Debug, Serialize, Deserialize)]

View File

@@ -148,6 +148,10 @@ impl TransformQuery {
pub fn enum_as_string(&self) -> Option<bool> {
self.enum_as_string.clone()
}
pub fn do_wasm(&self) -> Option<&str> {
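// Stub for now: always None, so make_sub_query never sets wasm1
// (see the tcprawclient hunk below).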
None
}
}
impl FromUrl for TransformQuery {

View File

@@ -17,5 +17,6 @@ netpod = { path = "../netpod" }
query = { path = "../query" }
items_0 = { path = "../items_0" }
items_2 = { path = "../items_2" }
streams = { path = "../streams" }
series = { path = "../../../daqingest/series" }
taskrun = { path = "../taskrun" }

View File

@@ -1,6 +1,7 @@
#![allow(unused)]
use crate::errconv::ErrConv;
use crate::worker::ScyllaQueue;
use err::Error;
use futures_util::Future;
use futures_util::Stream;
@@ -519,3 +520,20 @@ pub async fn pre_binned_value_stream(
err::todo();
Ok(Box::pin(futures_util::stream::iter([Ok(res.0)])))
}
pub struct ScyllaCacheReadProvider {
scyqueue: ScyllaQueue,
}
impl ScyllaCacheReadProvider {
pub fn new(scyqueue: ScyllaQueue) -> Self {
Self { scyqueue }
}
}
impl streams::timebin::CacheReadProvider for ScyllaCacheReadProvider {
fn read(&self) -> streams::timebin::cached::reader::Reading {
warn!("impl CacheReadProvider for ScyllaCacheReadProvider");
todo!("impl CacheReadProvider for ScyllaCacheReadProvider")
}
}

View File

@@ -34,13 +34,13 @@ pub async fn dyn_events_stream(
open_bytes: OpenBoxedBytesStreamsBox,
) -> Result<DynEventsStream, Error> {
trace!("dyn_events_stream {}", evq.summary_short());
use query::api4::events::EventsSubQuerySettings;
let subq = make_sub_query(
ch_conf,
evq.range().clone(),
evq.one_before_range(),
evq.transform().clone(),
evq.test_do_wasm(),
evq,
EventsSubQuerySettings::from(evq),
evq.log_level().into(),
ctx,
);

View File

@@ -36,6 +36,7 @@ use query::transform::TransformQuery;
use serde::de::DeserializeOwned;
use std::fmt;
use std::pin::Pin;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
@@ -45,11 +46,12 @@ pub trait OpenBoxedBytesStreams {
fn open(
&self,
subq: EventsSubQuery,
// TODO take by Arc
ctx: ReqCtx,
) -> Pin<Box<dyn Future<Output = Result<Vec<BoxedBytesStream>, Error>> + Send>>;
}
pub type OpenBoxedBytesStreamsBox = Pin<Box<dyn OpenBoxedBytesStreams + Send>>;
pub type OpenBoxedBytesStreamsBox = Pin<Arc<dyn OpenBoxedBytesStreams + Send + Sync>>;
pub fn make_node_command_frame(query: EventsSubQuery) -> Result<EventQueryJsonStringFrame, Error> {
let obj = Frame1Parts::new(query);
@@ -202,7 +204,6 @@ pub fn make_sub_query<SUB>(
range: SeriesRange,
one_before_range: bool,
transform: TransformQuery,
test_do_wasm: Option<&str>,
sub: SUB,
log_level: String,
ctx: &ReqCtx,
@@ -210,8 +211,8 @@ pub fn make_sub_query<SUB>(
where
SUB: Into<EventsSubQuerySettings>,
{
let mut select = EventsSubQuerySelect::new(ch_conf, range, one_before_range, transform);
if let Some(wasm1) = test_do_wasm {
let mut select = EventsSubQuerySelect::new(ch_conf, range, one_before_range, transform.clone());
if let Some(wasm1) = transform.do_wasm() {
select.set_wasm1(wasm1.into());
}
let settings = sub.into();

View File

@@ -45,7 +45,7 @@ fn merge_mergeable_00() -> Result<(), Error> {
let fut = async {
let inp0 = inmem_test_events_d0_i32_00();
let inp1 = inmem_test_events_d0_i32_01();
let _merger = items_2::merger::Merger::new(vec![inp0, inp1], 4);
let _merger = items_2::merger::Merger::new(vec![inp0, inp1], Some(4));
Ok(())
};
runfut(fut)

View File

@@ -21,6 +21,7 @@ use netpod::Shape;
use query::api4::events::EventsSubQuery;
use query::api4::events::PlainEventsQuery;
use std::pin::Pin;
use std::sync::Arc;
#[test]
fn merged_events_cbor() {
@@ -46,7 +47,7 @@ async fn merged_events_inner() -> Result<(), Error> {
));
let evq = PlainEventsQuery::new(channel, range);
let open_bytes = StreamOpener::new();
let open_bytes = Box::pin(open_bytes);
let open_bytes = Arc::pin(open_bytes);
let stream = plain_events_cbor_stream(&evq, ch_conf.clone().into(), &ctx, open_bytes)
.await
.unwrap();

View File

@@ -1,8 +1,10 @@
mod basic;
mod cached;
pub mod cached;
mod fromlayers;
mod gapfill;
mod grid;
pub(super) use basic::TimeBinnedStream;
pub(super) use fromlayers::TimeBinnedFromLayers;
pub use cached::reader::CacheReadProvider;

View File

@@ -252,6 +252,7 @@ where
trace2!("================= POLL");
loop {
break if self.complete {
error!("TimeBinnedStream poll on complete");
panic!("TimeBinnedStream poll on complete")
} else if self.done {
self.complete = true;

View File

@@ -8,6 +8,7 @@ use netpod::DtMs;
use netpod::TsNano;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
@@ -23,16 +24,18 @@ impl Future for Reading {
}
}
pub trait CacheReadProvider: Send {
pub trait CacheReadProvider: Send + Sync {
fn read(&self) -> Reading;
}
#[derive(Debug, ThisError)]
#[cstm(name = "BinCachedReader")]
pub enum Error {}
pub enum Error {
TodoImpl,
}
pub struct CachedReader {
cache_read_provider: Box<dyn CacheReadProvider>,
cache_read_provider: Arc<dyn CacheReadProvider>,
}
impl CachedReader {
@@ -40,7 +43,7 @@ impl CachedReader {
series: u64,
bin_len: DtMs,
range: BinnedRange<TsNano>,
cache_read_provider: Box<dyn CacheReadProvider>,
cache_read_provider: Arc<dyn CacheReadProvider>,
) -> Result<Self, Error> {
let ret = Self { cache_read_provider };
Ok(ret)
@@ -52,6 +55,7 @@ impl Stream for CachedReader {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
// Ready(Some(Err(Error::TodoImpl)))
Ready(None)
}
}

View File

@@ -1,4 +1,5 @@
use super::cached::reader::CacheReadProvider;
use crate::tcprawclient::OpenBoxedBytesStreamsBox;
use crate::timebin::grid::find_next_finer_bin_len;
use err::thiserror;
use err::ThisError;
@@ -14,9 +15,14 @@ use items_2::binsdim0::BinsDim0;
use netpod::log::*;
use netpod::BinnedRange;
use netpod::BinnedRangeEnum;
use netpod::ChannelTypeConfigGen;
use netpod::DtMs;
use netpod::ReqCtx;
use netpod::TsNano;
use query::api4::events::EventsSubQuerySettings;
use query::transform::TransformQuery;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
@@ -30,6 +36,12 @@ pub enum Error {
type BoxedInput = Pin<Box<dyn Stream<Item = Sitemty<BinsDim0<f32>>> + Send>>;
pub struct TimeBinnedFromLayers {
ch_conf: ChannelTypeConfigGen,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
inp: BoxedInput,
}
@@ -39,11 +51,17 @@ impl TimeBinnedFromLayers {
}
pub fn new(
ch_conf: ChannelTypeConfigGen,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
series: u64,
range: BinnedRange<TsNano>,
do_time_weight: bool,
bin_len_layers: Vec<DtMs>,
cache_read_provider: Box<dyn CacheReadProvider>,
cache_read_provider: Arc<dyn CacheReadProvider + Send>,
) -> Result<Self, Error> {
info!(
"{}::new {:?} {:?} {:?}",
@@ -55,8 +73,28 @@ impl TimeBinnedFromLayers {
let bin_len = DtMs::from_ms_u64(range.bin_len.ms());
if bin_len_layers.contains(&bin_len) {
info!("{}::new bin_len in layers", Self::type_name());
let inp = super::gapfill::GapFill::new(series, range, do_time_weight, bin_len_layers, cache_read_provider)?;
let ret = Self { inp: Box::pin(inp) };
let inp = super::gapfill::GapFill::new(
ch_conf.clone(),
transform_query.clone(),
sub.clone(),
log_level.clone(),
ctx.clone(),
open_bytes.clone(),
series,
range,
do_time_weight,
bin_len_layers,
cache_read_provider,
)?;
let ret = Self {
ch_conf,
transform_query,
sub,
log_level,
ctx,
open_bytes,
inp: Box::pin(inp),
};
Ok(ret)
} else {
match find_next_finer_bin_len(bin_len, &bin_len_layers) {
@@ -64,8 +102,14 @@ impl TimeBinnedFromLayers {
// TODO
// produce from binned sub-stream with additional binner.
let range = BinnedRange::from_nano_range(range.to_nano_range(), finer);
info!("{}::new next finer {:?} {:?}", Self::type_name(), finer, range);
warn!("{}::new next finer {:?} {:?}", Self::type_name(), finer, range);
let inp = super::gapfill::GapFill::new(
ch_conf.clone(),
transform_query.clone(),
sub.clone(),
log_level.clone(),
ctx.clone(),
open_bytes.clone(),
series,
range.clone(),
do_time_weight,
@@ -77,11 +121,19 @@ impl TimeBinnedFromLayers {
BinnedRangeEnum::Time(range),
do_time_weight,
);
let ret = Self { inp: Box::pin(inp) };
let ret = Self {
ch_conf,
transform_query,
sub,
log_level,
ctx,
open_bytes,
inp: Box::pin(inp),
};
Ok(ret)
}
None => {
info!("{}::new NO next finer", Self::type_name());
warn!("{}::new NO next finer", Self::type_name());
// TODO
// produce from events
todo!()
@@ -94,7 +146,12 @@ impl TimeBinnedFromLayers {
impl Stream for TimeBinnedFromLayers {
type Item = Sitemty<BinsDim0<f32>>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
todo!()
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
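// Pure delegation to the layered input; equivalent to self.inp.poll_next_unpin(cx).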
match self.inp.poll_next_unpin(cx) {
Ready(Some(x)) => Ready(Some(x)),
Ready(None) => Ready(None),
Pending => Pending,
}
}
}

View File

@@ -1,14 +1,24 @@
use super::cached::reader::CacheReadProvider;
use crate::tcprawclient::OpenBoxedBytesStreamsBox;
use err::thiserror;
use err::ThisError;
use futures_util::Stream;
use futures_util::TryStreamExt;
use futures_util::StreamExt;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_2::binsdim0::BinsDim0;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::BinnedRange;
use netpod::ChannelTypeConfigGen;
use netpod::DtMs;
use netpod::ReqCtx;
use netpod::TsNano;
use query::api4::events::EventsSubQuerySettings;
use query::transform::TransformQuery;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
@@ -16,32 +26,201 @@ use std::task::Poll;
#[cstm(name = "BinCachedGapFill")]
pub enum Error {
CacheReader(#[from] super::cached::reader::Error),
GapFromFiner,
}
type INP = Pin<Box<dyn Stream<Item = Result<BinsDim0<f32>, Error>> + Send>>;
type INP = Pin<Box<dyn Stream<Item = Sitemty<BinsDim0<f32>>> + Send>>;
// Try to read from the cache for the given bin_len.
// For gaps in the stream, construct an alternative input from a finer bin_len with a binner.
pub struct GapFill {
ch_conf: ChannelTypeConfigGen,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
series: u64,
range: BinnedRange<TsNano>,
do_time_weight: bool,
bin_len_layers: Vec<DtMs>,
inp: INP,
inp_buf: Option<BinsDim0<f32>>,
inp_finer: Option<INP>,
last_bin_ts2: Option<TsNano>,
exp_finer_range: NanoRange,
cache_read_provider: Arc<dyn CacheReadProvider>,
}
impl GapFill {
// bin_len of the given range must be a cacheable bin_len.
pub fn new(
ch_conf: ChannelTypeConfigGen,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
series: u64,
range: BinnedRange<TsNano>,
do_time_weight: bool,
bin_len_layers: Vec<DtMs>,
cache_read_provider: Box<dyn CacheReadProvider>,
cache_read_provider: Arc<dyn CacheReadProvider>,
) -> Result<Self, Error> {
// super::fromlayers::TimeBinnedFromLayers::new(series, range, do_time_weight, bin_len_layers)?;
let inp =
super::cached::reader::CachedReader::new(series, range.bin_len.to_dt_ms(), range, cache_read_provider)?
.map_err(Error::from);
let ret = Self { inp: Box::pin(inp) };
let inp = super::cached::reader::CachedReader::new(
series,
range.bin_len.to_dt_ms(),
range.clone(),
cache_read_provider.clone(),
)?
.map(|x| match x {
Ok(x) => Ok(StreamItem::DataItem(RangeCompletableItem::Data(x))),
Err(e) => Err(::err::Error::from_string(e)),
});
let ret = Self {
ch_conf,
transform_query,
sub,
log_level,
ctx,
open_bytes,
series,
range,
do_time_weight,
bin_len_layers,
inp: Box::pin(inp),
inp_buf: None,
inp_finer: None,
last_bin_ts2: None,
// TODO just dummy:
exp_finer_range: NanoRange { beg: 0, end: 0 },
cache_read_provider,
};
Ok(ret)
}
fn handle_bins_finer(mut self: Pin<&mut Self>, bins: BinsDim0<f32>) -> Result<BinsDim0<f32>, Error> {
for (&ts1, &ts2) in bins.ts1s.iter().zip(&bins.ts2s) {
if let Some(last) = self.last_bin_ts2 {
if ts1 != last.ns() {
return Err(Error::GapFromFiner);
}
}
self.last_bin_ts2 = Some(TsNano::from_ns(ts2));
}
// TODO keep bins from the finer source.
// Only write bins to cache once a later non-empty bin confirms them.
// TODO make sure that the input does not send "made-up" empty future bins.
// On the other hand, what if the requested range lies in the past but the channel has been silent ever since?
// Then we should in principle know that from the is-alive status checking.
// So, until then, allow made-up bins?
// Maybe, for now, only write bins that precede some later non-zero-count bin; that is the only safe approach.
Ok(bins)
}
fn handle_bins(mut self: Pin<&mut Self>, bins: BinsDim0<f32>) -> Result<BinsDim0<f32>, Error> {
// TODO could use an interface that iterates over bin items exposing only
// edge and count information, with all remaining values opaque.
for (i, (&ts1, &ts2)) in bins.ts1s.iter().zip(&bins.ts2s).enumerate() {
if let Some(last) = self.last_bin_ts2 {
if ts1 != last.ns() {
let mut ret = <BinsDim0<f32> as items_0::Empty>::empty();
let mut bins = bins;
bins.drain_into(&mut ret, 0..i);
self.inp_buf = Some(bins);
let range = NanoRange {
beg: last.ns(),
end: ts1,
};
self.setup_inp_finer(range)?;
return Ok(ret);
}
}
self.last_bin_ts2 = Some(TsNano::from_ns(ts2));
}
Ok(bins)
}
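The contiguity check above, isolated into a self-contained sketch over plain u64 nanosecond edges (hypothetical helper, not part of this commit):

// Returns the index of the first bin whose left edge does not continue from
// last_ts2, i.e. where a gap begins; None if all bins are contiguous.
fn first_gap(ts1s: &[u64], ts2s: &[u64], mut last_ts2: Option<u64>) -> Option<usize> {
    for (i, (&ts1, &ts2)) in ts1s.iter().zip(ts2s).enumerate() {
        if let Some(last) = last_ts2 {
            if ts1 != last {
                return Some(i);
            }
        }
        last_ts2 = Some(ts2);
    }
    None
}

// With last_ts2 = Some(30): bins [30..40, 50..60] give Some(1), because the
// second bin starts at 50 although the previous one ended at 40. handle_bins
// then drains bins 0..1 into the returned prefix, buffers the remainder in
// inp_buf, and fills 40..50 via setup_inp_finer.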
fn setup_inp_finer(mut self: Pin<&mut Self>, range: NanoRange) -> Result<(), Error> {
// Set up range to fill from finer.
self.exp_finer_range = range.clone();
if let Some(bin_len_finer) =
super::grid::find_next_finer_bin_len(self.range.bin_len.to_dt_ms(), &self.bin_len_layers)
{
let range_finer = BinnedRange::from_nano_range(range, bin_len_finer);
let inp_finer = GapFill::new(
self.ch_conf.clone(),
self.transform_query.clone(),
self.sub.clone(),
self.log_level.clone(),
self.ctx.clone(),
self.open_bytes.clone(),
self.series,
range_finer.clone(),
self.do_time_weight,
self.bin_len_layers.clone(),
self.cache_read_provider.clone(),
)?;
let stream = Box::pin(inp_finer);
let do_time_weight = self.do_time_weight;
let range = BinnedRange::from_nano_range(range_finer.full_range(), self.range.bin_len.to_dt_ms());
let stream =
super::basic::TimeBinnedStream::new(stream, netpod::BinnedRangeEnum::Time(range), do_time_weight);
self.inp_finer = Some(Box::pin(stream));
} else {
let do_time_weight = self.do_time_weight;
let one_before_range = true;
let range = BinnedRange::from_nano_range(range, self.range.bin_len.to_dt_ms());
let stream = crate::timebinnedjson::TimeBinnableStream::new(
range.full_range(),
one_before_range,
self.ch_conf.clone(),
self.transform_query.clone(),
self.sub.clone(),
self.log_level.clone(),
self.ctx.clone(),
self.open_bytes.clone(),
);
// let stream: Pin<Box<dyn items_0::transform::TimeBinnableStreamTrait>> = stream;
let stream = Box::pin(stream);
// TODO rename TimeBinnedStream to make it clearer that it is the component which initiates the time binning.
let stream =
super::basic::TimeBinnedStream::new(stream, netpod::BinnedRangeEnum::Time(range), do_time_weight);
let stream = stream.map(|item| match item {
Ok(x) => match x {
StreamItem::DataItem(x) => match x {
RangeCompletableItem::Data(mut x) => {
// TODO need a typed time binner
if let Some(x) = x.as_any_mut().downcast_mut::<BinsDim0<f32>>() {
let y = x.clone();
Ok(StreamItem::DataItem(RangeCompletableItem::Data(y)))
} else {
Err(::err::Error::with_msg_no_trace(
"GapFill expects incoming BinsDim0<f32>",
))
}
}
RangeCompletableItem::RangeComplete => {
Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))
}
},
StreamItem::Log(x) => Ok(StreamItem::Log(x)),
StreamItem::Stats(x) => Ok(StreamItem::Stats(x)),
},
Err(e) => Err(e),
});
// let stream: Pin<
// Box<dyn Stream<Item = Sitemty<Box<dyn items_0::timebin::TimeBinned>>> + Send>,
// > = Box::pin(stream);
self.inp_finer = Some(Box::pin(stream));
}
Ok(())
}
}
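setup_inp_finer relies on grid::find_next_finer_bin_len to pick the fallback layer. A plausible shape for that lookup over plain millisecond values (an assumption; the real function lives in crate::timebin::grid):

// Assumed behavior: return the largest cacheable bin length that is strictly
// finer (smaller) than the requested one, or None if the request is already
// at or below the finest layer.
fn find_next_finer_bin_len(bin_len_ms: u64, layers_ms: &[u64]) -> Option<u64> {
    layers_ms.iter().copied().filter(|&l| l < bin_len_ms).max()
}

// find_next_finer_bin_len(60_000, &[1_000, 10_000, 60_000]) == Some(10_000)
// find_next_finer_bin_len(1_000, &[1_000, 10_000]) == None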
impl Stream for GapFill {
@@ -49,6 +228,101 @@ impl Stream for GapFill {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
loop {
break if let Some(inp_finer) = self.inp_finer.as_mut() {
// TODO
// also detect gaps here: a gap from the finer input is an error.
// On CacheUsage Use or Recreate:
// write these bins to cache because we did not find them in the cache before.
match inp_finer.poll_next_unpin(cx) {
Ready(Some(Ok(x))) => match x {
StreamItem::DataItem(RangeCompletableItem::Data(x)) => {
match self.as_mut().handle_bins_finer(x) {
Ok(x) => Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(x))))),
Err(e) => Ready(Some(Err(::err::Error::from_string(e)))),
}
}
StreamItem::DataItem(RangeCompletableItem::RangeComplete) => {
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))))
}
StreamItem::Log(x) => Ready(Some(Ok(StreamItem::Log(x)))),
StreamItem::Stats(x) => Ready(Some(Ok(StreamItem::Stats(x)))),
},
Ready(Some(Err(e))) => Ready(Some(Err(::err::Error::from_string(e)))),
Ready(None) => {
if let Some(j) = self.last_bin_ts2 {
if j.ns() != self.exp_finer_range.end() {
Ready(Some(Err(::err::Error::from_string(
"finer input didn't deliver to the end",
))))
} else {
self.last_bin_ts2 = None;
self.exp_finer_range = NanoRange { beg: 0, end: 0 };
self.inp_finer = None;
continue;
}
} else {
Ready(Some(Err(::err::Error::from_string("finer input delivered nothing"))))
}
}
Pending => Pending,
}
} else if let Some(x) = self.inp_buf.take() {
match self.as_mut().handle_bins_finer(x) {
Ok(x) => Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(x))))),
Err(e) => Ready(Some(Err(::err::Error::from_string(e)))),
}
} else {
match self.inp.poll_next_unpin(cx) {
Ready(Some(Ok(x))) => match x {
StreamItem::DataItem(RangeCompletableItem::Data(x)) => match self.as_mut().handle_bins(x) {
Ok(x) => Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(x))))),
Err(e) => Ready(Some(Err(::err::Error::from_string(e)))),
},
StreamItem::DataItem(RangeCompletableItem::RangeComplete) => {
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))))
}
StreamItem::Log(x) => Ready(Some(Ok(StreamItem::Log(x)))),
StreamItem::Stats(x) => Ready(Some(Ok(StreamItem::Stats(x)))),
},
Ready(Some(Err(e))) => Ready(Some(Err(::err::Error::from_string(e)))),
Ready(None) => {
// TODO assert that we have emitted up to the requested range.
// If not, request the remaining range from "finer" input.
if let Some(j) = self.last_bin_ts2 {
if j.ns() != self.exp_finer_range.end() {
let range = NanoRange {
beg: j.ns(),
end: self.range.full_range().end(),
};
match self.as_mut().setup_inp_finer(range) {
Ok(()) => {
continue;
}
Err(e) => Ready(Some(Err(::err::Error::from_string(e)))),
}
} else {
// self.last_bin_ts2 = None;
// self.exp_finer_range = NanoRange { beg: 0, end: 0 };
// self.inp_finer = None;
// continue;
Ready(None)
}
} else {
warn!("----- NOTHING IN CACHE, SETUP FULL FROM FINER");
let range = self.range.full_range();
match self.as_mut().setup_inp_finer(range) {
Ok(()) => {
continue;
}
Err(e) => Ready(Some(Err(::err::Error::from_string(e)))),
}
}
}
Pending => Pending,
}
};
}
// When do we detect a gap:
// - when the current item leaves a gap relative to the last one.
// - when we see EOS before the requested range is filled.
@@ -59,12 +333,12 @@ impl Stream for GapFill {
// When a gap is detected:
// - buffer the current item, if there is one (can also be EOS).
// - create a new producer of bins:
// - FromFiner(series, bin_len, range)
// - GapFill with finer range? FromFiner(series, bin_len, range)?
// - TimeBinnedFromLayers for a bin_len in layers would also go directly into GapFill.
// What does FromFiner bring to the table?
// It does not attempt to read the given bin_len from a cache, because we just attempted that.
// It still requires that bin_len is cacheable. (NO! it must work with the layering that I passed!)
// Then it finds the next cacheable bin_len.
// Ready(None)
todo!("poll the already created cached reader, detect and fill in gaps, send off to cache-write")
}
}
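In summary, the poll loop above applies a fixed three-way source priority; sketched in isolation (hypothetical simplification, not part of this commit):

// Hypothetical simplification of GapFill::poll_next's source selection:
// 1. drain the finer-layer fallback stream while one is active,
// 2. otherwise flush the item buffered when the gap was detected,
// 3. otherwise read from the cached-bins input and watch for new gaps.
enum Source {
    Finer,
    Buffered,
    Cached,
}

fn pick_source(has_finer: bool, has_buf: bool) -> Source {
    if has_finer {
        Source::Finer
    } else if has_buf {
        Source::Buffered
    } else {
        Source::Cached
    }
}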

View File

@@ -3,11 +3,14 @@ use crate::rangefilter2::RangeFilter2;
use crate::tcprawclient::container_stream_from_bytes_stream;
use crate::tcprawclient::make_sub_query;
use crate::tcprawclient::OpenBoxedBytesStreamsBox;
use crate::timebin::CacheReadProvider;
use crate::timebin::TimeBinnedStream;
use crate::transform::build_merged_event_transform;
use crate::transform::EventsToTimeBinnable;
use err::Error;
use futures_util::future::BoxFuture;
use futures_util::Future;
use futures_util::FutureExt;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::collect_s::Collectable;
@@ -15,6 +18,7 @@ use items_0::on_sitemty_data;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::timebin::TimeBinnable;
use items_0::timebin::TimeBinned;
use items_0::transform::TimeBinnableStreamBox;
use items_0::transform::TimeBinnableStreamTrait;
@@ -30,8 +34,13 @@ use netpod::ChannelTypeConfigGen;
use netpod::DtMs;
use netpod::ReqCtx;
use query::api4::binned::BinnedQuery;
use query::api4::events::EventsSubQuerySettings;
use query::transform::TransformQuery;
use serde_json::Value as JsonValue;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use std::time::Duration;
use std::time::Instant;
@@ -40,28 +49,30 @@ fn assert_stream_send<'u, R>(stream: impl 'u + Send + Stream<Item = R>) -> impl
stream
}
async fn timebinnable_stream(
query: BinnedQuery,
// TODO factor out; it is now also used from GapFill.
pub async fn timebinnable_stream(
range: NanoRange,
one_before_range: bool,
ch_conf: ChannelTypeConfigGen,
ctx: &ReqCtx,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
) -> Result<TimeBinnableStreamBox, Error> {
let subq = make_sub_query(
ch_conf,
range.clone().into(),
one_before_range,
query.transform().clone(),
query.test_do_wasm(),
&query,
query.log_level().into(),
ctx,
transform_query,
sub.clone(),
log_level.clone(),
&ctx,
);
let inmem_bufcap = subq.inmem_bufcap();
let _wasm1 = subq.wasm1().map(ToString::to_string);
let mut tr = build_merged_event_transform(subq.transform())?;
let bytes_streams = open_bytes.open(subq, ctx.clone()).await?;
let bytes_streams = open_bytes.open(subq, ctx.as_ref().clone()).await?;
let mut inps = Vec::new();
for s in bytes_streams {
let s = container_stream_from_bytes_stream::<ChannelEvents>(s, inmem_bufcap.clone(), "TODOdbgdesc".into())?;
@@ -70,7 +81,7 @@ async fn timebinnable_stream(
}
// TODO propagate also the max-buf-len for the first stage event reader.
// TODO use a mixture of count and byte-size as threshold.
let stream = Merger::new(inps, query.merger_out_len_max());
let stream = Merger::new(inps, sub.merger_out_len_max());
let stream = RangeFilter2::new(stream, range, one_before_range);
@@ -221,16 +232,88 @@ async fn timebinnable_stream(
Ok(TimeBinnableStreamBox(stream))
}
pub struct TimeBinnableStream {
make_stream_fut: Option<Pin<Box<dyn Future<Output = Result<TimeBinnableStreamBox, Error>> + Send>>>,
stream: Option<Pin<Box<dyn Stream<Item = Sitemty<Box<dyn TimeBinnable>>> + Send>>>,
}
impl TimeBinnableStream {
pub fn new(
range: NanoRange,
one_before_range: bool,
ch_conf: ChannelTypeConfigGen,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
// TODO take by Arc ref
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
) -> Self {
let fut = timebinnable_stream(
range,
one_before_range,
ch_conf,
transform_query,
sub,
log_level,
ctx,
open_bytes,
);
let fut = Box::pin(fut);
Self {
make_stream_fut: Some(fut),
stream: None,
}
}
}
// impl WithTransformProperties + Send
impl Stream for TimeBinnableStream {
type Item = Sitemty<Box<dyn TimeBinnable>>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
loop {
break if let Some(fut) = self.make_stream_fut.as_mut() {
match fut.poll_unpin(cx) {
Ready(x) => match x {
Ok(x) => {
self.make_stream_fut = None;
self.stream = Some(Box::pin(x));
continue;
}
Err(e) => Ready(Some(Err(e))),
},
Pending => Pending,
}
} else if let Some(fut) = self.stream.as_mut() {
match fut.poll_next_unpin(cx) {
Ready(Some(x)) => Ready(Some(x)),
Ready(None) => {
self.stream = None;
Ready(None)
}
Pending => Pending,
}
} else {
Ready(None)
};
}
}
}
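TimeBinnableStream is a lazy stream: it first drives the constructor future, then forwards items from the stream it produced. The same pattern in a self-contained miniature (generic item type, not the commit's own types):

use futures_util::{FutureExt, Stream, StreamExt};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Miniature of the lazy-stream pattern: poll the constructor future until it
// yields the inner stream, then delegate all further polls to that stream.
struct Lazy<T> {
    fut: Option<Pin<Box<dyn Future<Output = Pin<Box<dyn Stream<Item = T> + Send>>> + Send>>>,
    stream: Option<Pin<Box<dyn Stream<Item = T> + Send>>>,
}

impl<T> Stream for Lazy<T> {
    type Item = T;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<T>> {
        loop {
            if let Some(fut) = self.fut.as_mut() {
                match fut.poll_unpin(cx) {
                    Poll::Ready(stream) => {
                        self.fut = None;
                        self.stream = Some(stream);
                        // Fall through and poll the fresh stream right away.
                    }
                    Poll::Pending => return Poll::Pending,
                }
            } else if let Some(stream) = self.stream.as_mut() {
                return match stream.poll_next_unpin(cx) {
                    Poll::Ready(None) => {
                        // Drop the finished stream so it is never polled again.
                        self.stream = None;
                        Poll::Ready(None)
                    }
                    other => other,
                };
            } else {
                return Poll::Ready(None);
            }
        }
    }
}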
async fn timebinned_stream(
query: BinnedQuery,
binned_range: BinnedRangeEnum,
ch_conf: ChannelTypeConfigGen,
ctx: &ReqCtx,
open_bytes: OpenBoxedBytesStreamsBox,
cache_read_provider: Option<Arc<dyn CacheReadProvider>>,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<Box<dyn TimeBinned>>> + Send>>, Error> {
use netpod::query::CacheUsage;
match query.cache_usage() {
CacheUsage::Use | CacheUsage::Recreate => {
match (query.cache_usage(), cache_read_provider) {
(CacheUsage::Use | CacheUsage::Recreate, Some(cache_read_provider)) => {
let series = if let Some(x) = query.channel().series() {
x
} else {
@@ -256,8 +339,13 @@ async fn timebinned_stream(
// DtMs::from_ms_u64(1000 * 10),
]
};
let cache_read_provider = err::todoval();
let stream = crate::timebin::TimeBinnedFromLayers::new(
ch_conf,
query.transform().clone(),
EventsSubQuerySettings::from(&query),
query.log_level().into(),
Arc::new(ctx.clone()),
open_bytes.clone(),
series,
binned_range.binned_range_time(),
do_time_weight,
@@ -273,13 +361,23 @@ async fn timebinned_stream(
let stream: Pin<Box<dyn Stream<Item = Sitemty<Box<dyn TimeBinned>>> + Send>> = Box::pin(stream);
Ok(stream)
}
CacheUsage::Ignore => {
_ => {
let range = binned_range.binned_range_time().to_nano_range();
let do_time_weight = true;
let one_before_range = true;
let stream = timebinnable_stream(query.clone(), range, one_before_range, ch_conf, ctx, open_bytes).await?;
let stream = timebinnable_stream(
range,
one_before_range,
ch_conf,
query.transform().clone(),
(&query).into(),
query.log_level().into(),
Arc::new(ctx.clone()),
open_bytes,
)
.await?;
let stream: Pin<Box<dyn TimeBinnableStreamTrait>> = stream.0;
let stream = Box::pin(stream);
// TODO rename TimeBinnedStream to make it clearer that it is the component which initiates the time binning.
@@ -309,13 +407,22 @@ pub async fn timebinned_json(
ch_conf: ChannelTypeConfigGen,
ctx: &ReqCtx,
open_bytes: OpenBoxedBytesStreamsBox,
cache_read_provider: Option<Arc<dyn CacheReadProvider>>,
) -> Result<JsonValue, Error> {
let deadline = Instant::now() + query.timeout_content().unwrap_or(Duration::from_millis(5000));
let binned_range = BinnedRangeEnum::covering_range(query.range().clone(), query.bin_count())?;
// TODO derive better values from the query
let collect_max = 10000;
let bytes_max = 100 * collect_max;
let stream = timebinned_stream(query.clone(), binned_range.clone(), ch_conf, ctx, open_bytes).await?;
let stream = timebinned_stream(
query.clone(),
binned_range.clone(),
ch_conf,
ctx,
open_bytes,
cache_read_provider,
)
.await?;
let stream = timebinned_to_collectable(stream);
let collected = Collect::new(stream, deadline, collect_max, bytes_max, None, Some(binned_range));
let collected: BoxFuture<_> = Box::pin(collected);