WIP typechecks

This commit is contained in:
Dominik Werder
2024-10-23 19:06:36 +02:00
parent ec425198f0
commit 6429ef5631
20 changed files with 622 additions and 213 deletions

View File

@@ -0,0 +1,121 @@
use crate::tcprawclient::OpenBoxedBytesStreamsBox;
use crate::timebin::cached::reader::EventsReadProvider;
use crate::timebin::cached::reader::EventsReading;
use crate::timebin::CacheReadProvider;
use futures_util::Future;
use futures_util::FutureExt;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::streamitem::Sitemty;
use items_2::channelevents::ChannelEvents;
use netpod::ReqCtx;
use query::api4::events::EventsSubQuery;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
/// State machine for `InnerStream`: the event stream is first opened via a
/// boxed future, then drained once that future resolves.
enum StreamState {
    /// Still awaiting the future that opens the underlying event stream.
    Opening(
        Pin<
            Box<
                dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>, ::err::Error>>
                    + Send,
            >,
        >,
    ),
    /// The stream was opened successfully; items are forwarded from it.
    Reading(Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>),
}
/// Stream adapter that lazily opens the underlying event stream on first poll
/// and then yields its items.
struct InnerStream {
    // Current phase: opening future, or the opened stream being read.
    state: StreamState,
}
impl Stream for InnerStream {
type Item = Sitemty<ChannelEvents>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
loop {
break match &mut self.state {
StreamState::Opening(fut) => match fut.poll_unpin(cx) {
Ready(Ok(x)) => {
self.state = StreamState::Reading(x);
continue;
}
Ready(Err(e)) => Ready(Some(Err(e))),
Pending => Pending,
},
StreamState::Reading(fut) => match fut.poll_next_unpin(cx) {
Ready(Some(x)) => Ready(Some(x)),
Ready(None) => Ready(None),
Pending => Pending,
},
};
}
}
}
/// `EventsReadProvider` implementation backed by the sf-databuffer data source.
pub struct SfDatabufferEventReadProvider {
    // Request context propagated into the opened event streams.
    ctx: Arc<ReqCtx>,
    // Handle used to open the boxed byte streams that the event reader consumes.
    open_bytes: OpenBoxedBytesStreamsBox,
}
impl SfDatabufferEventReadProvider {
pub fn new(ctx: Arc<ReqCtx>, open_bytes: OpenBoxedBytesStreamsBox) -> Self {
Self { ctx, open_bytes }
}
}
impl EventsReadProvider for SfDatabufferEventReadProvider {
fn read(&self, evq: EventsSubQuery) -> EventsReading {
let range = match evq.range() {
netpod::range::evrange::SeriesRange::TimeRange(x) => x.clone(),
netpod::range::evrange::SeriesRange::PulseRange(_) => panic!("not available for pulse range"),
};
let ctx = self.ctx.clone();
let open_bytes = self.open_bytes.clone();
let state = StreamState::Opening(Box::pin(async move {
let ret = crate::timebinnedjson::timebinnable_stream_sf_databuffer_channelevents(
range,
evq.need_one_before_range(),
evq.ch_conf().clone(),
evq.transform().clone(),
evq.settings().clone(),
evq.log_level().into(),
ctx,
open_bytes,
)
.await;
ret.map(|x| Box::pin(x) as _)
}));
let stream = InnerStream { state };
EventsReading::new(Box::pin(stream))
}
}
/// Cache provider that performs no caching: every read is a miss and every
/// write is discarded.
pub struct DummyCacheReadProvider {}

impl DummyCacheReadProvider {
    /// Creates a new no-op cache provider.
    pub fn new() -> Self {
        Self {}
    }
}

// A public `new()` with no arguments should be mirrored by `Default`
// (clippy: `new_without_default`).
impl Default for DummyCacheReadProvider {
    fn default() -> Self {
        Self::new()
    }
}
impl CacheReadProvider for DummyCacheReadProvider {
    /// No-op cache read: always resolves to `Ok(None)`, i.e. a cache miss.
    // Unused parameters are underscore-prefixed to suppress warnings; the
    // signature must match the trait.
    fn read(
        &self,
        _series: u64,
        _bin_len: netpod::DtMs,
        _msp: u64,
        _offs: std::ops::Range<u32>,
    ) -> crate::timebin::cached::reader::CacheReading {
        // Note: this is a future, not a stream; the original local name
        // `stream` was misleading.
        let fut = futures_util::future::ready(Ok(None));
        crate::timebin::cached::reader::CacheReading::new(Box::pin(fut))
    }

    /// No-op cache write: discards the bins and resolves to `Ok(())`.
    fn write(&self, _series: u64, _bins: items_0::timebin::BinsBoxed) -> crate::timebin::cached::reader::CacheWriting {
        let fut = futures_util::future::ready(Ok(()));
        crate::timebin::cached::reader::CacheWriting::new(Box::pin(fut))
    }
}

View File

@@ -2,6 +2,7 @@ pub mod boxed;
pub mod cbor_stream;
pub mod collect;
pub mod dtflags;
pub mod eventsplainreader;
pub mod filechunkread;
pub mod firsterr;
pub mod framed_bytes;

View File

@@ -9,7 +9,6 @@ use items_0::timebin::BinsBoxed;
use items_2::channelevents::ChannelEvents;
use netpod::log::*;
use netpod::BinnedRange;
use netpod::ChConf;
use netpod::DtMs;
use netpod::TsNano;
use query::api4::events::EventsSubQuery;
@@ -50,23 +49,23 @@ impl Stream for EventsReading {
}
pub trait EventsReadProvider: Send + Sync {
fn read(&self, evq: EventsSubQuery, chconf: ChConf) -> EventsReading;
fn read(&self, evq: EventsSubQuery) -> EventsReading;
}
pub struct CacheReading {
fut: Pin<Box<dyn Future<Output = Result<BinsBoxed, streams::timebin::cached::reader::Error>> + Send>>,
fut: Pin<Box<dyn Future<Output = Result<Option<BinsBoxed>, streams::timebin::cached::reader::Error>> + Send>>,
}
impl CacheReading {
pub fn new(
fut: Pin<Box<dyn Future<Output = Result<BinsBoxed, streams::timebin::cached::reader::Error>> + Send>>,
fut: Pin<Box<dyn Future<Output = Result<Option<BinsBoxed>, streams::timebin::cached::reader::Error>> + Send>>,
) -> Self {
Self { fut }
}
}
impl Future for CacheReading {
type Output = Result<BinsBoxed, streams::timebin::cached::reader::Error>;
type Output = Result<Option<BinsBoxed>, streams::timebin::cached::reader::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
self.fut.poll_unpin(cx)
@@ -111,7 +110,7 @@ pub struct CachedReader {
ts1next: TsNano,
bin_len: DtMs,
cache_read_provider: Arc<dyn CacheReadProvider>,
reading: Option<Pin<Box<dyn Future<Output = Result<BinsBoxed, Error>> + Send>>>,
reading: Option<Pin<Box<dyn Future<Output = Result<Option<BinsBoxed>, Error>> + Send>>>,
}
impl CachedReader {
@@ -149,7 +148,7 @@ impl Stream for CachedReader {
Ready(x) => {
self.reading = None;
match x {
Ok(bins) => {
Ok(Some(bins)) => {
trace_emit!(
"- - - - - - - - - - - - emit cached bins {} bin_len {}",
bins.len(),
@@ -157,6 +156,9 @@ impl Stream for CachedReader {
);
Ready(Some(Ok(bins)))
}
Ok(None) => {
continue;
}
Err(e) => Ready(Some(Err(e))),
}
}

View File

@@ -32,14 +32,13 @@ impl BinnedFromEvents {
pub fn new(
range: BinnedRange<TsNano>,
evq: EventsSubQuery,
chconf: ChConf,
do_time_weight: bool,
read_provider: Arc<dyn EventsReadProvider>,
) -> Result<Self, Error> {
if !evq.range().is_time() {
panic!();
}
let stream = read_provider.read(evq, chconf);
let stream = read_provider.read(evq);
// let stream = stream.map(|x| {
// let x = items_0::try_map_sitemty_data!(x, |x| match x {
// ChannelEvents::Events(x) => {

View File

@@ -56,7 +56,6 @@ pub struct TimeBinnedFromLayers {
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
inp: BoxedInput,
}
@@ -72,8 +71,6 @@ impl TimeBinnedFromLayers {
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
series: u64,
range: BinnedRange<TsNano>,
do_time_weight: bool,
bin_len_layers: Vec<DtMs>,
@@ -83,7 +80,7 @@ impl TimeBinnedFromLayers {
debug!(
"{}::new {:?} {:?} {:?}",
Self::type_name(),
series,
ch_conf.series(),
range,
bin_len_layers
);
@@ -98,7 +95,6 @@ impl TimeBinnedFromLayers {
sub.clone(),
log_level.clone(),
ctx.clone(),
series,
range,
do_time_weight,
bin_len_layers,
@@ -112,7 +108,6 @@ impl TimeBinnedFromLayers {
sub,
log_level,
ctx,
open_bytes,
inp: Box::pin(inp),
};
Ok(ret)
@@ -137,7 +132,6 @@ impl TimeBinnedFromLayers {
sub.clone(),
log_level.clone(),
ctx.clone(),
series,
range_finer.clone(),
do_time_weight,
bin_len_layers,
@@ -152,7 +146,6 @@ impl TimeBinnedFromLayers {
sub,
log_level,
ctx,
open_bytes,
inp: Box::pin(inp),
};
Ok(ret)
@@ -168,30 +161,18 @@ impl TimeBinnedFromLayers {
transform_query.clone(),
);
let evq = EventsSubQuery::from_parts(select, sub.clone(), ctx.reqid().into(), log_level.clone());
match &ch_conf {
ChannelTypeConfigGen::Scylla(chconf) => {
let inp = BinnedFromEvents::new(
range,
evq,
chconf.clone(),
do_time_weight,
events_read_provider,
)?;
let ret = Self {
ch_conf,
cache_usage,
transform_query,
sub,
log_level,
ctx,
open_bytes,
inp: Box::pin(inp),
};
debug!("{}::new setup from events", Self::type_name());
Ok(ret)
}
ChannelTypeConfigGen::SfDatabuffer(_) => return Err(Error::SfDatabufferNotSupported),
}
let inp = BinnedFromEvents::new(range, evq, do_time_weight, events_read_provider)?;
let ret = Self {
ch_conf,
cache_usage,
transform_query,
sub,
log_level,
ctx,
inp: Box::pin(inp),
};
debug!("{}::new setup from events", Self::type_name());
Ok(ret)
}
}
}

View File

@@ -66,7 +66,6 @@ pub struct GapFill {
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
series: u64,
range: BinnedRange<TsNano>,
do_time_weight: bool,
bin_len_layers: Vec<DtMs>,
@@ -97,7 +96,6 @@ impl GapFill {
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
series: u64,
range: BinnedRange<TsNano>,
do_time_weight: bool,
bin_len_layers: Vec<DtMs>,
@@ -107,6 +105,7 @@ impl GapFill {
let dbgname = format!("{}--[{}]", dbgname_parent, range);
debug_init!("new dbgname {}", dbgname);
let inp = if cache_usage.is_cache_read() {
let series = ch_conf.series().expect("series id for cache read");
let stream = super::cached::reader::CachedReader::new(series, range.clone(), cache_read_provider.clone())?
.map(|x| match x {
Ok(x) => Ok(StreamItem::DataItem(RangeCompletableItem::Data(x))),
@@ -125,7 +124,6 @@ impl GapFill {
sub,
log_level,
ctx,
series,
range,
do_time_weight,
bin_len_layers,
@@ -257,7 +255,6 @@ impl GapFill {
self.sub.clone(),
self.log_level.clone(),
self.ctx.clone(),
self.series,
range_finer_one_before_bin,
self.do_time_weight,
self.bin_len_layers.clone(),
@@ -288,26 +285,16 @@ impl GapFill {
self.ctx.reqid().into(),
self.log_level.clone(),
);
match &self.ch_conf {
ChannelTypeConfigGen::Scylla(chconf) => {
let range = BinnedRange::from_nano_range(range.clone(), self.range.bin_len.to_dt_ms());
let inp = BinnedFromEvents::new(
range,
evq,
chconf.clone(),
self.do_time_weight,
self.events_read_provider.clone(),
)?;
self.inp_finer = Some(Box::pin(inp));
}
ChannelTypeConfigGen::SfDatabuffer(_) => return Err(Error::SfDatabufferNotSupported),
}
let range = BinnedRange::from_nano_range(range.clone(), self.range.bin_len.to_dt_ms());
let inp = BinnedFromEvents::new(range, evq, self.do_time_weight, self.events_read_provider.clone())?;
self.inp_finer = Some(Box::pin(inp));
}
Ok(())
}
fn cache_write(mut self: Pin<&mut Self>, bins: BinsBoxed) -> Result<(), Error> {
self.cache_writing = Some(self.cache_read_provider.write(self.series, bins));
let series = ::err::todoval();
self.cache_writing = Some(self.cache_read_provider.write(series, bins));
Ok(())
}

View File

@@ -53,7 +53,7 @@ fn assert_stream_send<'u, R>(stream: impl 'u + Send + Stream<Item = R>) -> impl
stream
}
pub async fn timebinnable_stream(
pub async fn timebinnable_stream_sf_databuffer_box_events(
range: NanoRange,
one_before_range: bool,
ch_conf: ChannelTypeConfigGen,
@@ -62,7 +62,7 @@ pub async fn timebinnable_stream(
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
) -> Result<TimeBinnableStreamBox, Error> {
) -> Result<impl Stream<Item = Sitemty<Box<dyn Events>>>, Error> {
let subq = make_sub_query(
ch_conf,
range.clone().into(),
@@ -222,6 +222,32 @@ pub async fn timebinnable_stream(
let stream = stream.map(|x| x);
Box::pin(stream)
};
Ok(stream)
}
async fn timebinnable_stream_sf_databuffer_binnable_box(
range: NanoRange,
one_before_range: bool,
ch_conf: ChannelTypeConfigGen,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
) -> Result<TimeBinnableStreamBox, Error> {
let stream = timebinnable_stream_sf_databuffer_box_events(
range,
one_before_range,
ch_conf,
transform_query,
sub,
log_level,
ctx,
open_bytes,
)
.await?;
// let stream = stream.map(|x| x);
// let stream = stream.map(|x| ChannelEvents::Events(x));
// let stream = stream.map(move |k| {
// on_sitemty_data!(k, |k| {
@@ -236,6 +262,39 @@ pub async fn timebinnable_stream(
Ok(TimeBinnableStreamBox(stream))
}
pub async fn timebinnable_stream_sf_databuffer_channelevents(
range: NanoRange,
one_before_range: bool,
ch_conf: ChannelTypeConfigGen,
transform_query: TransformQuery,
sub: EventsSubQuerySettings,
log_level: String,
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
) -> Result<impl Stream<Item = Sitemty<ChannelEvents>>, Error> {
let stream = timebinnable_stream_sf_databuffer_box_events(
range,
one_before_range,
ch_conf,
transform_query,
sub,
log_level,
ctx,
open_bytes,
)
.await?;
// let stream = stream.map(|x| x);
let stream = stream.map(move |k| {
on_sitemty_data!(k, |k| {
// let k: Box<dyn Collectable> = Box::new(k);
Ok(StreamItem::DataItem(RangeCompletableItem::Data(ChannelEvents::Events(
k,
))))
})
});
Ok(stream)
}
pub struct TimeBinnableStream {
make_stream_fut: Option<Pin<Box<dyn Future<Output = Result<TimeBinnableStreamBox, Error>> + Send>>>,
stream: Option<Pin<Box<dyn Stream<Item = Sitemty<Box<dyn TimeBinnable>>> + Send>>>,
@@ -253,7 +312,7 @@ impl TimeBinnableStream {
ctx: Arc<ReqCtx>,
open_bytes: OpenBoxedBytesStreamsBox,
) -> Self {
let fut = timebinnable_stream(
let fut = timebinnable_stream_sf_databuffer_binnable_box(
range,
one_before_range,
ch_conf,
@@ -313,23 +372,13 @@ async fn timebinned_stream(
ch_conf: ChannelTypeConfigGen,
ctx: &ReqCtx,
open_bytes: OpenBoxedBytesStreamsBox,
cache_read_provider: Option<Arc<dyn CacheReadProvider>>,
events_read_provider: Option<Arc<dyn EventsReadProvider>>,
cache_read_provider: Arc<dyn CacheReadProvider>,
events_read_provider: Arc<dyn EventsReadProvider>,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<Box<dyn TimeBinned>>> + Send>>, Error> {
use netpod::query::CacheUsage;
let cache_usage = query.cache_usage().unwrap_or(CacheUsage::V0NoCache);
match (
ch_conf.series(),
cache_usage.clone(),
cache_read_provider,
events_read_provider,
) {
(
Some(series),
CacheUsage::Use | CacheUsage::Recreate | CacheUsage::Ignore,
Some(cache_read_provider),
Some(events_read_provider),
) => {
match cache_usage.clone() {
CacheUsage::Use | CacheUsage::Recreate | CacheUsage::Ignore => {
debug!(
"timebinned_stream caching {:?} subgrids {:?}",
query,
@@ -351,8 +400,6 @@ async fn timebinned_stream(
EventsSubQuerySettings::from(&query),
query.log_level().into(),
Arc::new(ctx.clone()),
open_bytes.clone(),
series,
binned_range.binned_range_time(),
do_time_weight,
bin_len_layers,
@@ -372,7 +419,7 @@ async fn timebinned_stream(
let range = binned_range.binned_range_time().to_nano_range();
let do_time_weight = true;
let one_before_range = true;
let stream = timebinnable_stream(
let stream = timebinnable_stream_sf_databuffer_binnable_box(
range,
one_before_range,
ch_conf,
@@ -412,8 +459,8 @@ pub async fn timebinned_json(
ch_conf: ChannelTypeConfigGen,
ctx: &ReqCtx,
open_bytes: OpenBoxedBytesStreamsBox,
cache_read_provider: Option<Arc<dyn CacheReadProvider>>,
events_read_provider: Option<Arc<dyn EventsReadProvider>>,
cache_read_provider: Arc<dyn CacheReadProvider>,
events_read_provider: Arc<dyn EventsReadProvider>,
) -> Result<CollectResult<JsonValue>, Error> {
let deadline = Instant::now()
+ query
@@ -486,8 +533,8 @@ pub async fn timebinned_json_framed(
ch_conf: ChannelTypeConfigGen,
ctx: &ReqCtx,
open_bytes: OpenBoxedBytesStreamsBox,
cache_read_provider: Option<Arc<dyn CacheReadProvider>>,
events_read_provider: Option<Arc<dyn EventsReadProvider>>,
cache_read_provider: Arc<dyn CacheReadProvider>,
events_read_provider: Arc<dyn EventsReadProvider>,
) -> Result<JsonStream, Error> {
trace!("timebinned_json_framed");
let binned_range = query.covering_range()?;