From 29bee7c9e4ff6e454a5b13944d1194a1cd2dab41 Mon Sep 17 00:00:00 2001 From: Dominik Werder Date: Thu, 7 Nov 2024 21:11:24 +0100 Subject: [PATCH] Factored into separate crate --- .gitignore | 2 + Cargo.toml | 37 + src/accounting.rs | 41 + src/binning.rs | 11 + src/binning/aggregator.rs | 211 +++ src/binning/binnedvaluetype.rs | 8 + src/binning/container_bins.rs | 653 ++++++++++ src/binning/container_events.rs | 276 ++++ src/binning/test.rs | 15 + src/binning/test/events00.rs | 488 +++++++ src/binning/timeweight.rs | 16 + src/binning/timeweight/timeweight_bins.rs | 5 + src/binning/timeweight/timeweight_bins_dyn.rs | 27 + src/binning/timeweight/timeweight_events.rs | 645 ++++++++++ .../timeweight/timeweight_events_dyn.rs | 276 ++++ src/binning/valuetype.rs | 85 ++ src/binsdim0.rs | 905 +++++++++++++ src/binsxbindim0.rs | 523 ++++++++ src/channelevents.rs | 1139 +++++++++++++++++ src/empty.rs | 58 + src/eventfull.rs | 431 +++++++ src/eventsdim0.rs | 869 +++++++++++++ src/eventsdim0enum.rs | 469 +++++++ src/eventsdim1.rs | 691 ++++++++++ src/eventsxbindim0.rs | 779 +++++++++++ src/framable.rs | 221 ++++ src/frame.rs | 433 +++++++ src/inmem.rs | 34 + src/items_2.rs | 178 +++ src/merger.rs | 491 +++++++ src/streams.rs | 290 +++++ src/test.rs | 470 +++++++ src/test/eventsdim0.rs | 24 + src/test/eventsdim1.rs | 0 src/testgen.rs | 25 + src/transform.rs | 84 ++ 36 files changed, 10910 insertions(+) create mode 100644 .gitignore create mode 100644 Cargo.toml create mode 100644 src/accounting.rs create mode 100644 src/binning.rs create mode 100644 src/binning/aggregator.rs create mode 100644 src/binning/binnedvaluetype.rs create mode 100644 src/binning/container_bins.rs create mode 100644 src/binning/container_events.rs create mode 100644 src/binning/test.rs create mode 100644 src/binning/test/events00.rs create mode 100644 src/binning/timeweight.rs create mode 100644 src/binning/timeweight/timeweight_bins.rs create mode 100644 src/binning/timeweight/timeweight_bins_dyn.rs 
create mode 100644 src/binning/timeweight/timeweight_events.rs create mode 100644 src/binning/timeweight/timeweight_events_dyn.rs create mode 100644 src/binning/valuetype.rs create mode 100644 src/binsdim0.rs create mode 100644 src/binsxbindim0.rs create mode 100644 src/channelevents.rs create mode 100644 src/empty.rs create mode 100644 src/eventfull.rs create mode 100644 src/eventsdim0.rs create mode 100644 src/eventsdim0enum.rs create mode 100644 src/eventsdim1.rs create mode 100644 src/eventsxbindim0.rs create mode 100644 src/framable.rs create mode 100644 src/frame.rs create mode 100644 src/inmem.rs create mode 100644 src/items_2.rs create mode 100644 src/merger.rs create mode 100644 src/streams.rs create mode 100644 src/test.rs create mode 100644 src/test/eventsdim0.rs create mode 100644 src/test/eventsdim1.rs create mode 100644 src/testgen.rs create mode 100644 src/transform.rs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1b72444 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/Cargo.lock +/target diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..592053a --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "daqbuf-items-2" +version = "0.0.3" +authors = ["Dominik Werder "] +edition = "2021" + +[lib] +path = "src/items_2.rs" +doctest = false + +[dependencies] +serde = { version = "1", features = ["derive"] } +serde_json = "1" +ciborium = "0.2.1" +rmp-serde = "1.1.1" +postcard = { version = "1.0.0", features = ["use-std"] } +erased-serde = "0.4" +typetag = "0.2.14" +bytes = "1.8" +num-traits = "0.2.15" +chrono = { version = "0.4.19", features = ["serde"] } +crc32fast = "1.3.2" +futures-util = "0.3.24" +humantime-serde = "1.1.1" +thiserror = "=0.0.1" +daqbuf-err = { path = "../daqbuf-err" } +items_0 = { path = "../daqbuf-items-0", package = "daqbuf-items-0" } +items_proc = { path = "../daqbuf-items-proc", package = "daqbuf-items-proc" } +netpod = { path = "../daqbuf-netpod", package = 
"daqbuf-netpod" } +parse = { path = "../daqbuf-parse", package = "daqbuf-parse" } +bitshuffle = { path = "../daqbuf-bitshuffle", package = "daqbuf-bitshuffle" } + +[patch.crates-io] +thiserror = { git = "https://github.com/dominikwerder/thiserror.git", branch = "cstm" } + +[features] +heavy = [] diff --git a/src/accounting.rs b/src/accounting.rs new file mode 100644 index 0000000..b3009f6 --- /dev/null +++ b/src/accounting.rs @@ -0,0 +1,41 @@ +use items_0::Empty; +use items_0::Extendable; +use items_0::WithLen; +use serde::Deserialize; +use serde::Serialize; +use std::collections::VecDeque; + +#[derive(Debug, Serialize, Deserialize)] +pub struct AccountingEvents { + pub tss: VecDeque, + pub count: VecDeque, + pub bytes: VecDeque, +} + +impl Empty for AccountingEvents { + fn empty() -> Self { + Self { + tss: VecDeque::new(), + count: VecDeque::new(), + bytes: VecDeque::new(), + } + } +} + +impl WithLen for AccountingEvents { + fn len(&self) -> usize { + self.tss.len() + } +} + +impl Extendable for AccountingEvents { + fn extend_from(&mut self, src: &mut Self) { + use core::mem::replace; + let v = replace(&mut src.tss, VecDeque::new()); + self.tss.extend(v.into_iter()); + let v = replace(&mut src.count, VecDeque::new()); + self.count.extend(v.into_iter()); + let v = replace(&mut src.bytes, VecDeque::new()); + self.bytes.extend(v.into_iter()); + } +} diff --git a/src/binning.rs b/src/binning.rs new file mode 100644 index 0000000..f480ad1 --- /dev/null +++ b/src/binning.rs @@ -0,0 +1,11 @@ +pub mod aggregator; +pub mod binnedvaluetype; +pub mod container_bins; +pub mod container_events; +pub mod timeweight; +pub mod valuetype; + +#[cfg(test)] +mod test; + +use super::binning as ___; diff --git a/src/binning/aggregator.rs b/src/binning/aggregator.rs new file mode 100644 index 0000000..2351718 --- /dev/null +++ b/src/binning/aggregator.rs @@ -0,0 +1,211 @@ +use super::container_events::EventValueType; +use core::fmt; +use netpod::log::*; +use netpod::DtNano; +use 
netpod::EnumVariant; +use serde::Deserialize; +use serde::Serialize; + +#[allow(unused)] +macro_rules! trace_event { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_result { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) } + +pub trait AggTimeWeightOutputAvg: fmt::Debug + Clone + Send + Serialize + for<'a> Deserialize<'a> {} + +impl AggTimeWeightOutputAvg for u8 {} +impl AggTimeWeightOutputAvg for u16 {} +impl AggTimeWeightOutputAvg for u32 {} +impl AggTimeWeightOutputAvg for u64 {} +impl AggTimeWeightOutputAvg for i8 {} +impl AggTimeWeightOutputAvg for i16 {} +impl AggTimeWeightOutputAvg for i32 {} +impl AggTimeWeightOutputAvg for i64 {} +impl AggTimeWeightOutputAvg for f32 {} +impl AggTimeWeightOutputAvg for f64 {} +impl AggTimeWeightOutputAvg for EnumVariant {} +impl AggTimeWeightOutputAvg for String {} +impl AggTimeWeightOutputAvg for bool {} + +pub trait AggregatorTimeWeight: fmt::Debug + Send +where + EVT: EventValueType, +{ + fn new() -> Self; + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EVT); + fn reset_for_new_bin(&mut self); + fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> EVT::AggTimeWeightOutputAvg; +} + +#[derive(Debug)] +pub struct AggregatorNumeric { + sum: f64, +} + +trait AggWithF64: EventValueType { + fn as_f64(&self) -> f64; +} + +impl AggWithF64 for f64 { + fn as_f64(&self) -> f64 { + *self + } +} + +impl AggregatorTimeWeight for AggregatorNumeric +where + EVT: AggWithF64, +{ + fn new() -> Self { + Self { sum: 0. 
} + } + + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EVT) { + let f = dt.ns() as f64 / bl.ns() as f64; + trace_event!("INGEST {} {:?}", f, val); + self.sum += f * val.as_f64(); + } + + fn reset_for_new_bin(&mut self) { + self.sum = 0.; + } + + fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> EVT::AggTimeWeightOutputAvg { + let sum = self.sum.clone(); + trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction); + self.sum = 0.; + sum / filled_width_fraction as f64 + } +} + +impl AggregatorTimeWeight for AggregatorNumeric { + fn new() -> Self { + Self { sum: 0. } + } + + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: f32) { + let f = dt.ns() as f64 / bl.ns() as f64; + trace_event!("INGEST {} {}", f, val); + self.sum += f * val as f64; + } + + fn reset_for_new_bin(&mut self) { + self.sum = 0.; + } + + fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f32 { + let sum = self.sum.clone() as f32; + trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction); + self.sum = 0.; + sum / filled_width_fraction + } +} + +macro_rules! impl_agg_tw_for_agg_num { + ($evt:ty) => { + impl AggregatorTimeWeight<$evt> for AggregatorNumeric { + fn new() -> Self { + Self { sum: 0. 
} + } + + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: $evt) { + let f = dt.ns() as f64 / bl.ns() as f64; + trace_event!("INGEST {} {}", f, val); + self.sum += f * val as f64; + } + + fn reset_for_new_bin(&mut self) { + self.sum = 0.; + } + + fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 { + let sum = self.sum.clone(); + trace_result!( + "result_and_reset_for_new_bin sum {} {}", + sum, + filled_width_fraction + ); + self.sum = 0.; + sum / filled_width_fraction as f64 + } + } + }; +} + +impl_agg_tw_for_agg_num!(u8); +impl_agg_tw_for_agg_num!(u16); +impl_agg_tw_for_agg_num!(u32); +impl_agg_tw_for_agg_num!(i8); +impl_agg_tw_for_agg_num!(i16); +impl_agg_tw_for_agg_num!(i32); +impl_agg_tw_for_agg_num!(i64); + +impl AggregatorTimeWeight for AggregatorNumeric { + fn new() -> Self { + Self { sum: 0. } + } + + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: u64) { + let f = dt.ns() as f64 / bl.ns() as f64; + trace_event!("INGEST {} {}", f, val); + self.sum += f * val as f64; + } + + fn reset_for_new_bin(&mut self) { + self.sum = 0.; + } + + fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 { + let sum = self.sum.clone(); + trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction); + self.sum = 0.; + sum / filled_width_fraction as f64 + } +} + +impl AggregatorTimeWeight for AggregatorNumeric { + fn new() -> Self { + Self { sum: 0. 
} + } + + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: bool) { + let f = dt.ns() as f64 / bl.ns() as f64; + trace_event!("INGEST {} {}", f, val); + self.sum += f * val as u8 as f64; + } + + fn reset_for_new_bin(&mut self) { + self.sum = 0.; + } + + fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 { + let sum = self.sum.clone(); + trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction); + self.sum = 0.; + sum / filled_width_fraction as f64 + } +} + +impl AggregatorTimeWeight for AggregatorNumeric { + fn new() -> Self { + Self { sum: 0. } + } + + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: String) { + let f = dt.ns() as f64 / bl.ns() as f64; + trace_event!("INGEST {} {}", f, val); + self.sum += f * val.len() as f64; + } + + fn reset_for_new_bin(&mut self) { + self.sum = 0.; + } + + fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 { + let sum = self.sum.clone(); + trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction); + self.sum = 0.; + sum / filled_width_fraction as f64 + } +} diff --git a/src/binning/binnedvaluetype.rs b/src/binning/binnedvaluetype.rs new file mode 100644 index 0000000..306fed4 --- /dev/null +++ b/src/binning/binnedvaluetype.rs @@ -0,0 +1,8 @@ +pub trait BinnedValueType {} + +pub struct BinnedNumericValue { + avg: f32, + _t: Option, +} + +impl BinnedValueType for BinnedNumericValue {} diff --git a/src/binning/container_bins.rs b/src/binning/container_bins.rs new file mode 100644 index 0000000..d2ed4d3 --- /dev/null +++ b/src/binning/container_bins.rs @@ -0,0 +1,653 @@ +use super::aggregator::AggregatorNumeric; +use super::aggregator::AggregatorTimeWeight; +use super::container_events::EventValueType; +use super::___; +use crate::ts_offs_from_abs; +use crate::ts_offs_from_abs_with_anchor; +use core::fmt; +use daqbuf_err as err; +use err::thiserror; +use err::ThisError; +use items_0::collect_s::CollectableDyn; +use 
items_0::collect_s::CollectedDyn; +use items_0::collect_s::ToJsonResult; +use items_0::timebin::BinningggContainerBinsDyn; +use items_0::timebin::BinsBoxed; +use items_0::vecpreview::VecPreview; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::TypeName; +use items_0::WithLen; +use netpod::log::*; +use netpod::EnumVariant; +use netpod::TsNano; +use serde::Deserialize; +use serde::Serialize; +use std::any; +use std::collections::VecDeque; +use std::mem; + +#[allow(unused)] +macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[derive(Debug, ThisError)] +#[cstm(name = "ContainerBins")] +pub enum ContainerBinsError { + Unordered, +} + +pub trait BinValueType: fmt::Debug + Clone + PartialOrd { + // type Container: Container; + // type AggregatorTimeWeight: AggregatorTimeWeight; + // type AggTimeWeightOutputAvg; + + // fn identity_sum() -> Self; + // fn add_weighted(&self, add: &Self, f: f32) -> Self; +} + +#[derive(Debug, Clone)] +pub struct BinSingle { + pub ts1: TsNano, + pub ts2: TsNano, + pub cnt: u64, + pub min: EVT, + pub max: EVT, + pub avg: f32, + pub lst: EVT, + pub fnl: bool, +} + +#[derive(Debug, Clone)] +pub struct BinRef<'a, EVT> +where + EVT: EventValueType, +{ + pub ts1: TsNano, + pub ts2: TsNano, + pub cnt: u64, + pub min: &'a EVT, + pub max: &'a EVT, + pub avg: &'a EVT::AggTimeWeightOutputAvg, + pub lst: &'a EVT, + pub fnl: bool, +} + +pub struct IterDebug<'a, EVT> +where + EVT: EventValueType, +{ + bins: &'a ContainerBins, + ix: usize, + len: usize, +} + +impl<'a, EVT> Iterator for IterDebug<'a, EVT> +where + EVT: EventValueType, +{ + type Item = BinRef<'a, EVT>; + + fn next(&mut self) -> Option { + if self.ix < self.bins.len() && self.ix < self.len { + let b = &self.bins; + let i = self.ix; + self.ix += 1; + let ret = BinRef { + ts1: b.ts1s[i], + ts2: b.ts2s[i], + cnt: b.cnts[i], + min: &b.mins[i], + max: &b.maxs[i], + avg: &b.avgs[i], + lst: &b.lsts[i], + fnl: b.fnls[i], + }; + Some(ret) + } else { + None + } 
+ } +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ContainerBins +where + EVT: EventValueType, +{ + ts1s: VecDeque, + ts2s: VecDeque, + cnts: VecDeque, + mins: VecDeque, + maxs: VecDeque, + avgs: VecDeque, + lsts: VecDeque, + fnls: VecDeque, +} + +impl ContainerBins +where + EVT: EventValueType, +{ + pub fn from_constituents( + ts1s: VecDeque, + ts2s: VecDeque, + cnts: VecDeque, + mins: VecDeque, + maxs: VecDeque, + avgs: VecDeque, + lsts: VecDeque, + fnls: VecDeque, + ) -> Self { + Self { + ts1s, + ts2s, + cnts, + mins, + maxs, + avgs, + lsts, + fnls, + } + } + + pub fn type_name() -> &'static str { + any::type_name::() + } + + pub fn new() -> Self { + Self { + ts1s: VecDeque::new(), + ts2s: VecDeque::new(), + cnts: VecDeque::new(), + mins: VecDeque::new(), + maxs: VecDeque::new(), + avgs: VecDeque::new(), + lsts: VecDeque::new(), + fnls: VecDeque::new(), + } + } + + pub fn len(&self) -> usize { + self.ts1s.len() + } + + pub fn verify(&self) -> Result<(), ContainerBinsError> { + if self.ts1s.iter().zip(self.ts1s.iter().skip(1)).any(|(&a, &b)| a > b) { + return Err(ContainerBinsError::Unordered); + } + if self.ts2s.iter().zip(self.ts2s.iter().skip(1)).any(|(&a, &b)| a > b) { + return Err(ContainerBinsError::Unordered); + } + Ok(()) + } + + pub fn ts1_first(&self) -> Option { + self.ts1s.front().map(|&x| x) + } + + pub fn ts2_last(&self) -> Option { + self.ts2s.back().map(|&x| x) + } + + pub fn ts1s_iter(&self) -> std::collections::vec_deque::Iter { + self.ts1s.iter() + } + + pub fn ts2s_iter(&self) -> std::collections::vec_deque::Iter { + self.ts2s.iter() + } + + pub fn cnts_iter(&self) -> std::collections::vec_deque::Iter { + self.cnts.iter() + } + + pub fn mins_iter(&self) -> std::collections::vec_deque::Iter { + self.mins.iter() + } + + pub fn maxs_iter(&self) -> std::collections::vec_deque::Iter { + self.maxs.iter() + } + + pub fn avgs_iter(&self) -> std::collections::vec_deque::Iter { + self.avgs.iter() + } + + pub fn fnls_iter(&self) -> 
std::collections::vec_deque::Iter { + self.fnls.iter() + } + + pub fn zip_iter( + &self, + ) -> std::iter::Zip< + std::iter::Zip< + std::iter::Zip< + std::iter::Zip< + std::iter::Zip< + std::iter::Zip< + std::collections::vec_deque::Iter, + std::collections::vec_deque::Iter, + >, + std::collections::vec_deque::Iter, + >, + std::collections::vec_deque::Iter, + >, + std::collections::vec_deque::Iter, + >, + std::collections::vec_deque::Iter, + >, + std::collections::vec_deque::Iter, + > { + self.ts1s_iter() + .zip(self.ts2s_iter()) + .zip(self.cnts_iter()) + .zip(self.mins_iter()) + .zip(self.maxs_iter()) + .zip(self.avgs_iter()) + .zip(self.fnls_iter()) + } + + pub fn edges_iter( + &self, + ) -> std::iter::Zip, std::collections::vec_deque::Iter> { + self.ts1s.iter().zip(self.ts2s.iter()) + } + + pub fn len_before(&self, end: TsNano) -> usize { + let pp = self.ts2s.partition_point(|&x| x <= end); + assert!(pp <= self.len(), "len_before pp {} len {}", pp, self.len()); + pp + } + + pub fn pop_front(&mut self) -> Option> { + todo!("pop_front"); + let ts1 = if let Some(x) = self.ts1s.pop_front() { + x + } else { + return None; + }; + let ts2 = if let Some(x) = self.ts2s.pop_front() { + x + } else { + return None; + }; + todo!() + } + + pub fn push_back( + &mut self, + ts1: TsNano, + ts2: TsNano, + cnt: u64, + min: EVT, + max: EVT, + avg: EVT::AggTimeWeightOutputAvg, + lst: EVT, + fnl: bool, + ) { + self.ts1s.push_back(ts1); + self.ts2s.push_back(ts2); + self.cnts.push_back(cnt); + self.mins.push_back(min); + self.maxs.push_back(max); + self.avgs.push_back(avg); + self.lsts.push_back(lst); + self.fnls.push_back(fnl); + } + + pub fn iter_debug(&self) -> IterDebug { + IterDebug { + bins: self, + ix: 0, + len: self.len(), + } + } +} + +impl fmt::Debug for ContainerBins +where + EVT: EventValueType, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let self_name = any::type_name::(); + write!( + fmt, + "{self_name} {{ len: {:?}, ts1s: {:?}, ts2s: {:?}, cnts: 
{:?}, avgs {:?}, fnls {:?} }}", + self.len(), + VecPreview::new(&self.ts1s), + VecPreview::new(&self.ts2s), + VecPreview::new(&self.cnts), + VecPreview::new(&self.avgs), + VecPreview::new(&self.fnls), + ) + } +} + +impl fmt::Display for ContainerBins +where + EVT: EventValueType, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self, fmt) + } +} + +impl AsAnyMut for ContainerBins +where + EVT: EventValueType, +{ + fn as_any_mut(&mut self) -> &mut dyn any::Any { + self + } +} + +impl WithLen for ContainerBins +where + EVT: EventValueType, +{ + fn len(&self) -> usize { + Self::len(self) + } +} + +impl TypeName for ContainerBins +where + EVT: EventValueType, +{ + fn type_name(&self) -> String { + BinningggContainerBinsDyn::type_name(self).into() + } +} + +impl AsAnyRef for ContainerBins +where + EVT: EventValueType, +{ + fn as_any_ref(&self) -> &dyn any::Any { + self + } +} + +#[derive(Debug)] +pub struct ContainerBinsCollectorOutput +where + EVT: EventValueType, +{ + bins: ContainerBins, +} + +impl TypeName for ContainerBinsCollectorOutput +where + EVT: EventValueType, +{ + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl AsAnyRef for ContainerBinsCollectorOutput +where + EVT: EventValueType, +{ + fn as_any_ref(&self) -> &dyn any::Any { + self + } +} + +impl AsAnyMut for ContainerBinsCollectorOutput +where + EVT: EventValueType, +{ + fn as_any_mut(&mut self) -> &mut dyn any::Any { + self + } +} + +impl WithLen for ContainerBinsCollectorOutput +where + EVT: EventValueType, +{ + fn len(&self) -> usize { + self.bins.len() + } +} + +#[derive(Debug, Serialize)] +struct ContainerBinsCollectorOutputUser +where + EVT: EventValueType, +{ + #[serde(rename = "tsAnchor")] + ts_anchor_sec: u64, + #[serde(rename = "ts1Ms")] + ts1_off_ms: VecDeque, + #[serde(rename = "ts2Ms")] + ts2_off_ms: VecDeque, + #[serde(rename = "ts1Ns")] + ts1_off_ns: VecDeque, + #[serde(rename = "ts2Ns")] + ts2_off_ns: VecDeque, + #[serde(rename = 
"counts")] + counts: VecDeque, + #[serde(rename = "mins")] + mins: VecDeque, + #[serde(rename = "maxs")] + maxs: VecDeque, + #[serde(rename = "avgs")] + avgs: VecDeque, + // #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")] + // range_final: bool, + // #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")] + // timed_out: bool, + // #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")] + // missing_bins: u32, + // #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")] + // continue_at: Option, + // #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")] + // finished_at: Option, +} + +impl ToJsonResult for ContainerBinsCollectorOutput +where + EVT: EventValueType, +{ + fn to_json_value(&self) -> Result { + let bins = &self.bins; + let ts1sns: Vec<_> = bins.ts1s.iter().map(|x| x.ns()).collect(); + let ts2sns: Vec<_> = bins.ts2s.iter().map(|x| x.ns()).collect(); + let (ts_anch, ts1ms, ts1ns) = ts_offs_from_abs(&ts1sns); + let (ts2ms, ts2ns) = ts_offs_from_abs_with_anchor(ts_anch, &ts2sns); + let counts = bins.cnts.clone(); + let mins = bins.mins.clone(); + let maxs = bins.maxs.clone(); + let avgs = bins.avgs.clone(); + let val = ContainerBinsCollectorOutputUser:: { + ts_anchor_sec: ts_anch, + ts1_off_ms: ts1ms, + ts2_off_ms: ts2ms, + ts1_off_ns: ts1ns, + ts2_off_ns: ts2ns, + counts, + mins, + maxs, + avgs, + }; + serde_json::to_value(&val) + } +} + +impl CollectedDyn for ContainerBinsCollectorOutput where EVT: EventValueType {} + +#[derive(Debug)] +pub struct ContainerBinsCollector +where + EVT: EventValueType, +{ + bins: ContainerBins, + timed_out: bool, + range_final: bool, +} + +impl ContainerBinsCollector where EVT: EventValueType {} + +impl WithLen for ContainerBinsCollector +where + EVT: EventValueType, +{ + fn len(&self) -> usize { + self.bins.len() + } +} + +impl items_0::container::ByteEstimate for ContainerBinsCollector +where + EVT: 
EventValueType, +{ + fn byte_estimate(&self) -> u64 { + // TODO need better estimate + self.bins.len() as u64 * 200 + } +} + +impl items_0::collect_s::CollectorDyn for ContainerBinsCollector +where + EVT: EventValueType, +{ + fn ingest(&mut self, src: &mut dyn CollectableDyn) { + if let Some(src) = src.as_any_mut().downcast_mut::>() { + src.drain_into(&mut self.bins, 0..src.len()); + } else { + let srcn = src.type_name(); + panic!("wrong src type {srcn}"); + } + } + + fn set_range_complete(&mut self) { + self.range_final = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + } + + fn set_continue_at_here(&mut self) { + debug!("TODO remember the continue at"); + } + + fn result( + &mut self, + range: Option, + binrange: Option, + ) -> Result, err::Error> { + // TODO do we need to set timeout, continueAt or anything? + let bins = mem::replace(&mut self.bins, ContainerBins::new()); + let ret = ContainerBinsCollectorOutput { bins }; + Ok(Box::new(ret)) + } +} + +impl CollectableDyn for ContainerBins +where + EVT: EventValueType, +{ + fn new_collector(&self) -> Box { + let ret = ContainerBinsCollector:: { + bins: ContainerBins::new(), + timed_out: false, + range_final: false, + }; + Box::new(ret) + } +} + +impl BinningggContainerBinsDyn for ContainerBins +where + EVT: EventValueType, +{ + fn type_name(&self) -> &'static str { + any::type_name::() + } + + fn empty(&self) -> BinsBoxed { + Box::new(Self::new()) + } + + fn clone(&self) -> BinsBoxed { + Box::new(::clone(self)) + } + + fn edges_iter( + &self, + ) -> std::iter::Zip, std::collections::vec_deque::Iter> { + self.ts1s.iter().zip(self.ts2s.iter()) + } + + fn drain_into(&mut self, dst: &mut dyn BinningggContainerBinsDyn, range: std::ops::Range) { + let obj = dst.as_any_mut(); + if let Some(dst) = obj.downcast_mut::() { + dst.ts1s.extend(self.ts1s.drain(range.clone())); + dst.ts2s.extend(self.ts2s.drain(range.clone())); + dst.cnts.extend(self.cnts.drain(range.clone())); + 
dst.mins.extend(self.mins.drain(range.clone())); + dst.maxs.extend(self.maxs.drain(range.clone())); + dst.avgs.extend(self.avgs.drain(range.clone())); + dst.lsts.extend(self.lsts.drain(range.clone())); + dst.fnls.extend(self.fnls.drain(range.clone())); + } else { + let styn = any::type_name::(); + panic!("unexpected drain EVT {} dst {}", styn, Self::type_name()); + } + } + + fn fix_numerics(&mut self) { + for ((min, max), avg) in self.mins.iter_mut().zip(self.maxs.iter_mut()).zip(self.avgs.iter_mut()) {} + } +} + +pub struct ContainerBinsTakeUpTo<'a, EVT> +where + EVT: EventValueType, +{ + evs: &'a mut ContainerBins, + len: usize, +} + +impl<'a, EVT> ContainerBinsTakeUpTo<'a, EVT> +where + EVT: EventValueType, +{ + pub fn new(evs: &'a mut ContainerBins, len: usize) -> Self { + let len = len.min(evs.len()); + Self { evs, len } + } +} + +impl<'a, EVT> ContainerBinsTakeUpTo<'a, EVT> +where + EVT: EventValueType, +{ + pub fn ts1_first(&self) -> Option { + self.evs.ts1_first() + } + + pub fn ts2_last(&self) -> Option { + self.evs.ts2_last() + } + + pub fn len(&self) -> usize { + self.len + } + + pub fn pop_front(&mut self) -> Option> { + if self.len != 0 { + if let Some(ev) = self.evs.pop_front() { + self.len -= 1; + Some(ev) + } else { + None + } + } else { + None + } + } +} diff --git a/src/binning/container_events.rs b/src/binning/container_events.rs new file mode 100644 index 0000000..902a3e5 --- /dev/null +++ b/src/binning/container_events.rs @@ -0,0 +1,276 @@ +use super::aggregator::AggTimeWeightOutputAvg; +use super::aggregator::AggregatorNumeric; +use super::aggregator::AggregatorTimeWeight; +use super::timeweight::timeweight_events_dyn::BinnedEventsTimeweightDynbox; +use core::fmt; +use daqbuf_err as err; +use err::thiserror; +use err::ThisError; +use items_0::timebin::BinningggContainerEventsDyn; +use items_0::vecpreview::PreviewRange; +use items_0::vecpreview::VecPreview; +use items_0::AsAnyRef; +use netpod::BinnedRange; +use netpod::TsNano; +use 
serde::Deserialize; +use serde::Serialize; +use std::any; +use std::collections::VecDeque; + +#[allow(unused)] +macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[derive(Debug, ThisError)] +#[cstm(name = "ValueContainerError")] +pub enum ValueContainerError {} + +pub trait Container: fmt::Debug + Send + Clone + PreviewRange + Serialize + for<'a> Deserialize<'a> { + fn new() -> Self; + // fn verify(&self) -> Result<(), ValueContainerError>; + fn push_back(&mut self, val: EVT); + fn pop_front(&mut self) -> Option; +} + +pub trait EventValueType: fmt::Debug + Clone + PartialOrd + Send + 'static + Serialize { + type Container: Container; + type AggregatorTimeWeight: AggregatorTimeWeight; + type AggTimeWeightOutputAvg: AggTimeWeightOutputAvg; + + // fn identity_sum() -> Self; + // fn add_weighted(&self, add: &Self, f: f32) -> Self; +} + +impl Container for VecDeque +where + EVT: EventValueType + Serialize + for<'a> Deserialize<'a>, +{ + fn new() -> Self { + VecDeque::new() + } + + fn push_back(&mut self, val: EVT) { + self.push_back(val); + } + + fn pop_front(&mut self) -> Option { + self.pop_front() + } +} + +macro_rules! 
impl_event_value_type { + ($evt:ty) => { + impl EventValueType for $evt { + type Container = VecDeque; + type AggregatorTimeWeight = AggregatorNumeric; + type AggTimeWeightOutputAvg = f64; + } + }; +} + +impl_event_value_type!(u8); +impl_event_value_type!(u16); +impl_event_value_type!(u32); +impl_event_value_type!(u64); +impl_event_value_type!(i8); +impl_event_value_type!(i16); +impl_event_value_type!(i32); +impl_event_value_type!(i64); +// impl_event_value_type!(f32); +// impl_event_value_type!(f64); + +impl EventValueType for f32 { + type Container = VecDeque; + type AggregatorTimeWeight = AggregatorNumeric; + type AggTimeWeightOutputAvg = f32; +} + +impl EventValueType for f64 { + type Container = VecDeque; + type AggregatorTimeWeight = AggregatorNumeric; + type AggTimeWeightOutputAvg = f64; +} + +impl EventValueType for bool { + type Container = VecDeque; + type AggregatorTimeWeight = AggregatorNumeric; + type AggTimeWeightOutputAvg = f64; +} + +impl EventValueType for String { + type Container = VecDeque; + type AggregatorTimeWeight = AggregatorNumeric; + type AggTimeWeightOutputAvg = f64; +} + +#[derive(Debug, Clone)] +pub struct EventSingle { + pub ts: TsNano, + pub val: EVT, +} + +#[derive(Debug, ThisError)] +#[cstm(name = "EventsContainerError")] +pub enum EventsContainerError { + Unordered, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ContainerEvents +where + EVT: EventValueType, +{ + tss: VecDeque, + vals: ::Container, +} + +impl ContainerEvents +where + EVT: EventValueType, +{ + pub fn from_constituents(tss: VecDeque, vals: ::Container) -> Self { + Self { tss, vals } + } + + pub fn type_name() -> &'static str { + any::type_name::() + } + + pub fn new() -> Self { + Self { + tss: VecDeque::new(), + vals: Container::new(), + } + } + + pub fn len(&self) -> usize { + self.tss.len() + } + + pub fn verify(&self) -> Result<(), EventsContainerError> { + if self.tss.iter().zip(self.tss.iter().skip(1)).any(|(&a, &b)| a > b) { + return 
Err(EventsContainerError::Unordered); + } + Ok(()) + } + + pub fn ts_first(&self) -> Option { + self.tss.front().map(|&x| x) + } + + pub fn ts_last(&self) -> Option { + self.tss.back().map(|&x| x) + } + + pub fn len_before(&self, end: TsNano) -> usize { + let pp = self.tss.partition_point(|&x| x < end); + assert!(pp <= self.len(), "len_before pp {} len {}", pp, self.len()); + pp + } + + pub fn pop_front(&mut self) -> Option> { + if let (Some(ts), Some(val)) = (self.tss.pop_front(), self.vals.pop_front()) { + Some(EventSingle { ts, val }) + } else { + None + } + } + + pub fn push_back(&mut self, ts: TsNano, val: EVT) { + self.tss.push_back(ts); + self.vals.push_back(val); + } +} + +impl fmt::Debug for ContainerEvents +where + EVT: EventValueType, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let self_name = any::type_name::(); + write!( + fmt, + "{self_name} {{ len: {:?}, tss: {:?}, vals {:?} }}", + self.len(), + VecPreview::new(&self.tss), + VecPreview::new(&self.vals), + ) + } +} + +impl AsAnyRef for ContainerEvents +where + EVT: EventValueType, +{ + fn as_any_ref(&self) -> &dyn any::Any { + self + } +} + +pub struct ContainerEventsTakeUpTo<'a, EVT> +where + EVT: EventValueType, +{ + evs: &'a mut ContainerEvents, + len: usize, +} + +impl<'a, EVT> ContainerEventsTakeUpTo<'a, EVT> +where + EVT: EventValueType, +{ + pub fn new(evs: &'a mut ContainerEvents, len: usize) -> Self { + let len = len.min(evs.len()); + Self { evs, len } + } +} + +impl<'a, EVT> ContainerEventsTakeUpTo<'a, EVT> +where + EVT: EventValueType, +{ + pub fn ts_first(&self) -> Option { + self.evs.ts_first() + } + + pub fn ts_last(&self) -> Option { + self.evs.ts_last() + } + + pub fn len(&self) -> usize { + self.len + } + + pub fn pop_front(&mut self) -> Option> { + if self.len != 0 { + if let Some(ev) = self.evs.pop_front() { + self.len -= 1; + Some(ev) + } else { + None + } + } else { + None + } + } +} + +impl BinningggContainerEventsDyn for ContainerEvents +where + EVT: 
EventValueType, +{ + fn type_name(&self) -> &'static str { + std::any::type_name::() + } + + fn binned_events_timeweight_traitobj( + &self, + range: BinnedRange, + ) -> Box { + BinnedEventsTimeweightDynbox::::new(range) + } + + fn to_anybox(&mut self) -> Box { + let ret = core::mem::replace(self, Self::new()); + Box::new(ret) + } +} diff --git a/src/binning/test.rs b/src/binning/test.rs new file mode 100644 index 0000000..f0daf3e --- /dev/null +++ b/src/binning/test.rs @@ -0,0 +1,15 @@ +mod events00; +use super::container_events::ContainerEvents; +use super::___; +use netpod::log::*; +use std::any; + +#[test] +fn test_use_serde() { + let x = ContainerEvents::::new(); + let a: &dyn any::Any = &x; + assert_eq!(a.downcast_ref::().is_some(), false); + assert_eq!(a.downcast_ref::>().is_some(), true); + let s = serde_json::to_string(&x).unwrap(); + let _: ContainerEvents = serde_json::from_str(&s).unwrap(); +} diff --git a/src/binning/test/events00.rs b/src/binning/test/events00.rs new file mode 100644 index 0000000..80096ca --- /dev/null +++ b/src/binning/test/events00.rs @@ -0,0 +1,488 @@ +use crate::binning::container_bins::ContainerBins; +use crate::binning::container_events::ContainerEvents; +use crate::binning::timeweight::timeweight_events::BinnedEventsTimeweight; +use daqbuf_err as err; +use err::thiserror; +use err::ThisError; +use netpod::log::*; +use netpod::range::evrange::NanoRange; +use netpod::BinnedRange; +use netpod::DtMs; +use netpod::EnumVariant; +use netpod::TsNano; +use std::collections::VecDeque; + +#[derive(Debug, ThisError)] +#[cstm(name = "Error")] +enum Error { + Timeweight(#[from] crate::binning::timeweight::timeweight_events::Error), + AssertMsg(String), +} + +// fn prepare_data_with_cuts(beg_ms: u64, cuts: VecDeque) -> VecDeque> { +// let beg = TsNano::from_ms(beg_ms); +// let end = TsNano::from_ms(120); +// let mut cut_next = cuts.pop_front().unwrap_or(u64::MAX); +// let mut ret = VecDeque::new(); +// let ivl = DtMs::from_ms_u64(x) +// } + 
+fn pu(c: &mut ContainerEvents, ts_ms: u64, val: f32) +// where +// C: AsMut>, +// C: std::borrow::BorrowMut>, +{ + c.push_back(TsNano::from_ms(ts_ms), val); +} + +trait IntoVecDequeU64 { + fn into_vec_deque_u64(self) -> VecDeque; +} + +impl IntoVecDequeU64 for &str { + fn into_vec_deque_u64(self) -> VecDeque { + self.split_ascii_whitespace().map(|x| x.parse().unwrap()).collect() + } +} +trait IntoVecDequeF32 { + fn into_vec_deque_f32(self) -> VecDeque; +} + +impl IntoVecDequeF32 for &str { + fn into_vec_deque_f32(self) -> VecDeque { + self.split_ascii_whitespace().map(|x| x.parse().unwrap()).collect() + } +} + +fn exp_u64<'a>( + vals: impl Iterator, + exps: impl Iterator, + tag: &str, +) -> Result<(), Error> { + let mut it_a = vals; + let mut it_b = exps; + let mut i = 0; + loop { + let a = it_a.next(); + let b = it_b.next(); + if a.is_none() && b.is_none() { + break; + } + if let (Some(&val), Some(&exp)) = (a, b) { + if val != exp { + return Err(Error::AssertMsg(format!("{tag} val {} exp {} i {}", val, exp, i))); + } + } else { + return Err(Error::AssertMsg(format!("{tag} len mismatch"))); + } + i += 1; + } + Ok(()) +} + +fn exp_f32<'a>( + vals: impl Iterator, + exps: impl Iterator, + tag: &str, +) -> Result<(), Error> { + let mut it_a = vals; + let mut it_b = exps; + let mut i = 0; + loop { + let a = it_a.next(); + let b = it_b.next(); + if a.is_none() && b.is_none() { + break; + } + if let (Some(&val), Some(&exp)) = (a, b) { + if netpod::f32_close(val, exp) == false { + return Err(Error::AssertMsg(format!("{tag} val {} exp {} i {}", val, exp, i))); + } + } else { + return Err(Error::AssertMsg(format!("{tag} len mismatch"))); + } + i += 1; + } + Ok(()) +} + +#[cfg(test)] +fn exp_cnts(bins: &ContainerBins, exps: impl IntoVecDequeU64) -> Result<(), Error> { + exp_u64(bins.cnts_iter(), exps.into_vec_deque_u64().iter(), "exp_cnts") +} + +#[cfg(test)] +fn exp_mins(bins: &ContainerBins, exps: impl IntoVecDequeF32) -> Result<(), Error> { + exp_f32(bins.mins_iter(), 
exps.into_vec_deque_f32().iter(), "exp_mins") +} + +#[cfg(test)] +fn exp_maxs(bins: &ContainerBins, exps: impl IntoVecDequeF32) -> Result<(), Error> { + exp_f32(bins.maxs_iter(), exps.into_vec_deque_f32().iter(), "exp_maxs") +} + +fn exp_avgs(bins: &ContainerBins, exps: impl IntoVecDequeF32) -> Result<(), Error> { + let exps = exps.into_vec_deque_f32(); + let mut it_a = bins.iter_debug(); + let mut it_b = exps.iter(); + let mut i = 0; + loop { + let a = it_a.next(); + let b = it_b.next(); + if a.is_none() && b.is_none() { + break; + } + if let (Some(a), Some(&exp)) = (a, b) { + let val = *a.avg as f32; + if netpod::f32_close(val, exp) == false { + return Err(Error::AssertMsg(format!("exp_avgs val {} exp {} i {}", val, exp, i))); + } + } else { + return Err(Error::AssertMsg(format!( + "len mismatch {} vs {}", + bins.len(), + exps.len() + ))); + } + i += 1; + } + Ok(()) +} + +#[test] +fn test_bin_events_f32_simple_with_before_00() -> Result<(), Error> { + let beg = TsNano::from_ms(110); + let end = TsNano::from_ms(120); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + evs.push_back(TsNano::from_ms(103), 2.0); + binner.ingest(evs)?; + binner.input_done_range_final()?; + let bins = binner.output(); + exp_cnts(&bins, "0")?; + exp_mins(&bins, "2.")?; + exp_maxs(&bins, "2.")?; + exp_avgs(&bins, "2.")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_simple_with_before_01_range_final() -> Result<(), Error> { + let beg = TsNano::from_ms(110); + let end = TsNano::from_ms(130); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + 
let em = &mut evs; + pu(em, 103, 2.0); + binner.ingest(evs)?; + binner.input_done_range_final()?; + let bins = binner.output(); + exp_cnts(&bins, "0 0")?; + exp_mins(&bins, "2. 2.")?; + exp_maxs(&bins, "2. 2.")?; + exp_avgs(&bins, "2. 2.")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_simple_00() -> Result<(), Error> { + let beg = TsNano::from_ms(100); + let end = TsNano::from_ms(120); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 100, 2.0); + pu(em, 104, 2.4); + binner.ingest(evs)?; + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 111, 1.0); + pu(em, 112, 1.2); + pu(em, 113, 1.4); + binner.ingest(evs)?; + binner.input_done_range_open()?; + let bins = binner.output(); + for b in bins.iter_debug() { + trace!("{b:?}"); + } + exp_cnts(&bins, "2 3")?; + exp_mins(&bins, "2. 
1.")?; + exp_maxs(&bins, "2.4 2.4")?; + exp_avgs(&bins, "2.24 1.5333")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_simple_01() -> Result<(), Error> { + let beg = TsNano::from_ms(100); + let end = TsNano::from_ms(120); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 102, 2.0); + pu(em, 104, 2.4); + binner.ingest(evs)?; + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 111, 1.0); + pu(em, 112, 1.2); + pu(em, 113, 1.4); + binner.ingest(evs)?; + binner.input_done_range_open()?; + let bins = binner.output(); + for b in bins.iter_debug() { + trace!("{b:?}"); + } + assert_eq!(bins.len(), 2); + exp_cnts(&bins, "2 3")?; + exp_mins(&bins, "2. 1.")?; + exp_maxs(&bins, "2.4 2.4")?; + exp_avgs(&bins, "2.30 1.5333")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_small_range_final() -> Result<(), Error> { + let beg = TsNano::from_ms(100); + let end = TsNano::from_ms(120); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 102, 2.0); + pu(em, 104, 2.4); + binner.ingest(evs)?; + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 111, 1.0); + pu(em, 112, 1.2); + pu(em, 113, 1.4); + binner.ingest(evs)?; + binner.input_done_range_final()?; + let bins = binner.output(); + for b in bins.iter_debug() { + trace!("{b:?}"); + } + assert_eq!(bins.len(), 2); + exp_cnts(&bins, "2 3")?; + exp_mins(&bins, "2. 
1.")?; + exp_maxs(&bins, "2.4 2.4")?; + exp_avgs(&bins, "2.30 1.44")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_small_intermittent_silence_range_open() -> Result<(), Error> { + let beg = TsNano::from_ms(100); + let end = TsNano::from_ms(150); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 102, 2.0); + pu(em, 104, 2.4); + binner.ingest(evs)?; + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 111, 1.0); + pu(em, 112, 1.2); + binner.ingest(evs)?; + // TODO take bins already here and assert. + // TODO combine all bins together for combined assert. + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 113, 1.4); + pu(em, 146, 1.3); + pu(em, 148, 1.2); + binner.ingest(evs)?; + binner.input_done_range_open()?; + let bins = binner.output(); + for b in bins.iter_debug() { + trace!("{b:?}"); + } + assert_eq!(bins.len(), 5); + exp_cnts(&bins, "2 3 0 0 2")?; + exp_mins(&bins, "2.0 1.0 1.4 1.4 1.2")?; + exp_maxs(&bins, "2.4 2.4 1.4 1.4 1.4")?; + exp_avgs(&bins, "2.30 1.44 1.4 1.4 1.375")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_small_intermittent_silence_range_final() -> Result<(), Error> { + let beg = TsNano::from_ms(100); + let end = TsNano::from_ms(150); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 102, 2.0); + pu(em, 104, 2.4); + binner.ingest(evs)?; + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 111, 1.0); + pu(em, 112, 1.2); + 
binner.ingest(evs)?; + // TODO take bins already here and assert. + // TODO combine all bins together for combined assert. + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 113, 1.4); + pu(em, 146, 1.3); + pu(em, 148, 1.2); + binner.ingest(evs)?; + binner.input_done_range_final()?; + let bins = binner.output(); + for b in bins.iter_debug() { + trace!("{b:?}"); + } + exp_cnts(&bins, "2 3 0 0 2")?; + exp_mins(&bins, "2.0 1.0 1.4 1.4 1.2")?; + exp_maxs(&bins, "2.4 2.4 1.4 1.4 1.4")?; + exp_avgs(&bins, "2.30 1.44 1.4 1.4 1.34")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_small_intermittent_silence_minmax_no_edge_range_final() -> Result<(), Error> { + let beg = TsNano::from_ms(110); + let end = TsNano::from_ms(120); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 109, 50.); + binner.ingest(evs)?; + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 111, 40.); + // pu(em, 112, 1.2); + // binner.ingest(evs)?; + // let mut evs = ContainerEvents::::new(); + // let em = &mut evs; + // pu(em, 113, 1.4); + // pu(em, 120, 1.4); + // pu(em, 146, 1.3); + // pu(em, 148, 1.2); + binner.ingest(evs)?; + binner.input_done_range_final()?; + let bins = binner.output(); + for b in bins.iter_debug() { + trace!("{b:?}"); + } + exp_cnts(&bins, "1")?; + exp_mins(&bins, "40.")?; + exp_maxs(&bins, "50.")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_f32_small_intermittent_silence_minmax_edge_range_final() -> Result<(), Error> { + let beg = TsNano::from_ms(110); + let end = TsNano::from_ms(120); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = 
BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 109, 50.); + binner.ingest(evs)?; + let mut evs = ContainerEvents::::new(); + let em = &mut evs; + pu(em, 110, 40.); + // pu(em, 112, 1.2); + // binner.ingest(evs)?; + // let mut evs = ContainerEvents::::new(); + // let em = &mut evs; + // pu(em, 113, 1.4); + // pu(em, 120, 1.4); + // pu(em, 146, 1.3); + // pu(em, 148, 1.2); + binner.ingest(evs)?; + binner.input_done_range_final()?; + let bins = binner.output(); + for b in bins.iter_debug() { + trace!("{b:?}"); + } + exp_cnts(&bins, "1")?; + exp_mins(&bins, "40.")?; + exp_maxs(&bins, "40.")?; + let bins = binner.output(); + assert_eq!(bins.len(), 0); + Ok(()) +} + +#[test] +fn test_bin_events_enum_simple_range_final() -> Result<(), Error> { + let beg = TsNano::from_ms(100); + let end = TsNano::from_ms(120); + let nano_range = NanoRange { + beg: beg.ns(), + end: end.ns(), + }; + let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10)); + let mut binner = BinnedEventsTimeweight::new(range); + let mut evs = ContainerEvents::new(); + evs.push_back(TsNano::from_ms(103), EnumVariant::new(1, "one")); + evs.push_back(TsNano::from_ms(104), EnumVariant::new(2, "two")); + binner.ingest(evs)?; + binner.input_done_range_final()?; + let bins = binner.output(); + Ok(()) +} diff --git a/src/binning/timeweight.rs b/src/binning/timeweight.rs new file mode 100644 index 0000000..8015202 --- /dev/null +++ b/src/binning/timeweight.rs @@ -0,0 +1,16 @@ +pub mod timeweight_bins; +pub mod timeweight_bins_dyn; +pub mod timeweight_events; +pub mod timeweight_events_dyn; + +use super::___; +use netpod::log::*; + +#[allow(unused)] +macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! 
trace_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_detail { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } diff --git a/src/binning/timeweight/timeweight_bins.rs b/src/binning/timeweight/timeweight_bins.rs new file mode 100644 index 0000000..320924c --- /dev/null +++ b/src/binning/timeweight/timeweight_bins.rs @@ -0,0 +1,5 @@ +use super::___; +use netpod::log::*; + +#[allow(unused)] +macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } diff --git a/src/binning/timeweight/timeweight_bins_dyn.rs b/src/binning/timeweight/timeweight_bins_dyn.rs new file mode 100644 index 0000000..1b65daa --- /dev/null +++ b/src/binning/timeweight/timeweight_bins_dyn.rs @@ -0,0 +1,27 @@ +use futures_util::Stream; +use items_0::streamitem::Sitemty; +use items_0::timebin::BinningggContainerBinsDyn; +use netpod::BinnedRange; +use netpod::TsNano; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +pub struct BinnedBinsTimeweightStream {} + +impl BinnedBinsTimeweightStream { + pub fn new( + range: BinnedRange, + inp: Pin>> + Send>>, + ) -> Self { + todo!() + } +} + +impl Stream for BinnedBinsTimeweightStream { + type Item = Sitemty>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + todo!() + } +} diff --git a/src/binning/timeweight/timeweight_events.rs b/src/binning/timeweight/timeweight_events.rs new file mode 100644 index 0000000..c68008d --- /dev/null +++ b/src/binning/timeweight/timeweight_events.rs @@ -0,0 +1,645 @@ +use super::super::container_events::EventValueType; +use crate::binning::aggregator::AggregatorTimeWeight; +use crate::binning::container_bins::ContainerBins; +use crate::binning::container_events::ContainerEvents; +use crate::binning::container_events::ContainerEventsTakeUpTo; +use crate::binning::container_events::EventSingle; +use core::fmt; +use daqbuf_err as err; +use err::thiserror; +use err::ThisError; +use netpod::log::*; +use 
netpod::BinnedRange; +use netpod::DtNano; +use netpod::TsNano; +use std::mem; + +#[allow(unused)] +macro_rules! trace_ { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_init { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_cycle { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_event_next { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_init_lst { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_minmax { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_event { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_firsts { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_finish_bin { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_container { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_container_2 { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[allow(unused)] +macro_rules! 
trace_fill_until { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) } + +#[cold] +#[inline] +#[allow(unused)] +fn cold() {} + +const DEBUG_CHECKS: bool = true; + +#[derive(Debug, ThisError)] +#[cstm(name = "BinnedEventsTimeweight")] +pub enum Error { + BadContainer(#[from] super::super::container_events::EventsContainerError), + Unordered, + EventAfterRange, + NoLstAfterFirst, + EmptyContainerInnerHandler, + NoLstButMinMax, + WithLstButEventBeforeRange, + WithMinMaxButEventBeforeRange, + NoMinMaxAfterInit, + ExpectEventWithinRange, +} + +type MinMax = (EventSingle, EventSingle); + +#[derive(Clone)] +struct LstRef<'a, EVT>(&'a EventSingle); + +struct LstMut<'a, EVT>(&'a mut EventSingle); + +#[derive(Debug)] +struct InnerB +where + EVT: EventValueType, +{ + cnt: u64, + active_beg: TsNano, + active_end: TsNano, + active_len: DtNano, + filled_until: TsNano, + filled_width: DtNano, + agg: ::AggregatorTimeWeight, +} + +impl InnerB +where + EVT: EventValueType, +{ + // NOTE that this is also used during bin-cycle. + fn ingest_event_with_lst_gt_range_beg_agg(&mut self, ev: EventSingle, lst: LstRef) { + let selfname = "ingest_event_with_lst_gt_range_beg_agg"; + trace_ingest_event!("{selfname} {:?}", ev); + if DEBUG_CHECKS { + if ev.ts <= self.active_beg { + panic!("should never get here"); + } + if ev.ts >= self.active_end { + panic!("should never get here"); + } + } + let dt = ev.ts.delta(self.filled_until); + trace_ingest_event!("{selfname} dt {:?} ev {:?}", dt, ev); + // TODO can the caller already take the value and replace it afterwards with the current value? + // This fn could swap the value in lst and directly use it. + // This would require that any call path does not mess with lst. + // NOTE that this fn is also used during bin-cycle. 
+ self.agg.ingest(dt, self.active_len, lst.0.val.clone()); + self.filled_width = self.filled_width.add(dt); + self.filled_until = ev.ts; + } + + fn ingest_event_with_lst_gt_range_beg_2(&mut self, ev: EventSingle, lst: LstMut) -> Result<(), Error> { + let selfname = "ingest_event_with_lst_gt_range_beg_2"; + trace_ingest_event!("{selfname}"); + self.ingest_event_with_lst_gt_range_beg_agg(ev.clone(), LstRef(lst.0)); + InnerA::apply_lst_after_event_handled(ev, lst); + // self.cnt += 1; + Ok(()) + } + + fn ingest_event_with_lst_gt_range_beg( + &mut self, + ev: EventSingle, + lst: LstMut, + minmax: &mut MinMax, + ) -> Result<(), Error> { + let selfname = "ingest_event_with_lst_gt_range_beg"; + trace_ingest_event!("{selfname}"); + // TODO if the event is exactly on the current bin first edge, then there is no contribution to the avg yet + // and I must initialize the min/max with the current event. + InnerA::apply_min_max(&ev, minmax); + self.ingest_event_with_lst_gt_range_beg_2(ev.clone(), lst)?; + Ok(()) + } + + fn ingest_event_with_lst_eq_range_beg( + &mut self, + ev: EventSingle, + lst: LstMut, + minmax: &mut MinMax, + ) -> Result<(), Error> { + let selfname = "ingest_event_with_lst_eq_range_beg"; + trace_ingest_event!("{selfname}"); + // TODO if the event is exactly on the current bin first edge, then there is no contribution to the avg yet + // and I must initialize the min/max with the current event. 
+ InnerA::apply_min_max(&ev, minmax); + InnerA::apply_lst_after_event_handled(ev, lst); + Ok(()) + } + + fn ingest_with_lst_gt_range_beg( + &mut self, + mut evs: ContainerEventsTakeUpTo, + lst: LstMut, + minmax: &mut MinMax, + ) -> Result<(), Error> { + let selfname = "ingest_with_lst_gt_range_beg"; + trace_ingest_event!("{selfname}"); + while let Some(ev) = evs.pop_front() { + trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname); + if ev.ts <= self.active_beg { + panic!("should never get here"); + } + if ev.ts >= self.active_end { + panic!("should never get here"); + } + self.ingest_event_with_lst_gt_range_beg(ev.clone(), LstMut(lst.0), minmax)?; + self.cnt += 1; + } + Ok(()) + } + + fn ingest_with_lst_ge_range_beg( + &mut self, + mut evs: ContainerEventsTakeUpTo, + lst: LstMut, + minmax: &mut MinMax, + ) -> Result<(), Error> { + let selfname = "ingest_with_lst_ge_range_beg"; + trace_ingest_event!("{selfname}"); + while let Some(ev) = evs.pop_front() { + trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname); + if ev.ts < self.active_beg { + panic!("should never get here"); + } + if ev.ts >= self.active_end { + panic!("should never get here"); + } + if ev.ts == self.active_beg { + self.ingest_event_with_lst_eq_range_beg(ev, LstMut(lst.0), minmax)?; + self.cnt += 1; + } else { + self.ingest_event_with_lst_gt_range_beg(ev.clone(), LstMut(lst.0), minmax)?; + self.cnt += 1; + trace_ingest_event!("{selfname} now calling ingest_with_lst_gt_range_beg"); + return self.ingest_with_lst_gt_range_beg(evs, LstMut(lst.0), minmax); + } + } + Ok(()) + } + + fn ingest_with_lst_minmax( + &mut self, + evs: ContainerEventsTakeUpTo, + lst: LstMut, + minmax: &mut MinMax, + ) -> Result<(), Error> { + let selfname = "ingest_with_lst_minmax"; + trace_ingest_event!("{selfname}"); + // TODO how to handle the min max? I don't take event data yet out of the container. 
+ if let Some(ts0) = evs.ts_first() { + trace_ingest_event!("EVENT POP FRONT {selfname}"); + trace_ingest_event!("EVENT TIMESTAMP FRONT {:?} {selfname}", ts0); + if ts0 < self.active_beg { + panic!("should never get here"); + } else { + self.ingest_with_lst_ge_range_beg(evs, lst, minmax) + } + } else { + Ok(()) + } + } + + // PRECONDITION: filled_until < ts <= active_end + fn fill_until(&mut self, ts: TsNano, lst: LstRef) { + let b = self; + assert!(b.filled_until < ts); + assert!(ts <= b.active_end); + let dt = ts.delta(b.filled_until); + trace_fill_until!("fill_until ts {:?} dt {:?} lst {:?}", ts, dt, lst.0); + assert!(b.filled_until < ts); + assert!(ts <= b.active_end); + b.agg.ingest(dt, b.active_len, lst.0.val.clone()); + b.filled_width = b.filled_width.add(dt); + b.filled_until = ts; + } +} + +#[derive(Debug)] +struct InnerA +where + EVT: EventValueType, +{ + inner_b: InnerB, + minmax: Option<(EventSingle, EventSingle)>, +} + +impl InnerA +where + EVT: EventValueType, +{ + fn apply_min_max(ev: &EventSingle, minmax: &mut MinMax) { + if ev.val < minmax.0.val { + minmax.0 = ev.clone(); + } + if ev.val > minmax.1.val { + minmax.1 = ev.clone(); + } + } + + fn apply_lst_after_event_handled(ev: EventSingle, lst: LstMut) { + *lst.0 = ev; + } + + fn init_minmax(&mut self, ev: &EventSingle) { + trace_ingest_minmax!("init_minmax {:?}", ev); + self.minmax = Some((ev.clone(), ev.clone())); + } + + fn init_minmax_with_lst(&mut self, ev: &EventSingle, lst: LstRef) { + trace_ingest_minmax!("init_minmax_with_lst {:?} {:?}", ev, lst.0); + let minmax = self.minmax.insert((lst.0.clone(), lst.0.clone())); + Self::apply_min_max(ev, minmax); + } + + fn ingest_with_lst(&mut self, mut evs: ContainerEventsTakeUpTo, lst: LstMut) -> Result<(), Error> { + let selfname = "ingest_with_lst"; + trace_ingest_container!("{selfname} evs len {}", evs.len()); + let b = &mut self.inner_b; + if let Some(minmax) = self.minmax.as_mut() { + b.ingest_with_lst_minmax(evs, lst, minmax) + } else { + if 
let Some(ev) = evs.pop_front() { + trace_event_next!("EVENT POP FRONT {:?} {selfname:30}", ev); + let beg = b.active_beg; + let end = b.active_end; + if ev.ts < beg { + panic!("should never get here"); + } else if ev.ts >= end { + panic!("should never get here"); + } else { + if ev.ts == beg { + self.init_minmax(&ev); + InnerA::apply_lst_after_event_handled(ev, lst); + let b = &mut self.inner_b; + b.cnt += 1; + Ok(()) + } else { + self.init_minmax_with_lst(&ev, LstRef(lst.0)); + let b = &mut self.inner_b; + if let Some(minmax) = self.minmax.as_mut() { + if ev.ts == beg { + panic!("logic error, is handled before"); + } else { + b.ingest_event_with_lst_gt_range_beg_2(ev, LstMut(lst.0))?; + } + b.cnt += 1; + b.ingest_with_lst_minmax(evs, lst, minmax) + } else { + Err(Error::NoMinMaxAfterInit) + } + } + } + } else { + Ok(()) + } + } + } + + fn reset_01(&mut self, lst: LstRef) { + let selfname = "reset_01"; + let b = &mut self.inner_b; + trace_cycle!( + "{selfname} active_end {:?} filled_until {:?}", + b.active_end, + b.filled_until + ); + let div = b.active_len.ns(); + let old_end = b.active_end; + let ts1 = TsNano::from_ns(b.active_end.ns() / div * div); + assert!(ts1 == old_end); + b.active_beg = ts1; + b.active_end = ts1.add_dt_nano(b.active_len); + b.filled_until = ts1; + b.filled_width = DtNano::from_ns(0); + b.cnt = 0; + self.minmax = Some((lst.0.clone(), lst.0.clone())); + } + + fn push_out_and_reset(&mut self, lst: LstRef, range_final: bool, out: &mut ContainerBins) { + let selfname = "push_out_and_reset"; + // TODO there is not always good enough input to produce a meaningful bin. + // TODO can we always reset, and what exactly does reset mean here? + // TODO what logic can I save here? To output a bin I need to have min, max, lst. 
+ let b = &mut self.inner_b; + let minmax = self.minmax.get_or_insert_with(|| { + trace_cycle!("{selfname} minmax not yet set"); + (lst.0.clone(), lst.0.clone()) + }); + { + let filled_width_fraction = b.filled_width.fraction_of(b.active_len); + let res = b.agg.result_and_reset_for_new_bin(filled_width_fraction); + out.push_back( + b.active_beg, + b.active_end, + b.cnt, + minmax.0.val.clone(), + minmax.1.val.clone(), + res, + lst.0.val.clone(), + range_final, + ); + } + self.reset_01(lst); + } +} + +pub struct BinnedEventsTimeweight +where + EVT: EventValueType, +{ + lst: Option>, + range: BinnedRange, + inner_a: InnerA, + out: ContainerBins, + produce_cnt_zero: bool, +} + +impl fmt::Debug for BinnedEventsTimeweight +where + EVT: EventValueType, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("BinnedEventsTimeweight") + .field("lst", &self.lst) + .field("range", &self.range) + .field("inner_a", &self.inner_a) + .field("out", &self.out) + .finish() + } +} + +impl BinnedEventsTimeweight +where + EVT: EventValueType, +{ + pub fn new(range: BinnedRange) -> Self { + let active_beg = range.nano_beg(); + let active_end = active_beg.add_dt_nano(range.bin_len.to_dt_nano()); + let active_len = active_end.delta(active_beg); + Self { + range, + inner_a: InnerA:: { + inner_b: InnerB { + cnt: 0, + active_beg, + active_end, + active_len, + filled_until: active_beg, + filled_width: DtNano::from_ns(0), + agg: <::AggregatorTimeWeight as AggregatorTimeWeight>::new(), + }, + minmax: None, + }, + lst: None, + out: ContainerBins::new(), + produce_cnt_zero: true, + } + } + + pub fn disable_cnt_zero(self) -> Self { + let mut ret = self; + ret.produce_cnt_zero = false; + ret + } + + fn ingest_event_without_lst(&mut self, ev: EventSingle) -> Result<(), Error> { + let selfname = "ingest_event_without_lst"; + let b = &self.inner_a.inner_b; + if ev.ts >= b.active_end { + panic!("{selfname} should never get here"); + } else { + 
trace_ingest_init_lst!("ingest_event_without_lst set lst {:?}", ev); + self.lst = Some(ev.clone()); + if ev.ts >= b.active_beg { + trace_ingest_minmax!("ingest_event_without_lst"); + self.inner_a.init_minmax(&ev); + let b = &mut self.inner_a.inner_b; + b.cnt += 1; + b.filled_until = ev.ts; + } + Ok(()) + } + } + + fn ingest_without_lst(&mut self, mut evs: ContainerEventsTakeUpTo) -> Result<(), Error> { + let selfname = "ingest_without_lst"; + if let Some(ev) = evs.pop_front() { + trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname); + if ev.ts >= self.inner_a.inner_b.active_end { + panic!("{selfname} should never get here"); + } else { + self.ingest_event_without_lst(ev)?; + if let Some(lst) = self.lst.as_mut() { + self.inner_a.ingest_with_lst(evs, LstMut(lst)) + } else { + Err(Error::NoLstAfterFirst) + } + } + } else { + Ok(()) + } + } + + // Caller asserts that evs is ordered within the current container + // and with respect to the last container, if any. + fn ingest_ordered(&mut self, evs: ContainerEventsTakeUpTo) -> Result<(), Error> { + if let Some(lst) = self.lst.as_mut() { + self.inner_a.ingest_with_lst(evs, LstMut(lst)) + } else { + if self.inner_a.minmax.is_some() { + Err(Error::NoLstButMinMax) + } else { + self.ingest_without_lst(evs) + } + } + } + + fn cycle_01(&mut self, ts: TsNano) { + let b = &self.inner_a.inner_b; + trace_cycle!("cycle_01 {:?} {:?}", ts, b.active_end); + assert!(b.active_beg < ts); + assert!(b.active_beg <= b.filled_until); + assert!(b.filled_until < ts); + assert!(b.filled_until <= b.active_end); + let div = b.active_len.ns(); + if let Some(lst) = self.lst.as_ref() { + let lst = LstRef(lst); + if self.produce_cnt_zero { + let mut i = 0; + loop { + i += 1; + assert!(i < 100000, "too many iterations"); + let b = &self.inner_a.inner_b; + if ts > b.filled_until { + if ts >= b.active_end { + if b.filled_until < b.active_end { + self.inner_a.inner_b.fill_until(b.active_end, lst.clone()); + } + 
self.inner_a.push_out_and_reset(lst.clone(), true, &mut self.out); + } else { + self.inner_a.inner_b.fill_until(ts, lst.clone()); + } + } else { + break; + } + } + } else { + let b = &self.inner_a.inner_b; + if ts > b.filled_until { + if ts >= b.active_end { + if b.filled_until < b.active_end { + self.inner_a.inner_b.fill_until(b.active_end, lst.clone()); + } + self.inner_a.push_out_and_reset(lst.clone(), true, &mut self.out); + } else { + // TODO should not hit this case. Prove it, assert it. + self.inner_a.inner_b.fill_until(ts, lst.clone()); + } + } else { + // TODO should never hit this case. Count. + } + + // TODO jump to next bin + // TODO merge with the other reset + // Below uses the same code + let ts1 = TsNano::from_ns(ts.ns() / div * div); + let b = &mut self.inner_a.inner_b; + b.active_beg = ts1; + b.active_end = ts1.add_dt_nano(b.active_len); + b.filled_until = ts1; + b.filled_width = DtNano::from_ns(0); + b.cnt = 0; + b.agg.reset_for_new_bin(); + // assert!(self.inner_a.minmax.is_none()); + trace_cycle!("cycled direct to {:?} {:?}", b.active_beg, b.active_end); + } + } else { + assert!(self.inner_a.minmax.is_none()); + // TODO merge with the other reset + let ts1 = TsNano::from_ns(ts.ns() / div * div); + let b = &mut self.inner_a.inner_b; + b.active_beg = ts1; + b.active_end = ts1.add_dt_nano(b.active_len); + b.filled_until = ts1; + b.filled_width = DtNano::from_ns(0); + b.cnt = 0; + b.agg.reset_for_new_bin(); + trace_cycle!("cycled direct to {:?} {:?}", b.active_beg, b.active_end); + } + } + + fn cycle_02(&mut self) { + let b = &self.inner_a.inner_b; + trace_cycle!("cycle_02 {:?}", b.active_end); + if let Some(lst) = self.lst.as_ref() { + let lst = LstRef(lst); + self.inner_a.push_out_and_reset(lst, false, &mut self.out); + } else { + // there is nothing we can produce + } + } + + pub fn ingest(&mut self, mut evs_all: ContainerEvents) -> Result<(), Error> { + // It is this type's task to find and store the one-before event. 
+ // We then pass it to the aggregation. + // AggregatorTimeWeight needs a function for that. + // What about counting the events that actually fall into the range? + // Maybe that should be done in this type. + // That way we can pass the values and weights to the aggregation, and count the in-range here. + // This type must also "close" the current aggregation by passing the "last" and init the next. + // ALSO: need to keep track of the "lst". Probably best done in this type as well? + + // TODO should rely on external stream adapter for verification to not duplicate things. + evs_all.verify()?; + + loop { + break if let Some(ts) = evs_all.ts_first() { + trace_ingest_event!("EVENT TIMESTAMP FRONT {:?} ingest", ts); + let b = &mut self.inner_a.inner_b; + if ts >= self.range.nano_end() { + return Err(Error::EventAfterRange); + } + if ts >= b.active_end { + assert!(b.filled_until < b.active_end, "{} < {}", b.filled_until, b.active_end); + self.cycle_01(ts); + } + let n1 = evs_all.len(); + let len_before = evs_all.len_before(self.inner_a.inner_b.active_end); + let evs = ContainerEventsTakeUpTo::new(&mut evs_all, len_before); + if let Some(lst) = self.lst.as_ref() { + if ts < lst.ts { + return Err(Error::Unordered); + } else { + self.ingest_ordered(evs)? + } + } else { + self.ingest_ordered(evs)? 
+ }; + trace_ingest_container_2!("ingest after still left len evs {}", evs_all.len()); + let n2 = evs_all.len(); + if n2 != 0 { + if n2 == n1 { + panic!("no progress"); + } + continue; + } + } else { + () + }; + } + Ok(()) + } + + pub fn input_done_range_final(&mut self) -> Result<(), Error> { + trace_cycle!("input_done_range_final"); + self.cycle_01(self.range.nano_end()); + Ok(()) + } + + pub fn input_done_range_open(&mut self) -> Result<(), Error> { + trace_cycle!("input_done_range_open"); + self.cycle_02(); + Ok(()) + } + + pub fn output_len(&self) -> usize { + self.out.len() + } + + pub fn output(&mut self) -> ContainerBins { + mem::replace(&mut self.out, ContainerBins::new()) + } +} diff --git a/src/binning/timeweight/timeweight_events_dyn.rs b/src/binning/timeweight/timeweight_events_dyn.rs new file mode 100644 index 0000000..b5f71bc --- /dev/null +++ b/src/binning/timeweight/timeweight_events_dyn.rs @@ -0,0 +1,276 @@ +use super::timeweight_events::BinnedEventsTimeweight; +use crate::binning::container_events::ContainerEvents; +use crate::binning::container_events::EventValueType; +use crate::channelevents::ChannelEvents; +use daqbuf_err as err; +use err::thiserror; +use err::ThisError; +use futures_util::Stream; +use futures_util::StreamExt; +use items_0::streamitem::LogItem; +use items_0::streamitem::Sitemty; +use items_0::timebin::BinnedEventsTimeweightTrait; +use items_0::timebin::BinningggContainerBinsDyn; +use items_0::timebin::BinningggError; +use items_0::timebin::BinsBoxed; +use items_0::timebin::EventsBoxed; +use netpod::log::*; +use netpod::BinnedRange; +use netpod::TsNano; +use std::ops::ControlFlow; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +macro_rules! trace_input_container { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) } + +macro_rules! 
trace_emit { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) } + +#[derive(Debug, ThisError)] +#[cstm(name = "BinnedEventsTimeweightDyn")] +pub enum Error { + InnerDynMissing, +} + +#[derive(Debug)] +pub struct BinnedEventsTimeweightDynbox +where + EVT: EventValueType, +{ + binner: BinnedEventsTimeweight, +} + +impl BinnedEventsTimeweightDynbox +where + EVT: EventValueType + 'static, +{ + pub fn new(range: BinnedRange) -> Box { + let ret = Self { + binner: BinnedEventsTimeweight::new(range), + }; + Box::new(ret) + } +} + +impl BinnedEventsTimeweightTrait for BinnedEventsTimeweightDynbox +where + EVT: EventValueType, +{ + fn ingest(&mut self, mut evs: EventsBoxed) -> Result<(), BinningggError> { + // let a = (&evs as &dyn any::Any).downcast_ref::(); + // evs.downcast::(); + // evs.as_anybox().downcast::>(); + match evs.to_anybox().downcast::>() { + Ok(evs) => { + let evs = { + let a = evs; + *a + }; + Ok(self.binner.ingest(evs)?) + } + Err(_) => Err(BinningggError::TypeMismatch { + have: evs.type_name().into(), + expect: std::any::type_name::>().into(), + }), + } + } + + fn input_done_range_final(&mut self) -> Result<(), BinningggError> { + Ok(self.binner.input_done_range_final()?) + } + + fn input_done_range_open(&mut self) -> Result<(), BinningggError> { + Ok(self.binner.input_done_range_open()?) 
+ } + + fn output(&mut self) -> Result, BinningggError> { + if self.binner.output_len() == 0 { + Ok(None) + } else { + let c = self.binner.output(); + Ok(Some(Box::new(c))) + } + } +} + +#[derive(Debug)] +pub struct BinnedEventsTimeweightLazy { + range: BinnedRange, + binned_events: Option>, +} + +impl BinnedEventsTimeweightLazy { + pub fn new(range: BinnedRange) -> Self { + Self { + range, + binned_events: None, + } + } +} + +impl BinnedEventsTimeweightTrait for BinnedEventsTimeweightLazy { + fn ingest(&mut self, evs_all: EventsBoxed) -> Result<(), BinningggError> { + self.binned_events + .get_or_insert_with(|| evs_all.binned_events_timeweight_traitobj(self.range.clone())) + .ingest(evs_all) + } + + fn input_done_range_final(&mut self) -> Result<(), BinningggError> { + self.binned_events + .as_mut() + .map(|x| x.input_done_range_final()) + .unwrap_or_else(|| { + debug!("TODO something to do if we miss the binner here?"); + Ok(()) + }) + } + + fn input_done_range_open(&mut self) -> Result<(), BinningggError> { + self.binned_events + .as_mut() + .map(|x| x.input_done_range_open()) + .unwrap_or(Ok(())) + } + + fn output(&mut self) -> Result, BinningggError> { + self.binned_events.as_mut().map(|x| x.output()).unwrap_or(Ok(None)) + } +} + +enum StreamState { + Reading, + Done, + Invalid, +} + +pub struct BinnedEventsTimeweightStream { + state: StreamState, + inp: Pin> + Send>>, + binned_events: BinnedEventsTimeweightLazy, + range_complete: bool, +} + +impl BinnedEventsTimeweightStream { + pub fn new(range: BinnedRange, inp: Pin> + Send>>) -> Self { + Self { + state: StreamState::Reading, + inp, + binned_events: BinnedEventsTimeweightLazy::new(range), + range_complete: false, + } + } + + fn handle_sitemty( + mut self: Pin<&mut Self>, + item: Sitemty, + _cx: &mut Context, + ) -> ControlFlow::Item>>> { + use items_0::streamitem::RangeCompletableItem::*; + use items_0::streamitem::StreamItem::*; + use ControlFlow::*; + use Poll::*; + match item { + Ok(x) => match x { + 
DataItem(x) => match x { + Data(x) => match x { + ChannelEvents::Events(evs) => match self.binned_events.ingest(evs.to_container_events()) { + Ok(()) => { + match self.binned_events.output() { + Ok(Some(x)) => { + if x.len() == 0 { + Continue(()) + } else { + Break(Ready(Some(Ok(DataItem(Data(x)))))) + } + } + Ok(None) => Continue(()), + Err(e) => Break(Ready(Some(Err(err::Error::from_string(e))))), + } + // Continue(()) + } + Err(e) => Break(Ready(Some(Err(err::Error::from_string(e))))), + }, + ChannelEvents::Status(_) => { + // TODO use the status + Continue(()) + } + }, + RangeComplete => { + self.range_complete = true; + Continue(()) + } + }, + Log(x) => Break(Ready(Some(Ok(Log(x))))), + Stats(x) => Break(Ready(Some(Ok(Stats(x))))), + }, + Err(e) => { + self.state = StreamState::Done; + Break(Ready(Some(Err(e)))) + } + } + } + + fn handle_eos(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll::Item>> { + trace_input_container!("handle_eos"); + use items_0::streamitem::RangeCompletableItem::*; + use items_0::streamitem::StreamItem::*; + use Poll::*; + self.state = StreamState::Done; + if self.range_complete { + self.binned_events + .input_done_range_final() + .map_err(err::Error::from_string)?; + } else { + self.binned_events + .input_done_range_open() + .map_err(err::Error::from_string)?; + } + match self.binned_events.output().map_err(err::Error::from_string)? 
{ + Some(x) => { + trace_emit!("seeing ready bins {:?}", x); + Ready(Some(Ok(DataItem(Data(x))))) + } + None => { + let item = LogItem::from_node(888, Level::INFO, format!("no bins ready on eos")); + Ready(Some(Ok(Log(item)))) + } + } + } + + fn handle_main(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow::Item>>> { + use ControlFlow::*; + use Poll::*; + let ret = match &self.state { + StreamState::Reading => match self.as_mut().inp.poll_next_unpin(cx) { + Ready(Some(x)) => self.as_mut().handle_sitemty(x, cx), + Ready(None) => Break(self.as_mut().handle_eos(cx)), + Pending => Break(Pending), + }, + StreamState::Done => { + self.state = StreamState::Invalid; + Break(Ready(None)) + } + StreamState::Invalid => { + panic!("StreamState::Invalid") + } + }; + if let Break(Ready(Some(Err(_)))) = ret { + self.state = StreamState::Done; + } + ret + } +} + +impl Stream for BinnedEventsTimeweightStream { + type Item = Sitemty>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + use ControlFlow::*; + loop { + break match self.as_mut().handle_main(cx) { + Break(x) => x, + Continue(()) => continue, + }; + } + } +} diff --git a/src/binning/valuetype.rs b/src/binning/valuetype.rs new file mode 100644 index 0000000..d1b373f --- /dev/null +++ b/src/binning/valuetype.rs @@ -0,0 +1,85 @@ +use super::aggregator::AggregatorTimeWeight; +use super::container_events::Container; +use super::container_events::EventValueType; +use core::fmt; +use items_0::vecpreview::PreviewRange; +use netpod::DtNano; +use netpod::EnumVariant; +use serde::Deserialize; +use serde::Serialize; +use std::collections::VecDeque; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EnumVariantContainer { + ixs: VecDeque, + names: VecDeque, +} + +impl PreviewRange for EnumVariantContainer { + fn preview<'a>(&'a self) -> Box { + let ret = items_0::vecpreview::PreviewCell { + a: self.ixs.front(), + b: self.ixs.back(), + }; + Box::new(ret) + } +} + +impl Container for 
EnumVariantContainer { + fn new() -> Self { + Self { + ixs: VecDeque::new(), + names: VecDeque::new(), + } + } + + fn push_back(&mut self, val: EnumVariant) { + let (ix, name) = val.into_parts(); + self.ixs.push_back(ix); + self.names.push_back(name); + } + + fn pop_front(&mut self) -> Option { + if let (Some(a), Some(b)) = (self.ixs.pop_front(), self.names.pop_front()) { + Some(EnumVariant::new(a, b)) + } else { + None + } + } +} + +#[derive(Debug)] +pub struct EnumVariantAggregatorTimeWeight { + sum: f32, +} + +impl AggregatorTimeWeight for EnumVariantAggregatorTimeWeight { + fn new() -> Self { + Self { sum: 0. } + } + + fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EnumVariant) { + let f = dt.ns() as f32 / bl.ns() as f32; + eprintln!("INGEST ENUM {} {:?}", f, val); + self.sum += f * val.ix() as f32; + } + + fn reset_for_new_bin(&mut self) { + self.sum = 0.; + } + + fn result_and_reset_for_new_bin( + &mut self, + filled_width_fraction: f32, + ) -> ::AggTimeWeightOutputAvg { + let ret = self.sum.clone(); + self.sum = 0.; + ret / filled_width_fraction + } +} + +impl EventValueType for EnumVariant { + type Container = EnumVariantContainer; + type AggregatorTimeWeight = EnumVariantAggregatorTimeWeight; + type AggTimeWeightOutputAvg = f32; +} diff --git a/src/binsdim0.rs b/src/binsdim0.rs new file mode 100644 index 0000000..4b3d969 --- /dev/null +++ b/src/binsdim0.rs @@ -0,0 +1,905 @@ +use crate::ts_offs_from_abs; +use crate::ts_offs_from_abs_with_anchor; +use crate::IsoDateTime; +use daqbuf_err as err; +use err::Error; +use items_0::collect_s::CollectableDyn; +use items_0::collect_s::CollectableType; +use items_0::collect_s::CollectedDyn; +use items_0::collect_s::CollectorTy; +use items_0::collect_s::ToJsonResult; +use items_0::container::ByteEstimate; +use items_0::overlap::HasTimestampDeque; +use items_0::scalar_ops::AsPrimF32; +use items_0::scalar_ops::ScalarOps; +use items_0::timebin::TimeBinnableTy; +use items_0::timebin::TimeBinnerTy; +use 
items_0::timebin::TimeBins; +use items_0::vecpreview::VecPreview; +use items_0::AppendAllFrom; +use items_0::AppendEmptyBin; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::Empty; +use items_0::HasNonemptyFirstBin; +use items_0::Resettable; +use items_0::TypeName; +use items_0::WithLen; +use netpod::is_false; +use netpod::log::*; +use netpod::range::evrange::SeriesRange; +use netpod::timeunits::SEC; +use netpod::BinnedRange; +use netpod::BinnedRangeEnum; +use netpod::CmpZero; +use netpod::Dim0Kind; +use netpod::TsNano; +use serde::Deserialize; +use serde::Serialize; +use std::any; +use std::any::Any; +use std::collections::VecDeque; +use std::fmt; +use std::mem; +use std::ops::Range; + +#[allow(unused)] +macro_rules! trace_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +// TODO make members private +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct BinsDim0 { + pub ts1s: VecDeque, + pub ts2s: VecDeque, + pub cnts: VecDeque, + pub mins: VecDeque, + pub maxs: VecDeque, + pub avgs: VecDeque, + pub lsts: VecDeque, + pub dim0kind: Option, +} + +impl TypeName for BinsDim0 { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl fmt::Debug for BinsDim0 +where + NTY: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let self_name = any::type_name::(); + if true { + return fmt::Display::fmt(self, fmt); + } + if true { + write!( + fmt, + "{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}", + self.ts1s.len(), + self.ts1s.iter().map(|k| k / SEC).collect::>(), + self.ts2s.iter().map(|k| k / SEC).collect::>(), + self.cnts, + self.mins, + self.maxs, + self.avgs, + ) + } else { + write!( + fmt, + "{self_name} count {} edges {:?} .. {:?} counts {:?} .. {:?} avgs {:?} .. 
{:?}", + self.ts1s.len(), + self.ts1s.front().map(|k| k / SEC), + self.ts2s.back().map(|k| k / SEC), + self.cnts.front(), + self.cnts.back(), + self.avgs.front(), + self.avgs.back(), + ) + } + } +} + +impl fmt::Display for BinsDim0 +where + NTY: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let self_name = any::type_name::(); + write!( + fmt, + "{self_name} {{ len: {:?}, ts1s: {:?}, ts2s {:?}, counts {:?}, mins {:?}, maxs {:?}, avgs {:?}, lsts {:?} }}", + self.len(), + VecPreview::new(&self.ts1s), + VecPreview::new(&self.ts2s), + VecPreview::new(&self.cnts), + VecPreview::new(&self.mins), + VecPreview::new(&self.maxs), + VecPreview::new(&self.avgs), + VecPreview::new(&self.lsts), + ) + } +} + +impl BinsDim0 { + pub fn push(&mut self, ts1: u64, ts2: u64, count: u64, min: NTY, max: NTY, avg: f32, lst: NTY) { + if avg < min.as_prim_f32_b() || avg > max.as_prim_f32_b() { + // TODO rounding issues? + debug!("bad avg"); + } + self.ts1s.push_back(ts1); + self.ts2s.push_back(ts2); + self.cnts.push_back(count); + self.mins.push_back(min); + self.maxs.push_back(max); + self.avgs.push_back(avg); + self.lsts.push_back(lst); + } + + pub fn equal_slack(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false; + } + for (&a, &b) in self.ts1s.iter().zip(other.ts1s.iter()) { + if a != b { + return false; + } + } + for (&a, &b) in self.ts2s.iter().zip(other.ts2s.iter()) { + if a != b { + return false; + } + } + for (a, b) in self.mins.iter().zip(other.mins.iter()) { + if !a.equal_slack(b) { + return false; + } + } + for (a, b) in self.maxs.iter().zip(other.maxs.iter()) { + if !a.equal_slack(b) { + return false; + } + } + for (a, b) in self.avgs.iter().zip(other.avgs.iter()) { + if !a.equal_slack(b) { + return false; + } + } + true + } + + // TODO make this part of a new bins trait, similar like Events trait. + // TODO check for error? 
+ pub fn drain_into(&mut self, dst: &mut Self, range: Range) -> () { + dst.ts1s.extend(self.ts1s.drain(range.clone())); + dst.ts2s.extend(self.ts2s.drain(range.clone())); + dst.cnts.extend(self.cnts.drain(range.clone())); + dst.mins.extend(self.mins.drain(range.clone())); + dst.maxs.extend(self.maxs.drain(range.clone())); + dst.avgs.extend(self.avgs.drain(range.clone())); + dst.lsts.extend(self.lsts.drain(range.clone())); + } +} + +impl AsAnyRef for BinsDim0 +where + NTY: ScalarOps, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for BinsDim0 +where + STY: ScalarOps, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl Empty for BinsDim0 { + fn empty() -> Self { + Self { + ts1s: VecDeque::new(), + ts2s: VecDeque::new(), + cnts: VecDeque::new(), + mins: VecDeque::new(), + maxs: VecDeque::new(), + avgs: VecDeque::new(), + lsts: VecDeque::new(), + dim0kind: None, + } + } +} + +impl WithLen for BinsDim0 { + fn len(&self) -> usize { + self.ts1s.len() + } +} + +impl ByteEstimate for BinsDim0 { + fn byte_estimate(&self) -> u64 { + // TODO + // Should use a better estimate for waveform and string types, + // or keep some aggregated byte count on push. + let n = self.len(); + if n == 0 { + 0 + } else { + // TODO use the actual size of one/some of the elements. 
+ let i = n * 2 / 3; + let w1 = self.mins[i].byte_estimate(); + let w2 = self.maxs[i].byte_estimate(); + (n as u64 * (8 + 8 + 8 + 4 + w1 + w2)) as u64 + } + } +} + +impl Resettable for BinsDim0 { + fn reset(&mut self) { + self.ts1s.clear(); + self.ts2s.clear(); + self.cnts.clear(); + self.mins.clear(); + self.maxs.clear(); + self.avgs.clear(); + self.lsts.clear(); + } +} + +impl HasNonemptyFirstBin for BinsDim0 { + fn has_nonempty_first_bin(&self) -> bool { + self.cnts.front().map_or(false, |x| *x > 0) + } +} + +impl HasTimestampDeque for BinsDim0 { + fn timestamp_min(&self) -> Option { + self.ts1s.front().map(|x| *x) + } + + fn timestamp_max(&self) -> Option { + self.ts2s.back().map(|x| *x) + } + + fn pulse_min(&self) -> Option { + todo!() + } + + fn pulse_max(&self) -> Option { + todo!() + } +} + +impl AppendEmptyBin for BinsDim0 { + fn append_empty_bin(&mut self, ts1: u64, ts2: u64) { + debug!("AppendEmptyBin::append_empty_bin should not get used"); + self.ts1s.push_back(ts1); + self.ts2s.push_back(ts2); + self.cnts.push_back(0); + self.mins.push_back(NTY::zero_b()); + self.maxs.push_back(NTY::zero_b()); + self.avgs.push_back(0.); + self.lsts.push_back(NTY::zero_b()); + } +} + +impl AppendAllFrom for BinsDim0 { + fn append_all_from(&mut self, src: &mut Self) { + debug!("AppendAllFrom::append_all_from should not get used"); + self.ts1s.extend(src.ts1s.drain(..)); + self.ts2s.extend(src.ts2s.drain(..)); + self.cnts.extend(src.cnts.drain(..)); + self.mins.extend(src.mins.drain(..)); + self.maxs.extend(src.maxs.drain(..)); + self.avgs.extend(src.avgs.drain(..)); + self.lsts.extend(src.lsts.drain(..)); + } +} + +impl TimeBins for BinsDim0 { + fn ts_min(&self) -> Option { + self.ts1s.front().map(Clone::clone) + } + + fn ts_max(&self) -> Option { + self.ts2s.back().map(Clone::clone) + } + + fn ts_min_max(&self) -> Option<(u64, u64)> { + if let (Some(min), Some(max)) = (self.ts1s.front().map(Clone::clone), self.ts2s.back().map(Clone::clone)) { + Some((min, max)) + } 
else { + None + } + } +} + +#[derive(Debug)] +pub struct BinsDim0TimeBinnerTy +where + STY: ScalarOps, +{ + ts1now: TsNano, + ts2now: TsNano, + binrange: BinnedRange, + do_time_weight: bool, + emit_empty_bins: bool, + range_complete: bool, + out: ::Output, + cnt: u64, + min: STY, + max: STY, + avg: f64, + lst: STY, + filled_up_to: TsNano, + last_seen_avg: f32, +} + +impl BinsDim0TimeBinnerTy +where + STY: ScalarOps, +{ + pub fn type_name() -> &'static str { + any::type_name::() + } + + pub fn new(binrange: BinnedRange, do_time_weight: bool, emit_empty_bins: bool) -> Self { + // let ts1now = TsNano::from_ns(binrange.bin_off * binrange.bin_len.ns()); + // let ts2 = ts1.add_dt_nano(binrange.bin_len.to_dt_nano()); + let ts1now = TsNano::from_ns(binrange.nano_beg().ns()); + let ts2now = ts1now.add_dt_nano(binrange.bin_len.to_dt_nano()); + Self { + ts1now, + ts2now, + binrange, + do_time_weight, + emit_empty_bins, + range_complete: false, + out: ::Output::empty(), + cnt: 0, + min: STY::zero_b(), + max: STY::zero_b(), + avg: 0., + lst: STY::zero_b(), + filled_up_to: ts1now, + last_seen_avg: 0., + } + } + + // used internally for the aggregation + fn reset_agg(&mut self) { + self.cnt = 0; + self.min = STY::zero_b(); + self.max = STY::zero_b(); + self.avg = 0.; + } +} + +impl TimeBinnerTy for BinsDim0TimeBinnerTy +where + STY: ScalarOps, +{ + type Input = BinsDim0; + type Output = BinsDim0; + + fn ingest(&mut self, item: &mut Self::Input) { + trace_ingest!("<{} as TimeBinnerTy>::ingest {:?}", Self::type_name(), item); + let mut count_before = 0; + for ((((((&ts1, &ts2), &cnt), min), max), &avg), lst) in item + .ts1s + .iter() + .zip(&item.ts2s) + .zip(&item.cnts) + .zip(&item.mins) + .zip(&item.maxs) + .zip(&item.avgs) + .zip(&item.lsts) + { + if ts1 < self.ts1now.ns() { + if ts2 > self.ts1now.ns() { + error!("{} bad input grid mismatch", Self::type_name()); + continue; + } + // warn!("encountered bin from time before {} {}", ts1, self.ts1now.ns()); + trace_ingest!("{} 
input bin before {}", Self::type_name(), TsNano::from_ns(ts1)); + self.min = min.clone(); + self.max = max.clone(); + self.lst = lst.clone(); + count_before += 1; + continue; + } else { + if ts2 > self.ts2now.ns() { + if ts2 - ts1 > self.ts2now.ns() - self.ts1now.ns() { + panic!("incoming bin len too large"); + } else if ts1 < self.ts2now.ns() { + panic!("encountered unaligned input bin"); + } else { + let mut i = 0; + while ts1 >= self.ts2now.ns() { + self.cycle(); + i += 1; + if i > 50000 { + panic!("cycle forward too many iterations"); + } + } + } + } else { + // ok, we're still inside the current bin + } + } + if cnt == 0 { + // ignore input bin, it does not contain any valid information. + } else { + if self.cnt == 0 { + self.cnt = cnt; + self.min = min.clone(); + self.max = max.clone(); + if self.do_time_weight { + let f = (ts2 - ts1) as f64 / (self.ts2now.ns() - self.ts1now.ns()) as f64; + self.avg = avg as f64 * f; + } else { + panic!("TODO non-time-weighted binning to be impl"); + } + } else { + self.cnt += cnt; + if *min < self.min { + self.min = min.clone(); + } + if *max > self.max { + self.max = max.clone(); + } + if self.do_time_weight { + let f = (ts2 - ts1) as f64 / (self.ts2now.ns() - self.ts1now.ns()) as f64; + self.avg += avg as f64 * f; + } else { + panic!("TODO non-time-weighted binning to be impl"); + } + } + self.filled_up_to = TsNano::from_ns(ts2); + self.last_seen_avg = avg; + } + } + if count_before != 0 { + warn!( + "----- seen {} / {} input bins from time before", + count_before, + item.len() + ); + } + } + + fn set_range_complete(&mut self) { + self.range_complete = true; + } + + fn bins_ready_count(&self) -> usize { + self.out.len() + } + + fn bins_ready(&mut self) -> Option { + if self.out.len() != 0 { + let ret = core::mem::replace(&mut self.out, BinsDim0::empty()); + Some(ret) + } else { + None + } + } + + fn push_in_progress(&mut self, push_empty: bool) { + if self.filled_up_to != self.ts2now { + if self.cnt != 0 { + 
info!("push_in_progress partially filled bin"); + if self.do_time_weight { + let f = (self.ts2now.ns() - self.filled_up_to.ns()) as f64 + / (self.ts2now.ns() - self.ts1now.ns()) as f64; + self.avg += self.lst.as_prim_f32_b() as f64 * f; + self.filled_up_to = self.ts2now; + } else { + panic!("TODO non-time-weighted binning to be impl"); + } + } else { + if self.filled_up_to != self.ts1now { + error!("partially filled bin with cnt 0"); + } + } + } + if self.cnt == 0 && !push_empty { + self.reset_agg(); + } else { + let min = self.min.clone(); + let max = self.max.clone(); + let avg = self.avg as f32; + if avg < min.as_prim_f32_b() || avg > max.as_prim_f32_b() { + // TODO rounding issues? + debug!("bad avg"); + } + self.out.ts1s.push_back(self.ts1now.ns()); + self.out.ts2s.push_back(self.ts2now.ns()); + self.out.cnts.push_back(self.cnt); + self.out.mins.push_back(min); + self.out.maxs.push_back(max); + self.out.avgs.push_back(avg); + self.out.lsts.push_back(self.lst.clone()); + self.reset_agg(); + } + } + + fn cycle(&mut self) { + self.push_in_progress(true); + self.ts1now = self.ts1now.add_dt_nano(self.binrange.bin_len.to_dt_nano()); + self.ts2now = self.ts2now.add_dt_nano(self.binrange.bin_len.to_dt_nano()); + } + + fn empty(&self) -> Option { + Some(::Output::empty()) + } + + fn append_empty_until_end(&mut self) { + let mut i = 0; + while self.ts2now.ns() < self.binrange.full_range().end() { + self.cycle(); + i += 1; + if i > 100000 { + panic!("append_empty_until_end too many iterations"); + } + } + } +} + +impl TimeBinnableTy for BinsDim0 { + type TimeBinner = BinsDim0TimeBinnerTy; + + fn time_binner_new( + &self, + binrange: BinnedRangeEnum, + do_time_weight: bool, + emit_empty_bins: bool, + ) -> Self::TimeBinner { + match binrange { + BinnedRangeEnum::Time(binrange) => BinsDim0TimeBinnerTy::new(binrange, do_time_weight, emit_empty_bins), + BinnedRangeEnum::Pulse(_) => todo!("TimeBinnableTy for BinsDim0 Pulse"), + } + } +} + +// TODO rename to 
BinsDim0CollectorOutput +#[derive(Debug, Serialize, Deserialize)] +pub struct BinsDim0CollectedResult { + #[serde(rename = "tsAnchor")] + ts_anchor_sec: u64, + #[serde(rename = "ts1Ms")] + ts1_off_ms: VecDeque, + #[serde(rename = "ts2Ms")] + ts2_off_ms: VecDeque, + #[serde(rename = "ts1Ns")] + ts1_off_ns: VecDeque, + #[serde(rename = "ts2Ns")] + ts2_off_ns: VecDeque, + #[serde(rename = "counts")] + counts: VecDeque, + #[serde(rename = "mins")] + mins: VecDeque, + #[serde(rename = "maxs")] + maxs: VecDeque, + #[serde(rename = "avgs")] + avgs: VecDeque, + #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")] + range_final: bool, + #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")] + timed_out: bool, + #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")] + missing_bins: u32, + #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")] + continue_at: Option, + #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")] + finished_at: Option, +} + +// TODO temporary fix for the enum output +impl BinsDim0CollectedResult +where + STY: ScalarOps, +{ + pub fn boxed_collected_with_enum_fix(&self) -> Box { + if let Some(bins) = self + .as_any_ref() + .downcast_ref::>() + { + debug!("boxed_collected_with_enum_fix"); + let mins = self.mins.iter().map(|x| 6).collect(); + let maxs = self.mins.iter().map(|x| 7).collect(); + let bins = BinsDim0CollectedResult:: { + ts_anchor_sec: self.ts_anchor_sec.clone(), + ts1_off_ms: self.ts1_off_ms.clone(), + ts2_off_ms: self.ts2_off_ms.clone(), + ts1_off_ns: self.ts1_off_ns.clone(), + ts2_off_ns: self.ts2_off_ns.clone(), + counts: self.counts.clone(), + mins, + maxs, + avgs: self.avgs.clone(), + range_final: self.range_final.clone(), + timed_out: self.timed_out.clone(), + missing_bins: self.missing_bins.clone(), + continue_at: self.continue_at.clone(), + finished_at: self.finished_at.clone(), + }; + Box::new(bins) + } else { 
+ let bins = Self { + ts_anchor_sec: self.ts_anchor_sec.clone(), + ts1_off_ms: self.ts1_off_ms.clone(), + ts2_off_ms: self.ts2_off_ms.clone(), + ts1_off_ns: self.ts1_off_ns.clone(), + ts2_off_ns: self.ts2_off_ns.clone(), + counts: self.counts.clone(), + mins: self.mins.clone(), + maxs: self.maxs.clone(), + avgs: self.avgs.clone(), + range_final: self.range_final.clone(), + timed_out: self.timed_out.clone(), + missing_bins: self.missing_bins.clone(), + continue_at: self.continue_at.clone(), + finished_at: self.finished_at.clone(), + }; + Box::new(bins) + } + } +} + +impl AsAnyRef for BinsDim0CollectedResult +where + NTY: 'static, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for BinsDim0CollectedResult +where + NTY: 'static, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl TypeName for BinsDim0CollectedResult { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl WithLen for BinsDim0CollectedResult { + fn len(&self) -> usize { + self.mins.len() + } +} + +impl CollectedDyn for BinsDim0CollectedResult {} + +impl BinsDim0CollectedResult { + pub fn ts_anchor_sec(&self) -> u64 { + self.ts_anchor_sec + } + + pub fn ts1_off_ms(&self) -> &VecDeque { + &self.ts1_off_ms + } + + pub fn ts2_off_ms(&self) -> &VecDeque { + &self.ts2_off_ms + } + + pub fn counts(&self) -> &VecDeque { + &self.counts + } + + pub fn range_final(&self) -> bool { + self.range_final + } + + pub fn timed_out(&self) -> bool { + self.timed_out + } + + pub fn missing_bins(&self) -> u32 { + self.missing_bins + } + + pub fn continue_at(&self) -> Option { + self.continue_at.clone() + } + + pub fn mins(&self) -> &VecDeque { + &self.mins + } + + pub fn maxs(&self) -> &VecDeque { + &self.maxs + } + + pub fn avgs(&self) -> &VecDeque { + &self.avgs + } +} + +impl ToJsonResult for BinsDim0CollectedResult { + fn to_json_value(&self) -> Result { + serde_json::to_value(self) + } +} + +#[derive(Debug)] +pub struct BinsDim0Collector { + vals: Option>, 
+ timed_out: bool, + range_final: bool, +} + +impl BinsDim0Collector { + pub fn self_name() -> &'static str { + any::type_name::() + } + + pub fn new() -> Self { + Self { + timed_out: false, + range_final: false, + vals: None, + } + } +} + +impl WithLen for BinsDim0Collector { + fn len(&self) -> usize { + self.vals.as_ref().map_or(0, WithLen::len) + } +} + +impl ByteEstimate for BinsDim0Collector { + fn byte_estimate(&self) -> u64 { + self.vals.as_ref().map_or(0, ByteEstimate::byte_estimate) + } +} + +impl CollectorTy for BinsDim0Collector { + type Input = BinsDim0; + type Output = BinsDim0CollectedResult; + + fn ingest(&mut self, src: &mut Self::Input) { + if self.vals.is_none() { + self.vals = Some(Self::Input::empty()); + } + let vals = self.vals.as_mut().unwrap(); + vals.ts1s.append(&mut src.ts1s); + vals.ts2s.append(&mut src.ts2s); + vals.cnts.append(&mut src.cnts); + vals.mins.append(&mut src.mins); + vals.maxs.append(&mut src.maxs); + vals.avgs.append(&mut src.avgs); + vals.lsts.append(&mut src.lsts); + } + + fn set_range_complete(&mut self) { + self.range_final = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + } + + fn set_continue_at_here(&mut self) { + debug!("{}::set_continue_at_here", Self::self_name()); + // TODO for bins, do nothing: either we have all bins or not. 
+ } + + fn result( + &mut self, + _range: Option, + binrange: Option, + ) -> Result { + trace!("trying to make a result from {self:?}"); + let bin_count_exp = if let Some(r) = &binrange { + r.bin_count() as u32 + } else { + debug!("no binrange given"); + 0 + }; + let mut vals = if let Some(x) = self.vals.take() { + x + } else { + return Err(Error::with_msg_no_trace("BinsDim0Collector without vals")); + }; + let bin_count = vals.ts1s.len() as u32; + debug!( + "result make missing bins bin_count_exp {} bin_count {}", + bin_count_exp, bin_count + ); + let (missing_bins, continue_at, finished_at) = if bin_count < bin_count_exp { + match vals.ts2s.back() { + Some(&k) => { + let missing_bins = bin_count_exp - bin_count; + let continue_at = IsoDateTime::from_ns_u64(k); + let u = k + (k - vals.ts1s.back().unwrap()) * missing_bins as u64; + let finished_at = IsoDateTime::from_ns_u64(u); + (missing_bins, Some(continue_at), Some(finished_at)) + } + None => { + warn!("can not determine continue-at parameters"); + (0, None, None) + } + } + } else { + (0, None, None) + }; + if vals.ts1s.as_slices().1.len() != 0 { + warn!("ts1s non-contiguous"); + } + if vals.ts2s.as_slices().1.len() != 0 { + warn!("ts2s non-contiguous"); + } + let ts1s = vals.ts1s.make_contiguous(); + let ts2s = vals.ts2s.make_contiguous(); + let (ts_anch, ts1ms, ts1ns) = ts_offs_from_abs(ts1s); + let (ts2ms, ts2ns) = ts_offs_from_abs_with_anchor(ts_anch, ts2s); + let counts = vals.cnts; + let mins = vals.mins; + let maxs = vals.maxs; + let avgs = vals.avgs; + let ret = BinsDim0CollectedResult:: { + ts_anchor_sec: ts_anch, + ts1_off_ms: ts1ms, + ts1_off_ns: ts1ns, + ts2_off_ms: ts2ms, + ts2_off_ns: ts2ns, + counts, + mins, + maxs, + avgs, + range_final: self.range_final, + timed_out: self.timed_out, + missing_bins, + continue_at, + finished_at, + }; + *self = Self::new(); + Ok(ret) + } +} + +impl CollectableType for BinsDim0 { + type Collector = BinsDim0Collector; + + fn new_collector() -> Self::Collector { + 
Self::Collector::new() + } +} + +#[derive(Debug)] +pub struct BinsDim0Aggregator { + range: SeriesRange, + cnt: u64, + minmaxlst: Option<(NTY, NTY, NTY)>, + sumc: u64, + sum: f32, +} + +impl BinsDim0Aggregator { + pub fn new(range: SeriesRange, _do_time_weight: bool) -> Self { + Self { + range, + cnt: 0, + minmaxlst: None, + sumc: 0, + sum: 0f32, + } + } +} diff --git a/src/binsxbindim0.rs b/src/binsxbindim0.rs new file mode 100644 index 0000000..326f69b --- /dev/null +++ b/src/binsxbindim0.rs @@ -0,0 +1,523 @@ +use crate::ts_offs_from_abs; +use crate::ts_offs_from_abs_with_anchor; +use crate::IsoDateTime; +use daqbuf_err as err; +use err::Error; +use items_0::collect_s::CollectableDyn; +use items_0::collect_s::CollectableType; +use items_0::collect_s::CollectedDyn; +use items_0::collect_s::CollectorTy; +use items_0::collect_s::ToJsonResult; +use items_0::container::ByteEstimate; +use items_0::scalar_ops::AsPrimF32; +use items_0::scalar_ops::ScalarOps; +use items_0::timebin::TimeBins; +use items_0::AppendEmptyBin; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::Empty; +use items_0::Resettable; +use items_0::TypeName; +use items_0::WithLen; +use netpod::is_false; +use netpod::log::*; +use netpod::range::evrange::NanoRange; +use netpod::range::evrange::SeriesRange; +use netpod::timeunits::SEC; +use netpod::BinnedRangeEnum; +use netpod::CmpZero; +use netpod::Dim0Kind; +use serde::Deserialize; +use serde::Serialize; +use std::any; +use std::any::Any; +use std::collections::VecDeque; +use std::fmt; +use std::mem; +use std::ops::Range; + +#[allow(unused)] +macro_rules! trace4 { + ($($arg:tt)*) => (); + ($($arg:tt)*) => (eprintln!($($arg)*)); +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct BinsXbinDim0 { + ts1s: VecDeque, + ts2s: VecDeque, + counts: VecDeque, + mins: VecDeque, + maxs: VecDeque, + avgs: VecDeque, + // TODO could consider more variables: + // ts min/max, pulse min/max, avg of mins, avg of maxs, variances, etc... 
+ dim0kind: Option, +} + +impl TypeName for BinsXbinDim0 { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl fmt::Debug for BinsXbinDim0 +where + NTY: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let self_name = any::type_name::(); + write!( + fmt, + "{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}", + self.ts1s.len(), + self.ts1s.iter().map(|k| k / SEC).collect::>(), + self.ts2s.iter().map(|k| k / SEC).collect::>(), + self.counts, + self.mins, + self.maxs, + self.avgs, + ) + } +} + +impl BinsXbinDim0 { + pub fn from_content( + ts1s: VecDeque, + ts2s: VecDeque, + counts: VecDeque, + mins: VecDeque, + maxs: VecDeque, + avgs: VecDeque, + ) -> Self { + Self { + ts1s, + ts2s, + counts, + mins, + maxs, + avgs, + dim0kind: None, + } + } + + pub fn counts(&self) -> &VecDeque { + &self.counts + } + + pub fn push(&mut self, ts1: u64, ts2: u64, count: u64, min: NTY, max: NTY, avg: f32) { + self.ts1s.push_back(ts1); + self.ts2s.push_back(ts2); + self.counts.push_back(count); + self.mins.push_back(min); + self.maxs.push_back(max); + self.avgs.push_back(avg); + } + + pub fn append_zero(&mut self, beg: u64, end: u64) { + self.ts1s.push_back(beg); + self.ts2s.push_back(end); + self.counts.push_back(0); + self.mins.push_back(NTY::zero_b()); + self.maxs.push_back(NTY::zero_b()); + self.avgs.push_back(0.); + } + + pub fn append_all_from(&mut self, src: &mut Self) { + self.ts1s.extend(src.ts1s.drain(..)); + self.ts2s.extend(src.ts2s.drain(..)); + self.counts.extend(src.counts.drain(..)); + self.mins.extend(src.mins.drain(..)); + self.maxs.extend(src.maxs.drain(..)); + self.avgs.extend(src.avgs.drain(..)); + } + + pub fn equal_slack(&self, other: &Self) -> bool { + for (&a, &b) in self.ts1s.iter().zip(other.ts1s.iter()) { + if a != b { + return false; + } + } + for (&a, &b) in self.ts2s.iter().zip(other.ts2s.iter()) { + if a != b { + return false; + } + } + for (a, b) in 
self.mins.iter().zip(other.mins.iter()) { + if !a.equal_slack(b) { + return false; + } + } + for (a, b) in self.maxs.iter().zip(other.maxs.iter()) { + if !a.equal_slack(b) { + return false; + } + } + for (a, b) in self.avgs.iter().zip(other.avgs.iter()) { + if !a.equal_slack(b) { + return false; + } + } + true + } +} + +impl AsAnyRef for BinsXbinDim0 +where + NTY: ScalarOps, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for BinsXbinDim0 +where + STY: ScalarOps, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl Empty for BinsXbinDim0 { + fn empty() -> Self { + Self { + ts1s: VecDeque::new(), + ts2s: VecDeque::new(), + counts: VecDeque::new(), + mins: VecDeque::new(), + maxs: VecDeque::new(), + avgs: VecDeque::new(), + dim0kind: None, + } + } +} + +impl WithLen for BinsXbinDim0 { + fn len(&self) -> usize { + self.ts1s.len() + } +} + +impl ByteEstimate for BinsXbinDim0 { + fn byte_estimate(&self) -> u64 { + // TODO + // Should use a better estimate for waveform and string types, + // or keep some aggregated byte count on push. + let n = self.len(); + if n == 0 { + 0 + } else { + // TODO use the actual size of one/some of the elements. 
+ let i = n * 2 / 3; + let w1 = self.mins[i].byte_estimate(); + let w2 = self.maxs[i].byte_estimate(); + (n as u64 * (8 + 8 + 8 + 4 + w1 + w2)) as u64 + } + } +} + +impl Resettable for BinsXbinDim0 { + fn reset(&mut self) { + self.ts1s.clear(); + self.ts2s.clear(); + self.counts.clear(); + self.mins.clear(); + self.maxs.clear(); + self.avgs.clear(); + } +} + +impl AppendEmptyBin for BinsXbinDim0 { + fn append_empty_bin(&mut self, ts1: u64, ts2: u64) { + self.ts1s.push_back(ts1); + self.ts2s.push_back(ts2); + self.counts.push_back(0); + self.mins.push_back(NTY::zero_b()); + self.maxs.push_back(NTY::zero_b()); + self.avgs.push_back(0.); + } +} + +impl TimeBins for BinsXbinDim0 { + fn ts_min(&self) -> Option { + self.ts1s.front().map(Clone::clone) + } + + fn ts_max(&self) -> Option { + self.ts2s.back().map(Clone::clone) + } + + fn ts_min_max(&self) -> Option<(u64, u64)> { + if let (Some(min), Some(max)) = (self.ts1s.front().map(Clone::clone), self.ts2s.back().map(Clone::clone)) { + Some((min, max)) + } else { + None + } + } +} + +// TODO rename to BinsDim0CollectorOutput +#[derive(Debug, Serialize, Deserialize)] +pub struct BinsXbinDim0CollectedResult { + #[serde(rename = "tsAnchor")] + ts_anchor_sec: u64, + #[serde(rename = "ts1Ms")] + ts1_off_ms: VecDeque, + #[serde(rename = "ts2Ms")] + ts2_off_ms: VecDeque, + #[serde(rename = "ts1Ns")] + ts1_off_ns: VecDeque, + #[serde(rename = "ts2Ns")] + ts2_off_ns: VecDeque, + #[serde(rename = "counts")] + counts: VecDeque, + #[serde(rename = "mins")] + mins: VecDeque, + #[serde(rename = "maxs")] + maxs: VecDeque, + #[serde(rename = "avgs")] + avgs: VecDeque, + #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")] + range_final: bool, + #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")] + timed_out: bool, + #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")] + missing_bins: u32, + #[serde(rename = "continueAt", default, skip_serializing_if = 
"Option::is_none")] + continue_at: Option, + #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")] + finished_at: Option, +} + +impl AsAnyRef for BinsXbinDim0CollectedResult +where + NTY: ScalarOps, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for BinsXbinDim0CollectedResult +where + NTY: ScalarOps, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl TypeName for BinsXbinDim0CollectedResult { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl WithLen for BinsXbinDim0CollectedResult { + fn len(&self) -> usize { + self.mins.len() + } +} + +impl CollectedDyn for BinsXbinDim0CollectedResult {} + +impl BinsXbinDim0CollectedResult { + pub fn ts_anchor_sec(&self) -> u64 { + self.ts_anchor_sec + } + + pub fn ts1_off_ms(&self) -> &VecDeque { + &self.ts1_off_ms + } + + pub fn ts2_off_ms(&self) -> &VecDeque { + &self.ts2_off_ms + } + + pub fn counts(&self) -> &VecDeque { + &self.counts + } + + pub fn range_final(&self) -> bool { + self.range_final + } + + pub fn missing_bins(&self) -> u32 { + self.missing_bins + } + + pub fn continue_at(&self) -> Option { + self.continue_at.clone() + } + + pub fn mins(&self) -> &VecDeque { + &self.mins + } + + pub fn maxs(&self) -> &VecDeque { + &self.maxs + } +} + +impl ToJsonResult for BinsXbinDim0CollectedResult { + fn to_json_value(&self) -> Result { + serde_json::to_value(self) + } +} + +#[derive(Debug)] +pub struct BinsXbinDim0Collector { + vals: BinsXbinDim0, + timed_out: bool, + range_final: bool, +} + +impl BinsXbinDim0Collector { + pub fn self_name() -> &'static str { + any::type_name::() + } + + pub fn new() -> Self { + Self { + vals: BinsXbinDim0::empty(), + timed_out: false, + range_final: false, + } + } +} + +impl WithLen for BinsXbinDim0Collector { + fn len(&self) -> usize { + self.vals.len() + } +} + +impl ByteEstimate for BinsXbinDim0Collector { + fn byte_estimate(&self) -> u64 { + self.vals.byte_estimate() + } +} + +impl 
CollectorTy for BinsXbinDim0Collector { + type Input = BinsXbinDim0; + type Output = BinsXbinDim0CollectedResult; + + fn ingest(&mut self, src: &mut Self::Input) { + trace!("\n\n----------- BinsXbinDim0Collector ingest\n{:?}\n\n", src); + // TODO could be optimized by non-contiguous container. + self.vals.ts1s.append(&mut src.ts1s); + self.vals.ts2s.append(&mut src.ts2s); + self.vals.counts.append(&mut src.counts); + self.vals.mins.append(&mut src.mins); + self.vals.maxs.append(&mut src.maxs); + self.vals.avgs.append(&mut src.avgs); + } + + fn set_range_complete(&mut self) { + self.range_final = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + } + + fn set_continue_at_here(&mut self) { + debug!("{}::set_continue_at_here", Self::self_name()); + // TODO for bins, do nothing: either we have all bins or not. + } + + fn result( + &mut self, + _range: std::option::Option, + binrange: Option, + ) -> Result { + let bin_count_exp = if let Some(r) = &binrange { + r.bin_count() as u32 + } else { + 0 + }; + let bin_count = self.vals.ts1s.len() as u32; + let (missing_bins, continue_at, finished_at) = if bin_count < bin_count_exp { + match self.vals.ts2s.back() { + Some(&k) => { + let missing_bins = bin_count_exp - bin_count; + let continue_at = IsoDateTime::from_ns_u64(k); + let u = k + (k - self.vals.ts1s.back().unwrap()) * missing_bins as u64; + let finished_at = IsoDateTime::from_ns_u64(u); + (missing_bins, Some(continue_at), Some(finished_at)) + } + None => { + warn!("can not determine continue-at parameters"); + (0, None, None) + } + } + } else { + (0, None, None) + }; + if self.vals.ts1s.as_slices().1.len() != 0 { + panic!(); + } + if self.vals.ts2s.as_slices().1.len() != 0 { + panic!(); + } + let tst1 = ts_offs_from_abs(self.vals.ts1s.as_slices().0); + let tst2 = ts_offs_from_abs_with_anchor(tst1.0, self.vals.ts2s.as_slices().0); + let counts = mem::replace(&mut self.vals.counts, VecDeque::new()); + let mins = mem::replace(&mut self.vals.mins, 
VecDeque::new()); + let maxs = mem::replace(&mut self.vals.maxs, VecDeque::new()); + let avgs = mem::replace(&mut self.vals.avgs, VecDeque::new()); + let ret = BinsXbinDim0CollectedResult:: { + ts_anchor_sec: tst1.0, + ts1_off_ms: tst1.1, + ts1_off_ns: tst1.2, + ts2_off_ms: tst2.0, + ts2_off_ns: tst2.1, + counts, + mins, + maxs, + avgs, + range_final: self.range_final, + timed_out: self.timed_out, + missing_bins, + continue_at, + finished_at, + }; + Ok(ret) + } +} + +impl CollectableType for BinsXbinDim0 { + type Collector = BinsXbinDim0Collector; + + fn new_collector() -> Self::Collector { + Self::Collector::new() + } +} + +#[derive(Debug)] +pub struct BinsXbinDim0Aggregator { + range: SeriesRange, + count: u64, + min: NTY, + max: NTY, + // Carry over to next bin: + avg: f32, + sumc: u64, + sum: f32, +} + +impl BinsXbinDim0Aggregator { + pub fn new(range: SeriesRange, _do_time_weight: bool) -> Self { + Self { + range, + count: 0, + min: NTY::zero_b(), + max: NTY::zero_b(), + avg: 0., + sumc: 0, + sum: 0f32, + } + } +} diff --git a/src/channelevents.rs b/src/channelevents.rs new file mode 100644 index 0000000..3ce2736 --- /dev/null +++ b/src/channelevents.rs @@ -0,0 +1,1139 @@ +use crate::framable::FrameType; +use crate::merger::Mergeable; +use crate::Events; +use daqbuf_err as err; +use items_0::collect_s::CollectableDyn; +use items_0::collect_s::CollectedDyn; +use items_0::collect_s::CollectorDyn; +use items_0::container::ByteEstimate; +use items_0::framable::FrameTypeInnerStatic; +use items_0::isodate::IsoDateTime; +use items_0::streamitem::ITEMS_2_CHANNEL_EVENTS_FRAME_TYPE_ID; +use items_0::timebin::TimeBinnableTy; +use items_0::timebin::TimeBinnerTy; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::Empty; +use items_0::EventsNonObj; +use items_0::Extendable; +use items_0::MergeError; +use items_0::TypeName; +use items_0::WithLen; +use netpod::log::*; +use netpod::range::evrange::SeriesRange; +use netpod::BinnedRangeEnum; +use serde::Deserialize; 
+use serde::Serialize; +use std::any; +use std::any::Any; +use std::collections::VecDeque; +use std::fmt; +use std::time::Duration; +use std::time::SystemTime; + +#[allow(unused)] +macro_rules! trace_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +// TODO maybe rename to ChannelStatus? +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub enum ConnStatus { + Connect, + Disconnect, +} + +impl ConnStatus { + pub fn from_ca_ingest_status_kind(k: u32) -> Self { + match k { + 1 => Self::Connect, + _ => Self::Disconnect, + } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ConnStatusEvent { + pub ts: u64, + #[serde(with = "humantime_serde")] + //pub datetime: chrono::DateTime, + pub datetime: SystemTime, + pub status: ConnStatus, +} + +impl ConnStatusEvent { + pub fn new(ts: u64, status: ConnStatus) -> Self { + let datetime = SystemTime::UNIX_EPOCH + Duration::from_millis(ts / 1000000); + Self { ts, datetime, status } + } +} + +impl ByteEstimate for ConnStatusEvent { + fn byte_estimate(&self) -> u64 { + // TODO magic number, but maybe good enough + 32 + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub enum ChannelStatus { + Connect, + Disconnect, +} + +impl ChannelStatus { + pub fn from_ca_ingest_status_kind(k: u32) -> Self { + match k { + 1 => Self::Connect, + _ => Self::Disconnect, + } + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ChannelStatusEvents { + pub tss: VecDeque, + pub datetimes: VecDeque, + pub statuses: VecDeque, +} + +impl Empty for ChannelStatusEvents { + fn empty() -> Self { + Self { + tss: VecDeque::new(), + datetimes: VecDeque::new(), + statuses: VecDeque::new(), + } + } +} + +impl WithLen for ChannelStatusEvents { + fn len(&self) -> usize { + self.tss.len() + } +} + +impl Extendable for ChannelStatusEvents { + fn extend_from(&mut self, src: &mut Self) { + use core::mem::replace; + let v = replace(&mut src.tss, VecDeque::new()); + 
self.tss.extend(v.into_iter()); + let v = replace(&mut src.datetimes, VecDeque::new()); + self.datetimes.extend(v.into_iter()); + let v = replace(&mut src.statuses, VecDeque::new()); + self.statuses.extend(v.into_iter()); + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ChannelStatusEvent { + pub ts: u64, + #[serde(with = "humantime_serde")] + //pub datetime: chrono::DateTime, + pub datetime: SystemTime, + pub status: ChannelStatus, +} + +impl ChannelStatusEvent { + pub fn new(ts: u64, status: ChannelStatus) -> Self { + let datetime = SystemTime::UNIX_EPOCH + Duration::from_millis(ts / 1000000); + Self { ts, datetime, status } + } +} + +impl ByteEstimate for ChannelStatusEvent { + fn byte_estimate(&self) -> u64 { + // TODO magic number, but maybe good enough + 32 + } +} + +/// Events on a channel consist not only of e.g. timestamped values, but can be also +/// connection status changes. +#[derive(Debug)] +pub enum ChannelEvents { + Events(Box), + Status(Option), +} + +impl ChannelEvents { + pub fn is_events(&self) -> bool { + match self { + ChannelEvents::Events(_) => true, + ChannelEvents::Status(_) => false, + } + } +} + +impl TypeName for ChannelEvents { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl FrameTypeInnerStatic for ChannelEvents { + const FRAME_TYPE_ID: u32 = ITEMS_2_CHANNEL_EVENTS_FRAME_TYPE_ID; +} + +impl FrameType for ChannelEvents { + fn frame_type_id(&self) -> u32 { + // TODO SubFrId missing, but get rid of the frame type concept anyhow. 
+ ::FRAME_TYPE_ID + } +} + +impl Clone for ChannelEvents { + fn clone(&self) -> Self { + match self { + Self::Events(arg0) => Self::Events(arg0.clone_dyn()), + Self::Status(arg0) => Self::Status(arg0.clone()), + } + } +} + +impl AsAnyRef for ChannelEvents { + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for ChannelEvents { + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +mod serde_channel_events { + use super::ChannelEvents; + use super::Events; + use crate::channelevents::ConnStatusEvent; + use crate::eventsdim0::EventsDim0; + use crate::eventsdim1::EventsDim1; + use crate::eventsxbindim0::EventsXbinDim0; + use items_0::subfr::SubFrId; + use netpod::log::*; + use netpod::EnumVariant; + use serde::de; + use serde::de::EnumAccess; + use serde::de::VariantAccess; + use serde::de::Visitor; + use serde::ser::SerializeSeq; + use serde::Deserialize; + use serde::Deserializer; + use serde::Serialize; + use serde::Serializer; + use std::fmt; + + struct EvRef<'a>(&'a dyn Events); + + struct EvBox(Box); + + impl<'a> Serialize for EvRef<'a> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut ser = serializer.serialize_seq(Some(3))?; + ser.serialize_element(self.0.serde_id())?; + ser.serialize_element(&self.0.nty_id())?; + ser.serialize_element(self.0)?; + ser.end() + } + } + + struct EvBoxVis; + + impl EvBoxVis { + fn name() -> &'static str { + "Events" + } + } + + impl<'de> Visitor<'de> for EvBoxVis { + type Value = EvBox; + + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{}", Self::name()) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: de::SeqAccess<'de>, + { + let cty: &str = seq.next_element()?.ok_or_else(|| de::Error::missing_field("[0] cty"))?; + let nty: u32 = seq.next_element()?.ok_or_else(|| de::Error::missing_field("[1] nty"))?; + if cty == EventsDim0::::serde_id() { + match nty { + u8::SUB => { + let obj: EventsDim0 = + 
seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + u16::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + u32::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + u64::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i8::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i16::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i32::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i64::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + f32::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + f64::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + bool::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + String::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + EnumVariant::SUB => { + let obj: EventsDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + _ => { + error!("TODO serde cty {cty} nty {nty}"); + Err(de::Error::custom(&format!("unknown nty {nty}"))) + } + } + } else if cty 
== EventsDim1::::serde_id() { + match nty { + u8::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + u16::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + u32::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + u64::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i8::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i16::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i32::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + i64::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + f32::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + f64::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + bool::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + String::SUB => { + let obj: EventsDim1 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + _ => { + error!("TODO serde cty {cty} nty {nty}"); + Err(de::Error::custom(&format!("unknown nty {nty}"))) + } + } + } else if cty == EventsXbinDim0::::serde_id() { + match nty { + f32::SUB => { + 
let obj: EventsXbinDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + f64::SUB => { + let obj: EventsXbinDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + bool::SUB => { + let obj: EventsXbinDim0 = + seq.next_element()?.ok_or_else(|| de::Error::missing_field("[2] obj"))?; + Ok(EvBox(Box::new(obj))) + } + _ => { + error!("TODO serde cty {cty} nty {nty}"); + Err(de::Error::custom(&format!("unknown nty {nty}"))) + } + } + } else { + error!("TODO serde cty {cty} nty {nty}"); + Err(de::Error::custom(&format!("unknown cty {cty}"))) + } + } + } + + impl<'de> Deserialize<'de> for EvBox { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_seq(EvBoxVis) + } + } + + impl Serialize for ChannelEvents { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let name = "ChannelEvents"; + let vars = ChannelEventsVis::allowed_variants(); + match self { + ChannelEvents::Events(obj) => { + serializer.serialize_newtype_variant(name, 0, vars[0], &EvRef(obj.as_ref())) + } + ChannelEvents::Status(val) => serializer.serialize_newtype_variant(name, 1, vars[1], val), + } + } + } + + enum VarId { + Events, + Status, + } + + struct VarIdVis; + + impl<'de> Visitor<'de> for VarIdVis { + type Value = VarId; + + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "variant identifier") + } + + fn visit_u64(self, val: u64) -> Result + where + E: de::Error, + { + match val { + 0 => Ok(VarId::Events), + 1 => Ok(VarId::Status), + _ => Err(de::Error::invalid_value( + de::Unexpected::Unsigned(val), + &"variant index 0..2", + )), + } + } + + fn visit_str(self, val: &str) -> Result + where + E: de::Error, + { + let vars = ChannelEventsVis::allowed_variants(); + if val == vars[0] { + Ok(VarId::Events) + } else if val == vars[1] { + Ok(VarId::Status) + } else { + 
Err(de::Error::unknown_variant(val, ChannelEventsVis::allowed_variants())) + } + } + } + + impl<'de> Deserialize<'de> for VarId { + fn deserialize(de: D) -> Result + where + D: Deserializer<'de>, + { + de.deserialize_identifier(VarIdVis) + } + } + + pub struct ChannelEventsVis; + + impl ChannelEventsVis { + fn name() -> &'static str { + "ChannelEvents" + } + + fn allowed_variants() -> &'static [&'static str] { + &["Events", "Status"] + } + } + + impl<'de> Visitor<'de> for ChannelEventsVis { + type Value = ChannelEvents; + + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{}", Self::name()) + } + + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + let (id, var) = data.variant()?; + match id { + VarId::Events => { + let x: EvBox = var.newtype_variant()?; + Ok(Self::Value::Events(x.0)) + } + VarId::Status => { + let x: Option = var.newtype_variant()?; + Ok(Self::Value::Status(x)) + } + } + } + } + + impl<'de> Deserialize<'de> for ChannelEvents { + fn deserialize(de: D) -> Result + where + D: Deserializer<'de>, + { + de.deserialize_enum( + ChannelEventsVis::name(), + ChannelEventsVis::allowed_variants(), + ChannelEventsVis, + ) + } + } +} + +#[cfg(test)] +mod test_channel_events_serde { + use super::ChannelEvents; + use crate::channelevents::ConnStatusEvent; + use crate::eventsdim0::EventsDim0; + use bincode::config::FixintEncoding; + use bincode::config::LittleEndian; + use bincode::config::RejectTrailing; + use bincode::config::WithOtherEndian; + use bincode::config::WithOtherIntEncoding; + use bincode::config::WithOtherTrailing; + use bincode::DefaultOptions; + use items_0::bincode; + use items_0::Appendable; + use items_0::Empty; + use serde::Deserialize; + use serde::Serialize; + use std::time::SystemTime; + + #[test] + fn channel_events() { + let mut evs = EventsDim0::empty(); + evs.push(8, 2, 3.0f32); + evs.push(12, 3, 3.2f32); + let item = ChannelEvents::Events(Box::new(evs)); + let s = 
serde_json::to_string_pretty(&item).unwrap(); + eprintln!("{s}"); + let w: ChannelEvents = serde_json::from_str(&s).unwrap(); + eprintln!("{w:?}"); + } + + type OptsTy = WithOtherTrailing< + WithOtherIntEncoding, FixintEncoding>, + RejectTrailing, + >; + + fn bincode_opts() -> OptsTy { + use bincode::Options; + let opts = bincode::DefaultOptions::new() + .with_little_endian() + .with_fixint_encoding() + .reject_trailing_bytes(); + opts + } + + #[test] + fn channel_events_bincode() { + let mut evs = EventsDim0::empty(); + evs.push(8, 2, 3.0f32); + evs.push(12, 3, 3.2f32); + let item = ChannelEvents::Events(Box::new(evs)); + let opts = bincode_opts(); + let mut out = Vec::new(); + let mut ser = bincode::Serializer::new(&mut out, opts); + item.serialize(&mut ser).unwrap(); + eprintln!("serialized into {} bytes", out.len()); + let mut de = bincode::Deserializer::from_slice(&out, opts); + let item = ::deserialize(&mut de).unwrap(); + let item = if let ChannelEvents::Events(x) = item { + x + } else { + panic!() + }; + let item: &EventsDim0 = item.as_any_ref().downcast_ref().unwrap(); + assert_eq!(item.tss().len(), 2); + assert_eq!(item.tss()[1], 12); + } + + #[test] + fn channel_status_bincode() { + let mut evs = EventsDim0::empty(); + evs.push(8, 2, 3.0f32); + evs.push(12, 3, 3.2f32); + let status = ConnStatusEvent { + ts: 567, + datetime: SystemTime::UNIX_EPOCH, + status: crate::channelevents::ConnStatus::Connect, + }; + let item = ChannelEvents::Status(Some(status)); + let opts = bincode_opts(); + let mut out = Vec::new(); + let mut ser = bincode::Serializer::new(&mut out, opts); + item.serialize(&mut ser).unwrap(); + eprintln!("serialized into {} bytes", out.len()); + let mut de = bincode::Deserializer::from_slice(&out, opts); + let item = ::deserialize(&mut de).unwrap(); + let item = if let ChannelEvents::Status(x) = item { + x + } else { + panic!() + }; + if let Some(item) = item { + assert_eq!(item.ts, 567); + } else { + panic!() + } + } +} + +impl PartialEq for 
ChannelEvents { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Events(l0), Self::Events(r0)) => l0 == r0, + (Self::Status(l0), Self::Status(r0)) => l0 == r0, + _ => core::mem::discriminant(self) == core::mem::discriminant(other), + } + } +} + +impl WithLen for ChannelEvents { + fn len(&self) -> usize { + match self { + ChannelEvents::Events(k) => k.as_ref().len(), + ChannelEvents::Status(k) => match k { + Some(_) => 1, + None => 0, + }, + } + } +} + +impl ByteEstimate for ChannelEvents { + fn byte_estimate(&self) -> u64 { + match self { + ChannelEvents::Events(k) => k.byte_estimate(), + ChannelEvents::Status(k) => match k { + Some(k) => k.byte_estimate(), + None => 0, + }, + } + } +} + +impl Mergeable for ChannelEvents { + fn ts_min(&self) -> Option { + match self { + ChannelEvents::Events(k) => Mergeable::ts_min(k), + ChannelEvents::Status(k) => match k { + Some(k) => Some(k.ts), + None => None, + }, + } + } + + fn ts_max(&self) -> Option { + match self { + ChannelEvents::Events(k) => Mergeable::ts_max(k), + ChannelEvents::Status(k) => match k { + Some(k) => Some(k.ts), + None => None, + }, + } + } + + fn new_empty(&self) -> Self { + match self { + ChannelEvents::Events(k) => ChannelEvents::Events(k.new_empty()), + ChannelEvents::Status(_) => ChannelEvents::Status(None), + } + } + + fn clear(&mut self) { + match self { + ChannelEvents::Events(x) => { + Mergeable::clear(x); + } + ChannelEvents::Status(x) => { + *x = None; + } + } + } + + fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> { + match self { + ChannelEvents::Events(k) => match dst { + ChannelEvents::Events(j) => k.drain_into(j, range), + ChannelEvents::Status(_) => Err(MergeError::NotCompatible), + }, + ChannelEvents::Status(k) => match dst { + ChannelEvents::Events(_) => Err(MergeError::NotCompatible), + ChannelEvents::Status(j) => match j { + Some(_) => { + trace!("drain_into merger::MergeError::Full"); + Err(MergeError::Full) + } + None 
=> { + if range.0 > 0 { + trace!("weird range {range:?}"); + } + if range.1 > 1 { + trace!("weird range {range:?}"); + } + if range.0 == range.1 { + trace!("try to add empty range to status container {range:?}"); + } + *j = k.take(); + Ok(()) + } + }, + }, + } + } + + fn find_lowest_index_gt(&self, ts: u64) -> Option { + match self { + ChannelEvents::Events(k) => k.find_lowest_index_gt(ts), + ChannelEvents::Status(k) => { + if let Some(k) = k { + if k.ts > ts { + Some(0) + } else { + None + } + } else { + None + } + } + } + } + + fn find_lowest_index_ge(&self, ts: u64) -> Option { + match self { + ChannelEvents::Events(k) => k.find_lowest_index_ge(ts), + ChannelEvents::Status(k) => { + if let Some(k) = k { + if k.ts >= ts { + Some(0) + } else { + None + } + } else { + None + } + } + } + } + + fn find_highest_index_lt(&self, ts: u64) -> Option { + match self { + ChannelEvents::Events(k) => k.find_highest_index_lt(ts), + ChannelEvents::Status(k) => { + if let Some(k) = k { + if k.ts < ts { + Some(0) + } else { + None + } + } else { + None + } + } + } + } + + fn tss(&self) -> Vec { + Events::tss(self) + .iter() + .map(|x| netpod::TsMs::from_ns_u64(*x)) + .collect() + } +} + +impl EventsNonObj for ChannelEvents { + fn into_tss_pulses(self: Box) -> (VecDeque, VecDeque) { + match *self { + ChannelEvents::Events(k) => k.into_tss_pulses(), + ChannelEvents::Status(_) => (VecDeque::new(), VecDeque::new()), + } + } +} + +impl Events for ChannelEvents { + fn verify(&self) -> bool { + match self { + ChannelEvents::Events(x) => Events::verify(x), + ChannelEvents::Status(_) => panic!(), + } + } + + fn output_info(&self) -> String { + todo!() + } + + fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn { + todo!() + } + + fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn { + todo!() + } + + fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn { + todo!() + } + + fn ts_min(&self) -> Option { + todo!() + } + + fn ts_max(&self) -> Option { + 
todo!() + } + + fn take_new_events_until_ts(&mut self, _ts_end: u64) -> Box { + todo!() + } + + fn new_empty_evs(&self) -> Box { + match self { + ChannelEvents::Events(x) => Events::new_empty_evs(x), + ChannelEvents::Status(_) => panic!(), + } + } + + fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> { + let dst2 = if let Some(x) = dst.as_any_mut().downcast_mut::() { + // debug!("unwrapped dst ChannelEvents as well"); + x + } else { + panic!("dst is not ChannelEvents"); + }; + match self { + ChannelEvents::Events(k) => match dst2 { + ChannelEvents::Events(j) => Events::drain_into_evs(k, j, range), + ChannelEvents::Status(_) => panic!("dst is not events"), + }, + ChannelEvents::Status(_) => panic!("self is not events"), + } + } + + fn find_lowest_index_gt_evs(&self, _ts: u64) -> Option { + todo!() + } + + fn find_lowest_index_ge_evs(&self, _ts: u64) -> Option { + todo!() + } + + fn find_highest_index_lt_evs(&self, _ts: u64) -> Option { + todo!() + } + + fn clone_dyn(&self) -> Box { + todo!() + } + + fn partial_eq_dyn(&self, _other: &dyn Events) -> bool { + todo!() + } + + fn serde_id(&self) -> &'static str { + todo!() + } + + fn nty_id(&self) -> u32 { + todo!() + } + + fn tss(&self) -> &VecDeque { + match self { + ChannelEvents::Events(x) => Events::tss(x), + ChannelEvents::Status(_) => panic!(), + } + } + + fn pulses(&self) -> &VecDeque { + todo!() + } + + fn frame_type_id(&self) -> u32 { + ::FRAME_TYPE_ID + } + + fn to_min_max_avg(&mut self) -> Box { + match self { + ChannelEvents::Events(item) => Box::new(ChannelEvents::Events(Events::to_min_max_avg(item))), + ChannelEvents::Status(item) => Box::new(ChannelEvents::Status(item.take())), + } + } + + fn to_json_string(&self) -> String { + match self { + ChannelEvents::Events(item) => item.to_json_string(), + ChannelEvents::Status(_item) => { + error!("TODO convert status to json"); + String::new() + } + } + } + + fn to_json_vec_u8(&self) -> Vec { + match self { + 
ChannelEvents::Events(item) => item.to_json_vec_u8(), + ChannelEvents::Status(_item) => { + error!("TODO convert status to json"); + Vec::new() + } + } + } + + fn to_cbor_vec_u8(&self) -> Vec { + match self { + ChannelEvents::Events(item) => item.to_cbor_vec_u8(), + ChannelEvents::Status(_item) => { + error!("TODO convert status to cbor"); + Vec::new() + } + } + } + + fn clear(&mut self) { + match self { + ChannelEvents::Events(x) => Events::clear(x.as_mut()), + ChannelEvents::Status(x) => { + *x = None; + } + } + } + + fn to_dim0_f32_for_binning(&self) -> Box { + use ChannelEvents::*; + match self { + Events(x) => x.to_dim0_f32_for_binning(), + Status(_x) => panic!("ChannelEvents::to_dim0_f32_for_binning"), + } + } + + fn to_container_events(&self) -> Box { + panic!("should not get used") + } +} + +impl CollectableDyn for ChannelEvents { + fn new_collector(&self) -> Box { + Box::new(ChannelEventsCollector::new()) + } +} + +// TODO remove type +#[derive(Debug, Serialize, Deserialize)] +pub struct ChannelEventsCollectorOutput {} + +impl AsAnyRef for ChannelEventsCollectorOutput { + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for ChannelEventsCollectorOutput { + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl TypeName for ChannelEventsCollectorOutput { + fn type_name(&self) -> String { + // TODO should not be here + any::type_name::().into() + } +} + +impl WithLen for ChannelEventsCollectorOutput { + fn len(&self) -> usize { + todo!() + } +} + +impl items_0::collect_s::ToJsonResult for ChannelEventsCollectorOutput { + fn to_json_value(&self) -> Result { + serde_json::to_value(self) + } +} + +impl CollectedDyn for ChannelEventsCollectorOutput {} + +#[derive(Debug)] +pub struct ChannelEventsCollector { + coll: Option>, + range_complete: bool, + timed_out: bool, + needs_continue_at: bool, + tmp_warned_status: bool, + tmp_error_unknown_type: bool, +} + +impl ChannelEventsCollector { + pub fn self_name() -> &'static str { + 
any::type_name::() + } + + pub fn new() -> Self { + Self { + coll: None, + range_complete: false, + timed_out: false, + needs_continue_at: false, + tmp_warned_status: false, + tmp_error_unknown_type: false, + } + } +} + +impl WithLen for ChannelEventsCollector { + fn len(&self) -> usize { + self.coll.as_ref().map_or(0, |x| x.len()) + } +} + +impl ByteEstimate for ChannelEventsCollector { + fn byte_estimate(&self) -> u64 { + self.coll.as_ref().map_or(0, |x| x.byte_estimate()) + } +} + +impl CollectorDyn for ChannelEventsCollector { + fn ingest(&mut self, item: &mut dyn CollectableDyn) { + if let Some(item) = item.as_any_mut().downcast_mut::() { + match item { + ChannelEvents::Events(item) => { + let coll = self + .coll + .get_or_insert_with(|| item.as_ref().as_collectable_with_default_ref().new_collector()); + coll.ingest(item.as_collectable_with_default_mut()); + } + ChannelEvents::Status(_) => { + // TODO decide on output format to collect also the connection status events + if !self.tmp_warned_status { + self.tmp_warned_status = true; + warn!("TODO ChannelEventsCollector ChannelEvents::Status"); + } + } + } + } else { + if !self.tmp_error_unknown_type { + self.tmp_error_unknown_type = true; + error!("ChannelEventsCollector::ingest unexpected item {:?}", item); + } + } + } + + fn set_range_complete(&mut self) { + self.range_complete = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + } + + fn set_continue_at_here(&mut self) { + self.needs_continue_at = true; + } + + fn result( + &mut self, + range: Option, + binrange: Option, + ) -> Result, err::Error> { + match self.coll.as_mut() { + Some(coll) => { + if self.needs_continue_at { + debug!("ChannelEventsCollector set_continue_at_here"); + coll.set_continue_at_here(); + } + if self.range_complete { + coll.set_range_complete(); + } + if self.timed_out { + debug!("ChannelEventsCollector set_timed_out"); + coll.set_timed_out(); + } + let res = coll.result(range, binrange)?; + Ok(res) + } + None => 
{ + let e = err::Error::with_public_msg_no_trace("nothing collected [caa8d2565]"); + error!("{e}"); + Err(e) + } + } + } +} diff --git a/src/empty.rs b/src/empty.rs new file mode 100644 index 0000000..dc2e6b2 --- /dev/null +++ b/src/empty.rs @@ -0,0 +1,58 @@ +use crate::eventsdim0::EventsDim0; +use crate::eventsdim1::EventsDim1; +use crate::Error; +use daqbuf_err as err; +use items_0::Empty; +use items_0::Events; +use netpod::log::*; +use netpod::EnumVariant; +use netpod::ScalarType; +use netpod::Shape; + +pub fn empty_events_dyn_ev(scalar_type: &ScalarType, shape: &Shape) -> Result, Error> { + let ret: Box = match shape { + Shape::Scalar => { + use ScalarType::*; + type K = EventsDim0; + match scalar_type { + U8 => Box::new(K::::empty()), + U16 => Box::new(K::::empty()), + U32 => Box::new(K::::empty()), + U64 => Box::new(K::::empty()), + I8 => Box::new(K::::empty()), + I16 => Box::new(K::::empty()), + I32 => Box::new(K::::empty()), + I64 => Box::new(K::::empty()), + F32 => Box::new(K::::empty()), + F64 => Box::new(K::::empty()), + BOOL => Box::new(K::::empty()), + STRING => Box::new(K::::empty()), + Enum => Box::new(K::::empty()), + } + } + Shape::Wave(..) => { + use ScalarType::*; + type K = EventsDim1; + match scalar_type { + U8 => Box::new(K::::empty()), + U16 => Box::new(K::::empty()), + U32 => Box::new(K::::empty()), + U64 => Box::new(K::::empty()), + I8 => Box::new(K::::empty()), + I16 => Box::new(K::::empty()), + I32 => Box::new(K::::empty()), + I64 => Box::new(K::::empty()), + F32 => Box::new(K::::empty()), + F64 => Box::new(K::::empty()), + BOOL => Box::new(K::::empty()), + STRING => Box::new(K::::empty()), + Enum => Box::new(K::::empty()), + } + } + Shape::Image(..) 
=> { + error!("TODO empty_events_dyn_ev {scalar_type:?} {shape:?}"); + err::todoval() + } + }; + Ok(ret) +} diff --git a/src/eventfull.rs b/src/eventfull.rs new file mode 100644 index 0000000..b686933 --- /dev/null +++ b/src/eventfull.rs @@ -0,0 +1,431 @@ +use crate::framable::FrameType; +use crate::merger::Mergeable; +use bytes::BytesMut; +use daqbuf_err as err; +use err::thiserror; +use err::ThisError; +use items_0::container::ByteEstimate; +use items_0::framable::FrameTypeInnerStatic; +use items_0::streamitem::EVENT_FULL_FRAME_TYPE_ID; +use items_0::Empty; +use items_0::MergeError; +use items_0::WithLen; +#[allow(unused)] +use netpod::log::*; +use netpod::ScalarType; +use netpod::Shape; +use parse::channelconfig::CompressionMethod; +use serde::Deserialize; +use serde::Deserializer; +use serde::Serialize; +use serde::Serializer; +use std::borrow::Cow; +use std::collections::VecDeque; +use std::time::Instant; + +#[allow(unused)] +macro_rules! trace2 { + ($($arg:tt)*) => {}; + ($($arg:tt)*) => { trace!($($arg)*) }; +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventFull { + pub tss: VecDeque, + pub pulses: VecDeque, + pub blobs: VecDeque>, + //#[serde(with = "decomps_serde")] + pub scalar_types: VecDeque, + pub be: VecDeque, + pub shapes: VecDeque, + pub comps: VecDeque>, + pub entry_payload_max: u64, +} + +#[allow(unused)] +mod decomps_serde { + use super::*; + + pub fn serialize(t: &VecDeque>, s: S) -> Result + where + S: Serializer, + { + let a: Vec<_> = t + .iter() + .map(|k| match k { + None => None, + Some(j) => Some(j[..].to_vec()), + }) + .collect(); + Serialize::serialize(&a, s) + } + + pub fn deserialize<'de, D>(d: D) -> Result>, D::Error> + where + D: Deserializer<'de>, + { + let a: Vec>> = Deserialize::deserialize(d)?; + let a = a + .iter() + .map(|k| match k { + None => None, + Some(j) => { + let mut a = BytesMut::new(); + a.extend_from_slice(&j); + Some(a) + } + }) + .collect(); + Ok(a) + } +} + +impl EventFull { + pub fn push( + &mut 
self, + ts: u64, + pulse: u64, + blob: Vec, + scalar_type: ScalarType, + be: bool, + shape: Shape, + comp: Option, + ) { + let m1 = blob.len(); + self.entry_payload_max = self.entry_payload_max.max(m1 as u64); + self.tss.push_back(ts); + self.pulses.push_back(pulse); + self.blobs.push_back(blob); + self.scalar_types.push_back(scalar_type); + self.be.push_back(be); + self.shapes.push_back(shape); + self.comps.push_back(comp); + } + + // TODO possible to get rid of this? + pub fn truncate_ts(&mut self, end: u64) { + let mut nkeep = usize::MAX; + for (i, &ts) in self.tss.iter().enumerate() { + if ts >= end { + nkeep = i; + break; + } + } + self.tss.truncate(nkeep); + self.pulses.truncate(nkeep); + self.blobs.truncate(nkeep); + self.scalar_types.truncate(nkeep); + self.be.truncate(nkeep); + self.shapes.truncate(nkeep); + self.comps.truncate(nkeep); + } + + // NOTE needed because the databuffer actually doesn't write the correct shape per event. + pub fn overwrite_all_shapes(&mut self, shape: &Shape) { + for u in &mut self.shapes { + *u = shape.clone(); + } + } + + pub fn pop_back(&mut self) { + self.tss.pop_back(); + self.pulses.pop_back(); + self.blobs.pop_back(); + self.scalar_types.pop_back(); + self.be.pop_back(); + self.shapes.pop_back(); + self.comps.pop_back(); + } + + pub fn keep_ixs(&mut self, ixs: &[bool]) { + fn inner(v: &mut VecDeque, ixs: &[bool]) { + let mut it = ixs.iter(); + v.retain_mut(move |_| it.next().map(Clone::clone).unwrap_or(false)); + } + inner(&mut self.tss, ixs); + inner(&mut self.pulses, ixs); + inner(&mut self.blobs, ixs); + inner(&mut self.scalar_types, ixs); + inner(&mut self.be, ixs); + inner(&mut self.shapes, ixs); + inner(&mut self.comps, ixs); + } +} + +impl FrameTypeInnerStatic for EventFull { + const FRAME_TYPE_ID: u32 = EVENT_FULL_FRAME_TYPE_ID; +} + +impl FrameType for EventFull { + fn frame_type_id(&self) -> u32 { + ::FRAME_TYPE_ID + } +} + +impl Empty for EventFull { + fn empty() -> Self { + Self { + tss: VecDeque::new(), + 
pulses: VecDeque::new(), + blobs: VecDeque::new(), + scalar_types: VecDeque::new(), + be: VecDeque::new(), + shapes: VecDeque::new(), + comps: VecDeque::new(), + entry_payload_max: 0, + } + } +} + +impl WithLen for EventFull { + fn len(&self) -> usize { + self.tss.len() + } +} + +impl ByteEstimate for EventFull { + fn byte_estimate(&self) -> u64 { + self.len() as u64 * (64 + self.entry_payload_max) + } +} + +impl Mergeable for EventFull { + fn ts_min(&self) -> Option { + self.tss.front().map(|&x| x) + } + + fn ts_max(&self) -> Option { + self.tss.back().map(|&x| x) + } + + fn new_empty(&self) -> Self { + Empty::empty() + } + + fn clear(&mut self) { + self.tss.clear(); + self.pulses.clear(); + self.blobs.clear(); + self.scalar_types.clear(); + self.be.clear(); + self.shapes.clear(); + self.comps.clear(); + self.entry_payload_max = 0; + } + + fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> { + // TODO make it harder to forget new members when the struct may get modified in the future + let r = range.0..range.1; + let mut max = dst.entry_payload_max; + for i in r.clone() { + max = max.max(self.blobs[i].len() as _); + } + dst.entry_payload_max = max; + dst.tss.extend(self.tss.drain(r.clone())); + dst.pulses.extend(self.pulses.drain(r.clone())); + dst.blobs.extend(self.blobs.drain(r.clone())); + dst.scalar_types.extend(self.scalar_types.drain(r.clone())); + dst.be.extend(self.be.drain(r.clone())); + dst.shapes.extend(self.shapes.drain(r.clone())); + dst.comps.extend(self.comps.drain(r.clone())); + Ok(()) + } + + fn find_lowest_index_gt(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m > ts { + return Some(i); + } + } + None + } + + fn find_lowest_index_ge(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m >= ts { + return Some(i); + } + } + None + } + + fn find_highest_index_lt(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate().rev() { + if m < 
ts { + return Some(i); + } + } + None + } + + fn tss(&self) -> Vec { + self.tss.iter().map(|x| netpod::TsMs::from_ns_u64(*x)).collect() + } +} + +#[derive(Debug, ThisError, Serialize, Deserialize)] +#[cstm(name = "Decompress")] +pub enum DecompError { + TooLittleInput, + BadCompresionBlockSize, + UnusedBytes, + BitshuffleError, + ShapeMakesNoSense, + UnexpectedCompressedScalarValue, +} + +fn decompress(databuf: &[u8], type_size: u32) -> Result, DecompError> { + // TODO collect decompression stats + let ts1 = Instant::now(); + if databuf.len() < 12 { + return Err(DecompError::TooLittleInput); + } + let value_bytes = u64::from_be_bytes(databuf[0..8].try_into().unwrap()); + let block_size = u32::from_be_bytes(databuf[8..12].try_into().unwrap()); + trace2!( + "decompress len {} value_bytes {} block_size {}", + databuf.len(), + value_bytes, + block_size + ); + if block_size > 1024 * 32 { + return Err(DecompError::BadCompresionBlockSize); + } + let ele_count = value_bytes / type_size as u64; + trace2!( + "ele_count {} ele_count_2 {} ele_count_exp {}", + ele_count, + ele_count_2, + ele_count_exp + ); + let mut decomp: Vec = Vec::with_capacity(type_size as usize * ele_count as usize); + unsafe { + decomp.set_len(decomp.capacity()); + } + // #[cfg(DISABLED)] + match bitshuffle::bitshuffle_decompress(&databuf[12..], &mut decomp, ele_count as _, type_size as _, 0) { + Ok(c1) => { + if 12 + c1 != databuf.len() { + Err(DecompError::UnusedBytes) + } else { + let ts2 = Instant::now(); + let _dt = ts2.duration_since(ts1); + // TODO analyze the histo + //self.decomp_dt_histo.ingest(dt.as_secs() as u32 + dt.subsec_micros()); + Ok(decomp) + } + } + Err(_) => Err(DecompError::BitshuffleError), + } + // todo!("bitshuffle not available") +} + +impl EventFull { + /// Tries to infer the actual shape of the event from what's on disk and what we expect. + /// The event data on disk usually always indicate "scalar" even for waveforms. 
+ /// If the data is compressed via bslz4 then we can infer the number of elements + /// but we still don't know whether that's an image or a waveform. + /// Therefore, the function accepts the expected shape to at least make an assumption + /// about whether this is an image or a waveform. + pub fn shape_derived( + &self, + i: usize, + scalar_type_exp: &ScalarType, + shape_exp: &Shape, + ) -> Result { + match shape_exp { + Shape::Scalar => match &self.comps[i] { + Some(_) => match scalar_type_exp { + ScalarType::STRING => Ok(Shape::Scalar), + _ => Err(DecompError::UnexpectedCompressedScalarValue), + }, + None => Ok(Shape::Scalar), + }, + Shape::Wave(_) => match &self.shapes[i] { + Shape::Scalar => match &self.comps[i] { + Some(comp) => match comp { + CompressionMethod::BitshuffleLZ4 => { + let type_size = self.scalar_types[i].bytes() as u32; + match self.blobs[i][0..8].try_into() { + Ok(a) => { + let value_bytes = u64::from_be_bytes(a); + let value_bytes = value_bytes as u32; + if value_bytes % type_size != 0 { + Err(DecompError::ShapeMakesNoSense) + } else { + let n = value_bytes / type_size; + // Here we still can't know whether the disk contains a waveform or image + // so we assume that the user input is correct: + Ok(Shape::Wave(n)) + } + } + Err(_) => Err(DecompError::ShapeMakesNoSense), + } + } + }, + None => Err(DecompError::ShapeMakesNoSense), + }, + Shape::Wave(s) => Ok(Shape::Wave(s.clone())), + Shape::Image(_, _) => Err(DecompError::ShapeMakesNoSense), + }, + Shape::Image(a, b) => match &self.shapes[i] { + Shape::Scalar => match &self.comps[i] { + Some(comp) => match comp { + CompressionMethod::BitshuffleLZ4 => { + let type_size = self.scalar_types[i].bytes() as u32; + match self.blobs[i][0..8].try_into() { + Ok(vb) => { + let value_bytes = u64::from_be_bytes(vb); + let value_bytes = value_bytes as u32; + if value_bytes % type_size != 0 { + Err(DecompError::ShapeMakesNoSense) + } else { + let n = value_bytes / type_size; + // Here we still can't know 
whether the disk contains a waveform or image + // so we assume that the user input is correct. + // NOTE + // We only know the number of pixels from the compressed blob but we can't + // know the actual shape. + // Can only rely on user input and check that total number of pixels agree. + if *a * *b != n { + Err(DecompError::ShapeMakesNoSense) + } else { + Ok(Shape::Image(*a, *b)) + } + } + } + Err(_) => Err(DecompError::ShapeMakesNoSense), + } + } + }, + None => Err(DecompError::ShapeMakesNoSense), + }, + Shape::Wave(_) => Err(DecompError::ShapeMakesNoSense), + Shape::Image(a, b) => Ok(Shape::Image(*a, *b)), + }, + } + } + + pub fn data_raw(&self, i: usize) -> &[u8] { + &self.blobs[i] + } + + pub fn data_decompressed(&self, i: usize) -> Result, DecompError> { + if let Some(comp) = &self.comps[i] { + match comp { + CompressionMethod::BitshuffleLZ4 => { + // NOTE the event data on databuffer disk seems to contain the correct scalar type + // but the shape of the event record seems always "scalar" even for waveforms + // so we must derive the shape of the compressed data from the length of the + // uncompressed byte blob and the byte size of the scalar type. 
+ let type_size = self.scalar_types[i].bytes() as u32; + let data = decompress(&self.blobs[i], type_size)?; + Ok(Cow::Owned(data)) + } + } + } else { + let data = &self.blobs[i]; + Ok(Cow::Borrowed(data.as_slice())) + } + } +} diff --git a/src/eventsdim0.rs b/src/eventsdim0.rs new file mode 100644 index 0000000..f9a10c8 --- /dev/null +++ b/src/eventsdim0.rs @@ -0,0 +1,869 @@ +use crate::IsoDateTime; +use daqbuf_err as err; +use err::Error; +use items_0::collect_s::CollectableDyn; +use items_0::collect_s::CollectedDyn; +use items_0::collect_s::CollectorTy; +use items_0::collect_s::ToJsonResult; +use items_0::container::ByteEstimate; +use items_0::overlap::HasTimestampDeque; +use items_0::scalar_ops::ScalarOps; +use items_0::Appendable; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::Empty; +use items_0::Events; +use items_0::EventsNonObj; +use items_0::MergeError; +use items_0::Resettable; +use items_0::TypeName; +use items_0::WithLen; +use netpod::is_false; +use netpod::log::*; +use netpod::range::evrange::SeriesRange; +use netpod::timeunits::MS; +use netpod::timeunits::SEC; +use netpod::BinnedRangeEnum; +use netpod::TsNano; +use serde::Deserialize; +use serde::Serialize; +use std::any; +use std::any::Any; +use std::collections::VecDeque; +use std::fmt; +use std::mem; + +#[allow(unused)] +macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_item { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_ingest_event { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace2 { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! trace_binning { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[allow(unused)] +macro_rules! 
debug_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) } + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct EventsDim0NoPulse { + pub tss: VecDeque, + pub values: VecDeque, +} + +impl From> for EventsDim0 { + fn from(value: EventsDim0NoPulse) -> Self { + let pulses = vec![0; value.tss.len()].into(); + Self { + tss: value.tss, + pulses, + values: value.values, + } + } +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct EventsDim0 { + pub tss: VecDeque, + pub pulses: VecDeque, + pub values: VecDeque, +} + +impl EventsDim0 { + pub fn type_name() -> &'static str { + std::any::type_name::() + } + + pub fn push_back(&mut self, ts: u64, pulse: u64, value: STY) { + self.tss.push_back(ts); + self.pulses.push_back(pulse); + self.values.push_back(value); + } + + pub fn push_front(&mut self, ts: u64, pulse: u64, value: STY) { + self.tss.push_front(ts); + self.pulses.push_front(pulse); + self.values.push_front(value); + } + + pub fn serde_id() -> &'static str { + "EventsDim0" + } + + pub fn tss(&self) -> &VecDeque { + &self.tss + } + + // only for testing at the moment + pub fn private_values_ref(&self) -> &VecDeque { + &self.values + } + pub fn private_values_mut(&mut self) -> &mut VecDeque { + &mut self.values + } +} + +impl AsAnyRef for EventsDim0 +where + STY: ScalarOps, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for EventsDim0 +where + STY: ScalarOps, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl Empty for EventsDim0 { + fn empty() -> Self { + Self { + tss: VecDeque::new(), + pulses: VecDeque::new(), + values: VecDeque::new(), + } + } +} + +impl fmt::Debug for EventsDim0 +where + STY: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + if false { + write!( + fmt, + "{} {{ count {} ts {:?} vals {:?} }}", + self.type_name(), + self.tss.len(), + self.tss.iter().map(|x| x / SEC).collect::>(), + self.values, + ) + } else { + write!( + fmt, + "{} {{ count {} ts 
{:?} .. {:?} vals {:?} .. {:?} }}", + self.type_name(), + self.tss.len(), + self.tss.front().map(|&x| TsNano::from_ns(x)), + self.tss.back().map(|&x| TsNano::from_ns(x)), + self.values.front(), + self.values.back(), + ) + } + } +} + +impl WithLen for EventsDim0 { + fn len(&self) -> usize { + self.tss.len() + } +} + +impl ByteEstimate for EventsDim0 { + fn byte_estimate(&self) -> u64 { + // TODO + // Should use a better estimate for waveform and string types, + // or keep some aggregated byte count on push. + let n = self.len(); + if n == 0 { + 0 + } else { + // TODO use the actual size of one/some of the elements. + let i = n * 2 / 3; + let sty_bytes = self.values[i].byte_estimate(); + (n as u64 * (8 + 8 + sty_bytes)) as u64 + } + } +} + +impl Resettable for EventsDim0 { + fn reset(&mut self) { + self.tss.clear(); + self.pulses.clear(); + self.values.clear(); + } +} + +impl HasTimestampDeque for EventsDim0 { + fn timestamp_min(&self) -> Option { + self.tss.front().map(|x| *x) + } + + fn timestamp_max(&self) -> Option { + self.tss.back().map(|x| *x) + } + + fn pulse_min(&self) -> Option { + self.pulses.front().map(|x| *x) + } + + fn pulse_max(&self) -> Option { + self.pulses.back().map(|x| *x) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventsDim0ChunkOutput { + tss: VecDeque, + pulses: VecDeque, + values: VecDeque, + scalar_type: String, +} + +impl EventsDim0ChunkOutput {} + +#[derive(Debug)] +pub struct EventsDim0Collector { + vals: EventsDim0, + range_final: bool, + timed_out: bool, + needs_continue_at: bool, +} + +impl EventsDim0Collector { + pub fn self_name() -> &'static str { + any::type_name::() + } + + pub fn new() -> Self { + debug!("EventsDim0Collector NEW"); + Self { + vals: EventsDim0::empty(), + range_final: false, + timed_out: false, + needs_continue_at: false, + } + } +} + +impl WithLen for EventsDim0Collector { + fn len(&self) -> usize { + WithLen::len(&self.vals) + } +} + +impl ByteEstimate for EventsDim0Collector { + fn 
byte_estimate(&self) -> u64 { + ByteEstimate::byte_estimate(&self.vals) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventsDim0CollectorOutput { + #[serde(rename = "tsAnchor")] + ts_anchor_sec: u64, + #[serde(rename = "tsMs")] + ts_off_ms: VecDeque, + #[serde(rename = "tsNs")] + ts_off_ns: VecDeque, + #[serde(rename = "pulseAnchor")] + pulse_anchor: u64, + #[serde(rename = "pulseOff")] + pulse_off: VecDeque, + #[serde(rename = "values")] + values: VecDeque, + #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")] + range_final: bool, + #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")] + timed_out: bool, + #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")] + continue_at: Option, +} + +impl EventsDim0CollectorOutput { + pub fn ts_anchor_sec(&self) -> u64 { + self.ts_anchor_sec + } + + pub fn ts_off_ms(&self) -> &VecDeque { + &self.ts_off_ms + } + + pub fn pulse_anchor(&self) -> u64 { + self.pulse_anchor + } + + pub fn pulse_off(&self) -> &VecDeque { + &self.pulse_off + } + + /// Note: only used for unit tests. 
+ pub fn values_to_f32(&self) -> VecDeque { + self.values.iter().map(|x| x.as_prim_f32_b()).collect() + } + + pub fn range_final(&self) -> bool { + self.range_final + } + + pub fn timed_out(&self) -> bool { + self.timed_out + } + + pub fn is_valid(&self) -> bool { + if self.ts_off_ms.len() != self.ts_off_ns.len() { + false + } else if self.ts_off_ms.len() != self.pulse_off.len() { + false + } else if self.ts_off_ms.len() != self.values.len() { + false + } else { + true + } + } + + pub fn info_str(&self) -> String { + use fmt::Write; + let mut out = String::new(); + write!( + out, + "ts_off_ms {} ts_off_ns {} pulse_off {} values {}", + self.ts_off_ms.len(), + self.ts_off_ns.len(), + self.pulse_off.len(), + self.values.len(), + ) + .unwrap(); + out + } +} + +impl AsAnyRef for EventsDim0CollectorOutput +where + STY: 'static, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for EventsDim0CollectorOutput +where + STY: 'static, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl TypeName for EventsDim0CollectorOutput { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl WithLen for EventsDim0CollectorOutput { + fn len(&self) -> usize { + self.values.len() + } +} + +impl ToJsonResult for EventsDim0CollectorOutput { + fn to_json_value(&self) -> Result { + serde_json::to_value(self) + } +} + +impl CollectedDyn for EventsDim0CollectorOutput {} + +impl CollectorTy for EventsDim0Collector { + type Input = EventsDim0; + type Output = EventsDim0CollectorOutput; + + fn ingest(&mut self, src: &mut Self::Input) { + self.vals.tss.append(&mut src.tss); + self.vals.pulses.append(&mut src.pulses); + self.vals.values.append(&mut src.values); + } + + fn set_range_complete(&mut self) { + self.range_final = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + self.needs_continue_at = true; + } + + fn set_continue_at_here(&mut self) { + self.needs_continue_at = true; + } + + fn result( + &mut self, + range: 
Option, + _binrange: Option, + ) -> Result { + debug!( + "{} result() needs_continue_at {}", + Self::self_name(), + self.needs_continue_at + ); + // If we timed out, we want to hint the client from where to continue. + // This is tricky: currently, client can not request a left-exclusive range. + // We currently give the timestamp of the last event plus a small delta. + // The amount of the delta must take into account what kind of timestamp precision the client + // can parse and handle. + let vals = &mut self.vals; + let continue_at = if self.needs_continue_at { + if let Some(ts) = vals.tss.back() { + let x = Some(IsoDateTime::from_ns_u64(*ts / MS * MS + MS)); + x + } else { + if let Some(range) = &range { + match range { + SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)), + SeriesRange::PulseRange(_) => { + error!("TODO emit create continueAt for pulse range"); + Some(IsoDateTime::from_ns_u64(0)) + } + } + } else { + Some(IsoDateTime::from_ns_u64(0)) + } + } + } else { + None + }; + let tss_sl = vals.tss.make_contiguous(); + let pulses_sl = vals.pulses.make_contiguous(); + let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl); + let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl); + let values = mem::replace(&mut vals.values, VecDeque::new()); + if ts_off_ms.len() != ts_off_ns.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + if ts_off_ms.len() != pulse_off.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + if ts_off_ms.len() != values.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + let ret = Self::Output { + ts_anchor_sec, + ts_off_ms, + ts_off_ns, + pulse_anchor, + pulse_off, + values, + range_final: self.range_final, + timed_out: self.timed_out, + continue_at, + }; + if !ret.is_valid() { + error!("invalid:\n{}", ret.info_str()); + } + Ok(ret) + } +} + +impl items_0::collect_s::CollectableType for EventsDim0 
{ + type Collector = EventsDim0Collector; + + fn new_collector() -> Self::Collector { + Self::Collector::new() + } +} + +#[derive(Debug)] +pub struct EventsDim0Aggregator { + range: SeriesRange, + count: u64, + minmaxlst: Option<(STY, STY, STY)>, + sumc: u64, + sum: f32, + int_ts: u64, + last_ts: u64, + do_time_weight: bool, + events_ignored_count: u64, + items_seen: usize, +} + +impl Drop for EventsDim0Aggregator { + fn drop(&mut self) { + // TODO collect as stats for the request context: + trace!("count {} ignored {}", self.count, self.events_ignored_count); + } +} + +impl TypeName for EventsDim0 { + fn type_name(&self) -> String { + let self_name = any::type_name::(); + format!("{self_name}") + } +} + +impl EventsNonObj for EventsDim0 { + fn into_tss_pulses(self: Box) -> (VecDeque, VecDeque) { + trace!( + "{}::into_tss_pulses len {} len {}", + Self::type_name(), + self.tss.len(), + self.pulses.len() + ); + (self.tss, self.pulses) + } +} + +macro_rules! try_to_container_events { + ($sty:ty, $this:expr) => { + let this = $this; + if let Some(evs) = this.as_any_ref().downcast_ref::>() { + use crate::binning::container_events::ContainerEvents; + let tss = this.tss.iter().map(|&x| TsNano::from_ns(x)).collect(); + let vals = evs.values.clone(); + let ret = ContainerEvents::<$sty>::from_constituents(tss, vals); + return Box::new(ret); + } + }; +} + +impl Events for EventsDim0 { + fn verify(&self) -> bool { + let mut good = true; + let n = self.tss.len(); + for (&ts1, &ts2) in self.tss.iter().zip(self.tss.range(n.min(1)..n)) { + if ts1 > ts2 { + good = false; + error!("unordered event data ts1 {} ts2 {}", ts1, ts2); + break; + } + } + good + } + + fn output_info(&self) -> String { + let n2 = self.tss.len().max(1) - 1; + let min = if let Some(ts) = self.tss.get(0) { + TsNano::from_ns(*ts).fmt().to_string() + } else { + String::from("None") + }; + let max = if let Some(ts) = self.tss.get(n2) { + TsNano::from_ns(*ts).fmt().to_string() + } else { + String::from("None") + }; 
+ format!( + "EventsDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}", + self.tss.len(), + min, + max, + ) + } + + fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn { + self + } + + fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn { + self + } + + fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn { + self + } + + fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box { + // TODO improve the search + let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count(); + let tss = self.tss.drain(..n1).collect(); + let pulses = self.pulses.drain(..n1).collect(); + let values = self.values.drain(..n1).collect(); + let ret = Self { tss, pulses, values }; + Box::new(ret) + } + + fn new_empty_evs(&self) -> Box { + Box::new(Self::empty()) + } + + fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> { + // TODO as_any and as_any_mut are declared on unrelated traits. Simplify. + if let Some(dst) = dst.as_any_mut().downcast_mut::() { + // TODO make it harder to forget new members when the struct may get modified in the future + let r = range.0..range.1; + dst.tss.extend(self.tss.drain(r.clone())); + dst.pulses.extend(self.pulses.drain(r.clone())); + dst.values.extend(self.values.drain(r.clone())); + Ok(()) + } else { + error!( + "downcast to EventsDim0 FAILED\n\n{}\n\n{}\n\n", + self.type_name(), + dst.type_name() + ); + panic!(); + Err(MergeError::NotCompatible) + } + } + + fn find_lowest_index_gt_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m > ts { + return Some(i); + } + } + None + } + + fn find_lowest_index_ge_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m >= ts { + return Some(i); + } + } + None + } + + fn find_highest_index_lt_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate().rev() { + if m < ts { + return Some(i); + } + } + None + } + + fn ts_min(&self) -> Option { + 
self.tss.front().map(|&x| x) + } + + fn ts_max(&self) -> Option { + self.tss.back().map(|&x| x) + } + + fn partial_eq_dyn(&self, other: &dyn Events) -> bool { + if let Some(other) = other.as_any_ref().downcast_ref::() { + self == other + } else { + false + } + } + + fn serde_id(&self) -> &'static str { + Self::serde_id() + } + + fn nty_id(&self) -> u32 { + STY::SUB + } + + fn clone_dyn(&self) -> Box { + Box::new(self.clone()) + } + + fn tss(&self) -> &VecDeque { + &self.tss + } + + fn pulses(&self) -> &VecDeque { + &self.pulses + } + + fn frame_type_id(&self) -> u32 { + error!("TODO frame_type_id should not be called"); + // TODO make more nice + panic!() + } + + fn to_min_max_avg(&mut self) -> Box { + let dst = Self { + tss: mem::replace(&mut self.tss, Default::default()), + pulses: mem::replace(&mut self.pulses, Default::default()), + values: mem::replace(&mut self.values, Default::default()), + }; + Box::new(dst) + } + + fn to_json_string(&self) -> String { + // TODO redesign with mut access, rename to `into_` and take the values out. + let mut tss = self.tss.clone(); + let mut pulses = self.pulses.clone(); + let mut values = self.values.clone(); + let tss_sl = tss.make_contiguous(); + let pulses_sl = pulses.make_contiguous(); + let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl); + let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl); + let values = mem::replace(&mut values, VecDeque::new()); + let ret = EventsDim0CollectorOutput { + ts_anchor_sec, + ts_off_ms, + ts_off_ns, + pulse_anchor, + pulse_off, + values, + range_final: false, + timed_out: false, + continue_at: None, + }; + serde_json::to_string(&ret).unwrap() + } + + fn to_json_vec_u8(&self) -> Vec { + self.to_json_string().into_bytes() + } + + fn to_cbor_vec_u8(&self) -> Vec { + // TODO redesign with mut access, rename to `into_` and take the values out. 
+ let ret = EventsDim0ChunkOutput { + // TODO use &mut to swap the content + tss: self.tss.clone(), + pulses: self.pulses.clone(), + values: self.values.clone(), + scalar_type: STY::scalar_type_name().into(), + }; + let mut buf = Vec::new(); + ciborium::into_writer(&ret, &mut buf).unwrap(); + buf + } + + fn clear(&mut self) { + self.tss.clear(); + self.pulses.clear(); + self.values.clear(); + } + + fn to_dim0_f32_for_binning(&self) -> Box { + let mut ret = EventsDim0::empty(); + for (&ts, val) in self.tss.iter().zip(self.values.iter()) { + ret.push(ts, 0, val.as_prim_f32_b()); + } + Box::new(ret) + } + + fn to_container_events(&self) -> Box { + try_to_container_events!(u8, self); + try_to_container_events!(u16, self); + try_to_container_events!(u32, self); + try_to_container_events!(u64, self); + try_to_container_events!(i8, self); + try_to_container_events!(i16, self); + try_to_container_events!(i32, self); + try_to_container_events!(i64, self); + try_to_container_events!(f32, self); + try_to_container_events!(f64, self); + try_to_container_events!(bool, self); + try_to_container_events!(String, self); + let this = self; + if let Some(evs) = self.as_any_ref().downcast_ref::>() { + use crate::binning::container_events::ContainerEvents; + let tss = this.tss.iter().map(|&x| TsNano::from_ns(x)).collect(); + use crate::binning::container_events::Container; + let mut vals = crate::binning::valuetype::EnumVariantContainer::new(); + for x in evs.values.iter() { + vals.push_back(x.clone()); + } + let ret = ContainerEvents::::from_constituents(tss, vals); + return Box::new(ret); + } + let styn = any::type_name::(); + todo!("TODO to_container_events for {styn}") + } +} + +impl Appendable for EventsDim0 +where + STY: ScalarOps, +{ + fn push(&mut self, ts: u64, pulse: u64, value: STY) { + self.tss.push_back(ts); + self.pulses.push_back(pulse); + self.values.push_back(value); + } +} + +#[cfg(test)] +mod test_frame { + use super::*; + use crate::channelevents::ChannelEvents; + 
use crate::framable::Framable; + use crate::framable::INMEM_FRAME_ENCID; + use crate::frame::decode_frame; + use crate::inmem::InMemoryFrame; + use items_0::streamitem::RangeCompletableItem; + use items_0::streamitem::Sitemty; + use items_0::streamitem::StreamItem; + + #[test] + fn events_serialize() { + // taskrun::tracing_init_testing().unwrap(); + let mut events = EventsDim0::empty(); + events.push(123, 234, 55f32); + let events = events; + let events: Box = Box::new(events); + let item = ChannelEvents::Events(events); + let item = Ok::<_, Error>(StreamItem::DataItem(RangeCompletableItem::Data(item))); + let mut buf = item.make_frame_dyn().unwrap(); + let s = String::from_utf8_lossy(&buf[20..buf.len() - 4]); + eprintln!("[[{s}]]"); + let buflen = buf.len(); + let frame = InMemoryFrame { + encid: INMEM_FRAME_ENCID, + tyid: 0x2500, + len: (buflen - 24) as _, + buf: buf.split_off(20).split_to(buflen - 20 - 4).freeze(), + }; + let item: Sitemty = decode_frame(&frame).unwrap(); + let item = if let Ok(x) = item { x } else { panic!() }; + let item = if let StreamItem::DataItem(x) = item { + x + } else { + panic!() + }; + let item = if let RangeCompletableItem::Data(x) = item { + x + } else { + panic!() + }; + let mut item = if let ChannelEvents::Events(x) = item { + x + } else { + panic!() + }; + let item = if let Some(item) = item.as_any_mut().downcast_mut::>() { + item + } else { + panic!() + }; + assert_eq!(item.tss(), &[123]); + } +} + +#[cfg(test)] +mod test_serde_opt { + use super::*; + + #[derive(Serialize)] + struct A { + a: Option, + #[serde(default)] + b: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + c: Option, + } + + #[test] + fn test_a() { + let s = serde_json::to_string(&A { + a: None, + b: None, + c: None, + }) + .unwrap(); + assert_eq!(s, r#"{"a":null,"b":null}"#); + } +} diff --git a/src/eventsdim0enum.rs b/src/eventsdim0enum.rs new file mode 100644 index 0000000..50f7da2 --- /dev/null +++ b/src/eventsdim0enum.rs @@ -0,0 +1,469 
@@ +use daqbuf_err as err; +use err::Error; +use items_0::collect_s::CollectableDyn; +use items_0::collect_s::CollectedDyn; +use items_0::collect_s::CollectorDyn; +use items_0::collect_s::CollectorTy; +use items_0::collect_s::ToJsonBytes; +use items_0::collect_s::ToJsonResult; +use items_0::container::ByteEstimate; +use items_0::isodate::IsoDateTime; +use items_0::scalar_ops::ScalarOps; +use items_0::timebin::TimeBinnableTy; +use items_0::timebin::TimeBinnerTy; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::Events; +use items_0::EventsNonObj; +use items_0::TypeName; +use items_0::WithLen; +use netpod::log::*; +use netpod::range::evrange::SeriesRange; +use netpod::timeunits::MS; +use netpod::timeunits::SEC; +use netpod::BinnedRangeEnum; +use serde::Deserialize; +use serde::Serialize; +use std::any; +use std::any::Any; +use std::collections::VecDeque; +use std::mem; + +#[allow(unused)] +macro_rules! trace_collect_result { + ($($arg:tt)*) => { + if false { + trace!($($arg)*); + } + }; +} + +#[derive(Debug)] +pub struct EventsDim0EnumCollector { + vals: EventsDim0Enum, + range_final: bool, + timed_out: bool, + needs_continue_at: bool, +} + +impl EventsDim0EnumCollector { + pub fn new() -> Self { + Self { + vals: EventsDim0Enum::new(), + range_final: false, + timed_out: false, + needs_continue_at: false, + } + } +} + +impl TypeName for EventsDim0EnumCollector { + fn type_name(&self) -> String { + "EventsDim0EnumCollector".into() + } +} + +impl WithLen for EventsDim0EnumCollector { + fn len(&self) -> usize { + self.vals.tss.len() + } +} + +impl ByteEstimate for EventsDim0EnumCollector { + fn byte_estimate(&self) -> u64 { + // TODO does it need to be more accurate? 
+ 30 * self.len() as u64 + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventsDim0EnumCollectorOutput { + #[serde(rename = "tsAnchor")] + ts_anchor_sec: u64, + #[serde(rename = "tsMs")] + ts_off_ms: VecDeque, + #[serde(rename = "tsNs")] + ts_off_ns: VecDeque, + #[serde(rename = "values")] + vals: VecDeque, + #[serde(rename = "valuestrings")] + valstrs: VecDeque, + #[serde(rename = "rangeFinal", default, skip_serializing_if = "netpod::is_false")] + range_final: bool, + #[serde(rename = "timedOut", default, skip_serializing_if = "netpod::is_false")] + timed_out: bool, + #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")] + continue_at: Option, +} + +impl WithLen for EventsDim0EnumCollectorOutput { + fn len(&self) -> usize { + todo!() + } +} + +impl AsAnyRef for EventsDim0EnumCollectorOutput { + fn as_any_ref(&self) -> &dyn Any { + todo!() + } +} + +impl AsAnyMut for EventsDim0EnumCollectorOutput { + fn as_any_mut(&mut self) -> &mut dyn Any { + todo!() + } +} + +impl TypeName for EventsDim0EnumCollectorOutput { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl ToJsonResult for EventsDim0EnumCollectorOutput { + fn to_json_value(&self) -> Result { + todo!() + } +} + +impl CollectedDyn for EventsDim0EnumCollectorOutput {} + +impl CollectorTy for EventsDim0EnumCollector { + type Input = EventsDim0Enum; + type Output = EventsDim0EnumCollectorOutput; + + fn ingest(&mut self, src: &mut EventsDim0Enum) { + self.vals.tss.append(&mut src.tss); + self.vals.values.append(&mut src.values); + self.vals.valuestrs.append(&mut src.valuestrs); + } + + fn set_range_complete(&mut self) { + self.range_final = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + self.needs_continue_at = true; + } + + fn set_continue_at_here(&mut self) { + self.needs_continue_at = true; + } + + fn result( + &mut self, + range: Option, + binrange: Option, + ) -> Result { + trace_collect_result!( + "{} result() 
needs_continue_at {}", + self.type_name(), + self.needs_continue_at + ); + // If we timed out, we want to hint the client from where to continue. + // This is tricky: currently, client can not request a left-exclusive range. + // We currently give the timestamp of the last event plus a small delta. + // The amount of the delta must take into account what kind of timestamp precision the client + // can parse and handle. + let vals = &mut self.vals; + let continue_at = if self.needs_continue_at { + if let Some(ts) = vals.tss.back() { + let x = Some(IsoDateTime::from_ns_u64(*ts / MS * MS + MS)); + x + } else { + if let Some(range) = &range { + match range { + SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)), + SeriesRange::PulseRange(_) => { + error!("TODO emit create continueAt for pulse range"); + Some(IsoDateTime::from_ns_u64(0)) + } + } + } else { + Some(IsoDateTime::from_ns_u64(0)) + } + } + } else { + None + }; + let tss_sl = vals.tss.make_contiguous(); + let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl); + let valixs = mem::replace(&mut vals.values, VecDeque::new()); + let valstrs = mem::replace(&mut vals.valuestrs, VecDeque::new()); + let vals = valixs; + if ts_off_ms.len() != ts_off_ns.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + if ts_off_ms.len() != vals.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + if ts_off_ms.len() != valstrs.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + let ret = Self::Output { + ts_anchor_sec, + ts_off_ms, + ts_off_ns, + vals, + valstrs, + range_final: self.range_final, + timed_out: self.timed_out, + continue_at, + }; + Ok(ret) + } +} + +// Experiment with having this special case for enums +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct EventsDim0Enum { + pub tss: VecDeque, + pub values: VecDeque, + pub valuestrs: VecDeque, +} + +impl EventsDim0Enum { + pub fn 
new() -> Self { + Self { + tss: VecDeque::new(), + values: VecDeque::new(), + valuestrs: VecDeque::new(), + } + } + + pub fn push_back(&mut self, ts: u64, value: u16, valuestr: String) { + self.tss.push_back(ts); + self.values.push_back(value); + self.valuestrs.push_back(valuestr); + } +} + +impl TypeName for EventsDim0Enum { + fn type_name(&self) -> String { + "EventsDim0Enum".into() + } +} + +impl AsAnyRef for EventsDim0Enum { + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for EventsDim0Enum { + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl WithLen for EventsDim0Enum { + fn len(&self) -> usize { + self.tss.len() + } +} + +impl CollectableDyn for EventsDim0Enum { + fn new_collector(&self) -> Box { + Box::new(EventsDim0EnumCollector::new()) + } +} + +// impl Events + +impl ByteEstimate for EventsDim0Enum { + fn byte_estimate(&self) -> u64 { + todo!() + } +} + +impl EventsNonObj for EventsDim0Enum { + fn into_tss_pulses(self: Box) -> (VecDeque, VecDeque) { + todo!() + } +} + +// NOTE just a dummy because currently we don't use this for time binning +#[derive(Debug)] +pub struct EventsDim0EnumTimeBinner; + +impl TimeBinnerTy for EventsDim0EnumTimeBinner { + type Input = EventsDim0Enum; + type Output = (); + + fn ingest(&mut self, item: &mut Self::Input) { + todo!() + } + + fn set_range_complete(&mut self) { + todo!() + } + + fn bins_ready_count(&self) -> usize { + todo!() + } + + fn bins_ready(&mut self) -> Option { + todo!() + } + + fn push_in_progress(&mut self, push_empty: bool) { + todo!() + } + + fn cycle(&mut self) { + todo!() + } + + fn empty(&self) -> Option { + todo!() + } + + fn append_empty_until_end(&mut self) { + todo!() + } +} + +// NOTE just a dummy because currently we don't use this for time binning +impl TimeBinnableTy for EventsDim0Enum { + type TimeBinner = EventsDim0EnumTimeBinner; + + fn time_binner_new( + &self, + binrange: BinnedRangeEnum, + do_time_weight: bool, + emit_empty_bins: bool, + ) -> 
Self::TimeBinner { + todo!() + } +} + +// NOTE just a dummy because currently we don't use this for time binning + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventsDim0EnumChunkOutput { + tss: VecDeque, + values: VecDeque, + valuestrings: VecDeque, + scalar_type: String, +} + +impl Events for EventsDim0Enum { + fn verify(&self) -> bool { + todo!() + } + + fn output_info(&self) -> String { + todo!() + } + + fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn { + todo!() + } + + fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn { + todo!() + } + + fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn { + todo!() + } + + fn ts_min(&self) -> Option { + todo!() + } + + fn ts_max(&self) -> Option { + todo!() + } + + fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box { + todo!() + } + + fn new_empty_evs(&self) -> Box { + todo!() + } + + fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), items_0::MergeError> { + todo!() + } + + fn find_lowest_index_gt_evs(&self, ts: u64) -> Option { + todo!() + } + + fn find_lowest_index_ge_evs(&self, ts: u64) -> Option { + todo!() + } + + fn find_highest_index_lt_evs(&self, ts: u64) -> Option { + todo!() + } + + fn clone_dyn(&self) -> Box { + todo!() + } + + fn partial_eq_dyn(&self, other: &dyn Events) -> bool { + todo!() + } + + fn serde_id(&self) -> &'static str { + todo!() + } + + fn nty_id(&self) -> u32 { + todo!() + } + + fn tss(&self) -> &VecDeque { + todo!() + } + + fn pulses(&self) -> &VecDeque { + todo!() + } + + fn frame_type_id(&self) -> u32 { + todo!() + } + + fn to_min_max_avg(&mut self) -> Box { + todo!() + } + + fn to_json_string(&self) -> String { + todo!() + } + + fn to_json_vec_u8(&self) -> Vec { + self.to_json_string().into_bytes() + } + + fn to_cbor_vec_u8(&self) -> Vec { + // TODO redesign with mut access, rename to `into_` and take the values out. 
+ let ret = EventsDim0EnumChunkOutput { + // TODO use &mut to swap the content + tss: self.tss.clone(), + values: self.values.clone(), + valuestrings: self.valuestrs.clone(), + scalar_type: netpod::EnumVariant::scalar_type_name().into(), + }; + let mut buf = Vec::new(); + ciborium::into_writer(&ret, &mut buf).unwrap(); + buf + } + + fn clear(&mut self) { + todo!() + } + + fn to_dim0_f32_for_binning(&self) -> Box { + todo!("{}::to_dim0_f32_for_binning", self.type_name()) + } + + fn to_container_events(&self) -> Box { + todo!("{}::to_container_events", self.type_name()) + } +} diff --git a/src/eventsdim1.rs b/src/eventsdim1.rs new file mode 100644 index 0000000..9c5a9b5 --- /dev/null +++ b/src/eventsdim1.rs @@ -0,0 +1,691 @@ +use crate::binsdim0::BinsDim0; +use crate::eventsxbindim0::EventsXbinDim0; +use crate::IsoDateTime; +use daqbuf_err as err; +use err::Error; +use items_0::collect_s::CollectableDyn; +use items_0::collect_s::CollectableType; +use items_0::collect_s::CollectedDyn; +use items_0::collect_s::CollectorTy; +use items_0::collect_s::ToJsonResult; +use items_0::container::ByteEstimate; +use items_0::overlap::HasTimestampDeque; +use items_0::scalar_ops::ScalarOps; +use items_0::Appendable; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::Empty; +use items_0::Events; +use items_0::EventsNonObj; +use items_0::MergeError; +use items_0::TypeName; +use items_0::WithLen; +use netpod::is_false; +use netpod::log::*; +use netpod::range::evrange::SeriesRange; +use netpod::timeunits::MS; +use netpod::timeunits::SEC; +use netpod::BinnedRangeEnum; +use serde::Deserialize; +use serde::Serialize; +use std::any; +use std::any::Any; +use std::collections::VecDeque; +use std::fmt; +use std::marker::PhantomData; +use std::mem; + +#[allow(unused)] +macro_rules! 
trace2 { + (EN$($arg:tt)*) => (); + ($($arg:tt)*) => (trace!($($arg)*)); +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct EventsDim1NoPulse { + pub tss: VecDeque, + pub values: VecDeque>, +} + +impl From> for EventsDim1 { + fn from(value: EventsDim1NoPulse) -> Self { + let pulses = vec![0; value.tss.len()].into(); + Self { + tss: value.tss, + pulses, + values: value.values, + } + } +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct EventsDim1 { + pub tss: VecDeque, + pub pulses: VecDeque, + pub values: VecDeque>, +} + +impl EventsDim1 { + #[inline(always)] + pub fn push(&mut self, ts: u64, pulse: u64, value: Vec) { + self.tss.push_back(ts); + self.pulses.push_back(pulse); + self.values.push_back(value); + } + + #[inline(always)] + pub fn push_front(&mut self, ts: u64, pulse: u64, value: Vec) { + self.tss.push_front(ts); + self.pulses.push_front(pulse); + self.values.push_front(value); + } + + pub fn serde_id() -> &'static str { + "EventsDim1" + } + + pub fn tss(&self) -> &VecDeque { + &self.tss + } +} + +impl AsAnyRef for EventsDim1 +where + STY: ScalarOps, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for EventsDim1 +where + STY: ScalarOps, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl Empty for EventsDim1 { + fn empty() -> Self { + Self { + tss: VecDeque::new(), + pulses: VecDeque::new(), + values: VecDeque::new(), + } + } +} + +impl fmt::Debug for EventsDim1 +where + STY: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + if false { + write!( + fmt, + "EventsDim1 {{ count {} ts {:?} vals {:?} }}", + self.tss.len(), + self.tss.iter().map(|x| x / SEC).collect::>(), + self.values, + ) + } else { + write!( + fmt, + "EventsDim1 {{ count {} ts {:?} .. {:?} vals {:?} .. 
{:?} }}", + self.tss.len(), + self.tss.front().map(|x| x / SEC), + self.tss.back().map(|x| x / SEC), + self.values.front(), + self.values.back(), + ) + } + } +} + +impl WithLen for EventsDim1 { + fn len(&self) -> usize { + self.tss.len() + } +} + +impl ByteEstimate for EventsDim1 { + fn byte_estimate(&self) -> u64 { + let stylen = mem::size_of::(); + let n = self.values.front().map_or(0, Vec::len); + (self.len() * (8 + 8 + n * stylen)) as u64 + } +} + +impl HasTimestampDeque for EventsDim1 { + fn timestamp_min(&self) -> Option { + self.tss.front().map(|x| *x) + } + + fn timestamp_max(&self) -> Option { + self.tss.back().map(|x| *x) + } + + fn pulse_min(&self) -> Option { + self.pulses.front().map(|x| *x) + } + + fn pulse_max(&self) -> Option { + self.pulses.back().map(|x| *x) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventsDim1ChunkOutput { + tss: VecDeque, + pulses: VecDeque, + values: VecDeque>, + scalar_type: String, +} + +impl EventsDim1ChunkOutput {} + +#[derive(Debug)] +pub struct EventsDim1Collector { + vals: EventsDim1, + range_final: bool, + timed_out: bool, + needs_continue_at: bool, +} + +impl EventsDim1Collector { + pub fn self_name() -> &'static str { + any::type_name::() + } + + pub fn new() -> Self { + Self { + vals: EventsDim1::empty(), + range_final: false, + timed_out: false, + needs_continue_at: false, + } + } +} + +impl WithLen for EventsDim1Collector { + fn len(&self) -> usize { + WithLen::len(&self.vals) + } +} + +impl ByteEstimate for EventsDim1Collector { + fn byte_estimate(&self) -> u64 { + ByteEstimate::byte_estimate(&self.vals) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventsDim1CollectorOutput { + #[serde(rename = "tsAnchor")] + ts_anchor_sec: u64, + #[serde(rename = "tsMs")] + ts_off_ms: VecDeque, + #[serde(rename = "tsNs")] + ts_off_ns: VecDeque, + #[serde(rename = "pulseAnchor")] + pulse_anchor: u64, + #[serde(rename = "pulseOff")] + pulse_off: VecDeque, + #[serde(rename = "values")] + values: 
VecDeque>, + #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")] + range_final: bool, + #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")] + timed_out: bool, + #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")] + continue_at: Option, +} + +impl EventsDim1CollectorOutput { + pub fn ts_anchor_sec(&self) -> u64 { + self.ts_anchor_sec + } + + pub fn ts_off_ms(&self) -> &VecDeque { + &self.ts_off_ms + } + + pub fn pulse_anchor(&self) -> u64 { + self.pulse_anchor + } + + pub fn pulse_off(&self) -> &VecDeque { + &self.pulse_off + } + + /// Note: only used for unit tests. + pub fn values_to_f32(&self) -> VecDeque> { + self.values + .iter() + .map(|x| x.iter().map(|x| x.as_prim_f32_b()).collect()) + .collect() + } + + pub fn range_final(&self) -> bool { + self.range_final + } + + pub fn timed_out(&self) -> bool { + self.timed_out + } + + pub fn is_valid(&self) -> bool { + if self.ts_off_ms.len() != self.ts_off_ns.len() { + false + } else if self.ts_off_ms.len() != self.pulse_off.len() { + false + } else if self.ts_off_ms.len() != self.values.len() { + false + } else { + true + } + } + + pub fn info_str(&self) -> String { + use fmt::Write; + let mut out = String::new(); + write!( + out, + "ts_off_ms {} ts_off_ns {} pulse_off {} values {}", + self.ts_off_ms.len(), + self.ts_off_ns.len(), + self.pulse_off.len(), + self.values.len(), + ) + .unwrap(); + out + } +} + +impl AsAnyRef for EventsDim1CollectorOutput +where + STY: 'static, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for EventsDim1CollectorOutput +where + STY: 'static, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl TypeName for EventsDim1CollectorOutput { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl WithLen for EventsDim1CollectorOutput { + fn len(&self) -> usize { + self.values.len() + } +} + +impl ToJsonResult for EventsDim1CollectorOutput { + fn 
to_json_value(&self) -> Result { + serde_json::to_value(self) + } +} + +impl CollectedDyn for EventsDim1CollectorOutput {} + +impl CollectorTy for EventsDim1Collector { + type Input = EventsDim1; + type Output = EventsDim1CollectorOutput; + + fn ingest(&mut self, src: &mut Self::Input) { + self.vals.tss.append(&mut src.tss); + self.vals.pulses.append(&mut src.pulses); + self.vals.values.append(&mut src.values); + } + + fn set_range_complete(&mut self) { + self.range_final = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + } + + fn set_continue_at_here(&mut self) { + debug!("{}::set_continue_at_here", Self::self_name()); + self.needs_continue_at = true; + } + + // TODO unify with dim0 case + fn result( + &mut self, + range: Option, + _binrange: Option, + ) -> Result { + // If we timed out, we want to hint the client from where to continue. + // This is tricky: currently, client can not request a left-exclusive range. + // We currently give the timestamp of the last event plus a small delta. + // The amount of the delta must take into account what kind of timestamp precision the client + // can parse and handle. 
+ let vals = &mut self.vals; + let continue_at = if self.timed_out { + if let Some(ts) = vals.tss.back() { + Some(IsoDateTime::from_ns_u64(*ts + MS)) + } else { + if let Some(range) = &range { + match range { + SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)), + SeriesRange::PulseRange(_) => { + error!("TODO emit create continueAt for pulse range"); + Some(IsoDateTime::from_ns_u64(0)) + } + } + } else { + warn!("can not determine continue-at parameters"); + Some(IsoDateTime::from_ns_u64(0)) + } + } + } else { + None + }; + let tss_sl = vals.tss.make_contiguous(); + let pulses_sl = vals.pulses.make_contiguous(); + let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl); + let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl); + let values = mem::replace(&mut vals.values, VecDeque::new()); + if ts_off_ms.len() != ts_off_ns.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + if ts_off_ms.len() != pulse_off.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + if ts_off_ms.len() != values.len() { + return Err(Error::with_msg_no_trace("collected len mismatch")); + } + let ret = Self::Output { + ts_anchor_sec, + ts_off_ms, + ts_off_ns, + pulse_anchor, + pulse_off, + values, + range_final: self.range_final, + timed_out: self.timed_out, + continue_at, + }; + if !ret.is_valid() { + error!("invalid:\n{}", ret.info_str()); + } + Ok(ret) + } +} + +impl CollectableType for EventsDim1 { + type Collector = EventsDim1Collector; + + fn new_collector() -> Self::Collector { + Self::Collector::new() + } +} + +#[derive(Debug)] +pub struct EventsDim1Aggregator { + _last_seen_val: Option, + events_taken_count: u64, + events_ignored_count: u64, +} + +impl Drop for EventsDim1Aggregator { + fn drop(&mut self) { + // TODO collect as stats for the request context: + trace!( + "taken {} ignored {}", + self.events_taken_count, + self.events_ignored_count + ); + } +} + +impl 
EventsDim1Aggregator { + pub fn new(_range: SeriesRange, _do_time_weight: bool) -> Self { + panic!("TODO remove") + } +} + +impl items_0::TypeName for EventsDim1 { + fn type_name(&self) -> String { + let sty = std::any::type_name::(); + format!("EventsDim1<{sty}>") + } +} + +impl EventsNonObj for EventsDim1 { + fn into_tss_pulses(self: Box) -> (VecDeque, VecDeque) { + panic!("TODO remove") + } +} + +impl Events for EventsDim1 { + fn verify(&self) -> bool { + let mut good = true; + let mut ts_max = 0; + for ts in &self.tss { + let ts = *ts; + if ts < ts_max { + good = false; + error!("unordered event data ts {} ts_max {}", ts, ts_max); + } + ts_max = ts_max.max(ts); + } + good + } + + fn output_info(&self) -> String { + let n2 = self.tss.len().max(1) - 1; + format!( + "EventsDim1OutputInfo {{ len {}, ts_min {}, ts_max {} }}", + self.tss.len(), + self.tss.get(0).map_or(-1i64, |&x| x as i64), + self.tss.get(n2).map_or(-1i64, |&x| x as i64), + ) + } + + fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn { + self + } + + fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn { + self + } + + fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn { + self + } + + fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box { + // TODO improve the search + let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count(); + let tss = self.tss.drain(..n1).collect(); + let pulses = self.pulses.drain(..n1).collect(); + let values = self.values.drain(..n1).collect(); + let ret = Self { tss, pulses, values }; + Box::new(ret) + } + + fn new_empty_evs(&self) -> Box { + Box::new(Self::empty()) + } + + fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> { + // TODO as_any and as_any_mut are declared on unrelated traits. Simplify. 
+ if let Some(dst) = dst.as_any_mut().downcast_mut::() { + // TODO make it harder to forget new members when the struct may get modified in the future + let r = range.0..range.1; + dst.tss.extend(self.tss.drain(r.clone())); + dst.pulses.extend(self.pulses.drain(r.clone())); + dst.values.extend(self.values.drain(r.clone())); + Ok(()) + } else { + error!("downcast to EventsDim0 FAILED"); + Err(MergeError::NotCompatible) + } + } + + fn find_lowest_index_gt_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m > ts { + return Some(i); + } + } + None + } + + fn find_lowest_index_ge_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m >= ts { + return Some(i); + } + } + None + } + + fn find_highest_index_lt_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate().rev() { + if m < ts { + return Some(i); + } + } + None + } + + fn ts_min(&self) -> Option { + self.tss.front().map(|&x| x) + } + + fn ts_max(&self) -> Option { + self.tss.back().map(|&x| x) + } + + fn partial_eq_dyn(&self, other: &dyn Events) -> bool { + if let Some(other) = other.as_any_ref().downcast_ref::() { + self == other + } else { + false + } + } + + fn serde_id(&self) -> &'static str { + Self::serde_id() + } + + fn nty_id(&self) -> u32 { + STY::SUB + } + + fn clone_dyn(&self) -> Box { + Box::new(self.clone()) + } + + fn tss(&self) -> &VecDeque { + &self.tss + } + + fn pulses(&self) -> &VecDeque { + &self.pulses + } + + fn frame_type_id(&self) -> u32 { + // TODO make more nice + panic!() + } + + fn to_min_max_avg(&mut self) -> Box { + let mins = self + .values + .iter() + .map(|x| STY::find_vec_min(x)) + .map(|x| x.unwrap_or_else(|| STY::zero_b())) + .collect(); + let maxs = self + .values + .iter() + .map(|x| STY::find_vec_max(x)) + .map(|x| x.unwrap_or_else(|| STY::zero_b())) + .collect(); + let avgs = self + .values + .iter() + .map(|x| STY::avg_vec(x)) + .map(|x| x.unwrap_or_else(|| STY::zero_b())) + .map(|x| 
x.as_prim_f32_b()) + .collect(); + let item = EventsXbinDim0 { + tss: mem::replace(&mut self.tss, VecDeque::new()), + pulses: mem::replace(&mut self.pulses, VecDeque::new()), + mins, + maxs, + avgs, + }; + Box::new(item) + } + + fn to_json_string(&self) -> String { + let ret = EventsDim1ChunkOutput { + // TODO use &mut to swap the content + tss: self.tss.clone(), + pulses: self.pulses.clone(), + values: self.values.clone(), + scalar_type: STY::scalar_type_name().into(), + }; + serde_json::to_string(&ret).unwrap() + } + + fn to_json_vec_u8(&self) -> Vec { + self.to_json_string().into_bytes() + } + + fn to_cbor_vec_u8(&self) -> Vec { + let ret = EventsDim1ChunkOutput { + // TODO use &mut to swap the content + tss: self.tss.clone(), + pulses: self.pulses.clone(), + values: self.values.clone(), + scalar_type: STY::scalar_type_name().into(), + }; + let mut buf = Vec::new(); + ciborium::into_writer(&ret, &mut buf).unwrap(); + buf + } + + fn clear(&mut self) { + self.tss.clear(); + self.pulses.clear(); + self.values.clear(); + } + + fn to_dim0_f32_for_binning(&self) -> Box { + todo!("{}::to_dim0_f32_for_binning", self.type_name()) + } + + fn to_container_events(&self) -> Box { + todo!("{}::to_container_events", self.type_name()) + } +} + +impl Appendable> for EventsDim1 +where + STY: ScalarOps, +{ + fn push(&mut self, ts: u64, pulse: u64, value: Vec) { + Self::push(self, ts, pulse, value) + } +} diff --git a/src/eventsxbindim0.rs b/src/eventsxbindim0.rs new file mode 100644 index 0000000..c2256ea --- /dev/null +++ b/src/eventsxbindim0.rs @@ -0,0 +1,779 @@ +use crate::binsxbindim0::BinsXbinDim0; +use crate::IsoDateTime; +use daqbuf_err as err; +use err::Error; +use items_0::collect_s::CollectableDyn; +use items_0::collect_s::CollectableType; +use items_0::collect_s::CollectedDyn; +use items_0::collect_s::CollectorTy; +use items_0::collect_s::ToJsonResult; +use items_0::container::ByteEstimate; +use items_0::overlap::HasTimestampDeque; +use items_0::scalar_ops::ScalarOps; 
+use items_0::timebin::TimeBinnerTy; +use items_0::AsAnyMut; +use items_0::AsAnyRef; +use items_0::Empty; +use items_0::Events; +use items_0::EventsNonObj; +use items_0::MergeError; +use items_0::TypeName; +use items_0::WithLen; +use netpod::is_false; +use netpod::log::*; +use netpod::range::evrange::NanoRange; +use netpod::range::evrange::SeriesRange; +use netpod::timeunits::SEC; +use netpod::BinnedRangeEnum; +use serde::Deserialize; +use serde::Serialize; +use std::any; +use std::any::Any; +use std::collections::VecDeque; +use std::fmt; +use std::mem; + +#[allow(unused)] +macro_rules! trace_ingest { + ($($arg:tt)*) => {}; + ($($arg:tt)*) => { trace!($($arg)*) }; +} + +#[allow(unused)] +macro_rules! trace2 { + ($($arg:tt)*) => {}; + ($($arg:tt)*) => { trace!($($arg)*) }; +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct EventsXbinDim0 { + pub tss: VecDeque, + pub pulses: VecDeque, + pub mins: VecDeque, + pub maxs: VecDeque, + pub avgs: VecDeque, + // TODO maybe add variance? 
+} + +impl EventsXbinDim0 { + #[inline(always)] + pub fn push(&mut self, ts: u64, pulse: u64, min: NTY, max: NTY, avg: f32) { + self.tss.push_back(ts); + self.pulses.push_back(pulse); + self.mins.push_back(min); + self.maxs.push_back(max); + self.avgs.push_back(avg); + } + + #[inline(always)] + pub fn push_front(&mut self, ts: u64, pulse: u64, min: NTY, max: NTY, avg: f32) { + self.tss.push_front(ts); + self.pulses.push_front(pulse); + self.mins.push_front(min); + self.maxs.push_front(max); + self.avgs.push_front(avg); + } + + pub fn serde_id() -> &'static str { + "EventsXbinDim0" + } +} + +impl TypeName for EventsXbinDim0 { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl fmt::Debug for EventsXbinDim0 +where + STY: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + if false { + write!( + fmt, + "{} {{ count {} ts {:?} vals {:?} }}", + self.type_name(), + self.tss.len(), + self.tss.iter().map(|x| x / SEC).collect::>(), + self.avgs, + ) + } else { + write!( + fmt, + "{} {{ count {} ts {:?} .. {:?} vals {:?} .. 
{:?} }}", + self.type_name(), + self.tss.len(), + self.tss.front().map(|x| x / SEC), + self.tss.back().map(|x| x / SEC), + self.avgs.front(), + self.avgs.back(), + ) + } + } +} + +impl ByteEstimate for EventsXbinDim0 { + fn byte_estimate(&self) -> u64 { + let stylen = mem::size_of::(); + (self.len() * (8 + 8 + 2 * stylen + 4)) as u64 + } +} + +impl Empty for EventsXbinDim0 { + fn empty() -> Self { + Self { + tss: VecDeque::new(), + pulses: VecDeque::new(), + mins: VecDeque::new(), + maxs: VecDeque::new(), + avgs: VecDeque::new(), + } + } +} + +impl AsAnyRef for EventsXbinDim0 +where + STY: ScalarOps, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for EventsXbinDim0 +where + STY: ScalarOps, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl WithLen for EventsXbinDim0 { + fn len(&self) -> usize { + self.tss.len() + } +} + +impl HasTimestampDeque for EventsXbinDim0 { + fn timestamp_min(&self) -> Option { + self.tss.front().map(|x| *x) + } + + fn timestamp_max(&self) -> Option { + self.tss.back().map(|x| *x) + } + + fn pulse_min(&self) -> Option { + self.pulses.front().map(|x| *x) + } + + fn pulse_max(&self) -> Option { + self.pulses.back().map(|x| *x) + } +} + +impl EventsNonObj for EventsXbinDim0 { + fn into_tss_pulses(self: Box) -> (VecDeque, VecDeque) { + info!( + "EventsXbinDim0::into_tss_pulses len {} len {}", + self.tss.len(), + self.pulses.len() + ); + (self.tss, self.pulses) + } +} + +impl Events for EventsXbinDim0 { + fn verify(&self) -> bool { + let mut good = true; + let mut ts_max = 0; + for ts in &self.tss { + let ts = *ts; + if ts < ts_max { + good = false; + error!("unordered event data ts {} ts_max {}", ts, ts_max); + } + ts_max = ts_max.max(ts); + } + good + } + + fn output_info(&self) -> String { + let n2 = self.tss.len().max(1) - 1; + format!( + "EventsXbinDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}", + self.tss.len(), + self.tss.get(0).map_or(-1i64, |&x| x as i64), + self.tss.get(n2).map_or(-1i64, 
|&x| x as i64), + ) + } + + fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn { + self + } + + fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn { + self + } + + fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn { + self + } + + fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box { + // TODO improve the search + let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count(); + let tss = self.tss.drain(..n1).collect(); + let pulses = self.pulses.drain(..n1).collect(); + let mins = self.mins.drain(..n1).collect(); + let maxs = self.maxs.drain(..n1).collect(); + let avgs = self.avgs.drain(..n1).collect(); + let ret = Self { + tss, + pulses, + mins, + maxs, + avgs, + }; + Box::new(ret) + } + + fn new_empty_evs(&self) -> Box { + Box::new(Self::empty()) + } + + fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> { + // TODO as_any and as_any_mut are declared on unrelated traits. Simplify. + if let Some(dst) = dst.as_any_mut().downcast_mut::() { + // TODO make it harder to forget new members when the struct may get modified in the future + let r = range.0..range.1; + dst.tss.extend(self.tss.drain(r.clone())); + dst.pulses.extend(self.pulses.drain(r.clone())); + dst.mins.extend(self.mins.drain(r.clone())); + dst.maxs.extend(self.maxs.drain(r.clone())); + dst.avgs.extend(self.avgs.drain(r.clone())); + Ok(()) + } else { + error!("downcast to {} FAILED", self.type_name()); + Err(MergeError::NotCompatible) + } + } + + fn find_lowest_index_gt_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m > ts { + return Some(i); + } + } + None + } + + fn find_lowest_index_ge_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate() { + if m >= ts { + return Some(i); + } + } + None + } + + fn find_highest_index_lt_evs(&self, ts: u64) -> Option { + for (i, &m) in self.tss.iter().enumerate().rev() { + if m < ts { + return Some(i); + } + } 
+ None + } + + fn ts_min(&self) -> Option { + self.tss.front().map(|&x| x) + } + + fn ts_max(&self) -> Option { + self.tss.back().map(|&x| x) + } + + fn partial_eq_dyn(&self, other: &dyn Events) -> bool { + if let Some(other) = other.as_any_ref().downcast_ref::() { + self == other + } else { + false + } + } + + fn serde_id(&self) -> &'static str { + Self::serde_id() + } + + fn nty_id(&self) -> u32 { + STY::SUB + } + + fn clone_dyn(&self) -> Box { + Box::new(self.clone()) + } + + fn tss(&self) -> &VecDeque { + &self.tss + } + + fn pulses(&self) -> &VecDeque { + &self.pulses + } + + fn frame_type_id(&self) -> u32 { + error!("TODO frame_type_id should not be called"); + // TODO make more nice + panic!() + } + + fn to_min_max_avg(&mut self) -> Box { + let dst = Self { + tss: mem::replace(&mut self.tss, Default::default()), + pulses: mem::replace(&mut self.pulses, Default::default()), + mins: mem::replace(&mut self.mins, Default::default()), + maxs: mem::replace(&mut self.maxs, Default::default()), + avgs: mem::replace(&mut self.avgs, Default::default()), + }; + Box::new(dst) + } + + fn to_json_string(&self) -> String { + todo!() + } + + fn to_json_vec_u8(&self) -> Vec { + todo!() + } + + fn to_cbor_vec_u8(&self) -> Vec { + todo!() + } + + fn clear(&mut self) { + self.tss.clear(); + self.pulses.clear(); + self.mins.clear(); + self.maxs.clear(); + self.avgs.clear(); + } + + fn to_dim0_f32_for_binning(&self) -> Box { + todo!("{}::to_dim0_f32_for_binning", self.type_name()) + } + + fn to_container_events(&self) -> Box { + todo!("{}::to_container_events", self.type_name()) + } +} + +#[derive(Debug)] +pub struct EventsXbinDim0Aggregator +where + STY: ScalarOps, +{ + range: SeriesRange, + /// Number of events which actually fall in this bin. + count: u64, + min: STY, + max: STY, + /// Number of times we accumulated to the sum of this bin. 
+ sumc: u64, + sum: f32, + int_ts: u64, + last_ts: u64, + last_vals: Option<(STY, STY, f32)>, + did_min_max: bool, + do_time_weight: bool, + events_ignored_count: u64, +} + +impl EventsXbinDim0Aggregator +where + STY: ScalarOps, +{ + pub fn type_name() -> &'static str { + std::any::type_name::() + } + + pub fn new(range: SeriesRange, do_time_weight: bool) -> Self { + let int_ts = range.beg_u64(); + Self { + range, + did_min_max: false, + count: 0, + min: STY::zero_b(), + max: STY::zero_b(), + sumc: 0, + sum: 0f32, + int_ts, + last_ts: 0, + last_vals: None, + events_ignored_count: 0, + do_time_weight, + } + } + + fn apply_min_max(&mut self, min: &STY, max: &STY) { + if self.did_min_max != (self.sumc > 0) { + panic!("logic error apply_min_max {} {}", self.did_min_max, self.sumc); + } + if self.sumc == 0 { + self.did_min_max = true; + self.min = min.clone(); + self.max = max.clone(); + } else { + if *min < self.min { + self.min = min.clone(); + } + if *max > self.max { + self.max = max.clone(); + } + } + } + + fn apply_event_unweight(&mut self, avg: f32, min: STY, max: STY) { + //debug!("apply_event_unweight"); + self.apply_min_max(&min, &max); + self.sumc += 1; + let vf = avg; + if vf.is_nan() { + } else { + self.sum += vf; + } + } + + // Only integrate, do not count because it is used even if the event does not fall into current bin. 
+ fn apply_event_time_weight(&mut self, px: u64) { + trace_ingest!( + "apply_event_time_weight px {} count {} sumc {} events_ignored_count {}", + px, + self.count, + self.sumc, + self.events_ignored_count + ); + if let Some((min, max, avg)) = self.last_vals.as_ref() { + let vf = *avg; + { + let min = min.clone(); + let max = max.clone(); + self.apply_min_max(&min, &max); + } + self.sumc += 1; + let w = (px - self.int_ts) as f32 * 1e-9; + if vf.is_nan() { + } else { + self.sum += vf * w; + } + self.int_ts = px; + } else { + debug!("apply_event_time_weight NO VALUE"); + } + } + + fn ingest_unweight(&mut self, item: &EventsXbinDim0) { + /*for i1 in 0..item.tss.len() { + let ts = item.tss[i1]; + let avg = item.avgs[i1]; + let min = item.mins[i1].clone(); + let max = item.maxs[i1].clone(); + if ts < self.range.beg { + } else if ts >= self.range.end { + } else { + self.apply_event_unweight(avg, min, max); + } + }*/ + todo!() + } + + fn ingest_time_weight(&mut self, item: &EventsXbinDim0) { + trace!( + "{} ingest_time_weight range {:?} last_ts {:?} int_ts {:?}", + Self::type_name(), + self.range, + self.last_ts, + self.int_ts + ); + let range_beg = self.range.beg_u64(); + let range_end = self.range.end_u64(); + for (((&ts, min), max), avg) in item + .tss + .iter() + .zip(item.mins.iter()) + .zip(item.maxs.iter()) + .zip(item.avgs.iter()) + { + if ts >= range_end { + self.events_ignored_count += 1; + // TODO break early when tests pass. 
+ //break; + } else if ts >= range_beg { + self.apply_event_time_weight(ts); + self.count += 1; + self.last_ts = ts; + self.last_vals = Some((min.clone(), max.clone(), avg.clone())); + } else { + self.events_ignored_count += 1; + self.last_ts = ts; + self.last_vals = Some((min.clone(), max.clone(), avg.clone())); + } + } + } + + fn result_reset_unweight(&mut self, range: SeriesRange) -> BinsXbinDim0 { + /*let avg = if self.sumc == 0 { + 0f32 + } else { + self.sum / self.sumc as f32 + }; + let ret = BinsXbinDim0::from_content( + [self.range.beg].into(), + [self.range.end].into(), + [self.count].into(), + [self.min.clone()].into(), + [self.max.clone()].into(), + [avg].into(), + ); + self.int_ts = range.beg; + self.range = range; + self.sum = 0f32; + self.sumc = 0; + self.did_min_max = false; + self.min = NTY::zero_b(); + self.max = NTY::zero_b(); + ret*/ + todo!() + } + + fn result_reset_time_weight(&mut self, range: SeriesRange) -> BinsXbinDim0 { + trace!("{} result_reset_time_weight", Self::type_name()); + // TODO check callsite for correct expand status. + if self.range.is_time() { + self.apply_event_time_weight(self.range.end_u64()); + } else { + error!("TODO result_reset_time_weight"); + err::todoval() + } + let range_beg = self.range.beg_u64(); + let range_end = self.range.end_u64(); + let (min, max, avg) = if self.sumc > 0 { + let avg = self.sum / (self.range.delta_u64() as f32 * 1e-9); + (self.min.clone(), self.max.clone(), avg) + } else { + let (min, max, avg) = match &self.last_vals { + Some((min, max, avg)) => { + warn!("\n\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
SHOULD ALWAYS HAVE ACCUMULATED IN THIS CASE"); + (min.clone(), max.clone(), avg.clone()) + } + None => (STY::zero_b(), STY::zero_b(), 0.), + }; + (min, max, avg) + }; + let ret = BinsXbinDim0::from_content( + [range_beg].into(), + [range_end].into(), + [self.count].into(), + [min.clone()].into(), + [max.clone()].into(), + [avg].into(), + ); + self.int_ts = range.beg_u64(); + self.range = range; + self.count = 0; + self.sumc = 0; + self.sum = 0.; + self.did_min_max = false; + self.min = STY::zero_b(); + self.max = STY::zero_b(); + ret + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct EventsXbinDim0CollectorOutput { + #[serde(rename = "tsAnchor")] + ts_anchor_sec: u64, + #[serde(rename = "tsMs")] + ts_off_ms: VecDeque, + #[serde(rename = "tsNs")] + ts_off_ns: VecDeque, + #[serde(rename = "pulseAnchor")] + pulse_anchor: u64, + #[serde(rename = "pulseOff")] + pulse_off: VecDeque, + #[serde(rename = "mins")] + mins: VecDeque, + #[serde(rename = "maxs")] + maxs: VecDeque, + #[serde(rename = "avgs")] + avgs: VecDeque, + #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")] + range_final: bool, + #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")] + timed_out: bool, + #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")] + continue_at: Option, +} + +impl AsAnyRef for EventsXbinDim0CollectorOutput +where + NTY: ScalarOps, +{ + fn as_any_ref(&self) -> &dyn Any { + self + } +} + +impl AsAnyMut for EventsXbinDim0CollectorOutput +where + NTY: ScalarOps, +{ + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } +} + +impl TypeName for EventsXbinDim0CollectorOutput { + fn type_name(&self) -> String { + any::type_name::().into() + } +} + +impl WithLen for EventsXbinDim0CollectorOutput { + fn len(&self) -> usize { + self.mins.len() + } +} + +impl ToJsonResult for EventsXbinDim0CollectorOutput +where + NTY: ScalarOps, +{ + fn to_json_value(&self) -> Result { + serde_json::to_value(self) + } +} + 
+impl CollectedDyn for EventsXbinDim0CollectorOutput where NTY: ScalarOps {} + +#[derive(Debug)] +pub struct EventsXbinDim0Collector { + vals: EventsXbinDim0, + range_final: bool, + timed_out: bool, + needs_continue_at: bool, +} + +impl EventsXbinDim0Collector { + pub fn self_name() -> &'static str { + any::type_name::() + } + + pub fn new() -> Self { + Self { + range_final: false, + timed_out: false, + vals: EventsXbinDim0::empty(), + needs_continue_at: false, + } + } +} + +impl WithLen for EventsXbinDim0Collector { + fn len(&self) -> usize { + WithLen::len(&self.vals) + } +} + +impl ByteEstimate for EventsXbinDim0Collector { + fn byte_estimate(&self) -> u64 { + ByteEstimate::byte_estimate(&self.vals) + } +} + +impl CollectorTy for EventsXbinDim0Collector +where + NTY: ScalarOps, +{ + type Input = EventsXbinDim0; + type Output = EventsXbinDim0CollectorOutput; + + fn ingest(&mut self, src: &mut Self::Input) { + self.vals.tss.append(&mut src.tss); + self.vals.pulses.append(&mut src.pulses); + self.vals.mins.append(&mut src.mins); + self.vals.maxs.append(&mut src.maxs); + self.vals.avgs.append(&mut src.avgs); + } + + fn set_range_complete(&mut self) { + self.range_final = true; + } + + fn set_timed_out(&mut self) { + self.timed_out = true; + } + + fn set_continue_at_here(&mut self) { + self.needs_continue_at = true; + } + + fn result( + &mut self, + range: Option, + _binrange: Option, + ) -> Result { + /*use std::mem::replace; + let continue_at = if self.timed_out { + if let Some(ts) = self.vals.tss.back() { + Some(IsoDateTime::from_u64(*ts + netpod::timeunits::MS)) + } else { + if let Some(range) = &range { + Some(IsoDateTime::from_u64(range.beg + netpod::timeunits::SEC)) + } else { + warn!("can not determine continue-at parameters"); + None + } + } + } else { + None + }; + let mins = replace(&mut self.vals.mins, VecDeque::new()); + let maxs = replace(&mut self.vals.maxs, VecDeque::new()); + let avgs = replace(&mut self.vals.avgs, VecDeque::new()); + 
self.vals.tss.make_contiguous(); + self.vals.pulses.make_contiguous(); + let tst = crate::ts_offs_from_abs(self.vals.tss.as_slices().0); + let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(&self.vals.pulses.as_slices().0); + let ret = Self::Output { + ts_anchor_sec: tst.0, + ts_off_ms: tst.1, + ts_off_ns: tst.2, + pulse_anchor, + pulse_off, + mins, + maxs, + avgs, + range_final: self.range_final, + timed_out: self.timed_out, + continue_at, + }; + Ok(ret)*/ + todo!() + } +} + +impl CollectableType for EventsXbinDim0 +where + NTY: ScalarOps, +{ + type Collector = EventsXbinDim0Collector; + + fn new_collector() -> Self::Collector { + Self::Collector::new() + } +} diff --git a/src/framable.rs b/src/framable.rs new file mode 100644 index 0000000..33629c4 --- /dev/null +++ b/src/framable.rs @@ -0,0 +1,221 @@ +use crate::frame::make_error_frame; +use crate::frame::make_frame_2; +use crate::frame::make_log_frame; +use crate::frame::make_range_complete_frame; +use crate::frame::make_stats_frame; +use bytes::BytesMut; +use daqbuf_err as err; +use items_0::framable::FrameTypeInnerDyn; +use items_0::framable::FrameTypeInnerStatic; +use items_0::streamitem::LogItem; +use items_0::streamitem::RangeCompletableItem; +use items_0::streamitem::Sitemty; +use items_0::streamitem::StatsItem; +use items_0::streamitem::StreamItem; +use items_0::streamitem::ERROR_FRAME_TYPE_ID; +use items_0::streamitem::EVENT_QUERY_JSON_STRING_FRAME; +use items_0::streamitem::SITEMTY_NONSPEC_FRAME_TYPE_ID; +use items_0::Events; +use netpod::log::*; +use serde::de::DeserializeOwned; +use serde::Deserialize; +use serde::Serialize; + +pub const INMEM_FRAME_ENCID: u32 = 0x12121212; +pub const INMEM_FRAME_HEAD: usize = 20; +pub const INMEM_FRAME_FOOT: usize = 4; +pub const INMEM_FRAME_MAGIC: u32 = 0xc6c3b73d; + +#[derive(Debug, thiserror::Error)] +#[cstm(name = "ItemFramable")] +pub enum Error { + Msg(String), + DummyError, + Frame(#[from] crate::frame::Error), +} + +struct ErrMsg(E) +where + E: 
ToString; + +impl From> for Error +where + E: ToString, +{ + fn from(value: ErrMsg) -> Self { + Self::Msg(value.0.to_string()) + } +} + +pub trait FrameTypeStatic { + const FRAME_TYPE_ID: u32; +} + +impl FrameTypeStatic for Sitemty +where + T: FrameTypeInnerStatic, +{ + const FRAME_TYPE_ID: u32 = ::FRAME_TYPE_ID; +} + +// Framable trait objects need some inspection to handle the supposed-to-be common Err ser format: +// Meant to be implemented by Sitemty. +pub trait FrameType { + fn frame_type_id(&self) -> u32; +} + +impl FrameType for Box +where + T: FrameType, +{ + fn frame_type_id(&self) -> u32 { + self.as_ref().frame_type_id() + } +} + +impl FrameType for Box { + fn frame_type_id(&self) -> u32 { + self.as_ref().frame_type_id() + } +} + +pub trait Framable { + fn make_frame_dyn(&self) -> Result; +} + +pub trait FramableInner: erased_serde::Serialize + FrameTypeInnerDyn + Send { + fn _dummy(&self); +} + +impl FramableInner for T { + fn _dummy(&self) {} +} + +impl Framable for Sitemty +where + T: Sized + serde::Serialize + FrameType, +{ + fn make_frame_dyn(&self) -> Result { + match self { + Ok(StreamItem::DataItem(RangeCompletableItem::Data(k))) => { + let frame_type_id = k.frame_type_id(); + make_frame_2(self, frame_type_id).map_err(Error::from) + } + Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)) => { + make_range_complete_frame().map_err(Error::from) + } + Ok(StreamItem::Log(item)) => make_log_frame(item).map_err(Error::from), + Ok(StreamItem::Stats(item)) => make_stats_frame(item).map_err(Error::from), + Err(e) => { + info!("calling make_error_frame for [[{e}]]"); + make_error_frame(e).map_err(Error::from) + } + } + } +} + +impl Framable for Box +where + T: Framable + ?Sized, +{ + fn make_frame_dyn(&self) -> Result { + self.as_ref().make_frame_dyn() + } +} + +pub trait FrameDecodable: FrameTypeStatic + DeserializeOwned { + fn from_error(e: err::Error) -> Self; + fn from_log(item: LogItem) -> Self; + fn from_stats(item: StatsItem) -> Self; + fn 
from_range_complete() -> Self; +} + +impl FrameDecodable for Sitemty +where + T: FrameTypeInnerStatic + DeserializeOwned, +{ + fn from_error(e: err::Error) -> Self { + Err(e) + } + + fn from_log(item: LogItem) -> Self { + Ok(StreamItem::Log(item)) + } + + fn from_stats(item: StatsItem) -> Self { + Ok(StreamItem::Stats(item)) + } + + fn from_range_complete() -> Self { + Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EventQueryJsonStringFrame(pub String); + +impl EventQueryJsonStringFrame { + pub fn str(&self) -> &str { + &self.0 + } +} + +impl FrameTypeInnerStatic for EventQueryJsonStringFrame { + const FRAME_TYPE_ID: u32 = EVENT_QUERY_JSON_STRING_FRAME; +} + +impl FrameType for EventQueryJsonStringFrame { + fn frame_type_id(&self) -> u32 { + EventQueryJsonStringFrame::FRAME_TYPE_ID + } +} + +impl FrameType for Sitemty +where + T: FrameType, +{ + fn frame_type_id(&self) -> u32 { + match self { + Ok(item) => match item { + StreamItem::DataItem(item) => match item { + RangeCompletableItem::RangeComplete => SITEMTY_NONSPEC_FRAME_TYPE_ID, + RangeCompletableItem::Data(item) => item.frame_type_id(), + }, + StreamItem::Log(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID, + StreamItem::Stats(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID, + }, + Err(_) => ERROR_FRAME_TYPE_ID, + } + } +} + +#[test] +fn test_frame_log() { + use crate::channelevents::ChannelEvents; + use crate::frame::decode_from_slice; + use netpod::log::Level; + let item = LogItem { + node_ix: 123, + level: Level::TRACE, + msg: format!("test-log-message"), + }; + let item: Sitemty = Ok(StreamItem::Log(item)); + let buf = Framable::make_frame_dyn(&item).unwrap(); + let len = u32::from_le_bytes(buf[12..16].try_into().unwrap()); + let item2: LogItem = decode_from_slice(&buf[20..20 + len as usize]).unwrap(); +} + +#[test] +fn test_frame_error() { + use crate::channelevents::ChannelEvents; + use crate::frame::json_from_slice; + let item: Sitemty = 
items_0::streamitem::sitem_err_from_string("dummyerror"); + let buf = Framable::make_frame_dyn(&item).unwrap(); + let len = u32::from_le_bytes(buf[12..16].try_into().unwrap()); + let tyid = u32::from_le_bytes(buf[8..12].try_into().unwrap()); + if tyid != ERROR_FRAME_TYPE_ID { + panic!("bad tyid"); + } + eprintln!("buf len {} len {}", buf.len(), len); + let item2: items_0::streamitem::SitemErrTy = json_from_slice(&buf[20..20 + len as usize]).unwrap(); +} diff --git a/src/frame.rs b/src/frame.rs new file mode 100644 index 0000000..3e736b2 --- /dev/null +++ b/src/frame.rs @@ -0,0 +1,433 @@ +use crate::framable::FrameDecodable; +use crate::framable::INMEM_FRAME_ENCID; +use crate::framable::INMEM_FRAME_FOOT; +use crate::framable::INMEM_FRAME_HEAD; +use crate::framable::INMEM_FRAME_MAGIC; +use crate::inmem::InMemoryFrame; +use bincode::config::FixintEncoding; +use bincode::config::LittleEndian; +use bincode::config::RejectTrailing; +use bincode::config::WithOtherEndian; +use bincode::config::WithOtherIntEncoding; +use bincode::config::WithOtherTrailing; +use bincode::DefaultOptions; +use bytes::BufMut; +use bytes::BytesMut; +use daqbuf_err as err; +use items_0::bincode; +use items_0::streamitem::LogItem; +use items_0::streamitem::StatsItem; +use items_0::streamitem::ERROR_FRAME_TYPE_ID; +use items_0::streamitem::LOG_FRAME_TYPE_ID; +use items_0::streamitem::RANGE_COMPLETE_FRAME_TYPE_ID; +use items_0::streamitem::STATS_FRAME_TYPE_ID; +use items_0::streamitem::TERM_FRAME_TYPE_ID; +use netpod::log::*; +use serde::Serialize; +use std::any; +use std::io; + +#[derive(Debug, thiserror::Error)] +#[cstm(name = "ItemFrame")] +pub enum Error { + TooLongPayload(usize), + UnknownEncoder(u32), + #[error("BufferMismatch({0}, {1}, {2})")] + BufferMismatch(u32, usize, u32), + #[error("TyIdMismatch({0}, {1})")] + TyIdMismatch(u32, u32), + Msg(String), + Bincode(#[from] Box), + RmpEnc(#[from] rmp_serde::encode::Error), + RmpDec(#[from] rmp_serde::decode::Error), + ErasedSerde(#[from] 
erased_serde::Error), + Postcard(#[from] postcard::Error), + SerdeJson(#[from] serde_json::Error), +} + +struct ErrMsg(E) +where + E: ToString; + +impl From> for Error +where + E: ToString, +{ + fn from(value: ErrMsg) -> Self { + Self::Msg(value.0.to_string()) + } +} + +pub fn bincode_ser( + w: W, +) -> bincode::Serializer< + W, + WithOtherTrailing< + WithOtherIntEncoding, FixintEncoding>, + RejectTrailing, + >, +> +where + W: io::Write, +{ + use bincode::Options; + let opts = DefaultOptions::new() + .with_little_endian() + .with_fixint_encoding() + .reject_trailing_bytes(); + let ser = bincode::Serializer::new(w, opts); + ser +} + +fn bincode_to_vec(item: S) -> Result, Error> +where + S: Serialize, +{ + let mut out = Vec::new(); + let mut ser = bincode_ser(&mut out); + item.serialize(&mut ser)?; + Ok(out) +} + +fn bincode_from_slice(buf: &[u8]) -> Result +where + T: for<'de> serde::Deserialize<'de>, +{ + use bincode::Options; + let opts = DefaultOptions::new() + .with_little_endian() + .with_fixint_encoding() + .reject_trailing_bytes(); + let mut de = bincode::Deserializer::from_slice(buf, opts); + ::deserialize(&mut de).map_err(Into::into) +} + +fn msgpack_to_vec(item: T) -> Result, Error> +where + T: Serialize, +{ + rmp_serde::to_vec_named(&item).map_err(Error::from) +} + +fn msgpack_erased_to_vec(item: T) -> Result, Error> +where + T: erased_serde::Serialize, +{ + let mut out = Vec::new(); + { + let mut ser1 = rmp_serde::Serializer::new(&mut out).with_struct_map(); + let mut ser2 = ::erase(&mut ser1); + item.erased_serialize(&mut ser2)?; + } + Ok(out) +} + +fn msgpack_from_slice(buf: &[u8]) -> Result +where + T: for<'de> serde::Deserialize<'de>, +{ + rmp_serde::from_slice(buf).map_err(Error::from) +} + +fn postcard_to_vec(item: T) -> Result, Error> +where + T: Serialize, +{ + postcard::to_stdvec(&item).map_err(Error::from) +} + +fn postcard_erased_to_vec(item: T) -> Result, Error> +where + T: erased_serde::Serialize, +{ + use postcard::ser_flavors::Flavor; + 
let mut ser1 = postcard::Serializer { + output: postcard::ser_flavors::AllocVec::new(), + }; + { + let mut ser2 = ::erase(&mut ser1); + item.erased_serialize(&mut ser2) + }?; + let ret = ser1.output.finalize()?; + Ok(ret) +} + +pub fn postcard_from_slice(buf: &[u8]) -> Result +where + T: for<'de> serde::Deserialize<'de>, +{ + Ok(postcard::from_bytes(buf)?) +} + +fn json_to_vec(item: T) -> Result, Error> +where + T: Serialize, +{ + Ok(serde_json::to_vec(&item)?) +} + +pub fn json_from_slice(buf: &[u8]) -> Result +where + T: for<'de> serde::Deserialize<'de>, +{ + Ok(serde_json::from_slice(buf)?) +} + +pub fn encode_to_vec(item: T) -> Result, Error> +where + T: Serialize, +{ + if false { + msgpack_to_vec(item) + } else if false { + bincode_to_vec(item) + } else { + postcard_to_vec(item) + } +} + +pub fn encode_erased_to_vec(item: T) -> Result, Error> +where + T: erased_serde::Serialize, +{ + if false { + msgpack_erased_to_vec(item) + } else { + postcard_erased_to_vec(item) + } +} + +pub fn decode_from_slice(buf: &[u8]) -> Result +where + T: for<'de> serde::Deserialize<'de>, +{ + if false { + msgpack_from_slice(buf) + } else if false { + bincode_from_slice(buf) + } else { + postcard_from_slice(buf) + } +} + +pub fn make_frame_2(item: T, fty: u32) -> Result +where + T: erased_serde::Serialize, +{ + let enc = encode_erased_to_vec(item)?; + if enc.len() > u32::MAX as usize { + return Err(Error::TooLongPayload(enc.len())); + } + let mut h = crc32fast::Hasher::new(); + h.update(&enc); + let payload_crc = h.finalize(); + // TODO reserve also for footer via constant + let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len()); + buf.put_u32_le(INMEM_FRAME_MAGIC); + buf.put_u32_le(INMEM_FRAME_ENCID); + buf.put_u32_le(fty); + buf.put_u32_le(enc.len() as u32); + buf.put_u32_le(payload_crc); + // TODO add padding to align to 8 bytes. 
+ buf.put(enc.as_ref()); + let mut h = crc32fast::Hasher::new(); + h.update(&buf); + let frame_crc = h.finalize(); + buf.put_u32_le(frame_crc); + return Ok(buf); +} + +// TODO remove duplication for these similar `make_*_frame` functions: + +pub fn make_error_frame(error: &err::Error) -> Result { + // error frames are always encoded as json + match json_to_vec(error) { + Ok(enc) => { + let mut h = crc32fast::Hasher::new(); + h.update(&enc); + let payload_crc = h.finalize(); + let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len()); + buf.put_u32_le(INMEM_FRAME_MAGIC); + buf.put_u32_le(INMEM_FRAME_ENCID); + buf.put_u32_le(ERROR_FRAME_TYPE_ID); + buf.put_u32_le(enc.len() as u32); + buf.put_u32_le(payload_crc); + buf.put(enc.as_ref()); + let mut h = crc32fast::Hasher::new(); + h.update(&buf); + let frame_crc = h.finalize(); + buf.put_u32_le(frame_crc); + Ok(buf) + } + Err(e) => Err(e)?, + } +} + +pub fn make_log_frame(item: &LogItem) -> Result { + match encode_to_vec(item) { + Ok(enc) => { + let mut h = crc32fast::Hasher::new(); + h.update(&enc); + let payload_crc = h.finalize(); + let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len()); + buf.put_u32_le(INMEM_FRAME_MAGIC); + buf.put_u32_le(INMEM_FRAME_ENCID); + buf.put_u32_le(LOG_FRAME_TYPE_ID); + buf.put_u32_le(enc.len() as u32); + buf.put_u32_le(payload_crc); + buf.put(enc.as_ref()); + let mut h = crc32fast::Hasher::new(); + h.update(&buf); + let frame_crc = h.finalize(); + buf.put_u32_le(frame_crc); + Ok(buf) + } + Err(e) => Err(e)?, + } +} + +pub fn make_stats_frame(item: &StatsItem) -> Result { + match encode_to_vec(item) { + Ok(enc) => { + let mut h = crc32fast::Hasher::new(); + h.update(&enc); + let payload_crc = h.finalize(); + let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len()); + buf.put_u32_le(INMEM_FRAME_MAGIC); + buf.put_u32_le(INMEM_FRAME_ENCID); + buf.put_u32_le(STATS_FRAME_TYPE_ID); + 
buf.put_u32_le(enc.len() as u32); + buf.put_u32_le(payload_crc); + buf.put(enc.as_ref()); + let mut h = crc32fast::Hasher::new(); + h.update(&buf); + let frame_crc = h.finalize(); + buf.put_u32_le(frame_crc); + Ok(buf) + } + Err(e) => Err(e)?, + } +} + +pub fn make_range_complete_frame() -> Result { + let enc = []; + let mut h = crc32fast::Hasher::new(); + h.update(&enc); + let payload_crc = h.finalize(); + let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len()); + buf.put_u32_le(INMEM_FRAME_MAGIC); + buf.put_u32_le(INMEM_FRAME_ENCID); + buf.put_u32_le(RANGE_COMPLETE_FRAME_TYPE_ID); + buf.put_u32_le(enc.len() as u32); + buf.put_u32_le(payload_crc); + buf.put(enc.as_ref()); + let mut h = crc32fast::Hasher::new(); + h.update(&buf); + let frame_crc = h.finalize(); + buf.put_u32_le(frame_crc); + Ok(buf) +} + +pub fn make_term_frame() -> Result { + let enc = []; + let mut h = crc32fast::Hasher::new(); + h.update(&enc); + let payload_crc = h.finalize(); + let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len()); + buf.put_u32_le(INMEM_FRAME_MAGIC); + buf.put_u32_le(INMEM_FRAME_ENCID); + buf.put_u32_le(TERM_FRAME_TYPE_ID); + buf.put_u32_le(enc.len() as u32); + buf.put_u32_le(payload_crc); + buf.put(enc.as_ref()); + let mut h = crc32fast::Hasher::new(); + h.update(&buf); + let frame_crc = h.finalize(); + buf.put_u32_le(frame_crc); + Ok(buf) +} + +pub fn decode_frame(frame: &InMemoryFrame) -> Result +where + T: FrameDecodable, +{ + if frame.encid() != INMEM_FRAME_ENCID { + return Err(Error::UnknownEncoder(frame.encid())); + } + if frame.len() as usize != frame.buf().len() { + return Err(Error::BufferMismatch(frame.len(), frame.buf().len(), frame.tyid())); + } + if frame.tyid() == ERROR_FRAME_TYPE_ID { + // error frames are always encoded as json + let k: err::Error = match json_from_slice(frame.buf()) { + Ok(item) => item, + Err(e) => { + error!("deserialize len {} ERROR_FRAME_TYPE_ID {}", frame.buf().len(), e); + 
let n = frame.buf().len().min(256); + let s = String::from_utf8_lossy(&frame.buf()[..n]); + error!("frame.buf as string: {:?}", s); + Err(e)? + } + }; + Ok(T::from_error(k)) + } else if frame.tyid() == LOG_FRAME_TYPE_ID { + let k: LogItem = match decode_from_slice(frame.buf()) { + Ok(item) => item, + Err(e) => { + error!("deserialize len {} LOG_FRAME_TYPE_ID {}", frame.buf().len(), e); + let n = frame.buf().len().min(128); + let s = String::from_utf8_lossy(&frame.buf()[..n]); + error!("frame.buf as string: {:?}", s); + Err(e)? + } + }; + Ok(T::from_log(k)) + } else if frame.tyid() == STATS_FRAME_TYPE_ID { + let k: StatsItem = match decode_from_slice(frame.buf()) { + Ok(item) => item, + Err(e) => { + error!("deserialize len {} STATS_FRAME_TYPE_ID {}", frame.buf().len(), e); + let n = frame.buf().len().min(128); + let s = String::from_utf8_lossy(&frame.buf()[..n]); + error!("frame.buf as string: {:?}", s); + Err(e)? + } + }; + Ok(T::from_stats(k)) + } else if frame.tyid() == RANGE_COMPLETE_FRAME_TYPE_ID { + // There is currently no content in this variant. + Ok(T::from_range_complete()) + } else { + let tyid = T::FRAME_TYPE_ID; + if frame.tyid() != tyid { + Err(Error::TyIdMismatch(tyid, frame.tyid())) + } else { + match decode_from_slice(frame.buf()) { + Ok(item) => Ok(item), + Err(e) => { + error!( + "decode_from_slice error len {} tyid {:04x} T {}", + frame.buf().len(), + frame.tyid(), + any::type_name::<T>() + ); + let n = frame.buf().len().min(64); + let s = String::from_utf8_lossy(&frame.buf()[..n]); + error!("decode_from_slice bad frame.buf as bytes: {:?}", &frame.buf()[..n]); + error!("decode_from_slice bad frame.buf as string: {:?}", s); + Err(e)?
+ } + } + } + } +} + +pub fn crchex<T>(t: T) -> String +where + T: AsRef<[u8]>, +{ + let mut h = crc32fast::Hasher::new(); + h.update(t.as_ref()); + let crc = h.finalize(); + format!("{:08x}", crc) +} diff --git a/src/inmem.rs b/src/inmem.rs new file mode 100644 index 0000000..55a00ab --- /dev/null +++ b/src/inmem.rs @@ -0,0 +1,34 @@ +use bytes::Bytes; +use std::fmt; + +pub struct InMemoryFrame { + pub encid: u32, + pub tyid: u32, + pub len: u32, + pub buf: Bytes, +} + +impl InMemoryFrame { + pub fn encid(&self) -> u32 { + self.encid + } + pub fn tyid(&self) -> u32 { + self.tyid + } + pub fn len(&self) -> u32 { + self.len + } + pub fn buf(&self) -> &Bytes { + &self.buf + } +} + +impl fmt::Debug for InMemoryFrame { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + fmt, + "InMemoryFrame {{ encid: {:x} tyid: {:x} len {} }}", + self.encid, self.tyid, self.len + ) + } +} diff --git a/src/items_2.rs b/src/items_2.rs new file mode 100644 index 0000000..300d198 --- /dev/null +++ b/src/items_2.rs @@ -0,0 +1,178 @@ +pub mod accounting; +pub mod binning; +pub mod binsdim0; +pub mod binsxbindim0; +pub mod channelevents; +pub mod empty; +pub mod eventfull; +pub mod eventsdim0; +pub mod eventsdim0enum; +pub mod eventsdim1; +pub mod eventsxbindim0; +pub mod framable; +pub mod frame; +pub mod inmem; +pub mod merger; +pub mod streams; +#[cfg(feature = "heavy")] +#[cfg(test)] +pub mod test; +pub mod testgen; +pub mod transform; + +use channelevents::ChannelEvents; +use daqbuf_err as err; +use futures_util::Stream; +use items_0::isodate::IsoDateTime; +use items_0::streamitem::Sitemty; +use items_0::transform::EventTransform; +use items_0::Empty; +use items_0::Events; +use items_0::MergeError; +use merger::Mergeable; +use netpod::range::evrange::SeriesRange; +use netpod::timeunits::*; +use std::collections::VecDeque; +use std::fmt; + +pub fn ts_offs_from_abs(tss: &[u64]) -> (u64, VecDeque<u64>, VecDeque<u64>) { + let ts_anchor_sec = tss.first().map_or(0, |&k| k) /
SEC; + let ts_anchor_ns = ts_anchor_sec * SEC; + let ts_off_ms: VecDeque<_> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect(); + let ts_off_ns = tss + .iter() + .zip(ts_off_ms.iter().map(|&k| k * MS)) + .map(|(&j, k)| (j - ts_anchor_ns - k)) + .collect(); + (ts_anchor_sec, ts_off_ms, ts_off_ns) +} + +pub fn ts_offs_from_abs_with_anchor(ts_anchor_sec: u64, tss: &[u64]) -> (VecDeque<u64>, VecDeque<u64>) { + let ts_anchor_ns = ts_anchor_sec * SEC; + let ts_off_ms: VecDeque<_> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect(); + let ts_off_ns = tss + .iter() + .zip(ts_off_ms.iter().map(|&k| k * MS)) + .map(|(&j, k)| (j - ts_anchor_ns - k)) + .collect(); + (ts_off_ms, ts_off_ns) +} + +pub fn pulse_offs_from_abs(pulse: &[u64]) -> (u64, VecDeque<u64>) { + let pulse_anchor = pulse.first().map_or(0, |&k| k) / 10000 * 10000; + let pulse_off = pulse.iter().map(|&k| k - pulse_anchor).collect(); + (pulse_anchor, pulse_off) +} + +#[derive(Debug, PartialEq)] +pub enum ErrorKind { + General, + #[allow(unused)] + MismatchedType, +} + +// TODO stack error better +#[derive(Debug, PartialEq)] +pub struct Error { + #[allow(unused)] + kind: ErrorKind, + msg: Option<String>, +} + +impl fmt::Display for Error { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{self:?}") + } +} + +impl From<ErrorKind> for Error { + fn from(kind: ErrorKind) -> Self { + Self { kind, msg: None } + } +} + +impl From<String> for Error { + fn from(msg: String) -> Self { + Self { + msg: Some(msg), + kind: ErrorKind::General, + } + } +} + +// TODO this discards structure +impl From<err::Error> for Error { + fn from(e: err::Error) -> Self { + Self { + msg: Some(format!("{e}")), + kind: ErrorKind::General, + } + } +} + +// TODO this discards structure +impl From<Error> for err::Error { + fn from(e: Error) -> Self { + err::Error::with_msg_no_trace(format!("{e}")) + } +} + +impl std::error::Error for Error {} + +impl serde::de::Error for Error { + fn custom<T>(msg: T) -> Self + where + T: fmt::Display, + { + format!("{msg}").into() + } +} +
+pub fn make_iso_ts(tss: &[u64]) -> Vec<IsoDateTime> { + tss.iter().map(|&k| IsoDateTime::from_ns_u64(k)).collect() +} + +impl Mergeable for Box<dyn Events> { + fn ts_min(&self) -> Option<u64> { + self.as_ref().ts_min() + } + + fn ts_max(&self) -> Option<u64> { + self.as_ref().ts_max() + } + + fn new_empty(&self) -> Self { + self.as_ref().new_empty_evs() + } + + fn clear(&mut self) { + Events::clear(self.as_mut()) + } + + fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> { + self.as_mut().drain_into_evs(dst, range) + } + + fn find_lowest_index_gt(&self, ts: u64) -> Option<usize> { + self.as_ref().find_lowest_index_gt_evs(ts) + } + + fn find_lowest_index_ge(&self, ts: u64) -> Option<usize> { + self.as_ref().find_lowest_index_ge_evs(ts) + } + + fn find_highest_index_lt(&self, ts: u64) -> Option<usize> { + self.as_ref().find_highest_index_lt_evs(ts) + } + + fn tss(&self) -> Vec<netpod::TsMs> { + Events::tss(self) + .iter() + .map(|x| netpod::TsMs::from_ns_u64(*x)) + .collect() + } +} + +pub trait ChannelEventsInput: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {} + +impl<T> ChannelEventsInput for T where T: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {} diff --git a/src/merger.rs b/src/merger.rs new file mode 100644 index 0000000..48f83a6 --- /dev/null +++ b/src/merger.rs @@ -0,0 +1,491 @@ +use crate::Error; +use futures_util::Stream; +use futures_util::StreamExt; +use items_0::container::ByteEstimate; +use items_0::on_sitemty_data; +use items_0::streamitem::sitem_data; +use items_0::streamitem::LogItem; +use items_0::streamitem::RangeCompletableItem; +use items_0::streamitem::Sitemty; +use items_0::streamitem::StreamItem; +use items_0::transform::EventTransform; +use items_0::transform::TransformProperties; +use items_0::transform::WithTransformProperties; +use items_0::Events; +use items_0::MergeError; +use items_0::WithLen; +use netpod::log::*; +use netpod::TsMs; +use std::collections::VecDeque; +use std::fmt; +use std::ops::ControlFlow; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +const
OUT_MAX_BYTES: u64 = 1024 * 200; +const DO_DETECT_NON_MONO: bool = true; + +#[allow(unused)] +macro_rules! trace2 { + ($($arg:tt)*) => {}; + ($($arg:tt)*) => { trace!($($arg)*) }; +} + +#[allow(unused)] +macro_rules! trace3 { + ($($arg:tt)*) => {}; + ($($arg:tt)*) => { trace!($($arg)*) }; +} + +#[allow(unused)] +macro_rules! trace4 { + ($($arg:tt)*) => {}; + ($($arg:tt)*) => { trace!($($arg)*) }; +} + +pub trait Mergeable: fmt::Debug + WithLen + ByteEstimate + Unpin { + fn ts_min(&self) -> Option<u64>; + fn ts_max(&self) -> Option<u64>; + fn new_empty(&self) -> Self; + fn clear(&mut self); + // TODO when MergeError::Full gets returned, any guarantees about what has been modified or kept unchanged? + fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError>; + fn find_lowest_index_gt(&self, ts: u64) -> Option<usize>; + fn find_lowest_index_ge(&self, ts: u64) -> Option<usize>; + fn find_highest_index_lt(&self, ts: u64) -> Option<usize>; + // TODO only for testing: + fn tss(&self) -> Vec<TsMs>; +} + +type MergeInp<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>; + +pub struct Merger<T> { + inps: Vec<Option<MergeInp<T>>>, + items: Vec<Option<T>>, + out: Option<T>, + do_clear_out: bool, + out_max_len: usize, + range_complete: Vec<bool>, + out_of_band_queue: VecDeque<Sitemty<T>>, + log_queue: VecDeque<LogItem>, + dim0ix_max: u64, + done_emit_first_empty: bool, + done_data: bool, + done_buffered: bool, + done_range_complete: bool, + complete: bool, + poll_count: usize, +} + +impl<T> fmt::Debug for Merger<T> +where + T: Mergeable, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let inps: Vec<_> = self.inps.iter().map(|x| x.is_some()).collect(); + fmt.debug_struct(std::any::type_name::<Self>()) + .field("inps", &inps) + .field("items", &self.items) + .field("out_max_len", &self.out_max_len) + .field("range_complete", &self.range_complete) + .field("out_of_band_queue", &self.out_of_band_queue.len()) + .field("done_data", &self.done_data) + .field("done_buffered", &self.done_buffered) + .field("done_range_complete", &self.done_range_complete) + .finish() + } +} +
+impl Merger +where + T: Mergeable, +{ + pub fn new(inps: Vec>, out_max_len: Option) -> Self { + let n = inps.len(); + Self { + inps: inps.into_iter().map(|x| Some(x)).collect(), + items: (0..n).into_iter().map(|_| None).collect(), + out: None, + do_clear_out: false, + out_max_len: out_max_len.unwrap_or(1000) as usize, + range_complete: vec![false; n], + out_of_band_queue: VecDeque::new(), + log_queue: VecDeque::new(), + dim0ix_max: 0, + done_emit_first_empty: false, + done_data: false, + done_buffered: false, + done_range_complete: false, + complete: false, + poll_count: 0, + } + } + + fn drain_into_upto(src: &mut T, dst: &mut T, upto: u64) -> Result<(), MergeError> { + match src.find_lowest_index_gt(upto) { + Some(ilgt) => { + src.drain_into(dst, (0, ilgt))?; + } + None => { + // TODO should not be here. + src.drain_into(dst, (0, src.len()))?; + } + } + Ok(()) + } + + fn take_into_output_all(&mut self, src: &mut T) -> Result<(), MergeError> { + // TODO optimize the case when some large batch should be added to some existing small batch already in out. + // TODO maybe use two output slots? + self.take_into_output_upto(src, u64::MAX) + } + + fn take_into_output_upto(&mut self, src: &mut T, upto: u64) -> Result<(), MergeError> { + // TODO optimize the case when some large batch should be added to some existing small batch already in out. + // TODO maybe use two output slots? 
+ if let Some(out) = self.out.as_mut() { + Self::drain_into_upto(src, out, upto)?; + } else { + trace2!("move into fresh"); + let mut fresh = src.new_empty(); + Self::drain_into_upto(src, &mut fresh, upto)?; + self.out = Some(fresh); + } + Ok(()) + } + + fn process(mut self: Pin<&mut Self>, _cx: &mut Context) -> Result, Error> { + use ControlFlow::*; + trace4!("process"); + let mut log_items = Vec::new(); + let mut tslows = [None, None]; + for (i1, itemopt) in self.items.iter_mut().enumerate() { + if let Some(item) = itemopt { + if let Some(t1) = item.ts_min() { + if let Some((_, a)) = tslows[0] { + if t1 < a { + tslows[1] = tslows[0]; + tslows[0] = Some((i1, t1)); + } else { + if let Some((_, b)) = tslows[1] { + if t1 < b { + tslows[1] = Some((i1, t1)); + } else { + // nothing to do + } + } else { + tslows[1] = Some((i1, t1)); + } + } + } else { + tslows[0] = Some((i1, t1)); + } + } else { + // the item seems empty. + // TODO count for stats. + trace2!("empty item, something to do here?"); + *itemopt = None; + return Ok(Continue(())); + } + } + } + if DO_DETECT_NON_MONO { + if let Some((i1, t1)) = tslows[0].as_ref() { + if *t1 <= self.dim0ix_max { + self.dim0ix_max = *t1; + let item = LogItem { + node_ix: *i1 as _, + level: Level::INFO, + msg: format!( + "dim0ix_max {} vs {} diff {}", + self.dim0ix_max, + t1, + self.dim0ix_max - t1 + ), + }; + log_items.push(item); + } + } + } + trace4!("tslows {tslows:?}"); + if let Some((il0, _tl0)) = tslows[0] { + if let Some((_il1, tl1)) = tslows[1] { + // There is a second input, take only up to the second highest timestamp + let item = self.items[il0].as_mut().unwrap(); + if let Some(th0) = item.ts_max() { + if th0 <= tl1 { + // Can take the whole item + // TODO gather stats about this case. Should be never for databuffer, and often for scylla. 
+ let mut item = self.items[il0].take().unwrap(); + trace3!("Take all from item {item:?}"); + match self.take_into_output_all(&mut item) { + Ok(()) => Ok(Break(())), + Err(MergeError::Full) | Err(MergeError::NotCompatible) => { + // TODO count for stats + trace3!("Put item back"); + self.items[il0] = Some(item); + self.do_clear_out = true; + Ok(Break(())) + } + } + } else { + // Take only up to the lowest ts of the second-lowest input + let mut item = self.items[il0].take().unwrap(); + trace3!("Take up to {tl1} from item {item:?}"); + let res = self.take_into_output_upto(&mut item, tl1); + match res { + Ok(()) => { + if item.len() == 0 { + // TODO should never be here because we should have taken the whole item + Err(format!("Should have taken the whole item instead").into()) + } else { + self.items[il0] = Some(item); + Ok(Break(())) + } + } + Err(MergeError::Full) | Err(MergeError::NotCompatible) => { + // TODO count for stats + info!("Put item back because {res:?}"); + self.items[il0] = Some(item); + self.do_clear_out = true; + Ok(Break(())) + } + } + } + } else { + // TODO should never be here because ts-max should always exist here. 
+ Err(format!("selected input without max ts").into()) + } + } else { + // No other input, take the whole item + let mut item = self.items[il0].take().unwrap(); + trace3!("Take all from item (no other input) {item:?}"); + match self.take_into_output_all(&mut item) { + Ok(()) => Ok(Break(())), + Err(_) => { + // TODO count for stats + trace3!("Put item back"); + self.items[il0] = Some(item); + self.do_clear_out = true; + Ok(Break(())) + } + } + } + } else { + Err(format!("after low ts search nothing found").into()) + } + } + + fn refill(mut self: Pin<&mut Self>, cx: &mut Context) -> Result, Error> { + trace4!("refill"); + use Poll::*; + let mut has_pending = false; + for i in 0..self.inps.len() { + if self.items[i].is_none() { + while let Some(inp) = self.inps[i].as_mut() { + match inp.poll_next_unpin(cx) { + Ready(Some(Ok(k))) => match k { + StreamItem::DataItem(k) => match k { + RangeCompletableItem::Data(k) => { + if self.done_emit_first_empty == false { + trace!("emit first empty marker item"); + self.done_emit_first_empty = true; + let item = k.new_empty(); + let item = sitem_data(item); + self.out_of_band_queue.push_back(item); + } + self.items[i] = Some(k); + trace4!("refilled {}", i); + } + RangeCompletableItem::RangeComplete => { + self.range_complete[i] = true; + trace!("range_complete {:?}", self.range_complete); + continue; + } + }, + StreamItem::Log(item) => { + // TODO limit queue length + self.out_of_band_queue.push_back(Ok(StreamItem::Log(item))); + continue; + } + StreamItem::Stats(item) => { + // TODO limit queue length + self.out_of_band_queue.push_back(Ok(StreamItem::Stats(item))); + continue; + } + }, + Ready(Some(Err(e))) => { + self.inps[i] = None; + return Err(e.into()); + } + Ready(None) => { + self.inps[i] = None; + } + Pending => { + has_pending = true; + } + } + break; + } + } + } + if has_pending { + Ok(Pending) + } else { + Ok(Ready(())) + } + } + + fn poll3(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow>>> { + use 
ControlFlow::*; + use Poll::*; + trace4!("poll3"); + #[allow(unused)] + let ninps = self.inps.iter().filter(|a| a.is_some()).count(); + let nitems = self.items.iter().filter(|a| a.is_some()).count(); + let nitemsmissing = self + .inps + .iter() + .zip(self.items.iter()) + .filter(|(a, b)| a.is_some() && b.is_none()) + .count(); + trace3!("ninps {ninps} nitems {nitems} nitemsmissing {nitemsmissing}"); + if nitemsmissing != 0 { + let e = Error::from(format!("missing but no pending")); + return Break(Ready(Some(Err(e)))); + } + let last_emit = nitems == 0; + if nitems != 0 { + match Self::process(Pin::new(&mut self), cx) { + Ok(Break(())) => {} + Ok(Continue(())) => {} + Err(e) => return Break(Ready(Some(Err(e)))), + } + } + if let Some(o) = self.out.as_ref() { + if o.len() >= self.out_max_len || o.byte_estimate() >= OUT_MAX_BYTES || self.do_clear_out || last_emit { + if o.len() > self.out_max_len { + debug!("MERGER OVERWEIGHT ITEM {} vs {}", o.len(), self.out_max_len); + } + trace3!("decide to output"); + self.do_clear_out = false; + //Break(Ready(Some(Ok(self.out.take().unwrap())))) + let item = sitem_data(self.out.take().unwrap()); + self.out_of_band_queue.push_back(item); + Continue(()) + } else { + trace4!("not enough output yet"); + Continue(()) + } + } else { + trace!("no output candidate"); + if last_emit { + Break(Ready(None)) + } else { + Continue(()) + } + } + } + + fn poll2(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow>>> { + use ControlFlow::*; + use Poll::*; + match Self::refill(Pin::new(&mut self), cx) { + Ok(Ready(())) => Self::poll3(self, cx), + Ok(Pending) => Break(Pending), + Err(e) => Break(Ready(Some(Err(e)))), + } + } +} + +impl Stream for Merger +where + T: Mergeable, +{ + type Item = Sitemty; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + use Poll::*; + self.poll_count += 1; + let span1 = span!(Level::INFO, "Merger", pc = self.poll_count); + let _spg = span1.enter(); + loop { + trace3!("poll"); + break if 
let Some(item) = self.log_queue.pop_front() { + Ready(Some(Ok(StreamItem::Log(item)))) + } else if self.poll_count == usize::MAX { + self.done_range_complete = true; + continue; + } else if self.complete { + panic!("poll after complete"); + } else if self.done_range_complete { + self.complete = true; + Ready(None) + } else if self.done_buffered { + self.done_range_complete = true; + if self.range_complete.iter().all(|x| *x) { + trace!("emit RangeComplete"); + Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)))) + } else { + continue; + } + } else if self.done_data { + trace!("done_data"); + self.done_buffered = true; + if let Some(out) = self.out.take() { + trace!("done_data emit buffered len {}", out.len()); + Ready(Some(sitem_data(out))) + } else { + continue; + } + } else if let Some(item) = self.out_of_band_queue.pop_front() { + let item = on_sitemty_data!(item, |k: T| { + trace3!("emit out-of-band data len {}", k.len()); + sitem_data(k) + }); + Ready(Some(item)) + } else { + match Self::poll2(self.as_mut(), cx) { + ControlFlow::Continue(()) => continue, + ControlFlow::Break(k) => match k { + Ready(Some(Ok(out))) => { + if true { + error!("THIS BRANCH SHOULD NO LONGER OCCUR, REFACTOR"); + self.done_data = true; + let e = Error::from(format!("TODO refactor direct emit in merger")); + return Ready(Some(Err(e.into()))); + } + trace!("emit buffered len {}", out.len()); + Ready(Some(sitem_data(out))) + } + Ready(Some(Err(e))) => { + self.done_data = true; + Ready(Some(Err(e.into()))) + } + Ready(None) => { + self.done_data = true; + continue; + } + Pending => Pending, + }, + } + }; + } + } +} + +impl WithTransformProperties for Merger { + fn query_transform_properties(&self) -> TransformProperties { + todo!() + } +} + +impl EventTransform for Merger +where + T: Send, +{ + fn transform(&mut self, src: Box) -> Box { + todo!() + } +} diff --git a/src/streams.rs b/src/streams.rs new file mode 100644 index 0000000..99a3b78 --- /dev/null +++ 
b/src/streams.rs @@ -0,0 +1,290 @@ +use futures_util::Future; +use futures_util::FutureExt; +use futures_util::Stream; +use futures_util::StreamExt; +use items_0::streamitem::RangeCompletableItem; +use items_0::streamitem::Sitemty; +use items_0::streamitem::StreamItem; +use items_0::transform::EventStreamTrait; +use items_0::transform::EventTransform; +use items_0::transform::TransformProperties; +use items_0::transform::WithTransformProperties; +use items_0::Events; +use std::collections::VecDeque; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +pub struct Enumerate2 { + inp: T, + cnt: usize, +} + +impl Enumerate2 { + pub fn new(inp: T) -> Self + where + T: EventTransform, + { + Self { inp, cnt: 0 } + } +} + +impl Stream for Enumerate2 +where + T: Stream + Unpin, +{ + type Item = (usize, ::Item); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + use Poll::*; + match self.inp.poll_next_unpin(cx) { + Ready(Some(item)) => { + let i = self.cnt; + self.cnt += 1; + Ready(Some((i, item))) + } + Ready(None) => Ready(None), + Pending => Pending, + } + } +} + +impl WithTransformProperties for Enumerate2 +where + T: WithTransformProperties, +{ + fn query_transform_properties(&self) -> TransformProperties { + self.inp.query_transform_properties() + } +} + +impl EventTransform for Enumerate2 +where + T: WithTransformProperties + Send, +{ + fn transform(&mut self, src: Box) -> Box { + todo!() + } +} + +pub struct Then2 { + inp: Pin>, + f: Pin>, + fut: Option>>, +} + +impl Then2 +where + T: Stream, + F: Fn(::Item) -> Fut, +{ + pub fn new(inp: T, f: F) -> Self + where + T: EventTransform, + { + Self { + inp: Box::pin(inp), + f: Box::pin(f), + fut: None, + } + } + + fn prepare_fut(&mut self, item: ::Item) { + self.fut = Some(Box::pin((self.f)(item))); + } +} + +/*impl Unpin for Then2 +where + T: Unpin, + F: Unpin, + Fut: Unpin, +{ +}*/ + +impl Stream for Then2 +where + T: Stream, + F: Fn(::Item) -> Fut, + Fut: Future, +{ + type Item = 
::Output; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + use Poll::*; + loop { + break if let Some(fut) = self.fut.as_mut() { + match fut.poll_unpin(cx) { + Ready(item) => { + self.fut = None; + Ready(Some(item)) + } + Pending => Pending, + } + } else { + match self.inp.poll_next_unpin(cx) { + Ready(Some(item)) => { + self.prepare_fut(item); + continue; + } + Ready(None) => Ready(None), + Pending => Pending, + } + }; + } + } +} + +impl WithTransformProperties for Then2 +where + T: EventTransform, +{ + fn query_transform_properties(&self) -> TransformProperties { + self.inp.query_transform_properties() + } +} + +impl EventTransform for Then2 +where + T: EventTransform + Send, + F: Send, + Fut: Send, +{ + fn transform(&mut self, src: Box) -> Box { + todo!() + } +} + +pub trait TransformerExt { + fn enumerate2(self) -> Enumerate2 + where + Self: EventTransform + Sized; + + fn then2(self, f: F) -> Then2 + where + Self: EventTransform + Stream + Sized, + F: Fn(::Item) -> Fut, + Fut: Future; +} + +impl TransformerExt for T { + fn enumerate2(self) -> Enumerate2 + where + Self: EventTransform + Sized, + { + Enumerate2::new(self) + } + + fn then2(self, f: F) -> Then2 + where + Self: EventTransform + Stream + Sized, + F: Fn(::Item) -> Fut, + Fut: Future, + { + Then2::new(self, f) + } +} + +pub struct VecStream { + inp: VecDeque, +} + +impl VecStream { + pub fn new(inp: VecDeque) -> Self { + Self { inp } + } +} + +impl Stream for VecStream +where + T: Unpin, +{ + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + use Poll::*; + if let Some(item) = self.inp.pop_front() { + Ready(Some(item)) + } else { + Ready(None) + } + } +} + +impl WithTransformProperties for VecStream { + fn query_transform_properties(&self) -> TransformProperties { + todo!() + } +} + +impl EventTransform for VecStream +where + T: Send, +{ + fn transform(&mut self, src: Box) -> Box { + todo!() + } +} + +/// Wrap any event stream and provide 
transformation properties. +pub struct PlainEventStream +where + T: Events, + INP: Stream>, +{ + inp: Pin>, +} + +impl PlainEventStream +where + T: Events, + INP: Stream>, +{ + pub fn new(inp: INP) -> Self { + Self { inp: Box::pin(inp) } + } +} + +impl Stream for PlainEventStream +where + T: Events, + INP: Stream>, +{ + type Item = Sitemty>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + use Poll::*; + match self.inp.poll_next_unpin(cx) { + Ready(Some(item)) => Ready(Some(match item { + Ok(item) => Ok(match item { + StreamItem::DataItem(item) => StreamItem::DataItem(match item { + RangeCompletableItem::RangeComplete => RangeCompletableItem::RangeComplete, + RangeCompletableItem::Data(item) => RangeCompletableItem::Data(Box::new(item)), + }), + StreamItem::Log(item) => StreamItem::Log(item), + StreamItem::Stats(item) => StreamItem::Stats(item), + }), + Err(e) => Err(e), + })), + Ready(None) => Ready(None), + Pending => Pending, + } + } +} + +impl WithTransformProperties for PlainEventStream +where + T: Events, + INP: Stream>, +{ + fn query_transform_properties(&self) -> TransformProperties { + todo!() + } +} + +impl EventStreamTrait for PlainEventStream +where + T: Events, + INP: Stream> + Send, +{ +} diff --git a/src/test.rs b/src/test.rs new file mode 100644 index 0000000..15123b0 --- /dev/null +++ b/src/test.rs @@ -0,0 +1,470 @@ +#[cfg(test)] +pub mod eventsdim0; +#[cfg(test)] +pub mod eventsdim1; + +use crate::channelevents::ConnStatus; +use crate::channelevents::ConnStatusEvent; +use crate::eventsdim0::EventsDim0; +use crate::merger::Mergeable; +use crate::merger::Merger; +use crate::runfut; +use crate::streams::TransformerExt; +use crate::streams::VecStream; +use crate::testgen::make_some_boxed_d0_f32; +use crate::ChannelEvents; +use crate::Error; +use crate::Events; +use futures_util::stream; +use futures_util::StreamExt; +use items_0::streamitem::sitem_data; +use items_0::streamitem::RangeCompletableItem; +use 
items_0::streamitem::Sitemty; +use items_0::streamitem::StreamItem; +use items_0::Appendable; +use items_0::Empty; +use items_0::WithLen; +use netpod::log::*; +use netpod::range::evrange::NanoRange; +use netpod::timeunits::*; +use netpod::BinnedRangeEnum; +use std::time::Duration; +use std::time::Instant; + +#[cfg(test)] +pub fn runfut(fut: F) -> Result +where + F: std::future::Future>, +{ + use futures_util::TryFutureExt; + let fut = fut.map_err(|e| e.into()); + taskrun::run(fut) +} + +#[test] +fn items_move_events() { + let evs = make_some_boxed_d0_f32(10, SEC, SEC, 0, 1846713782); + let v0 = ChannelEvents::Events(evs); + let mut v1 = v0.clone(); + eprintln!("{v1:?}"); + eprintln!("{}", v1.len()); + let mut v2 = v1.new_empty(); + match v1.find_lowest_index_gt(4) { + Some(ilgt) => { + v1.drain_into(&mut v2, (0, ilgt)).unwrap(); + } + None => { + v1.drain_into(&mut v2, (0, v1.len())).unwrap(); + } + } + eprintln!("{}", v1.len()); + eprintln!("{}", v2.len()); + match v1.find_lowest_index_gt(u64::MAX) { + Some(ilgt) => { + v1.drain_into(&mut v2, (0, ilgt)).unwrap(); + } + None => { + v1.drain_into(&mut v2, (0, v1.len())).unwrap(); + } + } + eprintln!("{}", v1.len()); + eprintln!("{}", v2.len()); + eprintln!("{v1:?}"); + eprintln!("{v2:?}"); + assert_eq!(v1.len(), 0); + assert_eq!(v2.len(), 10); + assert_eq!(v2, v0); +} + +#[test] +fn items_merge_00() { + let fut = async { + use crate::merger::Merger; + let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782); + let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893); + let v0 = ChannelEvents::Events(evs0); + let v1 = ChannelEvents::Events(evs1); + let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)])); + let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)])); + let mut merger = Merger::new(vec![stream0, stream1], Some(8)); + while let Some(item) = merger.next().await { + eprintln!("{item:?}"); + } + Ok(()) + }; + runfut(fut).unwrap(); +} + +#[test] +fn items_merge_01() { + let fut 
= async { + use crate::merger::Merger; + let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782); + let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893); + let v0 = ChannelEvents::Events(evs0); + let v1 = ChannelEvents::Events(evs1); + let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect))); + let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect))); + let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect))); + let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)])); + let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)])); + let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)])); + let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8)); + let mut total_event_count = 0; + while let Some(item) = merger.next().await { + eprintln!("{item:?}"); + let item = item?; + match item { + StreamItem::DataItem(item) => match item { + RangeCompletableItem::RangeComplete => {} + RangeCompletableItem::Data(item) => { + total_event_count += item.len(); + } + }, + StreamItem::Log(_) => {} + StreamItem::Stats(_) => {} + } + } + assert_eq!(total_event_count, 23); + Ok(()) + }; + runfut(fut).unwrap(); +} + +#[test] +fn items_merge_02() { + let fut = async { + let evs0 = make_some_boxed_d0_f32(100, SEC * 1, SEC * 2, 0, 1846713782); + let evs1 = make_some_boxed_d0_f32(100, SEC * 2, SEC * 2, 0, 828764893); + let v0 = ChannelEvents::Events(evs0); + let v1 = ChannelEvents::Events(evs1); + let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect))); + let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect))); + let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect))); + let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)])); + let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)])); + let 
stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)])); + let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8)); + let mut total_event_count = 0; + while let Some(item) = merger.next().await { + eprintln!("{item:?}"); + let item = item.unwrap(); + match item { + StreamItem::DataItem(item) => match item { + RangeCompletableItem::RangeComplete => {} + RangeCompletableItem::Data(item) => { + total_event_count += item.len(); + } + }, + StreamItem::Log(_) => {} + StreamItem::Stats(_) => {} + } + } + assert_eq!(total_event_count, 203); + Ok(()) + }; + runfut(fut).unwrap(); +} + +#[test] +fn merge_00() { + let fut = async { + let mut events_vec1: Vec> = Vec::new(); + let mut events_vec2: Vec> = Vec::new(); + { + let mut events = EventsDim0::empty(); + for i in 0..10 { + events.push(i * 100, i, i as f32 * 100.); + } + let cev = ChannelEvents::Events(Box::new(events.clone())); + events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev)))); + let cev = ChannelEvents::Events(Box::new(events.clone())); + events_vec2.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev)))); + } + let inp1 = events_vec1; + let inp1 = futures_util::stream::iter(inp1); + let inp1 = Box::pin(inp1); + let inp2: Vec> = Vec::new(); + let inp2 = futures_util::stream::iter(inp2); + let inp2 = Box::pin(inp2); + let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(32)); + + // Expect an empty first item. 
+ let item = merger.next().await; + let item = match item { + Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item, + _ => panic!(), + }; + assert_eq!(item.len(), 0); + + let item = merger.next().await; + assert_eq!(item.as_ref(), events_vec2.get(0)); + let item = merger.next().await; + assert_eq!(item.as_ref(), None); + Ok(()) + }; + runfut(fut).unwrap(); +} + +#[test] +fn merge_01() { + let fut = async { + let events_vec1 = { + let mut vec = Vec::new(); + let mut events = EventsDim0::empty(); + for i in 0..10 { + events.push(i * 100, i, i as f32 * 100.); + } + push_evd0(&mut vec, Box::new(events.clone())); + let mut events = EventsDim0::empty(); + for i in 10..20 { + events.push(i * 100, i, i as f32 * 100.); + } + push_evd0(&mut vec, Box::new(events.clone())); + vec + }; + let exp = events_vec1.clone(); + let inp1 = events_vec1; + let inp1 = futures_util::stream::iter(inp1); + let inp1 = Box::pin(inp1); + let inp2: Vec> = Vec::new(); + let inp2 = futures_util::stream::iter(inp2); + let inp2 = Box::pin(inp2); + let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10)); + + // Expect an empty first item. 
+ let item = merger.next().await; + let item = match item { + Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item, + _ => panic!(), + }; + assert_eq!(item.len(), 0); + + let item = merger.next().await; + assert_eq!(item.as_ref(), exp.get(0)); + let item = merger.next().await; + assert_eq!(item.as_ref(), exp.get(1)); + let item = merger.next().await; + assert_eq!(item.as_ref(), None); + Ok(()) + }; + runfut(fut).unwrap(); +} + +fn push_evd0(vec: &mut Vec>, events: Box) { + let cev = ChannelEvents::Events(events); + vec.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev)))); +} + +#[test] +fn merge_02() { + let fut = async { + let events_vec1 = { + let mut vec = Vec::new(); + let mut events = EventsDim0::empty(); + for i in 0..10 { + events.push(i * 100, i, i as f32 * 100.); + } + push_evd0(&mut vec, Box::new(events)); + let mut events = EventsDim0::empty(); + for i in 10..20 { + events.push(i * 100, i, i as f32 * 100.); + } + push_evd0(&mut vec, Box::new(events)); + vec + }; + let events_vec2 = { + let mut vec = Vec::new(); + let mut events = EventsDim0::empty(); + for i in 0..10 { + events.push(i * 100, i, i as f32 * 100.); + } + push_evd0(&mut vec, Box::new(events)); + let mut events = EventsDim0::empty(); + for i in 10..12 { + events.push(i * 100, i, i as f32 * 100.); + } + push_evd0(&mut vec, Box::new(events)); + let mut events = EventsDim0::empty(); + for i in 12..20 { + events.push(i * 100, i, i as f32 * 100.); + } + push_evd0(&mut vec, Box::new(events)); + vec + }; + + let inp2_events_a = { + let ev = ConnStatusEvent { + ts: 1199, + datetime: std::time::SystemTime::UNIX_EPOCH, + status: ConnStatus::Disconnect, + }; + let item: Sitemty = Ok(StreamItem::DataItem(RangeCompletableItem::Data( + ChannelEvents::Status(Some(ev)), + ))); + vec![item] + }; + + let inp2_events_b = { + let ev = ConnStatusEvent { + ts: 1199, + datetime: std::time::SystemTime::UNIX_EPOCH, + status: ConnStatus::Disconnect, + }; + let item: Sitemty = 
Ok(StreamItem::DataItem(RangeCompletableItem::Data( + ChannelEvents::Status(Some(ev)), + ))); + vec![item] + }; + + let inp1 = events_vec1; + let inp1 = futures_util::stream::iter(inp1); + let inp1 = Box::pin(inp1); + let inp2: Vec> = inp2_events_a; + let inp2 = futures_util::stream::iter(inp2); + let inp2 = Box::pin(inp2); + let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10)); + + // Expect an empty first item. + let item = merger.next().await; + let item = match item { + Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item, + _ => panic!(), + }; + assert_eq!(item.len(), 0); + + let item = merger.next().await; + assert_eq!(item.as_ref(), events_vec2.get(0)); + let item = merger.next().await; + assert_eq!(item.as_ref(), events_vec2.get(1)); + let item = merger.next().await; + assert_eq!(item.as_ref(), inp2_events_b.get(0)); + let item = merger.next().await; + assert_eq!(item.as_ref(), events_vec2.get(2)); + let item = merger.next().await; + assert_eq!(item.as_ref(), None); + Ok(()) + }; + runfut(fut).unwrap(); +} + +#[test] +fn bin_01() { + const TSBASE: u64 = SEC * 1600000000; + fn val(ts: u64) -> f32 { + 2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32 + } + let fut = async { + let mut events_vec1 = Vec::new(); + let mut t = TSBASE; + for _ in 0..20 { + let mut events = EventsDim0::empty(); + for _ in 0..10 { + events.push(t, t, val(t)); + t += MS * 100; + } + let cev = ChannelEvents::Events(Box::new(events)); + events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev)))); + } + events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))); + let inp1 = events_vec1; + let inp1 = futures_util::stream::iter(inp1); + let inp1 = Box::pin(inp1); + let inp2 = Box::pin(futures_util::stream::empty()) as _; + let stream = crate::merger::Merger::new(vec![inp1, inp2], Some(32)); + // covering_range result is subject to adjustments, instead, manually choose bin edges + let range = 
NanoRange { + beg: TSBASE + SEC * 1, + end: TSBASE + SEC * 10, + }; + // let binrange = BinnedRangeEnum::covering_range(range.into(), 9).map_err(|e| format!("{e}"))?; + // let stream = Box::pin(stream); + // let deadline = Instant::now() + Duration::from_millis(4000); + // let do_time_weight = true; + // let emit_empty_bins = false; + // let res = BinnedCollected::new( + // binrange, + // ScalarType::F32, + // Shape::Scalar, + // do_time_weight, + // emit_empty_bins, + // deadline, + // Box::pin(stream), + // ) + // .await?; + // eprintln!("res {:?}", res); + Ok::<_, Error>(()) + }; + runfut(fut).unwrap(); +} + +#[test] +fn binned_timeout_00() { + if true { + return; + } + // TODO items_2::binnedcollected::BinnedCollected is currently not used. + trace!("binned_timeout_01 uses a delay"); + const TSBASE: u64 = SEC * 1600000000; + fn val(ts: u64) -> f32 { + 2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32 + } + eprintln!("binned_timeout_01 ENTER"); + let fut = async { + eprintln!("binned_timeout_01 IN FUT"); + let mut events_vec1: Vec> = Vec::new(); + let mut t = TSBASE; + for _ in 0..20 { + let mut events = EventsDim0::empty(); + for _ in 0..10 { + events.push(t, t, val(t)); + t += MS * 100; + } + let cev = ChannelEvents::Events(Box::new(events)); + events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev)))); + } + events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))); + let inp1 = VecStream::new(events_vec1.into_iter().collect()); + let inp1 = inp1.enumerate2().then2(|(i, k)| async move { + if i == 5 { + let _ = tokio::time::sleep(Duration::from_millis(10000)).await; + } + k + }); + let edges: Vec<_> = (0..10).into_iter().map(|x| TSBASE + SEC * (1 + x)).collect(); + let range = NanoRange { + beg: TSBASE + SEC * 1, + end: TSBASE + SEC * 10, + }; + let binrange = BinnedRangeEnum::covering_range(range.into(), 9)?; + eprintln!("edges1: {:?}", edges); + //eprintln!("edges2: {:?}", binrange.edges()); + let 
timeout = Duration::from_millis(400); + // let inp1 = Box::pin(inp1); + // let deadline = Instant::now() + timeout; + // let do_time_weight = true; + // let emit_empty_bins = false; + // TODO with new binning + + // let res = BinnedCollected::new( + // binrange, + // ScalarType::F32, + // Shape::Scalar, + // do_time_weight, + // emit_empty_bins, + // deadline, + // inp1, + // ) + // .await?; + // let r2: &BinsDim0CollectedResult = res.result.as_any_ref().downcast_ref().expect("res seems wrong type"); + // eprintln!("rs: {r2:?}"); + // assert_eq!(SEC * r2.ts_anchor_sec(), TSBASE + SEC); + // assert_eq!(r2.counts(), &[10, 10, 10]); + // assert_eq!(r2.mins(), &[3.0, 2.0, 3.0]); + // assert_eq!(r2.maxs(), &[3.2, 2.2, 3.2]); + // assert_eq!(r2.missing_bins(), 6); + // assert_eq!(r2.continue_at(), Some(IsoDateTime::from_ns_u64(TSBASE + SEC * 4))); + Ok::<_, Error>(()) + }; + runfut(fut).unwrap(); +} diff --git a/src/test/eventsdim0.rs b/src/test/eventsdim0.rs new file mode 100644 index 0000000..61e6080 --- /dev/null +++ b/src/test/eventsdim0.rs @@ -0,0 +1,24 @@ +use crate::eventsdim0::EventsDim0; +use items_0::Appendable; +use items_0::Empty; +use items_0::Events; + +#[test] +fn collect_s_00() { + let mut evs = EventsDim0::empty(); + evs.push(123, 4, 1.00f32); + evs.push(124, 5, 1.01); + let mut coll = evs.as_collectable_mut().new_collector(); + coll.ingest(&mut evs); + assert_eq!(coll.len(), 2); +} + +#[test] +fn collect_c_00() { + let mut evs = EventsDim0::empty(); + evs.push(123, 4, 1.00f32); + evs.push(124, 5, 1.01); + let mut coll = evs.as_collectable_with_default_ref().new_collector(); + coll.ingest(&mut evs); + assert_eq!(coll.len(), 2); +} diff --git a/src/test/eventsdim1.rs b/src/test/eventsdim1.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/testgen.rs b/src/testgen.rs new file mode 100644 index 0000000..07d19f4 --- /dev/null +++ b/src/testgen.rs @@ -0,0 +1,25 @@ +use crate::eventsdim0::EventsDim0; +use crate::Events; +use items_0::Appendable; 
+use items_0::Empty; + +#[allow(unused)] +fn xorshift32(state: u32) -> u32 { + let mut x = state; + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + x +} + +pub fn make_some_boxed_d0_f32(n: usize, t0: u64, tstep: u64, tmask: u64, seed: u32) -> Box { + let mut vstate = seed; + let mut events = EventsDim0::empty(); + for i in 0..n { + vstate = xorshift32(vstate); + let ts = t0 + i as u64 * tstep + (vstate as u64 & tmask); + let value = i as f32 * 100. + vstate as f32 / u32::MAX as f32 / 10.; + events.push(ts, ts, value); + } + Box::new(events) +} diff --git a/src/transform.rs b/src/transform.rs new file mode 100644 index 0000000..4a26a81 --- /dev/null +++ b/src/transform.rs @@ -0,0 +1,84 @@ +//! Helper functions to create transforms which act locally on a batch of events. +//! Tailored to the usage pattern given by `TransformQuery`. + +use crate::channelevents::ChannelEvents; +use crate::eventsdim0::EventsDim0; +use items_0::transform::EventTransform; +use items_0::transform::TransformEvent; +use items_0::transform::TransformProperties; +use items_0::transform::WithTransformProperties; +use items_0::Appendable; +use items_0::AsAnyMut; +use items_0::Empty; +use items_0::Events; +use items_0::EventsNonObj; +use netpod::log::*; +use std::mem; + +struct TransformEventIdentity {} + +impl WithTransformProperties for TransformEventIdentity { + fn query_transform_properties(&self) -> TransformProperties { + todo!() + } +} + +impl EventTransform for TransformEventIdentity { + fn transform(&mut self, src: Box) -> Box { + src + } +} + +pub fn make_transform_identity() -> TransformEvent { + TransformEvent(Box::new(TransformEventIdentity {})) +} + +struct TransformEventMinMaxAvg {} + +impl WithTransformProperties for TransformEventMinMaxAvg { + fn query_transform_properties(&self) -> TransformProperties { + todo!() + } +} + +impl EventTransform for TransformEventMinMaxAvg { + fn transform(&mut self, mut src: Box) -> Box { + src.to_min_max_avg() + } +} + +pub fn 
make_transform_min_max_avg() -> TransformEvent { + TransformEvent(Box::new(TransformEventMinMaxAvg {})) +} + +struct TransformEventPulseIdDiff { + pulse_last: Option, +} + +impl WithTransformProperties for TransformEventPulseIdDiff { + fn query_transform_properties(&self) -> TransformProperties { + todo!() + } +} + +impl EventTransform for TransformEventPulseIdDiff { + fn transform(&mut self, src: Box) -> Box { + let (tss, pulses) = EventsNonObj::into_tss_pulses(src); + let mut item = EventsDim0::empty(); + let pulse_last = &mut self.pulse_last; + for (ts, pulse) in tss.into_iter().zip(pulses) { + let value = if let Some(last) = pulse_last { + pulse as i64 - *last as i64 + } else { + 0 + }; + item.push(ts, pulse, value); + *pulse_last = Some(pulse); + } + Box::new(ChannelEvents::Events(Box::new(item))) + } +} + +pub fn make_transform_pulse_id_diff() -> TransformEvent { + TransformEvent(Box::new(TransformEventPulseIdDiff { pulse_last: None })) +}