Factored into separate crate
This commit is contained in:
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
/Cargo.lock
|
||||
/target
|
||||
37
Cargo.toml
Normal file
37
Cargo.toml
Normal file
@@ -0,0 +1,37 @@
|
||||
[package]
|
||||
name = "daqbuf-items-2"
|
||||
version = "0.0.3"
|
||||
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
path = "src/items_2.rs"
|
||||
doctest = false
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
ciborium = "0.2.1"
|
||||
rmp-serde = "1.1.1"
|
||||
postcard = { version = "1.0.0", features = ["use-std"] }
|
||||
erased-serde = "0.4"
|
||||
typetag = "0.2.14"
|
||||
bytes = "1.8"
|
||||
num-traits = "0.2.15"
|
||||
chrono = { version = "0.4.19", features = ["serde"] }
|
||||
crc32fast = "1.3.2"
|
||||
futures-util = "0.3.24"
|
||||
humantime-serde = "1.1.1"
|
||||
thiserror = "=0.0.1"
|
||||
daqbuf-err = { path = "../daqbuf-err" }
|
||||
items_0 = { path = "../daqbuf-items-0", package = "daqbuf-items-0" }
|
||||
items_proc = { path = "../daqbuf-items-proc", package = "daqbuf-items-proc" }
|
||||
netpod = { path = "../daqbuf-netpod", package = "daqbuf-netpod" }
|
||||
parse = { path = "../daqbuf-parse", package = "daqbuf-parse" }
|
||||
bitshuffle = { path = "../daqbuf-bitshuffle", package = "daqbuf-bitshuffle" }
|
||||
|
||||
[patch.crates-io]
|
||||
thiserror = { git = "https://github.com/dominikwerder/thiserror.git", branch = "cstm" }
|
||||
|
||||
[features]
|
||||
heavy = []
|
||||
41
src/accounting.rs
Normal file
41
src/accounting.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
use items_0::Empty;
|
||||
use items_0::Extendable;
|
||||
use items_0::WithLen;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::VecDeque;
|
||||
|
||||
/// Columnar batch of accounting samples.
///
/// The three deques are parallel columns: entry `i` of each deque belongs
/// to the same sample (see `WithLen`, which reports the `tss` length).
#[derive(Debug, Serialize, Deserialize)]
pub struct AccountingEvents {
    // Timestamps — presumably nanoseconds, matching the u64 timestamp
    // columns used elsewhere in this crate; TODO confirm unit.
    pub tss: VecDeque<u64>,
    // Per-sample count value recorded at the matching timestamp.
    pub count: VecDeque<u64>,
    // Per-sample byte tally recorded at the matching timestamp.
    pub bytes: VecDeque<u64>,
}
|
||||
|
||||
impl Empty for AccountingEvents {
    /// Build an `AccountingEvents` whose three columns are all empty.
    fn empty() -> Self {
        Self {
            tss: Default::default(),
            count: Default::default(),
            bytes: Default::default(),
        }
    }
}
|
||||
|
||||
// Length is taken from the `tss` column; the three columns are parallel
// and expected to stay the same length.
impl WithLen for AccountingEvents {
    fn len(&self) -> usize {
        self.tss.len()
    }
}
|
||||
|
||||
impl Extendable for AccountingEvents {
    /// Move every entry of `src` into `self`, leaving `src` empty.
    fn extend_from(&mut self, src: &mut Self) {
        // `VecDeque::append` moves the elements in bulk and leaves the
        // source deque empty — the same observable effect as the previous
        // replace-then-extend sequence, without allocating temporaries.
        self.tss.append(&mut src.tss);
        self.count.append(&mut src.count);
        self.bytes.append(&mut src.bytes);
    }
}
|
||||
11
src/binning.rs
Normal file
11
src/binning.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
pub mod aggregator;
|
||||
pub mod binnedvaluetype;
|
||||
pub mod container_bins;
|
||||
pub mod container_events;
|
||||
pub mod timeweight;
|
||||
pub mod valuetype;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
|
||||
use super::binning as ___;
|
||||
211
src/binning/aggregator.rs
Normal file
211
src/binning/aggregator.rs
Normal file
@@ -0,0 +1,211 @@
|
||||
use super::container_events::EventValueType;
|
||||
use core::fmt;
|
||||
use netpod::log::*;
|
||||
use netpod::DtNano;
|
||||
use netpod::EnumVariant;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_event { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_result { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
|
||||
|
||||
pub trait AggTimeWeightOutputAvg: fmt::Debug + Clone + Send + Serialize + for<'a> Deserialize<'a> {}
|
||||
|
||||
impl AggTimeWeightOutputAvg for u8 {}
|
||||
impl AggTimeWeightOutputAvg for u16 {}
|
||||
impl AggTimeWeightOutputAvg for u32 {}
|
||||
impl AggTimeWeightOutputAvg for u64 {}
|
||||
impl AggTimeWeightOutputAvg for i8 {}
|
||||
impl AggTimeWeightOutputAvg for i16 {}
|
||||
impl AggTimeWeightOutputAvg for i32 {}
|
||||
impl AggTimeWeightOutputAvg for i64 {}
|
||||
impl AggTimeWeightOutputAvg for f32 {}
|
||||
impl AggTimeWeightOutputAvg for f64 {}
|
||||
impl AggTimeWeightOutputAvg for EnumVariant {}
|
||||
impl AggTimeWeightOutputAvg for String {}
|
||||
impl AggTimeWeightOutputAvg for bool {}
|
||||
|
||||
pub trait AggregatorTimeWeight<EVT>: fmt::Debug + Send
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn new() -> Self;
|
||||
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EVT);
|
||||
fn reset_for_new_bin(&mut self);
|
||||
fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> EVT::AggTimeWeightOutputAvg;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct AggregatorNumeric {
|
||||
sum: f64,
|
||||
}
|
||||
|
||||
trait AggWithF64: EventValueType<AggTimeWeightOutputAvg = f64> {
|
||||
fn as_f64(&self) -> f64;
|
||||
}
|
||||
|
||||
impl AggWithF64 for f64 {
|
||||
fn as_f64(&self) -> f64 {
|
||||
*self
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> AggregatorTimeWeight<EVT> for AggregatorNumeric
|
||||
where
|
||||
EVT: AggWithF64,
|
||||
{
|
||||
fn new() -> Self {
|
||||
Self { sum: 0. }
|
||||
}
|
||||
|
||||
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EVT) {
|
||||
let f = dt.ns() as f64 / bl.ns() as f64;
|
||||
trace_event!("INGEST {} {:?}", f, val);
|
||||
self.sum += f * val.as_f64();
|
||||
}
|
||||
|
||||
fn reset_for_new_bin(&mut self) {
|
||||
self.sum = 0.;
|
||||
}
|
||||
|
||||
fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> EVT::AggTimeWeightOutputAvg {
|
||||
let sum = self.sum.clone();
|
||||
trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
|
||||
self.sum = 0.;
|
||||
sum / filled_width_fraction as f64
|
||||
}
|
||||
}
|
||||
|
||||
impl AggregatorTimeWeight<f32> for AggregatorNumeric {
|
||||
fn new() -> Self {
|
||||
Self { sum: 0. }
|
||||
}
|
||||
|
||||
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: f32) {
|
||||
let f = dt.ns() as f64 / bl.ns() as f64;
|
||||
trace_event!("INGEST {} {}", f, val);
|
||||
self.sum += f * val as f64;
|
||||
}
|
||||
|
||||
fn reset_for_new_bin(&mut self) {
|
||||
self.sum = 0.;
|
||||
}
|
||||
|
||||
fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f32 {
|
||||
let sum = self.sum.clone() as f32;
|
||||
trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
|
||||
self.sum = 0.;
|
||||
sum / filled_width_fraction
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_agg_tw_for_agg_num {
|
||||
($evt:ty) => {
|
||||
impl AggregatorTimeWeight<$evt> for AggregatorNumeric {
|
||||
fn new() -> Self {
|
||||
Self { sum: 0. }
|
||||
}
|
||||
|
||||
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: $evt) {
|
||||
let f = dt.ns() as f64 / bl.ns() as f64;
|
||||
trace_event!("INGEST {} {}", f, val);
|
||||
self.sum += f * val as f64;
|
||||
}
|
||||
|
||||
fn reset_for_new_bin(&mut self) {
|
||||
self.sum = 0.;
|
||||
}
|
||||
|
||||
fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
|
||||
let sum = self.sum.clone();
|
||||
trace_result!(
|
||||
"result_and_reset_for_new_bin sum {} {}",
|
||||
sum,
|
||||
filled_width_fraction
|
||||
);
|
||||
self.sum = 0.;
|
||||
sum / filled_width_fraction as f64
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_agg_tw_for_agg_num!(u8);
|
||||
impl_agg_tw_for_agg_num!(u16);
|
||||
impl_agg_tw_for_agg_num!(u32);
|
||||
impl_agg_tw_for_agg_num!(i8);
|
||||
impl_agg_tw_for_agg_num!(i16);
|
||||
impl_agg_tw_for_agg_num!(i32);
|
||||
impl_agg_tw_for_agg_num!(i64);
|
||||
|
||||
impl AggregatorTimeWeight<u64> for AggregatorNumeric {
|
||||
fn new() -> Self {
|
||||
Self { sum: 0. }
|
||||
}
|
||||
|
||||
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: u64) {
|
||||
let f = dt.ns() as f64 / bl.ns() as f64;
|
||||
trace_event!("INGEST {} {}", f, val);
|
||||
self.sum += f * val as f64;
|
||||
}
|
||||
|
||||
fn reset_for_new_bin(&mut self) {
|
||||
self.sum = 0.;
|
||||
}
|
||||
|
||||
fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
|
||||
let sum = self.sum.clone();
|
||||
trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
|
||||
self.sum = 0.;
|
||||
sum / filled_width_fraction as f64
|
||||
}
|
||||
}
|
||||
|
||||
impl AggregatorTimeWeight<bool> for AggregatorNumeric {
|
||||
fn new() -> Self {
|
||||
Self { sum: 0. }
|
||||
}
|
||||
|
||||
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: bool) {
|
||||
let f = dt.ns() as f64 / bl.ns() as f64;
|
||||
trace_event!("INGEST {} {}", f, val);
|
||||
self.sum += f * val as u8 as f64;
|
||||
}
|
||||
|
||||
fn reset_for_new_bin(&mut self) {
|
||||
self.sum = 0.;
|
||||
}
|
||||
|
||||
fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
|
||||
let sum = self.sum.clone();
|
||||
trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
|
||||
self.sum = 0.;
|
||||
sum / filled_width_fraction as f64
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE(review): the "average" of a String channel is defined here as the
// time-weighted average of the string's byte length (`val.len()`), returned
// as f64. That looks like a placeholder semantic — confirm this is the
// intended behavior for string-typed channels.
impl AggregatorTimeWeight<String> for AggregatorNumeric {
    fn new() -> Self {
        Self { sum: 0. }
    }

    // Weight the string's byte length by the covered bin fraction dt / bl.
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: String) {
        let f = dt.ns() as f64 / bl.ns() as f64;
        trace_event!("INGEST {} {}", f, val);
        self.sum += f * val.len() as f64;
    }

    fn reset_for_new_bin(&mut self) {
        self.sum = 0.;
    }

    // Rescale by the filled fraction of the bin and reset the accumulator.
    fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
        let sum = self.sum.clone();
        trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
        self.sum = 0.;
        sum / filled_width_fraction as f64
    }
}
|
||||
8
src/binning/binnedvaluetype.rs
Normal file
8
src/binning/binnedvaluetype.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
pub trait BinnedValueType {}
|
||||
|
||||
pub struct BinnedNumericValue<EVT> {
|
||||
avg: f32,
|
||||
_t: Option<EVT>,
|
||||
}
|
||||
|
||||
impl<EVT> BinnedValueType for BinnedNumericValue<EVT> {}
|
||||
653
src/binning/container_bins.rs
Normal file
653
src/binning/container_bins.rs
Normal file
@@ -0,0 +1,653 @@
|
||||
use super::aggregator::AggregatorNumeric;
|
||||
use super::aggregator::AggregatorTimeWeight;
|
||||
use super::container_events::EventValueType;
|
||||
use super::___;
|
||||
use crate::ts_offs_from_abs;
|
||||
use crate::ts_offs_from_abs_with_anchor;
|
||||
use core::fmt;
|
||||
use daqbuf_err as err;
|
||||
use err::thiserror;
|
||||
use err::ThisError;
|
||||
use items_0::collect_s::CollectableDyn;
|
||||
use items_0::collect_s::CollectedDyn;
|
||||
use items_0::collect_s::ToJsonResult;
|
||||
use items_0::timebin::BinningggContainerBinsDyn;
|
||||
use items_0::timebin::BinsBoxed;
|
||||
use items_0::vecpreview::VecPreview;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::AsAnyRef;
|
||||
use items_0::TypeName;
|
||||
use items_0::WithLen;
|
||||
use netpod::log::*;
|
||||
use netpod::EnumVariant;
|
||||
use netpod::TsNano;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::collections::VecDeque;
|
||||
use std::mem;
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
|
||||
#[derive(Debug, ThisError)]
|
||||
#[cstm(name = "ContainerBins")]
|
||||
pub enum ContainerBinsError {
|
||||
Unordered,
|
||||
}
|
||||
|
||||
pub trait BinValueType: fmt::Debug + Clone + PartialOrd {
|
||||
// type Container: Container<Self>;
|
||||
// type AggregatorTimeWeight: AggregatorTimeWeight<Self>;
|
||||
// type AggTimeWeightOutputAvg;
|
||||
|
||||
// fn identity_sum() -> Self;
|
||||
// fn add_weighted(&self, add: &Self, f: f32) -> Self;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BinSingle<EVT> {
|
||||
pub ts1: TsNano,
|
||||
pub ts2: TsNano,
|
||||
pub cnt: u64,
|
||||
pub min: EVT,
|
||||
pub max: EVT,
|
||||
pub avg: f32,
|
||||
pub lst: EVT,
|
||||
pub fnl: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BinRef<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub ts1: TsNano,
|
||||
pub ts2: TsNano,
|
||||
pub cnt: u64,
|
||||
pub min: &'a EVT,
|
||||
pub max: &'a EVT,
|
||||
pub avg: &'a EVT::AggTimeWeightOutputAvg,
|
||||
pub lst: &'a EVT,
|
||||
pub fnl: bool,
|
||||
}
|
||||
|
||||
pub struct IterDebug<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
bins: &'a ContainerBins<EVT>,
|
||||
ix: usize,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl<'a, EVT> Iterator for IterDebug<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
type Item = BinRef<'a, EVT>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.ix < self.bins.len() && self.ix < self.len {
|
||||
let b = &self.bins;
|
||||
let i = self.ix;
|
||||
self.ix += 1;
|
||||
let ret = BinRef {
|
||||
ts1: b.ts1s[i],
|
||||
ts2: b.ts2s[i],
|
||||
cnt: b.cnts[i],
|
||||
min: &b.mins[i],
|
||||
max: &b.maxs[i],
|
||||
avg: &b.avgs[i],
|
||||
lst: &b.lsts[i],
|
||||
fnl: b.fnls[i],
|
||||
};
|
||||
Some(ret)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
ts1s: VecDeque<TsNano>,
|
||||
ts2s: VecDeque<TsNano>,
|
||||
cnts: VecDeque<u64>,
|
||||
mins: VecDeque<EVT>,
|
||||
maxs: VecDeque<EVT>,
|
||||
avgs: VecDeque<EVT::AggTimeWeightOutputAvg>,
|
||||
lsts: VecDeque<EVT>,
|
||||
fnls: VecDeque<bool>,
|
||||
}
|
||||
|
||||
impl<EVT> ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub fn from_constituents(
|
||||
ts1s: VecDeque<TsNano>,
|
||||
ts2s: VecDeque<TsNano>,
|
||||
cnts: VecDeque<u64>,
|
||||
mins: VecDeque<EVT>,
|
||||
maxs: VecDeque<EVT>,
|
||||
avgs: VecDeque<EVT::AggTimeWeightOutputAvg>,
|
||||
lsts: VecDeque<EVT>,
|
||||
fnls: VecDeque<bool>,
|
||||
) -> Self {
|
||||
Self {
|
||||
ts1s,
|
||||
ts2s,
|
||||
cnts,
|
||||
mins,
|
||||
maxs,
|
||||
avgs,
|
||||
lsts,
|
||||
fnls,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn type_name() -> &'static str {
|
||||
any::type_name::<Self>()
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
ts1s: VecDeque::new(),
|
||||
ts2s: VecDeque::new(),
|
||||
cnts: VecDeque::new(),
|
||||
mins: VecDeque::new(),
|
||||
maxs: VecDeque::new(),
|
||||
avgs: VecDeque::new(),
|
||||
lsts: VecDeque::new(),
|
||||
fnls: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.ts1s.len()
|
||||
}
|
||||
|
||||
pub fn verify(&self) -> Result<(), ContainerBinsError> {
|
||||
if self.ts1s.iter().zip(self.ts1s.iter().skip(1)).any(|(&a, &b)| a > b) {
|
||||
return Err(ContainerBinsError::Unordered);
|
||||
}
|
||||
if self.ts2s.iter().zip(self.ts2s.iter().skip(1)).any(|(&a, &b)| a > b) {
|
||||
return Err(ContainerBinsError::Unordered);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// First bin-start timestamp, if any bins are present.
pub fn ts1_first(&self) -> Option<TsNano> {
    // `.copied()` replaces the manual `map(|&x| x)` idiom.
    self.ts1s.front().copied()
}
|
||||
|
||||
/// Last bin-end timestamp, if any bins are present.
pub fn ts2_last(&self) -> Option<TsNano> {
    // `.copied()` replaces the manual `map(|&x| x)` idiom.
    self.ts2s.back().copied()
}
|
||||
|
||||
pub fn ts1s_iter(&self) -> std::collections::vec_deque::Iter<TsNano> {
|
||||
self.ts1s.iter()
|
||||
}
|
||||
|
||||
pub fn ts2s_iter(&self) -> std::collections::vec_deque::Iter<TsNano> {
|
||||
self.ts2s.iter()
|
||||
}
|
||||
|
||||
pub fn cnts_iter(&self) -> std::collections::vec_deque::Iter<u64> {
|
||||
self.cnts.iter()
|
||||
}
|
||||
|
||||
pub fn mins_iter(&self) -> std::collections::vec_deque::Iter<EVT> {
|
||||
self.mins.iter()
|
||||
}
|
||||
|
||||
pub fn maxs_iter(&self) -> std::collections::vec_deque::Iter<EVT> {
|
||||
self.maxs.iter()
|
||||
}
|
||||
|
||||
pub fn avgs_iter(&self) -> std::collections::vec_deque::Iter<EVT::AggTimeWeightOutputAvg> {
|
||||
self.avgs.iter()
|
||||
}
|
||||
|
||||
pub fn fnls_iter(&self) -> std::collections::vec_deque::Iter<bool> {
|
||||
self.fnls.iter()
|
||||
}
|
||||
|
||||
pub fn zip_iter(
|
||||
&self,
|
||||
) -> std::iter::Zip<
|
||||
std::iter::Zip<
|
||||
std::iter::Zip<
|
||||
std::iter::Zip<
|
||||
std::iter::Zip<
|
||||
std::iter::Zip<
|
||||
std::collections::vec_deque::Iter<TsNano>,
|
||||
std::collections::vec_deque::Iter<TsNano>,
|
||||
>,
|
||||
std::collections::vec_deque::Iter<u64>,
|
||||
>,
|
||||
std::collections::vec_deque::Iter<EVT>,
|
||||
>,
|
||||
std::collections::vec_deque::Iter<EVT>,
|
||||
>,
|
||||
std::collections::vec_deque::Iter<EVT::AggTimeWeightOutputAvg>,
|
||||
>,
|
||||
std::collections::vec_deque::Iter<bool>,
|
||||
> {
|
||||
self.ts1s_iter()
|
||||
.zip(self.ts2s_iter())
|
||||
.zip(self.cnts_iter())
|
||||
.zip(self.mins_iter())
|
||||
.zip(self.maxs_iter())
|
||||
.zip(self.avgs_iter())
|
||||
.zip(self.fnls_iter())
|
||||
}
|
||||
|
||||
pub fn edges_iter(
|
||||
&self,
|
||||
) -> std::iter::Zip<std::collections::vec_deque::Iter<TsNano>, std::collections::vec_deque::Iter<TsNano>> {
|
||||
self.ts1s.iter().zip(self.ts2s.iter())
|
||||
}
|
||||
|
||||
pub fn len_before(&self, end: TsNano) -> usize {
|
||||
let pp = self.ts2s.partition_point(|&x| x <= end);
|
||||
assert!(pp <= self.len(), "len_before pp {} len {}", pp, self.len());
|
||||
pp
|
||||
}
|
||||
|
||||
/// Remove and return the oldest bin as a `BinSingle`.
///
/// Not yet implemented: `BinSingle::avg` is `f32` while this container
/// stores `EVT::AggTimeWeightOutputAvg` in `avgs`, so a conversion
/// strategy is still needed — TODO confirm the intended mapping.
pub fn pop_front(&mut self) -> Option<BinSingle<EVT>> {
    // The previous draft popped `ts1s`/`ts2s` *after* an unconditional
    // `todo!()`, leaving unreachable dead code. Keep only the panic until
    // the avg-type question above is settled; popping columns here would
    // lose data when the caller catches the unwind.
    todo!("pop_front");
}
|
||||
|
||||
pub fn push_back(
|
||||
&mut self,
|
||||
ts1: TsNano,
|
||||
ts2: TsNano,
|
||||
cnt: u64,
|
||||
min: EVT,
|
||||
max: EVT,
|
||||
avg: EVT::AggTimeWeightOutputAvg,
|
||||
lst: EVT,
|
||||
fnl: bool,
|
||||
) {
|
||||
self.ts1s.push_back(ts1);
|
||||
self.ts2s.push_back(ts2);
|
||||
self.cnts.push_back(cnt);
|
||||
self.mins.push_back(min);
|
||||
self.maxs.push_back(max);
|
||||
self.avgs.push_back(avg);
|
||||
self.lsts.push_back(lst);
|
||||
self.fnls.push_back(fnl);
|
||||
}
|
||||
|
||||
pub fn iter_debug(&self) -> IterDebug<EVT> {
|
||||
IterDebug {
|
||||
bins: self,
|
||||
ix: 0,
|
||||
len: self.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> fmt::Debug for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
let self_name = any::type_name::<Self>();
|
||||
write!(
|
||||
fmt,
|
||||
"{self_name} {{ len: {:?}, ts1s: {:?}, ts2s: {:?}, cnts: {:?}, avgs {:?}, fnls {:?} }}",
|
||||
self.len(),
|
||||
VecPreview::new(&self.ts1s),
|
||||
VecPreview::new(&self.ts2s),
|
||||
VecPreview::new(&self.cnts),
|
||||
VecPreview::new(&self.avgs),
|
||||
VecPreview::new(&self.fnls),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> fmt::Display for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt::Debug::fmt(self, fmt)
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> AsAnyMut for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> WithLen for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
Self::len(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> TypeName for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn type_name(&self) -> String {
|
||||
BinningggContainerBinsDyn::type_name(self).into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> AsAnyRef for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ContainerBinsCollectorOutput<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
bins: ContainerBins<EVT>,
|
||||
}
|
||||
|
||||
impl<EVT> TypeName for ContainerBinsCollectorOutput<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> AsAnyRef for ContainerBinsCollectorOutput<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> AsAnyMut for ContainerBinsCollectorOutput<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> WithLen for ContainerBinsCollectorOutput<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.bins.len()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
struct ContainerBinsCollectorOutputUser<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
#[serde(rename = "tsAnchor")]
|
||||
ts_anchor_sec: u64,
|
||||
#[serde(rename = "ts1Ms")]
|
||||
ts1_off_ms: VecDeque<u64>,
|
||||
#[serde(rename = "ts2Ms")]
|
||||
ts2_off_ms: VecDeque<u64>,
|
||||
#[serde(rename = "ts1Ns")]
|
||||
ts1_off_ns: VecDeque<u64>,
|
||||
#[serde(rename = "ts2Ns")]
|
||||
ts2_off_ns: VecDeque<u64>,
|
||||
#[serde(rename = "counts")]
|
||||
counts: VecDeque<u64>,
|
||||
#[serde(rename = "mins")]
|
||||
mins: VecDeque<EVT>,
|
||||
#[serde(rename = "maxs")]
|
||||
maxs: VecDeque<EVT>,
|
||||
#[serde(rename = "avgs")]
|
||||
avgs: VecDeque<EVT::AggTimeWeightOutputAvg>,
|
||||
// #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
|
||||
// range_final: bool,
|
||||
// #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
|
||||
// timed_out: bool,
|
||||
// #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")]
|
||||
// missing_bins: u32,
|
||||
// #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
|
||||
// continue_at: Option<IsoDateTime>,
|
||||
// #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")]
|
||||
// finished_at: Option<IsoDateTime>,
|
||||
}
|
||||
|
||||
impl<EVT> ToJsonResult for ContainerBinsCollectorOutput<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
|
||||
let bins = &self.bins;
|
||||
let ts1sns: Vec<_> = bins.ts1s.iter().map(|x| x.ns()).collect();
|
||||
let ts2sns: Vec<_> = bins.ts2s.iter().map(|x| x.ns()).collect();
|
||||
let (ts_anch, ts1ms, ts1ns) = ts_offs_from_abs(&ts1sns);
|
||||
let (ts2ms, ts2ns) = ts_offs_from_abs_with_anchor(ts_anch, &ts2sns);
|
||||
let counts = bins.cnts.clone();
|
||||
let mins = bins.mins.clone();
|
||||
let maxs = bins.maxs.clone();
|
||||
let avgs = bins.avgs.clone();
|
||||
let val = ContainerBinsCollectorOutputUser::<EVT> {
|
||||
ts_anchor_sec: ts_anch,
|
||||
ts1_off_ms: ts1ms,
|
||||
ts2_off_ms: ts2ms,
|
||||
ts1_off_ns: ts1ns,
|
||||
ts2_off_ns: ts2ns,
|
||||
counts,
|
||||
mins,
|
||||
maxs,
|
||||
avgs,
|
||||
};
|
||||
serde_json::to_value(&val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> CollectedDyn for ContainerBinsCollectorOutput<EVT> where EVT: EventValueType {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ContainerBinsCollector<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
bins: ContainerBins<EVT>,
|
||||
timed_out: bool,
|
||||
range_final: bool,
|
||||
}
|
||||
|
||||
impl<EVT> ContainerBinsCollector<EVT> where EVT: EventValueType {}
|
||||
|
||||
impl<EVT> WithLen for ContainerBinsCollector<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.bins.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> items_0::container::ByteEstimate for ContainerBinsCollector<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
// TODO need better estimate
|
||||
self.bins.len() as u64 * 200
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> items_0::collect_s::CollectorDyn for ContainerBinsCollector<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn ingest(&mut self, src: &mut dyn CollectableDyn) {
|
||||
if let Some(src) = src.as_any_mut().downcast_mut::<ContainerBins<EVT>>() {
|
||||
src.drain_into(&mut self.bins, 0..src.len());
|
||||
} else {
|
||||
let srcn = src.type_name();
|
||||
panic!("wrong src type {srcn}");
|
||||
}
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_final = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
}
|
||||
|
||||
fn set_continue_at_here(&mut self) {
|
||||
debug!("TODO remember the continue at");
|
||||
}
|
||||
|
||||
fn result(
|
||||
&mut self,
|
||||
range: Option<netpod::range::evrange::SeriesRange>,
|
||||
binrange: Option<netpod::BinnedRangeEnum>,
|
||||
) -> Result<Box<dyn items_0::collect_s::CollectedDyn>, err::Error> {
|
||||
// TODO do we need to set timeout, continueAt or anything?
|
||||
let bins = mem::replace(&mut self.bins, ContainerBins::new());
|
||||
let ret = ContainerBinsCollectorOutput { bins };
|
||||
Ok(Box::new(ret))
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> CollectableDyn for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn new_collector(&self) -> Box<dyn items_0::collect_s::CollectorDyn> {
|
||||
let ret = ContainerBinsCollector::<EVT> {
|
||||
bins: ContainerBins::new(),
|
||||
timed_out: false,
|
||||
range_final: false,
|
||||
};
|
||||
Box::new(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> BinningggContainerBinsDyn for ContainerBins<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn type_name(&self) -> &'static str {
|
||||
any::type_name::<Self>()
|
||||
}
|
||||
|
||||
fn empty(&self) -> BinsBoxed {
|
||||
Box::new(Self::new())
|
||||
}
|
||||
|
||||
fn clone(&self) -> BinsBoxed {
|
||||
Box::new(<Self as Clone>::clone(self))
|
||||
}
|
||||
|
||||
fn edges_iter(
|
||||
&self,
|
||||
) -> std::iter::Zip<std::collections::vec_deque::Iter<TsNano>, std::collections::vec_deque::Iter<TsNano>> {
|
||||
self.ts1s.iter().zip(self.ts2s.iter())
|
||||
}
|
||||
|
||||
fn drain_into(&mut self, dst: &mut dyn BinningggContainerBinsDyn, range: std::ops::Range<usize>) {
|
||||
let obj = dst.as_any_mut();
|
||||
if let Some(dst) = obj.downcast_mut::<Self>() {
|
||||
dst.ts1s.extend(self.ts1s.drain(range.clone()));
|
||||
dst.ts2s.extend(self.ts2s.drain(range.clone()));
|
||||
dst.cnts.extend(self.cnts.drain(range.clone()));
|
||||
dst.mins.extend(self.mins.drain(range.clone()));
|
||||
dst.maxs.extend(self.maxs.drain(range.clone()));
|
||||
dst.avgs.extend(self.avgs.drain(range.clone()));
|
||||
dst.lsts.extend(self.lsts.drain(range.clone()));
|
||||
dst.fnls.extend(self.fnls.drain(range.clone()));
|
||||
} else {
|
||||
let styn = any::type_name::<EVT>();
|
||||
panic!("unexpected drain EVT {} dst {}", styn, Self::type_name());
|
||||
}
|
||||
}
|
||||
|
||||
// Stub: presumably intended to normalize/repair numeric bin values
// (mins/maxs/avgs) in place, but the loop body is empty so this is
// currently a no-op — TODO confirm intended behavior before relying on it.
fn fix_numerics(&mut self) {
    // NOTE(review): `min`, `max`, `avg` are bound but unused; the zipped
    // mutable iteration is scaffolding for a future fix-up pass.
    for ((min, max), avg) in self.mins.iter_mut().zip(self.maxs.iter_mut()).zip(self.avgs.iter_mut()) {}
}
|
||||
}
|
||||
|
||||
pub struct ContainerBinsTakeUpTo<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
evs: &'a mut ContainerBins<EVT>,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl<'a, EVT> ContainerBinsTakeUpTo<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub fn new(evs: &'a mut ContainerBins<EVT>, len: usize) -> Self {
|
||||
let len = len.min(evs.len());
|
||||
Self { evs, len }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, EVT> ContainerBinsTakeUpTo<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub fn ts1_first(&self) -> Option<TsNano> {
|
||||
self.evs.ts1_first()
|
||||
}
|
||||
|
||||
pub fn ts2_last(&self) -> Option<TsNano> {
|
||||
self.evs.ts2_last()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
|
||||
pub fn pop_front(&mut self) -> Option<BinSingle<EVT>> {
|
||||
if self.len != 0 {
|
||||
if let Some(ev) = self.evs.pop_front() {
|
||||
self.len -= 1;
|
||||
Some(ev)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
276
src/binning/container_events.rs
Normal file
276
src/binning/container_events.rs
Normal file
@@ -0,0 +1,276 @@
|
||||
use super::aggregator::AggTimeWeightOutputAvg;
|
||||
use super::aggregator::AggregatorNumeric;
|
||||
use super::aggregator::AggregatorTimeWeight;
|
||||
use super::timeweight::timeweight_events_dyn::BinnedEventsTimeweightDynbox;
|
||||
use core::fmt;
|
||||
use daqbuf_err as err;
|
||||
use err::thiserror;
|
||||
use err::ThisError;
|
||||
use items_0::timebin::BinningggContainerEventsDyn;
|
||||
use items_0::vecpreview::PreviewRange;
|
||||
use items_0::vecpreview::VecPreview;
|
||||
use items_0::AsAnyRef;
|
||||
use netpod::BinnedRange;
|
||||
use netpod::TsNano;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::collections::VecDeque;
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
|
||||
#[derive(Debug, ThisError)]
|
||||
#[cstm(name = "ValueContainerError")]
|
||||
pub enum ValueContainerError {}
|
||||
|
||||
pub trait Container<EVT>: fmt::Debug + Send + Clone + PreviewRange + Serialize + for<'a> Deserialize<'a> {
|
||||
fn new() -> Self;
|
||||
// fn verify(&self) -> Result<(), ValueContainerError>;
|
||||
fn push_back(&mut self, val: EVT);
|
||||
fn pop_front(&mut self) -> Option<EVT>;
|
||||
}
|
||||
|
||||
pub trait EventValueType: fmt::Debug + Clone + PartialOrd + Send + 'static + Serialize {
|
||||
type Container: Container<Self>;
|
||||
type AggregatorTimeWeight: AggregatorTimeWeight<Self>;
|
||||
type AggTimeWeightOutputAvg: AggTimeWeightOutputAvg;
|
||||
|
||||
// fn identity_sum() -> Self;
|
||||
// fn add_weighted(&self, add: &Self, f: f32) -> Self;
|
||||
}
|
||||
|
||||
impl<EVT> Container<EVT> for VecDeque<EVT>
|
||||
where
|
||||
EVT: EventValueType + Serialize + for<'a> Deserialize<'a>,
|
||||
{
|
||||
fn new() -> Self {
|
||||
VecDeque::new()
|
||||
}
|
||||
|
||||
fn push_back(&mut self, val: EVT) {
|
||||
self.push_back(val);
|
||||
}
|
||||
|
||||
fn pop_front(&mut self) -> Option<EVT> {
|
||||
self.pop_front()
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_event_value_type {
|
||||
($evt:ty) => {
|
||||
impl EventValueType for $evt {
|
||||
type Container = VecDeque<Self>;
|
||||
type AggregatorTimeWeight = AggregatorNumeric;
|
||||
type AggTimeWeightOutputAvg = f64;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_event_value_type!(u8);
|
||||
impl_event_value_type!(u16);
|
||||
impl_event_value_type!(u32);
|
||||
impl_event_value_type!(u64);
|
||||
impl_event_value_type!(i8);
|
||||
impl_event_value_type!(i16);
|
||||
impl_event_value_type!(i32);
|
||||
impl_event_value_type!(i64);
|
||||
// impl_event_value_type!(f32);
|
||||
// impl_event_value_type!(f64);
|
||||
|
||||
impl EventValueType for f32 {
|
||||
type Container = VecDeque<Self>;
|
||||
type AggregatorTimeWeight = AggregatorNumeric;
|
||||
type AggTimeWeightOutputAvg = f32;
|
||||
}
|
||||
|
||||
impl EventValueType for f64 {
|
||||
type Container = VecDeque<Self>;
|
||||
type AggregatorTimeWeight = AggregatorNumeric;
|
||||
type AggTimeWeightOutputAvg = f64;
|
||||
}
|
||||
|
||||
impl EventValueType for bool {
|
||||
type Container = VecDeque<Self>;
|
||||
type AggregatorTimeWeight = AggregatorNumeric;
|
||||
type AggTimeWeightOutputAvg = f64;
|
||||
}
|
||||
|
||||
impl EventValueType for String {
|
||||
type Container = VecDeque<Self>;
|
||||
type AggregatorTimeWeight = AggregatorNumeric;
|
||||
type AggTimeWeightOutputAvg = f64;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EventSingle<EVT> {
|
||||
pub ts: TsNano,
|
||||
pub val: EVT,
|
||||
}
|
||||
|
||||
#[derive(Debug, ThisError)]
|
||||
#[cstm(name = "EventsContainerError")]
|
||||
pub enum EventsContainerError {
|
||||
Unordered,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct ContainerEvents<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
tss: VecDeque<TsNano>,
|
||||
vals: <EVT as EventValueType>::Container,
|
||||
}
|
||||
|
||||
impl<EVT> ContainerEvents<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub fn from_constituents(tss: VecDeque<TsNano>, vals: <EVT as EventValueType>::Container) -> Self {
|
||||
Self { tss, vals }
|
||||
}
|
||||
|
||||
pub fn type_name() -> &'static str {
|
||||
any::type_name::<Self>()
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
tss: VecDeque::new(),
|
||||
vals: Container::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
|
||||
pub fn verify(&self) -> Result<(), EventsContainerError> {
|
||||
if self.tss.iter().zip(self.tss.iter().skip(1)).any(|(&a, &b)| a > b) {
|
||||
return Err(EventsContainerError::Unordered);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn ts_first(&self) -> Option<TsNano> {
|
||||
self.tss.front().map(|&x| x)
|
||||
}
|
||||
|
||||
pub fn ts_last(&self) -> Option<TsNano> {
|
||||
self.tss.back().map(|&x| x)
|
||||
}
|
||||
|
||||
pub fn len_before(&self, end: TsNano) -> usize {
|
||||
let pp = self.tss.partition_point(|&x| x < end);
|
||||
assert!(pp <= self.len(), "len_before pp {} len {}", pp, self.len());
|
||||
pp
|
||||
}
|
||||
|
||||
pub fn pop_front(&mut self) -> Option<EventSingle<EVT>> {
|
||||
if let (Some(ts), Some(val)) = (self.tss.pop_front(), self.vals.pop_front()) {
|
||||
Some(EventSingle { ts, val })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_back(&mut self, ts: TsNano, val: EVT) {
|
||||
self.tss.push_back(ts);
|
||||
self.vals.push_back(val);
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> fmt::Debug for ContainerEvents<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
let self_name = any::type_name::<Self>();
|
||||
write!(
|
||||
fmt,
|
||||
"{self_name} {{ len: {:?}, tss: {:?}, vals {:?} }}",
|
||||
self.len(),
|
||||
VecPreview::new(&self.tss),
|
||||
VecPreview::new(&self.vals),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> AsAnyRef for ContainerEvents<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ContainerEventsTakeUpTo<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
evs: &'a mut ContainerEvents<EVT>,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl<'a, EVT> ContainerEventsTakeUpTo<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub fn new(evs: &'a mut ContainerEvents<EVT>, len: usize) -> Self {
|
||||
let len = len.min(evs.len());
|
||||
Self { evs, len }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, EVT> ContainerEventsTakeUpTo<'a, EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub fn ts_first(&self) -> Option<TsNano> {
|
||||
self.evs.ts_first()
|
||||
}
|
||||
|
||||
pub fn ts_last(&self) -> Option<TsNano> {
|
||||
self.evs.ts_last()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
|
||||
pub fn pop_front(&mut self) -> Option<EventSingle<EVT>> {
|
||||
if self.len != 0 {
|
||||
if let Some(ev) = self.evs.pop_front() {
|
||||
self.len -= 1;
|
||||
Some(ev)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> BinningggContainerEventsDyn for ContainerEvents<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn type_name(&self) -> &'static str {
|
||||
std::any::type_name::<Self>()
|
||||
}
|
||||
|
||||
fn binned_events_timeweight_traitobj(
|
||||
&self,
|
||||
range: BinnedRange<TsNano>,
|
||||
) -> Box<dyn items_0::timebin::BinnedEventsTimeweightTrait> {
|
||||
BinnedEventsTimeweightDynbox::<EVT>::new(range)
|
||||
}
|
||||
|
||||
fn to_anybox(&mut self) -> Box<dyn std::any::Any> {
|
||||
let ret = core::mem::replace(self, Self::new());
|
||||
Box::new(ret)
|
||||
}
|
||||
}
|
||||
15
src/binning/test.rs
Normal file
15
src/binning/test.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
mod events00;
|
||||
use super::container_events::ContainerEvents;
|
||||
use super::___;
|
||||
use netpod::log::*;
|
||||
use std::any;
|
||||
|
||||
/// `ContainerEvents` must downcast via `Any` and round-trip through serde JSON.
#[test]
fn test_use_serde() {
    let evs = ContainerEvents::<f32>::new();
    let any_ref: &dyn any::Any = &evs;
    assert!(any_ref.downcast_ref::<String>().is_none());
    assert!(any_ref.downcast_ref::<ContainerEvents<f32>>().is_some());
    let json = serde_json::to_string(&evs).unwrap();
    let _round: ContainerEvents<f32> = serde_json::from_str(&json).unwrap();
}
|
||||
488
src/binning/test/events00.rs
Normal file
488
src/binning/test/events00.rs
Normal file
@@ -0,0 +1,488 @@
|
||||
use crate::binning::container_bins::ContainerBins;
|
||||
use crate::binning::container_events::ContainerEvents;
|
||||
use crate::binning::timeweight::timeweight_events::BinnedEventsTimeweight;
|
||||
use daqbuf_err as err;
|
||||
use err::thiserror;
|
||||
use err::ThisError;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::NanoRange;
|
||||
use netpod::BinnedRange;
|
||||
use netpod::DtMs;
|
||||
use netpod::EnumVariant;
|
||||
use netpod::TsNano;
|
||||
use std::collections::VecDeque;
|
||||
|
||||
#[derive(Debug, ThisError)]
|
||||
#[cstm(name = "Error")]
|
||||
enum Error {
|
||||
Timeweight(#[from] crate::binning::timeweight::timeweight_events::Error),
|
||||
AssertMsg(String),
|
||||
}
|
||||
|
||||
// fn prepare_data_with_cuts(beg_ms: u64, cuts: VecDeque<u64>) -> VecDeque<ContainerEvents<f32>> {
|
||||
// let beg = TsNano::from_ms(beg_ms);
|
||||
// let end = TsNano::from_ms(120);
|
||||
// let mut cut_next = cuts.pop_front().unwrap_or(u64::MAX);
|
||||
// let mut ret = VecDeque::new();
|
||||
// let ivl = DtMs::from_ms_u64(x)
|
||||
// }
|
||||
|
||||
fn pu(c: &mut ContainerEvents<f32>, ts_ms: u64, val: f32)
|
||||
// where
|
||||
// C: AsMut<ContainerEvents<f32>>,
|
||||
// C: std::borrow::BorrowMut<ContainerEvents<f32>>,
|
||||
{
|
||||
c.push_back(TsNano::from_ms(ts_ms), val);
|
||||
}
|
||||
|
||||
/// Parses a whitespace-separated list of integers into a `VecDeque<u64>`.
trait IntoVecDequeU64 {
    fn into_vec_deque_u64(self) -> VecDeque<u64>;
}

impl IntoVecDequeU64 for &str {
    fn into_vec_deque_u64(self) -> VecDeque<u64> {
        // Panics on a malformed token — acceptable in test fixtures.
        self.split_ascii_whitespace()
            .map(|tok| tok.parse().unwrap())
            .collect()
    }
}
|
||||
/// Parses a whitespace-separated list of floats into a `VecDeque<f32>`.
trait IntoVecDequeF32 {
    fn into_vec_deque_f32(self) -> VecDeque<f32>;
}

impl IntoVecDequeF32 for &str {
    fn into_vec_deque_f32(self) -> VecDeque<f32> {
        // Panics on a malformed token — acceptable in test fixtures.
        self.split_ascii_whitespace()
            .map(|tok| tok.parse().unwrap())
            .collect()
    }
}
|
||||
|
||||
fn exp_u64<'a>(
|
||||
vals: impl Iterator<Item = &'a u64>,
|
||||
exps: impl Iterator<Item = &'a u64>,
|
||||
tag: &str,
|
||||
) -> Result<(), Error> {
|
||||
let mut it_a = vals;
|
||||
let mut it_b = exps;
|
||||
let mut i = 0;
|
||||
loop {
|
||||
let a = it_a.next();
|
||||
let b = it_b.next();
|
||||
if a.is_none() && b.is_none() {
|
||||
break;
|
||||
}
|
||||
if let (Some(&val), Some(&exp)) = (a, b) {
|
||||
if val != exp {
|
||||
return Err(Error::AssertMsg(format!("{tag} val {} exp {} i {}", val, exp, i)));
|
||||
}
|
||||
} else {
|
||||
return Err(Error::AssertMsg(format!("{tag} len mismatch")));
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn exp_f32<'a>(
|
||||
vals: impl Iterator<Item = &'a f32>,
|
||||
exps: impl Iterator<Item = &'a f32>,
|
||||
tag: &str,
|
||||
) -> Result<(), Error> {
|
||||
let mut it_a = vals;
|
||||
let mut it_b = exps;
|
||||
let mut i = 0;
|
||||
loop {
|
||||
let a = it_a.next();
|
||||
let b = it_b.next();
|
||||
if a.is_none() && b.is_none() {
|
||||
break;
|
||||
}
|
||||
if let (Some(&val), Some(&exp)) = (a, b) {
|
||||
if netpod::f32_close(val, exp) == false {
|
||||
return Err(Error::AssertMsg(format!("{tag} val {} exp {} i {}", val, exp, i)));
|
||||
}
|
||||
} else {
|
||||
return Err(Error::AssertMsg(format!("{tag} len mismatch")));
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Asserts the per-bin event counts against a whitespace-separated list.
#[cfg(test)]
fn exp_cnts(bins: &ContainerBins<f32>, exps: impl IntoVecDequeU64) -> Result<(), Error> {
    exp_u64(bins.cnts_iter(), exps.into_vec_deque_u64().iter(), "exp_cnts")
}

/// Asserts the per-bin minima against a whitespace-separated list.
#[cfg(test)]
fn exp_mins(bins: &ContainerBins<f32>, exps: impl IntoVecDequeF32) -> Result<(), Error> {
    exp_f32(bins.mins_iter(), exps.into_vec_deque_f32().iter(), "exp_mins")
}

/// Asserts the per-bin maxima against a whitespace-separated list.
#[cfg(test)]
fn exp_maxs(bins: &ContainerBins<f32>, exps: impl IntoVecDequeF32) -> Result<(), Error> {
    exp_f32(bins.maxs_iter(), exps.into_vec_deque_f32().iter(), "exp_maxs")
}
|
||||
|
||||
fn exp_avgs(bins: &ContainerBins<f32>, exps: impl IntoVecDequeF32) -> Result<(), Error> {
|
||||
let exps = exps.into_vec_deque_f32();
|
||||
let mut it_a = bins.iter_debug();
|
||||
let mut it_b = exps.iter();
|
||||
let mut i = 0;
|
||||
loop {
|
||||
let a = it_a.next();
|
||||
let b = it_b.next();
|
||||
if a.is_none() && b.is_none() {
|
||||
break;
|
||||
}
|
||||
if let (Some(a), Some(&exp)) = (a, b) {
|
||||
let val = *a.avg as f32;
|
||||
if netpod::f32_close(val, exp) == false {
|
||||
return Err(Error::AssertMsg(format!("exp_avgs val {} exp {} i {}", val, exp, i)));
|
||||
}
|
||||
} else {
|
||||
return Err(Error::AssertMsg(format!(
|
||||
"len mismatch {} vs {}",
|
||||
bins.len(),
|
||||
exps.len()
|
||||
)));
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// An event before the binned range must still seed min/max/avg of the first
/// bin while contributing zero to the event count.
#[test]
fn test_bin_events_f32_simple_with_before_00() -> Result<(), Error> {
    let beg = TsNano::from_ms(110);
    let end = TsNano::from_ms(120);
    let nano_range = NanoRange {
        beg: beg.ns(),
        end: end.ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    // One event before the range begin.
    pu(&mut evs, 103, 2.0);
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    let bins = binner.output();
    exp_cnts(&bins, "0")?;
    exp_mins(&bins, "2.")?;
    exp_maxs(&bins, "2.")?;
    exp_avgs(&bins, "2.")?;
    // A second drain must come back empty.
    let bins = binner.output();
    assert_eq!(bins.len(), 0);
    Ok(())
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_simple_with_before_01_range_final() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(110);
|
||||
let end = TsNano::from_ms(130);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 103, 2.0);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_final()?;
|
||||
let bins = binner.output();
|
||||
exp_cnts(&bins, "0 0")?;
|
||||
exp_mins(&bins, "2. 2.")?;
|
||||
exp_maxs(&bins, "2. 2.")?;
|
||||
exp_avgs(&bins, "2. 2.")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_simple_00() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(100);
|
||||
let end = TsNano::from_ms(120);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 100, 2.0);
|
||||
pu(em, 104, 2.4);
|
||||
binner.ingest(evs)?;
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 111, 1.0);
|
||||
pu(em, 112, 1.2);
|
||||
pu(em, 113, 1.4);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_open()?;
|
||||
let bins = binner.output();
|
||||
for b in bins.iter_debug() {
|
||||
trace!("{b:?}");
|
||||
}
|
||||
exp_cnts(&bins, "2 3")?;
|
||||
exp_mins(&bins, "2. 1.")?;
|
||||
exp_maxs(&bins, "2.4 2.4")?;
|
||||
exp_avgs(&bins, "2.24 1.5333")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_simple_01() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(100);
|
||||
let end = TsNano::from_ms(120);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 102, 2.0);
|
||||
pu(em, 104, 2.4);
|
||||
binner.ingest(evs)?;
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 111, 1.0);
|
||||
pu(em, 112, 1.2);
|
||||
pu(em, 113, 1.4);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_open()?;
|
||||
let bins = binner.output();
|
||||
for b in bins.iter_debug() {
|
||||
trace!("{b:?}");
|
||||
}
|
||||
assert_eq!(bins.len(), 2);
|
||||
exp_cnts(&bins, "2 3")?;
|
||||
exp_mins(&bins, "2. 1.")?;
|
||||
exp_maxs(&bins, "2.4 2.4")?;
|
||||
exp_avgs(&bins, "2.30 1.5333")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_small_range_final() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(100);
|
||||
let end = TsNano::from_ms(120);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 102, 2.0);
|
||||
pu(em, 104, 2.4);
|
||||
binner.ingest(evs)?;
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 111, 1.0);
|
||||
pu(em, 112, 1.2);
|
||||
pu(em, 113, 1.4);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_final()?;
|
||||
let bins = binner.output();
|
||||
for b in bins.iter_debug() {
|
||||
trace!("{b:?}");
|
||||
}
|
||||
assert_eq!(bins.len(), 2);
|
||||
exp_cnts(&bins, "2 3")?;
|
||||
exp_mins(&bins, "2. 1.")?;
|
||||
exp_maxs(&bins, "2.4 2.4")?;
|
||||
exp_avgs(&bins, "2.30 1.44")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_small_intermittent_silence_range_open() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(100);
|
||||
let end = TsNano::from_ms(150);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 102, 2.0);
|
||||
pu(em, 104, 2.4);
|
||||
binner.ingest(evs)?;
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 111, 1.0);
|
||||
pu(em, 112, 1.2);
|
||||
binner.ingest(evs)?;
|
||||
// TODO take bins already here and assert.
|
||||
// TODO combine all bins together for combined assert.
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 113, 1.4);
|
||||
pu(em, 146, 1.3);
|
||||
pu(em, 148, 1.2);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_open()?;
|
||||
let bins = binner.output();
|
||||
for b in bins.iter_debug() {
|
||||
trace!("{b:?}");
|
||||
}
|
||||
assert_eq!(bins.len(), 5);
|
||||
exp_cnts(&bins, "2 3 0 0 2")?;
|
||||
exp_mins(&bins, "2.0 1.0 1.4 1.4 1.2")?;
|
||||
exp_maxs(&bins, "2.4 2.4 1.4 1.4 1.4")?;
|
||||
exp_avgs(&bins, "2.30 1.44 1.4 1.4 1.375")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_small_intermittent_silence_range_final() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(100);
|
||||
let end = TsNano::from_ms(150);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 102, 2.0);
|
||||
pu(em, 104, 2.4);
|
||||
binner.ingest(evs)?;
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 111, 1.0);
|
||||
pu(em, 112, 1.2);
|
||||
binner.ingest(evs)?;
|
||||
// TODO take bins already here and assert.
|
||||
// TODO combine all bins together for combined assert.
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 113, 1.4);
|
||||
pu(em, 146, 1.3);
|
||||
pu(em, 148, 1.2);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_final()?;
|
||||
let bins = binner.output();
|
||||
for b in bins.iter_debug() {
|
||||
trace!("{b:?}");
|
||||
}
|
||||
exp_cnts(&bins, "2 3 0 0 2")?;
|
||||
exp_mins(&bins, "2.0 1.0 1.4 1.4 1.2")?;
|
||||
exp_maxs(&bins, "2.4 2.4 1.4 1.4 1.4")?;
|
||||
exp_avgs(&bins, "2.30 1.44 1.4 1.4 1.34")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_small_intermittent_silence_minmax_no_edge_range_final() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(110);
|
||||
let end = TsNano::from_ms(120);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 109, 50.);
|
||||
binner.ingest(evs)?;
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 111, 40.);
|
||||
// pu(em, 112, 1.2);
|
||||
// binner.ingest(evs)?;
|
||||
// let mut evs = ContainerEvents::<f32>::new();
|
||||
// let em = &mut evs;
|
||||
// pu(em, 113, 1.4);
|
||||
// pu(em, 120, 1.4);
|
||||
// pu(em, 146, 1.3);
|
||||
// pu(em, 148, 1.2);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_final()?;
|
||||
let bins = binner.output();
|
||||
for b in bins.iter_debug() {
|
||||
trace!("{b:?}");
|
||||
}
|
||||
exp_cnts(&bins, "1")?;
|
||||
exp_mins(&bins, "40.")?;
|
||||
exp_maxs(&bins, "50.")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bin_events_f32_small_intermittent_silence_minmax_edge_range_final() -> Result<(), Error> {
|
||||
let beg = TsNano::from_ms(110);
|
||||
let end = TsNano::from_ms(120);
|
||||
let nano_range = NanoRange {
|
||||
beg: beg.ns(),
|
||||
end: end.ns(),
|
||||
};
|
||||
let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
|
||||
let mut binner = BinnedEventsTimeweight::new(range);
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 109, 50.);
|
||||
binner.ingest(evs)?;
|
||||
let mut evs = ContainerEvents::<f32>::new();
|
||||
let em = &mut evs;
|
||||
pu(em, 110, 40.);
|
||||
// pu(em, 112, 1.2);
|
||||
// binner.ingest(evs)?;
|
||||
// let mut evs = ContainerEvents::<f32>::new();
|
||||
// let em = &mut evs;
|
||||
// pu(em, 113, 1.4);
|
||||
// pu(em, 120, 1.4);
|
||||
// pu(em, 146, 1.3);
|
||||
// pu(em, 148, 1.2);
|
||||
binner.ingest(evs)?;
|
||||
binner.input_done_range_final()?;
|
||||
let bins = binner.output();
|
||||
for b in bins.iter_debug() {
|
||||
trace!("{b:?}");
|
||||
}
|
||||
exp_cnts(&bins, "1")?;
|
||||
exp_mins(&bins, "40.")?;
|
||||
exp_maxs(&bins, "40.")?;
|
||||
let bins = binner.output();
|
||||
assert_eq!(bins.len(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Smoke test: enum-variant events pass through the time-weighted binner
/// without error. Output bins are intentionally not asserted yet.
#[test]
fn test_bin_events_enum_simple_range_final() -> Result<(), Error> {
    let beg = TsNano::from_ms(100);
    let end = TsNano::from_ms(120);
    let nano_range = NanoRange {
        beg: beg.ns(),
        end: end.ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::new();
    evs.push_back(TsNano::from_ms(103), EnumVariant::new(1, "one"));
    evs.push_back(TsNano::from_ms(104), EnumVariant::new(2, "two"));
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    // Underscore binding silences the unused-variable warning; TODO assert
    // the produced bins once the expected enum aggregation is defined.
    let _bins = binner.output();
    Ok(())
}
|
||||
16
src/binning/timeweight.rs
Normal file
16
src/binning/timeweight.rs
Normal file
@@ -0,0 +1,16 @@
|
||||
pub mod timeweight_bins;
|
||||
pub mod timeweight_bins_dyn;
|
||||
pub mod timeweight_events;
|
||||
pub mod timeweight_events_dyn;
|
||||
|
||||
use super::___;
|
||||
use netpod::log::*;
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_detail { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
5
src/binning/timeweight/timeweight_bins.rs
Normal file
5
src/binning/timeweight/timeweight_bins.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
use super::___;
|
||||
use netpod::log::*;
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
27
src/binning/timeweight/timeweight_bins_dyn.rs
Normal file
27
src/binning/timeweight/timeweight_bins_dyn.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
use futures_util::Stream;
|
||||
use items_0::streamitem::Sitemty;
|
||||
use items_0::timebin::BinningggContainerBinsDyn;
|
||||
use netpod::BinnedRange;
|
||||
use netpod::TsNano;
|
||||
use std::pin::Pin;
|
||||
use std::task::Context;
|
||||
use std::task::Poll;
|
||||
|
||||
pub struct BinnedBinsTimeweightStream {}
|
||||
|
||||
impl BinnedBinsTimeweightStream {
|
||||
pub fn new(
|
||||
range: BinnedRange<TsNano>,
|
||||
inp: Pin<Box<dyn Stream<Item = Sitemty<Box<dyn BinningggContainerBinsDyn>>> + Send>>,
|
||||
) -> Self {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for BinnedBinsTimeweightStream {
|
||||
type Item = Sitemty<Box<dyn BinningggContainerBinsDyn>>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
645
src/binning/timeweight/timeweight_events.rs
Normal file
645
src/binning/timeweight/timeweight_events.rs
Normal file
@@ -0,0 +1,645 @@
|
||||
use super::super::container_events::EventValueType;
|
||||
use crate::binning::aggregator::AggregatorTimeWeight;
|
||||
use crate::binning::container_bins::ContainerBins;
|
||||
use crate::binning::container_events::ContainerEvents;
|
||||
use crate::binning::container_events::ContainerEventsTakeUpTo;
|
||||
use crate::binning::container_events::EventSingle;
|
||||
use core::fmt;
|
||||
use daqbuf_err as err;
|
||||
use err::thiserror;
|
||||
use err::ThisError;
|
||||
use netpod::log::*;
|
||||
use netpod::BinnedRange;
|
||||
use netpod::DtNano;
|
||||
use netpod::TsNano;
|
||||
use std::mem;
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_init { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_cycle { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_event_next { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_init_lst { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_minmax { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_event { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_firsts { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_finish_bin { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_container { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest_container_2 { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_fill_until { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
|
||||
|
||||
#[cold]
|
||||
#[inline]
|
||||
#[allow(unused)]
|
||||
fn cold() {}
|
||||
|
||||
const DEBUG_CHECKS: bool = true;
|
||||
|
||||
#[derive(Debug, ThisError)]
|
||||
#[cstm(name = "BinnedEventsTimeweight")]
|
||||
pub enum Error {
|
||||
BadContainer(#[from] super::super::container_events::EventsContainerError),
|
||||
Unordered,
|
||||
EventAfterRange,
|
||||
NoLstAfterFirst,
|
||||
EmptyContainerInnerHandler,
|
||||
NoLstButMinMax,
|
||||
WithLstButEventBeforeRange,
|
||||
WithMinMaxButEventBeforeRange,
|
||||
NoMinMaxAfterInit,
|
||||
ExpectEventWithinRange,
|
||||
}
|
||||
|
||||
type MinMax<EVT> = (EventSingle<EVT>, EventSingle<EVT>);
|
||||
|
||||
#[derive(Clone)]
|
||||
struct LstRef<'a, EVT>(&'a EventSingle<EVT>);
|
||||
|
||||
struct LstMut<'a, EVT>(&'a mut EventSingle<EVT>);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct InnerB<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
cnt: u64,
|
||||
active_beg: TsNano,
|
||||
active_end: TsNano,
|
||||
active_len: DtNano,
|
||||
filled_until: TsNano,
|
||||
filled_width: DtNano,
|
||||
agg: <EVT as EventValueType>::AggregatorTimeWeight,
|
||||
}
|
||||
|
||||
impl<EVT> InnerB<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
// NOTE that this is also used during bin-cycle.
|
||||
fn ingest_event_with_lst_gt_range_beg_agg(&mut self, ev: EventSingle<EVT>, lst: LstRef<EVT>) {
|
||||
let selfname = "ingest_event_with_lst_gt_range_beg_agg";
|
||||
trace_ingest_event!("{selfname} {:?}", ev);
|
||||
if DEBUG_CHECKS {
|
||||
if ev.ts <= self.active_beg {
|
||||
panic!("should never get here");
|
||||
}
|
||||
if ev.ts >= self.active_end {
|
||||
panic!("should never get here");
|
||||
}
|
||||
}
|
||||
let dt = ev.ts.delta(self.filled_until);
|
||||
trace_ingest_event!("{selfname} dt {:?} ev {:?}", dt, ev);
|
||||
// TODO can the caller already take the value and replace it afterwards with the current value?
|
||||
// This fn could swap the value in lst and directly use it.
|
||||
// This would require that any call path does not mess with lst.
|
||||
// NOTE that this fn is also used during bin-cycle.
|
||||
self.agg.ingest(dt, self.active_len, lst.0.val.clone());
|
||||
self.filled_width = self.filled_width.add(dt);
|
||||
self.filled_until = ev.ts;
|
||||
}
|
||||
|
||||
fn ingest_event_with_lst_gt_range_beg_2(&mut self, ev: EventSingle<EVT>, lst: LstMut<EVT>) -> Result<(), Error> {
|
||||
let selfname = "ingest_event_with_lst_gt_range_beg_2";
|
||||
trace_ingest_event!("{selfname}");
|
||||
self.ingest_event_with_lst_gt_range_beg_agg(ev.clone(), LstRef(lst.0));
|
||||
InnerA::apply_lst_after_event_handled(ev, lst);
|
||||
// self.cnt += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ingest_event_with_lst_gt_range_beg(
|
||||
&mut self,
|
||||
ev: EventSingle<EVT>,
|
||||
lst: LstMut<EVT>,
|
||||
minmax: &mut MinMax<EVT>,
|
||||
) -> Result<(), Error> {
|
||||
let selfname = "ingest_event_with_lst_gt_range_beg";
|
||||
trace_ingest_event!("{selfname}");
|
||||
// TODO if the event is exactly on the current bin first edge, then there is no contribution to the avg yet
|
||||
// and I must initialize the min/max with the current event.
|
||||
InnerA::apply_min_max(&ev, minmax);
|
||||
self.ingest_event_with_lst_gt_range_beg_2(ev.clone(), lst)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ingest_event_with_lst_eq_range_beg(
|
||||
&mut self,
|
||||
ev: EventSingle<EVT>,
|
||||
lst: LstMut<EVT>,
|
||||
minmax: &mut MinMax<EVT>,
|
||||
) -> Result<(), Error> {
|
||||
let selfname = "ingest_event_with_lst_eq_range_beg";
|
||||
trace_ingest_event!("{selfname}");
|
||||
// TODO if the event is exactly on the current bin first edge, then there is no contribution to the avg yet
|
||||
// and I must initialize the min/max with the current event.
|
||||
InnerA::apply_min_max(&ev, minmax);
|
||||
InnerA::apply_lst_after_event_handled(ev, lst);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ingest_with_lst_gt_range_beg(
|
||||
&mut self,
|
||||
mut evs: ContainerEventsTakeUpTo<EVT>,
|
||||
lst: LstMut<EVT>,
|
||||
minmax: &mut MinMax<EVT>,
|
||||
) -> Result<(), Error> {
|
||||
let selfname = "ingest_with_lst_gt_range_beg";
|
||||
trace_ingest_event!("{selfname}");
|
||||
while let Some(ev) = evs.pop_front() {
|
||||
trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname);
|
||||
if ev.ts <= self.active_beg {
|
||||
panic!("should never get here");
|
||||
}
|
||||
if ev.ts >= self.active_end {
|
||||
panic!("should never get here");
|
||||
}
|
||||
self.ingest_event_with_lst_gt_range_beg(ev.clone(), LstMut(lst.0), minmax)?;
|
||||
self.cnt += 1;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ingest_with_lst_ge_range_beg(
|
||||
&mut self,
|
||||
mut evs: ContainerEventsTakeUpTo<EVT>,
|
||||
lst: LstMut<EVT>,
|
||||
minmax: &mut MinMax<EVT>,
|
||||
) -> Result<(), Error> {
|
||||
let selfname = "ingest_with_lst_ge_range_beg";
|
||||
trace_ingest_event!("{selfname}");
|
||||
while let Some(ev) = evs.pop_front() {
|
||||
trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname);
|
||||
if ev.ts < self.active_beg {
|
||||
panic!("should never get here");
|
||||
}
|
||||
if ev.ts >= self.active_end {
|
||||
panic!("should never get here");
|
||||
}
|
||||
if ev.ts == self.active_beg {
|
||||
self.ingest_event_with_lst_eq_range_beg(ev, LstMut(lst.0), minmax)?;
|
||||
self.cnt += 1;
|
||||
} else {
|
||||
self.ingest_event_with_lst_gt_range_beg(ev.clone(), LstMut(lst.0), minmax)?;
|
||||
self.cnt += 1;
|
||||
trace_ingest_event!("{selfname} now calling ingest_with_lst_gt_range_beg");
|
||||
return self.ingest_with_lst_gt_range_beg(evs, LstMut(lst.0), minmax);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ingest_with_lst_minmax(
|
||||
&mut self,
|
||||
evs: ContainerEventsTakeUpTo<EVT>,
|
||||
lst: LstMut<EVT>,
|
||||
minmax: &mut MinMax<EVT>,
|
||||
) -> Result<(), Error> {
|
||||
let selfname = "ingest_with_lst_minmax";
|
||||
trace_ingest_event!("{selfname}");
|
||||
// TODO how to handle the min max? I don't take event data yet out of the container.
|
||||
if let Some(ts0) = evs.ts_first() {
|
||||
trace_ingest_event!("EVENT POP FRONT {selfname}");
|
||||
trace_ingest_event!("EVENT TIMESTAMP FRONT {:?} {selfname}", ts0);
|
||||
if ts0 < self.active_beg {
|
||||
panic!("should never get here");
|
||||
} else {
|
||||
self.ingest_with_lst_ge_range_beg(evs, lst, minmax)
|
||||
}
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// PRECONDITION: filled_until < ts <= active_end
|
||||
fn fill_until(&mut self, ts: TsNano, lst: LstRef<EVT>) {
|
||||
let b = self;
|
||||
assert!(b.filled_until < ts);
|
||||
assert!(ts <= b.active_end);
|
||||
let dt = ts.delta(b.filled_until);
|
||||
trace_fill_until!("fill_until ts {:?} dt {:?} lst {:?}", ts, dt, lst.0);
|
||||
assert!(b.filled_until < ts);
|
||||
assert!(ts <= b.active_end);
|
||||
b.agg.ingest(dt, b.active_len, lst.0.val.clone());
|
||||
b.filled_width = b.filled_width.add(dt);
|
||||
b.filled_until = ts;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct InnerA<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
inner_b: InnerB<EVT>,
|
||||
minmax: Option<(EventSingle<EVT>, EventSingle<EVT>)>,
|
||||
}
|
||||
|
||||
impl<EVT> InnerA<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn apply_min_max(ev: &EventSingle<EVT>, minmax: &mut MinMax<EVT>) {
|
||||
if ev.val < minmax.0.val {
|
||||
minmax.0 = ev.clone();
|
||||
}
|
||||
if ev.val > minmax.1.val {
|
||||
minmax.1 = ev.clone();
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_lst_after_event_handled(ev: EventSingle<EVT>, lst: LstMut<EVT>) {
|
||||
*lst.0 = ev;
|
||||
}
|
||||
|
||||
fn init_minmax(&mut self, ev: &EventSingle<EVT>) {
|
||||
trace_ingest_minmax!("init_minmax {:?}", ev);
|
||||
self.minmax = Some((ev.clone(), ev.clone()));
|
||||
}
|
||||
|
||||
fn init_minmax_with_lst(&mut self, ev: &EventSingle<EVT>, lst: LstRef<EVT>) {
|
||||
trace_ingest_minmax!("init_minmax_with_lst {:?} {:?}", ev, lst.0);
|
||||
let minmax = self.minmax.insert((lst.0.clone(), lst.0.clone()));
|
||||
Self::apply_min_max(ev, minmax);
|
||||
}
|
||||
|
||||
fn ingest_with_lst(&mut self, mut evs: ContainerEventsTakeUpTo<EVT>, lst: LstMut<EVT>) -> Result<(), Error> {
|
||||
let selfname = "ingest_with_lst";
|
||||
trace_ingest_container!("{selfname} evs len {}", evs.len());
|
||||
let b = &mut self.inner_b;
|
||||
if let Some(minmax) = self.minmax.as_mut() {
|
||||
b.ingest_with_lst_minmax(evs, lst, minmax)
|
||||
} else {
|
||||
if let Some(ev) = evs.pop_front() {
|
||||
trace_event_next!("EVENT POP FRONT {:?} {selfname:30}", ev);
|
||||
let beg = b.active_beg;
|
||||
let end = b.active_end;
|
||||
if ev.ts < beg {
|
||||
panic!("should never get here");
|
||||
} else if ev.ts >= end {
|
||||
panic!("should never get here");
|
||||
} else {
|
||||
if ev.ts == beg {
|
||||
self.init_minmax(&ev);
|
||||
InnerA::apply_lst_after_event_handled(ev, lst);
|
||||
let b = &mut self.inner_b;
|
||||
b.cnt += 1;
|
||||
Ok(())
|
||||
} else {
|
||||
self.init_minmax_with_lst(&ev, LstRef(lst.0));
|
||||
let b = &mut self.inner_b;
|
||||
if let Some(minmax) = self.minmax.as_mut() {
|
||||
if ev.ts == beg {
|
||||
panic!("logic error, is handled before");
|
||||
} else {
|
||||
b.ingest_event_with_lst_gt_range_beg_2(ev, LstMut(lst.0))?;
|
||||
}
|
||||
b.cnt += 1;
|
||||
b.ingest_with_lst_minmax(evs, lst, minmax)
|
||||
} else {
|
||||
Err(Error::NoMinMaxAfterInit)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn reset_01(&mut self, lst: LstRef<EVT>) {
|
||||
let selfname = "reset_01";
|
||||
let b = &mut self.inner_b;
|
||||
trace_cycle!(
|
||||
"{selfname} active_end {:?} filled_until {:?}",
|
||||
b.active_end,
|
||||
b.filled_until
|
||||
);
|
||||
let div = b.active_len.ns();
|
||||
let old_end = b.active_end;
|
||||
let ts1 = TsNano::from_ns(b.active_end.ns() / div * div);
|
||||
assert!(ts1 == old_end);
|
||||
b.active_beg = ts1;
|
||||
b.active_end = ts1.add_dt_nano(b.active_len);
|
||||
b.filled_until = ts1;
|
||||
b.filled_width = DtNano::from_ns(0);
|
||||
b.cnt = 0;
|
||||
self.minmax = Some((lst.0.clone(), lst.0.clone()));
|
||||
}
|
||||
|
||||
fn push_out_and_reset(&mut self, lst: LstRef<EVT>, range_final: bool, out: &mut ContainerBins<EVT>) {
|
||||
let selfname = "push_out_and_reset";
|
||||
// TODO there is not always good enough input to produce a meaningful bin.
|
||||
// TODO can we always reset, and what exactly does reset mean here?
|
||||
// TODO what logic can I save here? To output a bin I need to have min, max, lst.
|
||||
let b = &mut self.inner_b;
|
||||
let minmax = self.minmax.get_or_insert_with(|| {
|
||||
trace_cycle!("{selfname} minmax not yet set");
|
||||
(lst.0.clone(), lst.0.clone())
|
||||
});
|
||||
{
|
||||
let filled_width_fraction = b.filled_width.fraction_of(b.active_len);
|
||||
let res = b.agg.result_and_reset_for_new_bin(filled_width_fraction);
|
||||
out.push_back(
|
||||
b.active_beg,
|
||||
b.active_end,
|
||||
b.cnt,
|
||||
minmax.0.val.clone(),
|
||||
minmax.1.val.clone(),
|
||||
res,
|
||||
lst.0.val.clone(),
|
||||
range_final,
|
||||
);
|
||||
}
|
||||
self.reset_01(lst);
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BinnedEventsTimeweight<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
lst: Option<EventSingle<EVT>>,
|
||||
range: BinnedRange<TsNano>,
|
||||
inner_a: InnerA<EVT>,
|
||||
out: ContainerBins<EVT>,
|
||||
produce_cnt_zero: bool,
|
||||
}
|
||||
|
||||
impl<EVT> fmt::Debug for BinnedEventsTimeweight<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("BinnedEventsTimeweight")
|
||||
.field("lst", &self.lst)
|
||||
.field("range", &self.range)
|
||||
.field("inner_a", &self.inner_a)
|
||||
.field("out", &self.out)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> BinnedEventsTimeweight<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
pub fn new(range: BinnedRange<TsNano>) -> Self {
|
||||
let active_beg = range.nano_beg();
|
||||
let active_end = active_beg.add_dt_nano(range.bin_len.to_dt_nano());
|
||||
let active_len = active_end.delta(active_beg);
|
||||
Self {
|
||||
range,
|
||||
inner_a: InnerA::<EVT> {
|
||||
inner_b: InnerB {
|
||||
cnt: 0,
|
||||
active_beg,
|
||||
active_end,
|
||||
active_len,
|
||||
filled_until: active_beg,
|
||||
filled_width: DtNano::from_ns(0),
|
||||
agg: <<EVT as EventValueType>::AggregatorTimeWeight as AggregatorTimeWeight<EVT>>::new(),
|
||||
},
|
||||
minmax: None,
|
||||
},
|
||||
lst: None,
|
||||
out: ContainerBins::new(),
|
||||
produce_cnt_zero: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn disable_cnt_zero(self) -> Self {
|
||||
let mut ret = self;
|
||||
ret.produce_cnt_zero = false;
|
||||
ret
|
||||
}
|
||||
|
||||
fn ingest_event_without_lst(&mut self, ev: EventSingle<EVT>) -> Result<(), Error> {
|
||||
let selfname = "ingest_event_without_lst";
|
||||
let b = &self.inner_a.inner_b;
|
||||
if ev.ts >= b.active_end {
|
||||
panic!("{selfname} should never get here");
|
||||
} else {
|
||||
trace_ingest_init_lst!("ingest_event_without_lst set lst {:?}", ev);
|
||||
self.lst = Some(ev.clone());
|
||||
if ev.ts >= b.active_beg {
|
||||
trace_ingest_minmax!("ingest_event_without_lst");
|
||||
self.inner_a.init_minmax(&ev);
|
||||
let b = &mut self.inner_a.inner_b;
|
||||
b.cnt += 1;
|
||||
b.filled_until = ev.ts;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn ingest_without_lst(&mut self, mut evs: ContainerEventsTakeUpTo<EVT>) -> Result<(), Error> {
|
||||
let selfname = "ingest_without_lst";
|
||||
if let Some(ev) = evs.pop_front() {
|
||||
trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname);
|
||||
if ev.ts >= self.inner_a.inner_b.active_end {
|
||||
panic!("{selfname} should never get here");
|
||||
} else {
|
||||
self.ingest_event_without_lst(ev)?;
|
||||
if let Some(lst) = self.lst.as_mut() {
|
||||
self.inner_a.ingest_with_lst(evs, LstMut(lst))
|
||||
} else {
|
||||
Err(Error::NoLstAfterFirst)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Caller asserts that evs is ordered within the current container
|
||||
// and with respect to the last container, if any.
|
||||
fn ingest_ordered(&mut self, evs: ContainerEventsTakeUpTo<EVT>) -> Result<(), Error> {
|
||||
if let Some(lst) = self.lst.as_mut() {
|
||||
self.inner_a.ingest_with_lst(evs, LstMut(lst))
|
||||
} else {
|
||||
if self.inner_a.minmax.is_some() {
|
||||
Err(Error::NoLstButMinMax)
|
||||
} else {
|
||||
self.ingest_without_lst(evs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn cycle_01(&mut self, ts: TsNano) {
|
||||
let b = &self.inner_a.inner_b;
|
||||
trace_cycle!("cycle_01 {:?} {:?}", ts, b.active_end);
|
||||
assert!(b.active_beg < ts);
|
||||
assert!(b.active_beg <= b.filled_until);
|
||||
assert!(b.filled_until < ts);
|
||||
assert!(b.filled_until <= b.active_end);
|
||||
let div = b.active_len.ns();
|
||||
if let Some(lst) = self.lst.as_ref() {
|
||||
let lst = LstRef(lst);
|
||||
if self.produce_cnt_zero {
|
||||
let mut i = 0;
|
||||
loop {
|
||||
i += 1;
|
||||
assert!(i < 100000, "too many iterations");
|
||||
let b = &self.inner_a.inner_b;
|
||||
if ts > b.filled_until {
|
||||
if ts >= b.active_end {
|
||||
if b.filled_until < b.active_end {
|
||||
self.inner_a.inner_b.fill_until(b.active_end, lst.clone());
|
||||
}
|
||||
self.inner_a.push_out_and_reset(lst.clone(), true, &mut self.out);
|
||||
} else {
|
||||
self.inner_a.inner_b.fill_until(ts, lst.clone());
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let b = &self.inner_a.inner_b;
|
||||
if ts > b.filled_until {
|
||||
if ts >= b.active_end {
|
||||
if b.filled_until < b.active_end {
|
||||
self.inner_a.inner_b.fill_until(b.active_end, lst.clone());
|
||||
}
|
||||
self.inner_a.push_out_and_reset(lst.clone(), true, &mut self.out);
|
||||
} else {
|
||||
// TODO should not hit this case. Prove it, assert it.
|
||||
self.inner_a.inner_b.fill_until(ts, lst.clone());
|
||||
}
|
||||
} else {
|
||||
// TODO should never hit this case. Count.
|
||||
}
|
||||
|
||||
// TODO jump to next bin
|
||||
// TODO merge with the other reset
|
||||
// Below uses the same code
|
||||
let ts1 = TsNano::from_ns(ts.ns() / div * div);
|
||||
let b = &mut self.inner_a.inner_b;
|
||||
b.active_beg = ts1;
|
||||
b.active_end = ts1.add_dt_nano(b.active_len);
|
||||
b.filled_until = ts1;
|
||||
b.filled_width = DtNano::from_ns(0);
|
||||
b.cnt = 0;
|
||||
b.agg.reset_for_new_bin();
|
||||
// assert!(self.inner_a.minmax.is_none());
|
||||
trace_cycle!("cycled direct to {:?} {:?}", b.active_beg, b.active_end);
|
||||
}
|
||||
} else {
|
||||
assert!(self.inner_a.minmax.is_none());
|
||||
// TODO merge with the other reset
|
||||
let ts1 = TsNano::from_ns(ts.ns() / div * div);
|
||||
let b = &mut self.inner_a.inner_b;
|
||||
b.active_beg = ts1;
|
||||
b.active_end = ts1.add_dt_nano(b.active_len);
|
||||
b.filled_until = ts1;
|
||||
b.filled_width = DtNano::from_ns(0);
|
||||
b.cnt = 0;
|
||||
b.agg.reset_for_new_bin();
|
||||
trace_cycle!("cycled direct to {:?} {:?}", b.active_beg, b.active_end);
|
||||
}
|
||||
}
|
||||
|
||||
fn cycle_02(&mut self) {
|
||||
let b = &self.inner_a.inner_b;
|
||||
trace_cycle!("cycle_02 {:?}", b.active_end);
|
||||
if let Some(lst) = self.lst.as_ref() {
|
||||
let lst = LstRef(lst);
|
||||
self.inner_a.push_out_and_reset(lst, false, &mut self.out);
|
||||
} else {
|
||||
// there is nothing we can produce
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ingest(&mut self, mut evs_all: ContainerEvents<EVT>) -> Result<(), Error> {
|
||||
// It is this type's task to find and store the one-before event.
|
||||
// We then pass it to the aggregation.
|
||||
// AggregatorTimeWeight needs a function for that.
|
||||
// What about counting the events that actually fall into the range?
|
||||
// Maybe that should be done in this type.
|
||||
// That way we can pass the values and weights to the aggregation, and count the in-range here.
|
||||
// This type must also "close" the current aggregation by passing the "last" and init the next.
|
||||
// ALSO: need to keep track of the "lst". Probably best done in this type as well?
|
||||
|
||||
// TODO should rely on external stream adapter for verification to not duplicate things.
|
||||
evs_all.verify()?;
|
||||
|
||||
loop {
|
||||
break if let Some(ts) = evs_all.ts_first() {
|
||||
trace_ingest_event!("EVENT TIMESTAMP FRONT {:?} ingest", ts);
|
||||
let b = &mut self.inner_a.inner_b;
|
||||
if ts >= self.range.nano_end() {
|
||||
return Err(Error::EventAfterRange);
|
||||
}
|
||||
if ts >= b.active_end {
|
||||
assert!(b.filled_until < b.active_end, "{} < {}", b.filled_until, b.active_end);
|
||||
self.cycle_01(ts);
|
||||
}
|
||||
let n1 = evs_all.len();
|
||||
let len_before = evs_all.len_before(self.inner_a.inner_b.active_end);
|
||||
let evs = ContainerEventsTakeUpTo::new(&mut evs_all, len_before);
|
||||
if let Some(lst) = self.lst.as_ref() {
|
||||
if ts < lst.ts {
|
||||
return Err(Error::Unordered);
|
||||
} else {
|
||||
self.ingest_ordered(evs)?
|
||||
}
|
||||
} else {
|
||||
self.ingest_ordered(evs)?
|
||||
};
|
||||
trace_ingest_container_2!("ingest after still left len evs {}", evs_all.len());
|
||||
let n2 = evs_all.len();
|
||||
if n2 != 0 {
|
||||
if n2 == n1 {
|
||||
panic!("no progress");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
()
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn input_done_range_final(&mut self) -> Result<(), Error> {
|
||||
trace_cycle!("input_done_range_final");
|
||||
self.cycle_01(self.range.nano_end());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn input_done_range_open(&mut self) -> Result<(), Error> {
|
||||
trace_cycle!("input_done_range_open");
|
||||
self.cycle_02();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn output_len(&self) -> usize {
|
||||
self.out.len()
|
||||
}
|
||||
|
||||
pub fn output(&mut self) -> ContainerBins<EVT> {
|
||||
mem::replace(&mut self.out, ContainerBins::new())
|
||||
}
|
||||
}
|
||||
276
src/binning/timeweight/timeweight_events_dyn.rs
Normal file
276
src/binning/timeweight/timeweight_events_dyn.rs
Normal file
@@ -0,0 +1,276 @@
|
||||
use super::timeweight_events::BinnedEventsTimeweight;
|
||||
use crate::binning::container_events::ContainerEvents;
|
||||
use crate::binning::container_events::EventValueType;
|
||||
use crate::channelevents::ChannelEvents;
|
||||
use daqbuf_err as err;
|
||||
use err::thiserror;
|
||||
use err::ThisError;
|
||||
use futures_util::Stream;
|
||||
use futures_util::StreamExt;
|
||||
use items_0::streamitem::LogItem;
|
||||
use items_0::streamitem::Sitemty;
|
||||
use items_0::timebin::BinnedEventsTimeweightTrait;
|
||||
use items_0::timebin::BinningggContainerBinsDyn;
|
||||
use items_0::timebin::BinningggError;
|
||||
use items_0::timebin::BinsBoxed;
|
||||
use items_0::timebin::EventsBoxed;
|
||||
use netpod::log::*;
|
||||
use netpod::BinnedRange;
|
||||
use netpod::TsNano;
|
||||
use std::ops::ControlFlow;
|
||||
use std::pin::Pin;
|
||||
use std::task::Context;
|
||||
use std::task::Poll;
|
||||
|
||||
macro_rules! trace_input_container { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
|
||||
|
||||
macro_rules! trace_emit { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
|
||||
|
||||
#[derive(Debug, ThisError)]
|
||||
#[cstm(name = "BinnedEventsTimeweightDyn")]
|
||||
pub enum Error {
|
||||
InnerDynMissing,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BinnedEventsTimeweightDynbox<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
binner: BinnedEventsTimeweight<EVT>,
|
||||
}
|
||||
|
||||
impl<EVT> BinnedEventsTimeweightDynbox<EVT>
|
||||
where
|
||||
EVT: EventValueType + 'static,
|
||||
{
|
||||
pub fn new(range: BinnedRange<TsNano>) -> Box<dyn BinnedEventsTimeweightTrait> {
|
||||
let ret = Self {
|
||||
binner: BinnedEventsTimeweight::new(range),
|
||||
};
|
||||
Box::new(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<EVT> BinnedEventsTimeweightTrait for BinnedEventsTimeweightDynbox<EVT>
|
||||
where
|
||||
EVT: EventValueType,
|
||||
{
|
||||
fn ingest(&mut self, mut evs: EventsBoxed) -> Result<(), BinningggError> {
|
||||
// let a = (&evs as &dyn any::Any).downcast_ref::<String>();
|
||||
// evs.downcast::<String>();
|
||||
// evs.as_anybox().downcast::<ContainerEvents<f64>>();
|
||||
match evs.to_anybox().downcast::<ContainerEvents<EVT>>() {
|
||||
Ok(evs) => {
|
||||
let evs = {
|
||||
let a = evs;
|
||||
*a
|
||||
};
|
||||
Ok(self.binner.ingest(evs)?)
|
||||
}
|
||||
Err(_) => Err(BinningggError::TypeMismatch {
|
||||
have: evs.type_name().into(),
|
||||
expect: std::any::type_name::<ContainerEvents<EVT>>().into(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
fn input_done_range_final(&mut self) -> Result<(), BinningggError> {
|
||||
Ok(self.binner.input_done_range_final()?)
|
||||
}
|
||||
|
||||
fn input_done_range_open(&mut self) -> Result<(), BinningggError> {
|
||||
Ok(self.binner.input_done_range_open()?)
|
||||
}
|
||||
|
||||
fn output(&mut self) -> Result<Option<BinsBoxed>, BinningggError> {
|
||||
if self.binner.output_len() == 0 {
|
||||
Ok(None)
|
||||
} else {
|
||||
let c = self.binner.output();
|
||||
Ok(Some(Box::new(c)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BinnedEventsTimeweightLazy {
|
||||
range: BinnedRange<TsNano>,
|
||||
binned_events: Option<Box<dyn BinnedEventsTimeweightTrait>>,
|
||||
}
|
||||
|
||||
impl BinnedEventsTimeweightLazy {
|
||||
pub fn new(range: BinnedRange<TsNano>) -> Self {
|
||||
Self {
|
||||
range,
|
||||
binned_events: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BinnedEventsTimeweightTrait for BinnedEventsTimeweightLazy {
|
||||
fn ingest(&mut self, evs_all: EventsBoxed) -> Result<(), BinningggError> {
|
||||
self.binned_events
|
||||
.get_or_insert_with(|| evs_all.binned_events_timeweight_traitobj(self.range.clone()))
|
||||
.ingest(evs_all)
|
||||
}
|
||||
|
||||
fn input_done_range_final(&mut self) -> Result<(), BinningggError> {
|
||||
self.binned_events
|
||||
.as_mut()
|
||||
.map(|x| x.input_done_range_final())
|
||||
.unwrap_or_else(|| {
|
||||
debug!("TODO something to do if we miss the binner here?");
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn input_done_range_open(&mut self) -> Result<(), BinningggError> {
|
||||
self.binned_events
|
||||
.as_mut()
|
||||
.map(|x| x.input_done_range_open())
|
||||
.unwrap_or(Ok(()))
|
||||
}
|
||||
|
||||
fn output(&mut self) -> Result<Option<BinsBoxed>, BinningggError> {
|
||||
self.binned_events.as_mut().map(|x| x.output()).unwrap_or(Ok(None))
|
||||
}
|
||||
}
|
||||
|
||||
enum StreamState {
|
||||
Reading,
|
||||
Done,
|
||||
Invalid,
|
||||
}
|
||||
|
||||
pub struct BinnedEventsTimeweightStream {
|
||||
state: StreamState,
|
||||
inp: Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>,
|
||||
binned_events: BinnedEventsTimeweightLazy,
|
||||
range_complete: bool,
|
||||
}
|
||||
|
||||
impl BinnedEventsTimeweightStream {
|
||||
pub fn new(range: BinnedRange<TsNano>, inp: Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>) -> Self {
|
||||
Self {
|
||||
state: StreamState::Reading,
|
||||
inp,
|
||||
binned_events: BinnedEventsTimeweightLazy::new(range),
|
||||
range_complete: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_sitemty(
|
||||
mut self: Pin<&mut Self>,
|
||||
item: Sitemty<ChannelEvents>,
|
||||
_cx: &mut Context,
|
||||
) -> ControlFlow<Poll<Option<<Self as Stream>::Item>>> {
|
||||
use items_0::streamitem::RangeCompletableItem::*;
|
||||
use items_0::streamitem::StreamItem::*;
|
||||
use ControlFlow::*;
|
||||
use Poll::*;
|
||||
match item {
|
||||
Ok(x) => match x {
|
||||
DataItem(x) => match x {
|
||||
Data(x) => match x {
|
||||
ChannelEvents::Events(evs) => match self.binned_events.ingest(evs.to_container_events()) {
|
||||
Ok(()) => {
|
||||
match self.binned_events.output() {
|
||||
Ok(Some(x)) => {
|
||||
if x.len() == 0 {
|
||||
Continue(())
|
||||
} else {
|
||||
Break(Ready(Some(Ok(DataItem(Data(x))))))
|
||||
}
|
||||
}
|
||||
Ok(None) => Continue(()),
|
||||
Err(e) => Break(Ready(Some(Err(err::Error::from_string(e))))),
|
||||
}
|
||||
// Continue(())
|
||||
}
|
||||
Err(e) => Break(Ready(Some(Err(err::Error::from_string(e))))),
|
||||
},
|
||||
ChannelEvents::Status(_) => {
|
||||
// TODO use the status
|
||||
Continue(())
|
||||
}
|
||||
},
|
||||
RangeComplete => {
|
||||
self.range_complete = true;
|
||||
Continue(())
|
||||
}
|
||||
},
|
||||
Log(x) => Break(Ready(Some(Ok(Log(x))))),
|
||||
Stats(x) => Break(Ready(Some(Ok(Stats(x))))),
|
||||
},
|
||||
Err(e) => {
|
||||
self.state = StreamState::Done;
|
||||
Break(Ready(Some(Err(e))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_eos(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<<Self as Stream>::Item>> {
|
||||
trace_input_container!("handle_eos");
|
||||
use items_0::streamitem::RangeCompletableItem::*;
|
||||
use items_0::streamitem::StreamItem::*;
|
||||
use Poll::*;
|
||||
self.state = StreamState::Done;
|
||||
if self.range_complete {
|
||||
self.binned_events
|
||||
.input_done_range_final()
|
||||
.map_err(err::Error::from_string)?;
|
||||
} else {
|
||||
self.binned_events
|
||||
.input_done_range_open()
|
||||
.map_err(err::Error::from_string)?;
|
||||
}
|
||||
match self.binned_events.output().map_err(err::Error::from_string)? {
|
||||
Some(x) => {
|
||||
trace_emit!("seeing ready bins {:?}", x);
|
||||
Ready(Some(Ok(DataItem(Data(x)))))
|
||||
}
|
||||
None => {
|
||||
let item = LogItem::from_node(888, Level::INFO, format!("no bins ready on eos"));
|
||||
Ready(Some(Ok(Log(item))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_main(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<<Self as Stream>::Item>>> {
|
||||
use ControlFlow::*;
|
||||
use Poll::*;
|
||||
let ret = match &self.state {
|
||||
StreamState::Reading => match self.as_mut().inp.poll_next_unpin(cx) {
|
||||
Ready(Some(x)) => self.as_mut().handle_sitemty(x, cx),
|
||||
Ready(None) => Break(self.as_mut().handle_eos(cx)),
|
||||
Pending => Break(Pending),
|
||||
},
|
||||
StreamState::Done => {
|
||||
self.state = StreamState::Invalid;
|
||||
Break(Ready(None))
|
||||
}
|
||||
StreamState::Invalid => {
|
||||
panic!("StreamState::Invalid")
|
||||
}
|
||||
};
|
||||
if let Break(Ready(Some(Err(_)))) = ret {
|
||||
self.state = StreamState::Done;
|
||||
}
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for BinnedEventsTimeweightStream {
|
||||
type Item = Sitemty<Box<dyn BinningggContainerBinsDyn>>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
use ControlFlow::*;
|
||||
loop {
|
||||
break match self.as_mut().handle_main(cx) {
|
||||
Break(x) => x,
|
||||
Continue(()) => continue,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
85
src/binning/valuetype.rs
Normal file
85
src/binning/valuetype.rs
Normal file
@@ -0,0 +1,85 @@
|
||||
use super::aggregator::AggregatorTimeWeight;
|
||||
use super::container_events::Container;
|
||||
use super::container_events::EventValueType;
|
||||
use core::fmt;
|
||||
use items_0::vecpreview::PreviewRange;
|
||||
use netpod::DtNano;
|
||||
use netpod::EnumVariant;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::VecDeque;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EnumVariantContainer {
|
||||
ixs: VecDeque<u16>,
|
||||
names: VecDeque<String>,
|
||||
}
|
||||
|
||||
impl PreviewRange for EnumVariantContainer {
|
||||
fn preview<'a>(&'a self) -> Box<dyn fmt::Debug + 'a> {
|
||||
let ret = items_0::vecpreview::PreviewCell {
|
||||
a: self.ixs.front(),
|
||||
b: self.ixs.back(),
|
||||
};
|
||||
Box::new(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl Container<EnumVariant> for EnumVariantContainer {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
ixs: VecDeque::new(),
|
||||
names: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn push_back(&mut self, val: EnumVariant) {
|
||||
let (ix, name) = val.into_parts();
|
||||
self.ixs.push_back(ix);
|
||||
self.names.push_back(name);
|
||||
}
|
||||
|
||||
fn pop_front(&mut self) -> Option<EnumVariant> {
|
||||
if let (Some(a), Some(b)) = (self.ixs.pop_front(), self.names.pop_front()) {
|
||||
Some(EnumVariant::new(a, b))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EnumVariantAggregatorTimeWeight {
|
||||
sum: f32,
|
||||
}
|
||||
|
||||
impl AggregatorTimeWeight<EnumVariant> for EnumVariantAggregatorTimeWeight {
|
||||
fn new() -> Self {
|
||||
Self { sum: 0. }
|
||||
}
|
||||
|
||||
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EnumVariant) {
|
||||
let f = dt.ns() as f32 / bl.ns() as f32;
|
||||
eprintln!("INGEST ENUM {} {:?}", f, val);
|
||||
self.sum += f * val.ix() as f32;
|
||||
}
|
||||
|
||||
fn reset_for_new_bin(&mut self) {
|
||||
self.sum = 0.;
|
||||
}
|
||||
|
||||
fn result_and_reset_for_new_bin(
|
||||
&mut self,
|
||||
filled_width_fraction: f32,
|
||||
) -> <EnumVariant as EventValueType>::AggTimeWeightOutputAvg {
|
||||
let ret = self.sum.clone();
|
||||
self.sum = 0.;
|
||||
ret / filled_width_fraction
|
||||
}
|
||||
}
|
||||
|
||||
impl EventValueType for EnumVariant {
|
||||
type Container = EnumVariantContainer;
|
||||
type AggregatorTimeWeight = EnumVariantAggregatorTimeWeight;
|
||||
type AggTimeWeightOutputAvg = f32;
|
||||
}
|
||||
905
src/binsdim0.rs
Normal file
905
src/binsdim0.rs
Normal file
@@ -0,0 +1,905 @@
|
||||
use crate::ts_offs_from_abs;
|
||||
use crate::ts_offs_from_abs_with_anchor;
|
||||
use crate::IsoDateTime;
|
||||
use daqbuf_err as err;
|
||||
use err::Error;
|
||||
use items_0::collect_s::CollectableDyn;
|
||||
use items_0::collect_s::CollectableType;
|
||||
use items_0::collect_s::CollectedDyn;
|
||||
use items_0::collect_s::CollectorTy;
|
||||
use items_0::collect_s::ToJsonResult;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::overlap::HasTimestampDeque;
|
||||
use items_0::scalar_ops::AsPrimF32;
|
||||
use items_0::scalar_ops::ScalarOps;
|
||||
use items_0::timebin::TimeBinnableTy;
|
||||
use items_0::timebin::TimeBinnerTy;
|
||||
use items_0::timebin::TimeBins;
|
||||
use items_0::vecpreview::VecPreview;
|
||||
use items_0::AppendAllFrom;
|
||||
use items_0::AppendEmptyBin;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::AsAnyRef;
|
||||
use items_0::Empty;
|
||||
use items_0::HasNonemptyFirstBin;
|
||||
use items_0::Resettable;
|
||||
use items_0::TypeName;
|
||||
use items_0::WithLen;
|
||||
use netpod::is_false;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::SeriesRange;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::BinnedRange;
|
||||
use netpod::BinnedRangeEnum;
|
||||
use netpod::CmpZero;
|
||||
use netpod::Dim0Kind;
|
||||
use netpod::TsNano;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::ops::Range;
|
||||
|
||||
#[allow(unused)]
|
||||
macro_rules! trace_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
|
||||
// TODO make members private
|
||||
#[derive(Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct BinsDim0<NTY> {
|
||||
pub ts1s: VecDeque<u64>,
|
||||
pub ts2s: VecDeque<u64>,
|
||||
pub cnts: VecDeque<u64>,
|
||||
pub mins: VecDeque<NTY>,
|
||||
pub maxs: VecDeque<NTY>,
|
||||
pub avgs: VecDeque<f32>,
|
||||
pub lsts: VecDeque<NTY>,
|
||||
pub dim0kind: Option<Dim0Kind>,
|
||||
}
|
||||
|
||||
impl<STY> TypeName for BinsDim0<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> fmt::Debug for BinsDim0<NTY>
|
||||
where
|
||||
NTY: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
let self_name = any::type_name::<Self>();
|
||||
if true {
|
||||
return fmt::Display::fmt(self, fmt);
|
||||
}
|
||||
if true {
|
||||
write!(
|
||||
fmt,
|
||||
"{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
|
||||
self.ts1s.len(),
|
||||
self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
|
||||
self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
|
||||
self.cnts,
|
||||
self.mins,
|
||||
self.maxs,
|
||||
self.avgs,
|
||||
)
|
||||
} else {
|
||||
write!(
|
||||
fmt,
|
||||
"{self_name} count {} edges {:?} .. {:?} counts {:?} .. {:?} avgs {:?} .. {:?}",
|
||||
self.ts1s.len(),
|
||||
self.ts1s.front().map(|k| k / SEC),
|
||||
self.ts2s.back().map(|k| k / SEC),
|
||||
self.cnts.front(),
|
||||
self.cnts.back(),
|
||||
self.avgs.front(),
|
||||
self.avgs.back(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> fmt::Display for BinsDim0<NTY>
|
||||
where
|
||||
NTY: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
let self_name = any::type_name::<Self>();
|
||||
write!(
|
||||
fmt,
|
||||
"{self_name} {{ len: {:?}, ts1s: {:?}, ts2s {:?}, counts {:?}, mins {:?}, maxs {:?}, avgs {:?}, lsts {:?} }}",
|
||||
self.len(),
|
||||
VecPreview::new(&self.ts1s),
|
||||
VecPreview::new(&self.ts2s),
|
||||
VecPreview::new(&self.cnts),
|
||||
VecPreview::new(&self.mins),
|
||||
VecPreview::new(&self.maxs),
|
||||
VecPreview::new(&self.avgs),
|
||||
VecPreview::new(&self.lsts),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: ScalarOps> BinsDim0<NTY> {
|
||||
pub fn push(&mut self, ts1: u64, ts2: u64, count: u64, min: NTY, max: NTY, avg: f32, lst: NTY) {
|
||||
if avg < min.as_prim_f32_b() || avg > max.as_prim_f32_b() {
|
||||
// TODO rounding issues?
|
||||
debug!("bad avg");
|
||||
}
|
||||
self.ts1s.push_back(ts1);
|
||||
self.ts2s.push_back(ts2);
|
||||
self.cnts.push_back(count);
|
||||
self.mins.push_back(min);
|
||||
self.maxs.push_back(max);
|
||||
self.avgs.push_back(avg);
|
||||
self.lsts.push_back(lst);
|
||||
}
|
||||
|
||||
pub fn equal_slack(&self, other: &Self) -> bool {
|
||||
if self.len() != other.len() {
|
||||
return false;
|
||||
}
|
||||
for (&a, &b) in self.ts1s.iter().zip(other.ts1s.iter()) {
|
||||
if a != b {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (&a, &b) in self.ts2s.iter().zip(other.ts2s.iter()) {
|
||||
if a != b {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (a, b) in self.mins.iter().zip(other.mins.iter()) {
|
||||
if !a.equal_slack(b) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (a, b) in self.maxs.iter().zip(other.maxs.iter()) {
|
||||
if !a.equal_slack(b) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (a, b) in self.avgs.iter().zip(other.avgs.iter()) {
|
||||
if !a.equal_slack(b) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
// TODO make this part of a new bins trait, similar like Events trait.
|
||||
// TODO check for error?
|
||||
pub fn drain_into(&mut self, dst: &mut Self, range: Range<usize>) -> () {
|
||||
dst.ts1s.extend(self.ts1s.drain(range.clone()));
|
||||
dst.ts2s.extend(self.ts2s.drain(range.clone()));
|
||||
dst.cnts.extend(self.cnts.drain(range.clone()));
|
||||
dst.mins.extend(self.mins.drain(range.clone()));
|
||||
dst.maxs.extend(self.maxs.drain(range.clone()));
|
||||
dst.avgs.extend(self.avgs.drain(range.clone()));
|
||||
dst.lsts.extend(self.lsts.drain(range.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyRef for BinsDim0<NTY>
|
||||
where
|
||||
NTY: ScalarOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyMut for BinsDim0<STY>
|
||||
where
|
||||
STY: ScalarOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> Empty for BinsDim0<STY> {
|
||||
fn empty() -> Self {
|
||||
Self {
|
||||
ts1s: VecDeque::new(),
|
||||
ts2s: VecDeque::new(),
|
||||
cnts: VecDeque::new(),
|
||||
mins: VecDeque::new(),
|
||||
maxs: VecDeque::new(),
|
||||
avgs: VecDeque::new(),
|
||||
lsts: VecDeque::new(),
|
||||
dim0kind: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> WithLen for BinsDim0<STY> {
    /// Number of bins; `ts1s` is representative since all column deques are
    /// pushed in lockstep.
    fn len(&self) -> usize {
        self.ts1s.len()
    }
}
|
||||
|
||||
impl<STY: ScalarOps> ByteEstimate for BinsDim0<STY> {
    /// Rough in-memory payload estimate in bytes.
    ///
    /// Samples one representative element (at index `n * 2 / 3`) for the
    /// variable-width value columns rather than walking all elements.
    fn byte_estimate(&self) -> u64 {
        // TODO
        // Should use a better estimate for waveform and string types,
        // or keep some aggregated byte count on push.
        let n = self.len();
        if n == 0 {
            0
        } else {
            // TODO use the actual size of one/some of the elements.
            let i = n * 2 / 3;
            let w1 = self.mins[i].byte_estimate();
            let w2 = self.maxs[i].byte_estimate();
            // Bugfix: the `lsts` column stores one value per bin as well but
            // was previously missing from the per-bin estimate.
            let w3 = self.lsts[i].byte_estimate();
            // Fixed-width columns: ts1 (8) + ts2 (8) + cnt (8) + avg (4).
            // The trailing redundant `as u64` cast was removed.
            n as u64 * (8 + 8 + 8 + 4 + w1 + w2 + w3)
        }
    }
}
|
||||
|
||||
impl<STY> Resettable for BinsDim0<STY> {
    /// Clears all per-bin columns. `dim0kind` is retained (not cleared here).
    fn reset(&mut self) {
        let Self {
            ts1s,
            ts2s,
            cnts,
            mins,
            maxs,
            avgs,
            lsts,
            dim0kind: _,
        } = self;
        ts1s.clear();
        ts2s.clear();
        cnts.clear();
        mins.clear();
        maxs.clear();
        avgs.clear();
        lsts.clear();
    }
}
|
||||
|
||||
impl<STY: ScalarOps> HasNonemptyFirstBin for BinsDim0<STY> {
    /// True when a first bin exists and its event count is non-zero.
    fn has_nonempty_first_bin(&self) -> bool {
        match self.cnts.front() {
            Some(&cnt) => cnt > 0,
            None => false,
        }
    }
}
|
||||
|
||||
impl<STY: ScalarOps> HasTimestampDeque for BinsDim0<STY> {
    /// Begin timestamp of the first bin, if any.
    fn timestamp_min(&self) -> Option<u64> {
        self.ts1s.front().copied()
    }

    /// End timestamp of the last bin, if any.
    fn timestamp_max(&self) -> Option<u64> {
        self.ts2s.back().copied()
    }

    /// Not implemented: bins carry no pulse ids.
    fn pulse_min(&self) -> Option<u64> {
        todo!()
    }

    /// Not implemented: bins carry no pulse ids.
    fn pulse_max(&self) -> Option<u64> {
        todo!()
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> AppendEmptyBin for BinsDim0<NTY> {
|
||||
fn append_empty_bin(&mut self, ts1: u64, ts2: u64) {
|
||||
debug!("AppendEmptyBin::append_empty_bin should not get used");
|
||||
self.ts1s.push_back(ts1);
|
||||
self.ts2s.push_back(ts2);
|
||||
self.cnts.push_back(0);
|
||||
self.mins.push_back(NTY::zero_b());
|
||||
self.maxs.push_back(NTY::zero_b());
|
||||
self.avgs.push_back(0.);
|
||||
self.lsts.push_back(NTY::zero_b());
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: ScalarOps> AppendAllFrom for BinsDim0<NTY> {
|
||||
fn append_all_from(&mut self, src: &mut Self) {
|
||||
debug!("AppendAllFrom::append_all_from should not get used");
|
||||
self.ts1s.extend(src.ts1s.drain(..));
|
||||
self.ts2s.extend(src.ts2s.drain(..));
|
||||
self.cnts.extend(src.cnts.drain(..));
|
||||
self.mins.extend(src.mins.drain(..));
|
||||
self.maxs.extend(src.maxs.drain(..));
|
||||
self.avgs.extend(src.avgs.drain(..));
|
||||
self.lsts.extend(src.lsts.drain(..));
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: ScalarOps> TimeBins for BinsDim0<NTY> {
    /// Begin timestamp of the first bin.
    fn ts_min(&self) -> Option<u64> {
        self.ts1s.front().copied()
    }

    /// End timestamp of the last bin.
    fn ts_max(&self) -> Option<u64> {
        self.ts2s.back().copied()
    }

    /// Both boundaries at once; `None` unless both exist.
    fn ts_min_max(&self) -> Option<(u64, u64)> {
        let min = self.ts1s.front().copied()?;
        let max = self.ts2s.back().copied()?;
        Some((min, max))
    }
}
|
||||
|
||||
/// Time-binner that re-bins `BinsDim0` input onto the grid given by `binrange`.
/// Holds the aggregation state of the bin currently being filled.
#[derive(Debug)]
pub struct BinsDim0TimeBinnerTy<STY>
where
    STY: ScalarOps,
{
    // Begin of the bin currently being filled.
    ts1now: TsNano,
    // End of the bin currently being filled.
    ts2now: TsNano,
    // Output grid definition (bin length, full range).
    binrange: BinnedRange<TsNano>,
    // If false, non-time-weighted paths panic with a TODO (see ingest).
    do_time_weight: bool,
    // Whether bins with zero count should still be emitted.
    emit_empty_bins: bool,
    // Set via set_range_complete().
    range_complete: bool,
    // Finished bins waiting to be handed out via bins_ready().
    out: <Self as TimeBinnerTy>::Output,
    // --- accumulators for the bin in progress ---
    cnt: u64,
    min: STY,
    max: STY,
    avg: f64,
    // Last seen value; used to fill the tail of partially covered bins.
    lst: STY,
    // How far into the current bin input has been accounted for.
    filled_up_to: TsNano,
    // Average of the most recent non-empty input bin.
    last_seen_avg: f32,
}
|
||||
|
||||
impl<STY> BinsDim0TimeBinnerTy<STY>
where
    STY: ScalarOps,
{
    /// Fully-qualified type name, used in log/trace messages.
    pub fn type_name() -> &'static str {
        any::type_name::<Self>()
    }

    /// Creates a binner positioned on the first bin of `binrange`,
    /// with all accumulators zeroed.
    pub fn new(binrange: BinnedRange<TsNano>, do_time_weight: bool, emit_empty_bins: bool) -> Self {
        // let ts1now = TsNano::from_ns(binrange.bin_off * binrange.bin_len.ns());
        // let ts2 = ts1.add_dt_nano(binrange.bin_len.to_dt_nano());
        let ts1now = TsNano::from_ns(binrange.nano_beg().ns());
        let ts2now = ts1now.add_dt_nano(binrange.bin_len.to_dt_nano());
        Self {
            ts1now,
            ts2now,
            binrange,
            do_time_weight,
            emit_empty_bins,
            range_complete: false,
            out: <Self as TimeBinnerTy>::Output::empty(),
            cnt: 0,
            min: STY::zero_b(),
            max: STY::zero_b(),
            avg: 0.,
            lst: STY::zero_b(),
            // Nothing of the first bin has been covered yet.
            filled_up_to: ts1now,
            last_seen_avg: 0.,
        }
    }

    // used internally for the aggregation
    /// Clears the per-bin accumulators.
    /// NOTE(review): `lst` and `filled_up_to` are deliberately left untouched —
    /// the last seen value appears to be carried forward to fill following
    /// bins (see push_in_progress); confirm this is intentional.
    fn reset_agg(&mut self) {
        self.cnt = 0;
        self.min = STY::zero_b();
        self.max = STY::zero_b();
        self.avg = 0.;
    }
}
|
||||
|
||||
impl<STY> TimeBinnerTy for BinsDim0TimeBinnerTy<STY>
where
    STY: ScalarOps,
{
    type Input = BinsDim0<STY>;
    type Output = BinsDim0<STY>;

    /// Folds the input bins into the output grid. Input bins must be aligned
    /// to the output grid (same or finer bin length); misaligned or oversized
    /// input bins panic, bins from before the current bin are counted and
    /// warned about but only update min/max/lst.
    fn ingest(&mut self, item: &mut Self::Input) {
        trace_ingest!("<{} as TimeBinnerTy>::ingest {:?}", Self::type_name(), item);
        let mut count_before = 0;
        // Iterate all seven columns of the input in lock-step.
        for ((((((&ts1, &ts2), &cnt), min), max), &avg), lst) in item
            .ts1s
            .iter()
            .zip(&item.ts2s)
            .zip(&item.cnts)
            .zip(&item.mins)
            .zip(&item.maxs)
            .zip(&item.avgs)
            .zip(&item.lsts)
        {
            if ts1 < self.ts1now.ns() {
                // Input bin begins before the current output bin.
                if ts2 > self.ts1now.ns() {
                    // It also reaches into the current bin: grid mismatch.
                    error!("{} bad input grid mismatch", Self::type_name());
                    continue;
                }
                // warn!("encountered bin from time before {} {}", ts1, self.ts1now.ns());
                trace_ingest!("{} input bin before {}", Self::type_name(), TsNano::from_ns(ts1));
                // Only min/max/lst are taken over; cnt/avg are dropped.
                self.min = min.clone();
                self.max = max.clone();
                self.lst = lst.clone();
                count_before += 1;
                continue;
            } else {
                if ts2 > self.ts2now.ns() {
                    // Input bin ends beyond the current output bin.
                    if ts2 - ts1 > self.ts2now.ns() - self.ts1now.ns() {
                        panic!("incoming bin len too large");
                    } else if ts1 < self.ts2now.ns() {
                        panic!("encountered unaligned input bin");
                    } else {
                        // Input bin is fully in the future: emit/advance until
                        // the current output bin contains it (bounded loop).
                        let mut i = 0;
                        while ts1 >= self.ts2now.ns() {
                            self.cycle();
                            i += 1;
                            if i > 50000 {
                                panic!("cycle forward too many iterations");
                            }
                        }
                    }
                } else {
                    // ok, we're still inside the current bin
                }
            }
            if cnt == 0 {
                // ignore input bin, it does not contain any valid information.
            } else {
                if self.cnt == 0 {
                    // First contribution to the current output bin.
                    self.cnt = cnt;
                    self.min = min.clone();
                    self.max = max.clone();
                    if self.do_time_weight {
                        // Weight the input avg by its time coverage of the output bin.
                        let f = (ts2 - ts1) as f64 / (self.ts2now.ns() - self.ts1now.ns()) as f64;
                        self.avg = avg as f64 * f;
                    } else {
                        panic!("TODO non-time-weighted binning to be impl");
                    }
                } else {
                    // Subsequent contribution: merge into the accumulators.
                    self.cnt += cnt;
                    if *min < self.min {
                        self.min = min.clone();
                    }
                    if *max > self.max {
                        self.max = max.clone();
                    }
                    if self.do_time_weight {
                        let f = (ts2 - ts1) as f64 / (self.ts2now.ns() - self.ts1now.ns()) as f64;
                        self.avg += avg as f64 * f;
                    } else {
                        panic!("TODO non-time-weighted binning to be impl");
                    }
                }
                self.filled_up_to = TsNano::from_ns(ts2);
                self.last_seen_avg = avg;
            }
        }
        if count_before != 0 {
            warn!(
                "----- seen {} / {} input bins from time before",
                count_before,
                item.len()
            );
        }
    }

    /// Marks that the full requested range has been seen.
    fn set_range_complete(&mut self) {
        self.range_complete = true;
    }

    /// Number of finished output bins waiting to be taken.
    fn bins_ready_count(&self) -> usize {
        self.out.len()
    }

    /// Takes the finished output bins, leaving an empty buffer behind.
    fn bins_ready(&mut self) -> Option<Self::Output> {
        if self.out.len() != 0 {
            let ret = core::mem::replace(&mut self.out, BinsDim0::empty());
            Some(ret)
        } else {
            None
        }
    }

    /// Finalizes the bin in progress and appends it to the output buffer.
    /// A partially covered, non-empty bin has its tail filled with the last
    /// seen value (time-weighted). Empty bins are emitted only when
    /// `push_empty` is set.
    fn push_in_progress(&mut self, push_empty: bool) {
        if self.filled_up_to != self.ts2now {
            if self.cnt != 0 {
                info!("push_in_progress partially filled bin");
                if self.do_time_weight {
                    // Fill the uncovered tail of the bin with the last value.
                    let f = (self.ts2now.ns() - self.filled_up_to.ns()) as f64
                        / (self.ts2now.ns() - self.ts1now.ns()) as f64;
                    self.avg += self.lst.as_prim_f32_b() as f64 * f;
                    self.filled_up_to = self.ts2now;
                } else {
                    panic!("TODO non-time-weighted binning to be impl");
                }
            } else {
                if self.filled_up_to != self.ts1now {
                    error!("partially filled bin with cnt 0");
                }
            }
        }
        if self.cnt == 0 && !push_empty {
            self.reset_agg();
        } else {
            let min = self.min.clone();
            let max = self.max.clone();
            let avg = self.avg as f32;
            // Sanity check: a time-weighted avg should lie within [min, max].
            if avg < min.as_prim_f32_b() || avg > max.as_prim_f32_b() {
                // TODO rounding issues?
                debug!("bad avg");
            }
            self.out.ts1s.push_back(self.ts1now.ns());
            self.out.ts2s.push_back(self.ts2now.ns());
            self.out.cnts.push_back(self.cnt);
            self.out.mins.push_back(min);
            self.out.maxs.push_back(max);
            self.out.avgs.push_back(avg);
            self.out.lsts.push_back(self.lst.clone());
            self.reset_agg();
        }
    }

    /// Emits the bin in progress (including empty bins) and advances the
    /// current bin window by one bin length.
    fn cycle(&mut self) {
        self.push_in_progress(true);
        self.ts1now = self.ts1now.add_dt_nano(self.binrange.bin_len.to_dt_nano());
        self.ts2now = self.ts2now.add_dt_nano(self.binrange.bin_len.to_dt_nano());
    }

    /// An empty output container of the right type.
    fn empty(&self) -> Option<Self::Output> {
        Some(<Self as TimeBinnerTy>::Output::empty())
    }

    /// Cycles empty bins until the end of the configured range (bounded loop).
    fn append_empty_until_end(&mut self) {
        let mut i = 0;
        while self.ts2now.ns() < self.binrange.full_range().end() {
            self.cycle();
            i += 1;
            if i > 100000 {
                panic!("append_empty_until_end too many iterations");
            }
        }
    }
}
|
||||
|
||||
impl<STY: ScalarOps> TimeBinnableTy for BinsDim0<STY> {
|
||||
type TimeBinner = BinsDim0TimeBinnerTy<STY>;
|
||||
|
||||
fn time_binner_new(
|
||||
&self,
|
||||
binrange: BinnedRangeEnum,
|
||||
do_time_weight: bool,
|
||||
emit_empty_bins: bool,
|
||||
) -> Self::TimeBinner {
|
||||
match binrange {
|
||||
BinnedRangeEnum::Time(binrange) => BinsDim0TimeBinnerTy::new(binrange, do_time_weight, emit_empty_bins),
|
||||
BinnedRangeEnum::Pulse(_) => todo!("TimeBinnableTy for BinsDim0 Pulse"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO rename to BinsDim0CollectorOutput
/// JSON-facing result of collecting `BinsDim0` bins: timestamps are split
/// into a seconds anchor plus per-bin ms/ns offsets (see the serde renames
/// for the wire field names).
#[derive(Debug, Serialize, Deserialize)]
pub struct BinsDim0CollectedResult<NTY> {
    // Common anchor in whole seconds; offsets below are relative to it.
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    // Bin-begin offsets, millisecond part.
    #[serde(rename = "ts1Ms")]
    ts1_off_ms: VecDeque<u64>,
    // Bin-end offsets, millisecond part.
    #[serde(rename = "ts2Ms")]
    ts2_off_ms: VecDeque<u64>,
    // Bin-begin offsets, nanosecond remainder.
    #[serde(rename = "ts1Ns")]
    ts1_off_ns: VecDeque<u64>,
    // Bin-end offsets, nanosecond remainder.
    #[serde(rename = "ts2Ns")]
    ts2_off_ns: VecDeque<u64>,
    #[serde(rename = "counts")]
    counts: VecDeque<u64>,
    #[serde(rename = "mins")]
    mins: VecDeque<NTY>,
    #[serde(rename = "maxs")]
    maxs: VecDeque<NTY>,
    #[serde(rename = "avgs")]
    avgs: VecDeque<f32>,
    // Flags and continuation info are omitted from JSON when trivial.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")]
    missing_bins: u32,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
    #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")]
    finished_at: Option<IsoDateTime>,
}
|
||||
|
||||
// TODO temporary fix for the enum output
impl<STY> BinsDim0CollectedResult<STY>
where
    STY: ScalarOps,
{
    /// Boxes `self` as a dynamic collected result; when `self` is the
    /// enum-variant specialization it is converted to a `u16`-valued result,
    /// otherwise a plain clone is boxed.
    ///
    /// NOTE(review): several oddities in the enum branch that look like
    /// placeholder code — confirm before relying on the output:
    /// - the downcast binding `bins` is never used (shadowed below);
    /// - mins/maxs are hard-coded to the literals 6 and 7, ignoring the
    ///   actual enum values;
    /// - `maxs` is built from `self.mins` (harmless only because the closure
    ///   ignores its argument, but almost certainly meant to be `self.maxs`).
    pub fn boxed_collected_with_enum_fix(&self) -> Box<dyn CollectedDyn> {
        if let Some(bins) = self
            .as_any_ref()
            .downcast_ref::<BinsDim0CollectedResult<netpod::EnumVariant>>()
        {
            debug!("boxed_collected_with_enum_fix");
            let mins = self.mins.iter().map(|x| 6).collect();
            let maxs = self.mins.iter().map(|x| 7).collect();
            let bins = BinsDim0CollectedResult::<u16> {
                ts_anchor_sec: self.ts_anchor_sec.clone(),
                ts1_off_ms: self.ts1_off_ms.clone(),
                ts2_off_ms: self.ts2_off_ms.clone(),
                ts1_off_ns: self.ts1_off_ns.clone(),
                ts2_off_ns: self.ts2_off_ns.clone(),
                counts: self.counts.clone(),
                mins,
                maxs,
                avgs: self.avgs.clone(),
                range_final: self.range_final.clone(),
                timed_out: self.timed_out.clone(),
                missing_bins: self.missing_bins.clone(),
                continue_at: self.continue_at.clone(),
                finished_at: self.finished_at.clone(),
            };
            Box::new(bins)
        } else {
            // Non-enum case: box a field-by-field clone of self.
            let bins = Self {
                ts_anchor_sec: self.ts_anchor_sec.clone(),
                ts1_off_ms: self.ts1_off_ms.clone(),
                ts2_off_ms: self.ts2_off_ms.clone(),
                ts1_off_ns: self.ts1_off_ns.clone(),
                ts2_off_ns: self.ts2_off_ns.clone(),
                counts: self.counts.clone(),
                mins: self.mins.clone(),
                maxs: self.maxs.clone(),
                avgs: self.avgs.clone(),
                range_final: self.range_final.clone(),
                timed_out: self.timed_out.clone(),
                missing_bins: self.missing_bins.clone(),
                continue_at: self.continue_at.clone(),
                finished_at: self.finished_at.clone(),
            };
            Box::new(bins)
        }
    }
}
|
||||
|
||||
impl<NTY> AsAnyRef for BinsDim0CollectedResult<NTY>
|
||||
where
|
||||
NTY: 'static,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyMut for BinsDim0CollectedResult<NTY>
|
||||
where
|
||||
NTY: 'static,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> TypeName for BinsDim0CollectedResult<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: ScalarOps> WithLen for BinsDim0CollectedResult<NTY> {
    /// Number of collected bins; the `mins` column is the length reference.
    fn len(&self) -> usize {
        let Self { mins, .. } = self;
        mins.len()
    }
}
|
||||
|
||||
// Marker impl: makes this result usable through the dynamic collected-output interface.
impl<NTY: ScalarOps> CollectedDyn for BinsDim0CollectedResult<NTY> {}
|
||||
|
||||
impl<NTY> BinsDim0CollectedResult<NTY> {
    /// Common timestamp anchor in whole seconds.
    pub fn ts_anchor_sec(&self) -> u64 {
        self.ts_anchor_sec
    }

    /// Bin-begin offsets (ms part) relative to the anchor.
    pub fn ts1_off_ms(&self) -> &VecDeque<u64> {
        &self.ts1_off_ms
    }

    /// Bin-end offsets (ms part) relative to the anchor.
    pub fn ts2_off_ms(&self) -> &VecDeque<u64> {
        &self.ts2_off_ms
    }

    /// Per-bin event counts.
    pub fn counts(&self) -> &VecDeque<u64> {
        &self.counts
    }

    /// Whether the requested range was fully covered.
    pub fn range_final(&self) -> bool {
        self.range_final
    }

    /// Whether collection stopped due to a timeout.
    pub fn timed_out(&self) -> bool {
        self.timed_out
    }

    /// How many expected bins are absent from this result.
    pub fn missing_bins(&self) -> u32 {
        self.missing_bins
    }

    /// Where a follow-up request should resume, if bins are missing.
    pub fn continue_at(&self) -> Option<IsoDateTime> {
        self.continue_at.clone()
    }

    /// Per-bin minima.
    pub fn mins(&self) -> &VecDeque<NTY> {
        &self.mins
    }

    /// Per-bin maxima.
    pub fn maxs(&self) -> &VecDeque<NTY> {
        &self.maxs
    }

    /// Per-bin averages.
    pub fn avgs(&self) -> &VecDeque<f32> {
        &self.avgs
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> ToJsonResult for BinsDim0CollectedResult<NTY> {
|
||||
fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
|
||||
serde_json::to_value(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Accumulates `BinsDim0` batches until `result()` turns them into a
/// `BinsDim0CollectedResult`.
#[derive(Debug)]
pub struct BinsDim0Collector<NTY> {
    // Lazily created on first ingest; None means nothing collected yet.
    vals: Option<BinsDim0<NTY>>,
    // Set via set_timed_out(); forwarded into the result.
    timed_out: bool,
    // Set via set_range_complete(); forwarded into the result.
    range_final: bool,
}
|
||||
|
||||
impl<NTY> BinsDim0Collector<NTY> {
|
||||
pub fn self_name() -> &'static str {
|
||||
any::type_name::<Self>()
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
timed_out: false,
|
||||
range_final: false,
|
||||
vals: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for BinsDim0Collector<NTY> {
    /// Number of bins buffered so far; zero before the first ingest.
    fn len(&self) -> usize {
        match &self.vals {
            Some(v) => v.len(),
            None => 0,
        }
    }
}
|
||||
|
||||
impl<STY: ScalarOps> ByteEstimate for BinsDim0Collector<STY> {
    /// Byte estimate of the buffered bins; zero before the first ingest.
    fn byte_estimate(&self) -> u64 {
        match &self.vals {
            Some(v) => v.byte_estimate(),
            None => 0,
        }
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> CollectorTy for BinsDim0Collector<NTY> {
    type Input = BinsDim0<NTY>;
    type Output = BinsDim0CollectedResult<NTY>;

    /// Moves all columns out of `src` into the internal buffer,
    /// creating the buffer on first use. `src` is left empty.
    fn ingest(&mut self, src: &mut Self::Input) {
        if self.vals.is_none() {
            self.vals = Some(Self::Input::empty());
        }
        let vals = self.vals.as_mut().unwrap();
        vals.ts1s.append(&mut src.ts1s);
        vals.ts2s.append(&mut src.ts2s);
        vals.cnts.append(&mut src.cnts);
        vals.mins.append(&mut src.mins);
        vals.maxs.append(&mut src.maxs);
        vals.avgs.append(&mut src.avgs);
        vals.lsts.append(&mut src.lsts);
    }

    /// Marks the result as covering the full requested range.
    fn set_range_complete(&mut self) {
        self.range_final = true;
    }

    /// Marks the result as cut short by a timeout.
    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }

    fn set_continue_at_here(&mut self) {
        debug!("{}::set_continue_at_here", Self::self_name());
        // TODO for bins, do nothing: either we have all bins or not.
    }

    /// Consumes the buffered bins and builds the collected result:
    /// converts absolute timestamps to anchor+offsets and, when fewer bins
    /// than expected were collected, computes missing-bin/continue-at info
    /// by extrapolating from the last bin's length. Errors if nothing was
    /// ever ingested. Resets the collector on success.
    fn result(
        &mut self,
        _range: Option<SeriesRange>,
        binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        trace!("trying to make a result from {self:?}");
        // Expected bin count from the requested grid; 0 when unknown.
        let bin_count_exp = if let Some(r) = &binrange {
            r.bin_count() as u32
        } else {
            debug!("no binrange given");
            0
        };
        let mut vals = if let Some(x) = self.vals.take() {
            x
        } else {
            return Err(Error::with_msg_no_trace("BinsDim0Collector without vals"));
        };
        let bin_count = vals.ts1s.len() as u32;
        debug!(
            "result make missing bins bin_count_exp {} bin_count {}",
            bin_count_exp, bin_count
        );
        let (missing_bins, continue_at, finished_at) = if bin_count < bin_count_exp {
            match vals.ts2s.back() {
                Some(&k) => {
                    let missing_bins = bin_count_exp - bin_count;
                    // Resume at the end of the last collected bin; estimate
                    // the finish time assuming equal-length bins.
                    let continue_at = IsoDateTime::from_ns_u64(k);
                    let u = k + (k - vals.ts1s.back().unwrap()) * missing_bins as u64;
                    let finished_at = IsoDateTime::from_ns_u64(u);
                    (missing_bins, Some(continue_at), Some(finished_at))
                }
                None => {
                    warn!("can not determine continue-at parameters");
                    (0, None, None)
                }
            }
        } else {
            (0, None, None)
        };
        // The offset conversion below wants contiguous slices.
        if vals.ts1s.as_slices().1.len() != 0 {
            warn!("ts1s non-contiguous");
        }
        if vals.ts2s.as_slices().1.len() != 0 {
            warn!("ts2s non-contiguous");
        }
        let ts1s = vals.ts1s.make_contiguous();
        let ts2s = vals.ts2s.make_contiguous();
        // Split absolute ns timestamps into a common anchor plus ms/ns offsets.
        let (ts_anch, ts1ms, ts1ns) = ts_offs_from_abs(ts1s);
        let (ts2ms, ts2ns) = ts_offs_from_abs_with_anchor(ts_anch, ts2s);
        let counts = vals.cnts;
        let mins = vals.mins;
        let maxs = vals.maxs;
        let avgs = vals.avgs;
        let ret = BinsDim0CollectedResult::<NTY> {
            ts_anchor_sec: ts_anch,
            ts1_off_ms: ts1ms,
            ts1_off_ns: ts1ns,
            ts2_off_ms: ts2ms,
            ts2_off_ns: ts2ns,
            counts,
            mins,
            maxs,
            avgs,
            range_final: self.range_final,
            timed_out: self.timed_out,
            missing_bins,
            continue_at,
            finished_at,
        };
        // Leave the collector ready for reuse.
        *self = Self::new();
        Ok(ret)
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> CollectableType for BinsDim0<NTY> {
|
||||
type Collector = BinsDim0Collector<NTY>;
|
||||
|
||||
fn new_collector() -> Self::Collector {
|
||||
Self::Collector::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Aggregates bins over a single series range into count/min/max/avg state.
#[derive(Debug)]
pub struct BinsDim0Aggregator<NTY> {
    // Range this aggregation covers.
    range: SeriesRange,
    // Total event count accumulated so far.
    cnt: u64,
    // (min, max, last) values; None until the first contribution.
    minmaxlst: Option<(NTY, NTY, NTY)>,
    // Number of samples contributing to `sum` (for the average).
    sumc: u64,
    // Running sum of averages.
    sum: f32,
}
|
||||
|
||||
impl<NTY: ScalarOps> BinsDim0Aggregator<NTY> {
|
||||
pub fn new(range: SeriesRange, _do_time_weight: bool) -> Self {
|
||||
Self {
|
||||
range,
|
||||
cnt: 0,
|
||||
minmaxlst: None,
|
||||
sumc: 0,
|
||||
sum: 0f32,
|
||||
}
|
||||
}
|
||||
}
|
||||
523
src/binsxbindim0.rs
Normal file
523
src/binsxbindim0.rs
Normal file
@@ -0,0 +1,523 @@
|
||||
use crate::ts_offs_from_abs;
|
||||
use crate::ts_offs_from_abs_with_anchor;
|
||||
use crate::IsoDateTime;
|
||||
use daqbuf_err as err;
|
||||
use err::Error;
|
||||
use items_0::collect_s::CollectableDyn;
|
||||
use items_0::collect_s::CollectableType;
|
||||
use items_0::collect_s::CollectedDyn;
|
||||
use items_0::collect_s::CollectorTy;
|
||||
use items_0::collect_s::ToJsonResult;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::scalar_ops::AsPrimF32;
|
||||
use items_0::scalar_ops::ScalarOps;
|
||||
use items_0::timebin::TimeBins;
|
||||
use items_0::AppendEmptyBin;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::AsAnyRef;
|
||||
use items_0::Empty;
|
||||
use items_0::Resettable;
|
||||
use items_0::TypeName;
|
||||
use items_0::WithLen;
|
||||
use netpod::is_false;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::NanoRange;
|
||||
use netpod::range::evrange::SeriesRange;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::BinnedRangeEnum;
|
||||
use netpod::CmpZero;
|
||||
use netpod::Dim0Kind;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::ops::Range;
|
||||
|
||||
// Compile-time trace toggle: the first arm matches any input and expands to
// nothing, so trace4! is a no-op; the second (eprintln) arm is intentionally
// unreachable. Swap the arm order to enable this trace level.
#[allow(unused)]
macro_rules! trace4 {
    ($($arg:tt)*) => ();
    ($($arg:tt)*) => (eprintln!($($arg)*));
}
|
||||
|
||||
/// Time-binned, x-binned scalar bins stored column-wise: parallel deques of
/// bin boundaries, counts and min/max/avg values (one entry per bin).
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct BinsXbinDim0<NTY> {
    // Bin begin timestamps (ns).
    ts1s: VecDeque<u64>,
    // Bin end timestamps (ns).
    ts2s: VecDeque<u64>,
    // Events per bin.
    counts: VecDeque<u64>,
    mins: VecDeque<NTY>,
    maxs: VecDeque<NTY>,
    avgs: VecDeque<f32>,
    // TODO could consider more variables:
    // ts min/max, pulse min/max, avg of mins, avg of maxs, variances, etc...
    dim0kind: Option<Dim0Kind>,
}
|
||||
|
||||
impl<STY> TypeName for BinsXbinDim0<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> fmt::Debug for BinsXbinDim0<NTY>
where
    NTY: fmt::Debug,
{
    /// Compact debug dump: bin count, boundaries (printed in whole seconds,
    /// i.e. divided by SEC) and the value columns.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let self_name = any::type_name::<Self>();
        write!(
            fmt,
            "{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
            self.ts1s.len(),
            self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
            self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
            self.counts,
            self.mins,
            self.maxs,
            self.avgs,
        )
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> BinsXbinDim0<NTY> {
|
||||
pub fn from_content(
|
||||
ts1s: VecDeque<u64>,
|
||||
ts2s: VecDeque<u64>,
|
||||
counts: VecDeque<u64>,
|
||||
mins: VecDeque<NTY>,
|
||||
maxs: VecDeque<NTY>,
|
||||
avgs: VecDeque<f32>,
|
||||
) -> Self {
|
||||
Self {
|
||||
ts1s,
|
||||
ts2s,
|
||||
counts,
|
||||
mins,
|
||||
maxs,
|
||||
avgs,
|
||||
dim0kind: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn counts(&self) -> &VecDeque<u64> {
|
||||
&self.counts
|
||||
}
|
||||
|
||||
pub fn push(&mut self, ts1: u64, ts2: u64, count: u64, min: NTY, max: NTY, avg: f32) {
|
||||
self.ts1s.push_back(ts1);
|
||||
self.ts2s.push_back(ts2);
|
||||
self.counts.push_back(count);
|
||||
self.mins.push_back(min);
|
||||
self.maxs.push_back(max);
|
||||
self.avgs.push_back(avg);
|
||||
}
|
||||
|
||||
pub fn append_zero(&mut self, beg: u64, end: u64) {
|
||||
self.ts1s.push_back(beg);
|
||||
self.ts2s.push_back(end);
|
||||
self.counts.push_back(0);
|
||||
self.mins.push_back(NTY::zero_b());
|
||||
self.maxs.push_back(NTY::zero_b());
|
||||
self.avgs.push_back(0.);
|
||||
}
|
||||
|
||||
pub fn append_all_from(&mut self, src: &mut Self) {
|
||||
self.ts1s.extend(src.ts1s.drain(..));
|
||||
self.ts2s.extend(src.ts2s.drain(..));
|
||||
self.counts.extend(src.counts.drain(..));
|
||||
self.mins.extend(src.mins.drain(..));
|
||||
self.maxs.extend(src.maxs.drain(..));
|
||||
self.avgs.extend(src.avgs.drain(..));
|
||||
}
|
||||
|
||||
pub fn equal_slack(&self, other: &Self) -> bool {
|
||||
for (&a, &b) in self.ts1s.iter().zip(other.ts1s.iter()) {
|
||||
if a != b {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (&a, &b) in self.ts2s.iter().zip(other.ts2s.iter()) {
|
||||
if a != b {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (a, b) in self.mins.iter().zip(other.mins.iter()) {
|
||||
if !a.equal_slack(b) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (a, b) in self.maxs.iter().zip(other.maxs.iter()) {
|
||||
if !a.equal_slack(b) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (a, b) in self.avgs.iter().zip(other.avgs.iter()) {
|
||||
if !a.equal_slack(b) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyRef for BinsXbinDim0<NTY>
|
||||
where
|
||||
NTY: ScalarOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyMut for BinsXbinDim0<STY>
|
||||
where
|
||||
STY: ScalarOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> Empty for BinsXbinDim0<STY> {
    /// Creates a container with all columns empty and no dim0 kind assigned.
    fn empty() -> Self {
        Self {
            ts1s: Default::default(),
            ts2s: Default::default(),
            counts: Default::default(),
            mins: Default::default(),
            maxs: Default::default(),
            avgs: Default::default(),
            dim0kind: None,
        }
    }
}
|
||||
|
||||
impl<STY> WithLen for BinsXbinDim0<STY> {
    /// Number of bins; the `ts1s` column is the length reference.
    fn len(&self) -> usize {
        let Self { ts1s, .. } = self;
        ts1s.len()
    }
}
|
||||
|
||||
impl<STY: ScalarOps> ByteEstimate for BinsXbinDim0<STY> {
    /// Rough in-memory payload estimate in bytes.
    // TODO
    // Should use a better estimate for waveform and string types,
    // or keep some aggregated byte count on push.
    fn byte_estimate(&self) -> u64 {
        match self.len() {
            0 => 0,
            n => {
                // TODO use the actual size of one/some of the elements.
                // Probe one representative element for the variable-width columns.
                let probe = n * 2 / 3;
                let wmin = self.mins[probe].byte_estimate();
                let wmax = self.maxs[probe].byte_estimate();
                // Fixed-width columns: ts1 (8) + ts2 (8) + count (8) + avg (4).
                n as u64 * (8 + 8 + 8 + 4 + wmin + wmax)
            }
        }
    }
}
|
||||
|
||||
impl<STY> Resettable for BinsXbinDim0<STY> {
    /// Clears all per-bin columns. `dim0kind` is retained (not cleared here).
    fn reset(&mut self) {
        let Self {
            ts1s,
            ts2s,
            counts,
            mins,
            maxs,
            avgs,
            dim0kind: _,
        } = self;
        ts1s.clear();
        ts2s.clear();
        counts.clear();
        mins.clear();
        maxs.clear();
        avgs.clear();
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> AppendEmptyBin for BinsXbinDim0<NTY> {
    /// Appends a bin covering `[ts1, ts2)` with zero count and zeroed values.
    fn append_empty_bin(&mut self, ts1: u64, ts2: u64) {
        self.push(ts1, ts2, 0, NTY::zero_b(), NTY::zero_b(), 0.)
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> TimeBins for BinsXbinDim0<NTY> {
    /// Begin timestamp of the first bin.
    fn ts_min(&self) -> Option<u64> {
        self.ts1s.front().copied()
    }

    /// End timestamp of the last bin.
    fn ts_max(&self) -> Option<u64> {
        self.ts2s.back().copied()
    }

    /// Both boundaries at once; `None` unless both exist.
    fn ts_min_max(&self) -> Option<(u64, u64)> {
        let min = self.ts1s.front().copied()?;
        let max = self.ts2s.back().copied()?;
        Some((min, max))
    }
}
|
||||
|
||||
// TODO rename to BinsDim0CollectorOutput
/// JSON-facing result of collecting `BinsXbinDim0` bins: timestamps are split
/// into a seconds anchor plus per-bin ms/ns offsets (see the serde renames
/// for the wire field names).
#[derive(Debug, Serialize, Deserialize)]
pub struct BinsXbinDim0CollectedResult<NTY> {
    // Common anchor in whole seconds; offsets below are relative to it.
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    // Bin-begin offsets, millisecond part.
    #[serde(rename = "ts1Ms")]
    ts1_off_ms: VecDeque<u64>,
    // Bin-end offsets, millisecond part.
    #[serde(rename = "ts2Ms")]
    ts2_off_ms: VecDeque<u64>,
    // Bin-begin offsets, nanosecond remainder.
    #[serde(rename = "ts1Ns")]
    ts1_off_ns: VecDeque<u64>,
    // Bin-end offsets, nanosecond remainder.
    #[serde(rename = "ts2Ns")]
    ts2_off_ns: VecDeque<u64>,
    #[serde(rename = "counts")]
    counts: VecDeque<u64>,
    #[serde(rename = "mins")]
    mins: VecDeque<NTY>,
    #[serde(rename = "maxs")]
    maxs: VecDeque<NTY>,
    #[serde(rename = "avgs")]
    avgs: VecDeque<f32>,
    // Flags and continuation info are omitted from JSON when trivial.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")]
    missing_bins: u32,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
    #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")]
    finished_at: Option<IsoDateTime>,
}
|
||||
|
||||
impl<NTY> AsAnyRef for BinsXbinDim0CollectedResult<NTY>
|
||||
where
|
||||
NTY: ScalarOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyMut for BinsXbinDim0CollectedResult<NTY>
|
||||
where
|
||||
NTY: ScalarOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> TypeName for BinsXbinDim0CollectedResult<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: ScalarOps> WithLen for BinsXbinDim0CollectedResult<NTY> {
    /// Number of collected bins; the `mins` column is the length reference.
    fn len(&self) -> usize {
        let Self { mins, .. } = self;
        mins.len()
    }
}
|
||||
|
||||
// Marker impl: makes this result usable through the dynamic collected-output interface.
impl<NTY: ScalarOps> CollectedDyn for BinsXbinDim0CollectedResult<NTY> {}
|
||||
|
||||
impl<NTY> BinsXbinDim0CollectedResult<NTY> {
    /// Common timestamp anchor in whole seconds.
    pub fn ts_anchor_sec(&self) -> u64 {
        self.ts_anchor_sec
    }

    /// Bin-begin offsets (ms part) relative to the anchor.
    pub fn ts1_off_ms(&self) -> &VecDeque<u64> {
        &self.ts1_off_ms
    }

    /// Bin-end offsets (ms part) relative to the anchor.
    pub fn ts2_off_ms(&self) -> &VecDeque<u64> {
        &self.ts2_off_ms
    }

    /// Per-bin event counts.
    pub fn counts(&self) -> &VecDeque<u64> {
        &self.counts
    }

    /// Whether the requested range was fully covered.
    pub fn range_final(&self) -> bool {
        self.range_final
    }

    /// How many expected bins are absent from this result.
    pub fn missing_bins(&self) -> u32 {
        self.missing_bins
    }

    /// Where a follow-up request should resume, if bins are missing.
    pub fn continue_at(&self) -> Option<IsoDateTime> {
        self.continue_at.clone()
    }

    /// Per-bin minima.
    pub fn mins(&self) -> &VecDeque<NTY> {
        &self.mins
    }

    /// Per-bin maxima.
    pub fn maxs(&self) -> &VecDeque<NTY> {
        &self.maxs
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> ToJsonResult for BinsXbinDim0CollectedResult<NTY> {
|
||||
fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
|
||||
serde_json::to_value(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Accumulates `BinsXbinDim0` batches until `result()` turns them into a
/// `BinsXbinDim0CollectedResult`.
#[derive(Debug)]
pub struct BinsXbinDim0Collector<NTY> {
    // Buffered bins; starts empty (unlike BinsDim0Collector this is not an Option).
    vals: BinsXbinDim0<NTY>,
    // Set via set_timed_out(); forwarded into the result.
    timed_out: bool,
    // Set via set_range_complete(); forwarded into the result.
    range_final: bool,
}
|
||||
|
||||
impl<NTY> BinsXbinDim0Collector<NTY> {
|
||||
pub fn self_name() -> &'static str {
|
||||
any::type_name::<Self>()
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
vals: BinsXbinDim0::empty(),
|
||||
timed_out: false,
|
||||
range_final: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for BinsXbinDim0Collector<NTY> {
    /// Number of bins buffered so far.
    fn len(&self) -> usize {
        let Self { vals, .. } = self;
        vals.len()
    }
}
|
||||
|
||||
impl<STY: ScalarOps> ByteEstimate for BinsXbinDim0Collector<STY> {
    /// Byte estimate of the buffered bins.
    fn byte_estimate(&self) -> u64 {
        let Self { vals, .. } = self;
        vals.byte_estimate()
    }
}
|
||||
|
||||
impl<NTY: ScalarOps> CollectorTy for BinsXbinDim0Collector<NTY> {
|
||||
type Input = BinsXbinDim0<NTY>;
|
||||
type Output = BinsXbinDim0CollectedResult<NTY>;
|
||||
|
||||
fn ingest(&mut self, src: &mut Self::Input) {
|
||||
trace!("\n\n----------- BinsXbinDim0Collector ingest\n{:?}\n\n", src);
|
||||
// TODO could be optimized by non-contiguous container.
|
||||
self.vals.ts1s.append(&mut src.ts1s);
|
||||
self.vals.ts2s.append(&mut src.ts2s);
|
||||
self.vals.counts.append(&mut src.counts);
|
||||
self.vals.mins.append(&mut src.mins);
|
||||
self.vals.maxs.append(&mut src.maxs);
|
||||
self.vals.avgs.append(&mut src.avgs);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_final = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
}
|
||||
|
||||
fn set_continue_at_here(&mut self) {
|
||||
debug!("{}::set_continue_at_here", Self::self_name());
|
||||
// TODO for bins, do nothing: either we have all bins or not.
|
||||
}
|
||||
|
||||
fn result(
|
||||
&mut self,
|
||||
_range: std::option::Option<SeriesRange>,
|
||||
binrange: Option<BinnedRangeEnum>,
|
||||
) -> Result<Self::Output, Error> {
|
||||
let bin_count_exp = if let Some(r) = &binrange {
|
||||
r.bin_count() as u32
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let bin_count = self.vals.ts1s.len() as u32;
|
||||
let (missing_bins, continue_at, finished_at) = if bin_count < bin_count_exp {
|
||||
match self.vals.ts2s.back() {
|
||||
Some(&k) => {
|
||||
let missing_bins = bin_count_exp - bin_count;
|
||||
let continue_at = IsoDateTime::from_ns_u64(k);
|
||||
let u = k + (k - self.vals.ts1s.back().unwrap()) * missing_bins as u64;
|
||||
let finished_at = IsoDateTime::from_ns_u64(u);
|
||||
(missing_bins, Some(continue_at), Some(finished_at))
|
||||
}
|
||||
None => {
|
||||
warn!("can not determine continue-at parameters");
|
||||
(0, None, None)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
(0, None, None)
|
||||
};
|
||||
if self.vals.ts1s.as_slices().1.len() != 0 {
|
||||
panic!();
|
||||
}
|
||||
if self.vals.ts2s.as_slices().1.len() != 0 {
|
||||
panic!();
|
||||
}
|
||||
let tst1 = ts_offs_from_abs(self.vals.ts1s.as_slices().0);
|
||||
let tst2 = ts_offs_from_abs_with_anchor(tst1.0, self.vals.ts2s.as_slices().0);
|
||||
let counts = mem::replace(&mut self.vals.counts, VecDeque::new());
|
||||
let mins = mem::replace(&mut self.vals.mins, VecDeque::new());
|
||||
let maxs = mem::replace(&mut self.vals.maxs, VecDeque::new());
|
||||
let avgs = mem::replace(&mut self.vals.avgs, VecDeque::new());
|
||||
let ret = BinsXbinDim0CollectedResult::<NTY> {
|
||||
ts_anchor_sec: tst1.0,
|
||||
ts1_off_ms: tst1.1,
|
||||
ts1_off_ns: tst1.2,
|
||||
ts2_off_ms: tst2.0,
|
||||
ts2_off_ns: tst2.1,
|
||||
counts,
|
||||
mins,
|
||||
maxs,
|
||||
avgs,
|
||||
range_final: self.range_final,
|
||||
timed_out: self.timed_out,
|
||||
missing_bins,
|
||||
continue_at,
|
||||
finished_at,
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: ScalarOps> CollectableType for BinsXbinDim0<NTY> {
|
||||
type Collector = BinsXbinDim0Collector<NTY>;
|
||||
|
||||
fn new_collector() -> Self::Collector {
|
||||
Self::Collector::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BinsXbinDim0Aggregator<NTY> {
|
||||
range: SeriesRange,
|
||||
count: u64,
|
||||
min: NTY,
|
||||
max: NTY,
|
||||
// Carry over to next bin:
|
||||
avg: f32,
|
||||
sumc: u64,
|
||||
sum: f32,
|
||||
}
|
||||
|
||||
impl<NTY: ScalarOps> BinsXbinDim0Aggregator<NTY> {
|
||||
pub fn new(range: SeriesRange, _do_time_weight: bool) -> Self {
|
||||
Self {
|
||||
range,
|
||||
count: 0,
|
||||
min: NTY::zero_b(),
|
||||
max: NTY::zero_b(),
|
||||
avg: 0.,
|
||||
sumc: 0,
|
||||
sum: 0f32,
|
||||
}
|
||||
}
|
||||
}
|
||||
1139
src/channelevents.rs
Normal file
1139
src/channelevents.rs
Normal file
File diff suppressed because it is too large
Load Diff
58
src/empty.rs
Normal file
58
src/empty.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
use crate::eventsdim0::EventsDim0;
|
||||
use crate::eventsdim1::EventsDim1;
|
||||
use crate::Error;
|
||||
use daqbuf_err as err;
|
||||
use items_0::Empty;
|
||||
use items_0::Events;
|
||||
use netpod::log::*;
|
||||
use netpod::EnumVariant;
|
||||
use netpod::ScalarType;
|
||||
use netpod::Shape;
|
||||
|
||||
pub fn empty_events_dyn_ev(scalar_type: &ScalarType, shape: &Shape) -> Result<Box<dyn Events>, Error> {
|
||||
let ret: Box<dyn Events> = match shape {
|
||||
Shape::Scalar => {
|
||||
use ScalarType::*;
|
||||
type K<T> = EventsDim0<T>;
|
||||
match scalar_type {
|
||||
U8 => Box::new(K::<u8>::empty()),
|
||||
U16 => Box::new(K::<u16>::empty()),
|
||||
U32 => Box::new(K::<u32>::empty()),
|
||||
U64 => Box::new(K::<u64>::empty()),
|
||||
I8 => Box::new(K::<i8>::empty()),
|
||||
I16 => Box::new(K::<i16>::empty()),
|
||||
I32 => Box::new(K::<i32>::empty()),
|
||||
I64 => Box::new(K::<i64>::empty()),
|
||||
F32 => Box::new(K::<f32>::empty()),
|
||||
F64 => Box::new(K::<f64>::empty()),
|
||||
BOOL => Box::new(K::<bool>::empty()),
|
||||
STRING => Box::new(K::<String>::empty()),
|
||||
Enum => Box::new(K::<EnumVariant>::empty()),
|
||||
}
|
||||
}
|
||||
Shape::Wave(..) => {
|
||||
use ScalarType::*;
|
||||
type K<T> = EventsDim1<T>;
|
||||
match scalar_type {
|
||||
U8 => Box::new(K::<u8>::empty()),
|
||||
U16 => Box::new(K::<u16>::empty()),
|
||||
U32 => Box::new(K::<u32>::empty()),
|
||||
U64 => Box::new(K::<u64>::empty()),
|
||||
I8 => Box::new(K::<i8>::empty()),
|
||||
I16 => Box::new(K::<i16>::empty()),
|
||||
I32 => Box::new(K::<i32>::empty()),
|
||||
I64 => Box::new(K::<i64>::empty()),
|
||||
F32 => Box::new(K::<f32>::empty()),
|
||||
F64 => Box::new(K::<f64>::empty()),
|
||||
BOOL => Box::new(K::<bool>::empty()),
|
||||
STRING => Box::new(K::<String>::empty()),
|
||||
Enum => Box::new(K::<EnumVariant>::empty()),
|
||||
}
|
||||
}
|
||||
Shape::Image(..) => {
|
||||
error!("TODO empty_events_dyn_ev {scalar_type:?} {shape:?}");
|
||||
err::todoval()
|
||||
}
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
431
src/eventfull.rs
Normal file
431
src/eventfull.rs
Normal file
@@ -0,0 +1,431 @@
|
||||
use crate::framable::FrameType;
|
||||
use crate::merger::Mergeable;
|
||||
use bytes::BytesMut;
|
||||
use daqbuf_err as err;
|
||||
use err::thiserror;
|
||||
use err::ThisError;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::framable::FrameTypeInnerStatic;
|
||||
use items_0::streamitem::EVENT_FULL_FRAME_TYPE_ID;
|
||||
use items_0::Empty;
|
||||
use items_0::MergeError;
|
||||
use items_0::WithLen;
|
||||
#[allow(unused)]
|
||||
use netpod::log::*;
|
||||
use netpod::ScalarType;
|
||||
use netpod::Shape;
|
||||
use parse::channelconfig::CompressionMethod;
|
||||
use serde::Deserialize;
|
||||
use serde::Deserializer;
|
||||
use serde::Serialize;
|
||||
use serde::Serializer;
|
||||
use std::borrow::Cow;
|
||||
use std::collections::VecDeque;
|
||||
use std::time::Instant;
|
||||
|
||||
// Compile-time trace toggle: the first arm matches everything and expands to
// nothing, so the second (tracing) arm is never reached. Swap the arms to
// enable the `trace!` output.
#[allow(unused)]
macro_rules! trace2 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct EventFull {
|
||||
pub tss: VecDeque<u64>,
|
||||
pub pulses: VecDeque<u64>,
|
||||
pub blobs: VecDeque<Vec<u8>>,
|
||||
//#[serde(with = "decomps_serde")]
|
||||
pub scalar_types: VecDeque<ScalarType>,
|
||||
pub be: VecDeque<bool>,
|
||||
pub shapes: VecDeque<Shape>,
|
||||
pub comps: VecDeque<Option<CompressionMethod>>,
|
||||
pub entry_payload_max: u64,
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
mod decomps_serde {
|
||||
use super::*;
|
||||
|
||||
pub fn serialize<S>(t: &VecDeque<Option<BytesMut>>, s: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let a: Vec<_> = t
|
||||
.iter()
|
||||
.map(|k| match k {
|
||||
None => None,
|
||||
Some(j) => Some(j[..].to_vec()),
|
||||
})
|
||||
.collect();
|
||||
Serialize::serialize(&a, s)
|
||||
}
|
||||
|
||||
pub fn deserialize<'de, D>(d: D) -> Result<VecDeque<Option<BytesMut>>, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let a: Vec<Option<Vec<u8>>> = Deserialize::deserialize(d)?;
|
||||
let a = a
|
||||
.iter()
|
||||
.map(|k| match k {
|
||||
None => None,
|
||||
Some(j) => {
|
||||
let mut a = BytesMut::new();
|
||||
a.extend_from_slice(&j);
|
||||
Some(a)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(a)
|
||||
}
|
||||
}
|
||||
|
||||
impl EventFull {
|
||||
pub fn push(
|
||||
&mut self,
|
||||
ts: u64,
|
||||
pulse: u64,
|
||||
blob: Vec<u8>,
|
||||
scalar_type: ScalarType,
|
||||
be: bool,
|
||||
shape: Shape,
|
||||
comp: Option<CompressionMethod>,
|
||||
) {
|
||||
let m1 = blob.len();
|
||||
self.entry_payload_max = self.entry_payload_max.max(m1 as u64);
|
||||
self.tss.push_back(ts);
|
||||
self.pulses.push_back(pulse);
|
||||
self.blobs.push_back(blob);
|
||||
self.scalar_types.push_back(scalar_type);
|
||||
self.be.push_back(be);
|
||||
self.shapes.push_back(shape);
|
||||
self.comps.push_back(comp);
|
||||
}
|
||||
|
||||
// TODO possible to get rid of this?
|
||||
pub fn truncate_ts(&mut self, end: u64) {
|
||||
let mut nkeep = usize::MAX;
|
||||
for (i, &ts) in self.tss.iter().enumerate() {
|
||||
if ts >= end {
|
||||
nkeep = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
self.tss.truncate(nkeep);
|
||||
self.pulses.truncate(nkeep);
|
||||
self.blobs.truncate(nkeep);
|
||||
self.scalar_types.truncate(nkeep);
|
||||
self.be.truncate(nkeep);
|
||||
self.shapes.truncate(nkeep);
|
||||
self.comps.truncate(nkeep);
|
||||
}
|
||||
|
||||
// NOTE needed because the databuffer actually doesn't write the correct shape per event.
|
||||
pub fn overwrite_all_shapes(&mut self, shape: &Shape) {
|
||||
for u in &mut self.shapes {
|
||||
*u = shape.clone();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pop_back(&mut self) {
|
||||
self.tss.pop_back();
|
||||
self.pulses.pop_back();
|
||||
self.blobs.pop_back();
|
||||
self.scalar_types.pop_back();
|
||||
self.be.pop_back();
|
||||
self.shapes.pop_back();
|
||||
self.comps.pop_back();
|
||||
}
|
||||
|
||||
pub fn keep_ixs(&mut self, ixs: &[bool]) {
|
||||
fn inner<T>(v: &mut VecDeque<T>, ixs: &[bool]) {
|
||||
let mut it = ixs.iter();
|
||||
v.retain_mut(move |_| it.next().map(Clone::clone).unwrap_or(false));
|
||||
}
|
||||
inner(&mut self.tss, ixs);
|
||||
inner(&mut self.pulses, ixs);
|
||||
inner(&mut self.blobs, ixs);
|
||||
inner(&mut self.scalar_types, ixs);
|
||||
inner(&mut self.be, ixs);
|
||||
inner(&mut self.shapes, ixs);
|
||||
inner(&mut self.comps, ixs);
|
||||
}
|
||||
}
|
||||
|
||||
impl FrameTypeInnerStatic for EventFull {
|
||||
const FRAME_TYPE_ID: u32 = EVENT_FULL_FRAME_TYPE_ID;
|
||||
}
|
||||
|
||||
impl FrameType for EventFull {
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
|
||||
}
|
||||
}
|
||||
|
||||
impl Empty for EventFull {
|
||||
fn empty() -> Self {
|
||||
Self {
|
||||
tss: VecDeque::new(),
|
||||
pulses: VecDeque::new(),
|
||||
blobs: VecDeque::new(),
|
||||
scalar_types: VecDeque::new(),
|
||||
be: VecDeque::new(),
|
||||
shapes: VecDeque::new(),
|
||||
comps: VecDeque::new(),
|
||||
entry_payload_max: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for EventFull {
|
||||
fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl ByteEstimate for EventFull {
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
self.len() as u64 * (64 + self.entry_payload_max)
|
||||
}
|
||||
}
|
||||
|
||||
impl Mergeable for EventFull {
|
||||
fn ts_min(&self) -> Option<u64> {
|
||||
self.tss.front().map(|&x| x)
|
||||
}
|
||||
|
||||
fn ts_max(&self) -> Option<u64> {
|
||||
self.tss.back().map(|&x| x)
|
||||
}
|
||||
|
||||
fn new_empty(&self) -> Self {
|
||||
Empty::empty()
|
||||
}
|
||||
|
||||
fn clear(&mut self) {
|
||||
self.tss.clear();
|
||||
self.pulses.clear();
|
||||
self.blobs.clear();
|
||||
self.scalar_types.clear();
|
||||
self.be.clear();
|
||||
self.shapes.clear();
|
||||
self.comps.clear();
|
||||
self.entry_payload_max = 0;
|
||||
}
|
||||
|
||||
fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> {
|
||||
// TODO make it harder to forget new members when the struct may get modified in the future
|
||||
let r = range.0..range.1;
|
||||
let mut max = dst.entry_payload_max;
|
||||
for i in r.clone() {
|
||||
max = max.max(self.blobs[i].len() as _);
|
||||
}
|
||||
dst.entry_payload_max = max;
|
||||
dst.tss.extend(self.tss.drain(r.clone()));
|
||||
dst.pulses.extend(self.pulses.drain(r.clone()));
|
||||
dst.blobs.extend(self.blobs.drain(r.clone()));
|
||||
dst.scalar_types.extend(self.scalar_types.drain(r.clone()));
|
||||
dst.be.extend(self.be.drain(r.clone()));
|
||||
dst.shapes.extend(self.shapes.drain(r.clone()));
|
||||
dst.comps.extend(self.comps.drain(r.clone()));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_lowest_index_gt(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate() {
|
||||
if m > ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn find_lowest_index_ge(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate() {
|
||||
if m >= ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn find_highest_index_lt(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate().rev() {
|
||||
if m < ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn tss(&self) -> Vec<netpod::TsMs> {
|
||||
self.tss.iter().map(|x| netpod::TsMs::from_ns_u64(*x)).collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, ThisError, Serialize, Deserialize)]
|
||||
#[cstm(name = "Decompress")]
|
||||
pub enum DecompError {
|
||||
TooLittleInput,
|
||||
BadCompresionBlockSize,
|
||||
UnusedBytes,
|
||||
BitshuffleError,
|
||||
ShapeMakesNoSense,
|
||||
UnexpectedCompressedScalarValue,
|
||||
}
|
||||
|
||||
fn decompress(databuf: &[u8], type_size: u32) -> Result<Vec<u8>, DecompError> {
|
||||
// TODO collect decompression stats
|
||||
let ts1 = Instant::now();
|
||||
if databuf.len() < 12 {
|
||||
return Err(DecompError::TooLittleInput);
|
||||
}
|
||||
let value_bytes = u64::from_be_bytes(databuf[0..8].try_into().unwrap());
|
||||
let block_size = u32::from_be_bytes(databuf[8..12].try_into().unwrap());
|
||||
trace2!(
|
||||
"decompress len {} value_bytes {} block_size {}",
|
||||
databuf.len(),
|
||||
value_bytes,
|
||||
block_size
|
||||
);
|
||||
if block_size > 1024 * 32 {
|
||||
return Err(DecompError::BadCompresionBlockSize);
|
||||
}
|
||||
let ele_count = value_bytes / type_size as u64;
|
||||
trace2!(
|
||||
"ele_count {} ele_count_2 {} ele_count_exp {}",
|
||||
ele_count,
|
||||
ele_count_2,
|
||||
ele_count_exp
|
||||
);
|
||||
let mut decomp: Vec<u8> = Vec::with_capacity(type_size as usize * ele_count as usize);
|
||||
unsafe {
|
||||
decomp.set_len(decomp.capacity());
|
||||
}
|
||||
// #[cfg(DISABLED)]
|
||||
match bitshuffle::bitshuffle_decompress(&databuf[12..], &mut decomp, ele_count as _, type_size as _, 0) {
|
||||
Ok(c1) => {
|
||||
if 12 + c1 != databuf.len() {
|
||||
Err(DecompError::UnusedBytes)
|
||||
} else {
|
||||
let ts2 = Instant::now();
|
||||
let _dt = ts2.duration_since(ts1);
|
||||
// TODO analyze the histo
|
||||
//self.decomp_dt_histo.ingest(dt.as_secs() as u32 + dt.subsec_micros());
|
||||
Ok(decomp)
|
||||
}
|
||||
}
|
||||
Err(_) => Err(DecompError::BitshuffleError),
|
||||
}
|
||||
// todo!("bitshuffle not available")
|
||||
}
|
||||
|
||||
impl EventFull {
|
||||
/// Tries to infer the actual shape of the event from what's on disk and what we expect.
|
||||
/// The event data on disk usually always indicate "scalar" even for waveforms.
|
||||
/// If the data is compressed via bslz4 then we can infer the number of elements
|
||||
/// but we still don't know whether that's an image or a waveform.
|
||||
/// Therefore, the function accepts the expected shape to at least make an assumption
|
||||
/// about whether this is an image or a waveform.
|
||||
pub fn shape_derived(
|
||||
&self,
|
||||
i: usize,
|
||||
scalar_type_exp: &ScalarType,
|
||||
shape_exp: &Shape,
|
||||
) -> Result<Shape, DecompError> {
|
||||
match shape_exp {
|
||||
Shape::Scalar => match &self.comps[i] {
|
||||
Some(_) => match scalar_type_exp {
|
||||
ScalarType::STRING => Ok(Shape::Scalar),
|
||||
_ => Err(DecompError::UnexpectedCompressedScalarValue),
|
||||
},
|
||||
None => Ok(Shape::Scalar),
|
||||
},
|
||||
Shape::Wave(_) => match &self.shapes[i] {
|
||||
Shape::Scalar => match &self.comps[i] {
|
||||
Some(comp) => match comp {
|
||||
CompressionMethod::BitshuffleLZ4 => {
|
||||
let type_size = self.scalar_types[i].bytes() as u32;
|
||||
match self.blobs[i][0..8].try_into() {
|
||||
Ok(a) => {
|
||||
let value_bytes = u64::from_be_bytes(a);
|
||||
let value_bytes = value_bytes as u32;
|
||||
if value_bytes % type_size != 0 {
|
||||
Err(DecompError::ShapeMakesNoSense)
|
||||
} else {
|
||||
let n = value_bytes / type_size;
|
||||
// Here we still can't know whether the disk contains a waveform or image
|
||||
// so we assume that the user input is correct:
|
||||
Ok(Shape::Wave(n))
|
||||
}
|
||||
}
|
||||
Err(_) => Err(DecompError::ShapeMakesNoSense),
|
||||
}
|
||||
}
|
||||
},
|
||||
None => Err(DecompError::ShapeMakesNoSense),
|
||||
},
|
||||
Shape::Wave(s) => Ok(Shape::Wave(s.clone())),
|
||||
Shape::Image(_, _) => Err(DecompError::ShapeMakesNoSense),
|
||||
},
|
||||
Shape::Image(a, b) => match &self.shapes[i] {
|
||||
Shape::Scalar => match &self.comps[i] {
|
||||
Some(comp) => match comp {
|
||||
CompressionMethod::BitshuffleLZ4 => {
|
||||
let type_size = self.scalar_types[i].bytes() as u32;
|
||||
match self.blobs[i][0..8].try_into() {
|
||||
Ok(vb) => {
|
||||
let value_bytes = u64::from_be_bytes(vb);
|
||||
let value_bytes = value_bytes as u32;
|
||||
if value_bytes % type_size != 0 {
|
||||
Err(DecompError::ShapeMakesNoSense)
|
||||
} else {
|
||||
let n = value_bytes / type_size;
|
||||
// Here we still can't know whether the disk contains a waveform or image
|
||||
// so we assume that the user input is correct.
|
||||
// NOTE
|
||||
// We only know the number of pixels from the compressed blob but we can't
|
||||
// know the actual shape.
|
||||
// Can only rely on user input and check that total number of pixels agree.
|
||||
if *a * *b != n {
|
||||
Err(DecompError::ShapeMakesNoSense)
|
||||
} else {
|
||||
Ok(Shape::Image(*a, *b))
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(_) => Err(DecompError::ShapeMakesNoSense),
|
||||
}
|
||||
}
|
||||
},
|
||||
None => Err(DecompError::ShapeMakesNoSense),
|
||||
},
|
||||
Shape::Wave(_) => Err(DecompError::ShapeMakesNoSense),
|
||||
Shape::Image(a, b) => Ok(Shape::Image(*a, *b)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn data_raw(&self, i: usize) -> &[u8] {
|
||||
&self.blobs[i]
|
||||
}
|
||||
|
||||
pub fn data_decompressed(&self, i: usize) -> Result<Cow<[u8]>, DecompError> {
|
||||
if let Some(comp) = &self.comps[i] {
|
||||
match comp {
|
||||
CompressionMethod::BitshuffleLZ4 => {
|
||||
// NOTE the event data on databuffer disk seems to contain the correct scalar type
|
||||
// but the shape of the event record seems always "scalar" even for waveforms
|
||||
// so we must derive the shape of the compressed data from the length of the
|
||||
// uncompressed byte blob and the byte size of the scalar type.
|
||||
let type_size = self.scalar_types[i].bytes() as u32;
|
||||
let data = decompress(&self.blobs[i], type_size)?;
|
||||
Ok(Cow::Owned(data))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let data = &self.blobs[i];
|
||||
Ok(Cow::Borrowed(data.as_slice()))
|
||||
}
|
||||
}
|
||||
}
|
||||
869
src/eventsdim0.rs
Normal file
869
src/eventsdim0.rs
Normal file
@@ -0,0 +1,869 @@
|
||||
use crate::IsoDateTime;
|
||||
use daqbuf_err as err;
|
||||
use err::Error;
|
||||
use items_0::collect_s::CollectableDyn;
|
||||
use items_0::collect_s::CollectedDyn;
|
||||
use items_0::collect_s::CollectorTy;
|
||||
use items_0::collect_s::ToJsonResult;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::overlap::HasTimestampDeque;
|
||||
use items_0::scalar_ops::ScalarOps;
|
||||
use items_0::Appendable;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::AsAnyRef;
|
||||
use items_0::Empty;
|
||||
use items_0::Events;
|
||||
use items_0::EventsNonObj;
|
||||
use items_0::MergeError;
|
||||
use items_0::Resettable;
|
||||
use items_0::TypeName;
|
||||
use items_0::WithLen;
|
||||
use netpod::is_false;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::SeriesRange;
|
||||
use netpod::timeunits::MS;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::BinnedRangeEnum;
|
||||
use netpod::TsNano;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
|
||||
// Per-topic trace toggles: flip the boolean literal to enable or disable
// each tracing category at compile time.
#[allow(unused)]
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }

#[allow(unused)]
macro_rules! trace_ingest_item { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }

#[allow(unused)]
macro_rules! trace_ingest_event { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }

#[allow(unused)]
macro_rules! trace2 { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }

#[allow(unused)]
macro_rules! trace_binning { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }

#[allow(unused)]
macro_rules! debug_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
|
||||
|
||||
#[derive(Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct EventsDim0NoPulse<STY> {
|
||||
pub tss: VecDeque<u64>,
|
||||
pub values: VecDeque<STY>,
|
||||
}
|
||||
|
||||
impl<STY> From<EventsDim0NoPulse<STY>> for EventsDim0<STY> {
|
||||
fn from(value: EventsDim0NoPulse<STY>) -> Self {
|
||||
let pulses = vec![0; value.tss.len()].into();
|
||||
Self {
|
||||
tss: value.tss,
|
||||
pulses,
|
||||
values: value.values,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct EventsDim0<STY> {
|
||||
pub tss: VecDeque<u64>,
|
||||
pub pulses: VecDeque<u64>,
|
||||
pub values: VecDeque<STY>,
|
||||
}
|
||||
|
||||
impl<STY> EventsDim0<STY> {
|
||||
pub fn type_name() -> &'static str {
|
||||
std::any::type_name::<Self>()
|
||||
}
|
||||
|
||||
pub fn push_back(&mut self, ts: u64, pulse: u64, value: STY) {
|
||||
self.tss.push_back(ts);
|
||||
self.pulses.push_back(pulse);
|
||||
self.values.push_back(value);
|
||||
}
|
||||
|
||||
pub fn push_front(&mut self, ts: u64, pulse: u64, value: STY) {
|
||||
self.tss.push_front(ts);
|
||||
self.pulses.push_front(pulse);
|
||||
self.values.push_front(value);
|
||||
}
|
||||
|
||||
pub fn serde_id() -> &'static str {
|
||||
"EventsDim0"
|
||||
}
|
||||
|
||||
pub fn tss(&self) -> &VecDeque<u64> {
|
||||
&self.tss
|
||||
}
|
||||
|
||||
// only for testing at the moment
|
||||
pub fn private_values_ref(&self) -> &VecDeque<STY> {
|
||||
&self.values
|
||||
}
|
||||
pub fn private_values_mut(&mut self) -> &mut VecDeque<STY> {
|
||||
&mut self.values
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyRef for EventsDim0<STY>
|
||||
where
|
||||
STY: ScalarOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyMut for EventsDim0<STY>
|
||||
where
|
||||
STY: ScalarOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> Empty for EventsDim0<STY> {
|
||||
fn empty() -> Self {
|
||||
Self {
|
||||
tss: VecDeque::new(),
|
||||
pulses: VecDeque::new(),
|
||||
values: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> fmt::Debug for EventsDim0<STY>
|
||||
where
|
||||
STY: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
if false {
|
||||
write!(
|
||||
fmt,
|
||||
"{} {{ count {} ts {:?} vals {:?} }}",
|
||||
self.type_name(),
|
||||
self.tss.len(),
|
||||
self.tss.iter().map(|x| x / SEC).collect::<Vec<_>>(),
|
||||
self.values,
|
||||
)
|
||||
} else {
|
||||
write!(
|
||||
fmt,
|
||||
"{} {{ count {} ts {:?} .. {:?} vals {:?} .. {:?} }}",
|
||||
self.type_name(),
|
||||
self.tss.len(),
|
||||
self.tss.front().map(|&x| TsNano::from_ns(x)),
|
||||
self.tss.back().map(|&x| TsNano::from_ns(x)),
|
||||
self.values.front(),
|
||||
self.values.back(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> WithLen for EventsDim0<STY> {
|
||||
fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> ByteEstimate for EventsDim0<STY> {
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
// TODO
|
||||
// Should use a better estimate for waveform and string types,
|
||||
// or keep some aggregated byte count on push.
|
||||
let n = self.len();
|
||||
if n == 0 {
|
||||
0
|
||||
} else {
|
||||
// TODO use the actual size of one/some of the elements.
|
||||
let i = n * 2 / 3;
|
||||
let sty_bytes = self.values[i].byte_estimate();
|
||||
(n as u64 * (8 + 8 + sty_bytes)) as u64
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> Resettable for EventsDim0<STY> {
|
||||
fn reset(&mut self) {
|
||||
self.tss.clear();
|
||||
self.pulses.clear();
|
||||
self.values.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> HasTimestampDeque for EventsDim0<STY> {
|
||||
fn timestamp_min(&self) -> Option<u64> {
|
||||
self.tss.front().map(|x| *x)
|
||||
}
|
||||
|
||||
fn timestamp_max(&self) -> Option<u64> {
|
||||
self.tss.back().map(|x| *x)
|
||||
}
|
||||
|
||||
fn pulse_min(&self) -> Option<u64> {
|
||||
self.pulses.front().map(|x| *x)
|
||||
}
|
||||
|
||||
fn pulse_max(&self) -> Option<u64> {
|
||||
self.pulses.back().map(|x| *x)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct EventsDim0ChunkOutput<STY> {
|
||||
tss: VecDeque<u64>,
|
||||
pulses: VecDeque<u64>,
|
||||
values: VecDeque<STY>,
|
||||
scalar_type: String,
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> EventsDim0ChunkOutput<STY> {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EventsDim0Collector<STY> {
|
||||
vals: EventsDim0<STY>,
|
||||
range_final: bool,
|
||||
timed_out: bool,
|
||||
needs_continue_at: bool,
|
||||
}
|
||||
|
||||
impl<STY> EventsDim0Collector<STY> {
|
||||
pub fn self_name() -> &'static str {
|
||||
any::type_name::<Self>()
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
debug!("EventsDim0Collector NEW");
|
||||
Self {
|
||||
vals: EventsDim0::empty(),
|
||||
range_final: false,
|
||||
timed_out: false,
|
||||
needs_continue_at: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> WithLen for EventsDim0Collector<STY> {
|
||||
fn len(&self) -> usize {
|
||||
WithLen::len(&self.vals)
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> ByteEstimate for EventsDim0Collector<STY> {
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
ByteEstimate::byte_estimate(&self.vals)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct EventsDim0CollectorOutput<STY> {
|
||||
#[serde(rename = "tsAnchor")]
|
||||
ts_anchor_sec: u64,
|
||||
#[serde(rename = "tsMs")]
|
||||
ts_off_ms: VecDeque<u64>,
|
||||
#[serde(rename = "tsNs")]
|
||||
ts_off_ns: VecDeque<u64>,
|
||||
#[serde(rename = "pulseAnchor")]
|
||||
pulse_anchor: u64,
|
||||
#[serde(rename = "pulseOff")]
|
||||
pulse_off: VecDeque<u64>,
|
||||
#[serde(rename = "values")]
|
||||
values: VecDeque<STY>,
|
||||
#[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
|
||||
range_final: bool,
|
||||
#[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
|
||||
timed_out: bool,
|
||||
#[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
|
||||
continue_at: Option<IsoDateTime>,
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> EventsDim0CollectorOutput<STY> {
|
||||
pub fn ts_anchor_sec(&self) -> u64 {
|
||||
self.ts_anchor_sec
|
||||
}
|
||||
|
||||
pub fn ts_off_ms(&self) -> &VecDeque<u64> {
|
||||
&self.ts_off_ms
|
||||
}
|
||||
|
||||
pub fn pulse_anchor(&self) -> u64 {
|
||||
self.pulse_anchor
|
||||
}
|
||||
|
||||
pub fn pulse_off(&self) -> &VecDeque<u64> {
|
||||
&self.pulse_off
|
||||
}
|
||||
|
||||
/// Note: only used for unit tests.
|
||||
pub fn values_to_f32(&self) -> VecDeque<f32> {
|
||||
self.values.iter().map(|x| x.as_prim_f32_b()).collect()
|
||||
}
|
||||
|
||||
pub fn range_final(&self) -> bool {
|
||||
self.range_final
|
||||
}
|
||||
|
||||
pub fn timed_out(&self) -> bool {
|
||||
self.timed_out
|
||||
}
|
||||
|
||||
pub fn is_valid(&self) -> bool {
|
||||
if self.ts_off_ms.len() != self.ts_off_ns.len() {
|
||||
false
|
||||
} else if self.ts_off_ms.len() != self.pulse_off.len() {
|
||||
false
|
||||
} else if self.ts_off_ms.len() != self.values.len() {
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
pub fn info_str(&self) -> String {
|
||||
use fmt::Write;
|
||||
let mut out = String::new();
|
||||
write!(
|
||||
out,
|
||||
"ts_off_ms {} ts_off_ns {} pulse_off {} values {}",
|
||||
self.ts_off_ms.len(),
|
||||
self.ts_off_ns.len(),
|
||||
self.pulse_off.len(),
|
||||
self.values.len(),
|
||||
)
|
||||
.unwrap();
|
||||
out
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyRef for EventsDim0CollectorOutput<STY>
|
||||
where
|
||||
STY: 'static,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyMut for EventsDim0CollectorOutput<STY>
|
||||
where
|
||||
STY: 'static,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> TypeName for EventsDim0CollectorOutput<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> WithLen for EventsDim0CollectorOutput<STY> {
|
||||
fn len(&self) -> usize {
|
||||
self.values.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> ToJsonResult for EventsDim0CollectorOutput<STY> {
|
||||
fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
|
||||
serde_json::to_value(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> CollectedDyn for EventsDim0CollectorOutput<STY> {}
|
||||
|
||||
impl<STY: ScalarOps> CollectorTy for EventsDim0Collector<STY> {
|
||||
type Input = EventsDim0<STY>;
|
||||
type Output = EventsDim0CollectorOutput<STY>;
|
||||
|
||||
fn ingest(&mut self, src: &mut Self::Input) {
|
||||
self.vals.tss.append(&mut src.tss);
|
||||
self.vals.pulses.append(&mut src.pulses);
|
||||
self.vals.values.append(&mut src.values);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_final = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
self.needs_continue_at = true;
|
||||
}
|
||||
|
||||
fn set_continue_at_here(&mut self) {
|
||||
self.needs_continue_at = true;
|
||||
}
|
||||
|
||||
fn result(
|
||||
&mut self,
|
||||
range: Option<SeriesRange>,
|
||||
_binrange: Option<BinnedRangeEnum>,
|
||||
) -> Result<Self::Output, Error> {
|
||||
debug!(
|
||||
"{} result() needs_continue_at {}",
|
||||
Self::self_name(),
|
||||
self.needs_continue_at
|
||||
);
|
||||
// If we timed out, we want to hint the client from where to continue.
|
||||
// This is tricky: currently, client can not request a left-exclusive range.
|
||||
// We currently give the timestamp of the last event plus a small delta.
|
||||
// The amount of the delta must take into account what kind of timestamp precision the client
|
||||
// can parse and handle.
|
||||
let vals = &mut self.vals;
|
||||
let continue_at = if self.needs_continue_at {
|
||||
if let Some(ts) = vals.tss.back() {
|
||||
let x = Some(IsoDateTime::from_ns_u64(*ts / MS * MS + MS));
|
||||
x
|
||||
} else {
|
||||
if let Some(range) = &range {
|
||||
match range {
|
||||
SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)),
|
||||
SeriesRange::PulseRange(_) => {
|
||||
error!("TODO emit create continueAt for pulse range");
|
||||
Some(IsoDateTime::from_ns_u64(0))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Some(IsoDateTime::from_ns_u64(0))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let tss_sl = vals.tss.make_contiguous();
|
||||
let pulses_sl = vals.pulses.make_contiguous();
|
||||
let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
|
||||
let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl);
|
||||
let values = mem::replace(&mut vals.values, VecDeque::new());
|
||||
if ts_off_ms.len() != ts_off_ns.len() {
|
||||
return Err(Error::with_msg_no_trace("collected len mismatch"));
|
||||
}
|
||||
if ts_off_ms.len() != pulse_off.len() {
|
||||
return Err(Error::with_msg_no_trace("collected len mismatch"));
|
||||
}
|
||||
if ts_off_ms.len() != values.len() {
|
||||
return Err(Error::with_msg_no_trace("collected len mismatch"));
|
||||
}
|
||||
let ret = Self::Output {
|
||||
ts_anchor_sec,
|
||||
ts_off_ms,
|
||||
ts_off_ns,
|
||||
pulse_anchor,
|
||||
pulse_off,
|
||||
values,
|
||||
range_final: self.range_final,
|
||||
timed_out: self.timed_out,
|
||||
continue_at,
|
||||
};
|
||||
if !ret.is_valid() {
|
||||
error!("invalid:\n{}", ret.info_str());
|
||||
}
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
// Wires EventsDim0 into the typed collection machinery by naming its collector.
impl<STY: ScalarOps> items_0::collect_s::CollectableType for EventsDim0<STY> {
    type Collector = EventsDim0Collector<STY>;

    // Fresh, empty collector for this container type.
    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}
|
||||
|
||||
// Aggregation state for time-binning scalar (dim-0) events.
// Field semantics are partly inferred from names; the aggregator methods are
// defined elsewhere in this file.
#[derive(Debug)]
pub struct EventsDim0Aggregator<STY> {
    // Series range the aggregation runs over.
    range: SeriesRange,
    // Number of events counted into the current bin (see Drop impl below).
    count: u64,
    // Running (min, max, last) values; None until the first event — TODO confirm.
    minmaxlst: Option<(STY, STY, STY)>,
    // Number of samples contributing to `sum` — presumably; verify against usage.
    sumc: u64,
    // Running sum used for the average.
    sum: f32,
    // NOTE(review): looks like the timestamp up to which time-weighted
    // integration has been applied — confirm against the ingest code.
    int_ts: u64,
    // Timestamp of the most recently seen event.
    last_ts: u64,
    // Whether averaging is time-weighted rather than per-event.
    do_time_weight: bool,
    // Events skipped (e.g. outside range); reported on drop.
    events_ignored_count: u64,
    // Total items handed to this aggregator, for diagnostics.
    items_seen: usize,
}
|
||||
|
||||
// Emits the aggregation counters when the aggregator is torn down.
impl<STY> Drop for EventsDim0Aggregator<STY> {
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        trace!("count {} ignored {}", self.count, self.events_ignored_count);
    }
}
|
||||
|
||||
impl<STY> TypeName for EventsDim0<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
let self_name = any::type_name::<Self>();
|
||||
format!("{self_name}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> EventsNonObj for EventsDim0<STY> {
    // Consumes the container, keeping only the timestamp and pulse columns;
    // the values column is dropped.
    fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
        trace!(
            "{}::into_tss_pulses len {} len {}",
            Self::type_name(),
            self.tss.len(),
            self.pulses.len()
        );
        (self.tss, self.pulses)
    }
}
|
||||
|
||||
// Helper for `to_container_events`: if `$this` holds events of scalar type `$sty`,
// convert them into a ContainerEvents and `return` it boxed from the enclosing
// function. Note: the expansion contains a `return`, so the macro is only valid
// inside a function body returning the boxed container type.
macro_rules! try_to_container_events {
    ($sty:ty, $this:expr) => {
        let this = $this;
        if let Some(evs) = this.as_any_ref().downcast_ref::<EventsDim0<$sty>>() {
            use crate::binning::container_events::ContainerEvents;
            // Timestamps are read from the outer value, values from the downcast view.
            let tss = this.tss.iter().map(|&x| TsNano::from_ns(x)).collect();
            let vals = evs.values.clone();
            let ret = ContainerEvents::<$sty>::from_constituents(tss, vals);
            return Box::new(ret);
        }
    };
}
|
||||
|
||||
impl<STY: ScalarOps> Events for EventsDim0<STY> {
|
||||
fn verify(&self) -> bool {
|
||||
let mut good = true;
|
||||
let n = self.tss.len();
|
||||
for (&ts1, &ts2) in self.tss.iter().zip(self.tss.range(n.min(1)..n)) {
|
||||
if ts1 > ts2 {
|
||||
good = false;
|
||||
error!("unordered event data ts1 {} ts2 {}", ts1, ts2);
|
||||
break;
|
||||
}
|
||||
}
|
||||
good
|
||||
}
|
||||
|
||||
fn output_info(&self) -> String {
|
||||
let n2 = self.tss.len().max(1) - 1;
|
||||
let min = if let Some(ts) = self.tss.get(0) {
|
||||
TsNano::from_ns(*ts).fmt().to_string()
|
||||
} else {
|
||||
String::from("None")
|
||||
};
|
||||
let max = if let Some(ts) = self.tss.get(n2) {
|
||||
TsNano::from_ns(*ts).fmt().to_string()
|
||||
} else {
|
||||
String::from("None")
|
||||
};
|
||||
format!(
|
||||
"EventsDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
|
||||
self.tss.len(),
|
||||
min,
|
||||
max,
|
||||
)
|
||||
}
|
||||
|
||||
fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
|
||||
self
|
||||
}
|
||||
|
||||
fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
|
||||
// TODO improve the search
|
||||
let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
|
||||
let tss = self.tss.drain(..n1).collect();
|
||||
let pulses = self.pulses.drain(..n1).collect();
|
||||
let values = self.values.drain(..n1).collect();
|
||||
let ret = Self { tss, pulses, values };
|
||||
Box::new(ret)
|
||||
}
|
||||
|
||||
fn new_empty_evs(&self) -> Box<dyn Events> {
|
||||
Box::new(Self::empty())
|
||||
}
|
||||
|
||||
fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> {
|
||||
// TODO as_any and as_any_mut are declared on unrelated traits. Simplify.
|
||||
if let Some(dst) = dst.as_any_mut().downcast_mut::<Self>() {
|
||||
// TODO make it harder to forget new members when the struct may get modified in the future
|
||||
let r = range.0..range.1;
|
||||
dst.tss.extend(self.tss.drain(r.clone()));
|
||||
dst.pulses.extend(self.pulses.drain(r.clone()));
|
||||
dst.values.extend(self.values.drain(r.clone()));
|
||||
Ok(())
|
||||
} else {
|
||||
error!(
|
||||
"downcast to EventsDim0 FAILED\n\n{}\n\n{}\n\n",
|
||||
self.type_name(),
|
||||
dst.type_name()
|
||||
);
|
||||
panic!();
|
||||
Err(MergeError::NotCompatible)
|
||||
}
|
||||
}
|
||||
|
||||
fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate() {
|
||||
if m > ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate() {
|
||||
if m >= ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate().rev() {
|
||||
if m < ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn ts_min(&self) -> Option<u64> {
|
||||
self.tss.front().map(|&x| x)
|
||||
}
|
||||
|
||||
fn ts_max(&self) -> Option<u64> {
|
||||
self.tss.back().map(|&x| x)
|
||||
}
|
||||
|
||||
fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
|
||||
if let Some(other) = other.as_any_ref().downcast_ref::<Self>() {
|
||||
self == other
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn serde_id(&self) -> &'static str {
|
||||
Self::serde_id()
|
||||
}
|
||||
|
||||
fn nty_id(&self) -> u32 {
|
||||
STY::SUB
|
||||
}
|
||||
|
||||
fn clone_dyn(&self) -> Box<dyn Events> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn tss(&self) -> &VecDeque<u64> {
|
||||
&self.tss
|
||||
}
|
||||
|
||||
fn pulses(&self) -> &VecDeque<u64> {
|
||||
&self.pulses
|
||||
}
|
||||
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
error!("TODO frame_type_id should not be called");
|
||||
// TODO make more nice
|
||||
panic!()
|
||||
}
|
||||
|
||||
fn to_min_max_avg(&mut self) -> Box<dyn Events> {
|
||||
let dst = Self {
|
||||
tss: mem::replace(&mut self.tss, Default::default()),
|
||||
pulses: mem::replace(&mut self.pulses, Default::default()),
|
||||
values: mem::replace(&mut self.values, Default::default()),
|
||||
};
|
||||
Box::new(dst)
|
||||
}
|
||||
|
||||
fn to_json_string(&self) -> String {
|
||||
// TODO redesign with mut access, rename to `into_` and take the values out.
|
||||
let mut tss = self.tss.clone();
|
||||
let mut pulses = self.pulses.clone();
|
||||
let mut values = self.values.clone();
|
||||
let tss_sl = tss.make_contiguous();
|
||||
let pulses_sl = pulses.make_contiguous();
|
||||
let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
|
||||
let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl);
|
||||
let values = mem::replace(&mut values, VecDeque::new());
|
||||
let ret = EventsDim0CollectorOutput {
|
||||
ts_anchor_sec,
|
||||
ts_off_ms,
|
||||
ts_off_ns,
|
||||
pulse_anchor,
|
||||
pulse_off,
|
||||
values,
|
||||
range_final: false,
|
||||
timed_out: false,
|
||||
continue_at: None,
|
||||
};
|
||||
serde_json::to_string(&ret).unwrap()
|
||||
}
|
||||
|
||||
fn to_json_vec_u8(&self) -> Vec<u8> {
|
||||
self.to_json_string().into_bytes()
|
||||
}
|
||||
|
||||
fn to_cbor_vec_u8(&self) -> Vec<u8> {
|
||||
// TODO redesign with mut access, rename to `into_` and take the values out.
|
||||
let ret = EventsDim0ChunkOutput {
|
||||
// TODO use &mut to swap the content
|
||||
tss: self.tss.clone(),
|
||||
pulses: self.pulses.clone(),
|
||||
values: self.values.clone(),
|
||||
scalar_type: STY::scalar_type_name().into(),
|
||||
};
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(&ret, &mut buf).unwrap();
|
||||
buf
|
||||
}
|
||||
|
||||
fn clear(&mut self) {
|
||||
self.tss.clear();
|
||||
self.pulses.clear();
|
||||
self.values.clear();
|
||||
}
|
||||
|
||||
fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
|
||||
let mut ret = EventsDim0::empty();
|
||||
for (&ts, val) in self.tss.iter().zip(self.values.iter()) {
|
||||
ret.push(ts, 0, val.as_prim_f32_b());
|
||||
}
|
||||
Box::new(ret)
|
||||
}
|
||||
|
||||
fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
|
||||
try_to_container_events!(u8, self);
|
||||
try_to_container_events!(u16, self);
|
||||
try_to_container_events!(u32, self);
|
||||
try_to_container_events!(u64, self);
|
||||
try_to_container_events!(i8, self);
|
||||
try_to_container_events!(i16, self);
|
||||
try_to_container_events!(i32, self);
|
||||
try_to_container_events!(i64, self);
|
||||
try_to_container_events!(f32, self);
|
||||
try_to_container_events!(f64, self);
|
||||
try_to_container_events!(bool, self);
|
||||
try_to_container_events!(String, self);
|
||||
let this = self;
|
||||
if let Some(evs) = self.as_any_ref().downcast_ref::<EventsDim0<netpod::EnumVariant>>() {
|
||||
use crate::binning::container_events::ContainerEvents;
|
||||
let tss = this.tss.iter().map(|&x| TsNano::from_ns(x)).collect();
|
||||
use crate::binning::container_events::Container;
|
||||
let mut vals = crate::binning::valuetype::EnumVariantContainer::new();
|
||||
for x in evs.values.iter() {
|
||||
vals.push_back(x.clone());
|
||||
}
|
||||
let ret = ContainerEvents::<netpod::EnumVariant>::from_constituents(tss, vals);
|
||||
return Box::new(ret);
|
||||
}
|
||||
let styn = any::type_name::<STY>();
|
||||
todo!("TODO to_container_events for {styn}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> Appendable<STY> for EventsDim0<STY>
where
    STY: ScalarOps,
{
    // Appends one event; the three columns always grow in lockstep.
    fn push(&mut self, ts: u64, pulse: u64, value: STY) {
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.values.push_back(value);
    }
}
|
||||
|
||||
#[cfg(test)]
mod test_frame {
    use super::*;
    use crate::channelevents::ChannelEvents;
    use crate::framable::Framable;
    use crate::framable::INMEM_FRAME_ENCID;
    use crate::frame::decode_frame;
    use crate::inmem::InMemoryFrame;
    use items_0::streamitem::RangeCompletableItem;
    use items_0::streamitem::Sitemty;
    use items_0::streamitem::StreamItem;

    // Round-trips a single event through frame encoding and decoding and checks
    // that the payload survives intact.
    #[test]
    fn events_serialize() {
        // taskrun::tracing_init_testing().unwrap();
        let mut events = EventsDim0::empty();
        events.push(123, 234, 55f32);
        let events = events;
        let events: Box<dyn Events> = Box::new(events);
        let item = ChannelEvents::Events(events);
        let item = Ok::<_, Error>(StreamItem::DataItem(RangeCompletableItem::Data(item)));
        let mut buf = item.make_frame_dyn().unwrap();
        // The slicing below assumes a 20-byte frame header and a 4-byte trailer
        // around the payload — TODO confirm against the frame format definition.
        let s = String::from_utf8_lossy(&buf[20..buf.len() - 4]);
        eprintln!("[[{s}]]");
        let buflen = buf.len();
        let frame = InMemoryFrame {
            encid: INMEM_FRAME_ENCID,
            tyid: 0x2500,
            len: (buflen - 24) as _,
            buf: buf.split_off(20).split_to(buflen - 20 - 4).freeze(),
        };
        let item: Sitemty<ChannelEvents> = decode_frame(&frame).unwrap();
        // Unwrap the nested stream-item layers; any shape mismatch fails the test.
        let item = if let Ok(x) = item { x } else { panic!() };
        let item = if let StreamItem::DataItem(x) = item {
            x
        } else {
            panic!()
        };
        let item = if let RangeCompletableItem::Data(x) = item {
            x
        } else {
            panic!()
        };
        let mut item = if let ChannelEvents::Events(x) = item {
            x
        } else {
            panic!()
        };
        let item = if let Some(item) = item.as_any_mut().downcast_mut::<EventsDim0<f32>>() {
            item
        } else {
            panic!()
        };
        assert_eq!(item.tss(), &[123]);
    }
}
|
||||
|
||||
// Demonstrates serde behavior for Option fields: `#[serde(default)]` only
// affects deserialization, so both `a` and `b` still serialize as null;
// only `skip_serializing_if = "Option::is_none"` omits the field.
#[cfg(test)]
mod test_serde_opt {
    use super::*;

    #[derive(Serialize)]
    struct A {
        a: Option<String>,
        #[serde(default)]
        b: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        c: Option<String>,
    }

    #[test]
    fn test_a() {
        let s = serde_json::to_string(&A {
            a: None,
            b: None,
            c: None,
        })
        .unwrap();
        // `c` is skipped; `a` and `b` appear as null.
        assert_eq!(s, r#"{"a":null,"b":null}"#);
    }
}
|
||||
469
src/eventsdim0enum.rs
Normal file
469
src/eventsdim0enum.rs
Normal file
@@ -0,0 +1,469 @@
|
||||
use daqbuf_err as err;
|
||||
use err::Error;
|
||||
use items_0::collect_s::CollectableDyn;
|
||||
use items_0::collect_s::CollectedDyn;
|
||||
use items_0::collect_s::CollectorDyn;
|
||||
use items_0::collect_s::CollectorTy;
|
||||
use items_0::collect_s::ToJsonBytes;
|
||||
use items_0::collect_s::ToJsonResult;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::isodate::IsoDateTime;
|
||||
use items_0::scalar_ops::ScalarOps;
|
||||
use items_0::timebin::TimeBinnableTy;
|
||||
use items_0::timebin::TimeBinnerTy;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::AsAnyRef;
|
||||
use items_0::Events;
|
||||
use items_0::EventsNonObj;
|
||||
use items_0::TypeName;
|
||||
use items_0::WithLen;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::SeriesRange;
|
||||
use netpod::timeunits::MS;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::BinnedRangeEnum;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::mem;
|
||||
|
||||
// Compile-time-disabled trace macro for collector results: the arguments are
// still type-checked but never emitted (flip `false` to enable).
#[allow(unused)]
macro_rules! trace_collect_result {
    ($($arg:tt)*) => {
        if false {
            trace!($($arg)*);
        }
    };
}
|
||||
|
||||
// Accumulates enum-valued dim-0 events across multiple input chunks.
#[derive(Debug)]
pub struct EventsDim0EnumCollector {
    // Events collected so far.
    vals: EventsDim0Enum,
    // True when the full requested range has been covered.
    range_final: bool,
    // True when collection stopped due to a timeout.
    timed_out: bool,
    // True when the output should carry a continueAt hint.
    needs_continue_at: bool,
}
|
||||
|
||||
impl EventsDim0EnumCollector {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
vals: EventsDim0Enum::new(),
|
||||
range_final: false,
|
||||
timed_out: false,
|
||||
needs_continue_at: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TypeName for EventsDim0EnumCollector {
    // Fixed name for diagnostics; this type is not generic.
    fn type_name(&self) -> String {
        "EventsDim0EnumCollector".into()
    }
}
|
||||
|
||||
impl WithLen for EventsDim0EnumCollector {
    // Number of events collected so far (all columns share this length).
    fn len(&self) -> usize {
        self.vals.tss.len()
    }
}
|
||||
|
||||
impl ByteEstimate for EventsDim0EnumCollector {
    // Rough size estimate: a flat 30 bytes per event.
    fn byte_estimate(&self) -> u64 {
        // TODO does it need to be more accurate?
        30 * self.len() as u64
    }
}
|
||||
|
||||
// Serializable result of collecting enum events: timestamps split into an
// anchor plus ms/ns offsets, values as variant indices plus display strings.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim0EnumCollectorOutput {
    // Timestamp anchor in seconds; offsets below are relative to it.
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "tsMs")]
    ts_off_ms: VecDeque<u64>,
    #[serde(rename = "tsNs")]
    ts_off_ns: VecDeque<u64>,
    // Numeric enum variant index per event.
    #[serde(rename = "values")]
    vals: VecDeque<u16>,
    // Display string per event, parallel to `vals`.
    #[serde(rename = "valuestrings")]
    valstrs: VecDeque<String>,
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "netpod::is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "netpod::is_false")]
    timed_out: bool,
    // Hint for the client where to resume after a timeout.
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
}
|
||||
|
||||
impl WithLen for EventsDim0EnumCollectorOutput {
    // Not implemented yet; would naturally be ts_off_ms.len() — TODO confirm.
    fn len(&self) -> usize {
        todo!()
    }
}
|
||||
|
||||
impl AsAnyRef for EventsDim0EnumCollectorOutput {
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsAnyMut for EventsDim0EnumCollectorOutput {
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl TypeName for EventsDim0EnumCollectorOutput {
    // Full type path for diagnostics.
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
|
||||
|
||||
impl ToJsonResult for EventsDim0EnumCollectorOutput {
|
||||
fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
// Marker impl: lets the output participate in dynamic collection dispatch.
impl CollectedDyn for EventsDim0EnumCollectorOutput {}
|
||||
|
||||
impl CollectorTy for EventsDim0EnumCollector {
|
||||
type Input = EventsDim0Enum;
|
||||
type Output = EventsDim0EnumCollectorOutput;
|
||||
|
||||
fn ingest(&mut self, src: &mut EventsDim0Enum) {
|
||||
self.vals.tss.append(&mut src.tss);
|
||||
self.vals.values.append(&mut src.values);
|
||||
self.vals.valuestrs.append(&mut src.valuestrs);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_final = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
self.needs_continue_at = true;
|
||||
}
|
||||
|
||||
fn set_continue_at_here(&mut self) {
|
||||
self.needs_continue_at = true;
|
||||
}
|
||||
|
||||
fn result(
|
||||
&mut self,
|
||||
range: Option<SeriesRange>,
|
||||
binrange: Option<BinnedRangeEnum>,
|
||||
) -> Result<EventsDim0EnumCollectorOutput, Error> {
|
||||
trace_collect_result!(
|
||||
"{} result() needs_continue_at {}",
|
||||
self.type_name(),
|
||||
self.needs_continue_at
|
||||
);
|
||||
// If we timed out, we want to hint the client from where to continue.
|
||||
// This is tricky: currently, client can not request a left-exclusive range.
|
||||
// We currently give the timestamp of the last event plus a small delta.
|
||||
// The amount of the delta must take into account what kind of timestamp precision the client
|
||||
// can parse and handle.
|
||||
let vals = &mut self.vals;
|
||||
let continue_at = if self.needs_continue_at {
|
||||
if let Some(ts) = vals.tss.back() {
|
||||
let x = Some(IsoDateTime::from_ns_u64(*ts / MS * MS + MS));
|
||||
x
|
||||
} else {
|
||||
if let Some(range) = &range {
|
||||
match range {
|
||||
SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)),
|
||||
SeriesRange::PulseRange(_) => {
|
||||
error!("TODO emit create continueAt for pulse range");
|
||||
Some(IsoDateTime::from_ns_u64(0))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Some(IsoDateTime::from_ns_u64(0))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let tss_sl = vals.tss.make_contiguous();
|
||||
let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
|
||||
let valixs = mem::replace(&mut vals.values, VecDeque::new());
|
||||
let valstrs = mem::replace(&mut vals.valuestrs, VecDeque::new());
|
||||
let vals = valixs;
|
||||
if ts_off_ms.len() != ts_off_ns.len() {
|
||||
return Err(Error::with_msg_no_trace("collected len mismatch"));
|
||||
}
|
||||
if ts_off_ms.len() != vals.len() {
|
||||
return Err(Error::with_msg_no_trace("collected len mismatch"));
|
||||
}
|
||||
if ts_off_ms.len() != valstrs.len() {
|
||||
return Err(Error::with_msg_no_trace("collected len mismatch"));
|
||||
}
|
||||
let ret = Self::Output {
|
||||
ts_anchor_sec,
|
||||
ts_off_ms,
|
||||
ts_off_ns,
|
||||
vals,
|
||||
valstrs,
|
||||
range_final: self.range_final,
|
||||
timed_out: self.timed_out,
|
||||
continue_at,
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
// Experiment with having this special case for enums
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim0Enum {
    // Event timestamps in nanoseconds.
    pub tss: VecDeque<u64>,
    // Numeric enum variant index per event.
    pub values: VecDeque<u16>,
    // Display string per event, parallel to `values`.
    pub valuestrs: VecDeque<String>,
}
|
||||
|
||||
impl EventsDim0Enum {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
tss: VecDeque::new(),
|
||||
values: VecDeque::new(),
|
||||
valuestrs: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_back(&mut self, ts: u64, value: u16, valuestr: String) {
|
||||
self.tss.push_back(ts);
|
||||
self.values.push_back(value);
|
||||
self.valuestrs.push_back(valuestr);
|
||||
}
|
||||
}
|
||||
|
||||
impl TypeName for EventsDim0Enum {
    // Fixed name for diagnostics.
    fn type_name(&self) -> String {
        "EventsDim0Enum".into()
    }
}
|
||||
|
||||
impl AsAnyRef for EventsDim0Enum {
    // Upcast for dynamic downcasting.
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
|
||||
|
||||
impl AsAnyMut for EventsDim0Enum {
    // Upcast for dynamic downcasting.
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
|
||||
|
||||
impl WithLen for EventsDim0Enum {
    // Event count (all columns share this length).
    fn len(&self) -> usize {
        self.tss.len()
    }
}
|
||||
|
||||
impl CollectableDyn for EventsDim0Enum {
    // Hands out the matching collector for dynamic collection.
    fn new_collector(&self) -> Box<dyn CollectorDyn> {
        Box::new(EventsDim0EnumCollector::new())
    }
}
|
||||
|
||||
// impl Events
|
||||
|
||||
impl ByteEstimate for EventsDim0Enum {
    // Not implemented yet; panics if called on this code path.
    fn byte_estimate(&self) -> u64 {
        todo!()
    }
}
|
||||
|
||||
impl EventsNonObj for EventsDim0Enum {
    // Not implemented: this type carries no pulse column, so the
    // (tss, pulses) split is not yet defined for it.
    fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
        todo!()
    }
}
|
||||
|
||||
// NOTE just a dummy because currently we don't use this for time binning
|
||||
// NOTE just a dummy because currently we don't use this for time binning
#[derive(Debug)]
pub struct EventsDim0EnumTimeBinner;
|
||||
|
||||
// Stub impl: satisfies the trait bound only; every method panics if called
// because enum events are currently never time-binned.
impl TimeBinnerTy for EventsDim0EnumTimeBinner {
    type Input = EventsDim0Enum;
    type Output = ();

    fn ingest(&mut self, item: &mut Self::Input) {
        todo!()
    }

    fn set_range_complete(&mut self) {
        todo!()
    }

    fn bins_ready_count(&self) -> usize {
        todo!()
    }

    fn bins_ready(&mut self) -> Option<Self::Output> {
        todo!()
    }

    fn push_in_progress(&mut self, push_empty: bool) {
        todo!()
    }

    fn cycle(&mut self) {
        todo!()
    }

    fn empty(&self) -> Option<Self::Output> {
        todo!()
    }

    fn append_empty_until_end(&mut self) {
        todo!()
    }
}
|
||||
|
||||
// NOTE just a dummy because currently we don't use this for time binning
|
||||
// NOTE just a dummy because currently we don't use this for time binning
impl TimeBinnableTy for EventsDim0Enum {
    type TimeBinner = EventsDim0EnumTimeBinner;

    // Panics if called; binning of enum events is not supported yet.
    fn time_binner_new(
        &self,
        binrange: BinnedRangeEnum,
        do_time_weight: bool,
        emit_empty_bins: bool,
    ) -> Self::TimeBinner {
        todo!()
    }
}
|
||||
|
||||
// NOTE just a dummy because currently we don't use this for time binning
|
||||
|
||||
// Wire format for streaming a chunk of enum events (used by to_cbor_vec_u8).
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim0EnumChunkOutput {
    tss: VecDeque<u64>,
    values: VecDeque<u16>,
    valuestrings: VecDeque<String>,
    // Name of the scalar type for the consumer side.
    scalar_type: String,
}
|
||||
|
||||
// Largely unimplemented Events impl: EventsDim0Enum is currently only
// collected and serialized (JSON/CBOR stubs below), not merged or binned.
// The `todo!()` methods panic if reached on those code paths.
impl Events for EventsDim0Enum {
    fn verify(&self) -> bool {
        todo!()
    }

    fn output_info(&self) -> String {
        todo!()
    }

    fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
        todo!()
    }

    fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
        todo!()
    }

    fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
        todo!()
    }

    fn ts_min(&self) -> Option<u64> {
        todo!()
    }

    fn ts_max(&self) -> Option<u64> {
        todo!()
    }

    fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
        todo!()
    }

    fn new_empty_evs(&self) -> Box<dyn Events> {
        todo!()
    }

    fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), items_0::MergeError> {
        todo!()
    }

    fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
        todo!()
    }

    fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
        todo!()
    }

    fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
        todo!()
    }

    fn clone_dyn(&self) -> Box<dyn Events> {
        todo!()
    }

    fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
        todo!()
    }

    fn serde_id(&self) -> &'static str {
        todo!()
    }

    fn nty_id(&self) -> u32 {
        todo!()
    }

    fn tss(&self) -> &VecDeque<u64> {
        todo!()
    }

    fn pulses(&self) -> &VecDeque<u64> {
        todo!()
    }

    fn frame_type_id(&self) -> u32 {
        todo!()
    }

    fn to_min_max_avg(&mut self) -> Box<dyn Events> {
        todo!()
    }

    fn to_json_string(&self) -> String {
        todo!()
    }

    // Delegates to to_json_string, which is still unimplemented, so this
    // panics as well if reached.
    fn to_json_vec_u8(&self) -> Vec<u8> {
        self.to_json_string().into_bytes()
    }

    // The one working serialization path: CBOR-encode a chunk snapshot.
    fn to_cbor_vec_u8(&self) -> Vec<u8> {
        // TODO redesign with mut access, rename to `into_` and take the values out.
        let ret = EventsDim0EnumChunkOutput {
            // TODO use &mut to swap the content
            tss: self.tss.clone(),
            values: self.values.clone(),
            valuestrings: self.valuestrs.clone(),
            scalar_type: netpod::EnumVariant::scalar_type_name().into(),
        };
        let mut buf = Vec::new();
        ciborium::into_writer(&ret, &mut buf).unwrap();
        buf
    }

    fn clear(&mut self) {
        todo!()
    }

    fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
        todo!("{}::to_dim0_f32_for_binning", self.type_name())
    }

    fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
        todo!("{}::to_container_events", self.type_name())
    }
}
|
||||
691
src/eventsdim1.rs
Normal file
691
src/eventsdim1.rs
Normal file
@@ -0,0 +1,691 @@
|
||||
use crate::binsdim0::BinsDim0;
|
||||
use crate::eventsxbindim0::EventsXbinDim0;
|
||||
use crate::IsoDateTime;
|
||||
use daqbuf_err as err;
|
||||
use err::Error;
|
||||
use items_0::collect_s::CollectableDyn;
|
||||
use items_0::collect_s::CollectableType;
|
||||
use items_0::collect_s::CollectedDyn;
|
||||
use items_0::collect_s::CollectorTy;
|
||||
use items_0::collect_s::ToJsonResult;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::overlap::HasTimestampDeque;
|
||||
use items_0::scalar_ops::ScalarOps;
|
||||
use items_0::Appendable;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::AsAnyRef;
|
||||
use items_0::Empty;
|
||||
use items_0::Events;
|
||||
use items_0::EventsNonObj;
|
||||
use items_0::MergeError;
|
||||
use items_0::TypeName;
|
||||
use items_0::WithLen;
|
||||
use netpod::is_false;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::SeriesRange;
|
||||
use netpod::timeunits::MS;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::BinnedRangeEnum;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
|
||||
// Trace macro with a per-call-site kill switch: invoking it as `trace2!(EN ...)`
// expands to nothing, while the plain form forwards to `trace!`.
#[allow(unused)]
macro_rules! trace2 {
    (EN$($arg:tt)*) => ();
    ($($arg:tt)*) => (trace!($($arg)*));
}
|
||||
|
||||
// Waveform (dim-1) events without a pulse-id column; convertible into
// EventsDim1 via the From impl below.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim1NoPulse<STY> {
    pub tss: VecDeque<u64>,
    pub values: VecDeque<Vec<STY>>,
}
|
||||
|
||||
impl<STY> From<EventsDim1NoPulse<STY>> for EventsDim1<STY> {
|
||||
fn from(value: EventsDim1NoPulse<STY>) -> Self {
|
||||
let pulses = vec![0; value.tss.len()].into();
|
||||
Self {
|
||||
tss: value.tss,
|
||||
pulses,
|
||||
values: value.values,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Waveform (dim-1) events: per event a timestamp, a pulse id and a vector of
// samples; the three columns always have equal length.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim1<STY> {
    pub tss: VecDeque<u64>,
    pub pulses: VecDeque<u64>,
    pub values: VecDeque<Vec<STY>>,
}
|
||||
|
||||
impl<STY> EventsDim1<STY> {
    // Appends one event at the back; columns grow in lockstep.
    #[inline(always)]
    pub fn push(&mut self, ts: u64, pulse: u64, value: Vec<STY>) {
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.values.push_back(value);
    }

    // Prepends one event at the front; columns grow in lockstep.
    #[inline(always)]
    pub fn push_front(&mut self, ts: u64, pulse: u64, value: Vec<STY>) {
        self.tss.push_front(ts);
        self.pulses.push_front(pulse);
        self.values.push_front(value);
    }

    // Stable identifier used for (de)serialization dispatch.
    pub fn serde_id() -> &'static str {
        "EventsDim1"
    }

    // Read-only view of the timestamp column.
    pub fn tss(&self) -> &VecDeque<u64> {
        &self.tss
    }
}
|
||||
|
||||
impl<STY> AsAnyRef for EventsDim1<STY>
where
    STY: ScalarOps,
{
    // Upcast for dynamic downcasting.
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
|
||||
|
||||
impl<STY> AsAnyMut for EventsDim1<STY>
where
    STY: ScalarOps,
{
    // Upcast for dynamic downcasting.
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
|
||||
|
||||
impl<STY> Empty for EventsDim1<STY> {
    /// Constructs a container with no events.
    fn empty() -> Self {
        Self {
            tss: Default::default(),
            pulses: Default::default(),
            values: Default::default(),
        }
    }
}
|
||||
|
||||
impl<STY> fmt::Debug for EventsDim1<STY>
where
    STY: fmt::Debug,
{
    // Compact debug output: count plus first/last timestamp (in seconds) and
    // first/last value. The `if false` branch is a kept-around verbose variant
    // that prints all timestamps and values; flip it on when debugging.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        if false {
            write!(
                fmt,
                "EventsDim1 {{ count {} ts {:?} vals {:?} }}",
                self.tss.len(),
                self.tss.iter().map(|x| x / SEC).collect::<Vec<_>>(),
                self.values,
            )
        } else {
            write!(
                fmt,
                "EventsDim1 {{ count {} ts {:?} .. {:?} vals {:?} .. {:?} }}",
                self.tss.len(),
                self.tss.front().map(|x| x / SEC),
                self.tss.back().map(|x| x / SEC),
                self.values.front(),
                self.values.back(),
            )
        }
    }
}
||||
|
||||
impl<STY> WithLen for EventsDim1<STY> {
    // Event count (all columns share this length).
    fn len(&self) -> usize {
        self.tss.len()
    }
}
|
||||
|
||||
impl<STY> ByteEstimate for EventsDim1<STY> {
    // Estimate: per event, 8 bytes ts + 8 bytes pulse + samples. The sample
    // count of the first event is assumed representative for all events.
    fn byte_estimate(&self) -> u64 {
        let stylen = mem::size_of::<STY>();
        let n = self.values.front().map_or(0, Vec::len);
        (self.len() * (8 + 8 + n * stylen)) as u64
    }
}
|
||||
|
||||
impl<STY: ScalarOps> HasTimestampDeque for EventsDim1<STY> {
    /// First (smallest) timestamp, if any events are present.
    fn timestamp_min(&self) -> Option<u64> {
        // `.copied()` replaces the manual `.map(|x| *x)`.
        self.tss.front().copied()
    }

    /// Last (largest) timestamp, if any events are present.
    fn timestamp_max(&self) -> Option<u64> {
        self.tss.back().copied()
    }

    /// First pulse id, if any events are present.
    fn pulse_min(&self) -> Option<u64> {
        self.pulses.front().copied()
    }

    /// Last pulse id, if any events are present.
    fn pulse_max(&self) -> Option<u64> {
        self.pulses.back().copied()
    }
}
|
||||
|
||||
// Wire format for streaming a chunk of dim-1 events.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim1ChunkOutput<STY> {
    tss: VecDeque<u64>,
    pulses: VecDeque<u64>,
    values: VecDeque<Vec<STY>>,
    // Name of the scalar type for the consumer side.
    scalar_type: String,
}
|
||||
|
||||
// Placeholder inherent impl; no methods yet.
impl<STY: ScalarOps> EventsDim1ChunkOutput<STY> {}
|
||||
|
||||
// Accumulates dim-1 events across multiple input chunks.
#[derive(Debug)]
pub struct EventsDim1Collector<STY> {
    // Events collected so far.
    vals: EventsDim1<STY>,
    // True when the full requested range has been covered.
    range_final: bool,
    // True when collection stopped due to a timeout.
    timed_out: bool,
    // True when the output should carry a continueAt hint.
    needs_continue_at: bool,
}
|
||||
|
||||
impl<STY> EventsDim1Collector<STY> {
    // Full type name (including STY) for log messages.
    pub fn self_name() -> &'static str {
        any::type_name::<Self>()
    }

    // Creates a collector with empty storage and all status flags cleared.
    pub fn new() -> Self {
        Self {
            vals: EventsDim1::empty(),
            range_final: false,
            timed_out: false,
            needs_continue_at: false,
        }
    }
}
|
||||
|
||||
impl<STY> WithLen for EventsDim1Collector<STY> {
    // Delegates to the collected events container.
    fn len(&self) -> usize {
        WithLen::len(&self.vals)
    }
}
|
||||
|
||||
// Delegates to the accumulated events container.
impl<STY> ByteEstimate for EventsDim1Collector<STY> {
    fn byte_estimate(&self) -> u64 {
        self.vals.byte_estimate()
    }
}
|
||||
|
||||
// JSON/CBOR-facing result of collecting dim-1 events. Timestamps and pulses
// are encoded as an anchor plus per-event offsets to keep the numbers small.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim1CollectorOutput<STY> {
    // Whole seconds anchor; offsets below are relative to this.
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    // Per-event offset from the anchor, millisecond part.
    #[serde(rename = "tsMs")]
    ts_off_ms: VecDeque<u64>,
    // Per-event offset, remaining nanosecond part (parallel to `ts_off_ms`).
    #[serde(rename = "tsNs")]
    ts_off_ns: VecDeque<u64>,
    // Base pulse id; `pulse_off` entries are relative to this.
    #[serde(rename = "pulseAnchor")]
    pulse_anchor: u64,
    #[serde(rename = "pulseOff")]
    pulse_off: VecDeque<u64>,
    // One waveform per event, parallel to the offset deques.
    #[serde(rename = "values")]
    values: VecDeque<Vec<STY>>,
    // Omitted from the serialized form when false.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    // Hint from where the client should continue after a timeout; absent otherwise.
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
}
|
||||
|
||||
impl<STY: ScalarOps> EventsDim1CollectorOutput<STY> {
|
||||
pub fn ts_anchor_sec(&self) -> u64 {
|
||||
self.ts_anchor_sec
|
||||
}
|
||||
|
||||
pub fn ts_off_ms(&self) -> &VecDeque<u64> {
|
||||
&self.ts_off_ms
|
||||
}
|
||||
|
||||
pub fn pulse_anchor(&self) -> u64 {
|
||||
self.pulse_anchor
|
||||
}
|
||||
|
||||
pub fn pulse_off(&self) -> &VecDeque<u64> {
|
||||
&self.pulse_off
|
||||
}
|
||||
|
||||
/// Note: only used for unit tests.
|
||||
pub fn values_to_f32(&self) -> VecDeque<Vec<f32>> {
|
||||
self.values
|
||||
.iter()
|
||||
.map(|x| x.iter().map(|x| x.as_prim_f32_b()).collect())
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn range_final(&self) -> bool {
|
||||
self.range_final
|
||||
}
|
||||
|
||||
pub fn timed_out(&self) -> bool {
|
||||
self.timed_out
|
||||
}
|
||||
|
||||
pub fn is_valid(&self) -> bool {
|
||||
if self.ts_off_ms.len() != self.ts_off_ns.len() {
|
||||
false
|
||||
} else if self.ts_off_ms.len() != self.pulse_off.len() {
|
||||
false
|
||||
} else if self.ts_off_ms.len() != self.values.len() {
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
pub fn info_str(&self) -> String {
|
||||
use fmt::Write;
|
||||
let mut out = String::new();
|
||||
write!(
|
||||
out,
|
||||
"ts_off_ms {} ts_off_ns {} pulse_off {} values {}",
|
||||
self.ts_off_ms.len(),
|
||||
self.ts_off_ns.len(),
|
||||
self.pulse_off.len(),
|
||||
self.values.len(),
|
||||
)
|
||||
.unwrap();
|
||||
out
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyRef for EventsDim1CollectorOutput<STY>
|
||||
where
|
||||
STY: 'static,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyMut for EventsDim1CollectorOutput<STY>
|
||||
where
|
||||
STY: 'static,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> TypeName for EventsDim1CollectorOutput<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> WithLen for EventsDim1CollectorOutput<STY> {
|
||||
fn len(&self) -> usize {
|
||||
self.values.len()
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize through serde into a JSON value for the dynamic result path.
impl<STY: ScalarOps> ToJsonResult for EventsDim1CollectorOutput<STY> {
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}
|
||||
|
||||
// Marker impl: makes this output usable through the dynamic collected-data API.
impl<STY: ScalarOps> CollectedDyn for EventsDim1CollectorOutput<STY> {}
|
||||
|
||||
impl<STY: ScalarOps> CollectorTy for EventsDim1Collector<STY> {
    type Input = EventsDim1<STY>;
    type Output = EventsDim1CollectorOutput<STY>;

    // Moves all events out of `src` into the accumulator; `src` is left empty.
    fn ingest(&mut self, src: &mut Self::Input) {
        self.vals.tss.append(&mut src.tss);
        self.vals.pulses.append(&mut src.pulses);
        self.vals.values.append(&mut src.values);
    }

    fn set_range_complete(&mut self) {
        self.range_final = true;
    }

    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }

    fn set_continue_at_here(&mut self) {
        debug!("{}::set_continue_at_here", Self::self_name());
        self.needs_continue_at = true;
    }

    // TODO unify with dim0 case
    // Converts the accumulated events into the anchor+offset output form and
    // resets the values column (tss/pulses are consumed via make_contiguous).
    fn result(
        &mut self,
        range: Option<SeriesRange>,
        _binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        // If we timed out, we want to hint the client from where to continue.
        // This is tricky: currently, client can not request a left-exclusive range.
        // We currently give the timestamp of the last event plus a small delta.
        // The amount of the delta must take into account what kind of timestamp precision the client
        // can parse and handle.
        let vals = &mut self.vals;
        let continue_at = if self.timed_out {
            if let Some(ts) = vals.tss.back() {
                // Last delivered event plus a small delta (MS).
                Some(IsoDateTime::from_ns_u64(*ts + MS))
            } else {
                // No events delivered: fall back to the range begin plus one
                // second, or to epoch zero when no range is known.
                if let Some(range) = &range {
                    match range {
                        SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)),
                        SeriesRange::PulseRange(_) => {
                            error!("TODO emit create continueAt for pulse range");
                            Some(IsoDateTime::from_ns_u64(0))
                        }
                    }
                } else {
                    warn!("can not determine continue-at parameters");
                    Some(IsoDateTime::from_ns_u64(0))
                }
            }
        } else {
            None
        };
        // Anchor + offsets encoding; requires contiguous slices.
        let tss_sl = vals.tss.make_contiguous();
        let pulses_sl = vals.pulses.make_contiguous();
        let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
        let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl);
        let values = mem::replace(&mut vals.values, VecDeque::new());
        // All per-event columns must agree in length.
        if ts_off_ms.len() != ts_off_ns.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        if ts_off_ms.len() != pulse_off.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        if ts_off_ms.len() != values.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        let ret = Self::Output {
            ts_anchor_sec,
            ts_off_ms,
            ts_off_ns,
            pulse_anchor,
            pulse_off,
            values,
            range_final: self.range_final,
            timed_out: self.timed_out,
            continue_at,
        };
        // Defensive re-check; logged only, the result is still returned.
        if !ret.is_valid() {
            error!("invalid:\n{}", ret.info_str());
        }
        Ok(ret)
    }
}
|
||||
|
||||
impl<STY: ScalarOps> CollectableType for EventsDim1<STY> {
|
||||
type Collector = EventsDim1Collector<STY>;
|
||||
|
||||
fn new_collector() -> Self::Collector {
|
||||
Self::Collector::new()
|
||||
}
|
||||
}
|
||||
|
||||
// Remnant of a dim-1 time-binning aggregator; construction panics (`new` is
// "TODO remove"), only the Drop stats logging is still active.
#[derive(Debug)]
pub struct EventsDim1Aggregator<STY> {
    _last_seen_val: Option<STY>,
    // Counters reported on Drop for request diagnostics.
    events_taken_count: u64,
    events_ignored_count: u64,
}
|
||||
|
||||
impl<STY> Drop for EventsDim1Aggregator<STY> {
    // Logs the taken/ignored counters when the aggregator goes away.
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        trace!(
            "taken {} ignored {}",
            self.events_taken_count,
            self.events_ignored_count
        );
    }
}
|
||||
|
||||
impl<STY: ScalarOps> EventsDim1Aggregator<STY> {
    // Construction is disabled: this aggregator is slated for removal and
    // panics unconditionally if anyone still tries to build one.
    pub fn new(_range: SeriesRange, _do_time_weight: bool) -> Self {
        panic!("TODO remove")
    }
}
|
||||
|
||||
impl<STY> items_0::TypeName for EventsDim1<STY> {
    // Short display name including the scalar type parameter.
    fn type_name(&self) -> String {
        format!("EventsDim1<{}>", std::any::type_name::<STY>())
    }
}
|
||||
|
||||
impl<STY: ScalarOps> EventsNonObj for EventsDim1<STY> {
    // Deliberately unimplemented for dim-1 events; panics if ever called.
    fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
        panic!("TODO remove")
    }
}
|
||||
|
||||
impl<STY: ScalarOps> Events for EventsDim1<STY> {
|
||||
fn verify(&self) -> bool {
|
||||
let mut good = true;
|
||||
let mut ts_max = 0;
|
||||
for ts in &self.tss {
|
||||
let ts = *ts;
|
||||
if ts < ts_max {
|
||||
good = false;
|
||||
error!("unordered event data ts {} ts_max {}", ts, ts_max);
|
||||
}
|
||||
ts_max = ts_max.max(ts);
|
||||
}
|
||||
good
|
||||
}
|
||||
|
||||
fn output_info(&self) -> String {
|
||||
let n2 = self.tss.len().max(1) - 1;
|
||||
format!(
|
||||
"EventsDim1OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
|
||||
self.tss.len(),
|
||||
self.tss.get(0).map_or(-1i64, |&x| x as i64),
|
||||
self.tss.get(n2).map_or(-1i64, |&x| x as i64),
|
||||
)
|
||||
}
|
||||
|
||||
fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
|
||||
self
|
||||
}
|
||||
|
||||
fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
|
||||
// TODO improve the search
|
||||
let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
|
||||
let tss = self.tss.drain(..n1).collect();
|
||||
let pulses = self.pulses.drain(..n1).collect();
|
||||
let values = self.values.drain(..n1).collect();
|
||||
let ret = Self { tss, pulses, values };
|
||||
Box::new(ret)
|
||||
}
|
||||
|
||||
fn new_empty_evs(&self) -> Box<dyn Events> {
|
||||
Box::new(Self::empty())
|
||||
}
|
||||
|
||||
fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> {
|
||||
// TODO as_any and as_any_mut are declared on unrelated traits. Simplify.
|
||||
if let Some(dst) = dst.as_any_mut().downcast_mut::<Self>() {
|
||||
// TODO make it harder to forget new members when the struct may get modified in the future
|
||||
let r = range.0..range.1;
|
||||
dst.tss.extend(self.tss.drain(r.clone()));
|
||||
dst.pulses.extend(self.pulses.drain(r.clone()));
|
||||
dst.values.extend(self.values.drain(r.clone()));
|
||||
Ok(())
|
||||
} else {
|
||||
error!("downcast to EventsDim0 FAILED");
|
||||
Err(MergeError::NotCompatible)
|
||||
}
|
||||
}
|
||||
|
||||
fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate() {
|
||||
if m > ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate() {
|
||||
if m >= ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
|
||||
for (i, &m) in self.tss.iter().enumerate().rev() {
|
||||
if m < ts {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn ts_min(&self) -> Option<u64> {
|
||||
self.tss.front().map(|&x| x)
|
||||
}
|
||||
|
||||
fn ts_max(&self) -> Option<u64> {
|
||||
self.tss.back().map(|&x| x)
|
||||
}
|
||||
|
||||
fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
|
||||
if let Some(other) = other.as_any_ref().downcast_ref::<Self>() {
|
||||
self == other
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn serde_id(&self) -> &'static str {
|
||||
Self::serde_id()
|
||||
}
|
||||
|
||||
fn nty_id(&self) -> u32 {
|
||||
STY::SUB
|
||||
}
|
||||
|
||||
fn clone_dyn(&self) -> Box<dyn Events> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn tss(&self) -> &VecDeque<u64> {
|
||||
&self.tss
|
||||
}
|
||||
|
||||
fn pulses(&self) -> &VecDeque<u64> {
|
||||
&self.pulses
|
||||
}
|
||||
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
// TODO make more nice
|
||||
panic!()
|
||||
}
|
||||
|
||||
fn to_min_max_avg(&mut self) -> Box<dyn Events> {
|
||||
let mins = self
|
||||
.values
|
||||
.iter()
|
||||
.map(|x| STY::find_vec_min(x))
|
||||
.map(|x| x.unwrap_or_else(|| STY::zero_b()))
|
||||
.collect();
|
||||
let maxs = self
|
||||
.values
|
||||
.iter()
|
||||
.map(|x| STY::find_vec_max(x))
|
||||
.map(|x| x.unwrap_or_else(|| STY::zero_b()))
|
||||
.collect();
|
||||
let avgs = self
|
||||
.values
|
||||
.iter()
|
||||
.map(|x| STY::avg_vec(x))
|
||||
.map(|x| x.unwrap_or_else(|| STY::zero_b()))
|
||||
.map(|x| x.as_prim_f32_b())
|
||||
.collect();
|
||||
let item = EventsXbinDim0 {
|
||||
tss: mem::replace(&mut self.tss, VecDeque::new()),
|
||||
pulses: mem::replace(&mut self.pulses, VecDeque::new()),
|
||||
mins,
|
||||
maxs,
|
||||
avgs,
|
||||
};
|
||||
Box::new(item)
|
||||
}
|
||||
|
||||
fn to_json_string(&self) -> String {
|
||||
let ret = EventsDim1ChunkOutput {
|
||||
// TODO use &mut to swap the content
|
||||
tss: self.tss.clone(),
|
||||
pulses: self.pulses.clone(),
|
||||
values: self.values.clone(),
|
||||
scalar_type: STY::scalar_type_name().into(),
|
||||
};
|
||||
serde_json::to_string(&ret).unwrap()
|
||||
}
|
||||
|
||||
fn to_json_vec_u8(&self) -> Vec<u8> {
|
||||
self.to_json_string().into_bytes()
|
||||
}
|
||||
|
||||
fn to_cbor_vec_u8(&self) -> Vec<u8> {
|
||||
let ret = EventsDim1ChunkOutput {
|
||||
// TODO use &mut to swap the content
|
||||
tss: self.tss.clone(),
|
||||
pulses: self.pulses.clone(),
|
||||
values: self.values.clone(),
|
||||
scalar_type: STY::scalar_type_name().into(),
|
||||
};
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(&ret, &mut buf).unwrap();
|
||||
buf
|
||||
}
|
||||
|
||||
fn clear(&mut self) {
|
||||
self.tss.clear();
|
||||
self.pulses.clear();
|
||||
self.values.clear();
|
||||
}
|
||||
|
||||
fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
|
||||
todo!("{}::to_dim0_f32_for_binning", self.type_name())
|
||||
}
|
||||
|
||||
fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
|
||||
todo!("{}::to_container_events", self.type_name())
|
||||
}
|
||||
}
|
||||
|
||||
// Lets generic code append one (timestamp, pulse, waveform) event at a time.
impl<STY> Appendable<Vec<STY>> for EventsDim1<STY>
where
    STY: ScalarOps,
{
    // Delegates to the inherent `push` defined earlier in this file.
    fn push(&mut self, ts: u64, pulse: u64, value: Vec<STY>) {
        Self::push(self, ts, pulse, value)
    }
}
|
||||
779
src/eventsxbindim0.rs
Normal file
779
src/eventsxbindim0.rs
Normal file
@@ -0,0 +1,779 @@
|
||||
use crate::binsxbindim0::BinsXbinDim0;
|
||||
use crate::IsoDateTime;
|
||||
use daqbuf_err as err;
|
||||
use err::Error;
|
||||
use items_0::collect_s::CollectableDyn;
|
||||
use items_0::collect_s::CollectableType;
|
||||
use items_0::collect_s::CollectedDyn;
|
||||
use items_0::collect_s::CollectorTy;
|
||||
use items_0::collect_s::ToJsonResult;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::overlap::HasTimestampDeque;
|
||||
use items_0::scalar_ops::ScalarOps;
|
||||
use items_0::timebin::TimeBinnerTy;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::AsAnyRef;
|
||||
use items_0::Empty;
|
||||
use items_0::Events;
|
||||
use items_0::EventsNonObj;
|
||||
use items_0::MergeError;
|
||||
use items_0::TypeName;
|
||||
use items_0::WithLen;
|
||||
use netpod::is_false;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::NanoRange;
|
||||
use netpod::range::evrange::SeriesRange;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::BinnedRangeEnum;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
|
||||
// Compile-time trace toggle: the first arm matches everything and expands to
// nothing, so the `trace!` arm below is unreachable dead weight. Swap or
// delete the first arm to enable ingest tracing.
#[allow(unused)]
macro_rules! trace_ingest {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
|
||||
|
||||
// Same toggle pattern as trace_ingest!: first arm wins, tracing is disabled;
// swap the arms to turn it on.
#[allow(unused)]
macro_rules! trace2 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
|
||||
|
||||
// Events whose waveform dimension has already been reduced ("x-binned") to
// per-event min/max/avg. All deques are parallel, one entry per event.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsXbinDim0<NTY> {
    // Event timestamps (nanoseconds, judging by the SEC usage in Debug below).
    pub tss: VecDeque<u64>,
    pub pulses: VecDeque<u64>,
    pub mins: VecDeque<NTY>,
    pub maxs: VecDeque<NTY>,
    // Averages are always f32 regardless of the scalar type NTY.
    pub avgs: VecDeque<f32>,
    // TODO maybe add variance?
}
|
||||
|
||||
impl<NTY> EventsXbinDim0<NTY> {
    // Appends one reduced event at the back of all parallel columns.
    #[inline(always)]
    pub fn push(&mut self, ts: u64, pulse: u64, min: NTY, max: NTY, avg: f32) {
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.mins.push_back(min);
        self.maxs.push_back(max);
        self.avgs.push_back(avg);
    }

    // Prepends one reduced event at the front of all parallel columns.
    #[inline(always)]
    pub fn push_front(&mut self, ts: u64, pulse: u64, min: NTY, max: NTY, avg: f32) {
        self.tss.push_front(ts);
        self.pulses.push_front(pulse);
        self.mins.push_front(min);
        self.maxs.push_front(max);
        self.avgs.push_front(avg);
    }

    // Stable identifier used by the serialization framing.
    pub fn serde_id() -> &'static str {
        "EventsXbinDim0"
    }
}
|
||||
|
||||
impl<STY> TypeName for EventsXbinDim0<STY> {
|
||||
fn type_name(&self) -> String {
|
||||
any::type_name::<Self>().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> fmt::Debug for EventsXbinDim0<STY>
where
    STY: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // `if false` is a manual debug toggle: flip to true to dump all
        // timestamps/avgs instead of just the first and last.
        if false {
            write!(
                fmt,
                "{} {{ count {} ts {:?} vals {:?} }}",
                self.type_name(),
                self.tss.len(),
                self.tss.iter().map(|x| x / SEC).collect::<Vec<_>>(),
                self.avgs,
            )
        } else {
            // Compact form: count plus first/last timestamp (in seconds) and avg.
            write!(
                fmt,
                "{} {{ count {} ts {:?} .. {:?} vals {:?} .. {:?} }}",
                self.type_name(),
                self.tss.len(),
                self.tss.front().map(|x| x / SEC),
                self.tss.back().map(|x| x / SEC),
                self.avgs.front(),
                self.avgs.back(),
            )
        }
    }
}
|
||||
|
||||
impl<STY> ByteEstimate for EventsXbinDim0<STY> {
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
let stylen = mem::size_of::<STY>();
|
||||
(self.len() * (8 + 8 + 2 * stylen + 4)) as u64
|
||||
}
|
||||
}
|
||||
|
||||
// All columns start out empty.
impl<STY> Empty for EventsXbinDim0<STY> {
    fn empty() -> Self {
        Self {
            tss: Default::default(),
            pulses: Default::default(),
            mins: Default::default(),
            maxs: Default::default(),
            avgs: Default::default(),
        }
    }
}
|
||||
|
||||
impl<STY> AsAnyRef for EventsXbinDim0<STY>
|
||||
where
|
||||
STY: ScalarOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> AsAnyMut for EventsXbinDim0<STY>
|
||||
where
|
||||
STY: ScalarOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY> WithLen for EventsXbinDim0<STY> {
|
||||
fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
// First/last timestamp and pulse of the deques; None when empty.
impl<STY: ScalarOps> HasTimestampDeque for EventsXbinDim0<STY> {
    fn timestamp_min(&self) -> Option<u64> {
        self.tss.front().copied()
    }

    fn timestamp_max(&self) -> Option<u64> {
        self.tss.back().copied()
    }

    fn pulse_min(&self) -> Option<u64> {
        self.pulses.front().copied()
    }

    fn pulse_max(&self) -> Option<u64> {
        self.pulses.back().copied()
    }
}
|
||||
|
||||
impl<STY: ScalarOps> EventsNonObj for EventsXbinDim0<STY> {
|
||||
fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
|
||||
info!(
|
||||
"EventsXbinDim0::into_tss_pulses len {} len {}",
|
||||
self.tss.len(),
|
||||
self.pulses.len()
|
||||
);
|
||||
(self.tss, self.pulses)
|
||||
}
|
||||
}
|
||||
|
||||
impl<STY: ScalarOps> Events for EventsXbinDim0<STY> {
    // Checks that timestamps are non-decreasing; logs every violation found.
    fn verify(&self) -> bool {
        let mut good = true;
        let mut ts_max = 0;
        for ts in &self.tss {
            let ts = *ts;
            if ts < ts_max {
                good = false;
                error!("unordered event data ts {} ts_max {}", ts, ts_max);
            }
            ts_max = ts_max.max(ts);
        }
        good
    }

    // One-line summary: length plus first/last timestamp (-1 when empty).
    fn output_info(&self) -> String {
        let n2 = self.tss.len().max(1) - 1;
        format!(
            "EventsXbinDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
            self.tss.len(),
            self.tss.get(0).map_or(-1i64, |&x| x as i64),
            self.tss.get(n2).map_or(-1i64, |&x| x as i64),
        )
    }

    fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
        self
    }

    fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
        self
    }

    fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
        self
    }

    // Splits off and returns all leading events with ts <= ts_end.
    fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
        // TODO improve the search
        let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
        let tss = self.tss.drain(..n1).collect();
        let pulses = self.pulses.drain(..n1).collect();
        let mins = self.mins.drain(..n1).collect();
        let maxs = self.maxs.drain(..n1).collect();
        let avgs = self.avgs.drain(..n1).collect();
        let ret = Self {
            tss,
            pulses,
            mins,
            maxs,
            avgs,
        };
        Box::new(ret)
    }

    fn new_empty_evs(&self) -> Box<dyn Events> {
        Box::new(Self::empty())
    }

    // Moves events in index range [range.0, range.1) into `dst` (same type).
    fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> {
        // TODO as_any and as_any_mut are declared on unrelated traits. Simplify.
        if let Some(dst) = dst.as_any_mut().downcast_mut::<Self>() {
            // TODO make it harder to forget new members when the struct may get modified in the future
            let r = range.0..range.1;
            dst.tss.extend(self.tss.drain(r.clone()));
            dst.pulses.extend(self.pulses.drain(r.clone()));
            dst.mins.extend(self.mins.drain(r.clone()));
            dst.maxs.extend(self.maxs.drain(r.clone()));
            dst.avgs.extend(self.avgs.drain(r.clone()));
            Ok(())
        } else {
            error!("downcast to {} FAILED", self.type_name());
            Err(MergeError::NotCompatible)
        }
    }

    // Linear index searches over the timestamp column.
    fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate() {
            if m > ts {
                return Some(i);
            }
        }
        None
    }

    fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate() {
            if m >= ts {
                return Some(i);
            }
        }
        None
    }

    fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate().rev() {
            if m < ts {
                return Some(i);
            }
        }
        None
    }

    fn ts_min(&self) -> Option<u64> {
        self.tss.front().map(|&x| x)
    }

    fn ts_max(&self) -> Option<u64> {
        self.tss.back().map(|&x| x)
    }

    // Equality only against the same concrete type; false otherwise.
    fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
        if let Some(other) = other.as_any_ref().downcast_ref::<Self>() {
            self == other
        } else {
            false
        }
    }

    fn serde_id(&self) -> &'static str {
        Self::serde_id()
    }

    fn nty_id(&self) -> u32 {
        STY::SUB
    }

    fn clone_dyn(&self) -> Box<dyn Events> {
        Box::new(self.clone())
    }

    fn tss(&self) -> &VecDeque<u64> {
        &self.tss
    }

    fn pulses(&self) -> &VecDeque<u64> {
        &self.pulses
    }

    fn frame_type_id(&self) -> u32 {
        error!("TODO frame_type_id should not be called");
        // TODO make more nice
        panic!()
    }

    // Already min/max/avg form: move the columns out, leaving self empty.
    fn to_min_max_avg(&mut self) -> Box<dyn Events> {
        let dst = Self {
            tss: mem::replace(&mut self.tss, Default::default()),
            pulses: mem::replace(&mut self.pulses, Default::default()),
            mins: mem::replace(&mut self.mins, Default::default()),
            maxs: mem::replace(&mut self.maxs, Default::default()),
            avgs: mem::replace(&mut self.avgs, Default::default()),
        };
        Box::new(dst)
    }

    // Serialization of this shape is not implemented yet.
    fn to_json_string(&self) -> String {
        todo!()
    }

    fn to_json_vec_u8(&self) -> Vec<u8> {
        todo!()
    }

    fn to_cbor_vec_u8(&self) -> Vec<u8> {
        todo!()
    }

    fn clear(&mut self) {
        self.tss.clear();
        self.pulses.clear();
        self.mins.clear();
        self.maxs.clear();
        self.avgs.clear();
    }

    fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
        todo!("{}::to_dim0_f32_for_binning", self.type_name())
    }

    fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
        todo!("{}::to_container_events", self.type_name())
    }
}
|
||||
|
||||
// Time-bins x-binned events into a single running bin; see the
// ingest_*/result_reset_* methods for the two weighting modes.
#[derive(Debug)]
pub struct EventsXbinDim0Aggregator<STY>
where
    STY: ScalarOps,
{
    // The bin currently being filled.
    range: SeriesRange,
    /// Number of events which actually fall in this bin.
    count: u64,
    min: STY,
    max: STY,
    /// Number of times we accumulated to the sum of this bin.
    sumc: u64,
    // Accumulated (possibly time-weighted) sum of averages.
    sum: f32,
    // Timestamp up to which the time-weighted integral has been advanced.
    int_ts: u64,
    // Timestamp of the last event seen (inside or before the bin).
    last_ts: u64,
    // (min, max, avg) of the last event seen; carried across bin boundaries.
    last_vals: Option<(STY, STY, f32)>,
    // Whether min/max already hold real data (kept in sync with sumc > 0).
    did_min_max: bool,
    // Selects time-weighted vs. unweighted accumulation.
    do_time_weight: bool,
    events_ignored_count: u64,
}
|
||||
|
||||
impl<STY> EventsXbinDim0Aggregator<STY>
where
    STY: ScalarOps,
{
    pub fn type_name() -> &'static str {
        std::any::type_name::<Self>()
    }

    // New aggregator for `range`; the integral starts at the range begin.
    pub fn new(range: SeriesRange, do_time_weight: bool) -> Self {
        let int_ts = range.beg_u64();
        Self {
            range,
            did_min_max: false,
            count: 0,
            min: STY::zero_b(),
            max: STY::zero_b(),
            sumc: 0,
            sum: 0f32,
            int_ts,
            last_ts: 0,
            last_vals: None,
            events_ignored_count: 0,
        }
    }

    // Folds an event's min/max into the bin. First contribution (sumc == 0)
    // overwrites the zero placeholders; later ones widen the extremes.
    fn apply_min_max(&mut self, min: &STY, max: &STY) {
        // Invariant: did_min_max tracks whether any accumulation happened.
        if self.did_min_max != (self.sumc > 0) {
            panic!("logic error apply_min_max {} {}", self.did_min_max, self.sumc);
        }
        if self.sumc == 0 {
            self.did_min_max = true;
            self.min = min.clone();
            self.max = max.clone();
        } else {
            if *min < self.min {
                self.min = min.clone();
            }
            if *max > self.max {
                self.max = max.clone();
            }
        }
    }

    // Unweighted accumulation: every event counts equally; NaN avgs are
    // skipped for the sum but still bump sumc.
    fn apply_event_unweight(&mut self, avg: f32, min: STY, max: STY) {
        //debug!("apply_event_unweight");
        self.apply_min_max(&min, &max);
        self.sumc += 1;
        let vf = avg;
        if vf.is_nan() {
        } else {
            self.sum += vf;
        }
    }

    // Only integrate, do not count because it is used even if the event does not fall into current bin.
    // Integrates the previously seen value over [int_ts, px] (weight in seconds).
    fn apply_event_time_weight(&mut self, px: u64) {
        trace_ingest!(
            "apply_event_time_weight px {} count {} sumc {} events_ignored_count {}",
            px,
            self.count,
            self.sumc,
            self.events_ignored_count
        );
        if let Some((min, max, avg)) = self.last_vals.as_ref() {
            let vf = *avg;
            {
                let min = min.clone();
                let max = max.clone();
                self.apply_min_max(&min, &max);
            }
            self.sumc += 1;
            // Weight is the elapsed interval converted from ns to seconds.
            let w = (px - self.int_ts) as f32 * 1e-9;
            if vf.is_nan() {
            } else {
                self.sum += vf * w;
            }
            self.int_ts = px;
        } else {
            debug!("apply_event_time_weight NO VALUE");
        }
    }

    // Unweighted ingest is not implemented; the commented code sketches it.
    fn ingest_unweight(&mut self, item: &EventsXbinDim0<STY>) {
        /*for i1 in 0..item.tss.len() {
            let ts = item.tss[i1];
            let avg = item.avgs[i1];
            let min = item.mins[i1].clone();
            let max = item.maxs[i1].clone();
            if ts < self.range.beg {
            } else if ts >= self.range.end {
            } else {
                self.apply_event_unweight(avg, min, max);
            }
        }*/
        todo!()
    }

    // Time-weighted ingest: events before the bin only update last_vals
    // (carry-in), events inside integrate and count, events after are ignored.
    fn ingest_time_weight(&mut self, item: &EventsXbinDim0<STY>) {
        trace!(
            "{} ingest_time_weight range {:?} last_ts {:?} int_ts {:?}",
            Self::type_name(),
            self.range,
            self.last_ts,
            self.int_ts
        );
        let range_beg = self.range.beg_u64();
        let range_end = self.range.end_u64();
        for (((&ts, min), max), avg) in item
            .tss
            .iter()
            .zip(item.mins.iter())
            .zip(item.maxs.iter())
            .zip(item.avgs.iter())
        {
            if ts >= range_end {
                self.events_ignored_count += 1;
                // TODO break early when tests pass.
                //break;
            } else if ts >= range_beg {
                // Inside the bin: integrate the previous value up to ts, then
                // remember this event as the new carry value.
                self.apply_event_time_weight(ts);
                self.count += 1;
                self.last_ts = ts;
                self.last_vals = Some((min.clone(), max.clone(), avg.clone()));
            } else {
                // Before the bin: only refresh the carry-in value.
                self.events_ignored_count += 1;
                self.last_ts = ts;
                self.last_vals = Some((min.clone(), max.clone(), avg.clone()));
            }
        }
    }

    // Unweighted bin finalization is not implemented; commented code sketches it.
    fn result_reset_unweight(&mut self, range: SeriesRange) -> BinsXbinDim0<STY> {
        /*let avg = if self.sumc == 0 {
            0f32
        } else {
            self.sum / self.sumc as f32
        };
        let ret = BinsXbinDim0::from_content(
            [self.range.beg].into(),
            [self.range.end].into(),
            [self.count].into(),
            [self.min.clone()].into(),
            [self.max.clone()].into(),
            [avg].into(),
        );
        self.int_ts = range.beg;
        self.range = range;
        self.sum = 0f32;
        self.sumc = 0;
        self.did_min_max = false;
        self.min = NTY::zero_b();
        self.max = NTY::zero_b();
        ret*/
        todo!()
    }

    // Emits the current bin (integrating the carry value up to the bin end)
    // and resets the accumulator for the next bin `range`.
    fn result_reset_time_weight(&mut self, range: SeriesRange) -> BinsXbinDim0<STY> {
        trace!("{} result_reset_time_weight", Self::type_name());
        // TODO check callsite for correct expand status.
        if self.range.is_time() {
            self.apply_event_time_weight(self.range.end_u64());
        } else {
            error!("TODO result_reset_time_weight");
            err::todoval()
        }
        let range_beg = self.range.beg_u64();
        let range_end = self.range.end_u64();
        let (min, max, avg) = if self.sumc > 0 {
            // Average = time integral divided by the bin width in seconds.
            let avg = self.sum / (self.range.delta_u64() as f32 * 1e-9);
            (self.min.clone(), self.max.clone(), avg)
        } else {
            // sumc == 0 should be impossible when a carry value exists, since
            // the integration above accumulates; kept as a loud fallback.
            let (min, max, avg) = match &self.last_vals {
                Some((min, max, avg)) => {
                    warn!("\n\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! SHOULD ALWAYS HAVE ACCUMULATED IN THIS CASE");
                    (min.clone(), max.clone(), avg.clone())
                }
                None => (STY::zero_b(), STY::zero_b(), 0.),
            };
            (min, max, avg)
        };
        let ret = BinsXbinDim0::from_content(
            [range_beg].into(),
            [range_end].into(),
            [self.count].into(),
            [min.clone()].into(),
            [max.clone()].into(),
            [avg].into(),
        );
        // Reset for the next bin; last_ts/last_vals carry over on purpose.
        self.int_ts = range.beg_u64();
        self.range = range;
        self.count = 0;
        self.sumc = 0;
        self.sum = 0.;
        self.did_min_max = false;
        self.min = STY::zero_b();
        self.max = STY::zero_b();
        ret
    }
}
|
||||
|
||||
// JSON-facing result of collecting x-binned events; same anchor+offset
// timestamp encoding as the dim-1 collector output.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsXbinDim0CollectorOutput<NTY> {
    // Whole seconds anchor; offsets below are relative to this.
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "tsMs")]
    ts_off_ms: VecDeque<u64>,
    #[serde(rename = "tsNs")]
    ts_off_ns: VecDeque<u64>,
    #[serde(rename = "pulseAnchor")]
    pulse_anchor: u64,
    #[serde(rename = "pulseOff")]
    pulse_off: VecDeque<u64>,
    // Per-event reduced values, parallel to the offset deques.
    #[serde(rename = "mins")]
    mins: VecDeque<NTY>,
    #[serde(rename = "maxs")]
    maxs: VecDeque<NTY>,
    #[serde(rename = "avgs")]
    avgs: VecDeque<f32>,
    // Flags omitted from the serialized form when false/absent.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
}
|
||||
|
||||
// Trait plumbing so the output can participate in the crate's dynamic
// (type-erased) collection machinery.

impl<NTY> AsAnyRef for EventsXbinDim0CollectorOutput<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}

impl<NTY> AsAnyMut for EventsXbinDim0CollectorOutput<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}

// NOTE(review): this impl uses param name `STY` and no `ScalarOps` bound,
// unlike the sibling impls above — presumably intentional since `type_name`
// needs no bound, but inconsistent naming; verify.
impl<STY> TypeName for EventsXbinDim0CollectorOutput<STY> {
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}

impl<NTY: ScalarOps> WithLen for EventsXbinDim0CollectorOutput<NTY> {
    // Length is the number of events; `mins` is representative since all
    // columns are kept in lockstep by the collector.
    fn len(&self) -> usize {
        self.mins.len()
    }
}

impl<NTY> ToJsonResult for EventsXbinDim0CollectorOutput<NTY>
where
    NTY: ScalarOps,
{
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}

impl<NTY> CollectedDyn for EventsXbinDim0CollectorOutput<NTY> where NTY: ScalarOps {}
|
||||
|
||||
/// Accumulates `EventsXbinDim0` batches into a single container and tracks
/// stream-level status flags (range completeness, timeout, continuation)
/// until `result` is called to produce the serializable output.
#[derive(Debug)]
pub struct EventsXbinDim0Collector<NTY> {
    // All ingested events, appended in arrival order.
    vals: EventsXbinDim0<NTY>,
    range_final: bool,
    timed_out: bool,
    // Set when the caller wants a continue-at marker in the final output.
    needs_continue_at: bool,
}

impl<NTY> EventsXbinDim0Collector<NTY> {
    /// Fully-qualified type name, used for logging/diagnostics.
    pub fn self_name() -> &'static str {
        any::type_name::<Self>()
    }

    /// Creates an empty collector with all status flags cleared.
    pub fn new() -> Self {
        Self {
            range_final: false,
            timed_out: false,
            vals: EventsXbinDim0::empty(),
            needs_continue_at: false,
        }
    }
}
|
||||
|
||||
// Both impls simply forward to the inner event container.

impl<NTY> WithLen for EventsXbinDim0Collector<NTY> {
    fn len(&self) -> usize {
        WithLen::len(&self.vals)
    }
}

impl<STY> ByteEstimate for EventsXbinDim0Collector<STY> {
    fn byte_estimate(&self) -> u64 {
        ByteEstimate::byte_estimate(&self.vals)
    }
}
|
||||
|
||||
impl<NTY> CollectorTy for EventsXbinDim0Collector<NTY>
where
    NTY: ScalarOps,
{
    type Input = EventsXbinDim0<NTY>;
    type Output = EventsXbinDim0CollectorOutput<NTY>;

    /// Moves all columns out of `src` and appends them to the accumulated
    /// values. `src` is left empty; the column deques stay in lockstep.
    fn ingest(&mut self, src: &mut Self::Input) {
        self.vals.tss.append(&mut src.tss);
        self.vals.pulses.append(&mut src.pulses);
        self.vals.mins.append(&mut src.mins);
        self.vals.maxs.append(&mut src.maxs);
        self.vals.avgs.append(&mut src.avgs);
    }

    fn set_range_complete(&mut self) {
        self.range_final = true;
    }

    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }

    fn set_continue_at_here(&mut self) {
        self.needs_continue_at = true;
    }

    /// Finalizes collection into the serializable output.
    ///
    /// WARNING: currently unimplemented (`todo!()`) — calling this panics.
    /// The commented-out draft below shows the intended behavior: compute a
    /// continue-at timestamp on timeout, split timestamps/pulses into
    /// anchor + offsets, and move the aggregate columns into the output.
    fn result(
        &mut self,
        range: Option<SeriesRange>,
        _binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        /*use std::mem::replace;
        let continue_at = if self.timed_out {
            if let Some(ts) = self.vals.tss.back() {
                Some(IsoDateTime::from_u64(*ts + netpod::timeunits::MS))
            } else {
                if let Some(range) = &range {
                    Some(IsoDateTime::from_u64(range.beg + netpod::timeunits::SEC))
                } else {
                    warn!("can not determine continue-at parameters");
                    None
                }
            }
        } else {
            None
        };
        let mins = replace(&mut self.vals.mins, VecDeque::new());
        let maxs = replace(&mut self.vals.maxs, VecDeque::new());
        let avgs = replace(&mut self.vals.avgs, VecDeque::new());
        self.vals.tss.make_contiguous();
        self.vals.pulses.make_contiguous();
        let tst = crate::ts_offs_from_abs(self.vals.tss.as_slices().0);
        let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(&self.vals.pulses.as_slices().0);
        let ret = Self::Output {
            ts_anchor_sec: tst.0,
            ts_off_ms: tst.1,
            ts_off_ns: tst.2,
            pulse_anchor,
            pulse_off,
            mins,
            maxs,
            avgs,
            range_final: self.range_final,
            timed_out: self.timed_out,
            continue_at,
        };
        Ok(ret)*/
        todo!()
    }
}
|
||||
|
||||
// Wires `EventsXbinDim0` into the generic collection framework by naming
// its collector type and providing a factory for it.
impl<NTY> CollectableType for EventsXbinDim0<NTY>
where
    NTY: ScalarOps,
{
    type Collector = EventsXbinDim0Collector<NTY>;

    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}
|
||||
221
src/framable.rs
Normal file
221
src/framable.rs
Normal file
@@ -0,0 +1,221 @@
|
||||
use crate::frame::make_error_frame;
|
||||
use crate::frame::make_frame_2;
|
||||
use crate::frame::make_log_frame;
|
||||
use crate::frame::make_range_complete_frame;
|
||||
use crate::frame::make_stats_frame;
|
||||
use bytes::BytesMut;
|
||||
use daqbuf_err as err;
|
||||
use items_0::framable::FrameTypeInnerDyn;
|
||||
use items_0::framable::FrameTypeInnerStatic;
|
||||
use items_0::streamitem::LogItem;
|
||||
use items_0::streamitem::RangeCompletableItem;
|
||||
use items_0::streamitem::Sitemty;
|
||||
use items_0::streamitem::StatsItem;
|
||||
use items_0::streamitem::StreamItem;
|
||||
use items_0::streamitem::ERROR_FRAME_TYPE_ID;
|
||||
use items_0::streamitem::EVENT_QUERY_JSON_STRING_FRAME;
|
||||
use items_0::streamitem::SITEMTY_NONSPEC_FRAME_TYPE_ID;
|
||||
use items_0::Events;
|
||||
use netpod::log::*;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
|
||||
pub const INMEM_FRAME_ENCID: u32 = 0x12121212;
|
||||
pub const INMEM_FRAME_HEAD: usize = 20;
|
||||
pub const INMEM_FRAME_FOOT: usize = 4;
|
||||
pub const INMEM_FRAME_MAGIC: u32 = 0xc6c3b73d;
|
||||
|
||||
/// Error type for the framable module.
///
/// NOTE(review): `#[cstm(...)]` and the attribute-less variants rely on the
/// patched `thiserror` fork pinned in Cargo.toml (`[patch.crates-io]`,
/// branch "cstm"), which presumably derives Display from variant names —
/// confirm against that fork before changing.
#[derive(Debug, thiserror::Error)]
#[cstm(name = "ItemFramable")]
pub enum Error {
    Msg(String),
    DummyError,
    Frame(#[from] crate::frame::Error),
}

// Adapter: wrap anything stringifiable so it can be converted into
// `Error::Msg` via `From` (used with `map_err`).
struct ErrMsg<E>(E)
where
    E: ToString;

impl<E> From<ErrMsg<E>> for Error
where
    E: ToString,
{
    fn from(value: ErrMsg<E>) -> Self {
        Self::Msg(value.0.to_string())
    }
}
|
||||
|
||||
/// Compile-time frame type id for types whose id is known statically.
pub trait FrameTypeStatic {
    const FRAME_TYPE_ID: u32;
}

// A `Sitemty<T>` inherits the static frame id of its payload type.
impl<T> FrameTypeStatic for Sitemty<T>
where
    T: FrameTypeInnerStatic,
{
    const FRAME_TYPE_ID: u32 = <T as FrameTypeInnerStatic>::FRAME_TYPE_ID;
}

// Framable trait objects need some inspection to handle the supposed-to-be common Err ser format:
// Meant to be implemented by Sitemty.
pub trait FrameType {
    fn frame_type_id(&self) -> u32;
}

// Boxed values report the frame id of their contents.
impl<T> FrameType for Box<T>
where
    T: FrameType,
{
    fn frame_type_id(&self) -> u32 {
        self.as_ref().frame_type_id()
    }
}

impl FrameType for Box<dyn Events> {
    fn frame_type_id(&self) -> u32 {
        self.as_ref().frame_type_id()
    }
}

/// Anything that can serialize itself into a complete wire frame.
pub trait Framable {
    fn make_frame_dyn(&self) -> Result<BytesMut, Error>;
}

// Object-safe bundle of the bounds needed to frame a payload dynamically;
// `_dummy` exists only to keep the trait from being a pure marker.
pub trait FramableInner: erased_serde::Serialize + FrameTypeInnerDyn + Send {
    fn _dummy(&self);
}

// Blanket impl: any serializable, id-carrying, Send type is FramableInner.
impl<T: erased_serde::Serialize + FrameTypeInnerDyn + Send> FramableInner for T {
    fn _dummy(&self) {}
}
|
||||
|
||||
impl<T> Framable for Sitemty<T>
where
    T: Sized + serde::Serialize + FrameType,
{
    /// Serializes this stream item into a wire frame.
    ///
    /// Dispatch: data items get their payload-specific frame type id and the
    /// generic encoder; range-complete, log, stats and error items each use a
    /// dedicated frame builder (errors are always JSON-encoded, see
    /// `make_error_frame`). Note that `Data` frames serialize the WHOLE
    /// `Sitemty` (`self`), not just the inner payload.
    fn make_frame_dyn(&self) -> Result<BytesMut, Error> {
        match self {
            Ok(StreamItem::DataItem(RangeCompletableItem::Data(k))) => {
                let frame_type_id = k.frame_type_id();
                make_frame_2(self, frame_type_id).map_err(Error::from)
            }
            Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)) => {
                make_range_complete_frame().map_err(Error::from)
            }
            Ok(StreamItem::Log(item)) => make_log_frame(item).map_err(Error::from),
            Ok(StreamItem::Stats(item)) => make_stats_frame(item).map_err(Error::from),
            Err(e) => {
                info!("calling make_error_frame for [[{e}]]");
                make_error_frame(e).map_err(Error::from)
            }
        }
    }
}

// Forwarding impl so boxed (incl. unsized) framables can be framed directly.
impl<T> Framable for Box<T>
where
    T: Framable + ?Sized,
{
    fn make_frame_dyn(&self) -> Result<BytesMut, Error> {
        self.as_ref().make_frame_dyn()
    }
}
|
||||
|
||||
/// Implemented by stream-item types that can be reconstructed from the
/// non-data frame kinds (error/log/stats/range-complete) during decoding.
pub trait FrameDecodable: FrameTypeStatic + DeserializeOwned {
    fn from_error(e: err::Error) -> Self;
    fn from_log(item: LogItem) -> Self;
    fn from_stats(item: StatsItem) -> Self;
    fn from_range_complete() -> Self;
}

// A `Sitemty<T>` maps each frame kind onto the matching Result/StreamItem
// variant.
impl<T> FrameDecodable for Sitemty<T>
where
    T: FrameTypeInnerStatic + DeserializeOwned,
{
    fn from_error(e: err::Error) -> Self {
        Err(e)
    }

    fn from_log(item: LogItem) -> Self {
        Ok(StreamItem::Log(item))
    }

    fn from_stats(item: StatsItem) -> Self {
        Ok(StreamItem::Stats(item))
    }

    fn from_range_complete() -> Self {
        Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))
    }
}

/// Newtype carrying a JSON-encoded event query string as a frame payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EventQueryJsonStringFrame(pub String);

impl EventQueryJsonStringFrame {
    /// Borrows the raw JSON string.
    pub fn str(&self) -> &str {
        &self.0
    }
}

impl FrameTypeInnerStatic for EventQueryJsonStringFrame {
    const FRAME_TYPE_ID: u32 = EVENT_QUERY_JSON_STRING_FRAME;
}

impl FrameType for EventQueryJsonStringFrame {
    fn frame_type_id(&self) -> u32 {
        EventQueryJsonStringFrame::FRAME_TYPE_ID
    }
}
|
||||
|
||||
impl<T> FrameType for Sitemty<T>
|
||||
where
|
||||
T: FrameType,
|
||||
{
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
match self {
|
||||
Ok(item) => match item {
|
||||
StreamItem::DataItem(item) => match item {
|
||||
RangeCompletableItem::RangeComplete => SITEMTY_NONSPEC_FRAME_TYPE_ID,
|
||||
RangeCompletableItem::Data(item) => item.frame_type_id(),
|
||||
},
|
||||
StreamItem::Log(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
|
||||
StreamItem::Stats(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
|
||||
},
|
||||
Err(_) => ERROR_FRAME_TYPE_ID,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn test_frame_log() {
    use crate::channelevents::ChannelEvents;
    use crate::frame::decode_from_slice;
    use netpod::log::Level;
    let item = LogItem {
        node_ix: 123,
        level: Level::TRACE,
        msg: format!("test-log-message"),
    };
    let item: Sitemty<ChannelEvents> = Ok(StreamItem::Log(item));
    let buf = Framable::make_frame_dyn(&item).unwrap();
    // Header layout: magic(4) encid(4) tyid(4) len(4) payload-crc(4).
    let tyid = u32::from_le_bytes(buf[8..12].try_into().unwrap());
    assert_eq!(tyid, items_0::streamitem::LOG_FRAME_TYPE_ID);
    let len = u32::from_le_bytes(buf[12..16].try_into().unwrap());
    // Round-trip the payload and verify the decoded item matches what was
    // framed (the original test decoded but never asserted anything).
    let item2: LogItem = decode_from_slice(&buf[INMEM_FRAME_HEAD..INMEM_FRAME_HEAD + len as usize]).unwrap();
    assert_eq!(item2.node_ix, 123);
    assert_eq!(item2.msg, "test-log-message");
}
|
||||
|
||||
#[test]
fn test_frame_error() {
    use crate::channelevents::ChannelEvents;
    use crate::frame::json_from_slice;
    let item: Sitemty<ChannelEvents> = items_0::streamitem::sitem_err_from_string("dummyerror");
    let buf = Framable::make_frame_dyn(&item).unwrap();
    // Header layout: magic(4) encid(4) tyid(4) len(4) payload-crc(4).
    let len = u32::from_le_bytes(buf[12..16].try_into().unwrap());
    let tyid = u32::from_le_bytes(buf[8..12].try_into().unwrap());
    // Error frames must carry the dedicated error type id.
    assert_eq!(tyid, ERROR_FRAME_TYPE_ID, "bad tyid");
    eprintln!("buf len {} len {}", buf.len(), len);
    // Error payloads are always JSON; verify the payload decodes (the
    // original test left the decoded value unused and unchecked).
    let _item2: items_0::streamitem::SitemErrTy =
        json_from_slice(&buf[INMEM_FRAME_HEAD..INMEM_FRAME_HEAD + len as usize]).unwrap();
}
|
||||
433
src/frame.rs
Normal file
433
src/frame.rs
Normal file
@@ -0,0 +1,433 @@
|
||||
use crate::framable::FrameDecodable;
|
||||
use crate::framable::INMEM_FRAME_ENCID;
|
||||
use crate::framable::INMEM_FRAME_FOOT;
|
||||
use crate::framable::INMEM_FRAME_HEAD;
|
||||
use crate::framable::INMEM_FRAME_MAGIC;
|
||||
use crate::inmem::InMemoryFrame;
|
||||
use bincode::config::FixintEncoding;
|
||||
use bincode::config::LittleEndian;
|
||||
use bincode::config::RejectTrailing;
|
||||
use bincode::config::WithOtherEndian;
|
||||
use bincode::config::WithOtherIntEncoding;
|
||||
use bincode::config::WithOtherTrailing;
|
||||
use bincode::DefaultOptions;
|
||||
use bytes::BufMut;
|
||||
use bytes::BytesMut;
|
||||
use daqbuf_err as err;
|
||||
use items_0::bincode;
|
||||
use items_0::streamitem::LogItem;
|
||||
use items_0::streamitem::StatsItem;
|
||||
use items_0::streamitem::ERROR_FRAME_TYPE_ID;
|
||||
use items_0::streamitem::LOG_FRAME_TYPE_ID;
|
||||
use items_0::streamitem::RANGE_COMPLETE_FRAME_TYPE_ID;
|
||||
use items_0::streamitem::STATS_FRAME_TYPE_ID;
|
||||
use items_0::streamitem::TERM_FRAME_TYPE_ID;
|
||||
use netpod::log::*;
|
||||
use serde::Serialize;
|
||||
use std::any;
|
||||
use std::io;
|
||||
|
||||
/// Error type for frame encoding/decoding, aggregating failures from every
/// supported serialization backend.
///
/// NOTE(review): `#[cstm(...)]` and the variants without `#[error]` depend on
/// the patched `thiserror` fork pinned in Cargo.toml (branch "cstm") —
/// confirm its derive semantics before editing.
#[derive(Debug, thiserror::Error)]
#[cstm(name = "ItemFrame")]
pub enum Error {
    // Payload exceeded the u32 length field of the frame header.
    TooLongPayload(usize),
    // Frame header carried an encoder id other than INMEM_FRAME_ENCID.
    UnknownEncoder(u32),
    #[error("BufferMismatch({0}, {1}, {2})")]
    BufferMismatch(u32, usize, u32),
    #[error("TyIdMismatch({0}, {1})")]
    TyIdMismatch(u32, u32),
    Msg(String),
    Bincode(#[from] Box<bincode::ErrorKind>),
    RmpEnc(#[from] rmp_serde::encode::Error),
    RmpDec(#[from] rmp_serde::decode::Error),
    ErasedSerde(#[from] erased_serde::Error),
    Postcard(#[from] postcard::Error),
    SerdeJson(#[from] serde_json::Error),
}

// Adapter: wrap anything stringifiable for conversion into `Error::Msg`.
// (Duplicated in framable.rs — candidate for a shared helper.)
struct ErrMsg<E>(E)
where
    E: ToString;

impl<E> From<ErrMsg<E>> for Error
where
    E: ToString,
{
    fn from(value: ErrMsg<E>) -> Self {
        Self::Msg(value.0.to_string())
    }
}
|
||||
|
||||
/// Builds a bincode serializer over `w` with the crate's canonical options:
/// little-endian, fixed-size integer encoding, trailing bytes rejected.
/// The unwieldy return type spells out those option layers.
pub fn bincode_ser<W>(
    w: W,
) -> bincode::Serializer<
    W,
    WithOtherTrailing<
        WithOtherIntEncoding<WithOtherEndian<DefaultOptions, LittleEndian>, FixintEncoding>,
        RejectTrailing,
    >,
>
where
    W: io::Write,
{
    use bincode::Options;
    let opts = DefaultOptions::new()
        .with_little_endian()
        .with_fixint_encoding()
        .reject_trailing_bytes();
    let ser = bincode::Serializer::new(w, opts);
    ser
}

/// Serializes `item` to a bincode byte vector using the canonical options.
fn bincode_to_vec<S>(item: S) -> Result<Vec<u8>, Error>
where
    S: Serialize,
{
    let mut out = Vec::new();
    let mut ser = bincode_ser(&mut out);
    item.serialize(&mut ser)?;
    Ok(out)
}

/// Deserializes `T` from a bincode-encoded slice. Options must mirror
/// `bincode_ser` exactly or round-trips will fail.
fn bincode_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    use bincode::Options;
    let opts = DefaultOptions::new()
        .with_little_endian()
        .with_fixint_encoding()
        .reject_trailing_bytes();
    let mut de = bincode::Deserializer::from_slice(buf, opts);
    <T as serde::Deserialize>::deserialize(&mut de).map_err(Into::into)
}
|
||||
|
||||
/// Serializes to MessagePack with named (map) struct encoding.
fn msgpack_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: Serialize,
{
    rmp_serde::to_vec_named(&item).map_err(Error::from)
}

/// MessagePack encoding for type-erased (`erased_serde`) payloads; the inner
/// scope drops the serializer borrows before `out` is returned.
fn msgpack_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: erased_serde::Serialize,
{
    let mut out = Vec::new();
    {
        let mut ser1 = rmp_serde::Serializer::new(&mut out).with_struct_map();
        let mut ser2 = <dyn erased_serde::Serializer>::erase(&mut ser1);
        item.erased_serialize(&mut ser2)?;
    }
    Ok(out)
}

/// Deserializes `T` from a MessagePack slice.
fn msgpack_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    rmp_serde::from_slice(buf).map_err(Error::from)
}

/// Serializes to postcard's compact binary format.
fn postcard_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: Serialize,
{
    postcard::to_stdvec(&item).map_err(Error::from)
}

/// Postcard encoding for type-erased payloads: drives postcard's `Serializer`
/// (AllocVec flavor) through an erased_serde adapter, then finalizes the
/// accumulated buffer.
fn postcard_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: erased_serde::Serialize,
{
    use postcard::ser_flavors::Flavor;
    let mut ser1 = postcard::Serializer {
        output: postcard::ser_flavors::AllocVec::new(),
    };
    {
        let mut ser2 = <dyn erased_serde::Serializer>::erase(&mut ser1);
        item.erased_serialize(&mut ser2)
    }?;
    let ret = ser1.output.finalize()?;
    Ok(ret)
}

/// Deserializes `T` from a postcard-encoded slice.
pub fn postcard_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    Ok(postcard::from_bytes(buf)?)
}

/// Serializes to JSON bytes.
fn json_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: Serialize,
{
    Ok(serde_json::to_vec(&item)?)
}

/// Deserializes `T` from a JSON slice.
pub fn json_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    Ok(serde_json::from_slice(buf)?)
}
|
||||
|
||||
/// Encodes `item` with the crate's currently selected wire format.
///
/// The `if false` chain is a deliberate compile-time switch that keeps the
/// alternative codecs (msgpack, bincode) compiling; postcard is the active
/// format. Must stay in sync with `decode_from_slice` below.
pub fn encode_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: Serialize,
{
    if false {
        msgpack_to_vec(item)
    } else if false {
        bincode_to_vec(item)
    } else {
        postcard_to_vec(item)
    }
}

/// Type-erased variant of `encode_to_vec`; same format switch (postcard
/// active; bincode has no erased path here).
pub fn encode_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: erased_serde::Serialize,
{
    if false {
        msgpack_erased_to_vec(item)
    } else {
        postcard_erased_to_vec(item)
    }
}

/// Decodes with the crate's currently selected wire format — must mirror the
/// switch in `encode_to_vec` (postcard active).
pub fn decode_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    if false {
        msgpack_from_slice(buf)
    } else if false {
        bincode_from_slice(buf)
    } else {
        postcard_from_slice(buf)
    }
}
|
||||
|
||||
pub fn make_frame_2<T>(item: T, fty: u32) -> Result<BytesMut, Error>
|
||||
where
|
||||
T: erased_serde::Serialize,
|
||||
{
|
||||
let enc = encode_erased_to_vec(item)?;
|
||||
if enc.len() > u32::MAX as usize {
|
||||
return Err(Error::TooLongPayload(enc.len()));
|
||||
}
|
||||
let mut h = crc32fast::Hasher::new();
|
||||
h.update(&enc);
|
||||
let payload_crc = h.finalize();
|
||||
// TODO reserve also for footer via constant
|
||||
let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len());
|
||||
buf.put_u32_le(INMEM_FRAME_MAGIC);
|
||||
buf.put_u32_le(INMEM_FRAME_ENCID);
|
||||
buf.put_u32_le(fty);
|
||||
buf.put_u32_le(enc.len() as u32);
|
||||
buf.put_u32_le(payload_crc);
|
||||
// TODO add padding to align to 8 bytes.
|
||||
buf.put(enc.as_ref());
|
||||
let mut h = crc32fast::Hasher::new();
|
||||
h.update(&buf);
|
||||
let frame_crc = h.finalize();
|
||||
buf.put_u32_le(frame_crc);
|
||||
return Ok(buf);
|
||||
}
|
||||
|
||||
// TODO remove duplication for these similar `make_*_frame` functions:
// each builds the same header/payload/footer layout as `make_frame_2` and
// differs only in the frame type id and payload encoding. A shared
// `build_frame(tyid, payload)` helper would collapse them.

/// Builds an error frame. Error payloads are ALWAYS JSON so any peer can
/// decode them regardless of the active binary wire format.
pub fn make_error_frame(error: &err::Error) -> Result<BytesMut, Error> {
    // error frames are always encoded as json
    match json_to_vec(error) {
        Ok(enc) => {
            let mut h = crc32fast::Hasher::new();
            h.update(&enc);
            let payload_crc = h.finalize();
            let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len());
            buf.put_u32_le(INMEM_FRAME_MAGIC);
            buf.put_u32_le(INMEM_FRAME_ENCID);
            buf.put_u32_le(ERROR_FRAME_TYPE_ID);
            buf.put_u32_le(enc.len() as u32);
            buf.put_u32_le(payload_crc);
            buf.put(enc.as_ref());
            let mut h = crc32fast::Hasher::new();
            h.update(&buf);
            let frame_crc = h.finalize();
            buf.put_u32_le(frame_crc);
            Ok(buf)
        }
        Err(e) => Err(e)?,
    }
}

/// Builds a log frame; payload uses the default wire format (`encode_to_vec`).
pub fn make_log_frame(item: &LogItem) -> Result<BytesMut, Error> {
    match encode_to_vec(item) {
        Ok(enc) => {
            let mut h = crc32fast::Hasher::new();
            h.update(&enc);
            let payload_crc = h.finalize();
            let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len());
            buf.put_u32_le(INMEM_FRAME_MAGIC);
            buf.put_u32_le(INMEM_FRAME_ENCID);
            buf.put_u32_le(LOG_FRAME_TYPE_ID);
            buf.put_u32_le(enc.len() as u32);
            buf.put_u32_le(payload_crc);
            buf.put(enc.as_ref());
            let mut h = crc32fast::Hasher::new();
            h.update(&buf);
            let frame_crc = h.finalize();
            buf.put_u32_le(frame_crc);
            Ok(buf)
        }
        Err(e) => Err(e)?,
    }
}

/// Builds a stats frame; payload uses the default wire format.
pub fn make_stats_frame(item: &StatsItem) -> Result<BytesMut, Error> {
    match encode_to_vec(item) {
        Ok(enc) => {
            let mut h = crc32fast::Hasher::new();
            h.update(&enc);
            let payload_crc = h.finalize();
            let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len());
            buf.put_u32_le(INMEM_FRAME_MAGIC);
            buf.put_u32_le(INMEM_FRAME_ENCID);
            buf.put_u32_le(STATS_FRAME_TYPE_ID);
            buf.put_u32_le(enc.len() as u32);
            buf.put_u32_le(payload_crc);
            buf.put(enc.as_ref());
            let mut h = crc32fast::Hasher::new();
            h.update(&buf);
            let frame_crc = h.finalize();
            buf.put_u32_le(frame_crc);
            Ok(buf)
        }
        Err(e) => Err(e)?,
    }
}

/// Builds a range-complete marker frame (empty payload).
pub fn make_range_complete_frame() -> Result<BytesMut, Error> {
    let enc = [];
    let mut h = crc32fast::Hasher::new();
    h.update(&enc);
    let payload_crc = h.finalize();
    let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len());
    buf.put_u32_le(INMEM_FRAME_MAGIC);
    buf.put_u32_le(INMEM_FRAME_ENCID);
    buf.put_u32_le(RANGE_COMPLETE_FRAME_TYPE_ID);
    buf.put_u32_le(enc.len() as u32);
    buf.put_u32_le(payload_crc);
    buf.put(enc.as_ref());
    let mut h = crc32fast::Hasher::new();
    h.update(&buf);
    let frame_crc = h.finalize();
    buf.put_u32_le(frame_crc);
    Ok(buf)
}

/// Builds a stream-termination marker frame (empty payload).
pub fn make_term_frame() -> Result<BytesMut, Error> {
    let enc = [];
    let mut h = crc32fast::Hasher::new();
    h.update(&enc);
    let payload_crc = h.finalize();
    let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len());
    buf.put_u32_le(INMEM_FRAME_MAGIC);
    buf.put_u32_le(INMEM_FRAME_ENCID);
    buf.put_u32_le(TERM_FRAME_TYPE_ID);
    buf.put_u32_le(enc.len() as u32);
    buf.put_u32_le(payload_crc);
    buf.put(enc.as_ref());
    let mut h = crc32fast::Hasher::new();
    h.update(&buf);
    let frame_crc = h.finalize();
    buf.put_u32_le(frame_crc);
    Ok(buf)
}
|
||||
|
||||
/// Decodes a received frame into a stream-item type `T`.
///
/// Validates the encoder id and payload length first, then dispatches on the
/// frame type id: the special kinds (error/log/stats/range-complete) are
/// decoded and mapped through the `FrameDecodable` constructors; anything
/// else must match `T`'s own static frame id and is decoded with the
/// default wire format. Decode failures log a truncated hex/text dump of the
/// payload to aid debugging.
///
/// # Errors
/// `UnknownEncoder`, `BufferMismatch`, `TyIdMismatch`, or any codec error.
pub fn decode_frame<T>(frame: &InMemoryFrame) -> Result<T, Error>
where
    T: FrameDecodable,
{
    if frame.encid() != INMEM_FRAME_ENCID {
        return Err(Error::UnknownEncoder(frame.encid()));
    }
    if frame.len() as usize != frame.buf().len() {
        return Err(Error::BufferMismatch(frame.len(), frame.buf().len(), frame.tyid()));
    }
    if frame.tyid() == ERROR_FRAME_TYPE_ID {
        // error frames are always encoded as json
        let k: err::Error = match json_from_slice(frame.buf()) {
            Ok(item) => item,
            Err(e) => {
                error!("deserialize len {} ERROR_FRAME_TYPE_ID {}", frame.buf().len(), e);
                let n = frame.buf().len().min(256);
                let s = String::from_utf8_lossy(&frame.buf()[..n]);
                error!("frame.buf as string: {:?}", s);
                Err(e)?
            }
        };
        Ok(T::from_error(k))
    } else if frame.tyid() == LOG_FRAME_TYPE_ID {
        let k: LogItem = match decode_from_slice(frame.buf()) {
            Ok(item) => item,
            Err(e) => {
                error!("deserialize len {} LOG_FRAME_TYPE_ID {}", frame.buf().len(), e);
                let n = frame.buf().len().min(128);
                let s = String::from_utf8_lossy(&frame.buf()[..n]);
                error!("frame.buf as string: {:?}", s);
                Err(e)?
            }
        };
        Ok(T::from_log(k))
    } else if frame.tyid() == STATS_FRAME_TYPE_ID {
        let k: StatsItem = match decode_from_slice(frame.buf()) {
            Ok(item) => item,
            Err(e) => {
                error!("deserialize len {} STATS_FRAME_TYPE_ID {}", frame.buf().len(), e);
                let n = frame.buf().len().min(128);
                let s = String::from_utf8_lossy(&frame.buf()[..n]);
                error!("frame.buf as string: {:?}", s);
                Err(e)?
            }
        };
        Ok(T::from_stats(k))
    } else if frame.tyid() == RANGE_COMPLETE_FRAME_TYPE_ID {
        // There is currently no content in this variant.
        Ok(T::from_range_complete())
    } else {
        // Data frame: type id must match the expected payload type exactly.
        let tyid = T::FRAME_TYPE_ID;
        if frame.tyid() != tyid {
            Err(Error::TyIdMismatch(tyid, frame.tyid()))
        } else {
            match decode_from_slice(frame.buf()) {
                Ok(item) => Ok(item),
                Err(e) => {
                    error!(
                        "decode_from_slice error len {} tyid {:04x} T {}",
                        frame.buf().len(),
                        frame.tyid(),
                        any::type_name::<T>()
                    );
                    let n = frame.buf().len().min(64);
                    let s = String::from_utf8_lossy(&frame.buf()[..n]);
                    error!("decode_from_slice bad frame.buf as bytes: {:?}", &frame.buf()[..n]);
                    error!("decode_from_slice bad frame.buf as string: {:?}", s);
                    Err(e)?
                }
            }
        }
    }
}
|
||||
|
||||
pub fn crchex<T>(t: T) -> String
|
||||
where
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
let mut h = crc32fast::Hasher::new();
|
||||
h.update(t.as_ref());
|
||||
let crc = h.finalize();
|
||||
format!("{:08x}", crc)
|
||||
}
|
||||
34
src/inmem.rs
Normal file
34
src/inmem.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
use bytes::Bytes;
|
||||
use std::fmt;
|
||||
|
||||
/// A wire frame parsed into memory: header fields plus the raw payload.
/// `len` is the declared payload length from the header and is validated
/// against `buf.len()` by `frame::decode_frame`.
pub struct InMemoryFrame {
    pub encid: u32,
    pub tyid: u32,
    pub len: u32,
    pub buf: Bytes,
}

// Field accessors mirroring the public fields (kept for call-site symmetry).
impl InMemoryFrame {
    pub fn encid(&self) -> u32 {
        self.encid
    }
    pub fn tyid(&self) -> u32 {
        self.tyid
    }
    pub fn len(&self) -> u32 {
        self.len
    }
    pub fn buf(&self) -> &Bytes {
        &self.buf
    }
}

// Manual Debug: show header fields in hex/decimal, omit the payload bytes.
impl fmt::Debug for InMemoryFrame {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            fmt,
            "InMemoryFrame {{ encid: {:x} tyid: {:x} len {} }}",
            self.encid, self.tyid, self.len
        )
    }
}
|
||||
178
src/items_2.rs
Normal file
178
src/items_2.rs
Normal file
@@ -0,0 +1,178 @@
|
||||
pub mod accounting;
|
||||
pub mod binning;
|
||||
pub mod binsdim0;
|
||||
pub mod binsxbindim0;
|
||||
pub mod channelevents;
|
||||
pub mod empty;
|
||||
pub mod eventfull;
|
||||
pub mod eventsdim0;
|
||||
pub mod eventsdim0enum;
|
||||
pub mod eventsdim1;
|
||||
pub mod eventsxbindim0;
|
||||
pub mod framable;
|
||||
pub mod frame;
|
||||
pub mod inmem;
|
||||
pub mod merger;
|
||||
pub mod streams;
|
||||
#[cfg(feature = "heavy")]
|
||||
#[cfg(test)]
|
||||
pub mod test;
|
||||
pub mod testgen;
|
||||
pub mod transform;
|
||||
|
||||
use channelevents::ChannelEvents;
|
||||
use daqbuf_err as err;
|
||||
use futures_util::Stream;
|
||||
use items_0::isodate::IsoDateTime;
|
||||
use items_0::streamitem::Sitemty;
|
||||
use items_0::transform::EventTransform;
|
||||
use items_0::Empty;
|
||||
use items_0::Events;
|
||||
use items_0::MergeError;
|
||||
use merger::Mergeable;
|
||||
use netpod::range::evrange::SeriesRange;
|
||||
use netpod::timeunits::*;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
|
||||
/// Splits absolute nanosecond timestamps into (anchor seconds, ms offsets,
/// sub-ms ns offsets). The anchor is the first timestamp rounded down to a
/// whole second; an empty slice yields anchor 0.
///
/// NOTE(review): `k - ts_anchor_ns` assumes every timestamp is >= the
/// rounded-down first timestamp (i.e. inputs are effectively sorted);
/// violating that would underflow u64 — confirm callers guarantee ordering.
pub fn ts_offs_from_abs(tss: &[u64]) -> (u64, VecDeque<u64>, VecDeque<u64>) {
    let ts_anchor_sec = tss.first().map_or(0, |&k| k) / SEC;
    let ts_anchor_ns = ts_anchor_sec * SEC;
    let ts_off_ms: VecDeque<_> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect();
    let ts_off_ns = tss
        .iter()
        .zip(ts_off_ms.iter().map(|&k| k * MS))
        .map(|(&j, k)| (j - ts_anchor_ns - k))
        .collect();
    (ts_anchor_sec, ts_off_ms, ts_off_ns)
}

/// Same split as `ts_offs_from_abs` but with a caller-provided anchor
/// (e.g. to keep offsets consistent across batches).
pub fn ts_offs_from_abs_with_anchor(ts_anchor_sec: u64, tss: &[u64]) -> (VecDeque<u64>, VecDeque<u64>) {
    let ts_anchor_ns = ts_anchor_sec * SEC;
    let ts_off_ms: VecDeque<_> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect();
    let ts_off_ns = tss
        .iter()
        .zip(ts_off_ms.iter().map(|&k| k * MS))
        .map(|(&j, k)| (j - ts_anchor_ns - k))
        .collect();
    (ts_off_ms, ts_off_ns)
}
|
||||
|
||||
/// Splits absolute pulse ids into an anchor (first pulse rounded down to a
/// multiple of 10000; 0 for an empty slice) and per-pulse offsets from it.
pub fn pulse_offs_from_abs(pulse: &[u64]) -> (u64, VecDeque<u64>) {
    let pulse_anchor = match pulse.first() {
        Some(&first) => first / 10000 * 10000,
        None => 0,
    };
    let pulse_off: VecDeque<u64> = pulse.iter().map(|&p| p - pulse_anchor).collect();
    (pulse_anchor, pulse_off)
}
|
||||
|
||||
/// Coarse classification for `items_2::Error`.
#[derive(Debug, PartialEq)]
pub enum ErrorKind {
    General,
    #[allow(unused)]
    MismatchedType,
}

// TODO stack error better
/// Crate-local error: a kind plus an optional message. Display simply reuses
/// the Debug representation.
#[derive(Debug, PartialEq)]
pub struct Error {
    #[allow(unused)]
    kind: ErrorKind,
    msg: Option<String>,
}

impl fmt::Display for Error {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{self:?}")
    }
}

impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Self {
        Self { kind, msg: None }
    }
}

impl From<String> for Error {
    fn from(msg: String) -> Self {
        Self {
            msg: Some(msg),
            kind: ErrorKind::General,
        }
    }
}

// TODO this discards structure
impl From<err::Error> for Error {
    fn from(e: err::Error) -> Self {
        Self {
            msg: Some(format!("{e}")),
            kind: ErrorKind::General,
        }
    }
}

// TODO this discards structure
impl From<Error> for err::Error {
    fn from(e: Error) -> Self {
        err::Error::with_msg_no_trace(format!("{e}"))
    }
}

impl std::error::Error for Error {}

// Allows this Error to be used as a serde deserialization error type.
impl serde::de::Error for Error {
    fn custom<T>(msg: T) -> Self
    where
        T: fmt::Display,
    {
        format!("{msg}").into()
    }
}
|
||||
|
||||
/// Converts absolute nanosecond timestamps into ISO date-time values.
pub fn make_iso_ts(tss: &[u64]) -> Vec<IsoDateTime> {
    tss.iter().map(|&k| IsoDateTime::from_ns_u64(k)).collect()
}
|
||||
|
||||
// Adapts the object-safe `Events` trait to the `Mergeable` interface used by
// the merger; every method forwards to the corresponding `*_evs` method on
// the boxed trait object.
impl Mergeable for Box<dyn Events> {
    fn ts_min(&self) -> Option<u64> {
        self.as_ref().ts_min()
    }

    fn ts_max(&self) -> Option<u64> {
        self.as_ref().ts_max()
    }

    fn new_empty(&self) -> Self {
        self.as_ref().new_empty_evs()
    }

    fn clear(&mut self) {
        Events::clear(self.as_mut())
    }

    fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> {
        self.as_mut().drain_into_evs(dst, range)
    }

    fn find_lowest_index_gt(&self, ts: u64) -> Option<usize> {
        self.as_ref().find_lowest_index_gt_evs(ts)
    }

    fn find_lowest_index_ge(&self, ts: u64) -> Option<usize> {
        self.as_ref().find_lowest_index_ge_evs(ts)
    }

    fn find_highest_index_lt(&self, ts: u64) -> Option<usize> {
        self.as_ref().find_highest_index_lt_evs(ts)
    }

    // Converts the ns timestamps from `Events::tss` into `TsMs` values.
    fn tss(&self) -> Vec<netpod::TsMs> {
        Events::tss(self)
            .iter()
            .map(|x| netpod::TsMs::from_ns_u64(*x))
            .collect()
    }
}

/// Alias-trait for streams of channel events that also carry transform
/// metadata; blanket-implemented for every qualifying stream.
pub trait ChannelEventsInput: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {}

impl<T> ChannelEventsInput for T where T: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {}
|
||||
491
src/merger.rs
Normal file
491
src/merger.rs
Normal file
@@ -0,0 +1,491 @@
|
||||
use crate::Error;
|
||||
use futures_util::Stream;
|
||||
use futures_util::StreamExt;
|
||||
use items_0::container::ByteEstimate;
|
||||
use items_0::on_sitemty_data;
|
||||
use items_0::streamitem::sitem_data;
|
||||
use items_0::streamitem::LogItem;
|
||||
use items_0::streamitem::RangeCompletableItem;
|
||||
use items_0::streamitem::Sitemty;
|
||||
use items_0::streamitem::StreamItem;
|
||||
use items_0::transform::EventTransform;
|
||||
use items_0::transform::TransformProperties;
|
||||
use items_0::transform::WithTransformProperties;
|
||||
use items_0::Events;
|
||||
use items_0::MergeError;
|
||||
use items_0::WithLen;
|
||||
use netpod::log::*;
|
||||
use netpod::TsMs;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::ops::ControlFlow;
|
||||
use std::pin::Pin;
|
||||
use std::task::Context;
|
||||
use std::task::Poll;
|
||||
|
||||
const OUT_MAX_BYTES: u64 = 1024 * 200;
|
||||
const DO_DETECT_NON_MONO: bool = true;
|
||||
|
||||
// Tracing macros with a crude compile-time on/off toggle: both rules have the
// same pattern, so the FIRST rule always matches and expands to nothing —
// tracing is currently DISABLED and the `trace!` rule is unreachable.
// To enable a level, remove or comment out the first (empty) rule.
#[allow(unused)]
macro_rules! trace2 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}

#[allow(unused)]
macro_rules! trace3 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}

#[allow(unused)]
macro_rules! trace4 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
|
||||
|
||||
/// Item type the [`Merger`] can combine: a time-ordered container that can
/// report its timestamp range, be searched by timestamp, and be drained
/// into another container of the same kind.
pub trait Mergeable<Rhs = Self>: fmt::Debug + WithLen + ByteEstimate + Unpin {
    /// Lowest timestamp in the container; `None` when empty.
    fn ts_min(&self) -> Option<u64>;
    /// Highest timestamp in the container; `None` when empty.
    fn ts_max(&self) -> Option<u64>;
    /// A new, empty container of the same concrete shape.
    fn new_empty(&self) -> Self;
    fn clear(&mut self);
    // TODO when MergeError::Full gets returned, any guarantees about what has been modified or kept unchanged?
    /// Move the elements `range.0..range.1` into `dst`.
    fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError>;
    /// Index of the first element with timestamp strictly greater than `ts`.
    fn find_lowest_index_gt(&self, ts: u64) -> Option<usize>;
    /// Index of the first element with timestamp greater than or equal to `ts`.
    fn find_lowest_index_ge(&self, ts: u64) -> Option<usize>;
    /// Index of the last element with timestamp strictly less than `ts`.
    fn find_highest_index_lt(&self, ts: u64) -> Option<usize>;
    // TODO only for testing:
    fn tss(&self) -> Vec<TsMs>;
}
|
||||
|
||||
/// Boxed input stream feeding a [`Merger`]: a sendable stream of `Sitemty<T>`.
type MergeInp<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
|
||||
|
||||
/// Merges multiple time-ordered input streams of `T` into one time-ordered
/// output stream, batching output up to `out_max_len` entries or
/// `OUT_MAX_BYTES`.
pub struct Merger<T> {
    // One slot per input; `None` once the input is exhausted or errored.
    inps: Vec<Option<MergeInp<T>>>,
    // Buffered, not-yet-merged item per input; refilled by `refill`.
    items: Vec<Option<T>>,
    // Output batch currently being accumulated.
    out: Option<T>,
    // Forces `out` to be emitted on the next step; set after a MergeError.
    do_clear_out: bool,
    out_max_len: usize,
    // Per-input flag: that input signalled RangeComplete.
    range_complete: Vec<bool>,
    // Items (data, logs, stats) queued for emission ahead of normal output.
    out_of_band_queue: VecDeque<Sitemty<T>>,
    // Log items emitted with highest priority from `poll_next`.
    log_queue: VecDeque<LogItem>,
    // Watermark used by the non-monotonic-input detection in `process`.
    dim0ix_max: u64,
    // Set after the initial empty type-marker item has been queued.
    done_emit_first_empty: bool,
    // Shutdown state machine flags, advanced in order by `poll_next`:
    done_data: bool,
    done_buffered: bool,
    done_range_complete: bool,
    complete: bool,
    // Number of `poll_next` calls; used for the tracing span.
    poll_count: usize,
}
|
||||
|
||||
impl<T> fmt::Debug for Merger<T>
|
||||
where
|
||||
T: Mergeable,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
let inps: Vec<_> = self.inps.iter().map(|x| x.is_some()).collect();
|
||||
fmt.debug_struct(std::any::type_name::<Self>())
|
||||
.field("inps", &inps)
|
||||
.field("items", &self.items)
|
||||
.field("out_max_len", &self.out_max_len)
|
||||
.field("range_complete", &self.range_complete)
|
||||
.field("out_of_band_queue", &self.out_of_band_queue.len())
|
||||
.field("done_data", &self.done_data)
|
||||
.field("done_buffered", &self.done_buffered)
|
||||
.field("done_range_complete", &self.done_range_complete)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Merger<T>
|
||||
where
|
||||
T: Mergeable,
|
||||
{
|
||||
pub fn new(inps: Vec<MergeInp<T>>, out_max_len: Option<u32>) -> Self {
|
||||
let n = inps.len();
|
||||
Self {
|
||||
inps: inps.into_iter().map(|x| Some(x)).collect(),
|
||||
items: (0..n).into_iter().map(|_| None).collect(),
|
||||
out: None,
|
||||
do_clear_out: false,
|
||||
out_max_len: out_max_len.unwrap_or(1000) as usize,
|
||||
range_complete: vec![false; n],
|
||||
out_of_band_queue: VecDeque::new(),
|
||||
log_queue: VecDeque::new(),
|
||||
dim0ix_max: 0,
|
||||
done_emit_first_empty: false,
|
||||
done_data: false,
|
||||
done_buffered: false,
|
||||
done_range_complete: false,
|
||||
complete: false,
|
||||
poll_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn drain_into_upto(src: &mut T, dst: &mut T, upto: u64) -> Result<(), MergeError> {
|
||||
match src.find_lowest_index_gt(upto) {
|
||||
Some(ilgt) => {
|
||||
src.drain_into(dst, (0, ilgt))?;
|
||||
}
|
||||
None => {
|
||||
// TODO should not be here.
|
||||
src.drain_into(dst, (0, src.len()))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Move the entire content of `src` into the output batch.
    fn take_into_output_all(&mut self, src: &mut T) -> Result<(), MergeError> {
        // TODO optimize the case when some large batch should be added to some existing small batch already in out.
        // TODO maybe use two output slots?
        // `u64::MAX` as the bound means: no timestamp limit, take everything.
        self.take_into_output_upto(src, u64::MAX)
    }
|
||||
|
||||
fn take_into_output_upto(&mut self, src: &mut T, upto: u64) -> Result<(), MergeError> {
|
||||
// TODO optimize the case when some large batch should be added to some existing small batch already in out.
|
||||
// TODO maybe use two output slots?
|
||||
if let Some(out) = self.out.as_mut() {
|
||||
Self::drain_into_upto(src, out, upto)?;
|
||||
} else {
|
||||
trace2!("move into fresh");
|
||||
let mut fresh = src.new_empty();
|
||||
Self::drain_into_upto(src, &mut fresh, upto)?;
|
||||
self.out = Some(fresh);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process(mut self: Pin<&mut Self>, _cx: &mut Context) -> Result<ControlFlow<()>, Error> {
|
||||
use ControlFlow::*;
|
||||
trace4!("process");
|
||||
let mut log_items = Vec::new();
|
||||
let mut tslows = [None, None];
|
||||
for (i1, itemopt) in self.items.iter_mut().enumerate() {
|
||||
if let Some(item) = itemopt {
|
||||
if let Some(t1) = item.ts_min() {
|
||||
if let Some((_, a)) = tslows[0] {
|
||||
if t1 < a {
|
||||
tslows[1] = tslows[0];
|
||||
tslows[0] = Some((i1, t1));
|
||||
} else {
|
||||
if let Some((_, b)) = tslows[1] {
|
||||
if t1 < b {
|
||||
tslows[1] = Some((i1, t1));
|
||||
} else {
|
||||
// nothing to do
|
||||
}
|
||||
} else {
|
||||
tslows[1] = Some((i1, t1));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tslows[0] = Some((i1, t1));
|
||||
}
|
||||
} else {
|
||||
// the item seems empty.
|
||||
// TODO count for stats.
|
||||
trace2!("empty item, something to do here?");
|
||||
*itemopt = None;
|
||||
return Ok(Continue(()));
|
||||
}
|
||||
}
|
||||
}
|
||||
if DO_DETECT_NON_MONO {
|
||||
if let Some((i1, t1)) = tslows[0].as_ref() {
|
||||
if *t1 <= self.dim0ix_max {
|
||||
self.dim0ix_max = *t1;
|
||||
let item = LogItem {
|
||||
node_ix: *i1 as _,
|
||||
level: Level::INFO,
|
||||
msg: format!(
|
||||
"dim0ix_max {} vs {} diff {}",
|
||||
self.dim0ix_max,
|
||||
t1,
|
||||
self.dim0ix_max - t1
|
||||
),
|
||||
};
|
||||
log_items.push(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
trace4!("tslows {tslows:?}");
|
||||
if let Some((il0, _tl0)) = tslows[0] {
|
||||
if let Some((_il1, tl1)) = tslows[1] {
|
||||
// There is a second input, take only up to the second highest timestamp
|
||||
let item = self.items[il0].as_mut().unwrap();
|
||||
if let Some(th0) = item.ts_max() {
|
||||
if th0 <= tl1 {
|
||||
// Can take the whole item
|
||||
// TODO gather stats about this case. Should be never for databuffer, and often for scylla.
|
||||
let mut item = self.items[il0].take().unwrap();
|
||||
trace3!("Take all from item {item:?}");
|
||||
match self.take_into_output_all(&mut item) {
|
||||
Ok(()) => Ok(Break(())),
|
||||
Err(MergeError::Full) | Err(MergeError::NotCompatible) => {
|
||||
// TODO count for stats
|
||||
trace3!("Put item back");
|
||||
self.items[il0] = Some(item);
|
||||
self.do_clear_out = true;
|
||||
Ok(Break(()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Take only up to the lowest ts of the second-lowest input
|
||||
let mut item = self.items[il0].take().unwrap();
|
||||
trace3!("Take up to {tl1} from item {item:?}");
|
||||
let res = self.take_into_output_upto(&mut item, tl1);
|
||||
match res {
|
||||
Ok(()) => {
|
||||
if item.len() == 0 {
|
||||
// TODO should never be here because we should have taken the whole item
|
||||
Err(format!("Should have taken the whole item instead").into())
|
||||
} else {
|
||||
self.items[il0] = Some(item);
|
||||
Ok(Break(()))
|
||||
}
|
||||
}
|
||||
Err(MergeError::Full) | Err(MergeError::NotCompatible) => {
|
||||
// TODO count for stats
|
||||
info!("Put item back because {res:?}");
|
||||
self.items[il0] = Some(item);
|
||||
self.do_clear_out = true;
|
||||
Ok(Break(()))
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// TODO should never be here because ts-max should always exist here.
|
||||
Err(format!("selected input without max ts").into())
|
||||
}
|
||||
} else {
|
||||
// No other input, take the whole item
|
||||
let mut item = self.items[il0].take().unwrap();
|
||||
trace3!("Take all from item (no other input) {item:?}");
|
||||
match self.take_into_output_all(&mut item) {
|
||||
Ok(()) => Ok(Break(())),
|
||||
Err(_) => {
|
||||
// TODO count for stats
|
||||
trace3!("Put item back");
|
||||
self.items[il0] = Some(item);
|
||||
self.do_clear_out = true;
|
||||
Ok(Break(()))
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Err(format!("after low ts search nothing found").into())
|
||||
}
|
||||
}
|
||||
|
||||
    /// Try to fill every empty `items` slot from its input stream.
    ///
    /// Out-of-band stream items (logs, stats, RangeComplete markers) are
    /// queued or recorded and the same input is polled again. Returns
    /// `Ok(Pending)` when at least one input could not deliver yet;
    /// an input error is propagated and that input is dropped.
    fn refill(mut self: Pin<&mut Self>, cx: &mut Context) -> Result<Poll<()>, Error> {
        trace4!("refill");
        use Poll::*;
        let mut has_pending = false;
        for i in 0..self.inps.len() {
            if self.items[i].is_none() {
                // Loops (via `continue`) until a data item, end-of-stream,
                // error or Pending; the unconditional `break` below exits
                // otherwise.
                while let Some(inp) = self.inps[i].as_mut() {
                    match inp.poll_next_unpin(cx) {
                        Ready(Some(Ok(k))) => match k {
                            StreamItem::DataItem(k) => match k {
                                RangeCompletableItem::Data(k) => {
                                    if self.done_emit_first_empty == false {
                                        // Queue one empty clone of the first
                                        // data item ahead of all data.
                                        trace!("emit first empty marker item");
                                        self.done_emit_first_empty = true;
                                        let item = k.new_empty();
                                        let item = sitem_data(item);
                                        self.out_of_band_queue.push_back(item);
                                    }
                                    self.items[i] = Some(k);
                                    trace4!("refilled {}", i);
                                }
                                RangeCompletableItem::RangeComplete => {
                                    self.range_complete[i] = true;
                                    trace!("range_complete {:?}", self.range_complete);
                                    continue;
                                }
                            },
                            StreamItem::Log(item) => {
                                // TODO limit queue length
                                self.out_of_band_queue.push_back(Ok(StreamItem::Log(item)));
                                continue;
                            }
                            StreamItem::Stats(item) => {
                                // TODO limit queue length
                                self.out_of_band_queue.push_back(Ok(StreamItem::Stats(item)));
                                continue;
                            }
                        },
                        Ready(Some(Err(e))) => {
                            // Drop the failing input and abort the refill.
                            self.inps[i] = None;
                            return Err(e.into());
                        }
                        Ready(None) => {
                            // Input exhausted; its slot stays empty from now on.
                            self.inps[i] = None;
                        }
                        Pending => {
                            has_pending = true;
                        }
                    }
                    break;
                }
            }
        }
        if has_pending {
            Ok(Pending)
        } else {
            Ok(Ready(()))
        }
    }
|
||||
|
||||
    /// One merge step: run `process` when buffered items exist, then decide
    /// whether the accumulated `out` batch should be emitted (it is moved to
    /// the out-of-band queue). Returns `Break(Ready(None))` only once all
    /// inputs are finished and no output candidate remains.
    fn poll3(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<Result<T, Error>>>> {
        use ControlFlow::*;
        use Poll::*;
        trace4!("poll3");
        #[allow(unused)]
        let ninps = self.inps.iter().filter(|a| a.is_some()).count();
        let nitems = self.items.iter().filter(|a| a.is_some()).count();
        // Inputs still alive but without a buffered item: after a successful
        // (non-pending) refill this must be zero.
        let nitemsmissing = self
            .inps
            .iter()
            .zip(self.items.iter())
            .filter(|(a, b)| a.is_some() && b.is_none())
            .count();
        trace3!("ninps {ninps} nitems {nitems} nitemsmissing {nitemsmissing}");
        if nitemsmissing != 0 {
            let e = Error::from(format!("missing but no pending"));
            return Break(Ready(Some(Err(e))));
        }
        // No buffered items at all: nothing more can arrive, so any
        // accumulated output must be flushed now.
        let last_emit = nitems == 0;
        if nitems != 0 {
            match Self::process(Pin::new(&mut self), cx) {
                Ok(Break(())) => {}
                Ok(Continue(())) => {}
                Err(e) => return Break(Ready(Some(Err(e)))),
            }
        }
        if let Some(o) = self.out.as_ref() {
            // Emit when the batch is big enough (length or bytes), when a
            // MergeError forced a flush, or on the final step.
            if o.len() >= self.out_max_len || o.byte_estimate() >= OUT_MAX_BYTES || self.do_clear_out || last_emit {
                if o.len() > self.out_max_len {
                    debug!("MERGER OVERWEIGHT ITEM {} vs {}", o.len(), self.out_max_len);
                }
                trace3!("decide to output");
                self.do_clear_out = false;
                //Break(Ready(Some(Ok(self.out.take().unwrap()))))
                let item = sitem_data(self.out.take().unwrap());
                self.out_of_band_queue.push_back(item);
                Continue(())
            } else {
                trace4!("not enough output yet");
                Continue(())
            }
        } else {
            trace!("no output candidate");
            if last_emit {
                Break(Ready(None))
            } else {
                Continue(())
            }
        }
    }
|
||||
|
||||
fn poll2(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<Result<T, Error>>>> {
|
||||
use ControlFlow::*;
|
||||
use Poll::*;
|
||||
match Self::refill(Pin::new(&mut self), cx) {
|
||||
Ok(Ready(())) => Self::poll3(self, cx),
|
||||
Ok(Pending) => Break(Pending),
|
||||
Err(e) => Break(Ready(Some(Err(e)))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Stream for Merger<T>
where
    T: Mergeable,
{
    type Item = Sitemty<T>;

    /// Drive the merge state machine.
    ///
    /// Emission priority per wakeup: queued log items first, then the
    /// shutdown sequence (flush buffered output, optionally emit
    /// RangeComplete, then end the stream), then the out-of-band queue,
    /// and finally a refill/process step via `poll2`.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        self.poll_count += 1;
        let span1 = span!(Level::INFO, "Merger", pc = self.poll_count);
        let _spg = span1.enter();
        loop {
            trace3!("poll");
            break if let Some(item) = self.log_queue.pop_front() {
                Ready(Some(Ok(StreamItem::Log(item))))
            } else if self.poll_count == usize::MAX {
                // NOTE(review): practically unreachable guard; looks like a
                // leftover/placeholder branch — confirm intent.
                self.done_range_complete = true;
                continue;
            } else if self.complete {
                panic!("poll after complete");
            } else if self.done_range_complete {
                self.complete = true;
                Ready(None)
            } else if self.done_buffered {
                self.done_range_complete = true;
                // Signal RangeComplete only when every input signalled it.
                if self.range_complete.iter().all(|x| *x) {
                    trace!("emit RangeComplete");
                    Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))))
                } else {
                    continue;
                }
            } else if self.done_data {
                trace!("done_data");
                self.done_buffered = true;
                // Flush the final partial output batch, if any.
                if let Some(out) = self.out.take() {
                    trace!("done_data emit buffered len {}", out.len());
                    Ready(Some(sitem_data(out)))
                } else {
                    continue;
                }
            } else if let Some(item) = self.out_of_band_queue.pop_front() {
                let item = on_sitemty_data!(item, |k: T| {
                    trace3!("emit out-of-band data len {}", k.len());
                    sitem_data(k)
                });
                Ready(Some(item))
            } else {
                match Self::poll2(self.as_mut(), cx) {
                    ControlFlow::Continue(()) => continue,
                    ControlFlow::Break(k) => match k {
                        Ready(Some(Ok(out))) => {
                            // Direct emission path is considered dead; turned
                            // into an error until refactored away.
                            if true {
                                error!("THIS BRANCH SHOULD NO LONGER OCCUR, REFACTOR");
                                self.done_data = true;
                                let e = Error::from(format!("TODO refactor direct emit in merger"));
                                return Ready(Some(Err(e.into())));
                            }
                            trace!("emit buffered len {}", out.len());
                            Ready(Some(sitem_data(out)))
                        }
                        Ready(Some(Err(e))) => {
                            self.done_data = true;
                            Ready(Some(Err(e.into())))
                        }
                        Ready(None) => {
                            // All inputs exhausted: enter the shutdown sequence.
                            self.done_data = true;
                            continue;
                        }
                        Pending => Pending,
                    },
                }
            };
        }
    }
}
|
||||
|
||||
impl<T> WithTransformProperties for Merger<T> {
    /// Not implemented yet.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
|
||||
|
||||
impl<T> EventTransform for Merger<T>
where
    T: Send,
{
    /// Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}
|
||||
290
src/streams.rs
Normal file
290
src/streams.rs
Normal file
@@ -0,0 +1,290 @@
|
||||
use futures_util::Future;
|
||||
use futures_util::FutureExt;
|
||||
use futures_util::Stream;
|
||||
use futures_util::StreamExt;
|
||||
use items_0::streamitem::RangeCompletableItem;
|
||||
use items_0::streamitem::Sitemty;
|
||||
use items_0::streamitem::StreamItem;
|
||||
use items_0::transform::EventStreamTrait;
|
||||
use items_0::transform::EventTransform;
|
||||
use items_0::transform::TransformProperties;
|
||||
use items_0::transform::WithTransformProperties;
|
||||
use items_0::Events;
|
||||
use std::collections::VecDeque;
|
||||
use std::pin::Pin;
|
||||
use std::task::Context;
|
||||
use std::task::Poll;
|
||||
|
||||
/// Stream adapter that pairs every item of the inner stream with a running
/// zero-based index, like `Iterator::enumerate` for streams.
pub struct Enumerate2<T> {
    inp: T,
    // Index handed out with the next item.
    cnt: usize,
}
|
||||
|
||||
impl<T> Enumerate2<T> {
    /// Wrap `inp`; counting starts at zero.
    pub fn new(inp: T) -> Self
    where
        T: EventTransform,
    {
        Self { inp, cnt: 0 }
    }
}
|
||||
|
||||
impl<T> Stream for Enumerate2<T>
|
||||
where
|
||||
T: Stream + Unpin,
|
||||
{
|
||||
type Item = (usize, <T as Stream>::Item);
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
use Poll::*;
|
||||
match self.inp.poll_next_unpin(cx) {
|
||||
Ready(Some(item)) => {
|
||||
let i = self.cnt;
|
||||
self.cnt += 1;
|
||||
Ready(Some((i, item)))
|
||||
}
|
||||
Ready(None) => Ready(None),
|
||||
Pending => Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> WithTransformProperties for Enumerate2<T>
where
    T: WithTransformProperties,
{
    /// Delegate to the wrapped stream.
    fn query_transform_properties(&self) -> TransformProperties {
        self.inp.query_transform_properties()
    }
}
|
||||
|
||||
impl<T> EventTransform for Enumerate2<T>
where
    T: WithTransformProperties + Send,
{
    /// Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}
|
||||
|
||||
/// Stream adapter like `StreamExt::then`: maps each input item through `f`
/// to a future and yields that future's output.
pub struct Then2<T, F, Fut> {
    inp: Pin<Box<T>>,
    f: Pin<Box<F>>,
    // Future currently being driven; `None` while waiting for the next input item.
    fut: Option<Pin<Box<Fut>>>,
}
|
||||
|
||||
impl<T, F, Fut> Then2<T, F, Fut>
where
    T: Stream,
    F: Fn(<T as Stream>::Item) -> Fut,
{
    /// Wrap `inp` so each item is passed through `f`.
    pub fn new(inp: T, f: F) -> Self
    where
        T: EventTransform,
    {
        Self {
            inp: Box::pin(inp),
            f: Box::pin(f),
            fut: None,
        }
    }

    // Start the future for `item`; it is polled on subsequent `poll_next` calls.
    fn prepare_fut(&mut self, item: <T as Stream>::Item) {
        self.fut = Some(Box::pin((self.f)(item)));
    }
}
|
||||
|
||||
/*impl<T, F, Fut> Unpin for Then2<T, F, Fut>
|
||||
where
|
||||
T: Unpin,
|
||||
F: Unpin,
|
||||
Fut: Unpin,
|
||||
{
|
||||
}*/
|
||||
|
||||
impl<T, F, Fut> Stream for Then2<T, F, Fut>
where
    T: Stream,
    F: Fn(<T as Stream>::Item) -> Fut,
    Fut: Future,
{
    type Item = <Fut as Future>::Output;

    /// Poll the in-flight future if there is one; otherwise pull the next
    /// input item, start its future, and loop once to poll it.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        loop {
            break if let Some(fut) = self.fut.as_mut() {
                match fut.poll_unpin(cx) {
                    Ready(item) => {
                        self.fut = None;
                        Ready(Some(item))
                    }
                    Pending => Pending,
                }
            } else {
                match self.inp.poll_next_unpin(cx) {
                    Ready(Some(item)) => {
                        self.prepare_fut(item);
                        continue;
                    }
                    Ready(None) => Ready(None),
                    Pending => Pending,
                }
            };
        }
    }
}
|
||||
|
||||
impl<T, F, Fut> WithTransformProperties for Then2<T, F, Fut>
where
    // NOTE(review): only `WithTransformProperties` is needed here; the
    // `EventTransform` bound is stricter than necessary — confirm.
    T: EventTransform,
{
    /// Delegate to the wrapped stream.
    fn query_transform_properties(&self) -> TransformProperties {
        self.inp.query_transform_properties()
    }
}
|
||||
|
||||
impl<T, F, Fut> EventTransform for Then2<T, F, Fut>
where
    T: EventTransform + Send,
    F: Send,
    Fut: Send,
{
    /// Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}
|
||||
|
||||
/// Extension methods to chain the stream adapters defined in this module.
pub trait TransformerExt {
    /// Wrap `self` in an [`Enumerate2`].
    fn enumerate2(self) -> Enumerate2<Self>
    where
        Self: EventTransform + Sized;

    /// Wrap `self` in a [`Then2`] driven by `f`.
    fn then2<F, Fut>(self, f: F) -> Then2<Self, F, Fut>
    where
        Self: EventTransform + Stream + Sized,
        F: Fn(<Self as Stream>::Item) -> Fut,
        Fut: Future;
}
|
||||
|
||||
// Blanket impl: the bounds are enforced per method, so the extension methods
// are visible on every type but only callable when the type qualifies.
impl<T> TransformerExt for T {
    fn enumerate2(self) -> Enumerate2<Self>
    where
        Self: EventTransform + Sized,
    {
        Enumerate2::new(self)
    }

    fn then2<F, Fut>(self, f: F) -> Then2<Self, F, Fut>
    where
        Self: EventTransform + Stream + Sized,
        F: Fn(<Self as Stream>::Item) -> Fut,
        Fut: Future,
    {
        Then2::new(self, f)
    }
}
|
||||
|
||||
/// Stream over a pre-filled queue of items; yields them front to back and ends.
pub struct VecStream<T> {
    inp: VecDeque<T>,
}
|
||||
|
||||
impl<T> VecStream<T> {
    /// Create a stream that yields the items of `inp` in queue order.
    pub fn new(inp: VecDeque<T>) -> Self {
        Self { inp }
    }
}
|
||||
|
||||
impl<T> Stream for VecStream<T>
|
||||
where
|
||||
T: Unpin,
|
||||
{
|
||||
type Item = T;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
use Poll::*;
|
||||
if let Some(item) = self.inp.pop_front() {
|
||||
Ready(Some(item))
|
||||
} else {
|
||||
Ready(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> WithTransformProperties for VecStream<T> {
    /// Not implemented yet.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
|
||||
|
||||
impl<T> EventTransform for VecStream<T>
where
    T: Send,
{
    /// Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}
|
||||
|
||||
/// Wrap any event stream and provide transformation properties.
///
/// Items are re-emitted with the concrete event container boxed as
/// `Box<dyn Events>`.
pub struct PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>>,
{
    inp: Pin<Box<INP>>,
}
|
||||
|
||||
impl<INP, T> PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>>,
{
    /// Wrap `inp`, pinning it on the heap.
    pub fn new(inp: INP) -> Self {
        Self { inp: Box::pin(inp) }
    }
}
|
||||
|
||||
impl<INP, T> Stream for PlainEventStream<INP, T>
|
||||
where
|
||||
T: Events,
|
||||
INP: Stream<Item = Sitemty<T>>,
|
||||
{
|
||||
type Item = Sitemty<Box<dyn Events>>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
use Poll::*;
|
||||
match self.inp.poll_next_unpin(cx) {
|
||||
Ready(Some(item)) => Ready(Some(match item {
|
||||
Ok(item) => Ok(match item {
|
||||
StreamItem::DataItem(item) => StreamItem::DataItem(match item {
|
||||
RangeCompletableItem::RangeComplete => RangeCompletableItem::RangeComplete,
|
||||
RangeCompletableItem::Data(item) => RangeCompletableItem::Data(Box::new(item)),
|
||||
}),
|
||||
StreamItem::Log(item) => StreamItem::Log(item),
|
||||
StreamItem::Stats(item) => StreamItem::Stats(item),
|
||||
}),
|
||||
Err(e) => Err(e),
|
||||
})),
|
||||
Ready(None) => Ready(None),
|
||||
Pending => Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<INP, T> WithTransformProperties for PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>>,
{
    /// Not implemented yet.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
|
||||
|
||||
// Marker impl: with a `Send` input, the wrapper is a full event stream.
impl<INP, T> EventStreamTrait for PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>> + Send,
{
}
|
||||
470
src/test.rs
Normal file
470
src/test.rs
Normal file
@@ -0,0 +1,470 @@
|
||||
#[cfg(test)]
|
||||
pub mod eventsdim0;
|
||||
#[cfg(test)]
|
||||
pub mod eventsdim1;
|
||||
|
||||
use crate::channelevents::ConnStatus;
|
||||
use crate::channelevents::ConnStatusEvent;
|
||||
use crate::eventsdim0::EventsDim0;
|
||||
use crate::merger::Mergeable;
|
||||
use crate::merger::Merger;
|
||||
use crate::runfut;
|
||||
use crate::streams::TransformerExt;
|
||||
use crate::streams::VecStream;
|
||||
use crate::testgen::make_some_boxed_d0_f32;
|
||||
use crate::ChannelEvents;
|
||||
use crate::Error;
|
||||
use crate::Events;
|
||||
use futures_util::stream;
|
||||
use futures_util::StreamExt;
|
||||
use items_0::streamitem::sitem_data;
|
||||
use items_0::streamitem::RangeCompletableItem;
|
||||
use items_0::streamitem::Sitemty;
|
||||
use items_0::streamitem::StreamItem;
|
||||
use items_0::Appendable;
|
||||
use items_0::Empty;
|
||||
use items_0::WithLen;
|
||||
use netpod::log::*;
|
||||
use netpod::range::evrange::NanoRange;
|
||||
use netpod::timeunits::*;
|
||||
use netpod::BinnedRangeEnum;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
#[cfg(test)]
/// Run an async test body on the shared test runtime, converting the
/// crate error type into `err::Error`.
pub fn runfut<T, F>(fut: F) -> Result<T, err::Error>
where
    F: std::future::Future<Output = Result<T, Error>>,
{
    use futures_util::TryFutureExt;
    let fut = fut.map_err(|e| e.into());
    taskrun::run(fut)
}
|
||||
|
||||
/// Draining in two steps (split at ts 4, then the rest with bound u64::MAX)
/// moves all 10 events out; the source ends empty and the content
/// round-trips unchanged.
#[test]
fn items_move_events() {
    let evs = make_some_boxed_d0_f32(10, SEC, SEC, 0, 1846713782);
    let v0 = ChannelEvents::Events(evs);
    let mut v1 = v0.clone();
    eprintln!("{v1:?}");
    eprintln!("{}", v1.len());
    let mut v2 = v1.new_empty();
    // First drain: everything with ts <= 4.
    match v1.find_lowest_index_gt(4) {
        Some(ilgt) => {
            v1.drain_into(&mut v2, (0, ilgt)).unwrap();
        }
        None => {
            v1.drain_into(&mut v2, (0, v1.len())).unwrap();
        }
    }
    eprintln!("{}", v1.len());
    eprintln!("{}", v2.len());
    // Second drain: bound u64::MAX takes all remaining events.
    match v1.find_lowest_index_gt(u64::MAX) {
        Some(ilgt) => {
            v1.drain_into(&mut v2, (0, ilgt)).unwrap();
        }
        None => {
            v1.drain_into(&mut v2, (0, v1.len())).unwrap();
        }
    }
    eprintln!("{}", v1.len());
    eprintln!("{}", v2.len());
    eprintln!("{v1:?}");
    eprintln!("{v2:?}");
    assert_eq!(v1.len(), 0);
    assert_eq!(v2.len(), 10);
    assert_eq!(v2, v0);
}
|
||||
|
||||
/// Smoke test: merge two interleaved event streams and print the output
/// (no assertions; passes when the merge completes without error).
#[test]
fn items_merge_00() {
    let fut = async {
        use crate::merger::Merger;
        let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782);
        let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893);
        let v0 = ChannelEvents::Events(evs0);
        let v1 = ChannelEvents::Events(evs1);
        let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
        let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
        let mut merger = Merger::new(vec![stream0, stream1], Some(8));
        while let Some(item) = merger.next().await {
            eprintln!("{item:?}");
        }
        Ok(())
    };
    runfut(fut).unwrap();
}
|
||||
|
||||
/// Merge two event streams (10 events each) plus a status stream
/// (3 status events): the output must contain 10 + 10 + 3 = 23 items.
#[test]
fn items_merge_01() {
    let fut = async {
        use crate::merger::Merger;
        let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782);
        let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893);
        let v0 = ChannelEvents::Events(evs0);
        let v1 = ChannelEvents::Events(evs1);
        let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect)));
        let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect)));
        let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect)));
        let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
        let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
        let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
        let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8));
        let mut total_event_count = 0;
        while let Some(item) = merger.next().await {
            eprintln!("{item:?}");
            let item = item?;
            match item {
                StreamItem::DataItem(item) => match item {
                    RangeCompletableItem::RangeComplete => {}
                    RangeCompletableItem::Data(item) => {
                        total_event_count += item.len();
                    }
                },
                StreamItem::Log(_) => {}
                StreamItem::Stats(_) => {}
            }
        }
        assert_eq!(total_event_count, 23);
        Ok(())
    };
    runfut(fut).unwrap();
}
|
||||
|
||||
/// Same as `items_merge_01` but with 100 events per stream, exercising the
/// batching path (batch size 8): 100 + 100 + 3 = 203 items expected.
#[test]
fn items_merge_02() {
    let fut = async {
        let evs0 = make_some_boxed_d0_f32(100, SEC * 1, SEC * 2, 0, 1846713782);
        let evs1 = make_some_boxed_d0_f32(100, SEC * 2, SEC * 2, 0, 828764893);
        let v0 = ChannelEvents::Events(evs0);
        let v1 = ChannelEvents::Events(evs1);
        let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect)));
        let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect)));
        let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect)));
        let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
        let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
        let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
        let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8));
        let mut total_event_count = 0;
        while let Some(item) = merger.next().await {
            eprintln!("{item:?}");
            let item = item.unwrap();
            match item {
                StreamItem::DataItem(item) => match item {
                    RangeCompletableItem::RangeComplete => {}
                    RangeCompletableItem::Data(item) => {
                        total_event_count += item.len();
                    }
                },
                StreamItem::Log(_) => {}
                StreamItem::Stats(_) => {}
            }
        }
        assert_eq!(total_event_count, 203);
        Ok(())
    };
    runfut(fut).unwrap();
}
|
||||
|
||||
/// Merging one non-empty and one empty input first yields the empty
/// type-marker item, then the data unchanged, then end-of-stream.
#[test]
fn merge_00() {
    let fut = async {
        let mut events_vec1: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let mut events_vec2: Vec<Sitemty<ChannelEvents>> = Vec::new();
        {
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            // Two identical copies: one to feed the merger, one as expectation.
            let cev = ChannelEvents::Events(Box::new(events.clone()));
            events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
            let cev = ChannelEvents::Events(Box::new(events.clone()));
            events_vec2.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
        }
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let inp2 = futures_util::stream::iter(inp2);
        let inp2 = Box::pin(inp2);
        let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(32));

        // Expect an empty first item.
        let item = merger.next().await;
        let item = match item {
            Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
            _ => panic!(),
        };
        assert_eq!(item.len(), 0);

        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), None);
        Ok(())
    };
    runfut(fut).unwrap();
}
|
||||
|
||||
/// With a single active input of two ordered batches (batch size 10 matches
/// the batches), the merger emits the empty marker, then both batches
/// unchanged, then end-of-stream.
#[test]
fn merge_01() {
    let fut = async {
        let events_vec1 = {
            let mut vec = Vec::new();
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events.clone()));
            let mut events = EventsDim0::empty();
            for i in 10..20 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events.clone()));
            vec
        };
        let exp = events_vec1.clone();
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let inp2 = futures_util::stream::iter(inp2);
        let inp2 = Box::pin(inp2);
        let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10));

        // Expect an empty first item.
        let item = merger.next().await;
        let item = match item {
            Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
            _ => panic!(),
        };
        assert_eq!(item.len(), 0);

        let item = merger.next().await;
        assert_eq!(item.as_ref(), exp.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), exp.get(1));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), None);
        Ok(())
    };
    runfut(fut).unwrap();
}
|
||||
|
||||
fn push_evd0(vec: &mut Vec<Sitemty<ChannelEvents>>, events: Box<dyn Events>) {
|
||||
let cev = ChannelEvents::Events(events);
|
||||
vec.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
|
||||
}
|
||||
|
||||
#[test]
fn merge_02() {
    // Merging an events stream with a connection-status stream: the status
    // event (ts 1199) must be emitted in timestamp order, which forces the
    // merger to split the second source batch at ts 1200.
    let fut = async {
        // Input 1: twenty events (ts = 100 * i) in two batches of ten.
        let events_vec1 = {
            let mut vec = Vec::new();
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            let mut events = EventsDim0::empty();
            for i in 10..20 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            vec
        };
        // Expected output batches: same events, re-chunked as [0,10), [10,12),
        // [12,20) because the status event at ts 1199 lands between ts 1100
        // and ts 1200.
        let events_vec2 = {
            let mut vec = Vec::new();
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            let mut events = EventsDim0::empty();
            for i in 10..12 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            let mut events = EventsDim0::empty();
            for i in 12..20 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            vec
        };

        // Input 2: a single disconnect status event at ts 1199.
        let inp2_events_a = {
            let ev = ConnStatusEvent {
                ts: 1199,
                datetime: std::time::SystemTime::UNIX_EPOCH,
                status: ConnStatus::Disconnect,
            };
            let item: Sitemty<ChannelEvents> = Ok(StreamItem::DataItem(RangeCompletableItem::Data(
                ChannelEvents::Status(Some(ev)),
            )));
            vec![item]
        };

        // Identical copy of `inp2_events_a`, kept separately for the
        // assertions below because `inp2_events_a` is moved into the merger.
        let inp2_events_b = {
            let ev = ConnStatusEvent {
                ts: 1199,
                datetime: std::time::SystemTime::UNIX_EPOCH,
                status: ConnStatus::Disconnect,
            };
            let item: Sitemty<ChannelEvents> = Ok(StreamItem::DataItem(RangeCompletableItem::Data(
                ChannelEvents::Status(Some(ev)),
            )));
            vec![item]
        };

        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2: Vec<Sitemty<ChannelEvents>> = inp2_events_a;
        let inp2 = futures_util::stream::iter(inp2);
        let inp2 = Box::pin(inp2);
        let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10));

        // Expect an empty first item.
        let item = merger.next().await;
        let item = match item {
            Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
            _ => panic!(),
        };
        assert_eq!(item.len(), 0);

        // Events up to ts 1100, then the status event, then the rest,
        // then end of stream.
        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(1));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), inp2_events_b.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(2));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), None);
        Ok(())
    };
    runfut(fut).unwrap();
}
|
||||
|
||||
#[test]
fn bin_01() {
    const TSBASE: u64 = SEC * 1600000000;
    // Synthetic signal: base 2.0, +1.0 on odd seconds, +0.2 on odd
    // 100 ms sub-periods.
    fn val(ts: u64) -> f32 {
        2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32
    }
    let fut = async {
        // Twenty batches of ten events, 100 ms apart, then a
        // range-complete marker.
        let mut events_vec1 = Vec::new();
        let mut t = TSBASE;
        for _ in 0..20 {
            let mut events = EventsDim0::empty();
            for _ in 0..10 {
                events.push(t, t, val(t));
                t += MS * 100;
            }
            let cev = ChannelEvents::Events(Box::new(events));
            events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
        }
        events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)));
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2 = Box::pin(futures_util::stream::empty()) as _;
        // Underscore-prefixed: the binning stage below is commented out, so
        // the merged stream and the bin range are constructed but not yet
        // consumed. Drop the underscores when re-enabling BinnedCollected.
        let _stream = crate::merger::Merger::new(vec![inp1, inp2], Some(32));
        // covering_range result is subject to adjustments, instead, manually choose bin edges
        let _range = NanoRange {
            beg: TSBASE + SEC * 1,
            end: TSBASE + SEC * 10,
        };
        // let binrange = BinnedRangeEnum::covering_range(range.into(), 9).map_err(|e| format!("{e}"))?;
        // let stream = Box::pin(stream);
        // let deadline = Instant::now() + Duration::from_millis(4000);
        // let do_time_weight = true;
        // let emit_empty_bins = false;
        // let res = BinnedCollected::new(
        //     binrange,
        //     ScalarType::F32,
        //     Shape::Scalar,
        //     do_time_weight,
        //     emit_empty_bins,
        //     deadline,
        //     Box::pin(stream),
        // )
        // .await?;
        // eprintln!("res {:?}", res);
        Ok::<_, Error>(())
    };
    runfut(fut).unwrap();
}
|
||||
|
||||
#[test]
fn binned_timeout_00() {
    // Disabled: the binning collector this test exercises is not wired up
    // (see TODO below). The early return keeps the scaffolding compiling.
    if true {
        return;
    }
    // TODO items_2::binnedcollected::BinnedCollected is currently not used.
    trace!("binned_timeout_01 uses a delay");
    const TSBASE: u64 = SEC * 1600000000;
    // Synthetic signal: base 2.0, +1.0 on odd seconds, +0.2 on odd
    // 100 ms sub-periods.
    fn val(ts: u64) -> f32 {
        2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32
    }
    eprintln!("binned_timeout_01 ENTER");
    let fut = async {
        eprintln!("binned_timeout_01 IN FUT");
        // Twenty batches of ten events, 100 ms apart, then a
        // range-complete marker.
        let mut events_vec1: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let mut t = TSBASE;
        for _ in 0..20 {
            let mut events = EventsDim0::empty();
            for _ in 0..10 {
                events.push(t, t, val(t));
                t += MS * 100;
            }
            let cev = ChannelEvents::Events(Box::new(events));
            events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
        }
        events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)));
        let inp1 = VecStream::new(events_vec1.into_iter().collect());
        // Inject a 10 s stall before item 5 — far longer than the 400 ms
        // timeout below, so a timeout-aware consumer must cut off early.
        let inp1 = inp1.enumerate2().then2(|(i, k)| async move {
            if i == 5 {
                let _ = tokio::time::sleep(Duration::from_millis(10000)).await;
            }
            k
        });
        // Manually chosen bin edges, 1 s apart across the query range.
        let edges: Vec<_> = (0..10).into_iter().map(|x| TSBASE + SEC * (1 + x)).collect();
        let range = NanoRange {
            beg: TSBASE + SEC * 1,
            end: TSBASE + SEC * 10,
        };
        // Currently unused; kept for the disabled BinnedCollected code below.
        let binrange = BinnedRangeEnum::covering_range(range.into(), 9)?;
        eprintln!("edges1: {:?}", edges);
        //eprintln!("edges2: {:?}", binrange.edges());
        // Currently unused; would become the collection deadline below.
        let timeout = Duration::from_millis(400);
        // let inp1 = Box::pin(inp1);
        // let deadline = Instant::now() + timeout;
        // let do_time_weight = true;
        // let emit_empty_bins = false;
        // TODO with new binning

        // let res = BinnedCollected::new(
        //     binrange,
        //     ScalarType::F32,
        //     Shape::Scalar,
        //     do_time_weight,
        //     emit_empty_bins,
        //     deadline,
        //     inp1,
        // )
        // .await?;
        // let r2: &BinsDim0CollectedResult<f32> = res.result.as_any_ref().downcast_ref().expect("res seems wrong type");
        // eprintln!("rs: {r2:?}");
        // assert_eq!(SEC * r2.ts_anchor_sec(), TSBASE + SEC);
        // assert_eq!(r2.counts(), &[10, 10, 10]);
        // assert_eq!(r2.mins(), &[3.0, 2.0, 3.0]);
        // assert_eq!(r2.maxs(), &[3.2, 2.2, 3.2]);
        // assert_eq!(r2.missing_bins(), 6);
        // assert_eq!(r2.continue_at(), Some(IsoDateTime::from_ns_u64(TSBASE + SEC * 4)));
        Ok::<_, Error>(())
    };
    runfut(fut).unwrap();
}
|
||||
24
src/test/eventsdim0.rs
Normal file
24
src/test/eventsdim0.rs
Normal file
@@ -0,0 +1,24 @@
|
||||
use crate::eventsdim0::EventsDim0;
|
||||
use items_0::Appendable;
|
||||
use items_0::Empty;
|
||||
use items_0::Events;
|
||||
|
||||
#[test]
fn collect_s_00() {
    // Both events ingested via the mutable collectable interface must end
    // up in the collector.
    let mut events = EventsDim0::empty();
    events.push(123, 4, 1.00f32);
    events.push(124, 5, 1.01);
    let mut collector = events.as_collectable_mut().new_collector();
    collector.ingest(&mut events);
    assert_eq!(collector.len(), 2);
}
|
||||
|
||||
#[test]
fn collect_c_00() {
    // Same as collect_s_00, but the collector is obtained through the
    // with-default-ref collectable interface.
    let mut events = EventsDim0::empty();
    events.push(123, 4, 1.00f32);
    events.push(124, 5, 1.01);
    let mut collector = events.as_collectable_with_default_ref().new_collector();
    collector.ingest(&mut events);
    assert_eq!(collector.len(), 2);
}
|
||||
0
src/test/eventsdim1.rs
Normal file
0
src/test/eventsdim1.rs
Normal file
25
src/testgen.rs
Normal file
25
src/testgen.rs
Normal file
@@ -0,0 +1,25 @@
|
||||
use crate::eventsdim0::EventsDim0;
|
||||
use crate::Events;
|
||||
use items_0::Appendable;
|
||||
use items_0::Empty;
|
||||
|
||||
/// One round of Marsaglia's 32-bit xorshift PRNG (shift triple 13/17/5):
/// maps the current state to the next state. A zero state maps to zero,
/// so callers should seed with a nonzero value.
#[allow(unused)]
fn xorshift32(state: u32) -> u32 {
    let a = state ^ (state << 13);
    let b = a ^ (a >> 17);
    b ^ (b << 5)
}
|
||||
|
||||
pub fn make_some_boxed_d0_f32(n: usize, t0: u64, tstep: u64, tmask: u64, seed: u32) -> Box<dyn Events> {
|
||||
let mut vstate = seed;
|
||||
let mut events = EventsDim0::empty();
|
||||
for i in 0..n {
|
||||
vstate = xorshift32(vstate);
|
||||
let ts = t0 + i as u64 * tstep + (vstate as u64 & tmask);
|
||||
let value = i as f32 * 100. + vstate as f32 / u32::MAX as f32 / 10.;
|
||||
events.push(ts, ts, value);
|
||||
}
|
||||
Box::new(events)
|
||||
}
|
||||
84
src/transform.rs
Normal file
84
src/transform.rs
Normal file
@@ -0,0 +1,84 @@
|
||||
//! Helper functions to create transforms which act locally on a batch of events.
|
||||
//! Tailored to the usage pattern given by `TransformQuery`.
|
||||
|
||||
use crate::channelevents::ChannelEvents;
|
||||
use crate::eventsdim0::EventsDim0;
|
||||
use items_0::transform::EventTransform;
|
||||
use items_0::transform::TransformEvent;
|
||||
use items_0::transform::TransformProperties;
|
||||
use items_0::transform::WithTransformProperties;
|
||||
use items_0::Appendable;
|
||||
use items_0::AsAnyMut;
|
||||
use items_0::Empty;
|
||||
use items_0::Events;
|
||||
use items_0::EventsNonObj;
|
||||
use netpod::log::*;
|
||||
use std::mem;
|
||||
|
||||
struct TransformEventIdentity {}
|
||||
|
||||
impl WithTransformProperties for TransformEventIdentity {
|
||||
fn query_transform_properties(&self) -> TransformProperties {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl EventTransform for TransformEventIdentity {
|
||||
fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
|
||||
src
|
||||
}
|
||||
}
|
||||
|
||||
pub fn make_transform_identity() -> TransformEvent {
|
||||
TransformEvent(Box::new(TransformEventIdentity {}))
|
||||
}
|
||||
|
||||
// Transform that reduces each events batch via `Events::to_min_max_avg`.
struct TransformEventMinMaxAvg {}

impl WithTransformProperties for TransformEventMinMaxAvg {
    fn query_transform_properties(&self) -> TransformProperties {
        // Not required by any current caller.
        todo!()
    }
}

impl EventTransform for TransformEventMinMaxAvg {
    // Delegates entirely to the batch's own min/max/avg reduction.
    fn transform(&mut self, mut src: Box<dyn Events>) -> Box<dyn Events> {
        src.to_min_max_avg()
    }
}

// Build a `TransformEvent` that reduces batches to min/max/avg form.
pub fn make_transform_min_max_avg() -> TransformEvent {
    TransformEvent(Box::new(TransformEventMinMaxAvg {}))
}
|
||||
|
||||
// Transform that replaces each event's value with the difference between
// its pulse id and the previous event's pulse id.
struct TransformEventPulseIdDiff {
    // Pulse id of the last event seen across batches; None before the
    // first event, which therefore gets a diff of 0.
    pulse_last: Option<u64>,
}

impl WithTransformProperties for TransformEventPulseIdDiff {
    fn query_transform_properties(&self) -> TransformProperties {
        // Not required by any current caller.
        todo!()
    }
}

impl EventTransform for TransformEventPulseIdDiff {
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        // Discard values; only timestamps and pulse ids are needed.
        let (tss, pulses) = EventsNonObj::into_tss_pulses(src);
        let mut item = EventsDim0::empty();
        let pulse_last = &mut self.pulse_last;
        for (ts, pulse) in tss.into_iter().zip(pulses) {
            // Signed diff so a pulse-id decrease shows up as negative.
            let value = if let Some(last) = pulse_last {
                pulse as i64 - *last as i64
            } else {
                0
            };
            item.push(ts, pulse, value);
            *pulse_last = Some(pulse);
        }
        // NOTE(review): unlike the other transforms here, the result is
        // wrapped in ChannelEvents::Events rather than returned as the bare
        // EventsDim0 — presumably intentional, but confirm downstream
        // consumers expect the extra layer.
        Box::new(ChannelEvents::Events(Box::new(item)))
    }
}

// Build a `TransformEvent` computing successive pulse-id differences,
// starting with no previous pulse.
pub fn make_transform_pulse_id_diff() -> TransformEvent {
    TransformEvent(Box::new(TransformEventPulseIdDiff { pulse_last: None }))
}
|
||||
Reference in New Issue
Block a user