Refactor (WIP) the event fetch pipeline
items_2/src/binsdim0.rs (new file, 538 lines)
@@ -0,0 +1,538 @@
use crate::streams::{CollectableType, CollectorType, ToJsonResult};
use crate::{
    ts_offs_from_abs, AppendEmptyBin, Empty, IsoDateTime, ScalarOps, TimeBinnable, TimeBinnableType,
    TimeBinnableTypeAggregator, TimeBinned, TimeBinner, TimeSeries, WithLen,
};
use chrono::{TimeZone, Utc};
use err::Error;
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::NanoRange;
use num_traits::Zero;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::VecDeque;
use std::marker::PhantomData;
use std::{fmt, mem};

#[derive(Clone, Serialize, Deserialize)]
pub struct MinMaxAvgDim0Bins<NTY> {
    pub ts1s: Vec<u64>,
    pub ts2s: Vec<u64>,
    pub counts: Vec<u64>,
    pub mins: Vec<NTY>,
    pub maxs: Vec<NTY>,
    pub avgs: Vec<f32>,
}

impl<NTY> fmt::Debug for MinMaxAvgDim0Bins<NTY>
where
    NTY: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let self_name = std::any::type_name::<Self>();
        write!(
            fmt,
            "{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
            self.ts1s.len(),
            self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
            self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
            self.counts,
            self.mins,
            self.maxs,
            self.avgs,
        )
    }
}

impl<NTY> MinMaxAvgDim0Bins<NTY> {
    pub fn empty() -> Self {
        Self {
            ts1s: vec![],
            ts2s: vec![],
            counts: vec![],
            mins: vec![],
            maxs: vec![],
            avgs: vec![],
        }
    }
}

impl<NTY> WithLen for MinMaxAvgDim0Bins<NTY> {
    fn len(&self) -> usize {
        self.ts1s.len()
    }
}

impl<NTY> Empty for MinMaxAvgDim0Bins<NTY> {
    fn empty() -> Self {
        Self {
            ts1s: Vec::new(),
            ts2s: Vec::new(),
            counts: Vec::new(),
            mins: Vec::new(),
            maxs: Vec::new(),
            avgs: Vec::new(),
        }
    }
}

impl<NTY: ScalarOps> AppendEmptyBin for MinMaxAvgDim0Bins<NTY> {
    fn append_empty_bin(&mut self, ts1: u64, ts2: u64) {
        self.ts1s.push(ts1);
        self.ts2s.push(ts2);
        self.counts.push(0);
        self.mins.push(NTY::zero());
        self.maxs.push(NTY::zero());
        self.avgs.push(0.);
    }
}

impl<NTY: ScalarOps> TimeSeries for MinMaxAvgDim0Bins<NTY> {
    fn ts_min(&self) -> Option<u64> {
        todo!("collection of bins can not be TimeSeries")
    }

    fn ts_max(&self) -> Option<u64> {
        todo!("collection of bins can not be TimeSeries")
    }

    fn ts_min_max(&self) -> Option<(u64, u64)> {
        todo!("collection of bins can not be TimeSeries")
    }
}

impl<NTY: ScalarOps> TimeBinnableType for MinMaxAvgDim0Bins<NTY> {
    type Output = MinMaxAvgDim0Bins<NTY>;
    type Aggregator = MinMaxAvgDim0BinsAggregator<NTY>;

    fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
        let self_name = std::any::type_name::<Self>();
        debug!(
            "TimeBinnableType for {self_name} aggregator() range {:?} x_bin_count {} do_time_weight {}",
            range, x_bin_count, do_time_weight
        );
        Self::Aggregator::new(range, do_time_weight)
    }
}

pub struct MinMaxAvgBinsCollected<NTY> {
    _m1: PhantomData<NTY>,
}

impl<NTY> MinMaxAvgBinsCollected<NTY> {
    pub fn new() -> Self {
        Self { _m1: PhantomData }
    }
}

#[derive(Serialize)]
pub struct MinMaxAvgBinsCollectedResult<NTY> {
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "tsMs")]
    ts_off_ms: Vec<u64>,
    #[serde(rename = "tsNs")]
    ts_off_ns: Vec<u64>,
    counts: Vec<u64>,
    mins: Vec<NTY>,
    maxs: Vec<NTY>,
    avgs: Vec<f32>,
    #[serde(skip_serializing_if = "crate::bool_is_false", rename = "finalisedRange")]
    finalised_range: bool,
    #[serde(skip_serializing_if = "Zero::is_zero", rename = "missingBins")]
    missing_bins: u32,
    #[serde(skip_serializing_if = "Option::is_none", rename = "continueAt")]
    continue_at: Option<IsoDateTime>,
}

impl<NTY: ScalarOps> ToJsonResult for MinMaxAvgBinsCollectedResult<NTY> {
    fn to_json_result(&self) -> Result<Box<dyn crate::streams::ToJsonBytes>, Error> {
        let k = serde_json::to_value(self)?;
        Ok(Box::new(k))
    }
}

pub struct MinMaxAvgBinsCollector<NTY> {
    timed_out: bool,
    range_complete: bool,
    vals: MinMaxAvgDim0Bins<NTY>,
}

impl<NTY> MinMaxAvgBinsCollector<NTY> {
    pub fn new() -> Self {
        Self {
            timed_out: false,
            range_complete: false,
            vals: MinMaxAvgDim0Bins::<NTY>::empty(),
        }
    }
}

impl<NTY> WithLen for MinMaxAvgBinsCollector<NTY> {
    fn len(&self) -> usize {
        self.vals.ts1s.len()
    }
}

impl<NTY: ScalarOps> CollectorType for MinMaxAvgBinsCollector<NTY> {
    type Input = MinMaxAvgDim0Bins<NTY>;
    type Output = MinMaxAvgBinsCollectedResult<NTY>;

    fn ingest(&mut self, _src: &mut Self::Input) {
        err::todo();
    }

    fn set_range_complete(&mut self) {
        self.range_complete = true;
    }

    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }

    fn result(&mut self) -> Result<Self::Output, Error> {
        let bin_count = self.vals.ts1s.len() as u32;
        // TODO could save the copy:
        let mut ts_all = self.vals.ts1s.clone();
        if self.vals.ts2s.len() > 0 {
            ts_all.push(*self.vals.ts2s.last().unwrap());
        }
        info!("TODO return proper continueAt");
        let bin_count_exp = 100 as u32;
        let continue_at = if self.vals.ts1s.len() < bin_count_exp as usize {
            match ts_all.last() {
                Some(&k) => {
                    let iso = IsoDateTime(Utc.timestamp_nanos(k as i64));
                    Some(iso)
                }
                None => Err(Error::with_msg("partial_content but no bin in result"))?,
            }
        } else {
            None
        };
        let tst = ts_offs_from_abs(&ts_all);
        let counts = mem::replace(&mut self.vals.counts, Vec::new());
        let mins = mem::replace(&mut self.vals.mins, Vec::new());
        let maxs = mem::replace(&mut self.vals.maxs, Vec::new());
        let avgs = mem::replace(&mut self.vals.avgs, Vec::new());
        let ret = MinMaxAvgBinsCollectedResult::<NTY> {
            ts_anchor_sec: tst.0,
            ts_off_ms: tst.1,
            ts_off_ns: tst.2,
            counts,
            mins,
            maxs,
            avgs,
            finalised_range: self.range_complete,
            missing_bins: bin_count_exp - bin_count,
            continue_at,
        };
        Ok(ret)
    }
}

impl<NTY: ScalarOps> CollectableType for MinMaxAvgDim0Bins<NTY> {
    type Collector = MinMaxAvgBinsCollector<NTY>;

    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}

pub struct MinMaxAvgDim0BinsAggregator<NTY> {
    range: NanoRange,
    count: u64,
    min: NTY,
    max: NTY,
    // Carry over to next bin:
    avg: f32,
    sumc: u64,
    sum: f32,
}

impl<NTY: ScalarOps> MinMaxAvgDim0BinsAggregator<NTY> {
    pub fn new(range: NanoRange, _do_time_weight: bool) -> Self {
        Self {
            range,
            count: 0,
            min: NTY::zero(),
            max: NTY::zero(),
            avg: 0.,
            sumc: 0,
            sum: 0f32,
        }
    }
}

impl<NTY: ScalarOps> TimeBinnableTypeAggregator for MinMaxAvgDim0BinsAggregator<NTY> {
    type Input = MinMaxAvgDim0Bins<NTY>;
    type Output = MinMaxAvgDim0Bins<NTY>;

    fn range(&self) -> &NanoRange {
        &self.range
    }

    fn ingest(&mut self, item: &Self::Input) {
        for i1 in 0..item.ts1s.len() {
            if item.counts[i1] == 0 {
            } else if item.ts2s[i1] <= self.range.beg {
            } else if item.ts1s[i1] >= self.range.end {
            } else {
                if self.count == 0 {
                    self.min = item.mins[i1].clone();
                    self.max = item.maxs[i1].clone();
                } else {
                    if self.min > item.mins[i1] {
                        self.min = item.mins[i1].clone();
                    }
                    if self.max < item.maxs[i1] {
                        self.max = item.maxs[i1].clone();
                    }
                }
                self.count += item.counts[i1];
                self.sum += item.avgs[i1];
                self.sumc += 1;
            }
        }
    }

    fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output {
        if self.sumc > 0 {
            self.avg = self.sum / self.sumc as f32;
        }
        let ret = Self::Output {
            ts1s: vec![self.range.beg],
            ts2s: vec![self.range.end],
            counts: vec![self.count],
            mins: vec![self.min.clone()],
            maxs: vec![self.max.clone()],
            avgs: vec![self.avg],
        };
        self.range = range;
        self.count = 0;
        self.sum = 0f32;
        self.sumc = 0;
        ret
    }
}
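Not part of the commit: a minimal sketch of how this aggregator re-bins finer bins into a coarser one, assuming the crate builds as `items_2` with `netpod` available; the expected values in the comments follow from the `ingest` and `result_reset` code above.

// Sketch, not part of the commit.
use items_2::binsdim0::{MinMaxAvgDim0Bins, MinMaxAvgDim0BinsAggregator};
use items_2::TimeBinnableTypeAggregator;
use netpod::NanoRange;

fn main() {
    const SEC: u64 = 1_000_000_000;
    // Two fine 1 s bins covering [0, 2 s).
    let fine = MinMaxAvgDim0Bins::<f64> {
        ts1s: vec![0, SEC],
        ts2s: vec![SEC, 2 * SEC],
        counts: vec![3, 1],
        mins: vec![1.0, 5.0],
        maxs: vec![4.0, 5.0],
        avgs: vec![2.0, 5.0],
    };
    // Aggregate them into one coarse bin [0, 2 s).
    let mut agg = MinMaxAvgDim0BinsAggregator::<f64>::new(NanoRange { beg: 0, end: 2 * SEC }, true);
    agg.ingest(&fine);
    // result_reset emits the coarse bin and re-arms the aggregator for the next range.
    let coarse = agg.result_reset(NanoRange { beg: 2 * SEC, end: 4 * SEC }, false);
    // Expected: counts [4], mins [1.0], maxs [5.0], avgs [3.5] (mean of the two bin averages).
    println!("{:?}", coarse);
}
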
impl<NTY: ScalarOps> TimeBinnable for MinMaxAvgDim0Bins<NTY> {
    fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinner> {
        let ret = MinMaxAvgDim0BinsTimeBinner::<NTY>::new(edges.into(), do_time_weight);
        Box::new(ret)
    }

    fn as_any(&self) -> &dyn Any {
        self as &dyn Any
    }
}

pub struct MinMaxAvgDim0BinsTimeBinner<NTY: ScalarOps> {
    edges: VecDeque<u64>,
    do_time_weight: bool,
    agg: Option<MinMaxAvgDim0BinsAggregator<NTY>>,
    ready: Option<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
}

impl<NTY: ScalarOps> MinMaxAvgDim0BinsTimeBinner<NTY> {
    fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
        Self {
            edges,
            do_time_weight,
            agg: None,
            ready: None,
        }
    }

    fn next_bin_range(&mut self) -> Option<NanoRange> {
        if self.edges.len() >= 2 {
            let ret = NanoRange {
                beg: self.edges[0],
                end: self.edges[1],
            };
            self.edges.pop_front();
            Some(ret)
        } else {
            None
        }
    }
}

impl<NTY: ScalarOps> TimeBinner for MinMaxAvgDim0BinsTimeBinner<NTY> {
    fn ingest(&mut self, item: &dyn TimeBinnable) {
        let self_name = std::any::type_name::<Self>();
        if item.len() == 0 {
            // Return already here, RangeOverlapInfo would not give much sense.
            return;
        }
        if self.edges.len() < 2 {
            warn!("TimeBinnerDyn for {self_name} no more bin in edges A");
            return;
        }
        // TODO optimize by remembering at which event array index we have arrived.
        // That needs modified interfaces which can take and yield the start and latest index.
        loop {
            while item.starts_after(NanoRange {
                beg: 0,
                end: self.edges[1],
            }) {
                self.cycle();
                if self.edges.len() < 2 {
                    warn!("TimeBinnerDyn for {self_name} no more bin in edges B");
                    return;
                }
            }
            if item.ends_before(NanoRange {
                beg: self.edges[0],
                end: u64::MAX,
            }) {
                return;
            } else {
                if self.edges.len() < 2 {
                    warn!("TimeBinnerDyn for {self_name} edge list exhausted");
                    return;
                } else {
                    let agg = if let Some(agg) = self.agg.as_mut() {
                        agg
                    } else {
                        self.agg = Some(MinMaxAvgDim0BinsAggregator::new(
                            // We know here that we have enough edges for another bin.
                            // and `next_bin_range` will pop the first edge.
                            self.next_bin_range().unwrap(),
                            self.do_time_weight,
                        ));
                        self.agg.as_mut().unwrap()
                    };
                    if let Some(item) = item
                        .as_any()
                        // TODO make statically sure that we attempt to cast to the correct type here:
                        .downcast_ref::<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
                    {
                        agg.ingest(item);
                    } else {
                        let tyid_item = std::any::Any::type_id(item.as_any());
                        error!("not correct item type {:?}", tyid_item);
                    };
                    if item.ends_after(agg.range().clone()) {
                        self.cycle();
                        if self.edges.len() < 2 {
                            warn!("TimeBinnerDyn for {self_name} no more bin in edges C");
                            return;
                        }
                    } else {
                        break;
                    }
                }
            }
        }
    }

    fn bins_ready_count(&self) -> usize {
        match &self.ready {
            Some(k) => k.len(),
            None => 0,
        }
    }

    fn bins_ready(&mut self) -> Option<Box<dyn crate::TimeBinned>> {
        match self.ready.take() {
            Some(k) => Some(Box::new(k)),
            None => None,
        }
    }

    // TODO there is too much common code between implementors:
    fn push_in_progress(&mut self, push_empty: bool) {
        // TODO expand should be derived from AggKind. Is it still required after all?
        let expand = true;
        if let Some(agg) = self.agg.as_mut() {
            let dummy_range = NanoRange { beg: 4, end: 5 };
            let bins = agg.result_reset(dummy_range, expand);
            self.agg = None;
            assert_eq!(bins.len(), 1);
            if push_empty || bins.counts[0] != 0 {
                match self.ready.as_mut() {
                    Some(_ready) => {
                        err::todo();
                        //ready.append(&mut bins);
                    }
                    None => {
                        self.ready = Some(bins);
                    }
                }
            }
        }
    }

    // TODO there is too much common code between implementors:
    fn cycle(&mut self) {
        let n = self.bins_ready_count();
        self.push_in_progress(true);
        if self.bins_ready_count() == n {
            if let Some(_range) = self.next_bin_range() {
                let bins = MinMaxAvgDim0Bins::<NTY>::empty();
                err::todo();
                //bins.append_zero(range.beg, range.end);
                match self.ready.as_mut() {
                    Some(_ready) => {
                        err::todo();
                        //ready.append(&mut bins);
                    }
                    None => {
                        self.ready = Some(bins);
                    }
                }
                if self.bins_ready_count() <= n {
                    error!("failed to push a zero bin");
                }
            } else {
                warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin");
            }
        }
    }
}

impl<NTY: ScalarOps> TimeBinned for MinMaxAvgDim0Bins<NTY> {
    fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable {
        self as &dyn TimeBinnable
    }

    fn edges_slice(&self) -> (&[u64], &[u64]) {
        (&self.ts1s[..], &self.ts2s[..])
    }

    fn counts(&self) -> &[u64] {
        &self.counts[..]
    }

    fn mins(&self) -> Vec<f32> {
        self.mins.iter().map(|x| x.clone().as_prim_f32()).collect()
    }

    fn maxs(&self) -> Vec<f32> {
        self.maxs.iter().map(|x| x.clone().as_prim_f32()).collect()
    }

    fn avgs(&self) -> Vec<f32> {
        self.avgs.clone()
    }

    fn validate(&self) -> Result<(), String> {
        use std::fmt::Write;
        let mut msg = String::new();
        if self.ts1s.len() != self.ts2s.len() {
            write!(&mut msg, "ts1s ≠ ts2s\n").unwrap();
        }
        for (i, ((count, min), max)) in self.counts.iter().zip(&self.mins).zip(&self.maxs).enumerate() {
            if min.as_prim_f32() < 1. && *count != 0 {
                write!(&mut msg, "i {} count {} min {:?} max {:?}\n", i, count, min, max).unwrap();
            }
        }
        if msg.is_empty() {
            Ok(())
        } else {
            Err(msg)
        }
    }
}
items_2/src/eventsdim0.rs (new file, 660 lines)
@@ -0,0 +1,660 @@
use crate::binsdim0::MinMaxAvgDim0Bins;
use crate::streams::{CollectableType, CollectorType, ToJsonResult};
use crate::{pulse_offs_from_abs, ts_offs_from_abs};
use crate::{
    Empty, Events, ScalarOps, TimeBinnable, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinner, TimeSeries,
    WithLen,
};
use err::Error;
use netpod::log::*;
use netpod::NanoRange;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::VecDeque;
use std::{fmt, mem};

// TODO in this module reduce clones.

#[derive(Serialize, Deserialize)]
pub struct EventsDim0<NTY> {
    pub tss: Vec<u64>,
    pub pulses: Vec<u64>,
    pub values: Vec<NTY>,
}

impl<NTY> EventsDim0<NTY> {
    #[inline(always)]
    pub fn push(&mut self, ts: u64, pulse: u64, value: NTY) {
        self.tss.push(ts);
        self.pulses.push(pulse);
        self.values.push(value);
    }
}

impl<NTY> Empty for EventsDim0<NTY> {
    fn empty() -> Self {
        Self {
            tss: vec![],
            pulses: vec![],
            values: vec![],
        }
    }
}

impl<NTY> fmt::Debug for EventsDim0<NTY>
where
    NTY: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(
            fmt,
            "count {} ts {:?} .. {:?} vals {:?} .. {:?}",
            self.tss.len(),
            self.tss.first(),
            self.tss.last(),
            self.values.first(),
            self.values.last(),
        )
    }
}

impl<NTY> WithLen for EventsDim0<NTY> {
    fn len(&self) -> usize {
        self.tss.len()
    }
}

impl<NTY> TimeSeries for EventsDim0<NTY> {
    fn ts_min(&self) -> Option<u64> {
        self.tss.first().map(Clone::clone)
    }

    fn ts_max(&self) -> Option<u64> {
        self.tss.last().map(Clone::clone)
    }

    fn ts_min_max(&self) -> Option<(u64, u64)> {
        if self.tss.len() == 0 {
            None
        } else {
            Some((self.tss.first().unwrap().clone(), self.tss.last().unwrap().clone()))
        }
    }
}

impl<NTY: ScalarOps> TimeBinnableType for EventsDim0<NTY> {
    type Output = MinMaxAvgDim0Bins<NTY>;
    type Aggregator = EventValuesAggregator<NTY>;

    fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
        let self_name = std::any::type_name::<Self>();
        debug!(
            "TimeBinnableType for {self_name} aggregator() range {:?} x_bin_count {} do_time_weight {}",
            range, x_bin_count, do_time_weight
        );
        Self::Aggregator::new(range, do_time_weight)
    }
}

pub struct EventValuesCollector<NTY> {
    vals: EventsDim0<NTY>,
    range_complete: bool,
    timed_out: bool,
}

impl<NTY> EventValuesCollector<NTY> {
    pub fn new() -> Self {
        Self {
            vals: EventsDim0::empty(),
            range_complete: false,
            timed_out: false,
        }
    }
}

impl<NTY> WithLen for EventValuesCollector<NTY> {
    fn len(&self) -> usize {
        self.vals.tss.len()
    }
}

#[derive(Serialize)]
pub struct EventValuesCollectorOutput<NTY> {
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "tsMs")]
    ts_off_ms: Vec<u64>,
    #[serde(rename = "tsNs")]
    ts_off_ns: Vec<u64>,
    #[serde(rename = "pulseAnchor")]
    pulse_anchor: u64,
    #[serde(rename = "pulseOff")]
    pulse_off: Vec<u64>,
    values: Vec<NTY>,
    #[serde(skip_serializing_if = "crate::bool_is_false", rename = "finalisedRange")]
    range_complete: bool,
    #[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")]
    timed_out: bool,
}

impl<NTY: ScalarOps> ToJsonResult for EventValuesCollectorOutput<NTY> {
    fn to_json_result(&self) -> Result<Box<dyn crate::streams::ToJsonBytes>, Error> {
        let k = serde_json::to_value(self)?;
        Ok(Box::new(k))
    }
}

impl<NTY: ScalarOps> CollectorType for EventValuesCollector<NTY> {
    type Input = EventsDim0<NTY>;
    type Output = EventValuesCollectorOutput<NTY>;

    fn ingest(&mut self, src: &mut Self::Input) {
        // TODO could be optimized by non-contiguous container.
        self.vals.tss.append(&mut src.tss);
        self.vals.pulses.append(&mut src.pulses);
        self.vals.values.append(&mut src.values);
    }

    fn set_range_complete(&mut self) {
        self.range_complete = true;
    }

    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }

    fn result(&mut self) -> Result<Self::Output, Error> {
        let tst = ts_offs_from_abs(&self.vals.tss);
        let (pulse_anchor, pulse_off) = pulse_offs_from_abs(&self.vals.pulses);
        let ret = Self::Output {
            ts_anchor_sec: tst.0,
            ts_off_ms: tst.1,
            ts_off_ns: tst.2,
            pulse_anchor,
            pulse_off,
            values: mem::replace(&mut self.vals.values, Vec::new()),
            range_complete: self.range_complete,
            timed_out: self.timed_out,
        };
        Ok(ret)
    }
}

impl<NTY: ScalarOps> CollectableType for EventsDim0<NTY> {
    type Collector = EventValuesCollector<NTY>;

    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}

pub struct EventValuesAggregator<NTY> {
    range: NanoRange,
    count: u64,
    min: NTY,
    max: NTY,
    sumc: u64,
    sum: f32,
    int_ts: u64,
    last_ts: u64,
    last_val: Option<NTY>,
    do_time_weight: bool,
    events_taken_count: u64,
    events_ignored_count: u64,
}

impl<NTY> Drop for EventValuesAggregator<NTY> {
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        trace!(
            "taken {} ignored {}",
            self.events_taken_count,
            self.events_ignored_count
        );
    }
}

impl<NTY: ScalarOps> EventValuesAggregator<NTY> {
    pub fn new(range: NanoRange, do_time_weight: bool) -> Self {
        let int_ts = range.beg;
        Self {
            range,
            count: 0,
            min: NTY::zero(),
            max: NTY::zero(),
            sum: 0.,
            sumc: 0,
            int_ts,
            last_ts: 0,
            last_val: None,
            do_time_weight,
            events_taken_count: 0,
            events_ignored_count: 0,
        }
    }

    // TODO reduce clone.. optimize via more traits to factor the trade-offs?
    fn apply_min_max(&mut self, val: NTY) {
        if self.count == 0 {
            self.min = val.clone();
            self.max = val.clone();
        } else {
            if self.min > val {
                self.min = val.clone();
            }
            if self.max < val {
                self.max = val.clone();
            }
        }
    }

    fn apply_event_unweight(&mut self, val: NTY) {
        let vf = val.as_prim_f32();
        self.apply_min_max(val);
        if vf.is_nan() {
        } else {
            self.sum += vf;
            self.sumc += 1;
        }
    }

    fn apply_event_time_weight(&mut self, ts: u64) {
        if let Some(v) = &self.last_val {
            let vf = v.as_prim_f32();
            let v2 = v.clone();
            self.apply_min_max(v2);
            let w = if self.do_time_weight {
                (ts - self.int_ts) as f32 * 1e-9
            } else {
                1.
            };
            if vf.is_nan() {
            } else {
                self.sum += vf * w;
                self.sumc += 1;
            }
            self.int_ts = ts;
        } else {
            debug!(
                "apply_event_time_weight NO VALUE {}",
                ts as i64 - self.range.beg as i64
            );
        }
    }

    fn ingest_unweight(&mut self, item: &<Self as TimeBinnableTypeAggregator>::Input) {
        for i1 in 0..item.tss.len() {
            let ts = item.tss[i1];
            let val = item.values[i1].clone();
            if ts < self.range.beg {
                self.events_ignored_count += 1;
            } else if ts >= self.range.end {
                self.events_ignored_count += 1;
                return;
            } else {
                self.apply_event_unweight(val);
                self.count += 1;
                self.events_taken_count += 1;
            }
        }
    }

    fn ingest_time_weight(&mut self, item: &<Self as TimeBinnableTypeAggregator>::Input) {
        for i1 in 0..item.tss.len() {
            let ts = item.tss[i1];
            let val = item.values[i1].clone();
            if ts < self.int_ts {
                if self.last_val.is_none() {
                    info!(
                        "ingest_time_weight event before range, only set last ts {} val {:?}",
                        ts, val
                    );
                }
                self.events_ignored_count += 1;
                self.last_ts = ts;
                self.last_val = Some(val);
            } else if ts >= self.range.end {
                self.events_ignored_count += 1;
                return;
            } else {
                self.apply_event_time_weight(ts);
                if self.last_val.is_none() {
                    info!(
                        "call apply_min_max without last val, use current instead {} {:?}",
                        ts, val
                    );
                    self.apply_min_max(val.clone());
                }
                self.count += 1;
                self.last_ts = ts;
                self.last_val = Some(val);
                self.events_taken_count += 1;
            }
        }
    }

    fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins<NTY> {
        let (min, max, avg) = if self.sumc > 0 {
            let avg = self.sum / self.sumc as f32;
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let g = match &self.last_val {
                Some(x) => x.clone(),
                None => NTY::zero(),
            };
            (g.clone(), g.clone(), g.as_prim_f32())
        };
        let ret = MinMaxAvgDim0Bins {
            ts1s: vec![self.range.beg],
            ts2s: vec![self.range.end],
            counts: vec![self.count],
            mins: vec![min],
            maxs: vec![max],
            avgs: vec![avg],
        };
        self.int_ts = range.beg;
        self.range = range;
        self.count = 0;
        self.sum = 0f32;
        self.sumc = 0;
        ret
    }

    fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim0Bins<NTY> {
        // TODO check callsite for correct expand status.
        if expand {
            debug!("result_reset_time_weight calls apply_event_time_weight");
            self.apply_event_time_weight(self.range.end);
        } else {
            debug!("result_reset_time_weight NO EXPAND");
        }
        let (min, max, avg) = if self.sumc > 0 {
            let avg = self.sum / (self.range.delta() as f32 * 1e-9);
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let g = match &self.last_val {
                Some(x) => x.clone(),
                None => NTY::zero(),
            };
            (g.clone(), g.clone(), g.as_prim_f32())
        };
        let ret = MinMaxAvgDim0Bins {
            ts1s: vec![self.range.beg],
            ts2s: vec![self.range.end],
            counts: vec![self.count],
            mins: vec![min],
            maxs: vec![max],
            avgs: vec![avg],
        };
        self.int_ts = range.beg;
        self.range = range;
        self.count = 0;
        self.sum = 0f32;
        self.sumc = 0;
        ret
    }
}

impl<NTY: ScalarOps> TimeBinnableTypeAggregator for EventValuesAggregator<NTY> {
    type Input = EventsDim0<NTY>;
    type Output = MinMaxAvgDim0Bins<NTY>;

    fn range(&self) -> &NanoRange {
        &self.range
    }

    fn ingest(&mut self, item: &Self::Input) {
        debug!("ingest len {}", item.len());
        if self.do_time_weight {
            self.ingest_time_weight(item)
        } else {
            self.ingest_unweight(item)
        }
    }

    fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output {
        debug!("Produce for {:?} next {:?}", self.range, range);
        if self.do_time_weight {
            self.result_reset_time_weight(range, expand)
        } else {
            self.result_reset_unweight(range, expand)
        }
    }
}

impl<NTY: ScalarOps> TimeBinnable for EventsDim0<NTY> {
    fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinner> {
        let ret = ScalarEventsTimeBinner::<NTY>::new(edges.into(), do_time_weight);
        Box::new(ret)
    }

    fn as_any(&self) -> &dyn Any {
        self as &dyn Any
    }
}

impl<NTY: ScalarOps> Events for EventsDim0<NTY> {
    fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable {
        self as &dyn TimeBinnable
    }

    fn verify(&self) {
        let mut ts_max = 0;
        for ts in &self.tss {
            let ts = *ts;
            if ts < ts_max {
                error!("unordered event data ts {} ts_max {}", ts, ts_max);
            }
            ts_max = ts_max.max(ts);
        }
    }

    fn output_info(&self) {
        if false {
            info!("output_info len {}", self.tss.len());
            if self.tss.len() == 1 {
                info!(
                    " only: ts {} pulse {} value {:?}",
                    self.tss[0], self.pulses[0], self.values[0]
                );
            } else if self.tss.len() > 1 {
                info!(
                    " first: ts {} pulse {} value {:?}",
                    self.tss[0], self.pulses[0], self.values[0]
                );
                let n = self.tss.len() - 1;
                info!(
                    " last: ts {} pulse {} value {:?}",
                    self.tss[n], self.pulses[n], self.values[n]
                );
            }
        }
    }

    fn as_collectable_mut(&mut self) -> &mut dyn crate::streams::Collectable {
        self
    }
}

pub struct ScalarEventsTimeBinner<NTY: ScalarOps> {
    // The first two edges are used the next time that we create an aggregator, or push a zero bin.
    edges: VecDeque<u64>,
    do_time_weight: bool,
    agg: Option<EventValuesAggregator<NTY>>,
    ready: Option<<EventValuesAggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
}

impl<NTY: ScalarOps> ScalarEventsTimeBinner<NTY> {
    fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
        Self {
            edges,
            do_time_weight,
            agg: None,
            ready: None,
        }
    }

    fn next_bin_range(&mut self) -> Option<NanoRange> {
        if self.edges.len() >= 2 {
            let ret = NanoRange {
                beg: self.edges[0],
                end: self.edges[1],
            };
            self.edges.pop_front();
            Some(ret)
        } else {
            None
        }
    }
}

impl<NTY: ScalarOps> TimeBinner for ScalarEventsTimeBinner<NTY> {
    fn bins_ready_count(&self) -> usize {
        match &self.ready {
            Some(k) => k.len(),
            None => 0,
        }
    }

    fn bins_ready(&mut self) -> Option<Box<dyn crate::TimeBinned>> {
        match self.ready.take() {
            Some(k) => Some(Box::new(k)),
            None => None,
        }
    }

    fn ingest(&mut self, item: &dyn TimeBinnable) {
        const SELF: &str = "ScalarEventsTimeBinner";
        if item.len() == 0 {
            // Return already here, RangeOverlapInfo would not give much sense.
            return;
        }
        if self.edges.len() < 2 {
            warn!("TimeBinnerDyn for {SELF} no more bin in edges A");
            return;
        }
        // TODO optimize by remembering at which event array index we have arrived.
        // That needs modified interfaces which can take and yield the start and latest index.
        loop {
            while item.starts_after(NanoRange {
                beg: 0,
                end: self.edges[1],
            }) {
                self.cycle();
                if self.edges.len() < 2 {
                    warn!("TimeBinnerDyn for {SELF} no more bin in edges B");
                    return;
                }
            }
            if item.ends_before(NanoRange {
                beg: self.edges[0],
                end: u64::MAX,
            }) {
                return;
            } else {
                if self.edges.len() < 2 {
                    warn!("TimeBinnerDyn for {SELF} edge list exhausted");
                    return;
                } else {
                    let agg = if let Some(agg) = self.agg.as_mut() {
                        agg
                    } else {
                        self.agg = Some(EventValuesAggregator::new(
                            // We know here that we have enough edges for another bin.
                            // and `next_bin_range` will pop the first edge.
                            self.next_bin_range().unwrap(),
                            self.do_time_weight,
                        ));
                        self.agg.as_mut().unwrap()
                    };
                    if let Some(item) = item
                        .as_any()
                        // TODO make statically sure that we attempt to cast to the correct type here:
                        .downcast_ref::<<EventValuesAggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
                    {
                        // TODO collect statistics associated with this request:
                        agg.ingest(item);
                    } else {
                        error!("not correct item type");
                    };
                    if item.ends_after(agg.range().clone()) {
                        self.cycle();
                        if self.edges.len() < 2 {
                            warn!("TimeBinnerDyn for {SELF} no more bin in edges C");
                            return;
                        }
                    } else {
                        break;
                    }
                }
            }
        }
    }

    fn push_in_progress(&mut self, push_empty: bool) {
        // TODO expand should be derived from AggKind. Is it still required after all?
        // TODO here, the expand means that agg will assume that the current value is kept constant during
        // the rest of the time range.
        let expand = true;
        let range_next = if self.agg.is_some() {
            if let Some(x) = self.next_bin_range() {
                Some(x)
            } else {
                None
            }
        } else {
            None
        };
        if let Some(agg) = self.agg.as_mut() {
            let bins;
            if let Some(range_next) = range_next {
                bins = agg.result_reset(range_next, expand);
            } else {
                let range_next = NanoRange { beg: 4, end: 5 };
                bins = agg.result_reset(range_next, expand);
                self.agg = None;
            }
            assert_eq!(bins.len(), 1);
            if push_empty || bins.counts[0] != 0 {
                match self.ready.as_mut() {
                    Some(_ready) => {
                        error!("TODO eventsdim0 time binner append");
                        err::todo();
                        //ready.append(&mut bins);
                    }
                    None => {
                        self.ready = Some(bins);
                    }
                }
            }
        }
    }

    fn cycle(&mut self) {
        let n = self.bins_ready_count();
        self.push_in_progress(true);
        if self.bins_ready_count() == n {
            if let Some(_range) = self.next_bin_range() {
                let bins = MinMaxAvgDim0Bins::<NTY>::empty();
                error!("TODO eventsdim0 time binner append");
                err::todo();
                //bins.append_zero(range.beg, range.end);
                match self.ready.as_mut() {
                    Some(_ready) => {
                        error!("TODO eventsdim0 time binner append");
                        err::todo();
                        //ready.append(&mut bins);
                    }
                    None => {
                        self.ready = Some(bins);
                    }
                }
                if self.bins_ready_count() <= n {
                    error!("failed to push a zero bin");
                }
            } else {
                warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin");
            }
        }
    }
}
items_2/src/items_2.rs (new file, 456 lines)
@@ -0,0 +1,456 @@
pub mod binsdim0;
pub mod eventsdim0;
pub mod streams;

use chrono::{DateTime, TimeZone, Utc};
use futures_util::Stream;
use netpod::log::error;
use netpod::timeunits::*;
use netpod::{AggKind, NanoRange, ScalarType, Shape};
use serde::{Deserialize, Serialize, Serializer};
use std::any::Any;
use std::fmt;
use std::pin::Pin;
use std::task::{Context, Poll};
use streams::Collectable;

pub fn bool_is_false(x: &bool) -> bool {
    *x == false
}

pub fn ts_offs_from_abs(tss: &[u64]) -> (u64, Vec<u64>, Vec<u64>) {
    let ts_anchor_sec = tss.first().map_or(0, |&k| k) / SEC;
    let ts_anchor_ns = ts_anchor_sec * SEC;
    let ts_off_ms: Vec<_> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect();
    let ts_off_ns = tss
        .iter()
        .zip(ts_off_ms.iter().map(|&k| k * MS))
        .map(|(&j, k)| (j - ts_anchor_ns - k))
        .collect();
    (ts_anchor_sec, ts_off_ms, ts_off_ns)
}

pub fn pulse_offs_from_abs(pulse: &[u64]) -> (u64, Vec<u64>) {
    let pulse_anchor = pulse.first().map_or(0, |k| *k);
    let pulse_off: Vec<_> = pulse.iter().map(|k| *k - pulse_anchor).collect();
    (pulse_anchor, pulse_off)
}
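Not part of the commit: a small check of the anchor/offset split these two helpers produce, assuming the crate builds as `items_2` and that `netpod::timeunits::{SEC, MS}` are 1e9 and 1e6 nanoseconds as used above.

// Sketch, not part of the commit.
use items_2::{pulse_offs_from_abs, ts_offs_from_abs};

fn main() {
    const SEC: u64 = 1_000_000_000;
    const MS: u64 = 1_000_000;
    // Absolute timestamps: 1500 s + 3 ms + 42 ns, and 1501 s + 5 ns.
    let tss = vec![1500 * SEC + 3 * MS + 42, 1501 * SEC + 5];
    let (anchor_sec, off_ms, off_ns) = ts_offs_from_abs(&tss);
    assert_eq!(anchor_sec, 1500);
    assert_eq!(off_ms, vec![3, 1000]);
    assert_eq!(off_ns, vec![42, 5]);
    // Pulse ids are reduced to an anchor plus offsets in the same spirit.
    let (pulse_anchor, pulse_off) = pulse_offs_from_abs(&[7000, 7001, 7005]);
    assert_eq!(pulse_anchor, 7000);
    assert_eq!(pulse_off, vec![0, 1, 5]);
}
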
#[allow(unused)]
const fn is_nan_int<T>(_x: &T) -> bool {
    false
}

#[allow(unused)]
fn is_nan_f32(x: f32) -> bool {
    x.is_nan()
}

#[allow(unused)]
fn is_nan_f64(x: f64) -> bool {
    x.is_nan()
}

pub trait AsPrimF32 {
    fn as_prim_f32(&self) -> f32;
}

macro_rules! impl_as_prim_f32 {
    ($ty:ident) => {
        impl AsPrimF32 for $ty {
            fn as_prim_f32(&self) -> f32 {
                *self as f32
            }
        }
    };
}

impl_as_prim_f32!(u8);
impl_as_prim_f32!(u16);
impl_as_prim_f32!(u32);
impl_as_prim_f32!(u64);
impl_as_prim_f32!(i8);
impl_as_prim_f32!(i16);
impl_as_prim_f32!(i32);
impl_as_prim_f32!(i64);
impl_as_prim_f32!(f32);
impl_as_prim_f32!(f64);

pub trait ScalarOps: fmt::Debug + Clone + PartialOrd + AsPrimF32 + Serialize + Unpin + Send + 'static {
    fn zero() -> Self;
}

macro_rules! impl_num_ops {
    ($ty:ident, $zero:expr) => {
        impl ScalarOps for $ty {
            fn zero() -> Self {
                $zero
            }
        }
    };
}

impl_num_ops!(u8, 0);
impl_num_ops!(u16, 0);
impl_num_ops!(u32, 0);
impl_num_ops!(u64, 0);
impl_num_ops!(i8, 0);
impl_num_ops!(i16, 0);
impl_num_ops!(i32, 0);
impl_num_ops!(i64, 0);
impl_num_ops!(f32, 0.);
impl_num_ops!(f64, 0.);

#[allow(unused)]
struct Ts(u64);

struct Error {
    #[allow(unused)]
    kind: ErrorKind,
}

impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Self {
        Self { kind }
    }
}

enum ErrorKind {
    #[allow(unused)]
    MismatchedType,
}

pub trait WithLen {
    fn len(&self) -> usize;
}

pub trait TimeSeries {
    fn ts_min(&self) -> Option<u64>;
    fn ts_max(&self) -> Option<u64>;
    fn ts_min_max(&self) -> Option<(u64, u64)>;
}

pub enum Fits {
    Empty,
    Lower,
    Greater,
    Inside,
    PartlyLower,
    PartlyGreater,
    PartlyLowerAndGreater,
}

// TODO can this be removed?
pub trait FitsInside {
    fn fits_inside(&self, range: NanoRange) -> Fits;
}

impl<T> FitsInside for T
where
    T: TimeSeries,
{
    fn fits_inside(&self, range: NanoRange) -> Fits {
        if let Some((min, max)) = self.ts_min_max() {
            if max <= range.beg {
                Fits::Lower
            } else if min >= range.end {
                Fits::Greater
            } else if min < range.beg && max > range.end {
                Fits::PartlyLowerAndGreater
            } else if min < range.beg {
                Fits::PartlyLower
            } else if max > range.end {
                Fits::PartlyGreater
            } else {
                Fits::Inside
            }
        } else {
            Fits::Empty
        }
    }
}

pub trait RangeOverlapInfo {
    fn ends_before(&self, range: NanoRange) -> bool;
    fn ends_after(&self, range: NanoRange) -> bool;
    fn starts_after(&self, range: NanoRange) -> bool;
}

impl<T> RangeOverlapInfo for T
where
    T: TimeSeries,
{
    fn ends_before(&self, range: NanoRange) -> bool {
        if let Some(max) = self.ts_max() {
            max <= range.beg
        } else {
            true
        }
    }

    fn ends_after(&self, range: NanoRange) -> bool {
        if let Some(max) = self.ts_max() {
            max > range.end
        } else {
            true
        }
    }

    fn starts_after(&self, range: NanoRange) -> bool {
        if let Some(min) = self.ts_min() {
            min >= range.end
        } else {
            true
        }
    }
}

pub trait EmptyForScalarTypeShape {
    fn empty(scalar_type: ScalarType, shape: Shape) -> Self;
}

pub trait EmptyForShape {
    fn empty(shape: Shape) -> Self;
}

pub trait Empty {
    fn empty() -> Self;
}

pub trait AppendEmptyBin {
    fn append_empty_bin(&mut self, ts1: u64, ts2: u64);
}

#[derive(Clone, Debug, Deserialize)]
pub struct IsoDateTime(DateTime<Utc>);

impl Serialize for IsoDateTime {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(&self.0.format("%Y-%m-%dT%H:%M:%S.%3fZ").to_string())
    }
}

pub fn make_iso_ts(tss: &[u64]) -> Vec<IsoDateTime> {
    tss.iter()
        .map(|&k| IsoDateTime(Utc.timestamp_nanos(k as i64)))
        .collect()
}

pub trait TimeBinner: Send {
    fn bins_ready_count(&self) -> usize;
    fn bins_ready(&mut self) -> Option<Box<dyn TimeBinned>>;
    fn ingest(&mut self, item: &dyn TimeBinnable);

    /// If there is a bin in progress with non-zero count, push it to the result set.
    /// With push_empty == true, a bin in progress is pushed even if it contains no counts.
    fn push_in_progress(&mut self, push_empty: bool);

    /// Implies `Self::push_in_progress` but in addition, pushes a zero-count bin if the call
    /// to `push_in_progress` did not change the result count, as long as edges are left.
    /// The next call to `Self::bins_ready_count` must return one higher count than before.
    fn cycle(&mut self);
}
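Not part of the commit: a sketch of how a consumer might drive a TimeBinner over scalar events, assuming the crate builds as `items_2` and using the EventsDim0 implementation from eventsdim0.rs. The expected numbers follow from the time-weighted aggregation above; the still-unimplemented append path (appending to already ready bins) is not exercised here.

// Sketch, not part of the commit.
use items_2::eventsdim0::EventsDim0;
use items_2::{Empty, TimeBinnable, TimeBinned, TimeBinner};

fn main() {
    const SEC: u64 = 1_000_000_000;
    // Two one-second bins: [0, 1 s) and [1 s, 2 s), as nanosecond edges.
    let edges = vec![0, SEC, 2 * SEC];
    let mut events = EventsDim0::<f64>::empty();
    events.push(200_000_000, 1, 4.0); // 0.2 s
    events.push(600_000_000, 2, 6.0); // 0.6 s
    let mut binner = events.time_binner_new(edges, true);
    binner.ingest(&events);
    // Flush the bin that is still in progress, then take the finished bins.
    binner.push_in_progress(false);
    if let Some(bins) = binner.bins_ready() {
        // Expected for [0, 1 s) with this weighting: count 2, avg 4.0 * 0.6 + 6.0 * 0.4 = 4.8.
        println!("counts {:?} avgs {:?}", bins.counts(), bins.avgs());
    }
}
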
/// Provides a time-binned representation of the implementing type.
/// In contrast to `TimeBinnableType` this is meant for trait objects.
pub trait TimeBinnable: WithLen + RangeOverlapInfo + Any + Send {
    fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinner>;
    fn as_any(&self) -> &dyn Any;
}

/// Container of some form of events, for use as trait object.
pub trait Events: Collectable + TimeBinnable {
    fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable;
    fn verify(&self);
    fn output_info(&self);
    fn as_collectable_mut(&mut self) -> &mut dyn Collectable;
}

/// Data in time-binned form.
pub trait TimeBinned: TimeBinnable {
    fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable;
    fn edges_slice(&self) -> (&[u64], &[u64]);
    fn counts(&self) -> &[u64];
    fn mins(&self) -> Vec<f32>;
    fn maxs(&self) -> Vec<f32>;
    fn avgs(&self) -> Vec<f32>;
    fn validate(&self) -> Result<(), String>;
}

pub trait TimeBinnableType: Send + Unpin + RangeOverlapInfo {
    type Output: TimeBinnableType;
    type Aggregator: TimeBinnableTypeAggregator<Input = Self, Output = Self::Output> + Send + Unpin;
    fn aggregator(range: NanoRange, bin_count: usize, do_time_weight: bool) -> Self::Aggregator;
}

pub trait TimeBinnableTypeAggregator: Send {
    type Input: TimeBinnableType;
    type Output: TimeBinnableType;
    fn range(&self) -> &NanoRange;
    fn ingest(&mut self, item: &Self::Input);
    fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output;
}

pub fn empty_events_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box<dyn TimeBinnable> {
    match shape {
        Shape::Scalar => match agg_kind {
            AggKind::TimeWeightedScalar => {
                use ScalarType::*;
                type K<T> = eventsdim0::EventsDim0<T>;
                match scalar_type {
                    U8 => Box::new(K::<u8>::empty()),
                    U16 => Box::new(K::<u16>::empty()),
                    U32 => Box::new(K::<u32>::empty()),
                    U64 => Box::new(K::<u64>::empty()),
                    I8 => Box::new(K::<i8>::empty()),
                    I16 => Box::new(K::<i16>::empty()),
                    I32 => Box::new(K::<i32>::empty()),
                    I64 => Box::new(K::<i64>::empty()),
                    F32 => Box::new(K::<f32>::empty()),
                    F64 => Box::new(K::<f64>::empty()),
                    _ => {
                        error!("TODO empty_events_dyn");
                        err::todoval()
                    }
                }
            }
            _ => {
                error!("TODO empty_events_dyn");
                err::todoval()
            }
        },
        Shape::Wave(..) => {
            error!("TODO empty_events_dyn");
            err::todoval()
        }
        Shape::Image(..) => {
            error!("TODO empty_events_dyn");
            err::todoval()
        }
    }
}

pub fn empty_binned_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box<dyn TimeBinnable> {
    match shape {
        Shape::Scalar => match agg_kind {
            AggKind::TimeWeightedScalar => {
                use ScalarType::*;
                type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
                match scalar_type {
                    U8 => Box::new(K::<u8>::empty()),
                    U16 => Box::new(K::<u16>::empty()),
                    U32 => Box::new(K::<u32>::empty()),
                    U64 => Box::new(K::<u64>::empty()),
                    I8 => Box::new(K::<i8>::empty()),
                    I16 => Box::new(K::<i16>::empty()),
                    I32 => Box::new(K::<i32>::empty()),
                    I64 => Box::new(K::<i64>::empty()),
                    F32 => Box::new(K::<f32>::empty()),
                    F64 => Box::new(K::<f64>::empty()),
                    _ => {
                        error!("TODO empty_binned_dyn");
                        err::todoval()
                    }
                }
            }
            _ => {
                error!("TODO empty_binned_dyn");
                err::todoval()
            }
        },
        Shape::Wave(_n) => match agg_kind {
            AggKind::DimXBins1 => {
                use ScalarType::*;
                type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
                match scalar_type {
                    U8 => Box::new(K::<u8>::empty()),
                    F32 => Box::new(K::<f32>::empty()),
                    F64 => Box::new(K::<f64>::empty()),
                    _ => {
                        error!("TODO empty_binned_dyn");
                        err::todoval()
                    }
                }
            }
            _ => {
                error!("TODO empty_binned_dyn");
                err::todoval()
            }
        },
        Shape::Image(..) => {
            error!("TODO empty_binned_dyn");
            err::todoval()
        }
    }
}
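Not part of the commit: a hedged sketch of the dynamic factory entry points above, assuming `netpod` exposes the ScalarType, Shape and AggKind variants matched in those functions.

// Sketch, not part of the commit.
use items_2::{empty_binned_dyn, empty_events_dyn, WithLen};
use netpod::{AggKind, ScalarType, Shape};

fn main() {
    // Boxed, empty containers selected from runtime type information.
    let events = empty_events_dyn(&ScalarType::F64, &Shape::Scalar, &AggKind::TimeWeightedScalar);
    let bins = empty_binned_dyn(&ScalarType::F64, &Shape::Scalar, &AggKind::TimeWeightedScalar);
    assert_eq!(events.len(), 0);
    assert_eq!(bins.len(), 0);
}
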
pub enum ConnStatus {}

pub struct ConnStatusEvent {
    pub ts: u64,
    pub status: ConnStatus,
}

trait MergableEvents: Any {
    fn len(&self) -> usize;
    fn ts_min(&self) -> Option<u64>;
    fn ts_max(&self) -> Option<u64>;
    fn take_from(&mut self, src: &mut dyn MergableEvents, ts_end: u64) -> Result<(), Error>;
}

pub enum ChannelEvents {
    Events(Box<dyn Events>),
    Status(ConnStatusEvent),
    RangeComplete,
}

impl MergableEvents for ChannelEvents {
    fn len(&self) -> usize {
        error!("TODO MergableEvents");
        todo!()
    }

    fn ts_min(&self) -> Option<u64> {
        error!("TODO MergableEvents");
        todo!()
    }

    fn ts_max(&self) -> Option<u64> {
        error!("TODO MergableEvents");
        todo!()
    }

    fn take_from(&mut self, _src: &mut dyn MergableEvents, _ts_end: u64) -> Result<(), Error> {
        error!("TODO MergableEvents");
        todo!()
    }
}

struct ChannelEventsMerger {
    _inp_1: Pin<Box<dyn Stream<Item = Result<Box<dyn MergableEvents>, Error>>>>,
    _inp_2: Pin<Box<dyn Stream<Item = Result<Box<dyn MergableEvents>, Error>>>>,
}

impl Stream for ChannelEventsMerger {
    type Item = Result<ChannelEvents, Error>;

    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
        //use Poll::*;
        error!("TODO ChannelEventsMerger");
        err::todoval()
    }
}

// TODO do this with some blanket impl:
impl Collectable for Box<dyn Collectable> {
    fn new_collector(&self) -> Box<dyn streams::Collector> {
        Collectable::new_collector(self.as_ref())
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        Collectable::as_any_mut(self.as_mut())
    }
}
items_2/src/streams.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
use crate::WithLen;
use err::Error;
use serde::Serialize;
use std::any::Any;

pub trait CollectorType: Send + Unpin + WithLen {
    type Input: Collectable;
    type Output: ToJsonResult + Serialize;

    fn ingest(&mut self, src: &mut Self::Input);
    fn set_range_complete(&mut self);
    fn set_timed_out(&mut self);
    fn result(&mut self) -> Result<Self::Output, Error>;
}

pub trait Collector: Send + Unpin + WithLen {
    fn ingest(&mut self, src: &mut dyn Collectable);
    fn set_range_complete(&mut self);
    fn set_timed_out(&mut self);
    fn result(&mut self) -> Result<Box<dyn ToJsonResult>, Error>;
}

pub trait CollectableType {
    type Collector: CollectorType<Input = Self>;

    fn new_collector() -> Self::Collector;
}

pub trait Collectable: Any {
    fn new_collector(&self) -> Box<dyn Collector>;
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

impl<T: CollectorType + 'static> Collector for T {
    fn ingest(&mut self, src: &mut dyn Collectable) {
        let src: &mut <T as CollectorType>::Input = src.as_any_mut().downcast_mut().expect("can not downcast");
        T::ingest(self, src)
    }

    fn set_range_complete(&mut self) {
        T::set_range_complete(self)
    }

    fn set_timed_out(&mut self) {
        T::set_timed_out(self)
    }

    fn result(&mut self) -> Result<Box<dyn ToJsonResult>, Error> {
        let ret = T::result(self)?;
        Ok(Box::new(ret) as _)
    }
}

impl<T: CollectableType + 'static> Collectable for T {
    fn new_collector(&self) -> Box<dyn Collector> {
        Box::new(T::new_collector()) as _
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        // TODO interesting: why exactly does returning `&mut self` not work here?
        self
    }
}

pub trait ToJsonBytes {
    fn to_json_bytes(&self) -> Result<Vec<u8>, Error>;
}

pub trait ToJsonResult {
    fn to_json_result(&self) -> Result<Box<dyn ToJsonBytes>, Error>;
}

impl ToJsonBytes for serde_json::Value {
    fn to_json_bytes(&self) -> Result<Vec<u8>, Error> {
        Ok(serde_json::to_vec(self)?)
    }
}
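Not part of the commit: a sketch of how this object-safe collector layer is meant to be driven end to end, assuming the crate builds as `items_2`, that `err::Error` implements Debug, and using the EventsDim0 collector from eventsdim0.rs. The blanket impls above route each dynamic call to the typed CollectorType implementation.

// Sketch, not part of the commit.
use items_2::eventsdim0::EventsDim0;
use items_2::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult};
use items_2::Empty;

fn main() -> Result<(), err::Error> {
    let mut events = EventsDim0::<f64>::empty();
    events.push(1_000_000_000, 1, 0.5);
    events.push(1_000_500_000, 2, 0.7);
    // Collectable::new_collector picks EventValuesCollector via CollectableType.
    let mut coll = Collectable::new_collector(&events);
    // Collector::ingest downcasts back to EventsDim0<f64> internally.
    coll.ingest(&mut events);
    let json = coll.result()?.to_json_result()?.to_json_bytes()?;
    // Prints the tsAnchor / tsMs / tsNs / pulseAnchor / pulseOff / values document.
    println!("{}", String::from_utf8_lossy(&json));
    Ok(())
}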