Basic binner for new items

This commit is contained in:
Dominik Werder
2022-09-06 17:08:41 +02:00
parent 3795a57826
commit 0ea0711d46
12 changed files with 470 additions and 142 deletions

View File

@@ -1,5 +1,8 @@
use crate::streams::{CollectableType, CollectorType, ToJsonResult};
use crate::{ts_offs_from_abs, AppendEmptyBin, Empty, IsoDateTime, RangeOverlapInfo, ScalarOps, TimeBins, WithLen};
use crate::streams::{Collectable, CollectableType, CollectorType, ToJsonResult};
use crate::{
ts_offs_from_abs, ts_offs_from_abs_with_anchor, AppendEmptyBin, Empty, IsoDateTime, RangeOverlapInfo, ScalarOps,
TimeBins, WithLen,
};
use crate::{TimeBinnable, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinned, TimeBinner};
use chrono::{TimeZone, Utc};
use err::Error;
@@ -10,11 +13,10 @@ use num_traits::Zero;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::VecDeque;
use std::marker::PhantomData;
use std::{fmt, mem};
#[derive(Clone, Serialize, Deserialize)]
pub struct MinMaxAvgDim0Bins<NTY> {
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct BinsDim0<NTY> {
pub ts1s: VecDeque<u64>,
pub ts2s: VecDeque<u64>,
pub counts: VecDeque<u64>,
@@ -23,7 +25,7 @@ pub struct MinMaxAvgDim0Bins<NTY> {
pub avgs: VecDeque<f32>,
}
impl<NTY> fmt::Debug for MinMaxAvgDim0Bins<NTY>
impl<NTY> fmt::Debug for BinsDim0<NTY>
where
NTY: fmt::Debug,
{
@@ -43,7 +45,7 @@ where
}
}
impl<NTY> MinMaxAvgDim0Bins<NTY> {
impl<NTY> BinsDim0<NTY> {
pub fn empty() -> Self {
Self {
ts1s: VecDeque::new(),
@@ -56,13 +58,13 @@ impl<NTY> MinMaxAvgDim0Bins<NTY> {
}
}
impl<NTY> WithLen for MinMaxAvgDim0Bins<NTY> {
impl<NTY> WithLen for BinsDim0<NTY> {
fn len(&self) -> usize {
self.ts1s.len()
}
}
impl<NTY> RangeOverlapInfo for MinMaxAvgDim0Bins<NTY> {
impl<NTY> RangeOverlapInfo for BinsDim0<NTY> {
fn ends_before(&self, range: NanoRange) -> bool {
if let Some(&max) = self.ts2s.back() {
max <= range.beg
@@ -88,7 +90,7 @@ impl<NTY> RangeOverlapInfo for MinMaxAvgDim0Bins<NTY> {
}
}
impl<NTY> Empty for MinMaxAvgDim0Bins<NTY> {
impl<NTY> Empty for BinsDim0<NTY> {
fn empty() -> Self {
Self {
ts1s: Default::default(),
@@ -101,7 +103,7 @@ impl<NTY> Empty for MinMaxAvgDim0Bins<NTY> {
}
}
impl<NTY: ScalarOps> AppendEmptyBin for MinMaxAvgDim0Bins<NTY> {
impl<NTY: ScalarOps> AppendEmptyBin for BinsDim0<NTY> {
fn append_empty_bin(&mut self, ts1: u64, ts2: u64) {
self.ts1s.push_back(ts1);
self.ts2s.push_back(ts2);
@@ -112,7 +114,7 @@ impl<NTY: ScalarOps> AppendEmptyBin for MinMaxAvgDim0Bins<NTY> {
}
}
impl<NTY: ScalarOps> TimeBins for MinMaxAvgDim0Bins<NTY> {
impl<NTY: ScalarOps> TimeBins for BinsDim0<NTY> {
fn ts_min(&self) -> Option<u64> {
self.ts1s.front().map(Clone::clone)
}
@@ -130,9 +132,9 @@ impl<NTY: ScalarOps> TimeBins for MinMaxAvgDim0Bins<NTY> {
}
}
impl<NTY: ScalarOps> TimeBinnableType for MinMaxAvgDim0Bins<NTY> {
type Output = MinMaxAvgDim0Bins<NTY>;
type Aggregator = MinMaxAvgDim0BinsAggregator<NTY>;
impl<NTY: ScalarOps> TimeBinnableType for BinsDim0<NTY> {
type Output = BinsDim0<NTY>;
type Aggregator = BinsDim0Aggregator<NTY>;
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
let self_name = std::any::type_name::<Self>();
@@ -144,24 +146,18 @@ impl<NTY: ScalarOps> TimeBinnableType for MinMaxAvgDim0Bins<NTY> {
}
}
pub struct MinMaxAvgBinsCollected<NTY> {
_m1: PhantomData<NTY>,
}
impl<NTY> MinMaxAvgBinsCollected<NTY> {
pub fn new() -> Self {
Self { _m1: PhantomData }
}
}
#[derive(Serialize)]
pub struct MinMaxAvgBinsCollectedResult<NTY> {
#[derive(Debug, Serialize)]
pub struct BinsDim0CollectedResult<NTY> {
#[serde(rename = "tsAnchor")]
ts_anchor_sec: u64,
#[serde(rename = "tsMs")]
ts_off_ms: VecDeque<u64>,
#[serde(rename = "tsNs")]
ts_off_ns: VecDeque<u64>,
#[serde(rename = "ts1Ms")]
ts1_off_ms: VecDeque<u64>,
#[serde(rename = "ts2Ms")]
ts2_off_ms: VecDeque<u64>,
#[serde(rename = "ts1Ns")]
ts1_off_ns: VecDeque<u64>,
#[serde(rename = "ts2Ns")]
ts2_off_ns: VecDeque<u64>,
counts: VecDeque<u64>,
mins: VecDeque<NTY>,
maxs: VecDeque<NTY>,
@@ -174,41 +170,47 @@ pub struct MinMaxAvgBinsCollectedResult<NTY> {
continue_at: Option<IsoDateTime>,
}
impl<NTY: ScalarOps> ToJsonResult for MinMaxAvgBinsCollectedResult<NTY> {
impl<NTY: ScalarOps> ToJsonResult for BinsDim0CollectedResult<NTY> {
fn to_json_result(&self) -> Result<Box<dyn crate::streams::ToJsonBytes>, Error> {
let k = serde_json::to_value(self)?;
Ok(Box::new(k))
}
}
pub struct MinMaxAvgBinsCollector<NTY> {
pub struct BinsDim0Collector<NTY> {
timed_out: bool,
range_complete: bool,
vals: MinMaxAvgDim0Bins<NTY>,
vals: BinsDim0<NTY>,
}
impl<NTY> MinMaxAvgBinsCollector<NTY> {
impl<NTY> BinsDim0Collector<NTY> {
pub fn new() -> Self {
Self {
timed_out: false,
range_complete: false,
vals: MinMaxAvgDim0Bins::<NTY>::empty(),
vals: BinsDim0::<NTY>::empty(),
}
}
}
impl<NTY> WithLen for MinMaxAvgBinsCollector<NTY> {
impl<NTY> WithLen for BinsDim0Collector<NTY> {
fn len(&self) -> usize {
self.vals.ts1s.len()
}
}
impl<NTY: ScalarOps> CollectorType for MinMaxAvgBinsCollector<NTY> {
type Input = MinMaxAvgDim0Bins<NTY>;
type Output = MinMaxAvgBinsCollectedResult<NTY>;
impl<NTY: ScalarOps> CollectorType for BinsDim0Collector<NTY> {
type Input = BinsDim0<NTY>;
type Output = BinsDim0CollectedResult<NTY>;
fn ingest(&mut self, _src: &mut Self::Input) {
err::todo();
fn ingest(&mut self, src: &mut Self::Input) {
// TODO could be optimized by non-contiguous container.
self.vals.ts1s.append(&mut src.ts1s);
self.vals.ts2s.append(&mut src.ts2s);
self.vals.counts.append(&mut src.counts);
self.vals.mins.append(&mut src.mins);
self.vals.maxs.append(&mut src.maxs);
self.vals.avgs.append(&mut src.avgs);
}
fn set_range_complete(&mut self) {
@@ -221,7 +223,7 @@ impl<NTY: ScalarOps> CollectorType for MinMaxAvgBinsCollector<NTY> {
fn result(&mut self) -> Result<Self::Output, Error> {
let bin_count = self.vals.ts1s.len() as u32;
// TODO could save the copy:
// TODO save the clone:
let mut ts_all = self.vals.ts1s.clone();
if self.vals.ts2s.len() > 0 {
ts_all.push_back(*self.vals.ts2s.back().unwrap());
@@ -242,15 +244,18 @@ impl<NTY: ScalarOps> CollectorType for MinMaxAvgBinsCollector<NTY> {
if ts_all.as_slices().1.len() != 0 {
panic!();
}
let tst = ts_offs_from_abs(ts_all.as_slices().0);
let tst1 = ts_offs_from_abs(self.vals.ts1s.as_slices().0);
let tst2 = ts_offs_from_abs_with_anchor(tst1.0, self.vals.ts2s.as_slices().0);
let counts = mem::replace(&mut self.vals.counts, VecDeque::new());
let mins = mem::replace(&mut self.vals.mins, VecDeque::new());
let maxs = mem::replace(&mut self.vals.maxs, VecDeque::new());
let avgs = mem::replace(&mut self.vals.avgs, VecDeque::new());
let ret = MinMaxAvgBinsCollectedResult::<NTY> {
ts_anchor_sec: tst.0,
ts_off_ms: tst.1,
ts_off_ns: tst.2,
let ret = BinsDim0CollectedResult::<NTY> {
ts_anchor_sec: tst1.0,
ts1_off_ms: tst1.1,
ts1_off_ns: tst1.2,
ts2_off_ms: tst2.0,
ts2_off_ns: tst2.1,
counts,
mins,
maxs,
@@ -263,15 +268,15 @@ impl<NTY: ScalarOps> CollectorType for MinMaxAvgBinsCollector<NTY> {
}
}
impl<NTY: ScalarOps> CollectableType for MinMaxAvgDim0Bins<NTY> {
type Collector = MinMaxAvgBinsCollector<NTY>;
impl<NTY: ScalarOps> CollectableType for BinsDim0<NTY> {
type Collector = BinsDim0Collector<NTY>;
fn new_collector() -> Self::Collector {
Self::Collector::new()
}
}
pub struct MinMaxAvgDim0BinsAggregator<NTY> {
pub struct BinsDim0Aggregator<NTY> {
range: NanoRange,
count: u64,
min: NTY,
@@ -282,7 +287,7 @@ pub struct MinMaxAvgDim0BinsAggregator<NTY> {
sum: f32,
}
impl<NTY: ScalarOps> MinMaxAvgDim0BinsAggregator<NTY> {
impl<NTY: ScalarOps> BinsDim0Aggregator<NTY> {
pub fn new(range: NanoRange, _do_time_weight: bool) -> Self {
Self {
range,
@@ -296,9 +301,9 @@ impl<NTY: ScalarOps> MinMaxAvgDim0BinsAggregator<NTY> {
}
}
impl<NTY: ScalarOps> TimeBinnableTypeAggregator for MinMaxAvgDim0BinsAggregator<NTY> {
type Input = MinMaxAvgDim0Bins<NTY>;
type Output = MinMaxAvgDim0Bins<NTY>;
impl<NTY: ScalarOps> TimeBinnableTypeAggregator for BinsDim0Aggregator<NTY> {
type Input = BinsDim0<NTY>;
type Output = BinsDim0<NTY>;
fn range(&self) -> &NanoRange {
&self.range
@@ -348,9 +353,9 @@ impl<NTY: ScalarOps> TimeBinnableTypeAggregator for MinMaxAvgDim0BinsAggregator<
}
}
impl<NTY: ScalarOps> TimeBinnable for MinMaxAvgDim0Bins<NTY> {
impl<NTY: ScalarOps> TimeBinnable for BinsDim0<NTY> {
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinner> {
let ret = MinMaxAvgDim0BinsTimeBinner::<NTY>::new(edges.into(), do_time_weight);
let ret = BinsDim0TimeBinner::<NTY>::new(edges.into(), do_time_weight);
Box::new(ret)
}
@@ -359,14 +364,14 @@ impl<NTY: ScalarOps> TimeBinnable for MinMaxAvgDim0Bins<NTY> {
}
}
pub struct MinMaxAvgDim0BinsTimeBinner<NTY: ScalarOps> {
pub struct BinsDim0TimeBinner<NTY: ScalarOps> {
edges: VecDeque<u64>,
do_time_weight: bool,
agg: Option<MinMaxAvgDim0BinsAggregator<NTY>>,
ready: Option<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
agg: Option<BinsDim0Aggregator<NTY>>,
ready: Option<<BinsDim0Aggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
}
impl<NTY: ScalarOps> MinMaxAvgDim0BinsTimeBinner<NTY> {
impl<NTY: ScalarOps> BinsDim0TimeBinner<NTY> {
fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
Self {
edges,
@@ -390,7 +395,7 @@ impl<NTY: ScalarOps> MinMaxAvgDim0BinsTimeBinner<NTY> {
}
}
impl<NTY: ScalarOps> TimeBinner for MinMaxAvgDim0BinsTimeBinner<NTY> {
impl<NTY: ScalarOps> TimeBinner for BinsDim0TimeBinner<NTY> {
fn ingest(&mut self, item: &dyn TimeBinnable) {
let self_name = std::any::type_name::<Self>();
if item.len() == 0 {
@@ -427,7 +432,7 @@ impl<NTY: ScalarOps> TimeBinner for MinMaxAvgDim0BinsTimeBinner<NTY> {
let agg = if let Some(agg) = self.agg.as_mut() {
agg
} else {
self.agg = Some(MinMaxAvgDim0BinsAggregator::new(
self.agg = Some(BinsDim0Aggregator::new(
// We know here that we have enough edges for another bin.
// and `next_bin_range` will pop the first edge.
self.next_bin_range().unwrap(),
@@ -438,7 +443,7 @@ impl<NTY: ScalarOps> TimeBinner for MinMaxAvgDim0BinsTimeBinner<NTY> {
if let Some(item) = item
.as_any()
// TODO make statically sure that we attempt to cast to the correct type here:
.downcast_ref::<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
.downcast_ref::<<BinsDim0Aggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
{
agg.ingest(item);
} else {
@@ -502,7 +507,7 @@ impl<NTY: ScalarOps> TimeBinner for MinMaxAvgDim0BinsTimeBinner<NTY> {
self.push_in_progress(true);
if self.bins_ready_count() == n {
if let Some(_range) = self.next_bin_range() {
let bins = MinMaxAvgDim0Bins::<NTY>::empty();
let bins = BinsDim0::<NTY>::empty();
err::todo();
//bins.append_zero(range.beg, range.end);
match self.ready.as_mut() {
@@ -524,7 +529,7 @@ impl<NTY: ScalarOps> TimeBinner for MinMaxAvgDim0BinsTimeBinner<NTY> {
}
}
impl<NTY: ScalarOps> TimeBinned for MinMaxAvgDim0Bins<NTY> {
impl<NTY: ScalarOps> TimeBinned for BinsDim0<NTY> {
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable {
self as &dyn TimeBinnable
}
@@ -576,4 +581,8 @@ impl<NTY: ScalarOps> TimeBinned for MinMaxAvgDim0Bins<NTY> {
Err(msg)
}
}
fn as_collectable_mut(&mut self) -> &mut dyn Collectable {
self
}
}

View File

@@ -1,4 +1,4 @@
use crate::binsdim0::MinMaxAvgDim0Bins;
use crate::binsdim0::BinsDim0;
use crate::streams::{CollectableType, CollectorType, ToJsonResult};
use crate::{pulse_offs_from_abs, ts_offs_from_abs, RangeOverlapInfo};
use crate::{Empty, Events, ScalarOps, WithLen};
@@ -87,7 +87,7 @@ impl<NTY: ScalarOps> RangeOverlapInfo for EventsDim0<NTY> {
}
impl<NTY: ScalarOps> TimeBinnableType for EventsDim0<NTY> {
type Output = MinMaxAvgDim0Bins<NTY>;
type Output = BinsDim0<NTY>;
type Aggregator = EventValuesAggregator<NTY>;
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
@@ -122,7 +122,7 @@ impl<NTY> WithLen for EventValuesCollector<NTY> {
}
}
#[derive(Serialize)]
#[derive(Debug, Serialize)]
pub struct EventValuesCollectorOutput<NTY> {
#[serde(rename = "tsAnchor")]
ts_anchor_sec: u64,
@@ -338,7 +338,7 @@ impl<NTY: ScalarOps> EventValuesAggregator<NTY> {
}
}
fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins<NTY> {
fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> BinsDim0<NTY> {
let (min, max, avg) = if self.sumc > 0 {
let avg = self.sum / self.sumc as f32;
(self.min.clone(), self.max.clone(), avg)
@@ -349,7 +349,7 @@ impl<NTY: ScalarOps> EventValuesAggregator<NTY> {
};
(g.clone(), g.clone(), g.as_prim_f32())
};
let ret = MinMaxAvgDim0Bins {
let ret = BinsDim0 {
ts1s: [self.range.beg].into(),
ts2s: [self.range.end].into(),
counts: [self.count].into(),
@@ -365,7 +365,7 @@ impl<NTY: ScalarOps> EventValuesAggregator<NTY> {
ret
}
fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim0Bins<NTY> {
fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> BinsDim0<NTY> {
// TODO check callsite for correct expand status.
if expand {
debug!("result_reset_time_weight calls apply_event_time_weight");
@@ -383,7 +383,7 @@ impl<NTY: ScalarOps> EventValuesAggregator<NTY> {
};
(g.clone(), g.clone(), g.as_prim_f32())
};
let ret = MinMaxAvgDim0Bins {
let ret = BinsDim0 {
ts1s: [self.range.beg].into(),
ts2s: [self.range.end].into(),
counts: [self.count].into(),
@@ -402,14 +402,16 @@ impl<NTY: ScalarOps> EventValuesAggregator<NTY> {
impl<NTY: ScalarOps> TimeBinnableTypeAggregator for EventValuesAggregator<NTY> {
type Input = EventsDim0<NTY>;
type Output = MinMaxAvgDim0Bins<NTY>;
type Output = BinsDim0<NTY>;
fn range(&self) -> &NanoRange {
&self.range
}
fn ingest(&mut self, item: &Self::Input) {
debug!("ingest len {}", item.len());
for ts in &item.tss {
eprintln!("EventValuesAggregator ingest {ts:20}");
}
if self.do_time_weight {
self.ingest_time_weight(item)
} else {
@@ -512,6 +514,8 @@ pub struct ScalarEventsTimeBinner<NTY: ScalarOps> {
impl<NTY: ScalarOps> ScalarEventsTimeBinner<NTY> {
fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
let self_name = std::any::type_name::<Self>();
eprintln!("{self_name}::new edges {edges:?}");
Self {
edges,
do_time_weight,
@@ -661,7 +665,7 @@ impl<NTY: ScalarOps> TimeBinner for ScalarEventsTimeBinner<NTY> {
self.push_in_progress(true);
if self.bins_ready_count() == n {
if let Some(_range) = self.next_bin_range() {
let bins = MinMaxAvgDim0Bins::<NTY>::empty();
let bins = BinsDim0::<NTY>::empty();
error!("TODO eventsdim0 time binner append");
err::todo();
//bins.append_zero(range.beg, range.end);

View File

@@ -6,6 +6,7 @@ pub mod test;
use chrono::{DateTime, TimeZone, Utc};
use futures_util::Stream;
use futures_util::StreamExt;
use netpod::log::*;
use netpod::timeunits::*;
use netpod::{AggKind, NanoRange, ScalarType, Shape};
@@ -17,6 +18,7 @@ use std::ops::ControlFlow;
use std::pin::Pin;
use std::task::{Context, Poll};
use streams::Collectable;
use streams::ToJsonResult;
pub fn bool_is_false(x: &bool) -> bool {
*x == false
@@ -35,6 +37,17 @@ pub fn ts_offs_from_abs(tss: &[u64]) -> (u64, VecDeque<u64>, VecDeque<u64>) {
(ts_anchor_sec, ts_off_ms, ts_off_ns)
}
/// Split absolute nanosecond timestamps into millisecond and sub-millisecond
/// nanosecond offsets relative to an already-known anchor (given in seconds).
///
/// Counterpart to `ts_offs_from_abs` for the case where the anchor was
/// derived from a different timestamp series (e.g. bin-end times anchored to
/// the bin-start anchor). Returns `(off_ms, off_ns)` with one entry per input.
pub fn ts_offs_from_abs_with_anchor(ts_anchor_sec: u64, tss: &[u64]) -> (VecDeque<u64>, VecDeque<u64>) {
    let anchor_ns = ts_anchor_sec * SEC;
    let mut off_ms = VecDeque::with_capacity(tss.len());
    let mut off_ns = VecDeque::with_capacity(tss.len());
    for &ts in tss {
        // Total offset from the anchor, then split into whole ms and the
        // nanosecond remainder (d % MS == d - (d / MS) * MS).
        let d = ts - anchor_ns;
        off_ms.push_back(d / MS);
        off_ns.push_back(d % MS);
    }
    (off_ms, off_ns)
}
// TODO take iterator instead of slice, because a VecDeque can't produce a slice in general.
pub fn pulse_offs_from_abs(pulse: &[u64]) -> (u64, VecDeque<u64>) {
let pulse_anchor = pulse.first().map_or(0, |k| *k);
@@ -112,6 +125,7 @@ struct Ts(u64);
#[derive(Debug, PartialEq)]
pub enum ErrorKind {
General,
#[allow(unused)]
MismatchedType,
}
@@ -120,11 +134,27 @@ pub enum ErrorKind {
pub struct Error {
#[allow(unused)]
kind: ErrorKind,
msg: Option<String>,
}
// Display currently just delegates to the Debug representation; there is no
// separate user-facing formatting for `Error` yet.
impl fmt::Display for Error {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{self:?}")
    }
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Self {
Self { kind }
Self { kind, msg: None }
}
}
/// Allow ad-hoc string messages to be converted into an `Error`
/// (enables `Err(format!(...))?` at call sites).
impl From<String> for Error {
    fn from(msg: String) -> Self {
        // String-based errors carry no specific classification.
        let kind = ErrorKind::General;
        let msg = Some(msg);
        Self { kind, msg }
    }
}
@@ -190,9 +220,9 @@ pub fn make_iso_ts(tss: &[u64]) -> Vec<IsoDateTime> {
}
pub trait TimeBinner: Send {
fn ingest(&mut self, item: &dyn TimeBinnable);
fn bins_ready_count(&self) -> usize;
fn bins_ready(&mut self) -> Option<Box<dyn TimeBinned>>;
fn ingest(&mut self, item: &dyn TimeBinnable);
/// If there is a bin in progress with non-zero count, push it to the result set.
/// With push_empty == true, a bin in progress is pushed even if it contains no counts.
@@ -206,13 +236,13 @@ pub trait TimeBinner: Send {
/// Provides a time-binned representation of the implementing type.
/// In contrast to `TimeBinnableType` this is meant for trait objects.
pub trait TimeBinnable: WithLen + RangeOverlapInfo + Any + Send {
pub trait TimeBinnable: fmt::Debug + WithLen + RangeOverlapInfo + Any + Send {
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinner>;
fn as_any(&self) -> &dyn Any;
}
/// Container of some form of events, for use as trait object.
pub trait Events: fmt::Debug + Any + Collectable + TimeBinnable {
pub trait Events: fmt::Debug + Any + Collectable + TimeBinnable + Send {
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable;
fn verify(&self);
fn output_info(&self);
@@ -229,8 +259,9 @@ impl PartialEq for Box<dyn Events> {
}
/// Data in time-binned form.
pub trait TimeBinned: TimeBinnable {
pub trait TimeBinned: Any + TimeBinnable {
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable;
fn as_collectable_mut(&mut self) -> &mut dyn Collectable;
fn edges_slice(&self) -> (&[u64], &[u64]);
fn counts(&self) -> &[u64];
fn mins(&self) -> Vec<f32>;
@@ -297,7 +328,7 @@ pub fn empty_binned_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggK
Shape::Scalar => match agg_kind {
AggKind::TimeWeightedScalar => {
use ScalarType::*;
type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
type K<T> = binsdim0::BinsDim0<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
U16 => Box::new(K::<u16>::empty()),
@@ -323,7 +354,7 @@ pub fn empty_binned_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggK
Shape::Wave(_n) => match agg_kind {
AggKind::DimXBins1 => {
use ScalarType::*;
type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
type K<T> = binsdim0::BinsDim0<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
F32 => Box::new(K::<f32>::empty()),
@@ -418,8 +449,8 @@ impl MergableEvents for ChannelEvents {
}
pub struct ChannelEventsMerger {
inp1: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>>>>,
inp2: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>>>>,
inp1: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>> + Send>>,
inp2: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>> + Send>>,
inp1_done: bool,
inp2_done: bool,
inp1_item: Option<ChannelEvents>,
@@ -432,8 +463,8 @@ pub struct ChannelEventsMerger {
impl ChannelEventsMerger {
pub fn new(
inp1: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>>>>,
inp2: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>>>>,
inp1: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>> + Send>>,
inp2: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>> + Send>>,
) -> Self {
Self {
done: false,
@@ -682,3 +713,84 @@ impl Collectable for Box<dyn Collectable> {
Collectable::as_any_mut(self.as_mut())
}
}
/// Consume a stream of channel events, time-bin them over a fixed set of
/// edges, and collect all finished bins into one JSON-serializable result.
///
/// NOTE(review): the edges are hard-coded to ten edges at 0s, 10s, ..., 90s —
/// presumably a placeholder until a query range is threaded through; confirm
/// against callers before relying on this function.
///
/// # Errors
/// Propagates stream item errors, binner inconsistencies (ready count > 0 but
/// no bins returned), collector finalization errors, and the not-yet-implemented
/// empty-input case.
pub async fn binned_collected(
    inp: Pin<Box<dyn Stream<Item = Result<ChannelEvents, Error>> + Send>>,
) -> Result<Box<dyn ToJsonResult>, Error> {
    // Both the collector and the binner are created lazily from the first
    // Events item, because their concrete types depend on the event type.
    let mut coll = None;
    let mut binner = None;
    let edges: Vec<_> = (0..10).into_iter().map(|t| SEC * 10 * t).collect();
    let do_time_weight = true;
    let mut inp = inp;
    while let Some(item) = inp.next().await {
        // Fail fast on transport/stream errors.
        let item = item?;
        match item {
            ChannelEvents::Events(events) => {
                if binner.is_none() {
                    // First events item picks the binner implementation.
                    let bb = events
                        .as_time_binnable_dyn()
                        .time_binner_new(edges.clone(), do_time_weight);
                    binner = Some(bb);
                }
                let binner = binner.as_mut().unwrap();
                binner.ingest(events.as_time_binnable_dyn());
                eprintln!("bins_ready_count: {}", binner.bins_ready_count());
                if binner.bins_ready_count() > 0 {
                    // Drain finished bins into the (lazily created) collector.
                    let ready = binner.bins_ready();
                    match ready {
                        Some(mut ready) => {
                            eprintln!("ready {ready:?}");
                            if coll.is_none() {
                                coll = Some(ready.as_collectable_mut().new_collector());
                            }
                            let cl = coll.as_mut().unwrap();
                            cl.ingest(ready.as_collectable_mut());
                        }
                        None => {
                            // Binner reported ready bins but produced none —
                            // treat as an internal inconsistency.
                            return Err(format!("bins_ready_count but no result").into());
                        }
                    }
                }
            }
            ChannelEvents::Status(_) => {
                eprintln!("TODO Status");
            }
            ChannelEvents::RangeComplete => {
                eprintln!("TODO RangeComplete");
            }
        }
    }
    // Input exhausted: close out the bin in progress and drain what remains.
    if let Some(mut binner) = binner {
        binner.cycle();
        // TODO merge with the same logic above in the loop.
        if binner.bins_ready_count() > 0 {
            let ready = binner.bins_ready();
            match ready {
                Some(mut ready) => {
                    eprintln!("ready {ready:?}");
                    if coll.is_none() {
                        coll = Some(ready.as_collectable_mut().new_collector());
                    }
                    let cl = coll.as_mut().unwrap();
                    cl.ingest(ready.as_collectable_mut());
                }
                None => {
                    return Err(format!("bins_ready_count but no result").into());
                }
            }
        }
    }
    match coll {
        Some(mut coll) => {
            // Finalize the accumulated bins into the collected output type.
            let res = coll.result().map_err(|e| format!("{e}"))?;
            //let res = res.to_json_result().map_err(|e| format!("{e}"))?;
            //let res = res.to_json_bytes().map_err(|e| format!("{e}"))?;
            eprintln!("res {res:?}");
            Ok(res)
        }
        None => {
            // No events arrived, so no collector was ever created; producing
            // an empty result shape is not implemented yet.
            //empty_binned_dyn(scalar_type, shape, agg_kind)
            Err(format!("TODO produce empty result"))?
        }
    }
}

View File

@@ -2,6 +2,7 @@ use crate::WithLen;
use err::Error;
use serde::Serialize;
use std::any::Any;
use std::fmt;
pub trait CollectorType: Send + Unpin + WithLen {
type Input: Collectable;
@@ -66,7 +67,7 @@ pub trait ToJsonBytes {
fn to_json_bytes(&self) -> Result<Vec<u8>, Error>;
}
pub trait ToJsonResult {
pub trait ToJsonResult: fmt::Debug + Send {
fn to_json_result(&self) -> Result<Box<dyn ToJsonBytes>, Error>;
}

View File

@@ -2,6 +2,7 @@ use crate::eventsdim0::EventsDim0;
use crate::{ChannelEvents, ChannelEventsMerger, ConnStatus, Empty};
use crate::{ConnStatusEvent, Error};
use futures_util::StreamExt;
use netpod::timeunits::SEC;
#[test]
fn merge01() {
@@ -135,3 +136,97 @@ fn merge03() {
};
tokio::runtime::Runtime::new().unwrap().block_on(fut);
}
#[test]
fn bin01() {
    // End-to-end smoke test: merge two event batches, time-bin them, and
    // collect the resulting bins. Mirrors the logic of `binned_collected`
    // inline (see the TODO below about merging the duplicated drain logic).
    let fut = async {
        // Two batches of 10 scalar events: timestamps 0..10s and 10..20s,
        // pulse ids matching the second, all with value 17f32.
        let mut events_vec1 = Vec::new();
        for j in 0..2 {
            let mut events = EventsDim0::empty();
            for i in 10 * j..10 * (1 + j) {
                events.push(SEC * i, i, 17f32);
            }
            events_vec1.push(Ok(ChannelEvents::Events(Box::new(events))));
        }
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        // Second merger input is empty; exercises the merger with one live side.
        let inp2 = Box::pin(futures_util::stream::empty());
        let mut stream = ChannelEventsMerger::new(inp1, inp2);
        // Collector and binner are created lazily from the first Events item.
        let mut coll = None;
        let mut binner = None;
        // Ten edges at 0s, 10s, ..., 90s.
        let edges: Vec<_> = (0..10).into_iter().map(|t| SEC * 10 * t).collect();
        let do_time_weight = true;
        while let Some(item) = stream.next().await {
            let item = item?;
            match item {
                ChannelEvents::Events(events) => {
                    if binner.is_none() {
                        let bb = events
                            .as_time_binnable_dyn()
                            .time_binner_new(edges.clone(), do_time_weight);
                        binner = Some(bb);
                    }
                    let binner = binner.as_mut().unwrap();
                    binner.ingest(events.as_time_binnable_dyn());
                    eprintln!("bins_ready_count: {}", binner.bins_ready_count());
                    if binner.bins_ready_count() > 0 {
                        // Drain finished bins into the collector.
                        let ready = binner.bins_ready();
                        match ready {
                            Some(mut ready) => {
                                eprintln!("ready {ready:?}");
                                if coll.is_none() {
                                    coll = Some(ready.as_collectable_mut().new_collector());
                                }
                                let cl = coll.as_mut().unwrap();
                                cl.ingest(ready.as_collectable_mut());
                            }
                            None => {
                                // Ready count and actual bins disagree.
                                return Err(format!("bins_ready_count but no result").into());
                            }
                        }
                    }
                }
                ChannelEvents::Status(_) => {
                    eprintln!("TODO Status");
                }
                ChannelEvents::RangeComplete => {
                    eprintln!("TODO RangeComplete");
                }
            }
        }
        // Stream exhausted: flush the in-progress bin and drain the rest.
        if let Some(mut binner) = binner {
            binner.cycle();
            // TODO merge with the same logic above in the loop.
            if binner.bins_ready_count() > 0 {
                let ready = binner.bins_ready();
                match ready {
                    Some(mut ready) => {
                        eprintln!("ready {ready:?}");
                        if coll.is_none() {
                            coll = Some(ready.as_collectable_mut().new_collector());
                        }
                        let cl = coll.as_mut().unwrap();
                        cl.ingest(ready.as_collectable_mut());
                    }
                    None => {
                        return Err(format!("bins_ready_count but no result").into());
                    }
                }
            }
        }
        match coll {
            Some(mut coll) => {
                // NOTE(review): only checks that result() succeeds; no
                // assertions on bin contents yet.
                let res = coll.result().map_err(|e| format!("{e}"))?;
                //let res = res.to_json_result().map_err(|e| format!("{e}"))?;
                //let res = res.to_json_bytes().map_err(|e| format!("{e}"))?;
                eprintln!("res {res:?}");
            }
            None => {
                // Events were fed in, so a collector must exist.
                panic!();
            }
        }
        Ok::<_, Error>(())
    };
    tokio::runtime::Runtime::new().unwrap().block_on(fut).unwrap();
}