Refactor and prepare for Scylla-based bin caching

This commit is contained in:
Dominik Werder
2022-07-06 15:51:05 +02:00
parent 66215f583f
commit d0a7240934
29 changed files with 1647 additions and 542 deletions

View File

@@ -60,6 +60,11 @@ impl Appendable for SingleBinWaveEvents {
}
})
}
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
// TODO can this implement Appendable in a sane way? Do we need it?
err::todo();
}
}
impl PushableIndex for SingleBinWaveEvents {
@@ -150,6 +155,11 @@ impl Appendable for MultiBinWaveEvents {
}
})
}
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
// TODO can this implement Appendable in a sane way? Do we need it?
err::todo();
}
}
impl PushableIndex for MultiBinWaveEvents {
@@ -249,6 +259,11 @@ impl Appendable for XBinnedEvents {
},
}
}
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
// TODO can this implement Appendable in a sane way? Do we need it?
err::todo();
}
}
impl PushableIndex for XBinnedEvents {

View File

@@ -1,17 +1,19 @@
use crate::numops::NumOps;
use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult};
use crate::{
ts_offs_from_abs, Appendable, FilterFittingInside, Fits, FitsInside, FrameTypeStatic, IsoDateTime,
RangeOverlapInfo, ReadPbv, ReadableFromFile, Sitemty, SitemtyFrameType, SubFrId, TimeBinnableDyn,
TimeBinnableDynAggregator, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinned, TimeBins, WithLen,
ts_offs_from_abs, Appendable, FilterFittingInside, Fits, FitsInside, FrameTypeStatic, IsoDateTime, NewEmpty,
RangeOverlapInfo, ReadPbv, ReadableFromFile, Sitemty, SitemtyFrameType, SubFrId, TimeBinnableDyn, TimeBinnableType,
TimeBinnableTypeAggregator, TimeBinned, TimeBinnerDyn, TimeBins, WithLen,
};
use chrono::{TimeZone, Utc};
use err::Error;
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::NanoRange;
use netpod::{NanoRange, Shape};
use num_traits::Zero;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use std::marker::PhantomData;
use tokio::fs::File;
@@ -21,10 +23,9 @@ pub struct MinMaxAvgDim0Bins<NTY> {
pub ts1s: Vec<u64>,
pub ts2s: Vec<u64>,
pub counts: Vec<u64>,
// TODO get rid of Option:
pub mins: Vec<Option<NTY>>,
pub maxs: Vec<Option<NTY>>,
pub avgs: Vec<Option<f32>>,
pub mins: Vec<NTY>,
pub maxs: Vec<NTY>,
pub avgs: Vec<f32>,
}
impl<NTY> FrameTypeStatic for MinMaxAvgDim0Bins<NTY>
@@ -155,6 +156,19 @@ impl<NTY> WithLen for MinMaxAvgDim0Bins<NTY> {
}
}
impl<NTY> NewEmpty for MinMaxAvgDim0Bins<NTY> {
    /// Creates a container with no bins. Scalar bins carry no per-bin shape,
    /// so the `_shape` argument is ignored.
    fn empty(_shape: Shape) -> Self {
        Self {
            ts1s: vec![],
            ts2s: vec![],
            counts: vec![],
            mins: vec![],
            maxs: vec![],
            avgs: vec![],
        }
    }
}
impl<NTY> Appendable for MinMaxAvgDim0Bins<NTY>
where
NTY: NumOps,
@@ -171,6 +185,15 @@ where
self.maxs.extend_from_slice(&src.maxs);
self.avgs.extend_from_slice(&src.avgs);
}
/// Appends one bin covering `[ts1, ts2)` with zero events.
/// Min/max are filled with `NTY::zero()` since there is no data to aggregate;
/// consumers must treat bins with `count == 0` accordingly.
fn append_zero(&mut self, ts1: u64, ts2: u64) {
    self.ts1s.push(ts1);
    self.ts2s.push(ts2);
    self.counts.push(0);
    self.mins.push(NTY::zero());
    self.maxs.push(NTY::zero());
    self.avgs.push(0.);
}
}
impl<NTY> ReadableFromFile for MinMaxAvgDim0Bins<NTY>
@@ -193,7 +216,7 @@ where
NTY: NumOps,
{
type Output = MinMaxAvgDim0Bins<NTY>;
type Aggregator = MinMaxAvgBinsAggregator<NTY>;
type Aggregator = MinMaxAvgDim0BinsAggregator<NTY>;
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
debug!(
@@ -233,11 +256,10 @@ pub struct MinMaxAvgBinsCollectedResult<NTY> {
ts_off_ms: Vec<u64>,
#[serde(rename = "tsNs")]
ts_off_ns: Vec<u64>,
//ts_bin_edges: Vec<IsoDateTime>,
counts: Vec<u64>,
mins: Vec<Option<NTY>>,
maxs: Vec<Option<NTY>>,
avgs: Vec<Option<f32>>,
mins: Vec<NTY>,
maxs: Vec<NTY>,
avgs: Vec<f32>,
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "finalisedRange")]
finalised_range: bool,
#[serde(skip_serializing_if = "Zero::is_zero", rename = "missingBins")]
@@ -340,29 +362,29 @@ where
}
}
pub struct MinMaxAvgBinsAggregator<NTY> {
pub struct MinMaxAvgDim0BinsAggregator<NTY> {
range: NanoRange,
count: u64,
min: Option<NTY>,
max: Option<NTY>,
min: NTY,
max: NTY,
sumc: u64,
sum: f32,
}
impl<NTY> MinMaxAvgBinsAggregator<NTY> {
impl<NTY: NumOps> MinMaxAvgDim0BinsAggregator<NTY> {
pub fn new(range: NanoRange, _do_time_weight: bool) -> Self {
Self {
range,
count: 0,
min: None,
max: None,
min: NTY::zero(),
max: NTY::zero(),
sumc: 0,
sum: 0f32,
}
}
}
impl<NTY> TimeBinnableTypeAggregator for MinMaxAvgBinsAggregator<NTY>
impl<NTY> TimeBinnableTypeAggregator for MinMaxAvgDim0BinsAggregator<NTY>
where
NTY: NumOps,
{
@@ -375,55 +397,33 @@ where
fn ingest(&mut self, item: &Self::Input) {
for i1 in 0..item.ts1s.len() {
if item.ts2s[i1] <= self.range.beg {
if item.counts[i1] == 0 {
} else if item.ts2s[i1] <= self.range.beg {
} else if item.ts1s[i1] >= self.range.end {
} else {
self.min = match &self.min {
None => item.mins[i1].clone(),
Some(min) => match &item.mins[i1] {
None => Some(min.clone()),
Some(v) => {
if v < &min {
Some(v.clone())
} else {
Some(min.clone())
}
}
},
};
self.max = match &self.max {
None => item.maxs[i1].clone(),
Some(max) => match &item.maxs[i1] {
None => Some(max.clone()),
Some(v) => {
if v > &max {
Some(v.clone())
} else {
Some(max.clone())
}
}
},
};
match item.avgs[i1] {
None => {}
Some(v) => {
if v.is_nan() {
} else {
self.sum += v;
self.sumc += 1;
}
if self.count == 0 {
self.min = item.mins[i1].clone();
self.max = item.maxs[i1].clone();
} else {
if item.mins[i1] < self.min {
self.min = item.mins[i1].clone();
}
if item.maxs[i1] > self.max {
self.max = item.maxs[i1].clone();
}
}
self.count += item.counts[i1];
self.sum += item.avgs[i1];
self.sumc += 1;
}
}
}
fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output {
let avg = if self.sumc == 0 {
None
0f32
} else {
Some(self.sum / self.sumc as f32)
self.sum / self.sumc as f32
};
let ret = Self::Output {
ts1s: vec![self.range.beg],
@@ -434,8 +434,8 @@ where
avgs: vec![avg],
};
self.count = 0;
self.min = None;
self.max = None;
self.min = NTY::zero();
self.max = NTY::zero();
self.range = range;
self.sum = 0f32;
self.sumc = 0;
@@ -443,9 +443,171 @@ where
}
}
impl<NTY: NumOps> TimeBinnableDyn for MinMaxAvgDim0Bins<NTY> {
fn aggregator_new(&self) -> Box<dyn TimeBinnableDynAggregator> {
todo!()
impl<NTY: NumOps + 'static> TimeBinnableDyn for MinMaxAvgDim0Bins<NTY> {
    /// Creates a time binner which re-bins these bins over the given edges.
    fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinnerDyn> {
        // Log via the logging framework only; the former duplicate `eprintln!`
        // to stderr bypassed the log infrastructure and was debug residue.
        info!("MinMaxAvgDim0Bins time_binner_new");
        let ret = MinMaxAvgDim0BinsTimeBinner::<NTY>::new(edges.into(), do_time_weight);
        Box::new(ret)
    }

    /// Enables downcasting to the concrete container type.
    fn as_any(&self) -> &dyn Any {
        self as &dyn Any
    }
}
/// Time binner driving a `MinMaxAvgDim0BinsAggregator` over a list of bin edges.
pub struct MinMaxAvgDim0BinsTimeBinner<NTY: NumOps> {
    // Remaining bin edges; the front two edges delimit the bin currently being filled.
    edges: VecDeque<u64>,
    // Whether aggregation should weight events by time coverage.
    do_time_weight: bool,
    // Range of the bin currently being filled (dummy 1..2 when fewer than two edges remain).
    range: NanoRange,
    // Aggregator for the current bin; None until data arrives for it.
    agg: Option<MinMaxAvgDim0BinsAggregator<NTY>>,
    // Completed bins not yet handed to the caller via `bins_ready`.
    ready: Option<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
}
impl<NTY: NumOps> MinMaxAvgDim0BinsTimeBinner<NTY> {
/// Builds a binner over the given edge list.
/// With fewer than two edges no real bin exists and a placeholder range is used.
fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
    let range = match (edges.get(0), edges.get(1)) {
        (Some(&beg), Some(&end)) => NanoRange { beg, end },
        // Using a dummy for this case.
        _ => NanoRange { beg: 1, end: 2 },
    };
    Self {
        edges,
        do_time_weight,
        range,
        agg: None,
        ready: None,
    }
}
// Move the bin from the current aggregator (if any) to our output collection,
// and step forward in our bin list.
fn cycle(&mut self) {
eprintln!("cycle");
// TODO where to take expand from? Is it still required after all?
let expand = true;
let have_next_bin = self.edges.len() >= 3;
let range_next = if have_next_bin {
NanoRange {
beg: self.edges[1],
end: self.edges[2],
}
} else {
// Using a dummy for this case.
NanoRange { beg: 1, end: 2 }
};
if let Some(agg) = self.agg.as_mut() {
eprintln!("cycle: use existing agg: {:?}", agg.range);
let mut h = agg.result_reset(range_next.clone(), expand);
match self.ready.as_mut() {
Some(fin) => {
fin.append(&mut h);
}
None => {
self.ready = Some(h);
}
}
} else if have_next_bin {
eprintln!("cycle: append a zero bin");
let mut h = MinMaxAvgDim0Bins::<NTY>::empty();
h.append_zero(self.range.beg, self.range.end);
match self.ready.as_mut() {
Some(fin) => {
fin.append(&mut h);
}
None => {
self.ready = Some(h);
}
}
} else {
eprintln!("cycle: no more next bin");
}
self.range = range_next;
self.edges.pop_front();
if !have_next_bin {
self.agg = None;
}
}
}
impl<NTY: NumOps + 'static> TimeBinnerDyn for MinMaxAvgDim0BinsTimeBinner<NTY> {
    fn cycle(&mut self) {
        // Delegate to the inherent `cycle`, which finalizes the current bin.
        Self::cycle(self)
    }

    /// Feeds already-binned input into the binner, distributing it over the
    /// remaining edge list and finalizing bins as the input passes them.
    fn ingest(&mut self, item: &dyn TimeBinnableDyn) {
        const SELF: &str = "MinMaxAvgDim0BinsTimeBinner";
        if item.len() == 0 {
            // Return already here, RangeOverlapInfo would not give much sense.
            return;
        }
        if self.edges.len() < 2 {
            warn!("TimeBinnerDyn for {SELF} no more bin in edges A");
            return;
        }
        // TODO optimize by remembering at which event array index we have arrived.
        // That needs modified interfaces which can take and yield the start and latest index.
        loop {
            // Close out (possibly empty) bins until the current bin overlaps the item.
            while item.starts_after(self.range.clone()) {
                self.cycle();
                if self.edges.len() < 2 {
                    warn!("TimeBinnerDyn for {SELF} no more bin in edges B");
                    return;
                }
            }
            if item.ends_before(self.range.clone()) {
                return;
            } else {
                if self.edges.len() < 2 {
                    warn!("TimeBinnerDyn for {SELF} edge list exhausted");
                    return;
                } else {
                    // Lazily create the aggregator for the current bin.
                    if self.agg.is_none() {
                        self.agg = Some(MinMaxAvgDim0BinsAggregator::new(
                            self.range.clone(),
                            self.do_time_weight,
                        ));
                    }
                    let agg = self.agg.as_mut().unwrap();
                    // Input must be the aggregator's concrete input type; downcast
                    // from the trait object and log (not panic) on mismatch.
                    if let Some(item) =
                        item.as_any()
                            .downcast_ref::<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
                    {
                        agg.ingest(item);
                    } else {
                        let tyid_item = std::any::Any::type_id(item.as_any());
                        error!("not correct item type {:?}", tyid_item);
                    };
                    if item.ends_after(self.range.clone()) {
                        // Item continues past this bin: finalize it and loop for the next.
                        self.cycle();
                        if self.edges.len() < 2 {
                            warn!("TimeBinnerDyn for {SELF} no more bin in edges C");
                            return;
                        }
                    } else {
                        break;
                    }
                }
            }
        }
    }

    /// Number of finished bins currently held for pickup via `bins_ready`.
    fn bins_ready_count(&self) -> usize {
        match &self.ready {
            Some(k) => k.len(),
            None => 0,
        }
    }

    /// Takes all finished bins accumulated so far, leaving none behind.
    fn bins_ready(&mut self) -> Option<Box<dyn crate::TimeBinned>> {
        match self.ready.take() {
            Some(k) => Some(Box::new(k)),
            None => None,
        }
    }
}
@@ -454,13 +616,23 @@ impl<NTY: NumOps> TimeBinned for MinMaxAvgDim0Bins<NTY> {
self as &dyn TimeBinnableDyn
}
fn workaround_clone(&self) -> Box<dyn TimeBinned> {
// TODO remove
panic!()
fn edges_slice(&self) -> (&[u64], &[u64]) {
(&self.ts1s[..], &self.ts2s[..])
}
fn dummy_test_i32(&self) -> i32 {
// TODO remove
panic!()
fn counts(&self) -> &[u64] {
&self.counts[..]
}
fn mins(&self) -> Vec<f32> {
self.mins.iter().map(|x| x.clone().as_prim_f32()).collect()
}
fn maxs(&self) -> Vec<f32> {
self.maxs.iter().map(|x| x.clone().as_prim_f32()).collect()
}
fn avgs(&self) -> Vec<f32> {
self.avgs.clone()
}
}

View File

@@ -3,14 +3,14 @@ use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult};
use crate::waveevents::WaveEvents;
use crate::{
pulse_offs_from_abs, ts_offs_from_abs, Appendable, FilterFittingInside, Fits, FitsInside, FrameTypeStatic,
IsoDateTime, RangeOverlapInfo, ReadPbv, ReadableFromFile, Sitemty, SitemtyFrameType, SubFrId, TimeBinnableDyn,
TimeBinnableDynAggregator, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinned, TimeBins, WithLen,
IsoDateTime, NewEmpty, RangeOverlapInfo, ReadPbv, ReadableFromFile, Sitemty, SitemtyFrameType, SubFrId,
TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinned, TimeBins, WithLen,
};
use chrono::{TimeZone, Utc};
use err::Error;
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::NanoRange;
use netpod::{NanoRange, Shape};
use num_traits::Zero;
use serde::{Deserialize, Serialize};
use std::fmt;
@@ -155,6 +155,19 @@ impl<NTY> WithLen for MinMaxAvgDim1Bins<NTY> {
}
}
impl<NTY> NewEmpty for MinMaxAvgDim1Bins<NTY> {
    /// Creates a container with no bins; `_shape` is not needed to build the
    /// empty vectors and is ignored.
    fn empty(_shape: Shape) -> Self {
        Self {
            ts1s: vec![],
            ts2s: vec![],
            counts: vec![],
            mins: vec![],
            maxs: vec![],
            avgs: vec![],
        }
    }
}
impl<NTY> Appendable for MinMaxAvgDim1Bins<NTY>
where
NTY: NumOps,
@@ -171,6 +184,15 @@ where
self.maxs.extend_from_slice(&src.maxs);
self.avgs.extend_from_slice(&src.avgs);
}
/// Appends one zero-count bin covering `[ts1, ts2)`.
/// Dim1 bins still store per-bin values as `Option`, so absent data is `None`.
fn append_zero(&mut self, ts1: u64, ts2: u64) {
    self.ts1s.push(ts1);
    self.ts2s.push(ts2);
    self.counts.push(0);
    self.mins.push(None);
    self.maxs.push(None);
    self.avgs.push(None);
}
}
impl<NTY> ReadableFromFile for MinMaxAvgDim1Bins<NTY>
@@ -546,24 +568,30 @@ where
}
}
impl<NTY: NumOps> TimeBinnableDyn for MinMaxAvgDim1Bins<NTY> {
fn aggregator_new(&self) -> Box<dyn TimeBinnableDynAggregator> {
todo!()
}
}
impl<NTY: NumOps> crate::TimeBinnableDynStub for MinMaxAvgDim1Bins<NTY> {}
impl<NTY: NumOps> TimeBinned for MinMaxAvgDim1Bins<NTY> {
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
self as &dyn TimeBinnableDyn
}
fn workaround_clone(&self) -> Box<dyn TimeBinned> {
// TODO remove
panic!()
fn edges_slice(&self) -> (&[u64], &[u64]) {
(&self.ts1s[..], &self.ts2s[..])
}
fn dummy_test_i32(&self) -> i32 {
// TODO remove
panic!()
fn counts(&self) -> &[u64] {
&self.counts[..]
}
fn avgs(&self) -> Vec<f32> {
err::todoval()
}
fn mins(&self) -> Vec<f32> {
err::todoval()
}
fn maxs(&self) -> Vec<f32> {
err::todoval()
}
}

View File

@@ -94,6 +94,12 @@ impl Appendable for EventsItem {
},
}
}
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
// TODO can this implement Appendable in a sane way? Do we need it?
// TODO can we remove EventsItem?
err::todo();
}
}
impl PushableIndex for EventsItem {

View File

@@ -13,15 +13,10 @@ pub fn make_frame<FT>(item: &FT) -> Result<BytesMut, Error>
where
FT: FrameType + Serialize,
{
//trace!("make_frame");
if item.is_err() {
make_error_frame(item.err().unwrap())
} else {
make_frame_2(
item,
//FT::FRAME_TYPE_ID
item.frame_type_id(),
)
make_frame_2(item, item.frame_type_id())
}
}
@@ -29,12 +24,13 @@ pub fn make_frame_2<FT>(item: &FT, fty: u32) -> Result<BytesMut, Error>
where
FT: erased_serde::Serialize,
{
//trace!("make_frame_2");
trace!("make_frame_2 fty {:x}", fty);
let mut out = vec![];
use bincode::Options;
let opts = bincode::DefaultOptions::new()
//.with_fixint_encoding()
//.allow_trailing_bytes()
;
.with_little_endian()
.with_fixint_encoding()
.allow_trailing_bytes();
let mut ser = bincode::Serializer::new(&mut out, opts);
//let mut ser = serde_json::Serializer::new(std::io::stdout());
let mut ser2 = <dyn erased_serde::Serializer>::erase(&mut ser);
@@ -136,20 +132,41 @@ where
if frame.tyid() == ERROR_FRAME_TYPE_ID {
let k: ::err::Error = match bincode::deserialize(frame.buf()) {
Ok(item) => item,
Err(e) => Err(e)?,
Err(e) => {
error!(
"ERROR bincode::deserialize len {} ERROR_FRAME_TYPE_ID",
frame.buf().len()
);
let n = frame.buf().len().min(64);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
}
};
Ok(T::from_error(k))
} else {
let tyid = T::FRAME_TYPE_ID;
if frame.tyid() != tyid {
return Err(Error::with_msg(format!(
"type id mismatch expect {:x} found {:?}",
tyid, frame
"type id mismatch expect {:x} found {:x} {:?}",
tyid,
frame.tyid(),
frame
)));
}
match bincode::deserialize(frame.buf()) {
Ok(item) => Ok(item),
Err(e) => Err(e)?,
Err(e) => {
error!(
"ERROR bincode::deserialize len {} tyid {:x}",
frame.buf().len(),
frame.tyid()
);
let n = frame.buf().len().min(64);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
}
}
}
}

View File

@@ -18,15 +18,15 @@ use crate::numops::BoolNum;
use bytes::BytesMut;
use chrono::{TimeZone, Utc};
use err::Error;
use frame::make_error_frame;
#[allow(unused)]
use netpod::log::*;
use netpod::timeunits::{MS, SEC};
use netpod::{log::Level, AggKind, EventDataReadStats, EventQueryJsonStringFrame, NanoRange, Shape};
use netpod::{DiskStats, RangeFilterStats};
use netpod::{DiskStats, RangeFilterStats, ScalarType};
use numops::StringNum;
use serde::de::{self, DeserializeOwned, Visitor};
use serde::{Deserialize, Serialize, Serializer};
use std::any::Any;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
@@ -39,15 +39,16 @@ pub const TERM_FRAME_TYPE_ID: u32 = 0x01;
pub const ERROR_FRAME_TYPE_ID: u32 = 0x02;
pub const EVENT_QUERY_JSON_STRING_FRAME: u32 = 0x100;
pub const EVENT_VALUES_FRAME_TYPE_ID: u32 = 0x500;
pub const WAVE_EVENTS_FRAME_TYPE_ID: u32 = 0x800;
pub const X_BINNED_SCALAR_EVENTS_FRAME_TYPE_ID: u32 = 0x8800;
pub const X_BINNED_WAVE_EVENTS_FRAME_TYPE_ID: u32 = 0x900;
pub const MIN_MAX_AVG_WAVE_BINS: u32 = 0xa00;
pub const MIN_MAX_AVG_DIM_0_BINS_FRAME_TYPE_ID: u32 = 0x700;
pub const MIN_MAX_AVG_DIM_1_BINS_FRAME_TYPE_ID: u32 = 0xb00;
pub const MIN_MAX_AVG_DIM_1_BINS_FRAME_TYPE_ID: u32 = 0x800;
pub const MIN_MAX_AVG_WAVE_BINS: u32 = 0xa00;
pub const WAVE_EVENTS_FRAME_TYPE_ID: u32 = 0xb00;
pub const NON_DATA_FRAME_TYPE_ID: u32 = 0xc00;
pub const EVENT_FULL_FRAME_TYPE_ID: u32 = 0x2200;
pub const EVENTS_ITEM_FRAME_TYPE_ID: u32 = 0x2300;
pub const STATS_EVENTS_FRAME_TYPE_ID: u32 = 0x2400;
pub const X_BINNED_SCALAR_EVENTS_FRAME_TYPE_ID: u32 = 0x8800;
pub const X_BINNED_WAVE_EVENTS_FRAME_TYPE_ID: u32 = 0x8900;
pub fn bool_is_false(j: &bool) -> bool {
*j == false
@@ -177,7 +178,7 @@ impl SubFrId for u32 {
}
impl SubFrId for u64 {
const SUB: u32 = 10;
const SUB: u32 = 0xa;
}
impl SubFrId for i8 {
@@ -197,19 +198,19 @@ impl SubFrId for i64 {
}
impl SubFrId for f32 {
const SUB: u32 = 11;
const SUB: u32 = 0xb;
}
impl SubFrId for f64 {
const SUB: u32 = 12;
const SUB: u32 = 0xc;
}
impl SubFrId for StringNum {
const SUB: u32 = 13;
const SUB: u32 = 0xd;
}
impl SubFrId for BoolNum {
const SUB: u32 = 14;
const SUB: u32 = 0xe;
}
// To be implemented by the data containers, i.e. the T's in Sitemty<T>, e.g. ScalarEvents.
@@ -243,9 +244,8 @@ impl FrameTypeStatic for EventQueryJsonStringFrame {
impl<T: FrameTypeStatic> FrameTypeStatic for Sitemty<T> {
const FRAME_TYPE_ID: u32 = <T as FrameTypeStatic>::FRAME_TYPE_ID;
fn from_error(_: err::Error) -> Self {
// TODO remove this method.
panic!()
fn from_error(e: err::Error) -> Self {
Err(e)
}
}
@@ -310,46 +310,45 @@ impl SitemtyFrameType for Box<dyn TimeBinned> {
}
}
impl SitemtyFrameType for Box<dyn EventsDyn> {
fn frame_type_id(&self) -> u32 {
self.as_time_binnable_dyn().frame_type_id()
}
}
// TODO do we need Send here?
/// Anything which can serialize itself into a binary frame.
pub trait Framable {
    fn make_frame(&self) -> Result<BytesMut, Error>;
}
// erased_serde::Serialize
pub trait FramableInner: SitemtyFrameType + Send {
pub trait FramableInner: erased_serde::Serialize + SitemtyFrameType + Send {
fn _dummy(&self);
}
// erased_serde::Serialize`
impl<T: SitemtyFrameType + Send> FramableInner for T {
impl<T: erased_serde::Serialize + SitemtyFrameType + Send> FramableInner for T {
fn _dummy(&self) {}
}
//impl<T: SitemtyFrameType + Serialize + Send> FramableInner for Box<T> {}
erased_serde::serialize_trait_object!(EventsDyn);
erased_serde::serialize_trait_object!(TimeBinnableDyn);
erased_serde::serialize_trait_object!(TimeBinned);
// TODO need also Framable for those types defined in other crates.
// TODO not all T have FrameTypeStatic, e.g. Box<dyn TimeBinned>
impl<T> Framable for Sitemty<T>
//where
//Self: erased_serde::Serialize,
//T: FramableInner + FrameTypeStatic,
//T: Sized,
where
T: Sized + serde::Serialize + SitemtyFrameType,
{
fn make_frame(&self) -> Result<BytesMut, Error> {
todo!()
}
/*fn make_frame(&self) -> Result<BytesMut, Error> {
//trace!("make_frame");
match self {
Ok(_) => make_frame_2(
self,
//T::FRAME_TYPE_ID
self.frame_type_id(),
),
Err(e) => make_error_frame(e),
Ok(StreamItem::DataItem(RangeCompletableItem::Data(k))) => {
let frame_type_id = k.frame_type_id();
make_frame_2(self, frame_type_id)
}
_ => {
let frame_type_id = NON_DATA_FRAME_TYPE_ID;
make_frame_2(self, frame_type_id)
}
}
}*/
}
}
impl<T> Framable for Box<T>
@@ -421,6 +420,7 @@ pub trait ByteEstimate {
}
pub trait RangeOverlapInfo {
// TODO do not take by value.
fn ends_before(&self, range: NanoRange) -> bool;
fn ends_after(&self, range: NanoRange) -> bool;
fn starts_after(&self, range: NanoRange) -> bool;
@@ -439,9 +439,16 @@ pub trait PushableIndex {
fn push_index(&mut self, src: &Self, ix: usize);
}
/// Construction of an empty container, parametrized by the event shape
/// (implementations may ignore the shape when it is not needed).
pub trait NewEmpty {
    fn empty(shape: Shape) -> Self;
}
/// Containers which can be extended with the contents of another instance of
/// the same type, or with an explicit zero/placeholder entry.
pub trait Appendable: WithLen {
    /// Returns a new empty container of the same concrete type.
    fn empty_like_self(&self) -> Self;
    /// Appends all entries of `src` to `self`.
    fn append(&mut self, src: &Self);
    // TODO the `ts2` makes no sense for non-bin-implementors
    /// Appends a single zero entry covering `[ts1, ts2)`.
    fn append_zero(&mut self, ts1: u64, ts2: u64);
}
pub trait Clearable {
@@ -462,7 +469,15 @@ pub trait TimeBins: Send + Unpin + WithLen + Appendable + FilterFittingInside {
}
pub trait TimeBinnableType:
Send + Unpin + RangeOverlapInfo + FilterFittingInside + Appendable + Serialize + ReadableFromFile + FrameTypeStatic
Send
+ Unpin
+ RangeOverlapInfo
+ FilterFittingInside
+ NewEmpty
+ Appendable
+ Serialize
+ ReadableFromFile
+ FrameTypeStatic
{
type Output: TimeBinnableType;
type Aggregator: TimeBinnableTypeAggregator<Input = Self, Output = Self::Output> + Send + Unpin;
@@ -474,33 +489,81 @@ pub trait TimeBinnableType:
// TODO should not require Sync!
// TODO SitemtyFrameType is already supertrait of FramableInner.
pub trait TimeBinnableDyn: FramableInner + SitemtyFrameType + Sync + Send {
fn aggregator_new(&self) -> Box<dyn TimeBinnableDynAggregator>;
pub trait TimeBinnableDyn:
std::fmt::Debug + FramableInner + SitemtyFrameType + WithLen + RangeOverlapInfo + Any + Sync + Send + 'static
{
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinnerDyn>;
fn as_any(&self) -> &dyn Any;
}
/// Marker trait for containers which do not yet have a real time-binning
/// implementation; they receive a stub `TimeBinnableDyn` via the blanket impl below.
pub trait TimeBinnableDynStub:
    std::fmt::Debug + FramableInner + SitemtyFrameType + WithLen + RangeOverlapInfo + Any + Sync + Send + 'static
{
}
// impl for the stubs TODO: remove
impl<T> TimeBinnableDyn for T
where
    T: TimeBinnableDynStub,
{
    /// Stub: logs the missing implementation and panics via `err::todoval`.
    fn time_binner_new(&self, _edges: Vec<u64>, _do_time_weight: bool) -> Box<dyn TimeBinnerDyn> {
        error!("TODO impl time_binner_new for T {}", std::any::type_name::<T>());
        err::todoval()
    }

    /// Enables downcasting to the concrete container type.
    fn as_any(&self) -> &dyn Any {
        self as &dyn Any
    }
}
// TODO maybe this is no longer needed:
pub trait TimeBinnableDynAggregator: Send {
fn ingest(&mut self, item: &dyn TimeBinnableDyn);
fn result(&mut self) -> Box<dyn TimeBinned>;
}
/// Container of some form of events, for use as trait object.
pub trait EventsDyn: TimeBinnableDyn {}
pub trait EventsDyn: TimeBinnableDyn {
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn;
}
/// Data in time-binned form.
pub trait TimeBinned: TimeBinnableDyn {
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn;
fn workaround_clone(&self) -> Box<dyn TimeBinned>;
fn dummy_test_i32(&self) -> i32;
fn edges_slice(&self) -> (&[u64], &[u64]);
fn counts(&self) -> &[u64];
fn mins(&self) -> Vec<f32>;
fn maxs(&self) -> Vec<f32>;
fn avgs(&self) -> Vec<f32>;
}
// TODO this impl is already covered by the generic one:
/*impl FramableInner for Box<dyn TimeBinned> {
fn _dummy(&self) {}
}*/
impl WithLen for Box<dyn TimeBinned> {
    /// Delegates to the boxed trait object's own length.
    fn len(&self) -> usize {
        self.as_time_binnable_dyn().len()
    }
}
impl RangeOverlapInfo for Box<dyn TimeBinned> {
    /// All overlap queries delegate to the boxed trait object.
    fn ends_before(&self, range: NanoRange) -> bool {
        self.as_time_binnable_dyn().ends_before(range)
    }

    fn ends_after(&self, range: NanoRange) -> bool {
        self.as_time_binnable_dyn().ends_after(range)
    }

    fn starts_after(&self, range: NanoRange) -> bool {
        self.as_time_binnable_dyn().starts_after(range)
    }
}
impl TimeBinnableDyn for Box<dyn TimeBinned> {
fn aggregator_new(&self) -> Box<dyn TimeBinnableDynAggregator> {
self.as_time_binnable_dyn().aggregator_new()
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinnerDyn> {
self.as_time_binnable_dyn().time_binner_new(edges, do_time_weight)
}
fn as_any(&self) -> &dyn Any {
self as &dyn Any
}
}
@@ -621,3 +684,119 @@ pub fn inspect_timestamps(events: &dyn TimestampInspectable, range: NanoRange) -
}
buf
}
/// Drives time-binning of a stream of binnable items over a fixed edge list.
pub trait TimeBinnerDyn: Send {
    /// Number of fully finished bins that can be fetched via `Self::bins_ready`.
    fn bins_ready_count(&self) -> usize;
    /// Takes the finished bins accumulated so far, if any.
    fn bins_ready(&mut self) -> Option<Box<dyn TimeBinned>>;
    /// Feeds the next chunk of input into the binner.
    fn ingest(&mut self, item: &dyn TimeBinnableDyn);
    /// Caller indicates that there will be no more data for the current bin.
    /// Implementor is expected to prepare processing the next bin.
    /// The next call to `Self::bins_ready_count` must return one higher count than before.
    fn cycle(&mut self);
}
pub fn empty_events_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box<dyn TimeBinnableDyn> {
match shape {
Shape::Scalar => match agg_kind {
AggKind::TimeWeightedScalar => {
use ScalarType::*;
type K<T> = scalarevents::ScalarEvents<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
U16 => Box::new(K::<u16>::empty()),
U32 => Box::new(K::<u32>::empty()),
U64 => Box::new(K::<u64>::empty()),
I8 => Box::new(K::<i8>::empty()),
I16 => Box::new(K::<i16>::empty()),
I32 => Box::new(K::<i32>::empty()),
I64 => Box::new(K::<i64>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
_ => err::todoval(),
}
}
_ => err::todoval(),
},
Shape::Wave(_n) => match agg_kind {
AggKind::DimXBins1 => {
use ScalarType::*;
type K<T> = waveevents::WaveEvents<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
_ => err::todoval(),
}
}
_ => err::todoval(),
},
Shape::Image(..) => err::todoval(),
}
}
pub fn empty_binned_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box<dyn TimeBinnableDyn> {
match shape {
Shape::Scalar => match agg_kind {
AggKind::TimeWeightedScalar => {
use ScalarType::*;
type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
U16 => Box::new(K::<u16>::empty()),
U32 => Box::new(K::<u32>::empty()),
U64 => Box::new(K::<u64>::empty()),
I8 => Box::new(K::<i8>::empty()),
I16 => Box::new(K::<i16>::empty()),
I32 => Box::new(K::<i32>::empty()),
I64 => Box::new(K::<i64>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
_ => err::todoval(),
}
}
_ => err::todoval(),
},
Shape::Wave(_n) => match agg_kind {
AggKind::DimXBins1 => {
use ScalarType::*;
type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
_ => err::todoval(),
}
}
_ => err::todoval(),
},
Shape::Image(..) => err::todoval(),
}
}
#[test]
fn bin_binned_01() {
    // Re-bin two adjacent 10 s input bins onto the same two 10 s output bins.
    use binsdim0::MinMaxAvgDim0Bins;
    let edges = vec![SEC * 1000, SEC * 1010, SEC * 1020];
    let inp0 = <MinMaxAvgDim0Bins<u32> as NewEmpty>::empty(Shape::Scalar);
    let mut time_binner = inp0.time_binner_new(edges, true);
    let inp1 = MinMaxAvgDim0Bins::<u32> {
        ts1s: vec![SEC * 1000, SEC * 1010],
        ts2s: vec![SEC * 1010, SEC * 1020],
        counts: vec![1, 1],
        mins: vec![3, 4],
        maxs: vec![10, 9],
        avgs: vec![7., 6.],
    };
    assert_eq!(time_binner.bins_ready_count(), 0);
    time_binner.ingest(&inp1);
    // Ingest finalizes a bin as soon as the input extends past it.
    assert_eq!(time_binner.bins_ready_count(), 1);
    time_binner.cycle();
    assert_eq!(time_binner.bins_ready_count(), 2);
    time_binner.cycle();
    // NOTE(review): presumably a cycle beyond the edge list adds no bin — confirm and re-enable:
    //assert_eq!(time_binner.bins_ready_count(), 2);
    let bins = time_binner.bins_ready().expect("bins should be ready");
    eprintln!("bins: {:?}", bins);
    assert_eq!(bins.counts().len(), 2);
    // Fetching the bins drains the ready queue.
    assert_eq!(time_binner.bins_ready_count(), 0);
}

View File

@@ -115,6 +115,7 @@ pub trait NumOps:
+ AsPrimF32
+ Send
+ Sync
+ 'static
+ Unpin
+ Debug
+ Zero

View File

@@ -50,6 +50,11 @@ impl Appendable for ScalarPlainEvents {
}
})
}
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
// TODO can this implement Appendable in a sane way? Do we need it?
err::todo();
}
}
impl PushableIndex for ScalarPlainEvents {
@@ -157,6 +162,11 @@ impl Appendable for WavePlainEvents {
_ => panic!(),
} })
}
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
// TODO can this implement Appendable in a sane way? Do we need it?
err::todo();
}
}
impl PushableIndex for WavePlainEvents {
@@ -253,6 +263,11 @@ impl Appendable for PlainEvents {
},
}
}
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
// TODO can this implement Appendable in a sane way? Do we need it?
err::todo();
}
}
impl PushableIndex for PlainEvents {

View File

@@ -3,13 +3,16 @@ use crate::numops::NumOps;
use crate::streams::{Collectable, Collector};
use crate::{
pulse_offs_from_abs, ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn,
FilterFittingInside, Fits, FitsInside, FrameTypeStatic, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile,
SitemtyFrameType, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
FilterFittingInside, Fits, FitsInside, FrameTypeStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv,
ReadableFromFile, SitemtyFrameType, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinnerDyn,
WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
use netpod::NanoRange;
use netpod::{NanoRange, Shape};
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use tokio::fs::File;
@@ -195,6 +198,16 @@ where
}
}
impl<NTY> NewEmpty for ScalarEvents<NTY> {
    /// Creates an events container with no events; scalar events carry no
    /// per-event shape, so `_shape` is ignored.
    fn empty(_shape: Shape) -> Self {
        Self {
            tss: vec![],
            pulses: vec![],
            values: vec![],
        }
    }
}
impl<NTY> Appendable for ScalarEvents<NTY>
where
NTY: NumOps,
@@ -206,6 +219,12 @@ where
fn append(&mut self, src: &Self) {
self.extend_from_slice(src);
}
/// Appends a single placeholder event at `ts1` with pulse 0 and a zero value.
/// `_ts2` only makes sense for binned containers and is ignored here.
fn append_zero(&mut self, ts1: u64, _ts2: u64) {
    self.tss.push(ts1);
    self.pulses.push(0);
    self.values.push(NTY::zero());
}
}
impl<NTY> Clearable for ScalarEvents<NTY> {
@@ -335,14 +354,26 @@ where
pub struct EventValuesAggregator<NTY> {
range: NanoRange,
count: u64,
min: Option<NTY>,
max: Option<NTY>,
min: NTY,
max: NTY,
sumc: u64,
sum: f32,
int_ts: u64,
last_ts: u64,
last_val: Option<NTY>,
do_time_weight: bool,
events_taken_count: u64,
events_ignored_count: u64,
}
impl<NTY> Drop for EventValuesAggregator<NTY> {
    /// Reports the taken/ignored event counters when the aggregator goes away.
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        // NOTE(review): this warns on every drop, which can be noisy in production.
        warn!(
            "taken {} ignored {}",
            self.events_taken_count, self.events_ignored_count
        );
    }
}
impl<NTY> EventValuesAggregator<NTY>
@@ -354,39 +385,32 @@ where
Self {
range,
count: 0,
min: None,
max: None,
min: NTY::zero(),
max: NTY::zero(),
sum: 0f32,
sumc: 0,
int_ts,
last_ts: 0,
last_val: None,
do_time_weight,
events_taken_count: 0,
events_ignored_count: 0,
}
}
// TODO reduce clone.. optimize via more traits to factor the trade-offs?
fn apply_min_max(&mut self, val: NTY) {
self.min = match &self.min {
None => Some(val.clone()),
Some(min) => {
if &val < min {
Some(val.clone())
} else {
Some(min.clone())
}
if self.count == 0 {
self.min = val.clone();
self.max = val;
} else {
if val < self.min {
self.min = val.clone();
}
};
self.max = match &self.max {
None => Some(val),
Some(max) => {
if &val > max {
Some(val)
} else {
Some(max.clone())
}
if val > self.max {
self.max = val;
}
};
}
}
fn apply_event_unweight(&mut self, val: NTY) {
@@ -428,10 +452,14 @@ where
let ts = item.tss[i1];
let val = item.values[i1].clone();
if ts < self.range.beg {
self.events_ignored_count += 1;
} else if ts >= self.range.end {
self.events_ignored_count += 1;
return;
} else {
self.apply_event_unweight(val);
self.count += 1;
self.events_taken_count += 1;
}
}
}
@@ -441,11 +469,11 @@ where
let ts = item.tss[i1];
let val = item.values[i1].clone();
if ts < self.int_ts {
debug!("just set int_ts");
self.events_ignored_count += 1;
self.last_ts = ts;
self.last_val = Some(val);
} else if ts >= self.range.end {
debug!("after range");
self.events_ignored_count += 1;
return;
} else {
debug!("regular");
@@ -453,15 +481,16 @@ where
self.count += 1;
self.last_ts = ts;
self.last_val = Some(val);
self.events_taken_count += 1;
}
}
}
fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins<NTY> {
let avg = if self.sumc == 0 {
None
0f32
} else {
Some(self.sum / self.sumc as f32)
self.sum / self.sumc as f32
};
let ret = MinMaxAvgDim0Bins {
ts1s: vec![self.range.beg],
@@ -474,8 +503,8 @@ where
self.int_ts = range.beg;
self.range = range;
self.count = 0;
self.min = None;
self.max = None;
self.min = NTY::zero();
self.max = NTY::zero();
self.sum = 0f32;
self.sumc = 0;
ret
@@ -491,7 +520,7 @@ where
}
let avg = {
let sc = self.range.delta() as f32 * 1e-9;
Some(self.sum / sc)
self.sum / sc
};
let ret = MinMaxAvgDim0Bins {
ts1s: vec![self.range.beg],
@@ -504,8 +533,8 @@ where
self.int_ts = range.beg;
self.range = range;
self.count = 0;
self.min = None;
self.max = None;
self.min = NTY::zero();
self.max = NTY::zero();
self.sum = 0f32;
self.sumc = 0;
ret
@@ -555,10 +584,162 @@ where
}
}
impl<NTY: NumOps> TimeBinnableDyn for ScalarEvents<NTY> {
fn aggregator_new(&self) -> Box<dyn crate::TimeBinnableDynAggregator> {
todo!()
impl<NTY: NumOps + 'static> TimeBinnableDyn for ScalarEvents<NTY> {
    /// Create a time binner that bins these scalar events into the bins given by `edges`.
    ///
    /// `edges` are nanosecond timestamps; consecutive pairs form the bin ranges.
    /// `do_time_weight` selects time-weighted aggregation in the underlying aggregator.
    fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinnerDyn> {
        // Log once through the logging facade; the former duplicate `eprintln!`
        // of the same message was debug leftover.
        info!("ScalarEvents time_binner_new");
        let ret = ScalarEventsTimeBinner::<NTY>::new(edges.into(), do_time_weight);
        Box::new(ret)
    }

    /// Allow downcasting back to the concrete `ScalarEvents<NTY>`.
    fn as_any(&self) -> &dyn Any {
        self as &dyn Any
    }
}
impl<NTY: NumOps> EventsDyn for ScalarEvents<NTY> {}
impl<NTY: NumOps + 'static> EventsDyn for ScalarEvents<NTY> {
    /// Upcast to the time-binnable trait object.
    fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
        // Unsized coercion handles the cast in return position.
        self
    }
}
/// Bins `ScalarEvents<NTY>` into time bins delimited by a list of edges.
///
/// Created via `ScalarEvents::time_binner_new`; driven through the
/// `TimeBinnerDyn` trait (`ingest`, `cycle`, `bins_ready*`).
pub struct ScalarEventsTimeBinner<NTY: NumOps> {
    // Remaining bin edges (nanosecond timestamps); the current bin is edges[0]..edges[1].
    edges: VecDeque<u64>,
    // Whether the aggregator weights events by the time they cover.
    do_time_weight: bool,
    // Range of the bin currently being filled; a dummy (beg: 1, end: 2) when
    // fewer than two edges remain.
    range: NanoRange,
    // Aggregator for the current bin; lazily created on first ingest.
    agg: Option<EventValuesAggregator<NTY>>,
    // Finished bins, accumulated until the consumer drains them via bins_ready.
    ready: Option<<EventValuesAggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
}
impl<NTY: NumOps> ScalarEventsTimeBinner<NTY> {
fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
let range = if edges.len() >= 2 {
NanoRange {
beg: edges[0],
end: edges[1],
}
} else {
// Using a dummy for this case.
NanoRange { beg: 1, end: 2 }
};
Self {
edges,
do_time_weight,
range,
agg: None,
ready: None,
}
}
// Move the bin from the current aggregator (if any) to our output collection,
// and step forward in our bin list.
fn cycle(&mut self) {
// TODO expand should be derived from AggKind. Is it still required after all?
let expand = true;
if let Some(agg) = self.agg.as_mut() {
let mut h = agg.result_reset(self.range.clone(), expand);
match self.ready.as_mut() {
Some(fin) => {
fin.append(&mut h);
}
None => {
self.ready = Some(h);
}
}
} else {
let mut h = MinMaxAvgDim0Bins::<NTY>::empty();
h.append_zero(self.range.beg, self.range.end);
match self.ready.as_mut() {
Some(fin) => {
fin.append(&mut h);
}
None => {
self.ready = Some(h);
}
}
}
self.edges.pop_front();
if self.edges.len() >= 2 {
self.range = NanoRange {
beg: self.edges[0],
end: self.edges[1],
};
} else {
// Using a dummy for this case.
self.range = NanoRange { beg: 1, end: 2 };
}
}
}
impl<NTY: NumOps + 'static> TimeBinnerDyn for ScalarEventsTimeBinner<NTY> {
fn cycle(&mut self) {
Self::cycle(self)
}
fn ingest(&mut self, item: &dyn TimeBinnableDyn) {
if item.len() == 0 {
// Return already here, RangeOverlapInfo would not give much sense.
return;
}
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for ScalarEventsTimeBinner no more bin in edges A");
return;
}
// TODO optimize by remembering at which event array index we have arrived.
// That needs modified interfaces which can take and yield the start and latest index.
loop {
while item.starts_after(self.range.clone()) {
self.cycle();
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for ScalarEventsTimeBinner no more bin in edges B");
return;
}
}
if item.ends_before(self.range.clone()) {
return;
} else {
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for ScalarEventsTimeBinner edge list exhausted");
return;
} else {
if self.agg.is_none() {
self.agg = Some(EventValuesAggregator::new(self.range.clone(), self.do_time_weight));
}
let agg = self.agg.as_mut().unwrap();
if let Some(item) = item
.as_any()
.downcast_ref::<<EventValuesAggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
{
// TODO collect statistics associated with this request:
agg.ingest(item);
} else {
error!("not correct item type");
};
if item.ends_after(self.range.clone()) {
self.cycle();
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for ScalarEventsTimeBinner no more bin in edges C");
return;
}
} else {
break;
}
}
}
}
}
fn bins_ready_count(&self) -> usize {
match &self.ready {
Some(k) => k.len(),
None => 0,
}
}
fn bins_ready(&mut self) -> Option<Box<dyn crate::TimeBinned>> {
match self.ready.take() {
Some(k) => Some(Box::new(k)),
None => None,
}
}
}

View File

@@ -1,12 +1,12 @@
use crate::streams::{Collectable, Collector};
use crate::{
ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventAppendable, FilterFittingInside, Fits, FitsInside,
FrameTypeStatic, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SitemtyFrameType, TimeBinnableType,
TimeBinnableTypeAggregator, WithLen, WithTimestamps,
FrameTypeStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SitemtyFrameType,
TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
use netpod::NanoRange;
use netpod::{NanoRange, Shape};
use serde::{Deserialize, Serialize};
use std::fmt;
use tokio::fs::File;
@@ -141,6 +141,15 @@ impl PushableIndex for StatsEvents {
}
}
impl NewEmpty for StatsEvents {
    /// Create an empty container; the shape is irrelevant for stats events.
    fn empty(_shape: Shape) -> Self {
        Self {
            tss: vec![],
            pulses: vec![],
        }
    }
}
impl Appendable for StatsEvents {
fn empty_like_self(&self) -> Self {
Self::empty()
@@ -150,6 +159,11 @@ impl Appendable for StatsEvents {
self.tss.extend_from_slice(&src.tss);
self.pulses.extend_from_slice(&src.pulses);
}
fn append_zero(&mut self, ts1: u64, _ts2: u64) {
self.tss.push(ts1);
self.pulses.push(0);
}
}
impl Clearable for StatsEvents {

View File

@@ -4,8 +4,8 @@ use crate::xbinnedscalarevents::XBinnedScalarEvents;
use crate::xbinnedwaveevents::XBinnedWaveEvents;
use crate::{
Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn, EventsNodeProcessor, FilterFittingInside, Fits,
FitsInside, FrameTypeStatic, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SitemtyFrameType, SubFrId,
TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
FitsInside, FrameTypeStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile,
SitemtyFrameType, SubFrId, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
@@ -161,6 +161,16 @@ where
}
}
impl<NTY> NewEmpty for WaveEvents<NTY> {
    /// Create an empty container; the shape is irrelevant for this constructor.
    fn empty(_shape: Shape) -> Self {
        Self {
            tss: vec![],
            pulses: vec![],
            vals: vec![],
        }
    }
}
impl<NTY> Appendable for WaveEvents<NTY>
where
NTY: NumOps,
@@ -173,6 +183,12 @@ where
self.tss.extend_from_slice(&src.tss);
self.vals.extend_from_slice(&src.vals);
}
fn append_zero(&mut self, ts1: u64, _ts2: u64) {
self.tss.push(ts1);
self.pulses.push(0);
self.vals.push(Vec::new());
}
}
impl<NTY> Clearable for WaveEvents<NTY> {
@@ -509,10 +525,10 @@ where
}
}
impl<NTY: NumOps> TimeBinnableDyn for WaveEvents<NTY> {
fn aggregator_new(&self) -> Box<dyn crate::TimeBinnableDynAggregator> {
todo!()
// Marker: WaveEvents gets its TimeBinnableDyn behavior from the stub trait —
// presumably a blanket impl that leaves real time-binning unimplemented for
// wave events. NOTE(review): confirm against the TimeBinnableDynStub definition.
impl<NTY: NumOps> crate::TimeBinnableDynStub for WaveEvents<NTY> {}
impl<NTY: NumOps> EventsDyn for WaveEvents<NTY> {
    /// Upcast to the time-binnable trait object.
    fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
        // Unsized coercion handles the cast in return position.
        self
    }
}
impl<NTY: NumOps> EventsDyn for WaveEvents<NTY> {}

View File

@@ -2,13 +2,13 @@ use crate::binsdim0::MinMaxAvgDim0Bins;
use crate::numops::NumOps;
use crate::streams::{Collectable, Collector};
use crate::{
ts_offs_from_abs, Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, PushableIndex,
RangeOverlapInfo, ReadPbv, ReadableFromFile, SitemtyFrameType, SubFrId, TimeBinnableType,
TimeBinnableTypeAggregator, WithLen, WithTimestamps, FrameTypeStatic,
ts_offs_from_abs, Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, FrameTypeStatic,
NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SitemtyFrameType, SubFrId, TimeBinnableType,
TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
use netpod::NanoRange;
use netpod::{NanoRange, Shape};
use serde::{Deserialize, Serialize};
use tokio::fs::File;
@@ -145,6 +145,17 @@ where
}
}
impl<NTY> NewEmpty for XBinnedScalarEvents<NTY> {
    /// Create an empty container; the shape is irrelevant for this constructor.
    fn empty(_shape: Shape) -> Self {
        Self {
            tss: vec![],
            avgs: vec![],
            mins: vec![],
            maxs: vec![],
        }
    }
}
impl<NTY> Appendable for XBinnedScalarEvents<NTY>
where
NTY: NumOps,
@@ -159,6 +170,13 @@ where
self.maxs.extend_from_slice(&src.maxs);
self.avgs.extend_from_slice(&src.avgs);
}
fn append_zero(&mut self, ts1: u64, _ts2: u64) {
self.tss.push(ts1);
self.mins.push(NTY::zero());
self.maxs.push(NTY::zero());
self.avgs.push(0.);
}
}
impl<NTY> Clearable for XBinnedScalarEvents<NTY> {
@@ -206,8 +224,8 @@ where
{
range: NanoRange,
count: u64,
min: Option<NTY>,
max: Option<NTY>,
min: NTY,
max: NTY,
sumc: u64,
sum: f32,
int_ts: u64,
@@ -227,8 +245,8 @@ where
int_ts: range.beg,
range,
count: 0,
min: None,
max: None,
min: NTY::zero(),
max: NTY::zero(),
sumc: 0,
sum: 0f32,
last_ts: 0,
@@ -240,26 +258,17 @@ where
}
fn apply_min_max(&mut self, min: NTY, max: NTY) {
self.min = match &self.min {
None => Some(min),
Some(cmin) => {
if &min < cmin {
Some(min)
} else {
Some(cmin.clone())
}
if self.count == 0 {
self.min = min;
self.max = max;
} else {
if min < self.min {
self.min = min;
}
};
self.max = match &self.max {
None => Some(max),
Some(cmax) => {
if &max > cmax {
Some(max)
} else {
Some(cmax.clone())
}
if max > self.max {
self.max = max;
}
};
}
}
fn apply_event_unweight(&mut self, avg: f32, min: NTY, max: NTY) {
@@ -330,9 +339,9 @@ where
fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins<NTY> {
let avg = if self.sumc == 0 {
None
0f32
} else {
Some(self.sum / self.sumc as f32)
self.sum / self.sumc as f32
};
let ret = MinMaxAvgDim0Bins {
ts1s: vec![self.range.beg],
@@ -345,8 +354,8 @@ where
self.int_ts = range.beg;
self.range = range;
self.count = 0;
self.min = None;
self.max = None;
self.min = NTY::zero();
self.max = NTY::zero();
self.sum = 0f32;
self.sumc = 0;
ret
@@ -359,7 +368,7 @@ where
}
let avg = {
let sc = self.range.delta() as f32 * 1e-9;
Some(self.sum / sc)
self.sum / sc
};
let ret = MinMaxAvgDim0Bins {
ts1s: vec![self.range.beg],
@@ -372,8 +381,8 @@ where
self.int_ts = range.beg;
self.range = range;
self.count = 0;
self.min = None;
self.max = None;
self.min = NTY::zero();
self.max = NTY::zero();
self.sum = 0f32;
self.sumc = 0;
ret

View File

@@ -2,14 +2,14 @@ use crate::binsdim1::MinMaxAvgDim1Bins;
use crate::numops::NumOps;
use crate::streams::{Collectable, Collector};
use crate::{
Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, FrameTypeStatic, PushableIndex,
RangeOverlapInfo, ReadPbv, ReadableFromFile, SitemtyFrameType, SubFrId, TimeBinnableType,
Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, FrameTypeStatic, NewEmpty,
PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SitemtyFrameType, SubFrId, TimeBinnableType,
TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
use netpod::timeunits::*;
use netpod::NanoRange;
use netpod::{NanoRange, Shape};
use serde::{Deserialize, Serialize};
use std::mem;
use tokio::fs::File;
@@ -147,6 +147,17 @@ where
}
}
impl<NTY> NewEmpty for XBinnedWaveEvents<NTY> {
    /// Create an empty container; the shape is irrelevant for this constructor.
    fn empty(_shape: Shape) -> Self {
        Self {
            tss: vec![],
            avgs: vec![],
            mins: vec![],
            maxs: vec![],
        }
    }
}
impl<NTY> Appendable for XBinnedWaveEvents<NTY>
where
NTY: NumOps,
@@ -161,6 +172,13 @@ where
self.maxs.extend_from_slice(&src.maxs);
self.avgs.extend_from_slice(&src.avgs);
}
fn append_zero(&mut self, ts1: u64, _ts2: u64) {
self.tss.push(ts1);
self.mins.push(Vec::new());
self.maxs.push(Vec::new());
self.avgs.push(Vec::new());
}
}
impl<NTY> Clearable for XBinnedWaveEvents<NTY> {