Move workspace crates into subfolder

This commit is contained in:
Dominik Werder
2023-07-10 14:45:25 +02:00
parent 8938e55f86
commit 30c7fcb1e5
212 changed files with 246 additions and 41 deletions

View File

@@ -0,0 +1,253 @@
use crate::channelevents::ChannelEvents;
use crate::empty::empty_events_dyn_ev;
use crate::ChannelEventsInput;
use crate::Error;
use futures_util::Future;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::collect_s::Collected;
use items_0::collect_s::Collector;
use items_0::collect_s::ToJsonResult;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::timebin::TimeBinnable;
use items_0::timebin::TimeBinner;
use items_0::transform::EventTransform;
use netpod::log::*;
use netpod::BinnedRange;
use netpod::BinnedRangeEnum;
use netpod::ScalarType;
use netpod::Shape;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use std::time::Duration;
use std::time::Instant;
fn flush_binned(
binner: &mut Box<dyn TimeBinner>,
coll: &mut Option<Box<dyn Collector>>,
force: bool,
) -> Result<(), Error> {
trace!("flush_binned bins_ready_count: {}", binner.bins_ready_count());
if force {
if binner.bins_ready_count() == 0 {
debug!("cycle the binner forced");
binner.cycle();
} else {
debug!("bins ready, do not force");
}
}
if binner.bins_ready_count() > 0 {
let ready = binner.bins_ready();
match ready {
Some(mut ready) => {
trace!("binned_collected ready {ready:?}");
if coll.is_none() {
*coll = Some(ready.as_collectable_mut().new_collector());
}
let cl = coll.as_mut().unwrap();
cl.ingest(ready.as_collectable_mut());
Ok(())
}
None => Err(format!("bins_ready_count but no result").into()),
}
} else {
Ok(())
}
}
#[derive(Debug)]
pub struct BinnedCollectedResult {
pub range_final: bool,
pub did_timeout: bool,
pub result: Box<dyn Collected>,
}
fn _old_binned_collected(
scalar_type: ScalarType,
shape: Shape,
binrange: BinnedRangeEnum,
transformer: &dyn EventTransform,
deadline: Instant,
inp: Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>,
) -> Result<BinnedCollectedResult, Error> {
event!(Level::TRACE, "binned_collected");
let transprops = transformer.query_transform_properties();
// TODO use a trait to allow check of unfinished data [hcn2956jxhwsf]
// TODO implement continue-at [hcn2956jxhwsf]
// TODO maybe TimeBinner should take all ChannelEvents and handle this?
let empty_item = empty_events_dyn_ev(&scalar_type, &shape)?;
let tmp_item = Ok(StreamItem::DataItem(RangeCompletableItem::Data(ChannelEvents::Events(
empty_item,
))));
let empty_stream = futures_util::stream::once(futures_util::future::ready(tmp_item));
let mut stream = empty_stream.chain(inp);
todo!()
}
enum BinnedCollectedState {
Init,
Run,
Done,
}
pub struct BinnedCollected {
state: BinnedCollectedState,
binrange: BinnedRangeEnum,
scalar_type: ScalarType,
shape: Shape,
do_time_weight: bool,
did_timeout: bool,
range_final: bool,
coll: Option<Box<dyn Collector>>,
binner: Option<Box<dyn TimeBinner>>,
inp: Pin<Box<dyn ChannelEventsInput>>,
}
impl BinnedCollected {
const fn self_name() -> &'static str {
"BinnedCollected"
}
pub fn new(
binrange: BinnedRangeEnum,
scalar_type: ScalarType,
shape: Shape,
do_time_weight: bool,
//transformer: &dyn Transformer,
deadline: Instant,
inp: Pin<Box<dyn ChannelEventsInput>>,
) -> Self {
Self {
state: BinnedCollectedState::Init,
binrange,
scalar_type,
shape,
do_time_weight,
did_timeout: false,
range_final: false,
coll: None,
binner: None,
inp,
}
}
fn handle_item(&mut self, item: StreamItem<RangeCompletableItem<ChannelEvents>>) -> Result<(), Error> {
match item {
StreamItem::DataItem(k) => match k {
RangeCompletableItem::RangeComplete => {
self.range_final = true;
}
RangeCompletableItem::Data(k) => match k {
ChannelEvents::Events(mut events) => {
if self.binner.is_none() {
let bb = events
.as_time_binnable_mut()
.time_binner_new(self.binrange.clone(), self.do_time_weight);
self.binner = Some(bb);
}
let binner = self.binner.as_mut().unwrap();
trace!("handle_item call binner.ingest");
binner.ingest(events.as_time_binnable_mut());
flush_binned(binner, &mut self.coll, false)?;
}
ChannelEvents::Status(item) => {
trace!("{:?}", item);
}
},
},
StreamItem::Log(item) => {
// TODO collect also errors here?
trace!("{:?}", item);
}
StreamItem::Stats(item) => {
// TODO do something with the stats
trace!("{:?}", item);
}
}
Ok(())
}
fn result(&mut self) -> Result<BinnedCollectedResult, Error> {
if let Some(mut binner) = self.binner.take() {
if self.range_final {
trace!("range_final");
binner.set_range_complete();
} else {
debug!("not range_final");
}
if self.did_timeout {
warn!("did_timeout");
} else {
trace!("not did_timeout");
binner.cycle();
}
flush_binned(&mut binner, &mut self.coll, false)?;
if self.coll.is_none() {
debug!("force a bin");
flush_binned(&mut binner, &mut self.coll, true)?;
} else {
trace!("coll is already some");
}
} else {
error!("no binner, should always have one");
}
let result = match self.coll.take() {
Some(mut coll) => {
let res = coll
.result(None, Some(self.binrange.clone()))
.map_err(|e| format!("{e}"))?;
res
}
None => {
error!("binned_collected nothing collected");
return Err(Error::from(format!("binned_collected nothing collected")));
}
};
let ret = BinnedCollectedResult {
range_final: self.range_final,
did_timeout: self.did_timeout,
result,
};
Ok(ret)
}
}
impl Future for BinnedCollected {
type Output = Result<BinnedCollectedResult, Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let span = span!(Level::INFO, BinnedCollected::self_name());
let _spg = span.enter();
use Poll::*;
loop {
break match &self.state {
BinnedCollectedState::Init => {
self.state = BinnedCollectedState::Run;
continue;
}
BinnedCollectedState::Run => match self.inp.poll_next_unpin(cx) {
Ready(Some(Ok(item))) => match self.handle_item(item) {
Ok(()) => continue,
Err(e) => {
self.state = BinnedCollectedState::Done;
Ready(Err(e))
}
},
Ready(Some(Err(e))) => {
self.state = BinnedCollectedState::Done;
Ready(Err(e.into()))
}
Ready(None) => {
self.state = BinnedCollectedState::Done;
Ready(self.result())
}
Pending => Pending,
},
BinnedCollectedState::Done => Ready(Err(Error::from(format!("already done")))),
};
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,873 @@
use crate::ts_offs_from_abs;
use crate::ts_offs_from_abs_with_anchor;
use crate::IsoDateTime;
use crate::RangeOverlapInfo;
use crate::TimeBinnableType;
use crate::TimeBinnableTypeAggregator;
use chrono::{TimeZone, Utc};
use err::Error;
use items_0::collect_s::Collectable;
use items_0::collect_s::CollectableType;
use items_0::collect_s::Collected;
use items_0::collect_s::CollectorType;
use items_0::collect_s::ToJsonResult;
use items_0::scalar_ops::AsPrimF32;
use items_0::scalar_ops::ScalarOps;
use items_0::timebin::TimeBinnable;
use items_0::timebin::TimeBinned;
use items_0::timebin::TimeBinner;
use items_0::timebin::TimeBins;
use items_0::AppendEmptyBin;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::Empty;
use items_0::Resettable;
use items_0::TypeName;
use items_0::WithLen;
use netpod::is_false;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::SEC;
use netpod::BinnedRangeEnum;
use netpod::CmpZero;
use netpod::Dim0Kind;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use std::mem;
use std::ops::Range;
#[allow(unused)]
macro_rules! trace4 {
($($arg:tt)*) => ();
($($arg:tt)*) => (eprintln!($($arg)*));
}
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct BinsXbinDim0<NTY> {
ts1s: VecDeque<u64>,
ts2s: VecDeque<u64>,
counts: VecDeque<u64>,
mins: VecDeque<NTY>,
maxs: VecDeque<NTY>,
avgs: VecDeque<f32>,
// TODO could consider more variables:
// ts min/max, pulse min/max, avg of mins, avg of maxs, variances, etc...
dim0kind: Option<Dim0Kind>,
}
impl<STY> TypeName for BinsXbinDim0<STY> {
fn type_name(&self) -> String {
any::type_name::<Self>().into()
}
}
impl<NTY> fmt::Debug for BinsXbinDim0<NTY>
where
NTY: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let self_name = any::type_name::<Self>();
write!(
fmt,
"{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
self.ts1s.len(),
self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
self.counts,
self.mins,
self.maxs,
self.avgs,
)
}
}
impl<NTY: ScalarOps> BinsXbinDim0<NTY> {
pub fn from_content(
ts1s: VecDeque<u64>,
ts2s: VecDeque<u64>,
counts: VecDeque<u64>,
mins: VecDeque<NTY>,
maxs: VecDeque<NTY>,
avgs: VecDeque<f32>,
) -> Self {
Self {
ts1s,
ts2s,
counts,
mins,
maxs,
avgs,
dim0kind: None,
}
}
pub fn counts(&self) -> &VecDeque<u64> {
&self.counts
}
pub fn push(&mut self, ts1: u64, ts2: u64, count: u64, min: NTY, max: NTY, avg: f32) {
self.ts1s.push_back(ts1);
self.ts2s.push_back(ts2);
self.counts.push_back(count);
self.mins.push_back(min);
self.maxs.push_back(max);
self.avgs.push_back(avg);
}
pub fn append_zero(&mut self, beg: u64, end: u64) {
self.ts1s.push_back(beg);
self.ts2s.push_back(end);
self.counts.push_back(0);
self.mins.push_back(NTY::zero_b());
self.maxs.push_back(NTY::zero_b());
self.avgs.push_back(0.);
}
pub fn append_all_from(&mut self, src: &mut Self) {
self.ts1s.extend(src.ts1s.drain(..));
self.ts2s.extend(src.ts2s.drain(..));
self.counts.extend(src.counts.drain(..));
self.mins.extend(src.mins.drain(..));
self.maxs.extend(src.maxs.drain(..));
self.avgs.extend(src.avgs.drain(..));
}
pub fn equal_slack(&self, other: &Self) -> bool {
for (&a, &b) in self.ts1s.iter().zip(other.ts1s.iter()) {
if a != b {
return false;
}
}
for (&a, &b) in self.ts2s.iter().zip(other.ts2s.iter()) {
if a != b {
return false;
}
}
for (a, b) in self.mins.iter().zip(other.mins.iter()) {
if !a.equal_slack(b) {
return false;
}
}
for (a, b) in self.maxs.iter().zip(other.maxs.iter()) {
if !a.equal_slack(b) {
return false;
}
}
for (a, b) in self.avgs.iter().zip(other.avgs.iter()) {
if !a.equal_slack(b) {
return false;
}
}
true
}
}
impl<NTY> AsAnyRef for BinsXbinDim0<NTY>
where
NTY: ScalarOps,
{
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<STY> AsAnyMut for BinsXbinDim0<STY>
where
STY: ScalarOps,
{
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
}
impl<STY> Empty for BinsXbinDim0<STY> {
fn empty() -> Self {
Self {
ts1s: VecDeque::new(),
ts2s: VecDeque::new(),
counts: VecDeque::new(),
mins: VecDeque::new(),
maxs: VecDeque::new(),
avgs: VecDeque::new(),
dim0kind: None,
}
}
}
impl<STY> WithLen for BinsXbinDim0<STY> {
fn len(&self) -> usize {
self.ts1s.len()
}
}
impl<STY> Resettable for BinsXbinDim0<STY> {
fn reset(&mut self) {
self.ts1s.clear();
self.ts2s.clear();
self.counts.clear();
self.mins.clear();
self.maxs.clear();
self.avgs.clear();
}
}
impl<NTY> RangeOverlapInfo for BinsXbinDim0<NTY> {
fn ends_before(&self, range: &SeriesRange) -> bool {
todo!()
}
fn ends_after(&self, range: &SeriesRange) -> bool {
todo!()
}
fn starts_after(&self, range: &SeriesRange) -> bool {
todo!()
}
}
impl<NTY: ScalarOps> AppendEmptyBin for BinsXbinDim0<NTY> {
fn append_empty_bin(&mut self, ts1: u64, ts2: u64) {
self.ts1s.push_back(ts1);
self.ts2s.push_back(ts2);
self.counts.push_back(0);
self.mins.push_back(NTY::zero_b());
self.maxs.push_back(NTY::zero_b());
self.avgs.push_back(0.);
}
}
impl<NTY: ScalarOps> TimeBins for BinsXbinDim0<NTY> {
fn ts_min(&self) -> Option<u64> {
self.ts1s.front().map(Clone::clone)
}
fn ts_max(&self) -> Option<u64> {
self.ts2s.back().map(Clone::clone)
}
fn ts_min_max(&self) -> Option<(u64, u64)> {
if let (Some(min), Some(max)) = (self.ts1s.front().map(Clone::clone), self.ts2s.back().map(Clone::clone)) {
Some((min, max))
} else {
None
}
}
}
impl<NTY: ScalarOps> TimeBinnableType for BinsXbinDim0<NTY> {
type Output = BinsXbinDim0<NTY>;
type Aggregator = BinsXbinDim0Aggregator<NTY>;
fn aggregator(range: SeriesRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
/*let self_name = any::type_name::<Self>();
debug!(
"TimeBinnableType for {self_name} aggregator() range {:?} x_bin_count {} do_time_weight {}",
range, x_bin_count, do_time_weight
);
Self::Aggregator::new(range, do_time_weight)*/
todo!()
}
}
// TODO rename to BinsDim0CollectorOutput
#[derive(Debug, Serialize, Deserialize)]
pub struct BinsXbinDim0CollectedResult<NTY> {
#[serde(rename = "tsAnchor")]
ts_anchor_sec: u64,
#[serde(rename = "ts1Ms")]
ts1_off_ms: VecDeque<u64>,
#[serde(rename = "ts2Ms")]
ts2_off_ms: VecDeque<u64>,
#[serde(rename = "ts1Ns")]
ts1_off_ns: VecDeque<u64>,
#[serde(rename = "ts2Ns")]
ts2_off_ns: VecDeque<u64>,
#[serde(rename = "counts")]
counts: VecDeque<u64>,
#[serde(rename = "mins")]
mins: VecDeque<NTY>,
#[serde(rename = "maxs")]
maxs: VecDeque<NTY>,
#[serde(rename = "avgs")]
avgs: VecDeque<f32>,
#[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
range_final: bool,
#[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
timed_out: bool,
#[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")]
missing_bins: u32,
#[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
continue_at: Option<IsoDateTime>,
#[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")]
finished_at: Option<IsoDateTime>,
}
impl<NTY> AsAnyRef for BinsXbinDim0CollectedResult<NTY>
where
NTY: ScalarOps,
{
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<NTY> AsAnyMut for BinsXbinDim0CollectedResult<NTY>
where
NTY: ScalarOps,
{
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
}
impl<NTY: ScalarOps> WithLen for BinsXbinDim0CollectedResult<NTY> {
fn len(&self) -> usize {
self.mins.len()
}
}
impl<NTY: ScalarOps> Collected for BinsXbinDim0CollectedResult<NTY> {}
impl<NTY> BinsXbinDim0CollectedResult<NTY> {
pub fn ts_anchor_sec(&self) -> u64 {
self.ts_anchor_sec
}
pub fn ts1_off_ms(&self) -> &VecDeque<u64> {
&self.ts1_off_ms
}
pub fn ts2_off_ms(&self) -> &VecDeque<u64> {
&self.ts2_off_ms
}
pub fn counts(&self) -> &VecDeque<u64> {
&self.counts
}
pub fn range_final(&self) -> bool {
self.range_final
}
pub fn missing_bins(&self) -> u32 {
self.missing_bins
}
pub fn continue_at(&self) -> Option<IsoDateTime> {
self.continue_at.clone()
}
pub fn mins(&self) -> &VecDeque<NTY> {
&self.mins
}
pub fn maxs(&self) -> &VecDeque<NTY> {
&self.maxs
}
}
impl<NTY: ScalarOps> ToJsonResult for BinsXbinDim0CollectedResult<NTY> {
fn to_json_result(&self) -> Result<Box<dyn items_0::collect_s::ToJsonBytes>, Error> {
let k = serde_json::to_value(self)?;
Ok(Box::new(k))
}
}
#[derive(Debug)]
pub struct BinsXbinDim0Collector<NTY> {
vals: BinsXbinDim0<NTY>,
timed_out: bool,
range_final: bool,
}
impl<NTY> BinsXbinDim0Collector<NTY> {
pub fn new() -> Self {
Self {
vals: BinsXbinDim0::empty(),
timed_out: false,
range_final: false,
}
}
}
impl<NTY> WithLen for BinsXbinDim0Collector<NTY> {
fn len(&self) -> usize {
self.vals.ts1s.len()
}
}
impl<NTY: ScalarOps> CollectorType for BinsXbinDim0Collector<NTY> {
type Input = BinsXbinDim0<NTY>;
type Output = BinsXbinDim0CollectedResult<NTY>;
fn ingest(&mut self, src: &mut Self::Input) {
trace!("\n\n----------- BinsXbinDim0Collector ingest\n{:?}\n\n", src);
// TODO could be optimized by non-contiguous container.
self.vals.ts1s.append(&mut src.ts1s);
self.vals.ts2s.append(&mut src.ts2s);
self.vals.counts.append(&mut src.counts);
self.vals.mins.append(&mut src.mins);
self.vals.maxs.append(&mut src.maxs);
self.vals.avgs.append(&mut src.avgs);
}
fn set_range_complete(&mut self) {
self.range_final = true;
}
fn set_timed_out(&mut self) {
self.timed_out = true;
}
fn result(
&mut self,
_range: std::option::Option<SeriesRange>,
binrange: Option<BinnedRangeEnum>,
) -> Result<Self::Output, Error> {
let bin_count_exp = if let Some(r) = &binrange {
r.bin_count() as u32
} else {
0
};
let bin_count = self.vals.ts1s.len() as u32;
let (missing_bins, continue_at, finished_at) = if self.range_final {
if bin_count < bin_count_exp {
match self.vals.ts2s.back() {
Some(&k) => {
let missing_bins = bin_count_exp - bin_count;
let continue_at = IsoDateTime(Utc.timestamp_nanos(k as i64));
let u = k + (k - self.vals.ts1s.back().unwrap()) * missing_bins as u64;
let finished_at = IsoDateTime(Utc.timestamp_nanos(u as i64));
(missing_bins, Some(continue_at), Some(finished_at))
}
None => {
warn!("can not determine continue-at parameters");
(0, None, None)
}
}
} else {
(0, None, None)
}
} else {
(0, None, None)
};
if self.vals.ts1s.as_slices().1.len() != 0 {
panic!();
}
if self.vals.ts2s.as_slices().1.len() != 0 {
panic!();
}
let tst1 = ts_offs_from_abs(self.vals.ts1s.as_slices().0);
let tst2 = ts_offs_from_abs_with_anchor(tst1.0, self.vals.ts2s.as_slices().0);
let counts = mem::replace(&mut self.vals.counts, VecDeque::new());
let mins = mem::replace(&mut self.vals.mins, VecDeque::new());
let maxs = mem::replace(&mut self.vals.maxs, VecDeque::new());
let avgs = mem::replace(&mut self.vals.avgs, VecDeque::new());
let ret = BinsXbinDim0CollectedResult::<NTY> {
ts_anchor_sec: tst1.0,
ts1_off_ms: tst1.1,
ts1_off_ns: tst1.2,
ts2_off_ms: tst2.0,
ts2_off_ns: tst2.1,
counts,
mins,
maxs,
avgs,
range_final: self.range_final,
timed_out: self.timed_out,
missing_bins,
continue_at,
finished_at,
};
Ok(ret)
}
}
impl<NTY: ScalarOps> CollectableType for BinsXbinDim0<NTY> {
type Collector = BinsXbinDim0Collector<NTY>;
fn new_collector() -> Self::Collector {
Self::Collector::new()
}
}
#[derive(Debug)]
pub struct BinsXbinDim0Aggregator<NTY> {
range: SeriesRange,
count: u64,
min: NTY,
max: NTY,
// Carry over to next bin:
avg: f32,
sumc: u64,
sum: f32,
}
impl<NTY: ScalarOps> BinsXbinDim0Aggregator<NTY> {
pub fn new(range: SeriesRange, _do_time_weight: bool) -> Self {
Self {
range,
count: 0,
min: NTY::zero_b(),
max: NTY::zero_b(),
avg: 0.,
sumc: 0,
sum: 0f32,
}
}
}
impl<NTY: ScalarOps> TimeBinnableTypeAggregator for BinsXbinDim0Aggregator<NTY> {
type Input = BinsXbinDim0<NTY>;
type Output = BinsXbinDim0<NTY>;
fn range(&self) -> &SeriesRange {
&self.range
}
fn ingest(&mut self, item: &Self::Input) {
/*for i1 in 0..item.ts1s.len() {
if item.counts[i1] == 0 {
} else if item.ts2s[i1] <= self.range.beg {
} else if item.ts1s[i1] >= self.range.end {
} else {
if self.count == 0 {
self.min = item.mins[i1].clone();
self.max = item.maxs[i1].clone();
} else {
if self.min > item.mins[i1] {
self.min = item.mins[i1].clone();
}
if self.max < item.maxs[i1] {
self.max = item.maxs[i1].clone();
}
}
self.count += item.counts[i1];
self.sum += item.avgs[i1];
self.sumc += 1;
}
}*/
todo!()
}
fn result_reset(&mut self, range: SeriesRange) -> Self::Output {
/*if self.sumc > 0 {
self.avg = self.sum / self.sumc as f32;
}
let ret = Self::Output {
ts1s: [self.range.beg].into(),
ts2s: [self.range.end].into(),
counts: [self.count].into(),
mins: [self.min.clone()].into(),
maxs: [self.max.clone()].into(),
avgs: [self.avg].into(),
};
self.range = range;
self.count = 0;
self.sum = 0f32;
self.sumc = 0;
ret*/
todo!()
}
}
impl<NTY: ScalarOps> TimeBinnable for BinsXbinDim0<NTY> {
fn time_binner_new(&self, binrange: BinnedRangeEnum, do_time_weight: bool) -> Box<dyn TimeBinner> {
let ret = BinsXbinDim0TimeBinner::<NTY>::new(binrange, do_time_weight);
Box::new(ret)
}
fn to_box_to_json_result(&self) -> Box<dyn ToJsonResult> {
let k = serde_json::to_value(self).unwrap();
Box::new(k)
}
}
#[derive(Debug)]
pub struct BinsXbinDim0TimeBinner<NTY: ScalarOps> {
binrange: BinnedRangeEnum,
do_time_weight: bool,
agg: Option<BinsXbinDim0Aggregator<NTY>>,
ready: Option<<BinsXbinDim0Aggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
}
impl<NTY: ScalarOps> BinsXbinDim0TimeBinner<NTY> {
fn new(binrange: BinnedRangeEnum, do_time_weight: bool) -> Self {
Self {
binrange,
do_time_weight,
agg: None,
ready: None,
}
}
fn next_bin_range(&mut self) -> Option<NanoRange> {
/*if self.edges.len() >= 2 {
let ret = NanoRange {
beg: self.edges[0],
end: self.edges[1],
};
self.edges.pop_front();
Some(ret)
} else {
None
}
*/
todo!()
}
}
impl<NTY: ScalarOps> TimeBinner for BinsXbinDim0TimeBinner<NTY> {
fn ingest(&mut self, item: &mut dyn TimeBinnable) {
/*let self_name = std::any::type_name::<Self>();
if item.len() == 0 {
// Return already here, RangeOverlapInfo would not give much sense.
return;
}
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for {self_name} no more bin in edges A\n{:?}\n\n", item);
return;
}
// TODO optimize by remembering at which event array index we have arrived.
// That needs modified interfaces which can take and yield the start and latest index.
loop {
while item.starts_after(NanoRange {
beg: 0,
end: self.edges[1],
}) {
self.cycle();
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for {self_name} no more bin in edges B\n{:?}\n\n", item);
return;
}
}
if item.ends_before(NanoRange {
beg: self.edges[0],
end: u64::MAX,
}) {
return;
} else {
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for {self_name} edge list exhausted");
return;
} else {
let agg = if let Some(agg) = self.agg.as_mut() {
agg
} else {
self.agg = Some(BinsXbinDim0Aggregator::new(
// We know here that we have enough edges for another bin.
// and `next_bin_range` will pop the first edge.
self.next_bin_range().unwrap(),
self.do_time_weight,
));
self.agg.as_mut().unwrap()
};
if let Some(item) = item
.as_any_ref()
// TODO make statically sure that we attempt to cast to the correct type here:
.downcast_ref::<<BinsXbinDim0Aggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
{
agg.ingest(item);
} else {
let tyid_item = std::any::Any::type_id(item.as_any_ref());
error!("not correct item type {:?}", tyid_item);
};
if item.ends_after(agg.range().clone()) {
self.cycle();
if self.edges.len() < 2 {
warn!("TimeBinnerDyn for {self_name} no more bin in edges C\n{:?}\n\n", item);
return;
}
} else {
break;
}
}
}
}*/
todo!()
}
fn bins_ready_count(&self) -> usize {
match &self.ready {
Some(k) => k.len(),
None => 0,
}
}
fn bins_ready(&mut self) -> Option<Box<dyn TimeBinned>> {
match self.ready.take() {
Some(k) => Some(Box::new(k)),
None => None,
}
}
// TODO there is too much common code between implementors:
fn push_in_progress(&mut self, push_empty: bool) {
// TODO expand should be derived from AggKind. Is it still required after all?
/*let expand = true;
if let Some(agg) = self.agg.as_mut() {
let dummy_range = NanoRange { beg: 4, end: 5 };
let mut bins = agg.result_reset(dummy_range, expand);
self.agg = None;
assert_eq!(bins.len(), 1);
if push_empty || bins.counts[0] != 0 {
match self.ready.as_mut() {
Some(ready) => {
ready.append_all_from(&mut bins);
}
None => {
self.ready = Some(bins);
}
}
}
}*/
todo!()
}
// TODO there is too much common code between implementors:
fn cycle(&mut self) {
let n = self.bins_ready_count();
self.push_in_progress(true);
if self.bins_ready_count() == n {
if let Some(range) = self.next_bin_range() {
let mut bins = BinsXbinDim0::<NTY>::empty();
bins.append_zero(range.beg, range.end);
match self.ready.as_mut() {
Some(ready) => {
ready.append_all_from(&mut bins);
}
None => {
self.ready = Some(bins);
}
}
if self.bins_ready_count() <= n {
error!("failed to push a zero bin");
}
} else {
warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin");
}
}
}
fn set_range_complete(&mut self) {}
fn empty(&self) -> Box<dyn TimeBinned> {
let ret = <BinsXbinDim0Aggregator<NTY> as TimeBinnableTypeAggregator>::Output::empty();
Box::new(ret)
}
fn append_empty_until_end(&mut self) {
// TODO
todo!();
/*while self.rng.is_some() {
TimeBinnerCommonV0Func::push_in_progress(self, true);
}*/
}
}
impl<NTY: ScalarOps> TimeBinned for BinsXbinDim0<NTY> {
fn clone_box_time_binned(&self) -> Box<dyn TimeBinned> {
Box::new(self.clone())
}
fn as_time_binnable_ref(&self) -> &dyn TimeBinnable {
self
}
fn as_time_binnable_mut(&mut self) -> &mut dyn TimeBinnable {
self
}
fn edges_slice(&self) -> (&[u64], &[u64]) {
if self.ts1s.as_slices().1.len() != 0 {
panic!();
}
if self.ts2s.as_slices().1.len() != 0 {
panic!();
}
(&self.ts1s.as_slices().0, &self.ts2s.as_slices().0)
}
fn counts(&self) -> &[u64] {
// TODO check for contiguous
self.counts.as_slices().0
}
// TODO is Vec needed?
fn mins(&self) -> Vec<f32> {
self.mins.iter().map(|x| x.clone().as_prim_f32_b()).collect()
}
// TODO is Vec needed?
fn maxs(&self) -> Vec<f32> {
self.maxs.iter().map(|x| x.clone().as_prim_f32_b()).collect()
}
// TODO is Vec needed?
fn avgs(&self) -> Vec<f32> {
self.avgs.iter().map(Clone::clone).collect()
}
fn validate(&self) -> Result<(), String> {
use std::fmt::Write;
let mut msg = String::new();
if self.ts1s.len() != self.ts2s.len() {
write!(&mut msg, "ts1s ≠ ts2s\n").unwrap();
}
for (i, ((count, min), max)) in self.counts.iter().zip(&self.mins).zip(&self.maxs).enumerate() {
if min.as_prim_f32_b() < 1. && *count != 0 {
write!(&mut msg, "i {} count {} min {:?} max {:?}\n", i, count, min, max).unwrap();
}
}
if msg.is_empty() {
Ok(())
} else {
Err(msg)
}
}
fn as_collectable_mut(&mut self) -> &mut dyn Collectable {
self
}
fn empty_like_self_box_time_binned(&self) -> Box<dyn TimeBinned> {
Box::new(Self::empty())
}
fn to_simple_bins_f32(&mut self) -> Box<dyn TimeBinned> {
use mem::replace;
let ret = super::binsdim0::BinsDim0::<f32> {
ts1s: replace(&mut self.ts1s, VecDeque::new()),
ts2s: replace(&mut self.ts2s, VecDeque::new()),
counts: replace(&mut self.counts, VecDeque::new()),
mins: self.mins.iter().map(AsPrimF32::as_prim_f32_b).collect(),
maxs: self.maxs.iter().map(AsPrimF32::as_prim_f32_b).collect(),
avgs: replace(&mut self.avgs, VecDeque::new()),
dim0kind: None,
};
Box::new(ret)
}
fn drain_into_tb(&mut self, dst: &mut dyn TimeBinned, range: Range<usize>) -> Result<(), Error> {
// TODO as_any and as_any_mut are declared on unrelated traits. Simplify.
if let Some(dst) = dst.as_any_mut().downcast_mut::<Self>() {
// TODO make it harder to forget new members when the struct may get modified in the future
dst.ts1s.extend(self.ts1s.drain(range.clone()));
dst.ts2s.extend(self.ts2s.drain(range.clone()));
dst.counts.extend(self.counts.drain(range.clone()));
dst.mins.extend(self.mins.drain(range.clone()));
dst.maxs.extend(self.maxs.drain(range.clone()));
dst.avgs.extend(self.avgs.drain(range.clone()));
Ok(())
} else {
let type_name = any::type_name::<Self>();
error!("downcast to {} FAILED", type_name);
Err(Error::with_msg_no_trace(format!("downcast to {} FAILED", type_name)))
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,54 @@
use crate::eventsdim0::EventsDim0;
use crate::eventsdim1::EventsDim1;
use crate::Error;
use items_0::Empty;
use items_0::Events;
use netpod::log::*;
use netpod::ScalarType;
use netpod::Shape;
pub fn empty_events_dyn_ev(scalar_type: &ScalarType, shape: &Shape) -> Result<Box<dyn Events>, Error> {
let ret: Box<dyn Events> = match shape {
Shape::Scalar => {
use ScalarType::*;
type K<T> = EventsDim0<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
U16 => Box::new(K::<u16>::empty()),
U32 => Box::new(K::<u32>::empty()),
U64 => Box::new(K::<u64>::empty()),
I8 => Box::new(K::<i8>::empty()),
I16 => Box::new(K::<i16>::empty()),
I32 => Box::new(K::<i32>::empty()),
I64 => Box::new(K::<i64>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
BOOL => Box::new(K::<bool>::empty()),
STRING => Box::new(K::<String>::empty()),
}
}
Shape::Wave(..) => {
use ScalarType::*;
type K<T> = EventsDim1<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
U16 => Box::new(K::<u16>::empty()),
U32 => Box::new(K::<u32>::empty()),
U64 => Box::new(K::<u64>::empty()),
I8 => Box::new(K::<i8>::empty()),
I16 => Box::new(K::<i16>::empty()),
I32 => Box::new(K::<i32>::empty()),
I64 => Box::new(K::<i64>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
BOOL => Box::new(K::<bool>::empty()),
STRING => Box::new(K::<String>::empty()),
}
}
Shape::Image(..) => {
error!("TODO empty_events_dyn_ev {scalar_type:?} {shape:?}");
err::todoval()
}
};
Ok(ret)
}

View File

@@ -0,0 +1,215 @@
use crate::framable::FrameType;
use crate::merger::Mergeable;
use bytes::BytesMut;
use items_0::container::ByteEstimate;
use items_0::framable::FrameTypeInnerStatic;
use items_0::streamitem::EVENT_FULL_FRAME_TYPE_ID;
use items_0::Empty;
use items_0::MergeError;
use items_0::WithLen;
use netpod::ScalarType;
use netpod::Shape;
use parse::channelconfig::CompressionMethod;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use serde::Serializer;
use std::collections::VecDeque;
#[derive(Debug, Serialize, Deserialize)]
pub struct EventFull {
pub tss: VecDeque<u64>,
pub pulses: VecDeque<u64>,
pub blobs: VecDeque<Option<Vec<u8>>>,
//#[serde(with = "decomps_serde")]
// TODO allow access to `decomps` via method which checks first if `blobs` is already the decomp.
pub decomps: VecDeque<Option<Vec<u8>>>,
pub scalar_types: VecDeque<ScalarType>,
pub be: VecDeque<bool>,
pub shapes: VecDeque<Shape>,
pub comps: VecDeque<Option<CompressionMethod>>,
pub entry_payload_max: u64,
}
#[allow(unused)]
mod decomps_serde {
use super::*;
pub fn serialize<S>(t: &VecDeque<Option<BytesMut>>, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let a: Vec<_> = t
.iter()
.map(|k| match k {
None => None,
Some(j) => Some(j[..].to_vec()),
})
.collect();
Serialize::serialize(&a, s)
}
pub fn deserialize<'de, D>(d: D) -> Result<VecDeque<Option<BytesMut>>, D::Error>
where
D: Deserializer<'de>,
{
let a: Vec<Option<Vec<u8>>> = Deserialize::deserialize(d)?;
let a = a
.iter()
.map(|k| match k {
None => None,
Some(j) => {
let mut a = BytesMut::new();
a.extend_from_slice(&j);
Some(a)
}
})
.collect();
Ok(a)
}
}
impl EventFull {
pub fn add_event(
&mut self,
ts: u64,
pulse: u64,
blob: Option<Vec<u8>>,
decomp: Option<Vec<u8>>,
scalar_type: ScalarType,
be: bool,
shape: Shape,
comp: Option<CompressionMethod>,
) {
let m1 = blob.as_ref().map_or(0, |x| x.len());
let m2 = decomp.as_ref().map_or(0, |x| x.len());
self.entry_payload_max = self.entry_payload_max.max(m1 as u64 + m2 as u64);
self.tss.push_back(ts);
self.pulses.push_back(pulse);
self.blobs.push_back(blob);
self.decomps.push_back(decomp);
self.scalar_types.push_back(scalar_type);
self.be.push_back(be);
self.shapes.push_back(shape);
self.comps.push_back(comp);
}
// TODO possible to get rid of this?
pub fn truncate_ts(&mut self, end: u64) {
let mut nkeep = usize::MAX;
for (i, &ts) in self.tss.iter().enumerate() {
if ts >= end {
nkeep = i;
break;
}
}
self.tss.truncate(nkeep);
self.pulses.truncate(nkeep);
self.blobs.truncate(nkeep);
self.decomps.truncate(nkeep);
self.scalar_types.truncate(nkeep);
self.be.truncate(nkeep);
self.shapes.truncate(nkeep);
self.comps.truncate(nkeep);
}
}
impl FrameTypeInnerStatic for EventFull {
const FRAME_TYPE_ID: u32 = EVENT_FULL_FRAME_TYPE_ID;
}
impl FrameType for EventFull {
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}
impl Empty for EventFull {
fn empty() -> Self {
Self {
tss: VecDeque::new(),
pulses: VecDeque::new(),
blobs: VecDeque::new(),
decomps: VecDeque::new(),
scalar_types: VecDeque::new(),
be: VecDeque::new(),
shapes: VecDeque::new(),
comps: VecDeque::new(),
entry_payload_max: 0,
}
}
}
impl WithLen for EventFull {
fn len(&self) -> usize {
self.tss.len()
}
}
impl ByteEstimate for EventFull {
fn byte_estimate(&self) -> u64 {
self.len() as u64 * (64 + self.entry_payload_max)
}
}
impl Mergeable for EventFull {
fn ts_min(&self) -> Option<u64> {
self.tss.front().map(|&x| x)
}
fn ts_max(&self) -> Option<u64> {
self.tss.back().map(|&x| x)
}
fn new_empty(&self) -> Self {
Empty::empty()
}
fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> {
// TODO make it harder to forget new members when the struct may get modified in the future
let r = range.0..range.1;
let mut max = dst.entry_payload_max;
for i in r.clone() {
let m1 = self.blobs[i].as_ref().map_or(0, |x| x.len());
let m2 = self.decomps[i].as_ref().map_or(0, |x| x.len());
max = max.max(m1 as u64 + m2 as u64);
}
dst.entry_payload_max = max;
dst.tss.extend(self.tss.drain(r.clone()));
dst.pulses.extend(self.pulses.drain(r.clone()));
dst.blobs.extend(self.blobs.drain(r.clone()));
dst.decomps.extend(self.decomps.drain(r.clone()));
dst.scalar_types.extend(self.scalar_types.drain(r.clone()));
dst.be.extend(self.be.drain(r.clone()));
dst.shapes.extend(self.shapes.drain(r.clone()));
dst.comps.extend(self.comps.drain(r.clone()));
Ok(())
}
fn find_lowest_index_gt(&self, ts: u64) -> Option<usize> {
for (i, &m) in self.tss.iter().enumerate() {
if m > ts {
return Some(i);
}
}
None
}
fn find_lowest_index_ge(&self, ts: u64) -> Option<usize> {
for (i, &m) in self.tss.iter().enumerate() {
if m >= ts {
return Some(i);
}
}
None
}
fn find_highest_index_lt(&self, ts: u64) -> Option<usize> {
for (i, &m) in self.tss.iter().enumerate().rev() {
if m < ts {
return Some(i);
}
}
None
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,194 @@
use crate::frame::make_error_frame;
use crate::frame::make_frame_2;
use crate::frame::make_log_frame;
use crate::frame::make_range_complete_frame;
use crate::frame::make_stats_frame;
use bytes::BytesMut;
use err::Error;
use items_0::framable::FrameTypeInnerDyn;
use items_0::framable::FrameTypeInnerStatic;
use items_0::streamitem::LogItem;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StatsItem;
use items_0::streamitem::StreamItem;
use items_0::streamitem::ERROR_FRAME_TYPE_ID;
use items_0::streamitem::EVENT_QUERY_JSON_STRING_FRAME;
use items_0::streamitem::SITEMTY_NONSPEC_FRAME_TYPE_ID;
use items_0::Events;
use serde::de::DeserializeOwned;
use serde::Deserialize;
use serde::Serialize;
pub const INMEM_FRAME_ENCID: u32 = 0x12121212;
pub const INMEM_FRAME_HEAD: usize = 20;
pub const INMEM_FRAME_FOOT: usize = 4;
pub const INMEM_FRAME_MAGIC: u32 = 0xc6c3b73d;
pub trait FrameTypeStatic {
const FRAME_TYPE_ID: u32;
}
impl<T> FrameTypeStatic for Sitemty<T>
where
T: FrameTypeInnerStatic,
{
const FRAME_TYPE_ID: u32 = <T as FrameTypeInnerStatic>::FRAME_TYPE_ID;
}
// Framable trait objects need some inspection to handle the supposed-to-be common Err ser format:
// Meant to be implemented by Sitemty.
pub trait FrameType {
fn frame_type_id(&self) -> u32;
}
impl<T> FrameType for Box<T>
where
T: FrameType,
{
fn frame_type_id(&self) -> u32 {
self.as_ref().frame_type_id()
}
}
impl FrameType for Box<dyn Events> {
fn frame_type_id(&self) -> u32 {
self.as_ref().frame_type_id()
}
}
pub trait Framable {
fn make_frame(&self) -> Result<BytesMut, Error>;
}
pub trait FramableInner: erased_serde::Serialize + FrameTypeInnerDyn + Send {
fn _dummy(&self);
}
impl<T: erased_serde::Serialize + FrameTypeInnerDyn + Send> FramableInner for T {
fn _dummy(&self) {}
}
impl<T> Framable for Sitemty<T>
where
T: Sized + serde::Serialize + FrameType,
{
fn make_frame(&self) -> Result<BytesMut, Error> {
match self {
Ok(StreamItem::DataItem(RangeCompletableItem::Data(k))) => {
let frame_type_id = k.frame_type_id();
make_frame_2(self, frame_type_id)
}
Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)) => make_range_complete_frame(),
Ok(StreamItem::Log(item)) => make_log_frame(item),
Ok(StreamItem::Stats(item)) => make_stats_frame(item),
Err(e) => make_error_frame(e),
}
}
}
impl<T> Framable for Box<T>
where
T: Framable + ?Sized,
{
fn make_frame(&self) -> Result<BytesMut, Error> {
self.as_ref().make_frame()
}
}
pub trait FrameDecodable: FrameTypeStatic + DeserializeOwned {
fn from_error(e: err::Error) -> Self;
fn from_log(item: LogItem) -> Self;
fn from_stats(item: StatsItem) -> Self;
fn from_range_complete() -> Self;
}
impl<T> FrameDecodable for Sitemty<T>
where
T: FrameTypeInnerStatic + DeserializeOwned,
{
fn from_error(e: err::Error) -> Self {
Err(e)
}
fn from_log(item: LogItem) -> Self {
Ok(StreamItem::Log(item))
}
fn from_stats(item: StatsItem) -> Self {
Ok(StreamItem::Stats(item))
}
fn from_range_complete() -> Self {
Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EventQueryJsonStringFrame(pub String);
impl EventQueryJsonStringFrame {
pub fn str(&self) -> &str {
&self.0
}
}
impl FrameTypeInnerStatic for EventQueryJsonStringFrame {
const FRAME_TYPE_ID: u32 = EVENT_QUERY_JSON_STRING_FRAME;
}
impl FrameType for EventQueryJsonStringFrame {
fn frame_type_id(&self) -> u32 {
EventQueryJsonStringFrame::FRAME_TYPE_ID
}
}
impl<T> FrameType for Sitemty<T>
where
T: FrameType,
{
fn frame_type_id(&self) -> u32 {
match self {
Ok(item) => match item {
StreamItem::DataItem(item) => match item {
RangeCompletableItem::RangeComplete => SITEMTY_NONSPEC_FRAME_TYPE_ID,
RangeCompletableItem::Data(item) => item.frame_type_id(),
},
StreamItem::Log(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
StreamItem::Stats(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
},
Err(_) => ERROR_FRAME_TYPE_ID,
}
}
}
#[test]
fn test_frame_log() {
use crate::channelevents::ChannelEvents;
use crate::frame::decode_from_slice;
use netpod::log::Level;
let item = LogItem {
node_ix: 123,
level: Level::TRACE,
msg: format!("test-log-message"),
};
let item: Sitemty<ChannelEvents> = Ok(StreamItem::Log(item));
let buf = Framable::make_frame(&item).unwrap();
let len = u32::from_le_bytes(buf[12..16].try_into().unwrap());
let item2: LogItem = decode_from_slice(&buf[20..20 + len as usize]).unwrap();
}
#[test]
fn test_frame_error() {
use crate::channelevents::ChannelEvents;
use crate::frame::decode_from_slice;
let item: Sitemty<ChannelEvents> = Err(Error::with_msg_no_trace(format!("dummy-error-message")));
let buf = Framable::make_frame(&item).unwrap();
let len = u32::from_le_bytes(buf[12..16].try_into().unwrap());
let tyid = u32::from_le_bytes(buf[8..12].try_into().unwrap());
if tyid != ERROR_FRAME_TYPE_ID {
panic!("bad tyid");
}
eprintln!("buf len {} len {}", buf.len(), len);
let item2: Error = decode_from_slice(&buf[20..20 + len as usize]).unwrap();
}

367
crates/items_2/src/frame.rs Normal file
View File

@@ -0,0 +1,367 @@
use crate::framable::FrameDecodable;
use crate::framable::INMEM_FRAME_ENCID;
use crate::framable::INMEM_FRAME_HEAD;
use crate::framable::INMEM_FRAME_MAGIC;
use crate::inmem::InMemoryFrame;
use bincode::config::FixintEncoding;
use bincode::config::LittleEndian;
use bincode::config::RejectTrailing;
use bincode::config::WithOtherEndian;
use bincode::config::WithOtherIntEncoding;
use bincode::config::WithOtherTrailing;
use bincode::DefaultOptions;
use bytes::BufMut;
use bytes::BytesMut;
use err::Error;
use items_0::bincode;
use items_0::streamitem::LogItem;
use items_0::streamitem::StatsItem;
use items_0::streamitem::ERROR_FRAME_TYPE_ID;
use items_0::streamitem::LOG_FRAME_TYPE_ID;
use items_0::streamitem::RANGE_COMPLETE_FRAME_TYPE_ID;
use items_0::streamitem::STATS_FRAME_TYPE_ID;
use items_0::streamitem::TERM_FRAME_TYPE_ID;
use netpod::log::*;
use serde::Serialize;
use std::any;
use std::io;
trait EC {
fn ec(self) -> err::Error;
}
impl EC for rmp_serde::encode::Error {
fn ec(self) -> err::Error {
err::Error::with_msg_no_trace(format!("{self:?}"))
}
}
impl EC for rmp_serde::decode::Error {
fn ec(self) -> err::Error {
err::Error::with_msg_no_trace(format!("{self:?}"))
}
}
pub fn bincode_ser<W>(
w: W,
) -> bincode::Serializer<
W,
WithOtherTrailing<
WithOtherIntEncoding<WithOtherEndian<DefaultOptions, LittleEndian>, FixintEncoding>,
RejectTrailing,
>,
>
where
W: io::Write,
{
use bincode::Options;
let opts = DefaultOptions::new()
.with_little_endian()
.with_fixint_encoding()
.reject_trailing_bytes();
let ser = bincode::Serializer::new(w, opts);
ser
}
pub fn bincode_to_vec<S>(item: S) -> Result<Vec<u8>, Error>
where
S: Serialize,
{
let mut out = Vec::new();
let mut ser = bincode_ser(&mut out);
item.serialize(&mut ser).map_err(|e| format!("{e}"))?;
Ok(out)
}
pub fn bincode_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
T: for<'de> serde::Deserialize<'de>,
{
use bincode::Options;
let opts = DefaultOptions::new()
.with_little_endian()
.with_fixint_encoding()
.reject_trailing_bytes();
let mut de = bincode::Deserializer::from_slice(buf, opts);
<T as serde::Deserialize>::deserialize(&mut de).map_err(|e| format!("{e}").into())
}
pub fn msgpack_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
T: Serialize,
{
rmp_serde::to_vec_named(&item).map_err(|e| format!("{e}").into())
}
pub fn msgpack_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
T: erased_serde::Serialize,
{
let mut out = Vec::new();
let mut ser1 = rmp_serde::Serializer::new(&mut out).with_struct_map();
let mut ser2 = <dyn erased_serde::Serializer>::erase(&mut ser1);
item.erased_serialize(&mut ser2)
.map_err(|e| Error::from(format!("{e}")))?;
Ok(out)
}
pub fn msgpack_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
T: for<'de> serde::Deserialize<'de>,
{
rmp_serde::from_slice(buf).map_err(|e| format!("{e}").into())
}
pub fn encode_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
T: Serialize,
{
msgpack_to_vec(item)
}
pub fn encode_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
T: erased_serde::Serialize,
{
msgpack_erased_to_vec(item)
}
pub fn decode_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
T: for<'de> serde::Deserialize<'de>,
{
msgpack_from_slice(buf)
}
pub fn make_frame_2<T>(item: T, fty: u32) -> Result<BytesMut, Error>
where
T: erased_serde::Serialize,
{
let enc = encode_erased_to_vec(item)?;
if enc.len() > u32::MAX as usize {
return Err(Error::with_msg(format!("too long payload {}", enc.len())));
}
let mut h = crc32fast::Hasher::new();
h.update(&enc);
let payload_crc = h.finalize();
// TODO reserve also for footer via constant
let mut buf = BytesMut::with_capacity(enc.len() + INMEM_FRAME_HEAD);
buf.put_u32_le(INMEM_FRAME_MAGIC);
buf.put_u32_le(INMEM_FRAME_ENCID);
buf.put_u32_le(fty);
buf.put_u32_le(enc.len() as u32);
buf.put_u32_le(payload_crc);
// TODO add padding to align to 8 bytes.
buf.put(enc.as_ref());
let mut h = crc32fast::Hasher::new();
h.update(&buf);
let frame_crc = h.finalize();
buf.put_u32_le(frame_crc);
return Ok(buf);
}
// TODO remove duplication for these similar `make_*_frame` functions:
pub fn make_error_frame(error: &err::Error) -> Result<BytesMut, Error> {
match encode_to_vec(error) {
Ok(enc) => {
let mut h = crc32fast::Hasher::new();
h.update(&enc);
let payload_crc = h.finalize();
let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD);
buf.put_u32_le(INMEM_FRAME_MAGIC);
buf.put_u32_le(INMEM_FRAME_ENCID);
buf.put_u32_le(ERROR_FRAME_TYPE_ID);
buf.put_u32_le(enc.len() as u32);
buf.put_u32_le(payload_crc);
buf.put(enc.as_ref());
let mut h = crc32fast::Hasher::new();
h.update(&buf);
let frame_crc = h.finalize();
buf.put_u32_le(frame_crc);
Ok(buf)
}
Err(e) => Err(e)?,
}
}
pub fn make_log_frame(item: &LogItem) -> Result<BytesMut, Error> {
match encode_to_vec(item) {
Ok(enc) => {
let mut h = crc32fast::Hasher::new();
h.update(&enc);
let payload_crc = h.finalize();
let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD);
buf.put_u32_le(INMEM_FRAME_MAGIC);
buf.put_u32_le(INMEM_FRAME_ENCID);
buf.put_u32_le(LOG_FRAME_TYPE_ID);
buf.put_u32_le(enc.len() as u32);
buf.put_u32_le(payload_crc);
buf.put(enc.as_ref());
let mut h = crc32fast::Hasher::new();
h.update(&buf);
let frame_crc = h.finalize();
buf.put_u32_le(frame_crc);
Ok(buf)
}
Err(e) => Err(e)?,
}
}
pub fn make_stats_frame(item: &StatsItem) -> Result<BytesMut, Error> {
match encode_to_vec(item) {
Ok(enc) => {
let mut h = crc32fast::Hasher::new();
h.update(&enc);
let payload_crc = h.finalize();
let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD);
buf.put_u32_le(INMEM_FRAME_MAGIC);
buf.put_u32_le(INMEM_FRAME_ENCID);
buf.put_u32_le(STATS_FRAME_TYPE_ID);
buf.put_u32_le(enc.len() as u32);
buf.put_u32_le(payload_crc);
buf.put(enc.as_ref());
let mut h = crc32fast::Hasher::new();
h.update(&buf);
let frame_crc = h.finalize();
buf.put_u32_le(frame_crc);
Ok(buf)
}
Err(e) => Err(e)?,
}
}
pub fn make_range_complete_frame() -> Result<BytesMut, Error> {
let enc = [];
let mut h = crc32fast::Hasher::new();
h.update(&enc);
let payload_crc = h.finalize();
let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD);
buf.put_u32_le(INMEM_FRAME_MAGIC);
buf.put_u32_le(INMEM_FRAME_ENCID);
buf.put_u32_le(RANGE_COMPLETE_FRAME_TYPE_ID);
buf.put_u32_le(enc.len() as u32);
buf.put_u32_le(payload_crc);
buf.put(enc.as_ref());
let mut h = crc32fast::Hasher::new();
h.update(&buf);
let frame_crc = h.finalize();
buf.put_u32_le(frame_crc);
Ok(buf)
}
pub fn make_term_frame() -> Result<BytesMut, Error> {
let enc = [];
let mut h = crc32fast::Hasher::new();
h.update(&enc);
let payload_crc = h.finalize();
let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD);
buf.put_u32_le(INMEM_FRAME_MAGIC);
buf.put_u32_le(INMEM_FRAME_ENCID);
buf.put_u32_le(TERM_FRAME_TYPE_ID);
buf.put_u32_le(enc.len() as u32);
buf.put_u32_le(payload_crc);
buf.put(enc.as_ref());
let mut h = crc32fast::Hasher::new();
h.update(&buf);
let frame_crc = h.finalize();
buf.put_u32_le(frame_crc);
Ok(buf)
}
pub fn decode_frame<T>(frame: &InMemoryFrame) -> Result<T, Error>
where
T: FrameDecodable,
{
if frame.encid() != INMEM_FRAME_ENCID {
return Err(Error::with_msg(format!("unknown encoder id {:?}", frame)));
}
if frame.len() as usize != frame.buf().len() {
return Err(Error::with_msg(format!(
"buf mismatch {} vs {} in {:?}",
frame.len(),
frame.buf().len(),
frame
)));
}
if frame.tyid() == ERROR_FRAME_TYPE_ID {
let k: err::Error = match decode_from_slice(frame.buf()) {
Ok(item) => item,
Err(e) => {
error!(
"ERROR deserialize len {} ERROR_FRAME_TYPE_ID {}",
frame.buf().len(),
e
);
let n = frame.buf().len().min(256);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
}
};
Ok(T::from_error(k))
} else if frame.tyid() == LOG_FRAME_TYPE_ID {
let k: LogItem = match decode_from_slice(frame.buf()) {
Ok(item) => item,
Err(e) => {
error!("ERROR deserialize len {} LOG_FRAME_TYPE_ID {}", frame.buf().len(), e);
let n = frame.buf().len().min(128);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
}
};
Ok(T::from_log(k))
} else if frame.tyid() == STATS_FRAME_TYPE_ID {
let k: StatsItem = match decode_from_slice(frame.buf()) {
Ok(item) => item,
Err(e) => {
error!(
"ERROR deserialize len {} STATS_FRAME_TYPE_ID {}",
frame.buf().len(),
e
);
let n = frame.buf().len().min(128);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
}
};
Ok(T::from_stats(k))
} else if frame.tyid() == RANGE_COMPLETE_FRAME_TYPE_ID {
// There is currently no content in this variant.
Ok(T::from_range_complete())
} else {
let tyid = T::FRAME_TYPE_ID;
if frame.tyid() != tyid {
Err(Error::with_msg(format!(
"type id mismatch expect {:x} found {:x} {:?}",
tyid,
frame.tyid(),
frame
)))
} else {
match decode_from_slice(frame.buf()) {
Ok(item) => Ok(item),
Err(e) => {
error!("decode_frame T = {}", any::type_name::<T>());
error!("ERROR deserialize len {} tyid {:x}", frame.buf().len(), frame.tyid());
let n = frame.buf().len().min(64);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
}
}
}
}
}
pub fn crchex<T>(t: T) -> String
where
T: AsRef<[u8]>,
{
let mut h = crc32fast::Hasher::new();
h.update(t.as_ref());
let crc = h.finalize();
format!("{:08x}", crc)
}

View File

@@ -0,0 +1,34 @@
use bytes::Bytes;
use std::fmt;
pub struct InMemoryFrame {
pub encid: u32,
pub tyid: u32,
pub len: u32,
pub buf: Bytes,
}
impl InMemoryFrame {
pub fn encid(&self) -> u32 {
self.encid
}
pub fn tyid(&self) -> u32 {
self.tyid
}
pub fn len(&self) -> u32 {
self.len
}
pub fn buf(&self) -> &Bytes {
&self.buf
}
}
impl fmt::Debug for InMemoryFrame {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
fmt,
"InMemoryFrame {{ encid: {:x} tyid: {:x} len {} }}",
self.encid, self.tyid, self.len
)
}
}

View File

@@ -0,0 +1,215 @@
pub mod binnedcollected;
pub mod binsdim0;
pub mod binsxbindim0;
pub mod channelevents;
pub mod empty;
pub mod eventfull;
pub mod eventsdim0;
pub mod eventsdim1;
pub mod eventsxbindim0;
pub mod framable;
pub mod frame;
pub mod inmem;
pub mod merger;
pub mod streams;
#[cfg(test)]
pub mod test;
pub mod testgen;
pub mod timebin;
pub mod transform;
use channelevents::ChannelEvents;
use chrono::DateTime;
use chrono::TimeZone;
use chrono::Utc;
use futures_util::Stream;
use items_0::overlap::RangeOverlapInfo;
use items_0::streamitem::Sitemty;
use items_0::transform::EventTransform;
use items_0::Empty;
use items_0::Events;
use items_0::MergeError;
use merger::Mergeable;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::*;
use netpod::DATETIME_FMT_3MS;
use serde::Deserialize;
use serde::Serialize;
use serde::Serializer;
use std::collections::VecDeque;
use std::fmt;
pub fn ts_offs_from_abs(tss: &[u64]) -> (u64, VecDeque<u64>, VecDeque<u64>) {
let ts_anchor_sec = tss.first().map_or(0, |&k| k) / SEC;
let ts_anchor_ns = ts_anchor_sec * SEC;
let ts_off_ms: VecDeque<_> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect();
let ts_off_ns = tss
.iter()
.zip(ts_off_ms.iter().map(|&k| k * MS))
.map(|(&j, k)| (j - ts_anchor_ns - k))
.collect();
(ts_anchor_sec, ts_off_ms, ts_off_ns)
}
pub fn ts_offs_from_abs_with_anchor(ts_anchor_sec: u64, tss: &[u64]) -> (VecDeque<u64>, VecDeque<u64>) {
let ts_anchor_ns = ts_anchor_sec * SEC;
let ts_off_ms: VecDeque<_> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect();
let ts_off_ns = tss
.iter()
.zip(ts_off_ms.iter().map(|&k| k * MS))
.map(|(&j, k)| (j - ts_anchor_ns - k))
.collect();
(ts_off_ms, ts_off_ns)
}
pub fn pulse_offs_from_abs(pulse: &[u64]) -> (u64, VecDeque<u64>) {
let pulse_anchor = pulse.first().map_or(0, |&k| k) / 10000 * 10000;
let pulse_off = pulse.iter().map(|&k| k - pulse_anchor).collect();
(pulse_anchor, pulse_off)
}
#[derive(Debug, PartialEq)]
pub enum ErrorKind {
General,
#[allow(unused)]
MismatchedType,
}
// TODO stack error better
#[derive(Debug, PartialEq)]
pub struct Error {
#[allow(unused)]
kind: ErrorKind,
msg: Option<String>,
}
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{self:?}")
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Self {
Self { kind, msg: None }
}
}
impl From<String> for Error {
fn from(msg: String) -> Self {
Self {
msg: Some(msg),
kind: ErrorKind::General,
}
}
}
// TODO this discards structure
impl From<err::Error> for Error {
fn from(e: err::Error) -> Self {
Self {
msg: Some(format!("{e}")),
kind: ErrorKind::General,
}
}
}
// TODO this discards structure
impl From<Error> for err::Error {
fn from(e: Error) -> Self {
err::Error::with_msg_no_trace(format!("{e}"))
}
}
impl std::error::Error for Error {}
impl serde::de::Error for Error {
fn custom<T>(msg: T) -> Self
where
T: fmt::Display,
{
format!("{msg}").into()
}
}
#[derive(Clone, Debug, PartialEq, Deserialize)]
pub struct IsoDateTime(DateTime<Utc>);
impl IsoDateTime {
pub fn from_u64(ts: u64) -> Self {
IsoDateTime(Utc.timestamp_nanos(ts as i64))
}
}
impl Serialize for IsoDateTime {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.0.format(DATETIME_FMT_3MS).to_string())
}
}
pub fn make_iso_ts(tss: &[u64]) -> Vec<IsoDateTime> {
tss.iter()
.map(|&k| IsoDateTime(Utc.timestamp_nanos(k as i64)))
.collect()
}
impl Mergeable for Box<dyn Events> {
fn ts_min(&self) -> Option<u64> {
self.as_ref().ts_min()
}
fn ts_max(&self) -> Option<u64> {
self.as_ref().ts_max()
}
fn new_empty(&self) -> Self {
self.as_ref().new_empty_evs()
}
fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> {
self.as_mut().drain_into_evs(dst, range)
}
fn find_lowest_index_gt(&self, ts: u64) -> Option<usize> {
self.as_ref().find_lowest_index_gt_evs(ts)
}
fn find_lowest_index_ge(&self, ts: u64) -> Option<usize> {
self.as_ref().find_lowest_index_ge_evs(ts)
}
fn find_highest_index_lt(&self, ts: u64) -> Option<usize> {
self.as_ref().find_highest_index_lt_evs(ts)
}
}
// TODO rename to `Typed`
pub trait TimeBinnableType: Send + Unpin + RangeOverlapInfo + Empty {
type Output: TimeBinnableType;
type Aggregator: TimeBinnableTypeAggregator<Input = Self, Output = Self::Output> + Send + Unpin;
fn aggregator(range: SeriesRange, bin_count: usize, do_time_weight: bool) -> Self::Aggregator;
}
pub trait TimeBinnableTypeAggregator: Send {
type Input: TimeBinnableType;
type Output: TimeBinnableType;
fn range(&self) -> &SeriesRange;
fn ingest(&mut self, item: &Self::Input);
fn result_reset(&mut self, range: SeriesRange) -> Self::Output;
}
pub trait ChannelEventsInput: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {}
impl<T> ChannelEventsInput for T where T: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {}
pub fn runfut<T, F>(fut: F) -> Result<T, err::Error>
where
F: std::future::Future<Output = Result<T, Error>>,
{
use futures_util::TryFutureExt;
let fut = fut.map_err(|e| e.into());
taskrun::run(fut)
}

View File

@@ -0,0 +1,487 @@
use crate::Error;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::container::ByteEstimate;
use items_0::on_sitemty_data;
use items_0::streamitem::sitem_data;
use items_0::streamitem::LogItem;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::transform::EventTransform;
use items_0::transform::TransformProperties;
use items_0::transform::WithTransformProperties;
use items_0::Events;
use items_0::MergeError;
use items_0::WithLen;
use netpod::log::*;
use std::collections::VecDeque;
use std::fmt;
use std::ops::ControlFlow;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
const OUT_MAX_BYTES: u64 = 1024 * 200;
const DO_DETECT_NON_MONO: bool = true;
#[allow(unused)]
macro_rules! trace2 {
($($arg:tt)*) => {};
($($arg:tt)*) => { trace!($($arg)*) };
}
#[allow(unused)]
macro_rules! trace3 {
($($arg:tt)*) => {};
($($arg:tt)*) => { trace!($($arg)*) };
}
#[allow(unused)]
macro_rules! trace4 {
($($arg:tt)*) => {};
($($arg:tt)*) => { trace!($($arg)*) };
}
pub trait Mergeable<Rhs = Self>: fmt::Debug + WithLen + ByteEstimate + Unpin {
fn ts_min(&self) -> Option<u64>;
fn ts_max(&self) -> Option<u64>;
fn new_empty(&self) -> Self;
// TODO when MergeError::Full gets returned, any guarantees about what has been modified or kept unchanged?
fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError>;
fn find_lowest_index_gt(&self, ts: u64) -> Option<usize>;
fn find_lowest_index_ge(&self, ts: u64) -> Option<usize>;
fn find_highest_index_lt(&self, ts: u64) -> Option<usize>;
}
type MergeInp<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
pub struct Merger<T> {
inps: Vec<Option<MergeInp<T>>>,
items: Vec<Option<T>>,
out: Option<T>,
do_clear_out: bool,
out_max_len: usize,
range_complete: Vec<bool>,
out_of_band_queue: VecDeque<Sitemty<T>>,
log_queue: VecDeque<LogItem>,
dim0ix_max: u64,
done_emit_first_empty: bool,
done_data: bool,
done_buffered: bool,
done_range_complete: bool,
complete: bool,
poll_count: usize,
}
impl<T> fmt::Debug for Merger<T>
where
T: Mergeable,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let inps: Vec<_> = self.inps.iter().map(|x| x.is_some()).collect();
fmt.debug_struct(std::any::type_name::<Self>())
.field("inps", &inps)
.field("items", &self.items)
.field("out_max_len", &self.out_max_len)
.field("range_complete", &self.range_complete)
.field("out_of_band_queue", &self.out_of_band_queue.len())
.field("done_data", &self.done_data)
.field("done_buffered", &self.done_buffered)
.field("done_range_complete", &self.done_range_complete)
.finish()
}
}
impl<T> Merger<T>
where
T: Mergeable,
{
pub fn new(inps: Vec<MergeInp<T>>, out_max_len: usize) -> Self {
let n = inps.len();
Self {
inps: inps.into_iter().map(|x| Some(x)).collect(),
items: (0..n).into_iter().map(|_| None).collect(),
out: None,
do_clear_out: false,
out_max_len,
range_complete: vec![false; n],
out_of_band_queue: VecDeque::new(),
log_queue: VecDeque::new(),
dim0ix_max: 0,
done_emit_first_empty: false,
done_data: false,
done_buffered: false,
done_range_complete: false,
complete: false,
poll_count: 0,
}
}
fn drain_into_upto(src: &mut T, dst: &mut T, upto: u64) -> Result<(), MergeError> {
match src.find_lowest_index_gt(upto) {
Some(ilgt) => {
src.drain_into(dst, (0, ilgt))?;
}
None => {
// TODO should not be here.
src.drain_into(dst, (0, src.len()))?;
}
}
Ok(())
}
fn take_into_output_all(&mut self, src: &mut T) -> Result<(), MergeError> {
// TODO optimize the case when some large batch should be added to some existing small batch already in out.
// TODO maybe use two output slots?
self.take_into_output_upto(src, u64::MAX)
}
fn take_into_output_upto(&mut self, src: &mut T, upto: u64) -> Result<(), MergeError> {
// TODO optimize the case when some large batch should be added to some existing small batch already in out.
// TODO maybe use two output slots?
if let Some(out) = self.out.as_mut() {
Self::drain_into_upto(src, out, upto)?;
} else {
trace2!("move into fresh");
let mut fresh = src.new_empty();
Self::drain_into_upto(src, &mut fresh, upto)?;
self.out = Some(fresh);
}
Ok(())
}
fn process(mut self: Pin<&mut Self>, _cx: &mut Context) -> Result<ControlFlow<()>, Error> {
use ControlFlow::*;
trace4!("process");
let mut log_items = Vec::new();
let mut tslows = [None, None];
for (i1, itemopt) in self.items.iter_mut().enumerate() {
if let Some(item) = itemopt {
if let Some(t1) = item.ts_min() {
if let Some((_, a)) = tslows[0] {
if t1 < a {
tslows[1] = tslows[0];
tslows[0] = Some((i1, t1));
} else {
if let Some((_, b)) = tslows[1] {
if t1 < b {
tslows[1] = Some((i1, t1));
} else {
// nothing to do
}
} else {
tslows[1] = Some((i1, t1));
}
}
} else {
tslows[0] = Some((i1, t1));
}
} else {
// the item seems empty.
// TODO count for stats.
trace2!("empty item, something to do here?");
*itemopt = None;
return Ok(Continue(()));
}
}
}
if DO_DETECT_NON_MONO {
if let Some((i1, t1)) = tslows[0].as_ref() {
if *t1 <= self.dim0ix_max {
self.dim0ix_max = *t1;
let item = LogItem {
node_ix: *i1 as _,
level: Level::INFO,
msg: format!(
"dim0ix_max {} vs {} diff {}",
self.dim0ix_max,
t1,
self.dim0ix_max - t1
),
};
log_items.push(item);
}
}
}
trace4!("tslows {tslows:?}");
if let Some((il0, _tl0)) = tslows[0] {
if let Some((_il1, tl1)) = tslows[1] {
// There is a second input, take only up to the second highest timestamp
let item = self.items[il0].as_mut().unwrap();
if let Some(th0) = item.ts_max() {
if th0 <= tl1 {
// Can take the whole item
// TODO gather stats about this case. Should be never for databuffer, and often for scylla.
let mut item = self.items[il0].take().unwrap();
trace3!("Take all from item {item:?}");
match self.take_into_output_all(&mut item) {
Ok(()) => Ok(Break(())),
Err(MergeError::Full) | Err(MergeError::NotCompatible) => {
// TODO count for stats
trace3!("Put item back");
self.items[il0] = Some(item);
self.do_clear_out = true;
Ok(Break(()))
}
}
} else {
// Take only up to the lowest ts of the second-lowest input
let mut item = self.items[il0].take().unwrap();
trace3!("Take up to {tl1} from item {item:?}");
let res = self.take_into_output_upto(&mut item, tl1);
match res {
Ok(()) => {
if item.len() == 0 {
// TODO should never be here because we should have taken the whole item
Err(format!("Should have taken the whole item instead").into())
} else {
self.items[il0] = Some(item);
Ok(Break(()))
}
}
Err(MergeError::Full) | Err(MergeError::NotCompatible) => {
// TODO count for stats
info!("Put item back because {res:?}");
self.items[il0] = Some(item);
self.do_clear_out = true;
Ok(Break(()))
}
}
}
} else {
// TODO should never be here because ts-max should always exist here.
Err(format!("selected input without max ts").into())
}
} else {
// No other input, take the whole item
let mut item = self.items[il0].take().unwrap();
trace3!("Take all from item (no other input) {item:?}");
match self.take_into_output_all(&mut item) {
Ok(()) => Ok(Break(())),
Err(_) => {
// TODO count for stats
trace3!("Put item back");
self.items[il0] = Some(item);
self.do_clear_out = true;
Ok(Break(()))
}
}
}
} else {
Err(format!("after low ts search nothing found").into())
}
}
fn refill(mut self: Pin<&mut Self>, cx: &mut Context) -> Result<Poll<()>, Error> {
trace4!("refill");
use Poll::*;
let mut has_pending = false;
for i in 0..self.inps.len() {
if self.items[i].is_none() {
while let Some(inp) = self.inps[i].as_mut() {
match inp.poll_next_unpin(cx) {
Ready(Some(Ok(k))) => match k {
StreamItem::DataItem(k) => match k {
RangeCompletableItem::Data(k) => {
if self.done_emit_first_empty == false {
trace!("emit first empty marker item");
self.done_emit_first_empty = true;
let item = k.new_empty();
let item = sitem_data(item);
self.out_of_band_queue.push_back(item);
}
self.items[i] = Some(k);
trace4!("refilled {}", i);
}
RangeCompletableItem::RangeComplete => {
self.range_complete[i] = true;
trace!("range_complete {:?}", self.range_complete);
continue;
}
},
StreamItem::Log(item) => {
// TODO limit queue length
self.out_of_band_queue.push_back(Ok(StreamItem::Log(item)));
continue;
}
StreamItem::Stats(item) => {
// TODO limit queue length
self.out_of_band_queue.push_back(Ok(StreamItem::Stats(item)));
continue;
}
},
Ready(Some(Err(e))) => {
self.inps[i] = None;
return Err(e.into());
}
Ready(None) => {
self.inps[i] = None;
}
Pending => {
has_pending = true;
}
}
break;
}
}
}
if has_pending {
Ok(Pending)
} else {
Ok(Ready(()))
}
}
fn poll3(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<Result<T, Error>>>> {
use ControlFlow::*;
use Poll::*;
trace4!("poll3");
#[allow(unused)]
let ninps = self.inps.iter().filter(|a| a.is_some()).count();
let nitems = self.items.iter().filter(|a| a.is_some()).count();
let nitemsmissing = self
.inps
.iter()
.zip(self.items.iter())
.filter(|(a, b)| a.is_some() && b.is_none())
.count();
trace3!("ninps {ninps} nitems {nitems} nitemsmissing {nitemsmissing}");
if nitemsmissing != 0 {
let e = Error::from(format!("missing but no pending"));
return Break(Ready(Some(Err(e))));
}
let last_emit = nitems == 0;
if nitems != 0 {
match Self::process(Pin::new(&mut self), cx) {
Ok(Break(())) => {}
Ok(Continue(())) => {}
Err(e) => return Break(Ready(Some(Err(e)))),
}
}
if let Some(o) = self.out.as_ref() {
if o.len() >= self.out_max_len || o.byte_estimate() >= OUT_MAX_BYTES || self.do_clear_out || last_emit {
if o.len() > self.out_max_len {
debug!("MERGER OVERWEIGHT ITEM {} vs {}", o.len(), self.out_max_len);
}
trace3!("decide to output");
self.do_clear_out = false;
//Break(Ready(Some(Ok(self.out.take().unwrap()))))
let item = sitem_data(self.out.take().unwrap());
self.out_of_band_queue.push_back(item);
Continue(())
} else {
trace4!("not enough output yet");
Continue(())
}
} else {
trace!("no output candidate");
if last_emit {
Break(Ready(None))
} else {
Continue(())
}
}
}
fn poll2(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<Result<T, Error>>>> {
use ControlFlow::*;
use Poll::*;
match Self::refill(Pin::new(&mut self), cx) {
Ok(Ready(())) => Self::poll3(self, cx),
Ok(Pending) => Break(Pending),
Err(e) => Break(Ready(Some(Err(e)))),
}
}
}
impl<T> Stream for Merger<T>
where
T: Mergeable,
{
type Item = Sitemty<T>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
self.poll_count += 1;
let span1 = span!(Level::INFO, "Merger", pc = self.poll_count);
let _spg = span1.enter();
loop {
trace3!("poll");
break if let Some(item) = self.log_queue.pop_front() {
Ready(Some(Ok(StreamItem::Log(item))))
} else if self.poll_count == usize::MAX {
self.done_range_complete = true;
continue;
} else if self.complete {
panic!("poll after complete");
} else if self.done_range_complete {
self.complete = true;
Ready(None)
} else if self.done_buffered {
self.done_range_complete = true;
if self.range_complete.iter().all(|x| *x) {
trace!("emit RangeComplete");
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))))
} else {
continue;
}
} else if self.done_data {
trace!("done_data");
self.done_buffered = true;
if let Some(out) = self.out.take() {
trace!("done_data emit buffered len {}", out.len());
Ready(Some(sitem_data(out)))
} else {
continue;
}
} else if let Some(item) = self.out_of_band_queue.pop_front() {
let item = on_sitemty_data!(item, |k: T| {
trace3!("emit out-of-band data len {}", k.len());
sitem_data(k)
});
Ready(Some(item))
} else {
match Self::poll2(self.as_mut(), cx) {
ControlFlow::Continue(()) => continue,
ControlFlow::Break(k) => match k {
Ready(Some(Ok(out))) => {
if true {
error!("THIS BRANCH SHOULD NO LONGER OCCUR, REFACTOR");
self.done_data = true;
let e = Error::from(format!("TODO refactor direct emit in merger"));
return Ready(Some(Err(e.into())));
}
trace!("emit buffered len {}", out.len());
Ready(Some(sitem_data(out)))
}
Ready(Some(Err(e))) => {
self.done_data = true;
Ready(Some(Err(e.into())))
}
Ready(None) => {
self.done_data = true;
continue;
}
Pending => Pending,
},
}
};
}
}
}
impl<T> WithTransformProperties for Merger<T> {
fn query_transform_properties(&self) -> TransformProperties {
todo!()
}
}
impl<T> EventTransform for Merger<T>
where
T: Send,
{
fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
todo!()
}
}

View File

@@ -0,0 +1,359 @@
use futures_util::Future;
use futures_util::FutureExt;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::collect_s::Collectable;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::timebin::TimeBinnable;
use items_0::transform::CollectableStreamTrait;
use items_0::transform::EventStreamTrait;
use items_0::transform::EventTransform;
use items_0::transform::TimeBinnableStreamTrait;
use items_0::transform::TransformProperties;
use items_0::transform::WithTransformProperties;
use items_0::Events;
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
pub struct Enumerate2<T> {
inp: T,
cnt: usize,
}
impl<T> Enumerate2<T> {
pub fn new(inp: T) -> Self
where
T: EventTransform,
{
Self { inp, cnt: 0 }
}
}
impl<T> Stream for Enumerate2<T>
where
T: Stream + Unpin,
{
type Item = (usize, <T as Stream>::Item);
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
match self.inp.poll_next_unpin(cx) {
Ready(Some(item)) => {
let i = self.cnt;
self.cnt += 1;
Ready(Some((i, item)))
}
Ready(None) => Ready(None),
Pending => Pending,
}
}
}
impl<T> WithTransformProperties for Enumerate2<T>
where
T: WithTransformProperties,
{
fn query_transform_properties(&self) -> TransformProperties {
self.inp.query_transform_properties()
}
}
impl<T> EventTransform for Enumerate2<T>
where
T: WithTransformProperties + Send,
{
fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
todo!()
}
}
pub struct Then2<T, F, Fut> {
inp: Pin<Box<T>>,
f: Pin<Box<F>>,
fut: Option<Pin<Box<Fut>>>,
}
impl<T, F, Fut> Then2<T, F, Fut>
where
T: Stream,
F: Fn(<T as Stream>::Item) -> Fut,
{
pub fn new(inp: T, f: F) -> Self
where
T: EventTransform,
{
Self {
inp: Box::pin(inp),
f: Box::pin(f),
fut: None,
}
}
fn prepare_fut(&mut self, item: <T as Stream>::Item) {
self.fut = Some(Box::pin((self.f)(item)));
}
}
/*impl<T, F, Fut> Unpin for Then2<T, F, Fut>
where
T: Unpin,
F: Unpin,
Fut: Unpin,
{
}*/
impl<T, F, Fut> Stream for Then2<T, F, Fut>
where
T: Stream,
F: Fn(<T as Stream>::Item) -> Fut,
Fut: Future,
{
type Item = <Fut as Future>::Output;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
loop {
break if let Some(fut) = self.fut.as_mut() {
match fut.poll_unpin(cx) {
Ready(item) => {
self.fut = None;
Ready(Some(item))
}
Pending => Pending,
}
} else {
match self.inp.poll_next_unpin(cx) {
Ready(Some(item)) => {
self.prepare_fut(item);
continue;
}
Ready(None) => Ready(None),
Pending => Pending,
}
};
}
}
}
impl<T, F, Fut> WithTransformProperties for Then2<T, F, Fut>
where
T: EventTransform,
{
fn query_transform_properties(&self) -> TransformProperties {
self.inp.query_transform_properties()
}
}
impl<T, F, Fut> EventTransform for Then2<T, F, Fut>
where
T: EventTransform + Send,
F: Send,
Fut: Send,
{
fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
todo!()
}
}
pub trait TransformerExt {
fn enumerate2(self) -> Enumerate2<Self>
where
Self: EventTransform + Sized;
fn then2<F, Fut>(self, f: F) -> Then2<Self, F, Fut>
where
Self: EventTransform + Stream + Sized,
F: Fn(<Self as Stream>::Item) -> Fut,
Fut: Future;
}
impl<T> TransformerExt for T {
fn enumerate2(self) -> Enumerate2<Self>
where
Self: EventTransform + Sized,
{
Enumerate2::new(self)
}
fn then2<F, Fut>(self, f: F) -> Then2<Self, F, Fut>
where
Self: EventTransform + Stream + Sized,
F: Fn(<Self as Stream>::Item) -> Fut,
Fut: Future,
{
Then2::new(self, f)
}
}
pub struct VecStream<T> {
inp: VecDeque<T>,
}
impl<T> VecStream<T> {
pub fn new(inp: VecDeque<T>) -> Self {
Self { inp }
}
}
/*impl<T> Unpin for VecStream<T> where T: Unpin {}*/
impl<T> Stream for VecStream<T>
where
T: Unpin,
{
type Item = T;
fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
if let Some(item) = self.inp.pop_front() {
Ready(Some(item))
} else {
Ready(None)
}
}
}
impl<T> WithTransformProperties for VecStream<T> {
fn query_transform_properties(&self) -> TransformProperties {
todo!()
}
}
impl<T> EventTransform for VecStream<T>
where
T: Send,
{
fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
todo!()
}
}
/// Wrap any event stream and provide transformation properties.
pub struct PlainEventStream<INP, T>
where
T: Events,
INP: Stream<Item = Sitemty<T>>,
{
inp: Pin<Box<INP>>,
}
impl<INP, T> PlainEventStream<INP, T>
where
T: Events,
INP: Stream<Item = Sitemty<T>>,
{
pub fn new(inp: INP) -> Self {
Self { inp: Box::pin(inp) }
}
}
impl<INP, T> Stream for PlainEventStream<INP, T>
where
T: Events,
INP: Stream<Item = Sitemty<T>>,
{
type Item = Sitemty<Box<dyn Events>>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
match self.inp.poll_next_unpin(cx) {
Ready(Some(item)) => Ready(Some(match item {
Ok(item) => Ok(match item {
StreamItem::DataItem(item) => StreamItem::DataItem(match item {
RangeCompletableItem::RangeComplete => RangeCompletableItem::RangeComplete,
RangeCompletableItem::Data(item) => RangeCompletableItem::Data(Box::new(item)),
}),
StreamItem::Log(item) => StreamItem::Log(item),
StreamItem::Stats(item) => StreamItem::Stats(item),
}),
Err(e) => Err(e),
})),
Ready(None) => Ready(None),
Pending => Pending,
}
}
}
impl<INP, T> WithTransformProperties for PlainEventStream<INP, T>
where
T: Events,
INP: Stream<Item = Sitemty<T>>,
{
fn query_transform_properties(&self) -> TransformProperties {
todo!()
}
}
impl<INP, T> EventStreamTrait for PlainEventStream<INP, T>
where
T: Events,
INP: Stream<Item = Sitemty<T>> + Send,
{
}
/// Wrap any event stream and provide transformation properties.
pub struct PlainTimeBinnableStream<INP, T>
where
T: TimeBinnable,
INP: Stream<Item = Sitemty<T>> + Send,
{
inp: Pin<Box<INP>>,
}
impl<INP, T> PlainTimeBinnableStream<INP, T>
where
T: TimeBinnable,
INP: Stream<Item = Sitemty<T>> + Send,
{
pub fn new(inp: INP) -> Self {
Self { inp: Box::pin(inp) }
}
}
impl<INP, T> Stream for PlainTimeBinnableStream<INP, T>
where
T: TimeBinnable,
INP: Stream<Item = Sitemty<T>> + Send,
{
type Item = Sitemty<Box<dyn TimeBinnable>>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
match self.inp.poll_next_unpin(cx) {
Ready(Some(item)) => Ready(Some(match item {
Ok(item) => Ok(match item {
StreamItem::DataItem(item) => StreamItem::DataItem(match item {
RangeCompletableItem::RangeComplete => RangeCompletableItem::RangeComplete,
RangeCompletableItem::Data(item) => RangeCompletableItem::Data(Box::new(item)),
}),
StreamItem::Log(item) => StreamItem::Log(item),
StreamItem::Stats(item) => StreamItem::Stats(item),
}),
Err(e) => Err(e),
})),
Ready(None) => Ready(None),
Pending => Pending,
}
}
}
impl<INP, T> WithTransformProperties for PlainTimeBinnableStream<INP, T>
where
T: TimeBinnable,
INP: Stream<Item = Sitemty<T>> + Send,
{
fn query_transform_properties(&self) -> TransformProperties {
todo!()
}
}
impl<INP, T> TimeBinnableStreamTrait for PlainTimeBinnableStream<INP, T>
where
T: TimeBinnable,
INP: Stream<Item = Sitemty<T>> + Send,
{
}

498
crates/items_2/src/test.rs Normal file
View File

@@ -0,0 +1,498 @@
#[cfg(test)]
pub mod eventsdim0;
use crate::binnedcollected::BinnedCollected;
use crate::binsdim0::BinsDim0CollectedResult;
use crate::channelevents::ConnStatus;
use crate::channelevents::ConnStatusEvent;
use crate::eventsdim0::EventsDim0;
use crate::merger::Mergeable;
use crate::merger::Merger;
use crate::runfut;
use crate::streams::TransformerExt;
use crate::streams::VecStream;
use crate::testgen::make_some_boxed_d0_f32;
use crate::ChannelEvents;
use crate::Error;
use crate::Events;
use crate::IsoDateTime;
use chrono::TimeZone;
use chrono::Utc;
use futures_util::stream;
use futures_util::StreamExt;
use items_0::streamitem::sitem_data;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::Appendable;
use items_0::Empty;
use items_0::WithLen;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::*;
use netpod::AggKind;
use netpod::BinnedRange;
use netpod::BinnedRangeEnum;
use netpod::ScalarType;
use netpod::Shape;
use std::time::Duration;
use std::time::Instant;
#[test]
fn items_move_events() {
let evs = make_some_boxed_d0_f32(10, SEC, SEC, 0, 1846713782);
let v0 = ChannelEvents::Events(evs);
let mut v1 = v0.clone();
eprintln!("{v1:?}");
eprintln!("{}", v1.len());
let mut v2 = v1.new_empty();
match v1.find_lowest_index_gt(4) {
Some(ilgt) => {
v1.drain_into(&mut v2, (0, ilgt)).unwrap();
}
None => {
v1.drain_into(&mut v2, (0, v1.len())).unwrap();
}
}
eprintln!("{}", v1.len());
eprintln!("{}", v2.len());
match v1.find_lowest_index_gt(u64::MAX) {
Some(ilgt) => {
v1.drain_into(&mut v2, (0, ilgt)).unwrap();
}
None => {
v1.drain_into(&mut v2, (0, v1.len())).unwrap();
}
}
eprintln!("{}", v1.len());
eprintln!("{}", v2.len());
eprintln!("{v1:?}");
eprintln!("{v2:?}");
assert_eq!(v1.len(), 0);
assert_eq!(v2.len(), 10);
assert_eq!(v2, v0);
}
#[test]
fn items_merge_00() {
let fut = async {
use crate::merger::Merger;
let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782);
let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893);
let v0 = ChannelEvents::Events(evs0);
let v1 = ChannelEvents::Events(evs1);
let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
let mut merger = Merger::new(vec![stream0, stream1], 8);
while let Some(item) = merger.next().await {
eprintln!("{item:?}");
}
Ok(())
};
runfut(fut).unwrap();
}
#[test]
fn items_merge_01() {
let fut = async {
use crate::merger::Merger;
let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782);
let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893);
let v0 = ChannelEvents::Events(evs0);
let v1 = ChannelEvents::Events(evs1);
let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect)));
let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect)));
let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect)));
let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
let mut merger = Merger::new(vec![stream0, stream1, stream2], 8);
let mut total_event_count = 0;
while let Some(item) = merger.next().await {
eprintln!("{item:?}");
let item = item?;
match item {
StreamItem::DataItem(item) => match item {
RangeCompletableItem::RangeComplete => {}
RangeCompletableItem::Data(item) => {
total_event_count += item.len();
}
},
StreamItem::Log(_) => {}
StreamItem::Stats(_) => {}
}
}
assert_eq!(total_event_count, 23);
Ok(())
};
runfut(fut).unwrap();
}
#[test]
fn items_merge_02() {
let fut = async {
let evs0 = make_some_boxed_d0_f32(100, SEC * 1, SEC * 2, 0, 1846713782);
let evs1 = make_some_boxed_d0_f32(100, SEC * 2, SEC * 2, 0, 828764893);
let v0 = ChannelEvents::Events(evs0);
let v1 = ChannelEvents::Events(evs1);
let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect)));
let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect)));
let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect)));
let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
let mut merger = Merger::new(vec![stream0, stream1, stream2], 8);
let mut total_event_count = 0;
while let Some(item) = merger.next().await {
eprintln!("{item:?}");
let item = item.unwrap();
match item {
StreamItem::DataItem(item) => match item {
RangeCompletableItem::RangeComplete => {}
RangeCompletableItem::Data(item) => {
total_event_count += item.len();
}
},
StreamItem::Log(_) => {}
StreamItem::Stats(_) => {}
}
}
assert_eq!(total_event_count, 203);
Ok(())
};
runfut(fut).unwrap();
}
#[test]
fn merge_00() {
let fut = async {
let mut events_vec1: Vec<Sitemty<ChannelEvents>> = Vec::new();
let mut events_vec2: Vec<Sitemty<ChannelEvents>> = Vec::new();
{
let mut events = EventsDim0::empty();
for i in 0..10 {
events.push(i * 100, i, i as f32 * 100.);
}
let cev = ChannelEvents::Events(Box::new(events.clone()));
events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
let cev = ChannelEvents::Events(Box::new(events.clone()));
events_vec2.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
}
let inp1 = events_vec1;
let inp1 = futures_util::stream::iter(inp1);
let inp1 = Box::pin(inp1);
let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
let inp2 = futures_util::stream::iter(inp2);
let inp2 = Box::pin(inp2);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], 32);
// Expect an empty first item.
let item = merger.next().await;
let item = match item {
Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
_ => panic!(),
};
assert_eq!(item.len(), 0);
let item = merger.next().await;
assert_eq!(item.as_ref(), events_vec2.get(0));
let item = merger.next().await;
assert_eq!(item.as_ref(), None);
Ok(())
};
runfut(fut).unwrap();
}
#[test]
fn merge_01() {
let fut = async {
let events_vec1 = {
let mut vec = Vec::new();
let mut events = EventsDim0::empty();
for i in 0..10 {
events.push(i * 100, i, i as f32 * 100.);
}
push_evd0(&mut vec, Box::new(events.clone()));
let mut events = EventsDim0::empty();
for i in 10..20 {
events.push(i * 100, i, i as f32 * 100.);
}
push_evd0(&mut vec, Box::new(events.clone()));
vec
};
let exp = events_vec1.clone();
let inp1 = events_vec1;
let inp1 = futures_util::stream::iter(inp1);
let inp1 = Box::pin(inp1);
let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
let inp2 = futures_util::stream::iter(inp2);
let inp2 = Box::pin(inp2);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], 10);
// Expect an empty first item.
let item = merger.next().await;
let item = match item {
Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
_ => panic!(),
};
assert_eq!(item.len(), 0);
let item = merger.next().await;
assert_eq!(item.as_ref(), exp.get(0));
let item = merger.next().await;
assert_eq!(item.as_ref(), exp.get(1));
let item = merger.next().await;
assert_eq!(item.as_ref(), None);
Ok(())
};
runfut(fut).unwrap();
}
fn push_evd0(vec: &mut Vec<Sitemty<ChannelEvents>>, events: Box<dyn Events>) {
let cev = ChannelEvents::Events(events);
vec.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
}
#[test]
fn merge_02() {
let fut = async {
let events_vec1 = {
let mut vec = Vec::new();
let mut events = EventsDim0::empty();
for i in 0..10 {
events.push(i * 100, i, i as f32 * 100.);
}
push_evd0(&mut vec, Box::new(events));
let mut events = EventsDim0::empty();
for i in 10..20 {
events.push(i * 100, i, i as f32 * 100.);
}
push_evd0(&mut vec, Box::new(events));
vec
};
let events_vec2 = {
let mut vec = Vec::new();
let mut events = EventsDim0::empty();
for i in 0..10 {
events.push(i * 100, i, i as f32 * 100.);
}
push_evd0(&mut vec, Box::new(events));
let mut events = EventsDim0::empty();
for i in 10..12 {
events.push(i * 100, i, i as f32 * 100.);
}
push_evd0(&mut vec, Box::new(events));
let mut events = EventsDim0::empty();
for i in 12..20 {
events.push(i * 100, i, i as f32 * 100.);
}
push_evd0(&mut vec, Box::new(events));
vec
};
let inp2_events_a = {
let ev = ConnStatusEvent {
ts: 1199,
datetime: std::time::SystemTime::UNIX_EPOCH,
status: ConnStatus::Disconnect,
};
let item: Sitemty<ChannelEvents> = Ok(StreamItem::DataItem(RangeCompletableItem::Data(
ChannelEvents::Status(Some(ev)),
)));
vec![item]
};
let inp2_events_b = {
let ev = ConnStatusEvent {
ts: 1199,
datetime: std::time::SystemTime::UNIX_EPOCH,
status: ConnStatus::Disconnect,
};
let item: Sitemty<ChannelEvents> = Ok(StreamItem::DataItem(RangeCompletableItem::Data(
ChannelEvents::Status(Some(ev)),
)));
vec![item]
};
let inp1 = events_vec1;
let inp1 = futures_util::stream::iter(inp1);
let inp1 = Box::pin(inp1);
let inp2: Vec<Sitemty<ChannelEvents>> = inp2_events_a;
let inp2 = futures_util::stream::iter(inp2);
let inp2 = Box::pin(inp2);
let mut merger = crate::merger::Merger::new(vec![inp1, inp2], 10);
// Expect an empty first item.
let item = merger.next().await;
let item = match item {
Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
_ => panic!(),
};
assert_eq!(item.len(), 0);
let item = merger.next().await;
assert_eq!(item.as_ref(), events_vec2.get(0));
let item = merger.next().await;
assert_eq!(item.as_ref(), events_vec2.get(1));
let item = merger.next().await;
assert_eq!(item.as_ref(), inp2_events_b.get(0));
let item = merger.next().await;
assert_eq!(item.as_ref(), events_vec2.get(2));
let item = merger.next().await;
assert_eq!(item.as_ref(), None);
Ok(())
};
runfut(fut).unwrap();
}
#[test]
fn bin_00() {
let fut = async {
let inp1 = {
let mut vec = Vec::new();
for j in 0..2 {
let mut events = EventsDim0::empty();
for i in 10 * j..10 * (1 + j) {
events.push(SEC * i, i, 17f32);
}
push_evd0(&mut vec, Box::new(events));
}
vec
};
let inp1 = futures_util::stream::iter(inp1);
let inp1 = Box::pin(inp1);
let inp2 = Box::pin(futures_util::stream::empty()) as _;
let stream = crate::merger::Merger::new(vec![inp1, inp2], 32);
let range = NanoRange {
beg: SEC * 0,
end: SEC * 100,
};
let binrange = BinnedRangeEnum::covering_range(range.into(), 10).unwrap();
let deadline = Instant::now() + Duration::from_millis(4000);
let do_time_weight = true;
let res = BinnedCollected::new(
binrange,
ScalarType::F32,
Shape::Scalar,
do_time_weight,
deadline,
Box::pin(stream),
)
.await?;
// TODO assert
Ok::<_, Error>(())
};
runfut(fut).unwrap();
}
#[test]
fn bin_01() {
const TSBASE: u64 = SEC * 1600000000;
fn val(ts: u64) -> f32 {
2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32
}
let fut = async {
let mut events_vec1 = Vec::new();
let mut t = TSBASE;
for _ in 0..20 {
let mut events = EventsDim0::empty();
for _ in 0..10 {
events.push(t, t, val(t));
t += MS * 100;
}
let cev = ChannelEvents::Events(Box::new(events));
events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
}
events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)));
let inp1 = events_vec1;
let inp1 = futures_util::stream::iter(inp1);
let inp1 = Box::pin(inp1);
let inp2 = Box::pin(futures_util::stream::empty()) as _;
let stream = crate::merger::Merger::new(vec![inp1, inp2], 32);
// covering_range result is subject to adjustments, instead, manually choose bin edges
let range = NanoRange {
beg: TSBASE + SEC * 1,
end: TSBASE + SEC * 10,
};
let binrange = BinnedRangeEnum::covering_range(range.into(), 9).map_err(|e| format!("{e}"))?;
let stream = Box::pin(stream);
let deadline = Instant::now() + Duration::from_millis(4000);
let do_time_weight = true;
let res = BinnedCollected::new(
binrange,
ScalarType::F32,
Shape::Scalar,
do_time_weight,
deadline,
Box::pin(stream),
)
.await?;
eprintln!("res {:?}", res);
Ok::<_, Error>(())
};
runfut(fut).unwrap();
}
#[test]
fn binned_timeout_00() {
if true {
return;
}
// TODO items_2::binnedcollected::BinnedCollected is currently not used.
trace!("binned_timeout_01 uses a delay");
const TSBASE: u64 = SEC * 1600000000;
fn val(ts: u64) -> f32 {
2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32
}
eprintln!("binned_timeout_01 ENTER");
let fut = async {
eprintln!("binned_timeout_01 IN FUT");
let mut events_vec1 = Vec::new();
let mut t = TSBASE;
for _ in 0..20 {
let mut events = EventsDim0::empty();
for _ in 0..10 {
events.push(t, t, val(t));
t += MS * 100;
}
let cev = ChannelEvents::Events(Box::new(events));
events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
}
events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)));
let inp1 = VecStream::new(events_vec1.into_iter().collect());
let inp1 = inp1.enumerate2().then2(|(i, k)| async move {
if i == 5 {
let _ = tokio::time::sleep(Duration::from_millis(10000)).await;
}
k
});
let edges: Vec<_> = (0..10).into_iter().map(|x| TSBASE + SEC * (1 + x)).collect();
let range = NanoRange {
beg: TSBASE + SEC * 1,
end: TSBASE + SEC * 10,
};
let binrange = BinnedRangeEnum::covering_range(range.into(), 9)?;
eprintln!("edges1: {:?}", edges);
//eprintln!("edges2: {:?}", binrange.edges());
let inp1 = Box::pin(inp1);
let timeout = Duration::from_millis(400);
let deadline = Instant::now() + timeout;
let do_time_weight = true;
let res =
BinnedCollected::new(binrange, ScalarType::F32, Shape::Scalar, do_time_weight, deadline, inp1).await?;
let r2: &BinsDim0CollectedResult<f32> = res.result.as_any_ref().downcast_ref().expect("res seems wrong type");
eprintln!("rs: {r2:?}");
assert_eq!(SEC * r2.ts_anchor_sec(), TSBASE + SEC);
assert_eq!(r2.counts(), &[10, 10, 10]);
assert_eq!(r2.mins(), &[3.0, 2.0, 3.0]);
assert_eq!(r2.maxs(), &[3.2, 2.2, 3.2]);
assert_eq!(r2.missing_bins(), 6);
assert_eq!(
r2.continue_at(),
Some(IsoDateTime(Utc.timestamp_nanos((TSBASE + SEC * 4) as i64)))
);
Ok::<_, Error>(())
};
runfut(fut).unwrap();
}

View File

@@ -0,0 +1,24 @@
use crate::eventsdim0::EventsDim0;
use items_0::Appendable;
use items_0::Empty;
use items_0::Events;
#[test]
fn collect_s_00() {
let mut evs = EventsDim0::empty();
evs.push(123, 4, 1.00f32);
evs.push(124, 5, 1.01);
let mut coll = evs.as_collectable_mut().new_collector();
coll.ingest(&mut evs);
assert_eq!(coll.len(), 2);
}
#[test]
fn collect_c_00() {
let mut evs = EventsDim0::empty();
evs.push(123, 4, 1.00f32);
evs.push(124, 5, 1.01);
let mut coll = evs.as_collectable_with_default_ref().new_collector();
coll.ingest(&mut evs);
assert_eq!(coll.len(), 2);
}

View File

@@ -0,0 +1,25 @@
use crate::eventsdim0::EventsDim0;
use crate::Events;
use items_0::Appendable;
use items_0::Empty;
#[allow(unused)]
fn xorshift32(state: u32) -> u32 {
let mut x = state;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
x
}
pub fn make_some_boxed_d0_f32(n: usize, t0: u64, tstep: u64, tmask: u64, seed: u32) -> Box<dyn Events> {
let mut vstate = seed;
let mut events = EventsDim0::empty();
for i in 0..n {
vstate = xorshift32(vstate);
let ts = t0 + i as u64 * tstep + (vstate as u64 & tmask);
let value = i as f32 * 100. + vstate as f32 / u32::MAX as f32 / 10.;
events.push(ts, ts, value);
}
Box::new(events)
}

View File

@@ -0,0 +1,287 @@
use items_0::overlap::RangeOverlapInfo;
use items_0::timebin::TimeBinnable;
use items_0::AppendEmptyBin;
use items_0::Empty;
use items_0::HasNonemptyFirstBin;
use items_0::WithLen;
use netpod::log::*;
use netpod::range::evrange::SeriesRange;
use std::any;
use std::collections::VecDeque;
use std::ops::Range;
#[allow(unused)]
macro_rules! trace_ingest {
($($arg:tt)*) => {};
($($arg:tt)*) => { trace!($($arg)*); };
}
#[allow(unused)]
macro_rules! trace_ingest_item {
($($arg:tt)*) => {};
($($arg:tt)*) => { trace!($($arg)*); };
}
#[allow(unused)]
macro_rules! trace2 {
($($arg:tt)*) => {};
($($arg:tt)*) => { trace!($($arg)*); };
}
pub trait TimeBinnerCommonV0Trait {
type Input: RangeOverlapInfo + 'static;
type Output: WithLen + Empty + AppendEmptyBin + HasNonemptyFirstBin + 'static;
fn type_name() -> &'static str;
fn common_bins_ready_count(&self) -> usize;
fn common_range_current(&self) -> &SeriesRange;
fn common_has_more_range(&self) -> bool;
fn common_next_bin_range(&mut self) -> Option<SeriesRange>;
fn common_set_current_range(&mut self, range: Option<SeriesRange>);
fn common_take_or_append_all_from(&mut self, item: Self::Output);
fn common_result_reset(&mut self, range: Option<SeriesRange>) -> Self::Output;
fn common_agg_ingest(&mut self, item: &mut Self::Input);
}
pub struct TimeBinnerCommonV0Func {}
impl TimeBinnerCommonV0Func {
pub fn agg_ingest<B>(binner: &mut B, item: &mut <B as TimeBinnerCommonV0Trait>::Input)
where
B: TimeBinnerCommonV0Trait,
{
//self.agg.ingest(item);
<B as TimeBinnerCommonV0Trait>::common_agg_ingest(binner, item)
}
pub fn ingest<B>(binner: &mut B, item: &mut dyn TimeBinnable)
where
B: TimeBinnerCommonV0Trait,
{
let self_name = B::type_name();
trace_ingest_item!(
"TimeBinner for {} ingest agg.range {:?} item {:?}",
Self::type_name(),
self.agg.range(),
item
);
if item.len() == 0 {
// Return already here, RangeOverlapInfo would not give much sense.
return;
}
// TODO optimize by remembering at which event array index we have arrived.
// That needs modified interfaces which can take and yield the start and latest index.
// Or consume the input data.
loop {
while item.starts_after(B::common_range_current(binner)) {
trace_ingest_item!("{self_name} ignore item and cycle starts_after");
TimeBinnerCommonV0Func::cycle(binner);
if !B::common_has_more_range(binner) {
debug!("{self_name} no more bin in edges after starts_after");
return;
}
}
if item.ends_before(B::common_range_current(binner)) {
trace_ingest_item!("{self_name} ignore item ends_before");
return;
} else {
if !B::common_has_more_range(binner) {
trace_ingest_item!("{self_name} no more bin in edges");
return;
} else {
if let Some(item) = item
.as_any_mut()
// TODO make statically sure that we attempt to cast to the correct type here:
.downcast_mut::<B::Input>()
{
// TODO collect statistics associated with this request:
trace_ingest_item!("{self_name} FEED THE ITEM...");
TimeBinnerCommonV0Func::agg_ingest(binner, item);
if item.ends_after(B::common_range_current(binner)) {
trace_ingest_item!(
"{self_name} FED ITEM, ENDS AFTER agg-range {:?}",
B::common_range_current(binner)
);
TimeBinnerCommonV0Func::cycle(binner);
if !B::common_has_more_range(binner) {
warn!("{self_name} no more bin in edges after ingest and cycle");
return;
} else {
trace_ingest_item!("{self_name} item fed, cycled, continue");
}
} else {
trace_ingest_item!("{self_name} item fed, break");
break;
}
} else {
error!("{self_name}::ingest unexpected item type");
};
}
}
}
}
pub fn push_in_progress<B>(binner: &mut B, push_empty: bool)
where
B: TimeBinnerCommonV0Trait,
{
let self_name = B::type_name();
trace_ingest_item!("{self_name}::push_in_progress push_empty {push_empty}");
// TODO expand should be derived from AggKind. Is it still required after all?
// TODO here, the expand means that agg will assume that the current value is kept constant during
// the rest of the time range.
if B::common_has_more_range(binner) {
let range_next = TimeBinnerCommonV0Trait::common_next_bin_range(binner);
B::common_set_current_range(binner, range_next.clone());
let bins = TimeBinnerCommonV0Trait::common_result_reset(binner, range_next);
if bins.len() != 1 {
error!("{self_name}::push_in_progress bins.len() {}", bins.len());
return;
} else {
if push_empty || HasNonemptyFirstBin::has_nonempty_first_bin(&bins) {
TimeBinnerCommonV0Trait::common_take_or_append_all_from(binner, bins);
}
}
}
}
pub fn cycle<B>(binner: &mut B)
where
B: TimeBinnerCommonV0Trait,
{
let self_name = any::type_name::<Self>();
trace_ingest_item!("{self_name}::cycle");
// TODO refactor this logic.
let n = TimeBinnerCommonV0Trait::common_bins_ready_count(binner);
TimeBinnerCommonV0Func::push_in_progress(binner, true);
if TimeBinnerCommonV0Trait::common_bins_ready_count(binner) == n {
let range_next = TimeBinnerCommonV0Trait::common_next_bin_range(binner);
B::common_set_current_range(binner, range_next.clone());
if let Some(range) = range_next {
let mut bins = <B as TimeBinnerCommonV0Trait>::Output::empty();
if range.is_time() {
bins.append_empty_bin(range.beg_u64(), range.end_u64());
} else {
error!("TODO {self_name}::cycle is_pulse");
}
TimeBinnerCommonV0Trait::common_take_or_append_all_from(binner, bins);
if TimeBinnerCommonV0Trait::common_bins_ready_count(binner) <= n {
error!("failed to push a zero bin");
}
} else {
warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin");
}
}
}
}
pub trait ChooseIndicesForTimeBin {
fn choose_indices_unweight(&self, beg: u64, end: u64) -> (Option<usize>, usize, usize);
fn choose_indices_timeweight(&self, beg: u64, end: u64) -> (Option<usize>, usize, usize);
}
pub struct ChooseIndicesForTimeBinEvents {}
impl ChooseIndicesForTimeBinEvents {
pub fn choose_unweight(beg: u64, end: u64, tss: &VecDeque<u64>) -> (Option<usize>, usize, usize) {
// TODO improve via binary search.
let mut one_before = None;
let mut j = 0;
let mut k = tss.len();
for (i1, &ts) in tss.iter().enumerate() {
if ts >= end {
break;
} else if ts >= beg {
} else {
one_before = Some(i1);
j = i1 + 1;
}
}
(one_before, j, k)
}
pub fn choose_timeweight(beg: u64, end: u64, tss: &VecDeque<u64>) -> (Option<usize>, usize, usize) {
// TODO improve via binary search.
let mut one_before = None;
let mut j = 0;
let mut k = tss.len();
for (i1, &ts) in tss.iter().enumerate() {
if ts >= end {
trace_ingest!("{self_name} ingest {:6} {:20} {:10?} AFTER", i1, ts, val);
// TODO count all the ignored events for stats
k = i1;
break;
} else if ts >= beg {
trace_ingest!("{self_name} ingest {:6} {:20} {:10?} INSIDE", i1, ts, val);
} else {
trace_ingest!("{self_name} ingest {:6} {:20} {:10?} BEFORE", i1, ts, val);
one_before = Some(i1);
j = i1 + 1;
}
}
(one_before, j, k)
}
}
pub trait TimeAggregatorCommonV0Trait {
type Input: RangeOverlapInfo + ChooseIndicesForTimeBin + 'static;
type Output: WithLen + Empty + AppendEmptyBin + HasNonemptyFirstBin + 'static;
fn type_name() -> &'static str;
fn common_range_current(&self) -> &SeriesRange;
fn common_ingest_unweight_range(&mut self, item: &Self::Input, r: Range<usize>);
fn common_ingest_one_before(&mut self, item: &Self::Input, j: usize);
fn common_ingest_range(&mut self, item: &Self::Input, r: Range<usize>);
}
pub struct TimeAggregatorCommonV0Func {}
impl TimeAggregatorCommonV0Func {
pub fn ingest_unweight<B>(binner: &mut B, item: &B::Input)
where
B: TimeAggregatorCommonV0Trait,
{
let self_name = B::type_name();
trace_ingest!(
"{self_name}::ingest_unweight item len {} items_seen {}",
item.len(),
self.items_seen
);
let rng = B::common_range_current(binner);
if rng.is_time() {
let beg = rng.beg_u64();
let end = rng.end_u64();
let (one_before, j, k) = item.choose_indices_unweight(beg, end);
if let Some(j) = one_before {
//<B as TimeAggregatorCommonV0Trait>::common_ingest_one_before(binner, item, j);
}
<B as TimeAggregatorCommonV0Trait>::common_ingest_unweight_range(binner, item, j..k);
} else {
error!("TODO ingest_unweight for pulse range");
err::todo();
}
}
pub fn ingest_time_weight<B>(binner: &mut B, item: &B::Input)
where
B: TimeAggregatorCommonV0Trait,
{
let self_name = B::type_name();
trace_ingest!(
"{self_name}::ingest_time_weight item len {} items_seen {}",
item.len(),
self.items_seen
);
let rng = B::common_range_current(binner);
if rng.is_time() {
let beg = rng.beg_u64();
let end = rng.end_u64();
let (one_before, j, k) = item.choose_indices_timeweight(beg, end);
if let Some(j) = one_before {
<B as TimeAggregatorCommonV0Trait>::common_ingest_one_before(binner, item, j);
}
<B as TimeAggregatorCommonV0Trait>::common_ingest_range(binner, item, j..k);
} else {
error!("TODO ingest_time_weight for pulse range");
err::todo();
}
}
}

View File

@@ -0,0 +1,84 @@
//! Helper functions to create transforms which act locally on a batch of events.
//! Tailored to the usage pattern given by `TransformQuery`.
use crate::channelevents::ChannelEvents;
use crate::eventsdim0::EventsDim0;
use items_0::transform::EventTransform;
use items_0::transform::TransformEvent;
use items_0::transform::TransformProperties;
use items_0::transform::WithTransformProperties;
use items_0::Appendable;
use items_0::AsAnyMut;
use items_0::Empty;
use items_0::Events;
use items_0::EventsNonObj;
use netpod::log::*;
use std::mem;
struct TransformEventIdentity {}
impl WithTransformProperties for TransformEventIdentity {
fn query_transform_properties(&self) -> TransformProperties {
todo!()
}
}
impl EventTransform for TransformEventIdentity {
fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
src
}
}
pub fn make_transform_identity() -> TransformEvent {
TransformEvent(Box::new(TransformEventIdentity {}))
}
struct TransformEventMinMaxAvg {}
impl WithTransformProperties for TransformEventMinMaxAvg {
fn query_transform_properties(&self) -> TransformProperties {
todo!()
}
}
impl EventTransform for TransformEventMinMaxAvg {
fn transform(&mut self, mut src: Box<dyn Events>) -> Box<dyn Events> {
src.to_min_max_avg()
}
}
pub fn make_transform_min_max_avg() -> TransformEvent {
TransformEvent(Box::new(TransformEventMinMaxAvg {}))
}
struct TransformEventPulseIdDiff {
pulse_last: Option<u64>,
}
impl WithTransformProperties for TransformEventPulseIdDiff {
fn query_transform_properties(&self) -> TransformProperties {
todo!()
}
}
impl EventTransform for TransformEventPulseIdDiff {
fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
let (tss, pulses) = EventsNonObj::into_tss_pulses(src);
let mut item = EventsDim0::empty();
let pulse_last = &mut self.pulse_last;
for (ts, pulse) in tss.into_iter().zip(pulses) {
let value = if let Some(last) = pulse_last {
pulse as i64 - *last as i64
} else {
0
};
item.push(ts, pulse, value);
*pulse_last = Some(pulse);
}
Box::new(ChannelEvents::Events(Box::new(item)))
}
}
pub fn make_transform_pulse_id_diff() -> TransformEvent {
TransformEvent(Box::new(TransformEventPulseIdDiff { pulse_last: None }))
}