From b93bb9b46715ee828ef74dd72697a82c6a89342d Mon Sep 17 00:00:00 2001
From: Dominik Werder
Date: Wed, 8 Feb 2023 10:09:17 +0100
Subject: [PATCH] Remove obsolete containers

---
 Cargo.toml                        |   2 +-
 commonio/src/commonio.rs          |  39 +-
 daqbufp2/src/client.rs            |  54 +-
 daqbufp2/src/test/binnedbinary.rs |  82 +--
 daqbufp2/src/test/events.rs       |  99 +---
 disk/src/agg.rs                   |   1 -
 disk/src/agg/enp.rs               |  26 -
 disk/src/decode.rs                | 530 -------
 disk/src/disk.rs                  |   1 -
 dq/src/bin/dq.rs                  |  42 +-
 fsio/Cargo.toml                   |  37 --
 fsio/src/fsio.rs                  | 187 -------
 items/src/binnedevents.rs         | 313 -----------
 items/src/binsdim0.rs             | 683 ------------------
 items/src/binsdim1.rs             | 617 ----------------
 items/src/eventsitem.rs           | 146 -----
 items/src/items.rs                | 243 +-------
 items/src/numops.rs               | 282 ----------
 items/src/plainevents.rs          | 190 -------
 items/src/scalarevents.rs         | 847 ------------------------
 items/src/waveevents.rs           | 561 --------------------
 items/src/xbinnedscalarevents.rs  | 520 ------------------
 items/src/xbinnedwaveevents.rs    | 550 -------------------
 items_2/src/eventsdim0.rs         |  70 +++
 24 files changed, 192 insertions(+), 5930 deletions(-)
 delete mode 100644 disk/src/agg.rs
 delete mode 100644 disk/src/agg/enp.rs
 delete mode 100644 fsio/Cargo.toml
 delete mode 100644 fsio/src/fsio.rs
 delete mode 100644 items/src/binnedevents.rs
 delete mode 100644 items/src/binsdim0.rs
 delete mode 100644 items/src/binsdim1.rs
 delete mode 100644 items/src/eventsitem.rs
 delete mode 100644 items/src/numops.rs
 delete mode 100644 items/src/plainevents.rs
 delete mode 100644 items/src/scalarevents.rs
 delete mode 100644 items/src/waveevents.rs
 delete mode 100644 items/src/xbinnedscalarevents.rs
 delete mode 100644 items/src/xbinnedwaveevents.rs

diff --git a/Cargo.toml b/Cargo.toml
index 0fb171f..d26f92f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,5 +1,5 @@
 [workspace]
-members = ["daqbuffer", "httpret", "h5out", "items", "items_2", "items_proc", "nodenet", "httpclient", "fsio", "dq"]
+members = ["daqbuffer", "httpret", "h5out", "items", "items_2", "items_proc", "nodenet", "httpclient", "dq"]
 
 [profile.release]
 opt-level = 1
diff --git a/commonio/src/commonio.rs b/commonio/src/commonio.rs
index e644ace..836e192 100644
--- a/commonio/src/commonio.rs
+++ b/commonio/src/commonio.rs
@@ -1,20 +1,33 @@
 pub mod ringbuf;
 
 use async_channel::Sender;
-use err::{ErrStr, Error};
+use err::ErrStr;
+use err::Error;
 use futures_util::StreamExt;
-use items::eventsitem::EventsItem;
-use items::{Sitemty, StatsItem, StreamItem};
+use items::Sitemty;
+use items::StatsItem;
+use items::StreamItem;
 use netpod::log::*;
-use netpod::{DiskStats, OpenStats, ReadExactStats, ReadStats, SeekStats};
-use serde::{Deserialize, Serialize};
+use netpod::DiskStats;
+use netpod::OpenStats;
+use netpod::ReadExactStats;
+use netpod::ReadStats;
+use netpod::SeekStats;
+use serde::Deserialize;
+use serde::Serialize;
 use std::fmt;
-use std::io::{self, ErrorKind, SeekFrom};
-use std::path::{Path, PathBuf};
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::io;
+use std::io::ErrorKind;
+use std::io::SeekFrom;
+use std::path::Path;
+use std::path::PathBuf;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
 use std::time::Instant;
-use tokio::fs::{File, OpenOptions};
-use tokio::io::{AsyncReadExt, AsyncSeekExt};
+use tokio::fs::File;
+use tokio::fs::OpenOptions;
+use tokio::io::AsyncReadExt;
+use tokio::io::AsyncSeekExt;
 
 const LOG_IO: bool = true;
 const STATS_IO: bool = true;
@@ -62,8 +75,10 @@ pub async fn tokio_rand() -> Result {
     Ok(x)
 }
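For orientation in the hunks that follow: `Sitemty` is the workspace's stream-item alias, and the match arms throughout this patch (log, stats, data, range-complete) all destructure it the same way. Below is a minimal sketch of that nesting, reconstructed from those match arms; the authoritative definitions live in the `items` and `err` crates, so `Error`, `LogItem` and `EventDataReadStats` here are illustrative stand-ins.

```rust
// Sketch of the nesting behind Sitemty<T>; stand-in types, not the
// verbatim definitions from the items/err crates.
#[derive(Debug)]
pub struct Error; // stand-in for err::Error

#[derive(Debug)]
pub struct LogItem; // stand-in for the log payload emitted via Streamlog

#[derive(Debug)]
pub struct EventDataReadStats {
    pub parsed_bytes: u64,
}

#[derive(Debug)]
pub enum StatsItem {
    EventDataReadStats(EventDataReadStats),
}

#[derive(Debug)]
pub enum RangeCompletableItem<T> {
    RangeComplete,
    Data(T),
}

#[derive(Debug)]
pub enum StreamItem<T> {
    DataItem(T),
    Log(LogItem),
    Stats(StatsItem),
}

// Each stream element is either an error or one of the item kinds above;
// only DataItem(Data(..)) carries payload, which is why the non-data
// variants no longer need a frame type id.
pub type Sitemty<T> = Result<StreamItem<RangeCompletableItem<T>>, Error>;
```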
+pub struct DummyEvent; + pub struct StatsChannel { - chn: Sender>, + chn: Sender>, } impl fmt::Debug for StatsChannel { @@ -73,7 +88,7 @@ impl fmt::Debug for StatsChannel { } impl StatsChannel { - pub fn new(chn: Sender>) -> Self { + pub fn new(chn: Sender>) -> Self { Self { chn } } diff --git a/daqbufp2/src/client.rs b/daqbufp2/src/client.rs index cf781b4..3602805 100644 --- a/daqbufp2/src/client.rs +++ b/daqbufp2/src/client.rs @@ -1,17 +1,24 @@ use crate::err::ErrConv; -use chrono::{DateTime, Utc}; +use chrono::DateTime; +use chrono::Utc; use disk::streamlog::Streamlog; use err::Error; use futures_util::TryStreamExt; use http::StatusCode; use httpclient::HttpBodyAsAsyncRead; use hyper::Body; -use items::xbinnedwaveevents::XBinnedWaveEvents; -use items::{Sitemty, StreamItem}; +use items::StreamItem; use netpod::log::*; -use netpod::query::{BinnedQuery, CacheUsage}; +use netpod::query::BinnedQuery; +use netpod::query::CacheUsage; +use netpod::AggKind; use netpod::AppendToUrl; -use netpod::{AggKind, ByteSize, Channel, HostPort, NanoRange, PerfOpts, APP_OCTET}; +use netpod::ByteSize; +use netpod::Channel; +use netpod::HostPort; +use netpod::NanoRange; +use netpod::PerfOpts; +use netpod::APP_OCTET; use streams::frames::inmem::InMemoryFrameAsyncReadStream; use url::Url; @@ -110,44 +117,11 @@ pub async fn get_binned( info!("Stats: {:?}", item); None } - StreamItem::DataItem(frame) => { + StreamItem::DataItem(_frame) => { // TODO // The expected type nowadays depends on the channel and agg-kind. err::todo(); - type ExpectedType = Sitemty>; - // TODO the non-data variants of Sitemty no longer carry a frame id. - //let type_id_exp = ::FRAME_TYPE_ID; - let type_id_exp: u32 = err::todoval(); - if frame.tyid() != type_id_exp { - error!("unexpected type id got {} exp {}", frame.tyid(), type_id_exp); - } - let n1 = frame.buf().len(); - match rmp_serde::from_slice::(frame.buf()) { - Ok(item) => match item { - Ok(item) => { - match item { - StreamItem::Log(item) => { - Streamlog::emit(&item); - } - StreamItem::Stats(item) => { - info!("Stats: {:?}", item); - } - StreamItem::DataItem(item) => { - info!("DataItem: {:?}", item); - } - } - Some(Ok(())) - } - Err(e) => { - error!("len {} error frame {:?}", n1, e); - Some(Err(e)) - } - }, - Err(e) => { - error!("len {} {:?}", n1, e); - Some(Err(e.into())) - } - } + Some(Ok(())) } }, Err(e) => Some(Err(Error::with_msg(format!("{:?}", e)))), diff --git a/daqbufp2/src/test/binnedbinary.rs b/daqbufp2/src/test/binnedbinary.rs index e17259c..b4bf1e6 100644 --- a/daqbufp2/src/test/binnedbinary.rs +++ b/daqbufp2/src/test/binnedbinary.rs @@ -1,19 +1,27 @@ use crate::err::ErrConv; use crate::nodes::require_test_hosts_running; -use chrono::{DateTime, Utc}; +use chrono::DateTime; +use chrono::Utc; use disk::streamlog::Streamlog; use err::Error; -use futures_util::{StreamExt, TryStreamExt}; +use futures_util::StreamExt; +use futures_util::TryStreamExt; use http::StatusCode; use httpclient::HttpBodyAsAsyncRead; use hyper::Body; -use items::binsdim0::MinMaxAvgDim0Bins; -use items::{RangeCompletableItem, Sitemty, StatsItem, StreamItem, WithLen}; +use items::StreamItem; use items_0::subfr::SubFrId; use netpod::log::*; -use netpod::query::{BinnedQuery, CacheUsage}; +use netpod::query::BinnedQuery; +use netpod::query::CacheUsage; +use netpod::AggKind; use netpod::AppendToUrl; -use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange, PerfOpts, APP_OCTET}; +use netpod::Channel; +use netpod::Cluster; +use netpod::HostPort; +use netpod::NanoRange; +use netpod::PerfOpts; +use 
netpod::APP_OCTET; use serde::de::DeserializeOwned; use std::fmt; use std::future::ready; @@ -144,6 +152,7 @@ where } } +#[allow(unused)] #[derive(Debug)] pub struct BinnedResponse { bin_count: u64, @@ -178,6 +187,7 @@ impl BinnedResponse { } } +// TODO async fn consume_binned_response(inp: InMemoryFrameAsyncReadStream) -> Result where NTY: fmt::Debug + SubFrId + DeserializeOwned, @@ -197,68 +207,16 @@ where debug!("Stats: {:?}", item); None } - StreamItem::DataItem(frame) => { - // TODO non-data Sitety no longer carry frame id: - //if frame.tyid() != > as FrameType>::FRAME_TYPE_ID { - if frame.tyid() != err::todoval::() { - error!("test receives unexpected tyid {:x}", frame.tyid()); - } - match rmp_serde::from_slice::>>(frame.buf()) { - Ok(item) => match item { - Ok(item) => match item { - StreamItem::Log(item) => { - Streamlog::emit(&item); - Some(Ok(StreamItem::Log(item))) - } - item => Some(Ok(item)), - }, - Err(e) => { - error!("TEST GOT ERROR FRAME: {:?}", e); - Some(Err(e)) - } - }, - Err(e) => { - error!("{:?}", e); - Some(Err(e.into())) - } - } + StreamItem::DataItem(_frame) => { + err::todo(); + Some(Ok(())) } }, Err(e) => Some(Err(Error::with_msg(format!("WEIRD EMPTY ERROR {:?}", e)))), }; ready(g) }) - .fold(BinnedResponse::new(), |mut a, k| { - let g = match k { - Ok(StreamItem::Log(_item)) => { - a.log_item_count += 1; - a - } - Ok(StreamItem::Stats(item)) => match item { - StatsItem::EventDataReadStats(item) => { - a.bytes_read += item.parsed_bytes; - a - } - _ => a, - }, - Ok(StreamItem::DataItem(item)) => match item { - RangeCompletableItem::RangeComplete => { - a.range_complete_count += 1; - a - } - RangeCompletableItem::Data(item) => { - a.data_item_count += 1; - a.bin_count += WithLen::len(&item) as u64; - a - } - }, - Err(_e) => { - a.err_item_count += 1; - a - } - }; - ready(g) - }); + .fold(BinnedResponse::new(), |a, _x| ready(a)); let ret = s1.await; debug!("BinnedResponse: {:?}", ret); Ok(ret) diff --git a/daqbufp2/src/test/events.rs b/daqbufp2/src/test/events.rs index 1c7b08f..ac67cd4 100644 --- a/daqbufp2/src/test/events.rs +++ b/daqbufp2/src/test/events.rs @@ -1,18 +1,26 @@ use crate::err::ErrConv; use crate::nodes::require_test_hosts_running; -use chrono::{DateTime, Utc}; +use chrono::DateTime; +use chrono::Utc; use disk::streamlog::Streamlog; use err::Error; -use futures_util::{StreamExt, TryStreamExt}; +use futures_util::StreamExt; +use futures_util::TryStreamExt; use http::StatusCode; use httpclient::HttpBodyAsAsyncRead; use hyper::Body; -use items::numops::NumOps; -use items::scalarevents::ScalarEvents; -use items::{RangeCompletableItem, Sitemty, StatsItem, StreamItem, WithLen}; +use items::StreamItem; +use netpod::log::*; use netpod::query::PlainEventsQuery; -use netpod::{log::*, AggKind}; -use netpod::{AppendToUrl, Channel, Cluster, HostPort, NanoRange, PerfOpts, APP_JSON, APP_OCTET}; +use netpod::AggKind; +use netpod::AppendToUrl; +use netpod::Channel; +use netpod::Cluster; +use netpod::HostPort; +use netpod::NanoRange; +use netpod::PerfOpts; +use netpod::APP_JSON; +use netpod::APP_OCTET; use serde_json::Value as JsonValue; use std::fmt::Debug; use std::future::ready; @@ -42,7 +50,7 @@ async fn get_plain_events_binary_0_inner() -> Result<(), Error> { let rh = require_test_hosts_running()?; let cluster = &rh.cluster; if true { - get_plain_events_binary::( + get_plain_events_binary( "scalar-i32-be", "1970-01-01T00:20:10.000Z", "1970-01-01T00:20:50.000Z", @@ -60,17 +68,14 @@ fn get_plain_events_binary_0() { 
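Both consume helpers in these tests (the binned one above and the events one below) now stub the frame handling with `err::todo()`. The step they are waiting on is the one the removed code performed: verify the frame type id, then rmp-deserialize the buffer into a concrete `Sitemty<T>`. A sketch of that step, adapted from the removed code; the frame-id check is omitted here because the new item types have not yet pinned down their ids:

```rust
// Adapted from the decoding logic removed in this patch. Assumes the
// concrete T is known from channel and agg-kind, which is exactly the
// open TODO in the client and tests. Also assumes, as the old code did,
// that err::Error deserializes and converts from rmp_serde's error.
fn decode_data_frame<T>(buf: &[u8]) -> Sitemty<T>
where
    Sitemty<T>: serde::de::DeserializeOwned,
{
    match rmp_serde::from_slice::<Sitemty<T>>(buf) {
        // The deserializer's Ok already contains the stream's own
        // Ok/Err channel, so it is forwarded as-is.
        Ok(item) => item,
        Err(e) => Err(e.into()),
    }
}
```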
taskrun::run(get_plain_events_binary_0_inner()).unwrap(); } -async fn get_plain_events_binary( +async fn get_plain_events_binary( channel_name: &str, beg_date: &str, end_date: &str, cluster: &Cluster, _expect_range_complete: bool, _expect_event_count: u64, -) -> Result -where - NTY: NumOps, -{ +) -> Result { let t1 = Utc::now(); let node0 = &cluster.nodes[0]; let beg_date: DateTime = beg_date.parse()?; @@ -110,7 +115,7 @@ where } let s1 = HttpBodyAsAsyncRead::new(res); let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap); - let res = consume_plain_events_binary::(s2).await?; + let res = consume_plain_events_binary(s2).await?; let t2 = chrono::Utc::now(); let ms = t2.signed_duration_since(t1).num_milliseconds() as u64; // TODO add timeout @@ -122,6 +127,7 @@ where } } +#[allow(unused)] #[derive(Debug)] pub struct EventsResponse { event_count: u64, @@ -156,9 +162,8 @@ impl EventsResponse { } } -async fn consume_plain_events_binary(inp: InMemoryFrameAsyncReadStream) -> Result +async fn consume_plain_events_binary(inp: InMemoryFrameAsyncReadStream) -> Result where - NTY: NumOps, T: AsyncRead + Unpin, { let s1 = inp @@ -174,70 +179,16 @@ where debug!("Stats: {:?}", item); None } - StreamItem::DataItem(frame) => { - // TODO the non-data variants of Sitemty no longer carry frame type id: - //if frame.tyid() != > as FrameType>::FRAME_TYPE_ID { - if frame.tyid() != err::todoval::() { - error!("test receives unexpected tyid {:x}", frame.tyid()); - None - } else { - match rmp_serde::from_slice::>>(frame.buf()) { - Ok(item) => match item { - Ok(item) => match item { - StreamItem::Log(item) => { - Streamlog::emit(&item); - Some(Ok(StreamItem::Log(item))) - } - item => Some(Ok(item)), - }, - Err(e) => { - error!("TEST GOT ERROR FRAME: {:?}", e); - Some(Err(e)) - } - }, - Err(e) => { - error!("{:?}", e); - Some(Err(e.into())) - } - } - } + StreamItem::DataItem(_frame) => { + err::todo(); + Some(Ok(())) } }, Err(e) => Some(Err(Error::with_msg(format!("WEIRD EMPTY ERROR {:?}", e)))), }; ready(g) }) - .fold(EventsResponse::new(), |mut a, k| { - let g = match k { - Ok(StreamItem::Log(_item)) => { - a.log_item_count += 1; - a - } - Ok(StreamItem::Stats(item)) => match item { - StatsItem::EventDataReadStats(item) => { - a.bytes_read += item.parsed_bytes; - a - } - _ => a, - }, - Ok(StreamItem::DataItem(item)) => match item { - RangeCompletableItem::RangeComplete => { - a.range_complete_count += 1; - a - } - RangeCompletableItem::Data(item) => { - a.data_item_count += 1; - a.event_count += WithLen::len(&item) as u64; - a - } - }, - Err(_e) => { - a.err_item_count += 1; - a - } - }; - ready(g) - }); + .fold(EventsResponse::new(), |a, _x| ready(a)); let ret = s1.await; debug!("result: {:?}", ret); Ok(ret) diff --git a/disk/src/agg.rs b/disk/src/agg.rs deleted file mode 100644 index f4b0d43..0000000 --- a/disk/src/agg.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod enp; diff --git a/disk/src/agg/enp.rs b/disk/src/agg/enp.rs deleted file mode 100644 index c4fee51..0000000 --- a/disk/src/agg/enp.rs +++ /dev/null @@ -1,26 +0,0 @@ -use items::numops::NumOps; -use items::scalarevents::ScalarEvents; -use items::EventsNodeProcessor; -use netpod::AggKind; -use netpod::Shape; -use std::marker::PhantomData; - -pub struct Identity { - _m1: PhantomData, -} - -impl EventsNodeProcessor for Identity -where - NTY: NumOps, -{ - type Input = ScalarEvents; - type Output = ScalarEvents; - - fn create(_shape: Shape, _agg_kind: AggKind) -> Self { - Self { _m1: PhantomData } - } - - fn process(&self, inp: Self::Input) -> 
Self::Output { - inp - } -} diff --git a/disk/src/decode.rs b/disk/src/decode.rs index 22d7898..1640a44 100644 --- a/disk/src/decode.rs +++ b/disk/src/decode.rs @@ -1,23 +1,8 @@ -use crate::agg::enp::Identity; use crate::eventblobs::EventChunkerMultifile; use err::Error; use futures_util::Stream; use futures_util::StreamExt; use items::eventfull::EventFull; -use items::eventsitem::EventsItem; -use items::numops::BoolNum; -use items::numops::NumOps; -use items::numops::StringNum; -use items::plainevents::PlainEvents; -use items::plainevents::ScalarPlainEvents; -use items::scalarevents::ScalarEvents; -use items::waveevents::WaveEvents; -use items::waveevents::WaveNBinner; -use items::waveevents::WavePlainProc; -use items::waveevents::WaveXBinner; -use items::Appendable; -use items::EventAppendable; -use items::EventsNodeProcessor; use items::RangeCompletableItem; use items::Sitemty; use items::StreamItem; @@ -32,7 +17,6 @@ use netpod::ScalarType; use netpod::Shape; use std::marker::PhantomData; use std::mem; -use std::mem::size_of; use std::pin::Pin; use std::task::Context; use std::task::Poll; @@ -58,87 +42,6 @@ pub enum Endian { Big, } -pub trait NumFromBytes { - fn convert(buf: &[u8], big_endian: bool) -> NTY; -} - -impl NumFromBytes for BoolNum { - fn convert(buf: &[u8], _big_endian: bool) -> BoolNum { - BoolNum(buf[0]) - } -} - -impl NumFromBytes for BoolNum { - fn convert(buf: &[u8], _big_endian: bool) -> BoolNum { - BoolNum(buf[0]) - } -} - -impl NumFromBytes for StringNum { - fn convert(buf: &[u8], _big_endian: bool) -> StringNum { - if false { - // TODO remove - netpod::log::error!("TODO NumFromBytes for StringNum buf len {}", buf.len()); - } - let s = if buf.len() >= 250 { - String::from_utf8_lossy(&buf[..250]) - } else { - String::from_utf8_lossy(buf) - }; - Self(s.into()) - } -} - -impl NumFromBytes for StringNum { - fn convert(buf: &[u8], _big_endian: bool) -> StringNum { - if false { - // TODO remove - netpod::log::error!("TODO NumFromBytes for StringNum buf len {}", buf.len()); - } - let s = if buf.len() >= 250 { - String::from_utf8_lossy(&buf[..250]) - } else { - String::from_utf8_lossy(buf) - }; - Self(s.into()) - } -} - -macro_rules! impl_num_from_bytes_end { - ($nty:ident, $nl:expr, $end:ident, $ec:ident) => { - impl NumFromBytes<$nty, $end> for $nty { - fn convert(buf: &[u8], big_endian: bool) -> $nty { - // Error in data on disk: - // Can not rely on byte order as stated in the channel config. - //$nty::$ec(*arrayref::array_ref![buf, 0, $nl]) - if big_endian { - $nty::from_be_bytes(*arrayref::array_ref![buf, 0, $nl]) - } else { - $nty::from_le_bytes(*arrayref::array_ref![buf, 0, $nl]) - } - } - } - }; -} - -macro_rules! 
impl_num_from_bytes { - ($nty:ident, $nl:expr) => { - impl_num_from_bytes_end!($nty, $nl, LittleEndian, from_le_bytes); - impl_num_from_bytes_end!($nty, $nl, BigEndian, from_be_bytes); - }; -} - -impl_num_from_bytes!(u8, 1); -impl_num_from_bytes!(u16, 2); -impl_num_from_bytes!(u32, 4); -impl_num_from_bytes!(u64, 8); -impl_num_from_bytes!(i8, 1); -impl_num_from_bytes!(i16, 2); -impl_num_from_bytes!(i32, 4); -impl_num_from_bytes!(i64, 8); -impl_num_from_bytes!(f32, 4); -impl_num_from_bytes!(f64, 8); - pub trait ScalarValueFromBytes { fn convert(buf: &[u8], endian: Endian) -> Result; fn convert_dim1(buf: &[u8], endian: Endian, n: usize) -> Result, Error>; @@ -380,231 +283,6 @@ fn make_scalar_conv( Ok(ret) } -pub trait EventValueFromBytes -where - NTY: NumFromBytes, -{ - type Output; - type Batch: Appendable + EventAppendable; - // The written data on disk has errors: - // The endian as stated in the channel config does not match written events. - // Therefore, can not rely on that but have to check for each single event... - fn convert(&self, buf: &[u8], big_endian: bool) -> Result; -} - -impl EventValueFromBytes for EventValuesDim0Case -where - NTY: NumOps + NumFromBytes, -{ - type Output = NTY; - type Batch = ScalarEvents; - - fn convert(&self, buf: &[u8], big_endian: bool) -> Result { - Ok(NTY::convert(buf, big_endian)) - } -} - -impl EventValueFromBytes for EventValuesDim1Case -where - NTY: NumOps + NumFromBytes, -{ - type Output = Vec; - type Batch = WaveEvents; - - fn convert(&self, buf: &[u8], big_endian: bool) -> Result { - let es = size_of::(); - let n1 = buf.len() / es; - if n1 != self.n as usize { - return Err(Error::with_msg(format!("ele count got {} exp {}", n1, self.n))); - } - let mut vals = vec![]; - // TODO could optimize using unsafe code.. - for n2 in 0..n1 { - let i1 = es * n2; - vals.push(>::convert( - &buf[i1..(i1 + es)], - big_endian, - )); - } - Ok(vals) - } -} - -pub trait EventValueShape: EventValueFromBytes + Send + Unpin -where - NTY: NumFromBytes, -{ - type NumXAggToSingleBin: EventsNodeProcessor>::Batch>; - type NumXAggToNBins: EventsNodeProcessor>::Batch>; - type NumXAggPlain: EventsNodeProcessor>::Batch>; -} - -pub struct EventValuesDim0Case { - _m1: PhantomData, -} - -impl EventValuesDim0Case { - pub fn new() -> Self { - Self { _m1: PhantomData } - } -} - -impl EventValueShape for EventValuesDim0Case -where - NTY: NumOps + NumFromBytes, -{ - type NumXAggToSingleBin = Identity; - // TODO is this sufficient? 
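The `impl_num_from_bytes!` macro deleted above generated the `NumFromBytes` impls for all primitive types, and its one important behavior is stated in the comment inside it: the byte order recorded in the channel config cannot be trusted, so each event's own big-endian flag decides. Expanded for a single type, the generated conversion amounts to this sketch (using std's `try_into` where the original used the `arrayref` macro):

```rust
// What impl_num_from_bytes! expanded to for one type, written as a
// standalone function: the per-event big_endian flag wins, never the
// endianness stated in the channel config.
fn f64_from_bytes(buf: &[u8], big_endian: bool) -> f64 {
    let a: [u8; 8] = buf[..8].try_into().expect("event payload shorter than 8 bytes");
    if big_endian {
        f64::from_be_bytes(a)
    } else {
        f64::from_le_bytes(a)
    }
}
```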
- type NumXAggToNBins = Identity; - type NumXAggPlain = Identity; -} - -pub struct EventValuesDim1Case { - n: u32, - _m1: PhantomData, -} - -impl EventValuesDim1Case { - pub fn new(n: u32) -> Self { - Self { n, _m1: PhantomData } - } -} - -impl EventValueShape for EventValuesDim1Case -where - NTY: NumOps + NumFromBytes, -{ - type NumXAggToSingleBin = WaveXBinner; - type NumXAggToNBins = WaveNBinner; - type NumXAggPlain = WavePlainProc; -} - -pub struct EventsDecodedStream -where - NTY: NumOps + NumFromBytes, - END: Endianness, - EVS: EventValueShape, -{ - evs: EVS, - event_blobs: EventChunkerMultifile, - completed: bool, - errored: bool, - _m1: PhantomData, - _m2: PhantomData, - _m3: PhantomData, -} - -impl EventsDecodedStream -where - NTY: NumOps + NumFromBytes, - END: Endianness, - EVS: EventValueShape + EventValueFromBytes, -{ - pub fn new(evs: EVS, event_blobs: EventChunkerMultifile) -> Self { - Self { - evs, - event_blobs, - completed: false, - errored: false, - _m1: PhantomData, - _m2: PhantomData, - _m3: PhantomData, - } - } - - fn decode(&mut self, ev: &EventFull) -> Result>::Batch>, Error> { - //let mut ret = <>::Batch as Appendable>::empty(); - //let mut ret = EventValues::<>::Output>::empty(); - let mut ret = None; - //ret.tss.reserve(ev.tss.len()); - //ret.values.reserve(ev.tss.len()); - for i1 in 0..ev.tss.len() { - // TODO check that dtype, event endianness and event shape match our static - // expectation about the data in this channel. - let _ty = &ev.scalar_types[i1]; - let be = ev.be[i1]; - // Too bad, data on disk is inconsistent, can not rely on endian as stated in channel config. - if false && be != END::is_big() { - return Err(Error::with_msg(format!( - "endian mismatch in event got {} exp {}", - be, - END::is_big() - ))); - } - let decomp = ev.decomps[i1].as_ref().unwrap().as_ref(); - let val = self.evs.convert(decomp, be)?; - let k = <>::Batch as EventAppendable>::append_event( - ret, - ev.tss[i1], - ev.pulses[i1], - val, - ); - ret = Some(k); - } - Ok(ret) - } -} - -impl Stream for EventsDecodedStream -where - NTY: NumOps + NumFromBytes, - END: Endianness, - EVS: EventValueShape + EventValueFromBytes, -{ - type Item = Result>::Batch>>, Error>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - use Poll::*; - loop { - break if self.completed { - panic!("poll_next on completed") - } else if self.errored { - self.completed = true; - Ready(None) - } else { - match self.event_blobs.poll_next_unpin(cx) { - Ready(item) => match item { - Some(item) => match item { - Ok(item) => match item { - StreamItem::DataItem(item) => match item { - RangeCompletableItem::RangeComplete => { - Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)))) - } - RangeCompletableItem::Data(item) => match self.decode(&item) { - Ok(res) => match res { - Some(res) => { - Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(res))))) - } - None => { - continue; - } - }, - Err(e) => { - self.errored = true; - Ready(Some(Err(e))) - } - }, - }, - StreamItem::Log(item) => Ready(Some(Ok(StreamItem::Log(item)))), - StreamItem::Stats(item) => Ready(Some(Ok(StreamItem::Stats(item)))), - }, - Err(e) => { - self.errored = true; - Ready(Some(Err(e))) - } - }, - None => { - self.completed = true; - Ready(None) - } - }, - Pending => Pending, - } - }; - } - } -} - pub struct EventsDynStream { scalar_type: ScalarType, shape: Shape, @@ -749,211 +427,3 @@ impl Stream for EventsDynStream { } } } - -pub struct EventsItemStream { - inp: Pin>>>, - done: bool, - complete: 
bool, -} - -impl EventsItemStream { - pub fn new(inp: Pin>>>) -> Self { - Self { - inp, - done: false, - complete: false, - } - } - - // TODO need some default expectation about the content type, because real world data does not - // always contain that information per event, or even contains wrong information. - fn decode(&mut self, ev: &EventFull) -> Result, Error> { - // TODO define expected endian from parameters: - let big_endian = false; - // TODO: - let mut tyi = None; - let mut ret = None; - for i1 in 0..ev.tss.len() { - let ts = ev.tss[i1]; - let pulse = ev.pulses[i1]; - // TODO check that dtype, event endianness and event shape match our static - // expectation about the data in this channel. - let _ty = &ev.scalar_types[i1]; - let be = ev.be[i1]; - if be != big_endian { - return Err(Error::with_msg(format!("big endian mismatch {} vs {}", be, big_endian))); - } - // TODO bad, data on disk is inconsistent, can not rely on endian as stated in channel config. - let decomp = ev.decomp(i1); - // If not done yet, infer the actual type from the (undocumented) combinations of channel - // config parameters and values in the event data. - // TODO - match &tyi { - Some(_) => {} - None => { - //let cont = EventValues::::empty(); - tyi = Some((ev.scalar_types[i1].clone(), ev.shapes[i1].clone())); - match &tyi.as_ref().unwrap().1 { - Shape::Scalar => match &tyi.as_ref().unwrap().0 { - ScalarType::U8 => { - // TODO - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I8(cont)))); - } - ScalarType::U16 => { - // TODO - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I16(cont)))); - } - ScalarType::U32 => { - // TODO - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont)))); - } - ScalarType::U64 => { - // TODO - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont)))); - } - ScalarType::I8 => { - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I8(cont)))); - } - ScalarType::I16 => { - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I16(cont)))); - } - ScalarType::I32 => { - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont)))); - } - ScalarType::I64 => { - // TODO - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont)))); - } - ScalarType::F32 => { - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::F32(cont)))); - } - ScalarType::F64 => { - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::F64(cont)))); - } - ScalarType::BOOL => { - // TODO - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I8(cont)))); - } - ScalarType::STRING => { - // TODO - let cont = ScalarEvents::::empty(); - ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::String(cont)))); - } - }, - Shape::Wave(_) => todo!(), - Shape::Image(..) => todo!(), - } - } - }; - // TODO here, I expect that we found the type. 
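The deleted `EventsItemStream::decode` works in two phases: on the first event it infers the container from the `(ScalarType, Shape)` pair (with several unsigned types still mapped onto signed containers as TODOs), and every later event must match and is appended into that container, as the `F64` arm further down shows. Reduced to its control flow, the inference phase looks like this sketch (`ScalarType` and `Shape` are the netpod enums; only two arms shown):

```rust
// Reduced sketch of the lazy type-inference in the deleted
// EventsItemStream::decode: the first event picks the container,
// later events must match it. Only two of the arms are shown.
use netpod::ScalarType;
use netpod::Shape;

enum Container {
    F64 { tss: Vec<u64>, values: Vec<f64> },
    I32 { tss: Vec<u64>, values: Vec<i32> },
}

fn container_for(scalar_type: &ScalarType, shape: &Shape) -> Container {
    match (shape, scalar_type) {
        (Shape::Scalar, ScalarType::F64) => Container::F64 {
            tss: Vec::new(),
            values: Vec::new(),
        },
        (Shape::Scalar, ScalarType::I32) => Container::I32 {
            tss: Vec::new(),
            values: Vec::new(),
        },
        // Wave and image shapes were still todo!() in the deleted code.
        _ => todo!(),
    }
}
```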
- let tyi = tyi.as_ref().unwrap(); - match &tyi.1 { - Shape::Scalar => match &tyi.0 { - ScalarType::U8 => todo!(), - ScalarType::U16 => todo!(), - ScalarType::U32 => todo!(), - ScalarType::U64 => todo!(), - ScalarType::I8 => todo!(), - ScalarType::I16 => todo!(), - ScalarType::I32 => todo!(), - ScalarType::I64 => todo!(), - ScalarType::F32 => todo!(), - ScalarType::F64 => { - let conv = EventValuesDim0Case::::new(); - let val = EventValueFromBytes::<_, LittleEndian>::convert(&conv, decomp, big_endian)?; - match &mut ret { - Some(ret) => match ret { - EventsItem::Plain(ret) => match ret { - PlainEvents::Scalar(ret) => match ret { - ScalarPlainEvents::F64(ret) => { - ret.tss.push(ts); - // TODO - let _ = pulse; - ret.values.push(val); - } - _ => panic!(), - }, - }, - EventsItem::XBinnedEvents(_) => todo!(), - }, - None => panic!(), - } - } - ScalarType::BOOL => todo!(), - ScalarType::STRING => todo!(), - }, - Shape::Wave(_) => todo!(), - Shape::Image(_, _) => todo!(), - } - //let val = self.evs.convert(decomp, be)?; - //let k = <>::Batch as EventAppendable>::append_event(ret, ev.tss[i1], val); - } - Ok(ret) - } -} - -impl Stream for EventsItemStream { - type Item = Sitemty; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - use Poll::*; - loop { - break if self.complete { - panic!("poll_next on complete") - } else if self.done { - self.complete = true; - Ready(None) - } else { - match self.inp.poll_next_unpin(cx) { - Ready(item) => match item { - Some(item) => match item { - Ok(item) => match item { - StreamItem::DataItem(item) => match item { - RangeCompletableItem::RangeComplete => { - Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)))) - } - RangeCompletableItem::Data(item) => match self.decode(&item) { - Ok(res) => match res { - Some(res) => { - Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(res))))) - } - None => { - continue; - } - }, - Err(e) => { - self.done = true; - Ready(Some(Err(e))) - } - }, - }, - StreamItem::Log(item) => Ready(Some(Ok(StreamItem::Log(item)))), - StreamItem::Stats(item) => Ready(Some(Ok(StreamItem::Stats(item)))), - }, - Err(e) => { - self.done = true; - Ready(Some(Err(e))) - } - }, - None => { - self.done = true; - Ready(None) - } - }, - Pending => Pending, - } - }; - } - } -} diff --git a/disk/src/disk.rs b/disk/src/disk.rs index 0843928..1095f45 100644 --- a/disk/src/disk.rs +++ b/disk/src/disk.rs @@ -1,4 +1,3 @@ -pub mod agg; #[cfg(test)] pub mod aggtest; pub mod binnedstream; diff --git a/dq/src/bin/dq.rs b/dq/src/bin/dq.rs index fb0577f..89e9b98 100644 --- a/dq/src/bin/dq.rs +++ b/dq/src/bin/dq.rs @@ -1,9 +1,17 @@ -use clap::{ArgAction, Parser}; +use clap::ArgAction; +use clap::Parser; use err::Error; +#[allow(unused)] use netpod::log::*; -use netpod::{ByteOrder, ByteSize, Channel, ChannelConfig, NanoRange, Shape}; +use netpod::ByteOrder; +use netpod::ByteSize; +use netpod::Channel; +use netpod::ChannelConfig; +use netpod::NanoRange; +use netpod::Shape; use std::path::PathBuf; -use streams::eventchunker::{EventChunker, EventChunkerConf}; +use streams::eventchunker::EventChunker; +use streams::eventchunker::EventChunkerConf; use tokio::fs::File; use tokio::io::AsyncReadExt; @@ -93,33 +101,9 @@ pub fn main() -> Result<(), Error> { let stats_conf = EventChunkerConf { disk_stats_every: ByteSize::mb(2), }; - let chunks = + let _chunks = EventChunker::from_start(inp, channel_config.clone(), range, stats_conf, path, false, true); - use futures_util::stream::StreamExt; - use items::WithLen; - //let evs 
= EventValuesDim0Case::::new(); - let mut stream = disk::decode::EventsItemStream::new(Box::pin(chunks)); - while let Some(item) = stream.next().await { - let item = item?; - match item { - items::StreamItem::DataItem(item) => { - match item { - items::RangeCompletableItem::RangeComplete => { - warn!("RangeComplete"); - } - items::RangeCompletableItem::Data(item) => { - info!("{:?} ({} events)", item, item.len()); - } - }; - } - items::StreamItem::Log(k) => { - eprintln!("Log item {:?}", k); - } - items::StreamItem::Stats(k) => { - eprintln!("Stats item {:?}", k); - } - } - } + err::todo(); Ok(()) } } diff --git a/fsio/Cargo.toml b/fsio/Cargo.toml deleted file mode 100644 index 33b0cb1..0000000 --- a/fsio/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "fsio" -version = "0.0.2" -authors = ["Dominik Werder "] -edition = "2021" - -[lib] -path = "src/fsio.rs" - -[dependencies] -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -serde_cbor = "0.11.1" -chrono = { version = "0.4.19", features = ["serde"] } -tokio = { version = "1.11.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] } -#tokio-stream = {version = "0.1.5", features = ["fs"]} -#hyper = { version = "0.14", features = ["http1", "http2", "client", "server", "tcp", "stream"] } -async-channel = "1.6" -bytes = "1.0.1" -crc32fast = "1.2.1" -arrayref = "0.3.6" -byteorder = "1.4.3" -futures-core = "0.3.14" -futures-util = "0.3.14" -tracing = "0.1.25" -tracing-futures = { version = "0.2.5", features = ["futures-01", "futures-03", "std-future"] } -fs2 = "0.4.3" -libc = "0.2.93" -hex = "0.4.3" -url = "2.2.2" -tiny-keccak = { version = "2.0", features = ["sha3"] } -err = { path = "../err" } -taskrun = { path = "../taskrun" } -netpod = { path = "../netpod" } -bitshuffle = { path = "../bitshuffle" } -items = { path = "../items" } -streams = { path = "../streams" } diff --git a/fsio/src/fsio.rs b/fsio/src/fsio.rs deleted file mode 100644 index 07c34b2..0000000 --- a/fsio/src/fsio.rs +++ /dev/null @@ -1,187 +0,0 @@ -use err::Error; -use items::plainevents::PlainEvents; -use netpod::log::*; -use netpod::Channel; -#[allow(unused)] -use std::os::unix::prelude::OpenOptionsExt; -use std::os::unix::prelude::{AsRawFd, OsStrExt}; -use std::path::PathBuf; -use tokio::fs::OpenOptions; - -const BASE: &str = "/data/daqbuffer-testdata"; - -fn fcntl_xlock(file: &mut std::fs::File, beg: i64, cmd: libc::c_int, ty: i32) -> i32 { - unsafe { - let p = libc::flock { - l_type: ty as i16, - l_whence: libc::SEEK_SET as i16, - l_start: beg, - l_len: 8, - l_pid: 0, - }; - libc::fcntl(file.as_raw_fd(), cmd, &p) - } -} - -fn wlock(file: &mut std::fs::File, beg: i64) -> i32 { - fcntl_xlock(file, beg, libc::F_OFD_SETLK, libc::F_WRLCK) -} - -fn rlock(file: &mut std::fs::File, beg: i64) -> i32 { - fcntl_xlock(file, beg, libc::F_OFD_SETLK, libc::F_RDLCK) -} - -fn unlock(file: &mut std::fs::File, beg: i64) -> i32 { - fcntl_xlock(file, beg, libc::F_OFD_SETLK, libc::F_UNLCK) -} - -#[allow(unused)] -async fn lock_1() -> Result<(), Error> { - let path = PathBuf::from(BASE).join("tmp-daq4-f1"); - let mut f1 = OpenOptions::new() - .write(true) - .read(true) - .create(true) - .truncate(false) - .open(path) - .await?; - f1.as_raw_fd(); - - let mx1 = std::sync::Arc::new(tokio::sync::Mutex::new(0usize)); - let mg1 = mx1.lock().await; - - let (tx1, rx2) = std::sync::mpsc::channel(); - let (tx2, rx1) = std::sync::mpsc::channel(); - - let t1 = std::thread::spawn({ - move || { - let path = PathBuf::from(BASE).join("tmp-daq4-f1"); - let 
mut f1 = std::fs::OpenOptions::new().read(true).write(true).open(&path).unwrap(); - info!("Thread 1 rlock..."); - let ec = rlock(&mut f1, 0); - info!("Thread 1 rlock {}", ec); - tx1.send(1u32).unwrap(); - rx1.recv().unwrap(); - info!("Thread 1 unlock..."); - let ec = unlock(&mut f1, 0); - info!("Thread 1 unlock {}", ec); - tx1.send(1u32).unwrap(); - rx1.recv().unwrap(); - info!("Thread 1 rlock..."); - let ec = rlock(&mut f1, 0); - info!("Thread 1 rlock {}", ec); - tx1.send(1u32).unwrap(); - rx1.recv().unwrap(); - info!("Thread 1 done"); - } - }); - let t2 = std::thread::spawn({ - move || { - let path = PathBuf::from(BASE).join("tmp-daq4-f1"); - let mut f1 = std::fs::OpenOptions::new().read(true).write(true).open(&path).unwrap(); - rx2.recv().unwrap(); - info!("Thread 2 wlock..."); - let ec = wlock(&mut f1, 0); - info!("Thread 2 wlock {}", ec); - tx2.send(1u32).unwrap(); - rx2.recv().unwrap(); - info!("Thread 2 rlock"); - let ec = rlock(&mut f1, 0); - info!("Thread 2 rlock {}", ec); - tx2.send(1u32).unwrap(); - rx2.recv().unwrap(); - tx2.send(1u32).unwrap(); - info!("Thread 2 done"); - } - }); - tokio::task::spawn_blocking(move || { - t1.join().map_err(|_| Error::with_msg_no_trace("join error"))?; - t2.join().map_err(|_| Error::with_msg_no_trace("join error"))?; - Ok::<_, Error>(()) - }) - .await - .map_err(Error::from_string)??; - Ok(()) -} - -#[allow(unused)] -async fn write_1() -> Result<(), Error> { - let path = PathBuf::from(BASE).join("tmp-daq4-f2"); - let mut f1 = OpenOptions::new() - .write(true) - .read(true) - .create(true) - .truncate(false) - .open(path) - .await?; - unsafe { - let path_d = PathBuf::from(BASE); - let mut path_d_b = path_d.as_os_str().as_bytes().to_vec(); - //info!("path_d_b {:?}", path_d_b); - path_d_b.push(0); - let fdd = libc::open(path_d_b.as_ptr() as *const i8, libc::O_DIRECTORY | libc::O_RDONLY); - if fdd < 0 { - panic!(); - } - let ec = libc::fsync(fdd); - if ec != 0 { - panic!(); - } - let ec = libc::close(fdd); - if ec != 0 { - panic!(); - } - let fd = f1.as_raw_fd(); - let lockparam = libc::flock { - l_type: libc::F_RDLCK as i16, - l_whence: libc::SEEK_SET as i16, - l_start: 0, - l_len: 8, - l_pid: 0, - }; - let ec = libc::fcntl(f1.as_raw_fd(), libc::F_OFD_SETLK, &lockparam); - if ec != 0 { - panic!(); - } - let buf = b"world!"; - let n = libc::pwrite(fd, buf.as_ptr() as *const libc::c_void, buf.len(), 0); - if n != buf.len() as isize { - panic!(); - } - let ec = libc::fsync(fd); - if ec != 0 { - panic!(); - } - let lockparam = libc::flock { - l_type: libc::F_UNLCK as i16, - l_whence: libc::SEEK_SET as i16, - l_start: 0, - l_len: 8, - l_pid: 0, - }; - let ec = libc::fcntl(f1.as_raw_fd(), libc::F_OFD_SETLK, &lockparam); - if ec == 0 { - panic!(); - } - } - Ok(()) -} - -#[cfg(test)] -#[allow(unused)] -mod test { - use super::*; - - //#[test] - fn t1() -> Result<(), Error> { - Ok(taskrun::run(write_1()).unwrap()) - } -} - -pub struct EventSink {} - -impl EventSink { - pub fn sink(&self, _channel: &Channel, _events: PlainEvents) -> Result<(), Error> { - Ok(()) - } -} diff --git a/items/src/binnedevents.rs b/items/src/binnedevents.rs deleted file mode 100644 index 185dcfd..0000000 --- a/items/src/binnedevents.rs +++ /dev/null @@ -1,313 +0,0 @@ -use crate::eventsitem::EventsItem; -use crate::plainevents::{PlainEvents, ScalarPlainEvents}; -use crate::xbinnedscalarevents::XBinnedScalarEvents; -use crate::xbinnedwaveevents::XBinnedWaveEvents; -use crate::{Appendable, Clearable, PushableIndex, WithLen, WithTimestamps}; -use netpod::{AggKind, HasScalarType, 
HasShape, ScalarType, Shape}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -pub enum SingleBinWaveEvents { - U8(XBinnedScalarEvents), - U16(XBinnedScalarEvents), - U32(XBinnedScalarEvents), - U64(XBinnedScalarEvents), - I8(XBinnedScalarEvents), - I16(XBinnedScalarEvents), - I32(XBinnedScalarEvents), - I64(XBinnedScalarEvents), - F32(XBinnedScalarEvents), - F64(XBinnedScalarEvents), - String(XBinnedScalarEvents), -} - -impl SingleBinWaveEvents { - pub fn variant_name(&self) -> String { - items_proc::tycases1!(self, Self, (k), { "$id".into() }) - } - - // TODO possible to remove? - fn x_aggregate(self, _: &AggKind) -> EventsItem { - err::todoval() - } -} - -impl Clearable for SingleBinWaveEvents { - fn clear(&mut self) { - items_proc::tycases1!(self, Self, (k), { k.clear() }) - } -} - -impl Appendable for SingleBinWaveEvents { - fn empty_like_self(&self) -> Self { - items_proc::tycases1!(self, Self, (k), { Self::$id(k.empty_like_self()) }) - } - - fn append(&mut self, src: &Self) { - items_proc::tycases1!(self, Self, (k), { - match src { - Self::$id(j) => k.append(j), - _ => panic!(), - } - }) - } - - fn append_zero(&mut self, _ts1: u64, _ts2: u64) { - // TODO can this implement Appendable in a sane way? Do we need it? - err::todo(); - } -} - -impl PushableIndex for SingleBinWaveEvents { - fn push_index(&mut self, src: &Self, ix: usize) { - items_proc::tycases1!(self, Self, (k), { - match src { - Self::$id(j) => k.push_index(j, ix), - _ => panic!(), - } - }) - } -} - -impl WithLen for SingleBinWaveEvents { - fn len(&self) -> usize { - items_proc::tycases1!(self, Self, (k), { k.len() }) - } -} - -impl WithTimestamps for SingleBinWaveEvents { - fn ts(&self, ix: usize) -> u64 { - items_proc::tycases1!(self, Self, (k), { k.ts(ix) }) - } -} - -impl HasShape for SingleBinWaveEvents { - fn shape(&self) -> Shape { - Shape::Scalar - } -} - -impl HasScalarType for SingleBinWaveEvents { - fn scalar_type(&self) -> ScalarType { - items_proc::tycases1!(self, Self, (k), { ScalarType::$id }) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum MultiBinWaveEvents { - U8(XBinnedWaveEvents), - U16(XBinnedWaveEvents), - U32(XBinnedWaveEvents), - U64(XBinnedWaveEvents), - I8(XBinnedWaveEvents), - I16(XBinnedWaveEvents), - I32(XBinnedWaveEvents), - I64(XBinnedWaveEvents), - F32(XBinnedWaveEvents), - F64(XBinnedWaveEvents), - String(XBinnedWaveEvents), -} - -impl MultiBinWaveEvents { - pub fn variant_name(&self) -> String { - items_proc::tycases1!(self, Self, (k), { "$id".into() }) - } - - // TODO remove - fn x_aggregate(self, _: &AggKind) -> EventsItem { - err::todoval() - } -} - -impl Clearable for MultiBinWaveEvents { - fn clear(&mut self) { - items_proc::tycases1!(self, Self, (k), { k.clear() }) - } -} - -impl Appendable for MultiBinWaveEvents { - fn empty_like_self(&self) -> Self { - items_proc::tycases1!(self, Self, (k), { Self::$id(k.empty_like_self()) }) - } - - fn append(&mut self, src: &Self) { - items_proc::tycases1!(self, Self, (k), { - match src { - Self::$id(j) => k.append(j), - _ => panic!(), - } - }) - } - - fn append_zero(&mut self, _ts1: u64, _ts2: u64) { - // TODO can this implement Appendable in a sane way? Do we need it? 
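The containers deleted in this file all answer to the same `Appendable` contract from the `items` crate; `append_zero` is the troublesome method (hence the `err::todo()` on the next line), since a zero bin has timestamps but no x-binned payload to synthesize. For reference, the contract as these impls use it, reconstructed from usage here and in the bin containers below:

```rust
// The Appendable contract as used by the impls in this patch,
// reconstructed from usage; the authoritative trait lives in the
// items crate, so treat this as a sketch.
pub trait Appendable {
    // A fresh, empty container with the same concrete variant as self.
    fn empty_like_self(&self) -> Self
    where
        Self: Sized;
    // Concatenate all entries of src onto self; the enum impls panic
    // when the variants of self and src disagree.
    fn append(&mut self, src: &Self);
    // Record an empty (zero-count) bin spanning ts1..ts2; meaningful for
    // time-binned containers, left as err::todo() by the event containers.
    fn append_zero(&mut self, ts1: u64, ts2: u64);
}
```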
- err::todo(); - } -} - -impl PushableIndex for MultiBinWaveEvents { - fn push_index(&mut self, src: &Self, ix: usize) { - items_proc::tycases1!(self, Self, (k), { - match src { - Self::$id(j) => k.push_index(j, ix), - _ => panic!(), - } - }) - } -} - -impl WithLen for MultiBinWaveEvents { - fn len(&self) -> usize { - items_proc::tycases1!(self, Self, (k), { k.len() }) - } -} - -impl WithTimestamps for MultiBinWaveEvents { - fn ts(&self, ix: usize) -> u64 { - items_proc::tycases1!(self, Self, (k), { k.ts(ix) }) - } -} - -impl HasShape for MultiBinWaveEvents { - fn shape(&self) -> Shape { - Shape::Scalar - } -} - -impl HasScalarType for MultiBinWaveEvents { - fn scalar_type(&self) -> ScalarType { - items_proc::tycases1!(self, Self, (k), { ScalarType::$id }) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum XBinnedEvents { - Scalar(ScalarPlainEvents), - SingleBinWave(SingleBinWaveEvents), - MultiBinWave(MultiBinWaveEvents), -} - -impl XBinnedEvents { - pub fn variant_name(&self) -> String { - use XBinnedEvents::*; - match self { - Scalar(h) => format!("Scalar({})", h.variant_name()), - SingleBinWave(h) => format!("SingleBinWave({})", h.variant_name()), - MultiBinWave(h) => format!("MultiBinWave({})", h.variant_name()), - } - } - - pub fn x_aggregate(self, ak: &AggKind) -> EventsItem { - use XBinnedEvents::*; - match self { - Scalar(k) => EventsItem::Plain(PlainEvents::Scalar(k)), - SingleBinWave(k) => k.x_aggregate(ak), - MultiBinWave(k) => k.x_aggregate(ak), - } - } -} - -impl Clearable for XBinnedEvents { - fn clear(&mut self) { - match self { - XBinnedEvents::Scalar(k) => k.clear(), - XBinnedEvents::SingleBinWave(k) => k.clear(), - XBinnedEvents::MultiBinWave(k) => k.clear(), - } - } -} - -impl Appendable for XBinnedEvents { - fn empty_like_self(&self) -> Self { - match self { - Self::Scalar(k) => Self::Scalar(k.empty_like_self()), - Self::SingleBinWave(k) => Self::SingleBinWave(k.empty_like_self()), - Self::MultiBinWave(k) => Self::MultiBinWave(k.empty_like_self()), - } - } - - fn append(&mut self, src: &Self) { - match self { - Self::Scalar(k) => match src { - Self::Scalar(j) => k.append(j), - _ => panic!(), - }, - Self::SingleBinWave(k) => match src { - Self::SingleBinWave(j) => k.append(j), - _ => panic!(), - }, - Self::MultiBinWave(k) => match src { - Self::MultiBinWave(j) => k.append(j), - _ => panic!(), - }, - } - } - - fn append_zero(&mut self, _ts1: u64, _ts2: u64) { - // TODO can this implement Appendable in a sane way? Do we need it? 
- err::todo(); - } -} - -impl PushableIndex for XBinnedEvents { - fn push_index(&mut self, src: &Self, ix: usize) { - match self { - Self::Scalar(k) => match src { - Self::Scalar(j) => k.push_index(j, ix), - _ => panic!(), - }, - Self::SingleBinWave(k) => match src { - Self::SingleBinWave(j) => k.push_index(j, ix), - _ => panic!(), - }, - Self::MultiBinWave(k) => match src { - Self::MultiBinWave(j) => k.push_index(j, ix), - _ => panic!(), - }, - } - } -} - -impl WithLen for XBinnedEvents { - fn len(&self) -> usize { - use XBinnedEvents::*; - match self { - Scalar(j) => j.len(), - SingleBinWave(j) => j.len(), - MultiBinWave(j) => j.len(), - } - } -} - -impl WithTimestamps for XBinnedEvents { - fn ts(&self, ix: usize) -> u64 { - use XBinnedEvents::*; - match self { - Scalar(j) => j.ts(ix), - SingleBinWave(j) => j.ts(ix), - MultiBinWave(j) => j.ts(ix), - } - } -} - -impl HasShape for XBinnedEvents { - fn shape(&self) -> Shape { - use XBinnedEvents::*; - match self { - Scalar(h) => h.shape(), - SingleBinWave(h) => h.shape(), - MultiBinWave(h) => h.shape(), - } - } -} - -impl HasScalarType for XBinnedEvents { - fn scalar_type(&self) -> ScalarType { - use XBinnedEvents::*; - match self { - Scalar(h) => h.scalar_type(), - SingleBinWave(h) => h.scalar_type(), - MultiBinWave(h) => h.scalar_type(), - } - } -} diff --git a/items/src/binsdim0.rs b/items/src/binsdim0.rs deleted file mode 100644 index 4c1052a..0000000 --- a/items/src/binsdim0.rs +++ /dev/null @@ -1,683 +0,0 @@ -use crate::frame::bincode_from_slice; -use crate::numops::NumOps; -use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult}; -use crate::Appendable; -use crate::FilterFittingInside; -use crate::Fits; -use crate::FitsInside; -use crate::FrameTypeInnerStatic; -use crate::IsoDateTime; -use crate::ReadPbv; -use crate::ReadableFromFile; -use crate::Sitemty; -use crate::TimeBinnableDyn; -use crate::{ts_offs_from_abs, FrameType}; -use crate::{NewEmpty, RangeOverlapInfo, WithLen}; -use crate::{TimeBinnableType, TimeBinnableTypeAggregator}; -use crate::{TimeBinned, TimeBinnerDyn, TimeBins}; -use chrono::{TimeZone, Utc}; -use err::Error; -use items_0::subfr::SubFrId; -use items_0::AsAnyRef; -use netpod::log::*; -use netpod::timeunits::SEC; -use netpod::{NanoRange, Shape}; -use num_traits::Zero; -use serde::{Deserialize, Serialize}; -use std::any::Any; -use std::collections::VecDeque; -use std::fmt; -use std::marker::PhantomData; -use tokio::fs::File; - -#[derive(Clone, Serialize, Deserialize)] -pub struct MinMaxAvgDim0Bins { - pub ts1s: Vec, - pub ts2s: Vec, - pub counts: Vec, - pub mins: Vec, - pub maxs: Vec, - pub avgs: Vec, -} - -impl FrameTypeInnerStatic for MinMaxAvgDim0Bins -where - NTY: SubFrId, -{ - const FRAME_TYPE_ID: u32 = crate::MIN_MAX_AVG_DIM_0_BINS_FRAME_TYPE_ID + NTY::SUB; -} - -impl FrameType for MinMaxAvgDim0Bins -where - NTY: SubFrId, -{ - fn frame_type_id(&self) -> u32 { - ::FRAME_TYPE_ID - } -} - -impl fmt::Debug for MinMaxAvgDim0Bins -where - NTY: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "MinMaxAvgDim0Bins count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}", - self.ts1s.len(), - self.ts1s.iter().map(|k| k / SEC).collect::>(), - self.ts2s.iter().map(|k| k / SEC).collect::>(), - self.counts, - self.mins, - self.maxs, - self.avgs, - ) - } -} - -impl AsAnyRef for MinMaxAvgDim0Bins -where - NTY: NumOps, -{ - fn as_any_ref(&self) -> &dyn Any { - self - } -} - -impl MinMaxAvgDim0Bins { - pub fn empty() -> Self { - Self { - ts1s: vec![], - 
ts2s: vec![], - counts: vec![], - mins: vec![], - maxs: vec![], - avgs: vec![], - } - } -} - -impl FitsInside for MinMaxAvgDim0Bins { - fn fits_inside(&self, range: NanoRange) -> Fits { - if self.ts1s.is_empty() { - Fits::Empty - } else { - let t1 = *self.ts1s.first().unwrap(); - let t2 = *self.ts2s.last().unwrap(); - if t2 <= range.beg { - Fits::Lower - } else if t1 >= range.end { - Fits::Greater - } else if t1 < range.beg && t2 > range.end { - Fits::PartlyLowerAndGreater - } else if t1 < range.beg { - Fits::PartlyLower - } else if t2 > range.end { - Fits::PartlyGreater - } else { - Fits::Inside - } - } - } -} - -impl FilterFittingInside for MinMaxAvgDim0Bins { - fn filter_fitting_inside(self, fit_range: NanoRange) -> Option { - match self.fits_inside(fit_range) { - Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self), - _ => None, - } - } -} - -impl RangeOverlapInfo for MinMaxAvgDim0Bins { - fn ends_before(&self, range: NanoRange) -> bool { - match self.ts2s.last() { - Some(&ts) => ts <= range.beg, - None => true, - } - } - - fn ends_after(&self, range: NanoRange) -> bool { - match self.ts2s.last() { - Some(&ts) => ts > range.end, - None => panic!(), - } - } - - fn starts_after(&self, range: NanoRange) -> bool { - match self.ts1s.first() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } -} - -impl TimeBins for MinMaxAvgDim0Bins -where - NTY: NumOps, -{ - fn ts1s(&self) -> &Vec { - &self.ts1s - } - - fn ts2s(&self) -> &Vec { - &self.ts2s - } -} - -impl WithLen for MinMaxAvgDim0Bins { - fn len(&self) -> usize { - self.ts1s.len() - } -} - -impl NewEmpty for MinMaxAvgDim0Bins { - fn empty(_shape: Shape) -> Self { - Self { - ts1s: Vec::new(), - ts2s: Vec::new(), - counts: Vec::new(), - mins: Vec::new(), - maxs: Vec::new(), - avgs: Vec::new(), - } - } -} - -impl Appendable for MinMaxAvgDim0Bins -where - NTY: NumOps, -{ - fn empty_like_self(&self) -> Self { - Self::empty() - } - - fn append(&mut self, src: &Self) { - self.ts1s.extend_from_slice(&src.ts1s); - self.ts2s.extend_from_slice(&src.ts2s); - self.counts.extend_from_slice(&src.counts); - self.mins.extend_from_slice(&src.mins); - self.maxs.extend_from_slice(&src.maxs); - self.avgs.extend_from_slice(&src.avgs); - } - - fn append_zero(&mut self, ts1: u64, ts2: u64) { - self.ts1s.push(ts1); - self.ts2s.push(ts2); - self.counts.push(0); - self.mins.push(NTY::zero()); - self.maxs.push(NTY::zero()); - self.avgs.push(0.); - } -} - -impl ReadableFromFile for MinMaxAvgDim0Bins -where - NTY: NumOps, -{ - // TODO this function is not needed in the trait: - fn read_from_file(file: File) -> Result, Error> { - Ok(ReadPbv::new(file)) - } - - fn from_buf(buf: &[u8]) -> Result { - let dec = bincode_from_slice(buf)?; - Ok(dec) - } -} - -impl TimeBinnableType for MinMaxAvgDim0Bins -where - NTY: NumOps, -{ - type Output = MinMaxAvgDim0Bins; - type Aggregator = MinMaxAvgDim0BinsAggregator; - - fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator { - debug!( - "TimeBinnableType for XBinnedScalarEvents aggregator() range {:?} x_bin_count {} do_time_weight {}", - range, x_bin_count, do_time_weight - ); - Self::Aggregator::new(range, do_time_weight) - } -} - -impl ToJsonResult for Sitemty> -where - NTY: NumOps, -{ - fn to_json_result(&self) -> Result, Error> { - Ok(Box::new(serde_json::Value::String(format!( - "MinMaxAvgBins/non-json-item" - )))) - } -} - -pub struct MinMaxAvgBinsCollected { - _m1: PhantomData, -} - -impl MinMaxAvgBinsCollected { - pub fn new() -> 
Self { - Self { _m1: PhantomData } - } -} - -#[derive(Serialize)] -pub struct MinMaxAvgBinsCollectedResult { - #[serde(rename = "tsAnchor")] - ts_anchor_sec: u64, - #[serde(rename = "tsMs")] - ts_off_ms: Vec, - #[serde(rename = "tsNs")] - ts_off_ns: Vec, - counts: Vec, - mins: Vec, - maxs: Vec, - avgs: Vec, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")] - finalised_range: bool, - #[serde(skip_serializing_if = "Zero::is_zero", rename = "missingBins")] - missing_bins: u32, - #[serde(skip_serializing_if = "Option::is_none", rename = "continueAt")] - continue_at: Option, -} - -pub struct MinMaxAvgBinsCollector { - bin_count_exp: u32, - timed_out: bool, - range_complete: bool, - vals: MinMaxAvgDim0Bins, - _m1: PhantomData, -} - -impl MinMaxAvgBinsCollector { - pub fn new(bin_count_exp: u32) -> Self { - Self { - bin_count_exp, - timed_out: false, - range_complete: false, - vals: MinMaxAvgDim0Bins::::empty(), - _m1: PhantomData, - } - } -} - -impl WithLen for MinMaxAvgBinsCollector -where - NTY: NumOps + Serialize, -{ - fn len(&self) -> usize { - self.vals.ts1s.len() - } -} - -impl Collector for MinMaxAvgBinsCollector -where - NTY: NumOps + Serialize, -{ - type Input = MinMaxAvgDim0Bins; - type Output = MinMaxAvgBinsCollectedResult; - - fn ingest(&mut self, src: &Self::Input) { - Appendable::append(&mut self.vals, src); - } - - fn set_range_complete(&mut self) { - self.range_complete = true; - } - - fn set_timed_out(&mut self) { - self.timed_out = true; - } - - fn result(self) -> Result { - let bin_count = self.vals.ts1s.len() as u32; - // TODO could save the copy: - let mut ts_all = self.vals.ts1s.clone(); - if self.vals.ts2s.len() > 0 { - ts_all.push(*self.vals.ts2s.last().unwrap()); - } - let continue_at = if self.vals.ts1s.len() < self.bin_count_exp as usize { - match ts_all.last() { - Some(&k) => { - let iso = IsoDateTime(Utc.timestamp_nanos(k as i64)); - Some(iso) - } - None => Err(Error::with_msg("partial_content but no bin in result"))?, - } - } else { - None - }; - let tst = ts_offs_from_abs(&ts_all); - let ret = MinMaxAvgBinsCollectedResult:: { - ts_anchor_sec: tst.0, - ts_off_ms: tst.1, - ts_off_ns: tst.2, - counts: self.vals.counts, - mins: self.vals.mins, - maxs: self.vals.maxs, - avgs: self.vals.avgs, - finalised_range: self.range_complete, - missing_bins: self.bin_count_exp - bin_count, - continue_at, - }; - Ok(ret) - } -} - -impl Collectable for MinMaxAvgDim0Bins -where - NTY: NumOps + Serialize, -{ - type Collector = MinMaxAvgBinsCollector; - - fn new_collector(bin_count_exp: u32) -> Self::Collector { - Self::Collector::new(bin_count_exp) - } -} - -pub struct MinMaxAvgDim0BinsAggregator { - range: NanoRange, - count: u64, - min: NTY, - max: NTY, - // Carry over to next bin: - avg: f32, - sumc: u64, - sum: f32, -} - -impl MinMaxAvgDim0BinsAggregator { - pub fn new(range: NanoRange, _do_time_weight: bool) -> Self { - Self { - range, - count: 0, - min: NTY::zero(), - max: NTY::zero(), - avg: 0., - sumc: 0, - sum: 0f32, - } - } -} - -impl TimeBinnableTypeAggregator for MinMaxAvgDim0BinsAggregator -where - NTY: NumOps, -{ - type Input = MinMaxAvgDim0Bins; - type Output = MinMaxAvgDim0Bins; - - fn range(&self) -> &NanoRange { - &self.range - } - - fn ingest(&mut self, item: &Self::Input) { - for i1 in 0..item.ts1s.len() { - if item.counts[i1] == 0 { - } else if item.ts2s[i1] <= self.range.beg { - } else if item.ts1s[i1] >= self.range.end { - } else { - if item.mins[i1].as_prim_f32() < 1. 
{ - info!("small bin min {:?} counts {}", item.mins[i1], item.counts[i1]); - } - if self.count == 0 { - self.min = item.mins[i1].clone(); - self.max = item.maxs[i1].clone(); - } else { - if self.min > item.mins[i1] { - self.min = item.mins[i1].clone(); - } - if self.max < item.maxs[i1] { - self.max = item.maxs[i1].clone(); - } - } - self.count += item.counts[i1]; - self.sum += item.avgs[i1]; - self.sumc += 1; - } - } - } - - fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output { - if self.sumc > 0 { - self.avg = self.sum / self.sumc as f32; - } - let ret = Self::Output { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - mins: vec![self.min.clone()], - maxs: vec![self.max.clone()], - avgs: vec![self.avg], - }; - self.range = range; - self.count = 0; - self.sum = 0f32; - self.sumc = 0; - ret - } -} - -impl TimeBinnableDyn for MinMaxAvgDim0Bins { - fn time_binner_new(&self, edges: Vec, do_time_weight: bool) -> Box { - eprintln!("MinMaxAvgDim0Bins time_binner_new"); - info!("MinMaxAvgDim0Bins time_binner_new"); - let ret = MinMaxAvgDim0BinsTimeBinner::::new(edges.into(), do_time_weight); - Box::new(ret) - } -} - -pub struct MinMaxAvgDim0BinsTimeBinner { - edges: VecDeque, - do_time_weight: bool, - agg: Option>, - ready: Option< as TimeBinnableTypeAggregator>::Output>, -} - -impl MinMaxAvgDim0BinsTimeBinner { - fn new(edges: VecDeque, do_time_weight: bool) -> Self { - Self { - edges, - do_time_weight, - agg: None, - ready: None, - } - } - - fn next_bin_range(&mut self) -> Option { - if self.edges.len() >= 2 { - let ret = NanoRange { - beg: self.edges[0], - end: self.edges[1], - }; - self.edges.pop_front(); - Some(ret) - } else { - None - } - } - - fn struct_name() -> &'static str { - std::any::type_name::() - } -} - -impl TimeBinnerDyn for MinMaxAvgDim0BinsTimeBinner { - fn ingest(&mut self, item: &dyn TimeBinnableDyn) { - //const SELF: &str = "MinMaxAvgDim0BinsTimeBinner"; - #[allow(non_snake_case)] - let SELF = Self::struct_name(); - if item.len() == 0 { - // Return already here, RangeOverlapInfo would not give much sense. - return; - } - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} no more bin in edges A"); - return; - } - // TODO optimize by remembering at which event array index we have arrived. - // That needs modified interfaces which can take and yield the start and latest index. - loop { - while item.starts_after(NanoRange { - beg: 0, - end: self.edges[1], - }) { - self.cycle(); - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} no more bin in edges B"); - return; - } - } - if item.ends_before(NanoRange { - beg: self.edges[0], - end: u64::MAX, - }) { - return; - } else { - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} edge list exhausted"); - return; - } else { - let agg = if let Some(agg) = self.agg.as_mut() { - agg - } else { - self.agg = Some(MinMaxAvgDim0BinsAggregator::new( - // We know here that we have enough edges for another bin. - // and `next_bin_range` will pop the first edge. 
- self.next_bin_range().unwrap(), - self.do_time_weight, - )); - self.agg.as_mut().unwrap() - }; - if let Some(item) = item - .as_any_ref() - // TODO make statically sure that we attempt to cast to the correct type here: - .downcast_ref::< as TimeBinnableTypeAggregator>::Input>() - { - agg.ingest(item); - } else { - let tyid_item = std::any::Any::type_id(item.as_any_ref()); - error!("not correct item type {:?}", tyid_item); - }; - if item.ends_after(agg.range().clone()) { - self.cycle(); - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} no more bin in edges C"); - return; - } - } else { - break; - } - } - } - } - } - - fn bins_ready_count(&self) -> usize { - match &self.ready { - Some(k) => k.len(), - None => 0, - } - } - - fn bins_ready(&mut self) -> Option> { - match self.ready.take() { - Some(k) => Some(Box::new(k)), - None => None, - } - } - - // TODO there is too much common code between implementors: - fn push_in_progress(&mut self, push_empty: bool) { - // TODO expand should be derived from AggKind. Is it still required after all? - let expand = true; - if let Some(agg) = self.agg.as_mut() { - let dummy_range = NanoRange { beg: 4, end: 5 }; - let mut bins = agg.result_reset(dummy_range, expand); - self.agg = None; - assert_eq!(bins.len(), 1); - if push_empty || bins.counts[0] != 0 { - match self.ready.as_mut() { - Some(ready) => { - ready.append(&mut bins); - } - None => { - self.ready = Some(bins); - } - } - } - } - } - - // TODO there is too much common code between implementors: - fn cycle(&mut self) { - let n = self.bins_ready_count(); - self.push_in_progress(true); - if self.bins_ready_count() == n { - if let Some(range) = self.next_bin_range() { - let mut bins = MinMaxAvgDim0Bins::::empty(); - bins.append_zero(range.beg, range.end); - match self.ready.as_mut() { - Some(ready) => { - ready.append(&mut bins); - } - None => { - self.ready = Some(bins); - } - } - if self.bins_ready_count() <= n { - error!("failed to push a zero bin"); - } - } else { - warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin"); - } - } - } -} - -impl TimeBinned for MinMaxAvgDim0Bins { - fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn { - self as &dyn TimeBinnableDyn - } - - fn edges_slice(&self) -> (&[u64], &[u64]) { - (&self.ts1s[..], &self.ts2s[..]) - } - - fn counts(&self) -> &[u64] { - &self.counts[..] - } - - fn mins(&self) -> Vec { - self.mins.iter().map(|x| x.clone().as_prim_f32()).collect() - } - - fn maxs(&self) -> Vec { - self.maxs.iter().map(|x| x.clone().as_prim_f32()).collect() - } - - fn avgs(&self) -> Vec { - self.avgs.clone() - } - - fn validate(&self) -> Result<(), String> { - use std::fmt::Write; - let mut msg = String::new(); - if self.ts1s.len() != self.ts2s.len() { - write!(&mut msg, "ts1s ≠ ts2s\n").unwrap(); - } - for (i, ((count, min), max)) in self.counts.iter().zip(&self.mins).zip(&self.maxs).enumerate() { - if min.as_prim_f32() < 1. 
&& *count != 0 { - write!(&mut msg, "i {} count {} min {:?} max {:?}\n", i, count, min, max).unwrap(); - } - } - if msg.is_empty() { - Ok(()) - } else { - Err(msg) - } - } -} diff --git a/items/src/binsdim1.rs b/items/src/binsdim1.rs deleted file mode 100644 index 4e8e701..0000000 --- a/items/src/binsdim1.rs +++ /dev/null @@ -1,617 +0,0 @@ -use crate::frame::bincode_from_slice; -use crate::numops::NumOps; -use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult}; -use crate::ts_offs_from_abs; -use crate::waveevents::WaveEvents; -use crate::Appendable; -use crate::FilterFittingInside; -use crate::FrameTypeInnerStatic; -use crate::IsoDateTime; -use crate::RangeOverlapInfo; -use crate::ReadableFromFile; -use crate::TimeBinnableDyn; -use crate::TimeBinnableType; -use crate::TimeBinnableTypeAggregator; -use crate::TimeBins; -use crate::{pulse_offs_from_abs, FrameType}; -use crate::{Fits, FitsInside, NewEmpty, ReadPbv, Sitemty, TimeBinned, WithLen}; -use chrono::{TimeZone, Utc}; -use err::Error; -use items_0::subfr::SubFrId; -use items_0::AsAnyRef; -use netpod::log::*; -use netpod::timeunits::SEC; -use netpod::{NanoRange, Shape}; -use num_traits::Zero; -use serde::{Deserialize, Serialize}; -use std::any::Any; -use std::fmt; -use std::marker::PhantomData; -use tokio::fs::File; - -#[derive(Serialize, Deserialize)] -pub struct MinMaxAvgDim1Bins { - pub ts1s: Vec, - pub ts2s: Vec, - pub counts: Vec, - pub mins: Vec>>, - pub maxs: Vec>>, - pub avgs: Vec>>, -} - -impl FrameTypeInnerStatic for MinMaxAvgDim1Bins -where - NTY: SubFrId, -{ - const FRAME_TYPE_ID: u32 = crate::MIN_MAX_AVG_DIM_1_BINS_FRAME_TYPE_ID + NTY::SUB; -} - -impl FrameType for MinMaxAvgDim1Bins -where - NTY: SubFrId, -{ - fn frame_type_id(&self) -> u32 { - ::FRAME_TYPE_ID - } -} - -impl fmt::Debug for MinMaxAvgDim1Bins -where - NTY: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "MinMaxAvgDim1Bins count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}", - self.ts1s.len(), - self.ts1s.iter().map(|k| k / SEC).collect::>(), - self.ts2s.iter().map(|k| k / SEC).collect::>(), - self.counts, - self.mins.first(), - self.maxs.first(), - self.avgs.first(), - ) - } -} - -impl AsAnyRef for MinMaxAvgDim1Bins -where - NTY: NumOps, -{ - fn as_any_ref(&self) -> &dyn Any { - self - } -} - -impl MinMaxAvgDim1Bins { - pub fn empty() -> Self { - Self { - ts1s: vec![], - ts2s: vec![], - counts: vec![], - mins: vec![], - maxs: vec![], - avgs: vec![], - } - } -} - -impl FitsInside for MinMaxAvgDim1Bins { - fn fits_inside(&self, range: NanoRange) -> Fits { - if self.ts1s.is_empty() { - Fits::Empty - } else { - let t1 = *self.ts1s.first().unwrap(); - let t2 = *self.ts2s.last().unwrap(); - if t2 <= range.beg { - Fits::Lower - } else if t1 >= range.end { - Fits::Greater - } else if t1 < range.beg && t2 > range.end { - Fits::PartlyLowerAndGreater - } else if t1 < range.beg { - Fits::PartlyLower - } else if t2 > range.end { - Fits::PartlyGreater - } else { - Fits::Inside - } - } - } -} - -impl FilterFittingInside for MinMaxAvgDim1Bins { - fn filter_fitting_inside(self, fit_range: NanoRange) -> Option { - match self.fits_inside(fit_range) { - Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self), - _ => None, - } - } -} - -impl RangeOverlapInfo for MinMaxAvgDim1Bins { - fn ends_before(&self, range: NanoRange) -> bool { - match self.ts2s.last() { - Some(&ts) => ts <= range.beg, - None => true, - } - } - - fn ends_after(&self, range: NanoRange) 
-> bool { - match self.ts2s.last() { - Some(&ts) => ts > range.end, - None => panic!(), - } - } - - fn starts_after(&self, range: NanoRange) -> bool { - match self.ts1s.first() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } -} - -impl TimeBins for MinMaxAvgDim1Bins -where - NTY: NumOps, -{ - fn ts1s(&self) -> &Vec { - &self.ts1s - } - - fn ts2s(&self) -> &Vec { - &self.ts2s - } -} - -impl WithLen for MinMaxAvgDim1Bins { - fn len(&self) -> usize { - self.ts1s.len() - } -} - -impl NewEmpty for MinMaxAvgDim1Bins { - fn empty(_shape: Shape) -> Self { - Self { - ts1s: Vec::new(), - ts2s: Vec::new(), - counts: Vec::new(), - mins: Vec::new(), - maxs: Vec::new(), - avgs: Vec::new(), - } - } -} - -impl Appendable for MinMaxAvgDim1Bins -where - NTY: NumOps, -{ - fn empty_like_self(&self) -> Self { - Self::empty() - } - - fn append(&mut self, src: &Self) { - self.ts1s.extend_from_slice(&src.ts1s); - self.ts2s.extend_from_slice(&src.ts2s); - self.counts.extend_from_slice(&src.counts); - self.mins.extend_from_slice(&src.mins); - self.maxs.extend_from_slice(&src.maxs); - self.avgs.extend_from_slice(&src.avgs); - } - - fn append_zero(&mut self, ts1: u64, ts2: u64) { - self.ts1s.push(ts1); - self.ts2s.push(ts2); - self.counts.push(0); - self.avgs.push(None); - self.mins.push(None); - self.maxs.push(None); - } -} - -impl ReadableFromFile for MinMaxAvgDim1Bins -where - NTY: NumOps, -{ - // TODO this function is not needed in the trait: - fn read_from_file(file: File) -> Result, Error> { - Ok(ReadPbv::new(file)) - } - - fn from_buf(buf: &[u8]) -> Result { - let dec = bincode_from_slice(buf)?; - Ok(dec) - } -} - -impl TimeBinnableType for MinMaxAvgDim1Bins -where - NTY: NumOps, -{ - type Output = MinMaxAvgDim1Bins; - type Aggregator = MinMaxAvgDim1BinsAggregator; - - fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator { - debug!( - "TimeBinnableType for MinMaxAvgDim1Bins aggregator() range {:?} x_bin_count {} do_time_weight {}", - range, x_bin_count, do_time_weight - ); - Self::Aggregator::new(range, x_bin_count, do_time_weight) - } -} - -impl ToJsonResult for Sitemty> -where - NTY: NumOps, -{ - fn to_json_result(&self) -> Result, Error> { - Ok(Box::new(serde_json::Value::String(format!( - "MinMaxAvgDim1Bins/non-json-item" - )))) - } -} - -pub struct MinMaxAvgDim1BinsCollected { - _m1: PhantomData, -} - -impl MinMaxAvgDim1BinsCollected { - pub fn new() -> Self { - Self { _m1: PhantomData } - } -} - -#[derive(Serialize)] -pub struct MinMaxAvgDim1BinsCollectedResult { - ts_bin_edges: Vec, - counts: Vec, - mins: Vec>>, - maxs: Vec>>, - avgs: Vec>>, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")] - finalised_range: bool, - #[serde(skip_serializing_if = "Zero::is_zero", rename = "missingBins")] - missing_bins: u32, - #[serde(skip_serializing_if = "Option::is_none", rename = "continueAt")] - continue_at: Option, -} - -pub struct MinMaxAvgDim1BinsCollector { - bin_count_exp: u32, - timed_out: bool, - range_complete: bool, - vals: MinMaxAvgDim1Bins, - _m1: PhantomData, -} - -impl MinMaxAvgDim1BinsCollector { - pub fn new(bin_count_exp: u32) -> Self { - Self { - bin_count_exp, - timed_out: false, - range_complete: false, - vals: MinMaxAvgDim1Bins::::empty(), - _m1: PhantomData, - } - } -} - -impl WithLen for MinMaxAvgDim1BinsCollector -where - NTY: NumOps + Serialize, -{ - fn len(&self) -> usize { - self.vals.ts1s.len() - } -} - -impl Collector for MinMaxAvgDim1BinsCollector -where - NTY: NumOps + Serialize, -{ - type Input = 
MinMaxAvgDim1Bins; - type Output = MinMaxAvgDim1BinsCollectedResult; - - fn ingest(&mut self, src: &Self::Input) { - Appendable::append(&mut self.vals, src); - } - - fn set_range_complete(&mut self) { - self.range_complete = true; - } - - fn set_timed_out(&mut self) { - self.timed_out = true; - } - - fn result(self) -> Result { - let bin_count = self.vals.ts1s.len() as u32; - let mut tsa: Vec<_> = self - .vals - .ts1s - .iter() - .map(|&k| IsoDateTime(Utc.timestamp_nanos(k as i64))) - .collect(); - if let Some(&z) = self.vals.ts2s.last() { - tsa.push(IsoDateTime(Utc.timestamp_nanos(z as i64))); - } - let tsa = tsa; - let continue_at = if self.vals.ts1s.len() < self.bin_count_exp as usize { - match tsa.last() { - Some(k) => Some(k.clone()), - None => Err(Error::with_msg("partial_content but no bin in result"))?, - } - } else { - None - }; - let ret = MinMaxAvgDim1BinsCollectedResult:: { - ts_bin_edges: tsa, - counts: self.vals.counts, - mins: self.vals.mins, - maxs: self.vals.maxs, - avgs: self.vals.avgs, - finalised_range: self.range_complete, - missing_bins: self.bin_count_exp - bin_count, - continue_at, - }; - Ok(ret) - } -} - -impl Collectable for MinMaxAvgDim1Bins -where - NTY: NumOps + Serialize, -{ - type Collector = MinMaxAvgDim1BinsCollector; - - fn new_collector(bin_count_exp: u32) -> Self::Collector { - Self::Collector::new(bin_count_exp) - } -} - -pub struct MinMaxAvgDim1BinsAggregator { - range: NanoRange, - count: u64, - min: Option>, - max: Option>, - sumc: u64, - sum: Option>, -} - -impl MinMaxAvgDim1BinsAggregator { - pub fn new(range: NanoRange, _x_bin_count: usize, do_time_weight: bool) -> Self { - if do_time_weight { - err::todo(); - } - Self { - range, - count: 0, - // TODO get rid of Option - min: err::todoval(), - max: None, - sumc: 0, - sum: None, - } - } -} - -impl TimeBinnableTypeAggregator for MinMaxAvgDim1BinsAggregator -where - NTY: NumOps, -{ - type Input = MinMaxAvgDim1Bins; - type Output = MinMaxAvgDim1Bins; - - fn range(&self) -> &NanoRange { - &self.range - } - - fn ingest(&mut self, item: &Self::Input) { - for i1 in 0..item.ts1s.len() { - if item.ts2s[i1] <= self.range.beg { - continue; - } else if item.ts1s[i1] >= self.range.end { - continue; - } else { - match self.min.as_mut() { - None => self.min = item.mins[i1].clone(), - Some(min) => match item.mins[i1].as_ref() { - None => {} - Some(v) => { - for (a, b) in min.iter_mut().zip(v.iter()) { - if b < a { - *a = b.clone(); - } - } - } - }, - }; - match self.max.as_mut() { - None => self.max = item.maxs[i1].clone(), - Some(max) => match item.maxs[i1].as_ref() { - None => {} - Some(v) => { - for (a, b) in max.iter_mut().zip(v.iter()) { - if b > a { - *a = b.clone(); - } - } - } - }, - }; - match self.sum.as_mut() { - None => { - self.sum = item.avgs[i1].clone(); - } - Some(sum) => match item.avgs[i1].as_ref() { - None => {} - Some(v) => { - for (a, b) in sum.iter_mut().zip(v.iter()) { - if (*b).is_nan() { - } else { - *a += *b; - } - } - self.sumc += 1; - } - }, - } - self.count += item.counts[i1]; - } - } - } - - fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output { - let avg = if self.sumc == 0 { - None - } else { - let avg = self - .sum - .as_ref() - .unwrap() - .iter() - .map(|k| k / self.sumc as f32) - .collect(); - Some(avg) - }; - let ret = Self::Output { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - // TODO replace with reset-value instead: - mins: vec![self.min.clone()], - maxs: vec![self.max.clone()], - avgs: vec![avg], - }; - 
self.range = range; - self.count = 0; - self.min = None; - self.max = None; - self.sum = None; - self.sumc = 0; - ret - } -} - -#[derive(Serialize)] -pub struct WaveEventsCollectedResult { - #[serde(rename = "tsAnchor")] - ts_anchor_sec: u64, - #[serde(rename = "tsMs")] - ts_off_ms: Vec, - #[serde(rename = "tsNs")] - ts_off_ns: Vec, - #[serde(rename = "pulseAnchor")] - pulse_anchor: u64, - #[serde(rename = "pulseOff")] - pulse_off: Vec, - values: Vec>, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")] - range_complete: bool, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")] - timed_out: bool, -} - -pub struct WaveEventsCollector { - vals: WaveEvents, - range_complete: bool, - timed_out: bool, -} - -impl WaveEventsCollector { - pub fn new(_bin_count_exp: u32) -> Self { - info!("\n\nWaveEventsCollector\n\n"); - Self { - vals: WaveEvents::empty(), - range_complete: false, - timed_out: false, - } - } -} - -impl WithLen for WaveEventsCollector { - fn len(&self) -> usize { - self.vals.tss.len() - } -} - -impl Collector for WaveEventsCollector -where - NTY: NumOps, -{ - type Input = WaveEvents; - type Output = WaveEventsCollectedResult; - - fn ingest(&mut self, src: &Self::Input) { - self.vals.append(src); - } - - fn set_range_complete(&mut self) { - self.range_complete = true; - } - - fn set_timed_out(&mut self) { - self.timed_out = true; - } - - fn result(self) -> Result { - let tst = ts_offs_from_abs(&self.vals.tss); - let (pulse_anchor, pulse_off) = pulse_offs_from_abs(&self.vals.pulses); - let ret = Self::Output { - ts_anchor_sec: tst.0, - ts_off_ms: tst.1, - ts_off_ns: tst.2, - pulse_anchor, - pulse_off, - values: self.vals.vals, - range_complete: self.range_complete, - timed_out: self.timed_out, - }; - Ok(ret) - } -} - -impl Collectable for WaveEvents -where - NTY: NumOps, -{ - type Collector = WaveEventsCollector; - - fn new_collector(bin_count_exp: u32) -> Self::Collector { - Self::Collector::new(bin_count_exp) - } -} - -impl crate::TimeBinnableDynStub for MinMaxAvgDim1Bins {} - -impl TimeBinned for MinMaxAvgDim1Bins { - fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn { - self as &dyn TimeBinnableDyn - } - - fn edges_slice(&self) -> (&[u64], &[u64]) { - (&self.ts1s[..], &self.ts2s[..]) - } - - fn counts(&self) -> &[u64] { - &self.counts[..] 
- } - - fn avgs(&self) -> Vec { - err::todoval() - } - - fn mins(&self) -> Vec { - err::todoval() - } - - fn maxs(&self) -> Vec { - err::todoval() - } - - fn validate(&self) -> Result<(), String> { - err::todoval() - } -} diff --git a/items/src/eventsitem.rs b/items/src/eventsitem.rs deleted file mode 100644 index c297f4c..0000000 --- a/items/src/eventsitem.rs +++ /dev/null @@ -1,146 +0,0 @@ -use crate::binnedevents::XBinnedEvents; -use crate::plainevents::PlainEvents; -use crate::{Appendable, Clearable, FrameTypeInnerDyn, PushableIndex, WithLen, WithTimestamps}; -use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape}; -use serde::{Deserialize, Serialize}; - -// TODO remove -#[derive(Debug, Serialize, Deserialize)] -pub enum EventsItem { - Plain(PlainEvents), - XBinnedEvents(XBinnedEvents), -} - -impl FrameTypeInnerDyn for EventsItem { - fn frame_type_id(&self) -> u32 { - crate::EVENTS_ITEM_FRAME_TYPE_ID - } -} - -impl EventsItem { - pub fn is_wave(&self) -> bool { - use EventsItem::*; - match self { - Plain(h) => h.is_wave(), - XBinnedEvents(h) => { - if let Shape::Wave(_) = h.shape() { - true - } else { - false - } - } - } - } - - pub fn variant_name(&self) -> String { - use EventsItem::*; - match self { - Plain(h) => format!("Plain({})", h.variant_name()), - XBinnedEvents(h) => format!("Plain({})", h.variant_name()), - } - } - - pub fn x_aggregate(self, ak: &AggKind) -> Self { - use EventsItem::*; - match self { - Plain(k) => k.x_aggregate(ak), - XBinnedEvents(k) => k.x_aggregate(ak), - } - } - - pub fn type_info(&self) -> (ScalarType, Shape) { - (self.scalar_type(), self.shape()) - } -} - -impl WithLen for EventsItem { - fn len(&self) -> usize { - use EventsItem::*; - match self { - Plain(j) => j.len(), - XBinnedEvents(j) => j.len(), - } - } -} - -impl WithTimestamps for EventsItem { - fn ts(&self, ix: usize) -> u64 { - use EventsItem::*; - match self { - Plain(j) => j.ts(ix), - XBinnedEvents(j) => j.ts(ix), - } - } -} - -impl Appendable for EventsItem { - fn empty_like_self(&self) -> Self { - match self { - EventsItem::Plain(k) => EventsItem::Plain(k.empty_like_self()), - EventsItem::XBinnedEvents(k) => EventsItem::XBinnedEvents(k.empty_like_self()), - } - } - - fn append(&mut self, src: &Self) { - match self { - Self::Plain(k) => match src { - Self::Plain(j) => k.append(j), - _ => panic!(), - }, - Self::XBinnedEvents(k) => match src { - Self::XBinnedEvents(j) => k.append(j), - _ => panic!(), - }, - } - } - - fn append_zero(&mut self, _ts1: u64, _ts2: u64) { - // TODO can this implement Appendable in a sane way? Do we need it? - // TODO can we remove EventsItem? 
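- // Until that is decided, this path is left unimplemented; err::todo() flags it at runtime.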
- err::todo(); - } -} - -impl PushableIndex for EventsItem { - fn push_index(&mut self, src: &Self, ix: usize) { - match self { - Self::Plain(k) => match src { - Self::Plain(j) => k.push_index(j, ix), - _ => panic!(), - }, - Self::XBinnedEvents(k) => match src { - Self::XBinnedEvents(j) => k.push_index(j, ix), - _ => panic!(), - }, - } - } -} - -impl Clearable for EventsItem { - fn clear(&mut self) { - match self { - EventsItem::Plain(k) => k.clear(), - EventsItem::XBinnedEvents(k) => k.clear(), - } - } -} - -impl HasShape for EventsItem { - fn shape(&self) -> Shape { - use EventsItem::*; - match self { - Plain(h) => h.shape(), - XBinnedEvents(h) => h.shape(), - } - } -} - -impl HasScalarType for EventsItem { - fn scalar_type(&self) -> ScalarType { - use EventsItem::*; - match self { - Plain(h) => h.scalar_type(), - XBinnedEvents(h) => h.scalar_type(), - } - } -} diff --git a/items/src/items.rs b/items/src/items.rs index fa05355..5223d09 100644 --- a/items/src/items.rs +++ b/items/src/items.rs @@ -1,40 +1,42 @@ -pub mod binnedevents; -pub mod binsdim0; -pub mod binsdim1; pub mod eventfull; -pub mod eventsitem; pub mod frame; pub mod inmem; -pub mod numops; -pub mod plainevents; -pub mod scalarevents; pub mod streams; -pub mod waveevents; -pub mod xbinnedscalarevents; -pub mod xbinnedwaveevents; use crate::frame::make_frame_2; use bytes::BytesMut; -use chrono::{TimeZone, Utc}; +use chrono::TimeZone; +use chrono::Utc; use err::Error; -use frame::{make_error_frame, make_log_frame, make_range_complete_frame, make_stats_frame}; +use frame::make_error_frame; +use frame::make_log_frame; +use frame::make_range_complete_frame; +use frame::make_stats_frame; use items_0::AsAnyRef; +use netpod::log::Level; #[allow(unused)] use netpod::log::*; -use netpod::timeunits::{MS, SEC}; -use netpod::{log::Level, AggKind, EventDataReadStats, NanoRange, Shape}; -use netpod::{DiskStats, RangeFilterStats, ScalarType}; +use netpod::timeunits::MS; +use netpod::timeunits::SEC; +use netpod::DiskStats; +use netpod::EventDataReadStats; +use netpod::NanoRange; +use netpod::RangeFilterStats; +use netpod::Shape; use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize, Serializer}; +use serde::Deserialize; +use serde::Serialize; +use serde::Serializer; use std::any::Any; -use std::collections::VecDeque; use std::fmt; use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; -use std::task::{Context, Poll}; +use std::task::Context; +use std::task::Poll; use tokio::fs::File; -use tokio::io::{AsyncRead, ReadBuf}; +use tokio::io::AsyncRead; +use tokio::io::ReadBuf; pub const TERM_FRAME_TYPE_ID: u32 = 0xaa01; pub const ERROR_FRAME_TYPE_ID: u32 = 0xaa02; @@ -371,32 +373,6 @@ impl FrameType for EventQueryJsonStringFrame { } } -pub trait EventsNodeProcessorOutput: - fmt::Debug + Send + Unpin + DeserializeOwned + WithTimestamps + TimeBinnableType + ByteEstimate -{ - fn as_any_mut(&mut self) -> &mut dyn Any; - fn into_parts(self) -> (Box, VecDeque, VecDeque); -} - -pub trait EventsNodeProcessor: Send + Unpin { - type Input; - type Output: EventsNodeProcessorOutput; - fn create(shape: Shape, agg_kind: AggKind) -> Self; - fn process(&self, inp: Self::Input) -> Self::Output; -} - -pub trait EventsTypeAliases { - type TimeBinOutput; -} - -impl EventsTypeAliases for ENP -where - ENP: EventsNodeProcessor, - ::Output: TimeBinnableType, -{ - type TimeBinOutput = <::Output as TimeBinnableType>::Output; -} - #[derive(Clone, Debug, Deserialize)] pub struct IsoDateTime(chrono::DateTime); @@ -631,7 +607,7 @@ impl 
ReadPbv where T: ReadableFromFile, { - fn new(file: File) -> Self { + pub fn new(file: File) -> Self { Self { // TODO make buffer size a parameter: buf: vec![0; 1024 * 32], @@ -743,178 +719,3 @@ pub trait TimeBinnerDyn: Send { /// The next call to `Self::bins_ready_count` must return one higher count than before. fn cycle(&mut self); } - -pub fn empty_events_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box { - match shape { - Shape::Scalar => match agg_kind { - AggKind::TimeWeightedScalar => { - use ScalarType::*; - type K = scalarevents::ScalarEvents; - match scalar_type { - U8 => Box::new(K::::empty()), - U16 => Box::new(K::::empty()), - U32 => Box::new(K::::empty()), - U64 => Box::new(K::::empty()), - I8 => Box::new(K::::empty()), - I16 => Box::new(K::::empty()), - I32 => Box::new(K::::empty()), - I64 => Box::new(K::::empty()), - F32 => Box::new(K::::empty()), - F64 => Box::new(K::::empty()), - _ => { - error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind); - err::todoval() - } - } - } - _ => { - error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind); - err::todoval() - } - }, - Shape::Wave(_n) => match agg_kind { - AggKind::DimXBins1 => { - use ScalarType::*; - type K = waveevents::WaveEvents; - match scalar_type { - U8 => Box::new(K::::empty()), - F32 => Box::new(K::::empty()), - F64 => Box::new(K::::empty()), - BOOL => Box::new(K::::empty()), - _ => { - error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind); - err::todoval() - } - } - } - AggKind::Plain => { - use ScalarType::*; - type K = waveevents::WaveEvents; - match scalar_type { - U8 => Box::new(K::::empty()), - F32 => Box::new(K::::empty()), - F64 => Box::new(K::::empty()), - BOOL => Box::new(K::::empty()), - _ => { - error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind); - err::todoval() - } - } - } - _ => { - error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind); - err::todoval() - } - }, - Shape::Image(..) => { - error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind); - err::todoval() - } - } -} - -pub fn empty_binned_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box { - match shape { - Shape::Scalar => match agg_kind { - AggKind::TimeWeightedScalar => { - use ScalarType::*; - type K = binsdim0::MinMaxAvgDim0Bins; - match scalar_type { - U8 => Box::new(K::::empty()), - U16 => Box::new(K::::empty()), - U32 => Box::new(K::::empty()), - U64 => Box::new(K::::empty()), - I8 => Box::new(K::::empty()), - I16 => Box::new(K::::empty()), - I32 => Box::new(K::::empty()), - I64 => Box::new(K::::empty()), - F32 => Box::new(K::::empty()), - F64 => Box::new(K::::empty()), - _ => err::todoval(), - } - } - _ => err::todoval(), - }, - Shape::Wave(_n) => match agg_kind { - AggKind::DimXBins1 => { - use ScalarType::*; - type K = binsdim0::MinMaxAvgDim0Bins; - match scalar_type { - U8 => Box::new(K::::empty()), - F32 => Box::new(K::::empty()), - F64 => Box::new(K::::empty()), - _ => err::todoval(), - } - } - _ => err::todoval(), - }, - Shape::Image(..) 
=> err::todoval(), - } -} - -#[test] -fn bin_binned_01() { - use binsdim0::MinMaxAvgDim0Bins; - let edges = vec![SEC * 1000, SEC * 1010, SEC * 1020, SEC * 1030]; - let inp0 = as NewEmpty>::empty(Shape::Scalar); - let mut time_binner = inp0.time_binner_new(edges, true); - let inp1 = MinMaxAvgDim0Bins:: { - ts1s: vec![SEC * 1000, SEC * 1010], - ts2s: vec![SEC * 1010, SEC * 1020], - counts: vec![1, 1], - mins: vec![3, 4], - maxs: vec![10, 9], - avgs: vec![7., 6.], - }; - assert_eq!(time_binner.bins_ready_count(), 0); - time_binner.ingest(&inp1); - assert_eq!(time_binner.bins_ready_count(), 1); - time_binner.push_in_progress(false); - assert_eq!(time_binner.bins_ready_count(), 2); - // From here on, pushing any more should not change the bin count: - time_binner.push_in_progress(false); - assert_eq!(time_binner.bins_ready_count(), 2); - // On the other hand, cycling should add one more zero-bin: - time_binner.cycle(); - assert_eq!(time_binner.bins_ready_count(), 3); - time_binner.cycle(); - assert_eq!(time_binner.bins_ready_count(), 3); - let bins = time_binner.bins_ready().expect("bins should be ready"); - eprintln!("bins: {:?}", bins); - assert_eq!(time_binner.bins_ready_count(), 0); - assert_eq!(bins.counts(), &[1, 1, 0]); - // TODO use proper float-compare logic: - assert_eq!(bins.mins(), &[3., 4., 0.]); - assert_eq!(bins.maxs(), &[10., 9., 0.]); - assert_eq!(bins.avgs(), &[7., 6., 0.]); -} - -#[test] -fn bin_binned_02() { - use binsdim0::MinMaxAvgDim0Bins; - let edges = vec![SEC * 1000, SEC * 1020]; - let inp0 = as NewEmpty>::empty(Shape::Scalar); - let mut time_binner = inp0.time_binner_new(edges, true); - let inp1 = MinMaxAvgDim0Bins:: { - ts1s: vec![SEC * 1000, SEC * 1010], - ts2s: vec![SEC * 1010, SEC * 1020], - counts: vec![1, 1], - mins: vec![3, 4], - maxs: vec![10, 9], - avgs: vec![7., 6.], - }; - assert_eq!(time_binner.bins_ready_count(), 0); - time_binner.ingest(&inp1); - assert_eq!(time_binner.bins_ready_count(), 0); - time_binner.cycle(); - assert_eq!(time_binner.bins_ready_count(), 1); - time_binner.cycle(); - //assert_eq!(time_binner.bins_ready_count(), 2); - let bins = time_binner.bins_ready().expect("bins should be ready"); - eprintln!("bins: {:?}", bins); - assert_eq!(time_binner.bins_ready_count(), 0); - assert_eq!(bins.counts(), &[2]); - assert_eq!(bins.mins(), &[3.]); - assert_eq!(bins.maxs(), &[10.]); - assert_eq!(bins.avgs(), &[13. 
/ 2.]); -} diff --git a/items/src/numops.rs b/items/src/numops.rs deleted file mode 100644 index 569a4f8..0000000 --- a/items/src/numops.rs +++ /dev/null @@ -1,282 +0,0 @@ -use items_0::subfr::SubFrId; -use serde::de::DeserializeOwned; -use serde::Deserialize; -use serde::Serialize; -use std::cmp::Ordering; -use std::fmt::Debug; -use std::ops::Add; - -#[derive(Copy, Clone, Debug, Serialize, Deserialize)] -pub struct BoolNum(pub u8); - -impl BoolNum { - pub const MIN: Self = Self(0); - pub const MAX: Self = Self(1); -} - -impl Add for BoolNum { - type Output = BoolNum; - - fn add(self, rhs: BoolNum) -> Self::Output { - Self(self.0 + rhs.0) - } -} - -impl num_traits::Zero for BoolNum { - fn zero() -> Self { - Self(0) - } - - fn is_zero(&self) -> bool { - self.0 == 0 - } -} - -impl num_traits::AsPrimitive for BoolNum { - fn as_(self) -> f32 { - self.0 as f32 - } -} - -impl num_traits::Bounded for BoolNum { - fn min_value() -> Self { - Self(0) - } - - fn max_value() -> Self { - Self(1) - } -} - -impl PartialEq for BoolNum { - fn eq(&self, other: &Self) -> bool { - PartialEq::eq(&self.0, &other.0) - } -} - -impl PartialOrd for BoolNum { - fn partial_cmp(&self, other: &Self) -> Option { - PartialOrd::partial_cmp(&self.0, &other.0) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct StringNum(pub String); - -impl StringNum { - pub const MIN: Self = Self(String::new()); - pub const MAX: Self = Self(String::new()); -} - -impl Add for StringNum { - type Output = StringNum; - - fn add(self, _rhs: StringNum) -> Self::Output { - todo!() - } -} - -impl num_traits::Zero for StringNum { - fn zero() -> Self { - Self(String::new()) - } - - fn is_zero(&self) -> bool { - self.0.is_empty() - } -} - -impl num_traits::Bounded for StringNum { - fn min_value() -> Self { - Self(String::new()) - } - - fn max_value() -> Self { - Self(String::new()) - } -} - -impl PartialEq for StringNum { - fn eq(&self, other: &Self) -> bool { - PartialEq::eq(&self.0, &other.0) - } -} - -impl PartialOrd for StringNum { - fn partial_cmp(&self, other: &Self) -> Option { - PartialOrd::partial_cmp(&self.0, &other.0) - } -} - -pub trait NumOps: - Sized - + Clone - + AsPrimF32 - + Send - + Sync - + 'static - + Unpin - + Debug - //+ Zero - //+ Bounded - + PartialOrd - + SubFrId - + Serialize - + DeserializeOwned - + items_0::scalar_ops::ScalarOps -{ - fn min_or_nan() -> Self; - fn max_or_nan() -> Self; - fn is_nan(&self) -> bool; - fn zero() -> Self; -} - -macro_rules! impl_num_ops { - ($ty:ident, $min_or_nan:ident, $max_or_nan:ident, $is_nan:ident, $zero:expr) => { - impl NumOps for $ty { - fn min_or_nan() -> Self { - $ty::$min_or_nan - } - fn max_or_nan() -> Self { - $ty::$max_or_nan - } - fn is_nan(&self) -> bool { - $is_nan(self) - } - fn zero() -> Self { - $zero - } - } - }; -} - -impl AsPrimF32 for bool { - fn as_prim_f32(&self) -> f32 { - if *self { - 1. - } else { - 0. - } - } -} - -impl NumOps for bool { - fn min_or_nan() -> Self { - todo!() - } - - fn max_or_nan() -> Self { - todo!() - } - - fn is_nan(&self) -> bool { - todo!() - } - - fn zero() -> Self { - false - } -} - -fn is_nan_int(_x: &T) -> bool { - false -} - -fn is_nan_f32(x: &f32) -> bool { - f32::is_nan(*x) -} - -fn is_nan_f64(x: &f64) -> bool { - f64::is_nan(*x) -} - -pub trait AsPrimF32 { - fn as_prim_f32(&self) -> f32; -} - -macro_rules! 
impl_as_prim_f32 { - ($ty:ident) => { - impl AsPrimF32 for $ty { - fn as_prim_f32(&self) -> f32 { - *self as f32 - } - } - }; -} - -impl_as_prim_f32!(u8); -impl_as_prim_f32!(u16); -impl_as_prim_f32!(u32); -impl_as_prim_f32!(u64); -impl_as_prim_f32!(i8); -impl_as_prim_f32!(i16); -impl_as_prim_f32!(i32); -impl_as_prim_f32!(i64); -impl_as_prim_f32!(f32); -impl_as_prim_f32!(f64); - -impl AsPrimF32 for BoolNum { - fn as_prim_f32(&self) -> f32 { - self.0 as f32 - } -} - -impl AsPrimF32 for StringNum { - fn as_prim_f32(&self) -> f32 { - netpod::log::error!("TODO impl AsPrimF32 for StringNum"); - todo!() - } -} - -impl_num_ops!(u8, MIN, MAX, is_nan_int, 0); -impl_num_ops!(u16, MIN, MAX, is_nan_int, 0); -impl_num_ops!(u32, MIN, MAX, is_nan_int, 0); -impl_num_ops!(u64, MIN, MAX, is_nan_int, 0); -impl_num_ops!(i8, MIN, MAX, is_nan_int, 0); -impl_num_ops!(i16, MIN, MAX, is_nan_int, 0); -impl_num_ops!(i32, MIN, MAX, is_nan_int, 0); -impl_num_ops!(i64, MIN, MAX, is_nan_int, 0); -impl_num_ops!(f32, NAN, NAN, is_nan_f32, 0.); -impl_num_ops!(f64, NAN, NAN, is_nan_f64, 0.); -impl_num_ops!(BoolNum, MIN, MAX, is_nan_int, BoolNum(0)); -impl_num_ops!(StringNum, MIN, MAX, is_nan_int, StringNum(String::new())); - -impl SubFrId for StringNum { - const SUB: u32 = 0x0d; -} - -impl SubFrId for BoolNum { - const SUB: u32 = 0x0e; -} - -impl items_0::scalar_ops::AsPrimF32 for BoolNum { - fn as_prim_f32_b(&self) -> f32 { - todo!() - } -} - -impl items_0::scalar_ops::AsPrimF32 for StringNum { - fn as_prim_f32_b(&self) -> f32 { - todo!() - } -} - -impl items_0::scalar_ops::ScalarOps for BoolNum { - fn zero_b() -> Self { - todo!() - } - - fn equal_slack(&self, _rhs: &Self) -> bool { - todo!() - } -} - -impl items_0::scalar_ops::ScalarOps for StringNum { - fn zero_b() -> Self { - todo!() - } - - fn equal_slack(&self, _rhs: &Self) -> bool { - todo!() - } -} diff --git a/items/src/plainevents.rs b/items/src/plainevents.rs deleted file mode 100644 index 3e4933d..0000000 --- a/items/src/plainevents.rs +++ /dev/null @@ -1,190 +0,0 @@ -use crate::eventsitem::EventsItem; -use crate::scalarevents::ScalarEvents; -use crate::{Appendable, Clearable, PushableIndex, WithLen, WithTimestamps}; -use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -pub enum ScalarPlainEvents { - U8(ScalarEvents), - U16(ScalarEvents), - U32(ScalarEvents), - U64(ScalarEvents), - I8(ScalarEvents), - I16(ScalarEvents), - I32(ScalarEvents), - I64(ScalarEvents), - F32(ScalarEvents), - F64(ScalarEvents), - String(ScalarEvents), -} - -impl ScalarPlainEvents { - pub fn variant_name(&self) -> String { - items_proc::tycases1!(self, Self, (k), { "$id".into() }) - } -} - -impl Clearable for ScalarPlainEvents { - fn clear(&mut self) { - items_proc::tycases1!(self, Self, (k), { k.clear() }) - } -} - -impl Appendable for ScalarPlainEvents { - fn empty_like_self(&self) -> Self { - items_proc::tycases1!(self, Self, (k), { Self::$id(k.empty_like_self()) }) - } - - fn append(&mut self, src: &Self) { - items_proc::tycases1!(self, Self, (k), { - match src { - Self::$id(j) => k.append(j), - _ => panic!(), - } - }) - } - - fn append_zero(&mut self, _ts1: u64, _ts2: u64) { - // TODO can this implement Appendable in a sane way? Do we need it? 
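- // Could presumably delegate to the typed variant, but the need for zero events here is unclear.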
- err::todo(); - } -} - -impl PushableIndex for ScalarPlainEvents { - fn push_index(&mut self, src: &Self, ix: usize) { - items_proc::tycases1!(self, Self, (k), { - match src { - Self::$id(j) => k.push_index(j, ix), - _ => panic!(), - } - }) - } -} - -impl WithLen for ScalarPlainEvents { - fn len(&self) -> usize { - items_proc::tycases1!(self, Self, (k), { k.len() }) - } -} - -impl WithTimestamps for ScalarPlainEvents { - fn ts(&self, ix: usize) -> u64 { - items_proc::tycases1!(self, Self, (k), { k.ts(ix) }) - } -} - -impl HasShape for ScalarPlainEvents { - fn shape(&self) -> Shape { - Shape::Scalar - } -} - -impl HasScalarType for ScalarPlainEvents { - fn scalar_type(&self) -> ScalarType { - items_proc::tycases1!(self, Self, (k), { ScalarType::$id }) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum PlainEvents { - Scalar(ScalarPlainEvents), -} - -impl PlainEvents { - pub fn is_wave(&self) -> bool { - use PlainEvents::*; - match self { - Scalar(_) => false, - } - } - - pub fn variant_name(&self) -> String { - use PlainEvents::*; - match self { - Scalar(h) => format!("Scalar({})", h.variant_name()), - } - } - - pub fn x_aggregate(self, _: &AggKind) -> EventsItem { - use PlainEvents::*; - match self { - Scalar(k) => EventsItem::Plain(PlainEvents::Scalar(k)), - } - } -} - -impl Clearable for PlainEvents { - fn clear(&mut self) { - match self { - PlainEvents::Scalar(k) => k.clear(), - } - } -} - -impl Appendable for PlainEvents { - fn empty_like_self(&self) -> Self { - match self { - Self::Scalar(k) => Self::Scalar(k.empty_like_self()), - } - } - - fn append(&mut self, src: &Self) { - match self { - PlainEvents::Scalar(k) => match src { - Self::Scalar(j) => k.append(j), - }, - } - } - - fn append_zero(&mut self, _ts1: u64, _ts2: u64) { - // TODO can this implement Appendable in a sane way? Do we need it? 
- err::todo(); - } -} - -impl PushableIndex for PlainEvents { - fn push_index(&mut self, src: &Self, ix: usize) { - match self { - Self::Scalar(k) => match src { - Self::Scalar(j) => k.push_index(j, ix), - }, - } - } -} - -impl WithLen for PlainEvents { - fn len(&self) -> usize { - use PlainEvents::*; - match self { - Scalar(j) => j.len(), - } - } -} - -impl WithTimestamps for PlainEvents { - fn ts(&self, ix: usize) -> u64 { - use PlainEvents::*; - match self { - Scalar(j) => j.ts(ix), - } - } -} - -impl HasShape for PlainEvents { - fn shape(&self) -> Shape { - use PlainEvents::*; - match self { - Scalar(h) => HasShape::shape(h), - } - } -} - -impl HasScalarType for PlainEvents { - fn scalar_type(&self) -> ScalarType { - use PlainEvents::*; - match self { - Scalar(h) => h.scalar_type(), - } - } -} diff --git a/items/src/scalarevents.rs b/items/src/scalarevents.rs deleted file mode 100644 index d4ed030..0000000 --- a/items/src/scalarevents.rs +++ /dev/null @@ -1,847 +0,0 @@ -use crate::binsdim0::MinMaxAvgDim0Bins; -use crate::numops::NumOps; -use crate::streams::{Collectable, Collector}; -use crate::{ - pulse_offs_from_abs, ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn, - EventsNodeProcessorOutput, FilterFittingInside, Fits, FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty, - PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, TimeBinnableDyn, TimeBinnableType, - TimeBinnableTypeAggregator, TimeBinnerDyn, WithLen, WithTimestamps, -}; -use err::Error; -use items_0::AsAnyRef; -use netpod::log::*; -use netpod::{NanoRange, Shape}; -use serde::{Deserialize, Serialize}; -use std::any::Any; -use std::collections::VecDeque; -use std::fmt; -use tokio::fs::File; - -// TODO in this module reduce clones. - -#[derive(Serialize, Deserialize)] -pub struct ScalarEvents { - pub tss: Vec, - pub pulses: Vec, - pub values: Vec, -} - -impl ScalarEvents { - pub fn empty() -> Self { - Self { - tss: vec![], - pulses: vec![], - values: vec![], - } - } - - #[inline(always)] - pub fn push(&mut self, ts: u64, pulse: u64, value: NTY) { - self.tss.push(ts); - self.pulses.push(pulse); - self.values.push(value); - } - - // TODO should avoid the copies. - #[inline(always)] - pub fn extend_from_slice(&mut self, src: &Self) - where - NTY: Clone, - { - self.tss.extend_from_slice(&src.tss); - self.pulses.extend_from_slice(&src.pulses); - self.values.extend_from_slice(&src.values); - } - - #[inline(always)] - pub fn clearx(&mut self) { - self.tss.clear(); - self.pulses.clear(); - self.values.clear(); - } -} - -impl FrameTypeInnerStatic for ScalarEvents -where - NTY: NumOps, -{ - const FRAME_TYPE_ID: u32 = crate::EVENTS_0D_FRAME_TYPE_ID + NTY::SUB; -} - -impl FrameType for ScalarEvents -where - NTY: NumOps, -{ - fn frame_type_id(&self) -> u32 { - ::FRAME_TYPE_ID - } -} - -impl fmt::Debug for ScalarEvents -where - NTY: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "count {} ts {:?} .. {:?} vals {:?} .. 
{:?}", - self.tss.len(), - self.tss.first(), - self.tss.last(), - self.values.first(), - self.values.last(), - ) - } -} - -impl AsAnyRef for ScalarEvents -where - NTY: NumOps, -{ - fn as_any_ref(&self) -> &dyn Any { - self - } -} - -impl WithLen for ScalarEvents -where - NTY: NumOps, -{ - fn len(&self) -> usize { - self.tss.len() - } -} - -impl WithTimestamps for ScalarEvents -where - NTY: NumOps, -{ - fn ts(&self, ix: usize) -> u64 { - self.tss[ix] - } -} - -impl ByteEstimate for ScalarEvents -where - NTY: NumOps, -{ - fn byte_estimate(&self) -> u64 { - if self.tss.len() == 0 { - 0 - } else { - // TODO improve via a const fn on NTY - self.tss.len() as u64 * 16 - } - } -} - -impl RangeOverlapInfo for ScalarEvents { - fn ends_before(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts < range.beg, - None => true, - } - } - - fn ends_after(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } - - fn starts_after(&self, range: NanoRange) -> bool { - match self.tss.first() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } -} - -impl FitsInside for ScalarEvents { - fn fits_inside(&self, range: NanoRange) -> Fits { - if self.tss.is_empty() { - Fits::Empty - } else { - let t1 = *self.tss.first().unwrap(); - let t2 = *self.tss.last().unwrap(); - if t2 < range.beg { - Fits::Lower - } else if t1 > range.end { - Fits::Greater - } else if t1 < range.beg && t2 > range.end { - Fits::PartlyLowerAndGreater - } else if t1 < range.beg { - Fits::PartlyLower - } else if t2 > range.end { - Fits::PartlyGreater - } else { - Fits::Inside - } - } - } -} - -impl FilterFittingInside for ScalarEvents { - fn filter_fitting_inside(self, fit_range: NanoRange) -> Option { - match self.fits_inside(fit_range) { - Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self), - _ => None, - } - } -} - -impl PushableIndex for ScalarEvents -where - NTY: NumOps, -{ - fn push_index(&mut self, src: &Self, ix: usize) { - self.push(src.tss[ix], src.pulses[ix], src.values[ix].clone()); - } -} - -impl NewEmpty for ScalarEvents { - fn empty(_shape: Shape) -> Self { - Self { - tss: Vec::new(), - pulses: Vec::new(), - values: Vec::new(), - } - } -} - -impl Appendable for ScalarEvents -where - NTY: NumOps, -{ - fn empty_like_self(&self) -> Self { - Self::empty() - } - - fn append(&mut self, src: &Self) { - self.extend_from_slice(src); - } - - fn append_zero(&mut self, ts1: u64, _ts2: u64) { - self.tss.push(ts1); - self.pulses.push(0); - self.values.push(NTY::zero()); - } -} - -impl Clearable for ScalarEvents { - fn clear(&mut self) { - ScalarEvents::::clearx(self); - } -} - -impl ReadableFromFile for ScalarEvents -where - NTY: NumOps, -{ - fn read_from_file(_file: File) -> Result, Error> { - // TODO refactor types such that this can be removed. 
- panic!() - } - - fn from_buf(_buf: &[u8]) -> Result { - panic!() - } -} - -impl TimeBinnableType for ScalarEvents -where - NTY: NumOps, -{ - type Output = MinMaxAvgDim0Bins; - type Aggregator = EventValuesAggregator; - - fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator { - debug!( - "TimeBinnableType for EventValues aggregator() range {:?} x_bin_count {} do_time_weight {}", - range, x_bin_count, do_time_weight - ); - Self::Aggregator::new(range, do_time_weight) - } -} - -pub struct EventValuesCollector { - vals: ScalarEvents, - range_complete: bool, - timed_out: bool, -} - -impl EventValuesCollector { - pub fn new() -> Self { - Self { - vals: ScalarEvents::empty(), - range_complete: false, - timed_out: false, - } - } -} - -impl WithLen for EventValuesCollector { - fn len(&self) -> usize { - self.vals.tss.len() - } -} - -#[derive(Serialize)] -pub struct EventValuesCollectorOutput { - #[serde(rename = "tsAnchor")] - ts_anchor_sec: u64, - #[serde(rename = "tsMs")] - ts_off_ms: Vec, - #[serde(rename = "tsNs")] - ts_off_ns: Vec, - #[serde(rename = "pulseAnchor")] - pulse_anchor: u64, - #[serde(rename = "pulseOff")] - pulse_off: Vec, - values: Vec, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")] - range_complete: bool, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")] - timed_out: bool, -} - -impl Collector for EventValuesCollector -where - NTY: NumOps, -{ - type Input = ScalarEvents; - type Output = EventValuesCollectorOutput; - - fn ingest(&mut self, src: &Self::Input) { - self.vals.append(src); - } - - fn set_range_complete(&mut self) { - self.range_complete = true; - } - - fn set_timed_out(&mut self) { - self.timed_out = true; - } - - fn result(self) -> Result { - let tst = ts_offs_from_abs(&self.vals.tss); - let (pulse_anchor, pulse_off) = pulse_offs_from_abs(&self.vals.pulses); - let ret = Self::Output { - ts_anchor_sec: tst.0, - ts_off_ms: tst.1, - ts_off_ns: tst.2, - pulse_anchor, - pulse_off, - values: self.vals.values, - range_complete: self.range_complete, - timed_out: self.timed_out, - }; - Ok(ret) - } -} - -impl Collectable for ScalarEvents -where - NTY: NumOps, -{ - type Collector = EventValuesCollector; - - fn new_collector(_bin_count_exp: u32) -> Self::Collector { - Self::Collector::new() - } -} - -pub struct EventValuesAggregator { - range: NanoRange, - count: u64, - min: NTY, - max: NTY, - sumc: u64, - sum: f32, - int_ts: u64, - last_ts: u64, - last_val: Option, - do_time_weight: bool, - events_taken_count: u64, - events_ignored_count: u64, -} - -impl Drop for EventValuesAggregator { - fn drop(&mut self) { - // TODO collect as stats for the request context: - trace!( - "taken {} ignored {}", - self.events_taken_count, - self.events_ignored_count - ); - } -} - -impl EventValuesAggregator -where - NTY: NumOps, -{ - pub fn new(range: NanoRange, do_time_weight: bool) -> Self { - let int_ts = range.beg; - Self { - range, - count: 0, - min: NTY::zero(), - max: NTY::zero(), - sum: 0., - sumc: 0, - int_ts, - last_ts: 0, - last_val: None, - do_time_weight, - events_taken_count: 0, - events_ignored_count: 0, - } - } - - // TODO reduce clone.. optimize via more traits to factor the trade-offs? 
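- // The first ingested value seeds both min and max; later values only widen the range.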
- fn apply_min_max(&mut self, val: NTY) { - if self.count == 0 { - self.min = val.clone(); - self.max = val.clone(); - } else { - if self.min > val { - self.min = val.clone(); - } - if self.max < val { - self.max = val.clone(); - } - } - } - - fn apply_event_unweight(&mut self, val: NTY) { - let vf = val.as_prim_f32(); - self.apply_min_max(val); - if vf.is_nan() { - } else { - self.sum += vf; - self.sumc += 1; - } - } - - fn apply_event_time_weight(&mut self, ts: u64) { - if let Some(v) = &self.last_val { - let vf = v.as_prim_f32(); - let v2 = v.clone(); - self.apply_min_max(v2); - let w = if self.do_time_weight { - (ts - self.int_ts) as f32 * 1e-9 - } else { - 1. - }; - if vf.is_nan() { - } else { - self.sum += vf * w; - self.sumc += 1; - } - self.int_ts = ts; - } else { - debug!( - "apply_event_time_weight NO VALUE {}", - ts as i64 - self.range.beg as i64 - ); - } - } - - fn ingest_unweight(&mut self, item: &::Input) { - for i1 in 0..item.tss.len() { - let ts = item.tss[i1]; - let val = item.values[i1].clone(); - if ts < self.range.beg { - self.events_ignored_count += 1; - } else if ts >= self.range.end { - self.events_ignored_count += 1; - return; - } else { - self.apply_event_unweight(val); - self.count += 1; - self.events_taken_count += 1; - } - } - } - - fn ingest_time_weight(&mut self, item: &::Input) { - for i1 in 0..item.tss.len() { - let ts = item.tss[i1]; - let val = item.values[i1].clone(); - if ts < self.int_ts { - if self.last_val.is_none() { - info!( - "ingest_time_weight event before range, only set last ts {} val {:?}", - ts, val - ); - } - self.events_ignored_count += 1; - self.last_ts = ts; - self.last_val = Some(val); - } else if ts >= self.range.end { - self.events_ignored_count += 1; - return; - } else { - self.apply_event_time_weight(ts); - if self.last_val.is_none() { - info!( - "call apply_min_max without last val, use current instead {} {:?}", - ts, val - ); - self.apply_min_max(val.clone()); - } - self.count += 1; - self.last_ts = ts; - self.last_val = Some(val); - self.events_taken_count += 1; - } - } - } - - fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins { - let (min, max, avg) = if self.sumc > 0 { - let avg = self.sum / self.sumc as f32; - (self.min.clone(), self.max.clone(), avg) - } else { - let g = match &self.last_val { - Some(x) => x.clone(), - None => NTY::zero(), - }; - (g.clone(), g.clone(), g.as_prim_f32()) - }; - let ret = MinMaxAvgDim0Bins { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - mins: vec![min], - maxs: vec![max], - avgs: vec![avg], - }; - self.int_ts = range.beg; - self.range = range; - self.count = 0; - self.sum = 0f32; - self.sumc = 0; - ret - } - - fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim0Bins { - // TODO check callsite for correct expand status. 
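- // Expanding integrates the last seen value up to the closing bin edge, so a
- // bin without events still reflects the held value under time-weighting.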
- if expand { - debug!("result_reset_time_weight calls apply_event_time_weight"); - self.apply_event_time_weight(self.range.end); - } else { - debug!("result_reset_time_weight NO EXPAND"); - } - let (min, max, avg) = if self.sumc > 0 { - let avg = self.sum / (self.range.delta() as f32 * 1e-9); - (self.min.clone(), self.max.clone(), avg) - } else { - let g = match &self.last_val { - Some(x) => x.clone(), - None => NTY::zero(), - }; - (g.clone(), g.clone(), g.as_prim_f32()) - }; - let ret = MinMaxAvgDim0Bins { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - mins: vec![min], - maxs: vec![max], - avgs: vec![avg], - }; - self.int_ts = range.beg; - self.range = range; - self.count = 0; - self.sum = 0f32; - self.sumc = 0; - ret - } -} - -impl TimeBinnableTypeAggregator for EventValuesAggregator -where - NTY: NumOps, -{ - type Input = ScalarEvents; - type Output = MinMaxAvgDim0Bins; - - fn range(&self) -> &NanoRange { - &self.range - } - - fn ingest(&mut self, item: &Self::Input) { - debug!("ingest len {}", item.len()); - if self.do_time_weight { - self.ingest_time_weight(item) - } else { - self.ingest_unweight(item) - } - } - - fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output { - debug!("Produce for {:?} next {:?}", self.range, range); - if self.do_time_weight { - self.result_reset_time_weight(range, expand) - } else { - self.result_reset_unweight(range, expand) - } - } -} - -impl EventAppendable for ScalarEvents -where - NTY: NumOps, -{ - type Value = NTY; - - fn append_event(ret: Option, ts: u64, pulse: u64, value: Self::Value) -> Self { - let mut ret = if let Some(ret) = ret { ret } else { Self::empty() }; - ret.push(ts, pulse, value); - ret - } -} - -impl TimeBinnableDyn for ScalarEvents { - fn time_binner_new(&self, edges: Vec, do_time_weight: bool) -> Box { - let ret = ScalarEventsTimeBinner::::new(edges.into(), do_time_weight); - Box::new(ret) - } -} - -impl EventsDyn for ScalarEvents { - fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn { - self as &dyn TimeBinnableDyn - } - - fn verify(&self) { - let mut ts_max = 0; - for ts in &self.tss { - let ts = *ts; - if ts < ts_max { - error!("unordered event data ts {} ts_max {}", ts, ts_max); - } - ts_max = ts_max.max(ts); - } - } - - fn output_info(&self) { - if false { - info!("output_info len {}", self.tss.len()); - if self.tss.len() == 1 { - info!( - " only: ts {} pulse {} value {:?}", - self.tss[0], self.pulses[0], self.values[0] - ); - } else if self.tss.len() > 1 { - info!( - " first: ts {} pulse {} value {:?}", - self.tss[0], self.pulses[0], self.values[0] - ); - let n = self.tss.len() - 1; - info!( - " last: ts {} pulse {} value {:?}", - self.tss[n], self.pulses[n], self.values[n] - ); - } - } - } -} - -pub struct ScalarEventsTimeBinner { - // The first two edges are used the next time that we create an aggregator, or push a zero bin. 
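- // Edges are consumed front to back: next_bin_range pops one edge per finished bin.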
- edges: VecDeque, - do_time_weight: bool, - agg: Option>, - ready: Option< as TimeBinnableTypeAggregator>::Output>, -} - -impl ScalarEventsTimeBinner { - fn new(edges: VecDeque, do_time_weight: bool) -> Self { - Self { - edges, - do_time_weight, - agg: None, - ready: None, - } - } - - fn next_bin_range(&mut self) -> Option { - if self.edges.len() >= 2 { - let ret = NanoRange { - beg: self.edges[0], - end: self.edges[1], - }; - self.edges.pop_front(); - Some(ret) - } else { - None - } - } -} - -impl TimeBinnerDyn for ScalarEventsTimeBinner { - fn bins_ready_count(&self) -> usize { - match &self.ready { - Some(k) => k.len(), - None => 0, - } - } - - fn bins_ready(&mut self) -> Option> { - match self.ready.take() { - Some(k) => Some(Box::new(k)), - None => None, - } - } - - fn ingest(&mut self, item: &dyn TimeBinnableDyn) { - const SELF: &str = "ScalarEventsTimeBinner"; - if item.len() == 0 { - // Return already here, RangeOverlapInfo would not give much sense. - return; - } - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} no more bin in edges A"); - return; - } - // TODO optimize by remembering at which event array index we have arrived. - // That needs modified interfaces which can take and yield the start and latest index. - loop { - while item.starts_after(NanoRange { - beg: 0, - end: self.edges[1], - }) { - self.cycle(); - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} no more bin in edges B"); - return; - } - } - if item.ends_before(NanoRange { - beg: self.edges[0], - end: u64::MAX, - }) { - return; - } else { - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} edge list exhausted"); - return; - } else { - let agg = if let Some(agg) = self.agg.as_mut() { - agg - } else { - self.agg = Some(EventValuesAggregator::new( - // We know here that we have enough edges for another bin. - // and `next_bin_range` will pop the first edge. - self.next_bin_range().unwrap(), - self.do_time_weight, - )); - self.agg.as_mut().unwrap() - }; - if let Some(item) = item - .as_any_ref() - // TODO make statically sure that we attempt to cast to the correct type here: - .downcast_ref::< as TimeBinnableTypeAggregator>::Input>() - { - // TODO collect statistics associated with this request: - agg.ingest(item); - } else { - error!("not correct item type"); - }; - if item.ends_after(agg.range().clone()) { - self.cycle(); - if self.edges.len() < 2 { - warn!("TimeBinnerDyn for {SELF} no more bin in edges C"); - return; - } - } else { - break; - } - } - } - } - } - - fn push_in_progress(&mut self, push_empty: bool) { - // TODO expand should be derived from AggKind. Is it still required after all? - // TODO here, the expand means that agg will assume that the current value is kept constant during - // the rest of the time range. 
- let expand = true; - let range_next = if self.agg.is_some() { - if let Some(x) = self.next_bin_range() { - Some(x) - } else { - None - } - } else { - None - }; - if let Some(agg) = self.agg.as_mut() { - let mut bins; - if let Some(range_next) = range_next { - bins = agg.result_reset(range_next, expand); - } else { - let range_next = NanoRange { beg: 4, end: 5 }; - bins = agg.result_reset(range_next, expand); - self.agg = None; - } - assert_eq!(bins.len(), 1); - if push_empty || bins.counts[0] != 0 { - match self.ready.as_mut() { - Some(ready) => { - ready.append(&mut bins); - } - None => { - self.ready = Some(bins); - } - } - } - } - } - - fn cycle(&mut self) { - let n = self.bins_ready_count(); - self.push_in_progress(true); - if self.bins_ready_count() == n { - if let Some(range) = self.next_bin_range() { - let mut bins = MinMaxAvgDim0Bins::::empty(); - bins.append_zero(range.beg, range.end); - match self.ready.as_mut() { - Some(ready) => { - ready.append(&mut bins); - } - None => { - self.ready = Some(bins); - } - } - if self.bins_ready_count() <= n { - error!("failed to push a zero bin"); - } - } else { - warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin"); - } - } - } -} - -impl EventsNodeProcessorOutput for ScalarEvents -where - NTY: NumOps, -{ - fn as_any_mut(&mut self) -> &mut dyn Any { - self - } - - fn into_parts(self) -> (Box, VecDeque, VecDeque) { - ( - Box::new(VecDeque::from(self.values)), - self.tss.into(), - self.pulses.into(), - ) - } -} diff --git a/items/src/waveevents.rs b/items/src/waveevents.rs deleted file mode 100644 index 52bb2d1..0000000 --- a/items/src/waveevents.rs +++ /dev/null @@ -1,561 +0,0 @@ -use crate::binsdim1::MinMaxAvgDim1Bins; -use crate::numops::NumOps; -use crate::xbinnedscalarevents::XBinnedScalarEvents; -use crate::xbinnedwaveevents::XBinnedWaveEvents; -use crate::{ - Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn, EventsNodeProcessor, EventsNodeProcessorOutput, - FilterFittingInside, Fits, FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, - ReadPbv, ReadableFromFile, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps, -}; -use err::Error; -use items_0::subfr::SubFrId; -use items_0::AsAnyRef; -use netpod::log::*; -use netpod::{x_bin_count, AggKind, NanoRange, Shape}; -use serde::{Deserialize, Serialize}; -use std::any::Any; -use std::collections::VecDeque; -use std::marker::PhantomData; -use tokio::fs::File; - -#[derive(Debug, Serialize, Deserialize)] -pub struct WaveEvents { - pub tss: Vec, - pub pulses: Vec, - pub vals: Vec>, -} - -impl WaveEvents { - pub fn push(&mut self, ts: u64, pulse: u64, value: Vec) { - self.tss.push(ts); - self.pulses.push(pulse); - self.vals.push(value); - } -} - -impl WaveEvents { - pub fn empty() -> Self { - Self { - tss: Vec::new(), - pulses: Vec::new(), - vals: Vec::new(), - } - } - - pub fn shape(&self) -> Result { - if let Some(k) = self.vals.first() { - let ret = Shape::Wave(k.len() as u32); - Ok(ret) - } else { - Err(Error::with_msg_no_trace("WaveEvents is empty, can not determine Shape")) - } - } -} - -impl FrameTypeInnerStatic for WaveEvents -where - NTY: SubFrId, -{ - const FRAME_TYPE_ID: u32 = crate::WAVE_EVENTS_FRAME_TYPE_ID + NTY::SUB; -} - -impl FrameType for WaveEvents -where - NTY: NumOps, -{ - fn frame_type_id(&self) -> u32 { - ::FRAME_TYPE_ID - } -} - -impl AsAnyRef for WaveEvents -where - NTY: NumOps, -{ - fn as_any_ref(&self) -> &dyn Any { - self - } -} - -impl WithLen for 
WaveEvents { - fn len(&self) -> usize { - self.tss.len() - } -} - -impl WithTimestamps for WaveEvents { - fn ts(&self, ix: usize) -> u64 { - self.tss[ix] - } -} - -impl ByteEstimate for WaveEvents { - fn byte_estimate(&self) -> u64 { - if self.tss.len() == 0 { - 0 - } else { - // TODO improve via a const fn on NTY - self.tss.len() as u64 * 8 * self.vals[0].len() as u64 - } - } -} - -impl RangeOverlapInfo for WaveEvents { - fn ends_before(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts < range.beg, - None => true, - } - } - - fn ends_after(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } - - fn starts_after(&self, range: NanoRange) -> bool { - match self.tss.first() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } -} - -impl FitsInside for WaveEvents { - fn fits_inside(&self, range: NanoRange) -> Fits { - if self.tss.is_empty() { - Fits::Empty - } else { - let t1 = *self.tss.first().unwrap(); - let t2 = *self.tss.last().unwrap(); - if t2 < range.beg { - Fits::Lower - } else if t1 > range.end { - Fits::Greater - } else if t1 < range.beg && t2 > range.end { - Fits::PartlyLowerAndGreater - } else if t1 < range.beg { - Fits::PartlyLower - } else if t2 > range.end { - Fits::PartlyGreater - } else { - Fits::Inside - } - } - } -} - -impl FilterFittingInside for WaveEvents { - fn filter_fitting_inside(self, fit_range: NanoRange) -> Option { - match self.fits_inside(fit_range) { - Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self), - _ => None, - } - } -} - -impl PushableIndex for WaveEvents -where - NTY: NumOps, -{ - fn push_index(&mut self, src: &Self, ix: usize) { - self.tss.push(src.tss[ix]); - // TODO trait should allow to move from source. - self.vals.push(src.vals[ix].clone()); - } -} - -impl NewEmpty for WaveEvents { - fn empty(_shape: Shape) -> Self { - Self { - tss: Vec::new(), - pulses: Vec::new(), - vals: Vec::new(), - } - } -} - -impl Appendable for WaveEvents -where - NTY: NumOps, -{ - fn empty_like_self(&self) -> Self { - Self::empty() - } - - fn append(&mut self, src: &Self) { - self.tss.extend_from_slice(&src.tss); - self.vals.extend_from_slice(&src.vals); - } - - fn append_zero(&mut self, ts1: u64, _ts2: u64) { - self.tss.push(ts1); - self.pulses.push(0); - self.vals.push(Vec::new()); - } -} - -impl Clearable for WaveEvents { - fn clear(&mut self) { - self.tss.clear(); - self.vals.clear(); - } -} - -impl ReadableFromFile for WaveEvents -where - NTY: NumOps, -{ - fn read_from_file(_file: File) -> Result, Error> { - // TODO refactor types such that this impl is not needed. 
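- // Same situation as for ScalarEvents; believed unused and kept only to satisfy the trait.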
- panic!() - } - - fn from_buf(_buf: &[u8]) -> Result { - panic!() - } -} - -impl TimeBinnableType for WaveEvents -where - NTY: NumOps, -{ - type Output = MinMaxAvgDim1Bins; - type Aggregator = WaveEventsAggregator; - - fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator { - debug!( - "TimeBinnableType for WaveEvents aggregator() range {:?} x_bin_count {} do_time_weight {}", - range, x_bin_count, do_time_weight - ); - Self::Aggregator::new(range, x_bin_count, do_time_weight) - } -} - -pub struct WaveEventsAggregator -where - NTY: NumOps, -{ - range: NanoRange, - count: u64, - min: Option>, - max: Option>, - sumc: u64, - sum: Option>, -} - -impl WaveEventsAggregator -where - NTY: NumOps, -{ - pub fn new(range: NanoRange, _x_bin_count: usize, do_time_weight: bool) -> Self { - if do_time_weight { - err::todo(); - } - Self { - range, - count: 0, - // TODO create the right number of bins right here: - min: err::todoval(), - max: None, - sumc: 0, - sum: None, - } - } -} - -impl TimeBinnableTypeAggregator for WaveEventsAggregator -where - NTY: NumOps, -{ - type Input = WaveEvents; - type Output = MinMaxAvgDim1Bins; - - fn range(&self) -> &NanoRange { - &self.range - } - - fn ingest(&mut self, item: &Self::Input) { - error!("time-weighted binning not available"); - err::todo(); - for i1 in 0..item.tss.len() { - let ts = item.tss[i1]; - if ts < self.range.beg { - continue; - } else if ts >= self.range.end { - continue; - } else { - match &mut self.min { - None => self.min = Some(item.vals[i1].clone()), - Some(min) => { - for (a, b) in min.iter_mut().zip(item.vals[i1].iter()) { - if b < a { - *a = b.clone(); - } - } - } - }; - match &mut self.max { - None => self.max = Some(item.vals[i1].clone()), - Some(max) => { - for (a, b) in max.iter_mut().zip(item.vals[i1].iter()) { - if b < a { - *a = b.clone(); - } - } - } - }; - match self.sum.as_mut() { - None => { - self.sum = Some(item.vals[i1].iter().map(|k| k.as_prim_f32()).collect()); - } - Some(sum) => { - for (a, b) in sum.iter_mut().zip(item.vals[i1].iter()) { - let vf = b.as_prim_f32(); - if vf.is_nan() { - } else { - *a += vf; - } - } - } - } - self.sumc += 1; - self.count += 1; - } - } - } - - fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output { - let avg = if self.sumc == 0 { - None - } else { - let avg = self - .sum - .as_ref() - .unwrap() - .iter() - .map(|item| item / self.sumc as f32) - .collect(); - Some(avg) - }; - let ret = Self::Output { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - // TODO replace with reset-value instead. 
- mins: vec![self.min.clone()], - maxs: vec![self.max.clone()], - avgs: vec![avg], - }; - self.range = range; - self.count = 0; - self.min = None; - self.max = None; - self.sum = None; - self.sumc = 0; - ret - } -} - -impl EventAppendable for WaveEvents -where - NTY: NumOps, -{ - type Value = Vec; - - fn append_event(ret: Option, ts: u64, pulse: u64, value: Self::Value) -> Self { - let mut ret = if let Some(ret) = ret { ret } else { Self::empty() }; - ret.push(ts, pulse, value); - ret - } -} - -pub struct WaveXBinner { - _m1: PhantomData, -} - -impl EventsNodeProcessor for WaveXBinner -where - NTY: NumOps, -{ - type Input = WaveEvents; - type Output = XBinnedScalarEvents; - - fn create(_shape: Shape, _agg_kind: AggKind) -> Self { - Self { _m1: PhantomData } - } - - fn process(&self, inp: Self::Input) -> Self::Output { - let nev = inp.tss.len(); - let mut ret = Self::Output { - tss: inp.tss, - mins: Vec::with_capacity(nev), - maxs: Vec::with_capacity(nev), - avgs: Vec::with_capacity(nev), - }; - for i1 in 0..nev { - let mut min = NTY::max_or_nan(); - let mut max = NTY::min_or_nan(); - let mut sum = 0f32; - let mut sumc = 0; - let vals = &inp.vals[i1]; - for v in vals.iter() { - if v < &min || min.is_nan() { - min = v.clone(); - } - if v > &max || max.is_nan() { - max = v.clone(); - } - let vf = v.as_prim_f32(); - if vf.is_nan() { - } else { - sum += vf; - sumc += 1; - } - } - ret.mins.push(min); - ret.maxs.push(max); - if sumc == 0 { - ret.avgs.push(f32::NAN); - } else { - ret.avgs.push(sum / sumc as f32); - } - } - ret - } -} - -pub struct WaveNBinner { - shape_bin_count: usize, - x_bin_count: usize, - _m1: PhantomData, -} - -impl EventsNodeProcessor for WaveNBinner -where - NTY: NumOps, -{ - type Input = WaveEvents; - type Output = XBinnedWaveEvents; - - fn create(shape: Shape, agg_kind: AggKind) -> Self { - // TODO get rid of panic potential - let shape_bin_count = if let Shape::Wave(n) = shape { n } else { panic!() } as usize; - let x_bin_count = x_bin_count(&shape, &agg_kind); - Self { - shape_bin_count, - x_bin_count, - _m1: PhantomData, - } - } - - fn process(&self, inp: Self::Input) -> Self::Output { - let nev = inp.tss.len(); - let mut ret = Self::Output { - // TODO get rid of this clone: - tss: inp.tss.clone(), - mins: Vec::with_capacity(nev), - maxs: Vec::with_capacity(nev), - avgs: Vec::with_capacity(nev), - }; - for i1 in 0..nev { - let mut min = vec![NTY::max_or_nan(); self.x_bin_count]; - let mut max = vec![NTY::min_or_nan(); self.x_bin_count]; - let mut sum = vec![0f32; self.x_bin_count]; - let mut sumc = vec![0u64; self.x_bin_count]; - for (i2, v) in inp.vals[i1].iter().enumerate() { - let i3 = i2 * self.x_bin_count / self.shape_bin_count; - if v < &min[i3] || min[i3].is_nan() { - min[i3] = v.clone(); - } - if v > &max[i3] || max[i3].is_nan() { - max[i3] = v.clone(); - } - if v.is_nan() { - } else { - sum[i3] += v.as_prim_f32(); - sumc[i3] += 1; - } - } - // TODO - if false && inp.tss[0] < 1300 { - info!("WaveNBinner process push min {:?}", min); - } - ret.mins.push(min); - ret.maxs.push(max); - let avg = sum - .into_iter() - .zip(sumc.into_iter()) - .map(|(j, k)| if k > 0 { j / k as f32 } else { f32::NAN }) - .collect(); - ret.avgs.push(avg); - } - ret - } -} - -pub struct WavePlainProc { - _m1: PhantomData, -} - -// TODO purpose? 
-impl EventsNodeProcessor for WavePlainProc -where - NTY: NumOps, -{ - type Input = WaveEvents; - type Output = WaveEvents; - - fn create(_shape: Shape, _agg_kind: AggKind) -> Self { - Self { _m1: PhantomData } - } - - fn process(&self, inp: Self::Input) -> Self::Output { - if false { - let n = if inp.vals.len() > 0 { inp.vals[0].len() } else { 0 }; - let n = if n > 5 { 5 } else { n }; - WaveEvents { - tss: inp.tss, - pulses: inp.pulses, - vals: inp.vals.iter().map(|k| k[..n].to_vec()).collect(), - } - } else { - WaveEvents { - tss: inp.tss, - pulses: inp.pulses, - vals: inp.vals, - } - } - } -} - -impl crate::TimeBinnableDynStub for WaveEvents {} - -impl EventsDyn for WaveEvents { - fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn { - self as &dyn TimeBinnableDyn - } - - fn verify(&self) { - todo!() - } - - fn output_info(&self) { - todo!() - } -} - -impl EventsNodeProcessorOutput for WaveEvents -where - NTY: NumOps, -{ - fn as_any_mut(&mut self) -> &mut dyn Any { - self - } - - fn into_parts(self) -> (Box, VecDeque, VecDeque) { - todo!() - } -} diff --git a/items/src/xbinnedscalarevents.rs b/items/src/xbinnedscalarevents.rs deleted file mode 100644 index 338b8ea..0000000 --- a/items/src/xbinnedscalarevents.rs +++ /dev/null @@ -1,520 +0,0 @@ -use std::any::Any; -use std::collections::VecDeque; - -use crate::binsdim0::MinMaxAvgDim0Bins; -use crate::numops::NumOps; -use crate::streams::{Collectable, Collector}; -use crate::{ - ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventsNodeProcessorOutput, FilterFittingInside, Fits, - FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, - TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps, -}; -use err::Error; -use items_0::subfr::SubFrId; -use netpod::log::*; -use netpod::{NanoRange, Shape}; -use serde::{Deserialize, Serialize}; -use tokio::fs::File; - -// TODO in this module reduce clones - -// TODO rename Scalar -> Dim0 -#[derive(Debug, Serialize, Deserialize)] -pub struct XBinnedScalarEvents { - pub tss: Vec, - pub mins: Vec, - pub maxs: Vec, - pub avgs: Vec, -} - -impl FrameTypeInnerStatic for XBinnedScalarEvents -where - NTY: SubFrId, -{ - const FRAME_TYPE_ID: u32 = crate::X_BINNED_SCALAR_EVENTS_FRAME_TYPE_ID + NTY::SUB; -} - -impl FrameType for XBinnedScalarEvents -where - NTY: SubFrId, -{ - fn frame_type_id(&self) -> u32 { - ::FRAME_TYPE_ID - } -} - -impl XBinnedScalarEvents { - pub fn empty() -> Self { - Self { - tss: Vec::new(), - mins: Vec::new(), - maxs: Vec::new(), - avgs: Vec::new(), - } - } -} - -impl WithLen for XBinnedScalarEvents { - fn len(&self) -> usize { - self.tss.len() - } -} - -impl WithTimestamps for XBinnedScalarEvents { - fn ts(&self, ix: usize) -> u64 { - self.tss[ix] - } -} - -impl ByteEstimate for XBinnedScalarEvents { - fn byte_estimate(&self) -> u64 { - if self.tss.len() == 0 { - 0 - } else { - // TODO improve via a const fn on NTY - self.tss.len() as u64 * 28 - } - } -} - -impl RangeOverlapInfo for XBinnedScalarEvents { - fn ends_before(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts < range.beg, - None => true, - } - } - - fn ends_after(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } - - fn starts_after(&self, range: NanoRange) -> bool { - match self.tss.first() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } -} - -impl FitsInside for XBinnedScalarEvents { - fn fits_inside(&self, range: NanoRange) -> 
Fits { - if self.tss.is_empty() { - Fits::Empty - } else { - let t1 = *self.tss.first().unwrap(); - let t2 = *self.tss.last().unwrap(); - if t2 < range.beg { - Fits::Lower - } else if t1 > range.end { - Fits::Greater - } else if t1 < range.beg && t2 > range.end { - Fits::PartlyLowerAndGreater - } else if t1 < range.beg { - Fits::PartlyLower - } else if t2 > range.end { - Fits::PartlyGreater - } else { - Fits::Inside - } - } - } -} - -impl FilterFittingInside for XBinnedScalarEvents { - fn filter_fitting_inside(self, fit_range: NanoRange) -> Option { - match self.fits_inside(fit_range) { - Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self), - _ => None, - } - } -} - -impl PushableIndex for XBinnedScalarEvents -where - NTY: NumOps, -{ - fn push_index(&mut self, src: &Self, ix: usize) { - self.tss.push(src.tss[ix]); - self.mins.push(src.mins[ix].clone()); - self.maxs.push(src.maxs[ix].clone()); - self.avgs.push(src.avgs[ix]); - } -} - -impl NewEmpty for XBinnedScalarEvents { - fn empty(_shape: Shape) -> Self { - Self { - tss: Vec::new(), - avgs: Vec::new(), - mins: Vec::new(), - maxs: Vec::new(), - } - } -} - -impl Appendable for XBinnedScalarEvents -where - NTY: NumOps, -{ - fn empty_like_self(&self) -> Self { - Self::empty() - } - - fn append(&mut self, src: &Self) { - self.tss.extend_from_slice(&src.tss); - self.mins.extend_from_slice(&src.mins); - self.maxs.extend_from_slice(&src.maxs); - self.avgs.extend_from_slice(&src.avgs); - } - - fn append_zero(&mut self, ts1: u64, _ts2: u64) { - self.tss.push(ts1); - self.mins.push(NTY::zero()); - self.maxs.push(NTY::zero()); - self.avgs.push(0.); - } -} - -impl Clearable for XBinnedScalarEvents { - fn clear(&mut self) { - self.tss.clear(); - self.avgs.clear(); - self.mins.clear(); - self.maxs.clear(); - } -} - -impl ReadableFromFile for XBinnedScalarEvents -where - NTY: NumOps, -{ - fn read_from_file(_file: File) -> Result, Error> { - // TODO refactor types such that this impl is not needed. 
- panic!() - } - - fn from_buf(_buf: &[u8]) -> Result { - panic!() - } -} - -impl TimeBinnableType for XBinnedScalarEvents -where - NTY: NumOps, -{ - type Output = MinMaxAvgDim0Bins; - type Aggregator = XBinnedScalarEventsAggregator; - - fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator { - debug!( - "TimeBinnableType for XBinnedScalarEvents aggregator() range {:?} x_bin_count {} do_time_weight {}", - range, x_bin_count, do_time_weight - ); - Self::Aggregator::new(range, do_time_weight) - } -} - -pub struct XBinnedScalarEventsAggregator -where - NTY: NumOps, -{ - range: NanoRange, - count: u64, - min: NTY, - max: NTY, - sumc: u64, - sum: f32, - int_ts: u64, - last_ts: u64, - last_avg: Option, - last_min: Option, - last_max: Option, - do_time_weight: bool, -} - -impl XBinnedScalarEventsAggregator -where - NTY: NumOps, -{ - pub fn new(range: NanoRange, do_time_weight: bool) -> Self { - Self { - int_ts: range.beg, - range, - count: 0, - min: NTY::zero(), - max: NTY::zero(), - sumc: 0, - sum: 0f32, - last_ts: 0, - last_avg: None, - last_min: None, - last_max: None, - do_time_weight, - } - } - - fn apply_min_max(&mut self, min: NTY, max: NTY) { - if self.count == 0 { - self.min = min; - self.max = max; - } else { - if min < self.min { - self.min = min; - } - if max > self.max { - self.max = max; - } - } - } - - fn apply_event_unweight(&mut self, avg: f32, min: NTY, max: NTY) { - //debug!("apply_event_unweight"); - self.apply_min_max(min, max); - let vf = avg; - if vf.is_nan() { - } else { - self.sum += vf; - self.sumc += 1; - } - } - - fn apply_event_time_weight(&mut self, ts: u64) { - //debug!("apply_event_time_weight"); - if let (Some(avg), Some(min), Some(max)) = (self.last_avg, &self.last_min, &self.last_max) { - let min2 = min.clone(); - let max2 = max.clone(); - self.apply_min_max(min2, max2); - let w = (ts - self.int_ts) as f32 / self.range.delta() as f32; - if avg.is_nan() { - } else { - self.sum += avg * w; - } - self.sumc += 1; - self.int_ts = ts; - } - } - - fn ingest_unweight(&mut self, item: &XBinnedScalarEvents) { - for i1 in 0..item.tss.len() { - let ts = item.tss[i1]; - let avg = item.avgs[i1]; - let min = item.mins[i1].clone(); - let max = item.maxs[i1].clone(); - if ts < self.range.beg { - } else if ts >= self.range.end { - } else { - self.apply_event_unweight(avg, min, max); - self.count += 1; - } - } - } - - fn ingest_time_weight(&mut self, item: &XBinnedScalarEvents) { - for i1 in 0..item.tss.len() { - let ts = item.tss[i1]; - let avg = item.avgs[i1]; - let min = item.mins[i1].clone(); - let max = item.maxs[i1].clone(); - if ts < self.int_ts { - self.last_ts = ts; - self.last_avg = Some(avg); - self.last_min = Some(min); - self.last_max = Some(max); - } else if ts >= self.range.end { - return; - } else { - self.apply_event_time_weight(ts); - self.count += 1; - self.last_ts = ts; - self.last_avg = Some(avg); - self.last_min = Some(min); - self.last_max = Some(max); - } - } - } - - fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins { - let avg = if self.sumc == 0 { - 0f32 - } else { - self.sum / self.sumc as f32 - }; - let ret = MinMaxAvgDim0Bins { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - mins: vec![self.min.clone()], - maxs: vec![self.max.clone()], - avgs: vec![avg], - }; - self.int_ts = range.beg; - self.range = range; - self.count = 0; - self.min = NTY::zero(); - self.max = NTY::zero(); - self.sum = 0f32; - self.sumc = 0; - ret - } - - fn 
result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim0Bins { - // TODO check callsite for correct expand status. - if true || expand { - self.apply_event_time_weight(self.range.end); - } - let avg = { - let sc = self.range.delta() as f32 * 1e-9; - self.sum / sc - }; - let ret = MinMaxAvgDim0Bins { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - mins: vec![self.min.clone()], - maxs: vec![self.max.clone()], - avgs: vec![avg], - }; - self.int_ts = range.beg; - self.range = range; - self.count = 0; - self.min = NTY::zero(); - self.max = NTY::zero(); - self.sum = 0f32; - self.sumc = 0; - ret - } -} - -impl TimeBinnableTypeAggregator for XBinnedScalarEventsAggregator -where - NTY: NumOps, -{ - type Input = XBinnedScalarEvents; - type Output = MinMaxAvgDim0Bins; - - fn range(&self) -> &NanoRange { - &self.range - } - - fn ingest(&mut self, item: &Self::Input) { - debug!("ingest"); - if self.do_time_weight { - self.ingest_time_weight(item) - } else { - self.ingest_unweight(item) - } - } - - fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output { - if self.do_time_weight { - self.result_reset_time_weight(range, expand) - } else { - self.result_reset_unweight(range, expand) - } - } -} - -#[derive(Serialize, Deserialize)] -pub struct XBinnedScalarEventsCollectedResult { - #[serde(rename = "tsAnchor")] - ts_anchor_sec: u64, - #[serde(rename = "tsMs")] - ts_off_ms: Vec, - #[serde(rename = "tsNs")] - ts_off_ns: Vec, - mins: Vec, - maxs: Vec, - avgs: Vec, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")] - finalised_range: bool, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")] - timed_out: bool, -} - -pub struct XBinnedScalarEventsCollector { - vals: XBinnedScalarEvents, - finalised_range: bool, - timed_out: bool, - #[allow(dead_code)] - bin_count_exp: u32, -} - -impl XBinnedScalarEventsCollector { - pub fn new(bin_count_exp: u32) -> Self { - Self { - finalised_range: false, - timed_out: false, - vals: XBinnedScalarEvents::empty(), - bin_count_exp, - } - } -} - -impl WithLen for XBinnedScalarEventsCollector { - fn len(&self) -> usize { - self.vals.tss.len() - } -} - -impl Collector for XBinnedScalarEventsCollector -where - NTY: NumOps, -{ - type Input = XBinnedScalarEvents; - type Output = XBinnedScalarEventsCollectedResult; - - fn ingest(&mut self, src: &Self::Input) { - self.vals.append(src); - } - - fn set_range_complete(&mut self) { - self.finalised_range = true; - } - - fn set_timed_out(&mut self) { - self.timed_out = true; - } - - fn result(self) -> Result { - let tst = ts_offs_from_abs(&self.vals.tss); - let ret = Self::Output { - ts_anchor_sec: tst.0, - ts_off_ms: tst.1, - ts_off_ns: tst.2, - mins: self.vals.mins, - maxs: self.vals.maxs, - avgs: self.vals.avgs, - finalised_range: self.finalised_range, - timed_out: self.timed_out, - }; - Ok(ret) - } -} - -impl Collectable for XBinnedScalarEvents -where - NTY: NumOps, -{ - type Collector = XBinnedScalarEventsCollector; - - fn new_collector(bin_count_exp: u32) -> Self::Collector { - Self::Collector::new(bin_count_exp) - } -} - -impl EventsNodeProcessorOutput for XBinnedScalarEvents -where - NTY: NumOps, -{ - fn as_any_mut(&mut self) -> &mut dyn Any { - self - } - - fn into_parts(self) -> (Box, VecDeque, VecDeque) { - todo!() - } -} diff --git a/items/src/xbinnedwaveevents.rs b/items/src/xbinnedwaveevents.rs deleted file mode 100644 index 516a564..0000000 --- a/items/src/xbinnedwaveevents.rs +++ 
/dev/null @@ -1,550 +0,0 @@ -use crate::binsdim1::MinMaxAvgDim1Bins; -use crate::numops::NumOps; -use crate::streams::{Collectable, Collector}; -use crate::{ - Appendable, ByteEstimate, Clearable, EventsNodeProcessorOutput, FilterFittingInside, Fits, FitsInside, FrameType, - FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, TimeBinnableType, - TimeBinnableTypeAggregator, WithLen, WithTimestamps, -}; -use err::Error; -use items_0::subfr::SubFrId; -use netpod::log::*; -use netpod::timeunits::*; -use netpod::{NanoRange, Shape}; -use serde::{Deserialize, Serialize}; -use std::any::Any; -use std::collections::VecDeque; -use std::mem; -use tokio::fs::File; - -// TODO rename Wave -> Dim1 -#[derive(Debug, Serialize, Deserialize)] -pub struct XBinnedWaveEvents { - pub tss: Vec, - pub mins: Vec>, - pub maxs: Vec>, - pub avgs: Vec>, -} - -impl FrameTypeInnerStatic for XBinnedWaveEvents -where - NTY: SubFrId, -{ - const FRAME_TYPE_ID: u32 = crate::X_BINNED_WAVE_EVENTS_FRAME_TYPE_ID + NTY::SUB; -} - -impl FrameType for XBinnedWaveEvents -where - NTY: SubFrId, -{ - fn frame_type_id(&self) -> u32 { - ::FRAME_TYPE_ID - } -} - -impl XBinnedWaveEvents { - pub fn empty() -> Self { - Self { - tss: vec![], - mins: vec![], - maxs: vec![], - avgs: vec![], - } - } -} - -impl WithLen for XBinnedWaveEvents { - fn len(&self) -> usize { - self.tss.len() - } -} - -impl WithTimestamps for XBinnedWaveEvents { - fn ts(&self, ix: usize) -> u64 { - self.tss[ix] - } -} - -impl ByteEstimate for XBinnedWaveEvents { - fn byte_estimate(&self) -> u64 { - if self.tss.len() == 0 { - 0 - } else { - // TODO improve via a const fn on NTY - self.tss.len() as u64 * 20 * self.avgs[0].len() as u64 - } - } -} - -impl RangeOverlapInfo for XBinnedWaveEvents { - fn ends_before(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts < range.beg, - None => true, - } - } - - fn ends_after(&self, range: NanoRange) -> bool { - match self.tss.last() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } - - fn starts_after(&self, range: NanoRange) -> bool { - match self.tss.first() { - Some(&ts) => ts >= range.end, - None => panic!(), - } - } -} - -impl FitsInside for XBinnedWaveEvents { - fn fits_inside(&self, range: NanoRange) -> Fits { - if self.tss.is_empty() { - Fits::Empty - } else { - let t1 = *self.tss.first().unwrap(); - let t2 = *self.tss.last().unwrap(); - if t2 < range.beg { - Fits::Lower - } else if t1 > range.end { - Fits::Greater - } else if t1 < range.beg && t2 > range.end { - Fits::PartlyLowerAndGreater - } else if t1 < range.beg { - Fits::PartlyLower - } else if t2 > range.end { - Fits::PartlyGreater - } else { - Fits::Inside - } - } - } -} - -impl FilterFittingInside for XBinnedWaveEvents { - fn filter_fitting_inside(self, fit_range: NanoRange) -> Option { - match self.fits_inside(fit_range) { - Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self), - _ => None, - } - } -} - -impl PushableIndex for XBinnedWaveEvents -where - NTY: NumOps, -{ - fn push_index(&mut self, src: &Self, ix: usize) { - self.tss.push(src.tss[ix]); - // TODO not nice. 
- self.mins.push(src.mins[ix].clone()); - self.maxs.push(src.maxs[ix].clone()); - self.avgs.push(src.avgs[ix].clone()); - } -} - -impl NewEmpty for XBinnedWaveEvents { - fn empty(_shape: Shape) -> Self { - Self { - tss: Vec::new(), - avgs: Vec::new(), - mins: Vec::new(), - maxs: Vec::new(), - } - } -} - -impl Appendable for XBinnedWaveEvents -where - NTY: NumOps, -{ - fn empty_like_self(&self) -> Self { - Self::empty() - } - - fn append(&mut self, src: &Self) { - self.tss.extend_from_slice(&src.tss); - self.mins.extend_from_slice(&src.mins); - self.maxs.extend_from_slice(&src.maxs); - self.avgs.extend_from_slice(&src.avgs); - } - - fn append_zero(&mut self, ts1: u64, _ts2: u64) { - self.tss.push(ts1); - self.mins.push(Vec::new()); - self.maxs.push(Vec::new()); - self.avgs.push(Vec::new()); - } -} - -impl Clearable for XBinnedWaveEvents { - fn clear(&mut self) { - self.tss.clear(); - self.mins.clear(); - self.maxs.clear(); - self.avgs.clear(); - } -} - -impl ReadableFromFile for XBinnedWaveEvents -where - NTY: NumOps, -{ - fn read_from_file(_file: File) -> Result, Error> { - // TODO refactor types such that this impl is not needed. - panic!() - } - - fn from_buf(_buf: &[u8]) -> Result { - panic!() - } -} - -impl TimeBinnableType for XBinnedWaveEvents -where - NTY: NumOps, -{ - type Output = MinMaxAvgDim1Bins; - type Aggregator = XBinnedWaveEventsAggregator; - - fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator { - debug!( - "TimeBinnableType for XBinnedWaveEvents aggregator() range {:?} x_bin_count {} do_time_weight {}", - range, x_bin_count, do_time_weight - ); - Self::Aggregator::new(range, x_bin_count, do_time_weight) - } -} - -pub struct XBinnedWaveEventsAggregator -where - NTY: NumOps, -{ - range: NanoRange, - count: u64, - min: Option>, - max: Option>, - sumc: u64, - sum: Vec, - int_ts: u64, - last_ts: u64, - last_avg: Option>, - last_min: Option>, - last_max: Option>, - do_time_weight: bool, -} - -impl XBinnedWaveEventsAggregator -where - NTY: NumOps, -{ - pub fn new(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self { - Self { - int_ts: range.beg, - range, - count: 0, - min: None, - max: None, - sumc: 0, - sum: vec![0f32; x_bin_count], - last_ts: 0, - last_avg: None, - last_min: None, - last_max: None, - do_time_weight, - } - } - - // TODO get rid of clones. 
- fn apply_min_max(&mut self, min: &Vec, max: &Vec) { - self.min = match self.min.take() { - None => Some(min.clone()), - Some(cmin) => { - let a = cmin - .into_iter() - .zip(min) - .map(|(a, b)| if a < *b { a } else { b.clone() }) - .collect(); - Some(a) - } - }; - self.max = match self.max.take() { - None => Some(max.clone()), - Some(cmax) => { - let a = cmax - .into_iter() - .zip(min) - .map(|(a, b)| if a > *b { a } else { b.clone() }) - .collect(); - Some(a) - } - }; - } - - fn apply_event_unweight(&mut self, avg: &Vec, min: &Vec, max: &Vec) { - //debug!("apply_event_unweight"); - self.apply_min_max(&min, &max); - let sum = mem::replace(&mut self.sum, vec![]); - self.sum = sum - .into_iter() - .zip(avg) - .map(|(a, &b)| if b.is_nan() { a } else { a + b }) - .collect(); - self.sumc += 1; - } - - fn apply_event_time_weight(&mut self, ts: u64) { - //debug!("apply_event_time_weight"); - if let (Some(avg), Some(min), Some(max)) = (self.last_avg.take(), self.last_min.take(), self.last_max.take()) { - self.apply_min_max(&min, &max); - let w = (ts - self.int_ts) as f32 / self.range.delta() as f32; - let sum = mem::replace(&mut self.sum, vec![]); - self.sum = sum - .into_iter() - .zip(&avg) - .map(|(a, &b)| if b.is_nan() { a } else { a + b * w }) - .collect(); - self.sumc += 1; - self.int_ts = ts; - self.last_avg = Some(avg); - self.last_min = Some(min); - self.last_max = Some(max); - } - } - - fn ingest_unweight(&mut self, item: &XBinnedWaveEvents) { - for i1 in 0..item.tss.len() { - let ts = item.tss[i1]; - let avg = &item.avgs[i1]; - let min = &item.mins[i1]; - let max = &item.maxs[i1]; - if ts < self.range.beg { - } else if ts >= self.range.end { - } else { - self.apply_event_unweight(avg, min, max); - self.count += 1; - } - } - } - - fn ingest_time_weight(&mut self, item: &XBinnedWaveEvents) { - for i1 in 0..item.tss.len() { - let ts = item.tss[i1]; - let avg = &item.avgs[i1]; - let min = &item.mins[i1]; - let max = &item.maxs[i1]; - if ts < self.int_ts { - self.last_ts = ts; - self.last_avg = Some(avg.clone()); - self.last_min = Some(min.clone()); - self.last_max = Some(max.clone()); - } else if ts >= self.range.end { - return; - } else { - self.apply_event_time_weight(ts); - self.count += 1; - self.last_ts = ts; - self.last_avg = Some(avg.clone()); - self.last_min = Some(min.clone()); - self.last_max = Some(max.clone()); - } - } - } - - fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim1Bins { - let avg = if self.sumc == 0 { - None - } else { - Some(self.sum.iter().map(|k| *k / self.sumc as f32).collect()) - }; - let min = mem::replace(&mut self.min, None); - let max = mem::replace(&mut self.max, None); - let ret = MinMaxAvgDim1Bins { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - mins: vec![min], - maxs: vec![max], - avgs: vec![avg], - }; - self.int_ts = range.beg; - self.range = range; - self.count = 0; - self.min = None; - self.max = None; - self.sumc = 0; - self.sum = vec![0f32; ret.avgs.len()]; - ret - } - - fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim1Bins { - // TODO check callsite for correct expand status. 
- if true || expand { - self.apply_event_time_weight(self.range.end); - } - let avg = if self.sumc == 0 { - None - } else { - let n = self.sum.len(); - Some(mem::replace(&mut self.sum, vec![0f32; n])) - }; - let min = mem::replace(&mut self.min, None); - let max = mem::replace(&mut self.max, None); - let ret = MinMaxAvgDim1Bins { - ts1s: vec![self.range.beg], - ts2s: vec![self.range.end], - counts: vec![self.count], - mins: vec![min], - maxs: vec![max], - avgs: vec![avg], - }; - self.int_ts = range.beg; - self.range = range; - self.count = 0; - //self.min = None; - //self.max = None; - //self.sum = vec![0f32; ret.avgs.len()]; - self.sumc = 0; - ret - } -} - -impl TimeBinnableTypeAggregator for XBinnedWaveEventsAggregator -where - NTY: NumOps, -{ - type Input = XBinnedWaveEvents; - type Output = MinMaxAvgDim1Bins; - - fn range(&self) -> &NanoRange { - &self.range - } - - fn ingest(&mut self, item: &Self::Input) { - if self.do_time_weight { - self.ingest_time_weight(item) - } else { - self.ingest_unweight(item) - } - } - - fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output { - if self.do_time_weight { - self.result_reset_time_weight(range, expand) - } else { - self.result_reset_unweight(range, expand) - } - } -} - -#[derive(Serialize, Deserialize)] -pub struct XBinnedWaveEventsCollectedResult { - #[serde(rename = "tsAnchor")] - ts_anchor_sec: u64, - #[serde(rename = "tsMs")] - ts_off_ms: Vec, - #[serde(rename = "tsNs")] - ts_off_ns: Vec, - mins: Vec>, - maxs: Vec>, - avgs: Vec>, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")] - finalised_range: bool, - #[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")] - timed_out: bool, -} - -pub struct XBinnedWaveEventsCollector { - vals: XBinnedWaveEvents, - finalised_range: bool, - timed_out: bool, - #[allow(dead_code)] - bin_count_exp: u32, -} - -impl XBinnedWaveEventsCollector { - pub fn new(bin_count_exp: u32) -> Self { - Self { - finalised_range: false, - timed_out: false, - vals: XBinnedWaveEvents::empty(), - bin_count_exp, - } - } -} - -impl WithLen for XBinnedWaveEventsCollector { - fn len(&self) -> usize { - self.vals.tss.len() - } -} - -impl Collector for XBinnedWaveEventsCollector -where - NTY: NumOps, -{ - type Input = XBinnedWaveEvents; - type Output = XBinnedWaveEventsCollectedResult; - - fn ingest(&mut self, src: &Self::Input) { - self.vals.append(src); - } - - fn set_range_complete(&mut self) { - self.finalised_range = true; - } - - fn set_timed_out(&mut self) { - self.timed_out = true; - } - - fn result(self) -> Result { - let ts_anchor_sec = self.vals.tss.first().map_or(0, |&k| k) / SEC; - let ts_anchor_ns = ts_anchor_sec * SEC; - let ts_off_ms: Vec<_> = self.vals.tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect(); - let ts_off_ns = self - .vals - .tss - .iter() - .zip(ts_off_ms.iter().map(|&k| k * MS)) - .map(|(&j, k)| (j - ts_anchor_ns - k)) - .collect(); - let ret = Self::Output { - finalised_range: self.finalised_range, - timed_out: self.timed_out, - ts_anchor_sec, - ts_off_ms, - ts_off_ns, - mins: self.vals.mins, - maxs: self.vals.maxs, - avgs: self.vals.avgs, - }; - Ok(ret) - } -} - -impl Collectable for XBinnedWaveEvents -where - NTY: NumOps, -{ - type Collector = XBinnedWaveEventsCollector; - - fn new_collector(bin_count_exp: u32) -> Self::Collector { - Self::Collector::new(bin_count_exp) - } -} - -impl EventsNodeProcessorOutput for XBinnedWaveEvents -where - NTY: NumOps, -{ - fn as_any_mut(&mut self) -> &mut dyn Any { - self - } - - fn 
into_parts(self) -> (Box<dyn Any>, VecDeque<u64>, VecDeque<u64>) {
-        todo!()
-    }
-}
diff --git a/items_2/src/eventsdim0.rs b/items_2/src/eventsdim0.rs
index 54e99bb..dff7598 100644
--- a/items_2/src/eventsdim0.rs
+++ b/items_2/src/eventsdim0.rs
@@ -1146,3 +1146,73 @@ mod test_frame {
         assert_eq!(item.tss(), &[123]);
     }
 }
+
+/*
+TODO adapt and enable
+#[test]
+fn bin_binned_01() {
+    use binsdim0::MinMaxAvgDim0Bins;
+    let edges = vec![SEC * 1000, SEC * 1010, SEC * 1020, SEC * 1030];
+    // Note: the concrete numeric type was lost in transit; u32 is assumed below.
+    let inp0 = <MinMaxAvgDim0Bins<u32> as NewEmpty>::empty(Shape::Scalar);
+    let mut time_binner = inp0.time_binner_new(edges, true);
+    let inp1 = MinMaxAvgDim0Bins::<u32> {
+        ts1s: vec![SEC * 1000, SEC * 1010],
+        ts2s: vec![SEC * 1010, SEC * 1020],
+        counts: vec![1, 1],
+        mins: vec![3, 4],
+        maxs: vec![10, 9],
+        avgs: vec![7., 6.],
+    };
+    assert_eq!(time_binner.bins_ready_count(), 0);
+    time_binner.ingest(&inp1);
+    assert_eq!(time_binner.bins_ready_count(), 1);
+    time_binner.push_in_progress(false);
+    assert_eq!(time_binner.bins_ready_count(), 2);
+    // From here on, pushing any more should not change the bin count:
+    time_binner.push_in_progress(false);
+    assert_eq!(time_binner.bins_ready_count(), 2);
+    // On the other hand, cycling should add one more zero-bin:
+    time_binner.cycle();
+    assert_eq!(time_binner.bins_ready_count(), 3);
+    time_binner.cycle();
+    assert_eq!(time_binner.bins_ready_count(), 3);
+    let bins = time_binner.bins_ready().expect("bins should be ready");
+    eprintln!("bins: {:?}", bins);
+    assert_eq!(time_binner.bins_ready_count(), 0);
+    assert_eq!(bins.counts(), &[1, 1, 0]);
+    // TODO use proper float-compare logic:
+    assert_eq!(bins.mins(), &[3., 4., 0.]);
+    assert_eq!(bins.maxs(), &[10., 9., 0.]);
+    assert_eq!(bins.avgs(), &[7., 6., 0.]);
+}
+
+#[test]
+fn bin_binned_02() {
+    use binsdim0::MinMaxAvgDim0Bins;
+    let edges = vec![SEC * 1000, SEC * 1020];
+    let inp0 = <MinMaxAvgDim0Bins<u32> as NewEmpty>::empty(Shape::Scalar);
+    let mut time_binner = inp0.time_binner_new(edges, true);
+    let inp1 = MinMaxAvgDim0Bins::<u32> {
+        ts1s: vec![SEC * 1000, SEC * 1010],
+        ts2s: vec![SEC * 1010, SEC * 1020],
+        counts: vec![1, 1],
+        mins: vec![3, 4],
+        maxs: vec![10, 9],
+        avgs: vec![7., 6.],
+    };
+    assert_eq!(time_binner.bins_ready_count(), 0);
+    time_binner.ingest(&inp1);
+    assert_eq!(time_binner.bins_ready_count(), 0);
+    time_binner.cycle();
+    assert_eq!(time_binner.bins_ready_count(), 1);
+    time_binner.cycle();
+    //assert_eq!(time_binner.bins_ready_count(), 2);
+    let bins = time_binner.bins_ready().expect("bins should be ready");
+    eprintln!("bins: {:?}", bins);
+    assert_eq!(time_binner.bins_ready_count(), 0);
+    assert_eq!(bins.counts(), &[2]);
+    assert_eq!(bins.mins(), &[3.]);
+    assert_eq!(bins.maxs(), &[10.]);
+    assert_eq!(bins.avgs(), &[13. / 2.]);
+}
+*/
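
Notes on the removed code follow. Each sketch below is standalone and illustrative; struct and function names are placeholders rather than items from this repository, except where stated otherwise.

The removed event containers (WaveEvents, XBinnedScalarEvents, XBinnedWaveEvents) shared one range-classification helper, FitsInside, to decide how a batch of timestamps relates to a query range. A self-contained restatement; the crate's version takes a NanoRange and is generic over the container:

#[derive(Debug, PartialEq)]
enum Fits {
    Empty,
    Lower,
    Greater,
    PartlyLower,
    PartlyGreater,
    PartlyLowerAndGreater,
    Inside,
}

// Classify event timestamps (assumed sorted ascending) against a query range.
fn fits_inside(tss: &[u64], beg: u64, end: u64) -> Fits {
    if tss.is_empty() {
        return Fits::Empty;
    }
    let t1 = *tss.first().unwrap();
    let t2 = *tss.last().unwrap();
    if t2 < beg {
        Fits::Lower
    } else if t1 > end {
        Fits::Greater
    } else if t1 < beg && t2 > end {
        Fits::PartlyLowerAndGreater
    } else if t1 < beg {
        Fits::PartlyLower
    } else if t2 > end {
        Fits::PartlyGreater
    } else {
        Fits::Inside
    }
}

fn main() {
    assert_eq!(fits_inside(&[5, 15], 10, 20), Fits::PartlyLower);
    assert_eq!(fits_inside(&[12, 18], 10, 20), Fits::Inside);
    assert_eq!(fits_inside(&[5, 25], 10, 20), Fits::PartlyLowerAndGreater);
}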
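
The aggregators' do_time_weight path weighted each event value by how long it remained current inside the bin (see apply_event_time_weight and result_reset_time_weight in the deleted files above). A minimal scalar sketch of the same idea, reformulated to normalize once by the bin length at the end instead of per event; the Range struct and all names here are assumptions:

// Time-weighted average over one bin: each value is weighted by the time
// span for which it stays current, normalized by the bin length.
#[derive(Clone, Copy)]
struct Range {
    beg: u64,
    end: u64,
}

fn time_weighted_avg(range: Range, events: &[(u64, f32)]) -> f32 {
    let mut sum = 0f32;
    let mut int_ts = range.beg;
    let mut last: Option<f32> = None;
    for &(ts, val) in events {
        if ts < range.beg {
            // Event before the bin: only remember it as the current value.
            last = Some(val);
        } else if ts >= range.end {
            break;
        } else {
            if let Some(v) = last {
                sum += v * (ts - int_ts) as f32;
                int_ts = ts;
            }
            last = Some(val);
        }
    }
    // Close the bin: the last value is weighted up to the bin edge.
    if let Some(v) = last {
        sum += v * (range.end - int_ts) as f32;
    }
    sum / (range.end - range.beg) as f32
}

fn main() {
    let range = Range { beg: 0, end: 10 };
    // Value 1.0 holds for 4 ns, then 3.0 for 6 ns: avg = (4*1 + 6*3) / 10 = 2.2
    let events = [(0u64, 1.0f32), (4, 3.0)];
    assert!((time_weighted_avg(range, &events) - 2.2).abs() < 1e-6);
}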
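
The removed WaveNBinner mapped waveform sample i of shape_bin_count samples into x-bin i * x_bin_count / shape_bin_count. The mapping in isolation:

// Integer downsampling index as used by WaveNBinner::process:
// consecutive runs of samples collapse into the same x-bin.
fn x_bin_index(i: usize, shape_bin_count: usize, x_bin_count: usize) -> usize {
    i * x_bin_count / shape_bin_count
}

fn main() {
    // An 8-point waveform reduced to 4 x-bins: two consecutive points per bin.
    let ixs: Vec<usize> = (0..8).map(|i| x_bin_index(i, 8, 4)).collect();
    assert_eq!(ixs, vec![0, 0, 1, 1, 2, 2, 3, 3]);
}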
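
The removed WaveXBinner::process reduced each waveform event to scalar min, max and average, seeding min and max with NTY::max_or_nan() and NTY::min_or_nan() and leaving NaN samples out of the average. A sketch specialized to f32, using plain NaN as the sentinel:

// Per-event reduction of a waveform to (min, max, avg), NaN-aware:
// the is_nan checks let the first real sample replace the sentinel,
// and NaN samples never enter the running sum.
fn reduce_wave(vals: &[f32]) -> (f32, f32, f32) {
    let mut min = f32::NAN;
    let mut max = f32::NAN;
    let mut sum = 0f32;
    let mut sumc = 0u32;
    for &v in vals {
        if v < min || min.is_nan() {
            min = v;
        }
        if v > max || max.is_nan() {
            max = v;
        }
        if !v.is_nan() {
            sum += v;
            sumc += 1;
        }
    }
    let avg = if sumc == 0 { f32::NAN } else { sum / sumc as f32 };
    (min, max, avg)
}

fn main() {
    let (min, max, avg) = reduce_wave(&[2.0, f32::NAN, 6.0, 4.0]);
    assert_eq!(min, 2.0);
    assert_eq!(max, 6.0);
    assert!((avg - 4.0).abs() < 1e-6);
}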
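
The removed collectors serialized timestamps as a whole-second anchor (tsAnchor) plus per-event millisecond (tsMs) and leftover nanosecond (tsNs) offsets. The computation below mirrors the one in the deleted XBinnedWaveEventsCollector::result, with SEC and MS defined as in netpod::timeunits:

// Split absolute nanosecond timestamps into anchor second plus offsets.
const MS: u64 = 1_000_000;
const SEC: u64 = 1_000_000_000;

fn ts_offs_from_abs(tss: &[u64]) -> (u64, Vec<u64>, Vec<u64>) {
    let ts_anchor_sec = tss.first().map_or(0, |&k| k) / SEC;
    let ts_anchor_ns = ts_anchor_sec * SEC;
    let ts_off_ms: Vec<u64> = tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect();
    let ts_off_ns = tss
        .iter()
        .zip(ts_off_ms.iter())
        .map(|(&k, &ms)| k - ts_anchor_ns - ms * MS)
        .collect();
    (ts_anchor_sec, ts_off_ms, ts_off_ns)
}

fn main() {
    // 1500 s + 2 ms + 3 ns, and 1500 s + 5 ms exactly.
    let tss = [1500 * SEC + 2 * MS + 3, 1500 * SEC + 5 * MS];
    let (anchor, ms, ns) = ts_offs_from_abs(&tss);
    assert_eq!(anchor, 1500);
    assert_eq!(ms, vec![2, 5]);
    assert_eq!(ns, vec![3, 0]);
}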