Remove obsolete containers
This commit is contained in:
@@ -1,5 +1,5 @@
|
||||
[workspace]
|
||||
members = ["daqbuffer", "httpret", "h5out", "items", "items_2", "items_proc", "nodenet", "httpclient", "fsio", "dq"]
|
||||
members = ["daqbuffer", "httpret", "h5out", "items", "items_2", "items_proc", "nodenet", "httpclient", "dq"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 1
|
||||
|
||||
@@ -1,20 +1,33 @@
|
||||
pub mod ringbuf;
|
||||
|
||||
use async_channel::Sender;
|
||||
use err::{ErrStr, Error};
|
||||
use err::ErrStr;
|
||||
use err::Error;
|
||||
use futures_util::StreamExt;
|
||||
use items::eventsitem::EventsItem;
|
||||
use items::{Sitemty, StatsItem, StreamItem};
|
||||
use items::Sitemty;
|
||||
use items::StatsItem;
|
||||
use items::StreamItem;
|
||||
use netpod::log::*;
|
||||
use netpod::{DiskStats, OpenStats, ReadExactStats, ReadStats, SeekStats};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use netpod::DiskStats;
|
||||
use netpod::OpenStats;
|
||||
use netpod::ReadExactStats;
|
||||
use netpod::ReadStats;
|
||||
use netpod::SeekStats;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::fmt;
|
||||
use std::io::{self, ErrorKind, SeekFrom};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::io;
|
||||
use std::io::ErrorKind;
|
||||
use std::io::SeekFrom;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::Instant;
|
||||
use tokio::fs::{File, OpenOptions};
|
||||
use tokio::io::{AsyncReadExt, AsyncSeekExt};
|
||||
use tokio::fs::File;
|
||||
use tokio::fs::OpenOptions;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::io::AsyncSeekExt;
|
||||
|
||||
const LOG_IO: bool = true;
|
||||
const STATS_IO: bool = true;
|
||||
@@ -62,8 +75,10 @@ pub async fn tokio_rand() -> Result<u64, Error> {
|
||||
Ok(x)
|
||||
}
|
||||
|
||||
pub struct DummyEvent;
|
||||
|
||||
pub struct StatsChannel {
|
||||
chn: Sender<Sitemty<EventsItem>>,
|
||||
chn: Sender<Sitemty<DummyEvent>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for StatsChannel {
|
||||
@@ -73,7 +88,7 @@ impl fmt::Debug for StatsChannel {
|
||||
}
|
||||
|
||||
impl StatsChannel {
|
||||
pub fn new(chn: Sender<Sitemty<EventsItem>>) -> Self {
|
||||
pub fn new(chn: Sender<Sitemty<DummyEvent>>) -> Self {
|
||||
Self { chn }
|
||||
}
|
||||
|
||||
|
||||
@@ -1,17 +1,24 @@
|
||||
use crate::err::ErrConv;
|
||||
use chrono::{DateTime, Utc};
|
||||
use chrono::DateTime;
|
||||
use chrono::Utc;
|
||||
use disk::streamlog::Streamlog;
|
||||
use err::Error;
|
||||
use futures_util::TryStreamExt;
|
||||
use http::StatusCode;
|
||||
use httpclient::HttpBodyAsAsyncRead;
|
||||
use hyper::Body;
|
||||
use items::xbinnedwaveevents::XBinnedWaveEvents;
|
||||
use items::{Sitemty, StreamItem};
|
||||
use items::StreamItem;
|
||||
use netpod::log::*;
|
||||
use netpod::query::{BinnedQuery, CacheUsage};
|
||||
use netpod::query::BinnedQuery;
|
||||
use netpod::query::CacheUsage;
|
||||
use netpod::AggKind;
|
||||
use netpod::AppendToUrl;
|
||||
use netpod::{AggKind, ByteSize, Channel, HostPort, NanoRange, PerfOpts, APP_OCTET};
|
||||
use netpod::ByteSize;
|
||||
use netpod::Channel;
|
||||
use netpod::HostPort;
|
||||
use netpod::NanoRange;
|
||||
use netpod::PerfOpts;
|
||||
use netpod::APP_OCTET;
|
||||
use streams::frames::inmem::InMemoryFrameAsyncReadStream;
|
||||
use url::Url;
|
||||
|
||||
@@ -110,44 +117,11 @@ pub async fn get_binned(
|
||||
info!("Stats: {:?}", item);
|
||||
None
|
||||
}
|
||||
StreamItem::DataItem(frame) => {
|
||||
StreamItem::DataItem(_frame) => {
|
||||
// TODO
|
||||
// The expected type nowadays depends on the channel and agg-kind.
|
||||
err::todo();
|
||||
type ExpectedType = Sitemty<XBinnedWaveEvents<u8>>;
|
||||
// TODO the non-data variants of Sitemty no longer carry a frame id.
|
||||
//let type_id_exp = <ExpectedType as FrameType>::FRAME_TYPE_ID;
|
||||
let type_id_exp: u32 = err::todoval();
|
||||
if frame.tyid() != type_id_exp {
|
||||
error!("unexpected type id got {} exp {}", frame.tyid(), type_id_exp);
|
||||
}
|
||||
let n1 = frame.buf().len();
|
||||
match rmp_serde::from_slice::<ExpectedType>(frame.buf()) {
|
||||
Ok(item) => match item {
|
||||
Ok(item) => {
|
||||
match item {
|
||||
StreamItem::Log(item) => {
|
||||
Streamlog::emit(&item);
|
||||
}
|
||||
StreamItem::Stats(item) => {
|
||||
info!("Stats: {:?}", item);
|
||||
}
|
||||
StreamItem::DataItem(item) => {
|
||||
info!("DataItem: {:?}", item);
|
||||
}
|
||||
}
|
||||
Some(Ok(()))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("len {} error frame {:?}", n1, e);
|
||||
Some(Err(e))
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
error!("len {} {:?}", n1, e);
|
||||
Some(Err(e.into()))
|
||||
}
|
||||
}
|
||||
Some(Ok(()))
|
||||
}
|
||||
},
|
||||
Err(e) => Some(Err(Error::with_msg(format!("{:?}", e)))),
|
||||
|
||||
@@ -1,19 +1,27 @@
|
||||
use crate::err::ErrConv;
|
||||
use crate::nodes::require_test_hosts_running;
|
||||
use chrono::{DateTime, Utc};
|
||||
use chrono::DateTime;
|
||||
use chrono::Utc;
|
||||
use disk::streamlog::Streamlog;
|
||||
use err::Error;
|
||||
use futures_util::{StreamExt, TryStreamExt};
|
||||
use futures_util::StreamExt;
|
||||
use futures_util::TryStreamExt;
|
||||
use http::StatusCode;
|
||||
use httpclient::HttpBodyAsAsyncRead;
|
||||
use hyper::Body;
|
||||
use items::binsdim0::MinMaxAvgDim0Bins;
|
||||
use items::{RangeCompletableItem, Sitemty, StatsItem, StreamItem, WithLen};
|
||||
use items::StreamItem;
|
||||
use items_0::subfr::SubFrId;
|
||||
use netpod::log::*;
|
||||
use netpod::query::{BinnedQuery, CacheUsage};
|
||||
use netpod::query::BinnedQuery;
|
||||
use netpod::query::CacheUsage;
|
||||
use netpod::AggKind;
|
||||
use netpod::AppendToUrl;
|
||||
use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange, PerfOpts, APP_OCTET};
|
||||
use netpod::Channel;
|
||||
use netpod::Cluster;
|
||||
use netpod::HostPort;
|
||||
use netpod::NanoRange;
|
||||
use netpod::PerfOpts;
|
||||
use netpod::APP_OCTET;
|
||||
use serde::de::DeserializeOwned;
|
||||
use std::fmt;
|
||||
use std::future::ready;
|
||||
@@ -144,6 +152,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
#[derive(Debug)]
|
||||
pub struct BinnedResponse {
|
||||
bin_count: u64,
|
||||
@@ -178,6 +187,7 @@ impl BinnedResponse {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO
|
||||
async fn consume_binned_response<NTY, T>(inp: InMemoryFrameAsyncReadStream<T>) -> Result<BinnedResponse, Error>
|
||||
where
|
||||
NTY: fmt::Debug + SubFrId + DeserializeOwned,
|
||||
@@ -197,68 +207,16 @@ where
|
||||
debug!("Stats: {:?}", item);
|
||||
None
|
||||
}
|
||||
StreamItem::DataItem(frame) => {
|
||||
// TODO non-data Sitety no longer carry frame id:
|
||||
//if frame.tyid() != <Sitemty<MinMaxAvgDim0Bins<NTY>> as FrameType>::FRAME_TYPE_ID {
|
||||
if frame.tyid() != err::todoval::<u32>() {
|
||||
error!("test receives unexpected tyid {:x}", frame.tyid());
|
||||
}
|
||||
match rmp_serde::from_slice::<Sitemty<MinMaxAvgDim0Bins<NTY>>>(frame.buf()) {
|
||||
Ok(item) => match item {
|
||||
Ok(item) => match item {
|
||||
StreamItem::Log(item) => {
|
||||
Streamlog::emit(&item);
|
||||
Some(Ok(StreamItem::Log(item)))
|
||||
}
|
||||
item => Some(Ok(item)),
|
||||
},
|
||||
Err(e) => {
|
||||
error!("TEST GOT ERROR FRAME: {:?}", e);
|
||||
Some(Err(e))
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
error!("{:?}", e);
|
||||
Some(Err(e.into()))
|
||||
}
|
||||
}
|
||||
StreamItem::DataItem(_frame) => {
|
||||
err::todo();
|
||||
Some(Ok(()))
|
||||
}
|
||||
},
|
||||
Err(e) => Some(Err(Error::with_msg(format!("WEIRD EMPTY ERROR {:?}", e)))),
|
||||
};
|
||||
ready(g)
|
||||
})
|
||||
.fold(BinnedResponse::new(), |mut a, k| {
|
||||
let g = match k {
|
||||
Ok(StreamItem::Log(_item)) => {
|
||||
a.log_item_count += 1;
|
||||
a
|
||||
}
|
||||
Ok(StreamItem::Stats(item)) => match item {
|
||||
StatsItem::EventDataReadStats(item) => {
|
||||
a.bytes_read += item.parsed_bytes;
|
||||
a
|
||||
}
|
||||
_ => a,
|
||||
},
|
||||
Ok(StreamItem::DataItem(item)) => match item {
|
||||
RangeCompletableItem::RangeComplete => {
|
||||
a.range_complete_count += 1;
|
||||
a
|
||||
}
|
||||
RangeCompletableItem::Data(item) => {
|
||||
a.data_item_count += 1;
|
||||
a.bin_count += WithLen::len(&item) as u64;
|
||||
a
|
||||
}
|
||||
},
|
||||
Err(_e) => {
|
||||
a.err_item_count += 1;
|
||||
a
|
||||
}
|
||||
};
|
||||
ready(g)
|
||||
});
|
||||
.fold(BinnedResponse::new(), |a, _x| ready(a));
|
||||
let ret = s1.await;
|
||||
debug!("BinnedResponse: {:?}", ret);
|
||||
Ok(ret)
|
||||
|
||||
@@ -1,18 +1,26 @@
|
||||
use crate::err::ErrConv;
|
||||
use crate::nodes::require_test_hosts_running;
|
||||
use chrono::{DateTime, Utc};
|
||||
use chrono::DateTime;
|
||||
use chrono::Utc;
|
||||
use disk::streamlog::Streamlog;
|
||||
use err::Error;
|
||||
use futures_util::{StreamExt, TryStreamExt};
|
||||
use futures_util::StreamExt;
|
||||
use futures_util::TryStreamExt;
|
||||
use http::StatusCode;
|
||||
use httpclient::HttpBodyAsAsyncRead;
|
||||
use hyper::Body;
|
||||
use items::numops::NumOps;
|
||||
use items::scalarevents::ScalarEvents;
|
||||
use items::{RangeCompletableItem, Sitemty, StatsItem, StreamItem, WithLen};
|
||||
use items::StreamItem;
|
||||
use netpod::log::*;
|
||||
use netpod::query::PlainEventsQuery;
|
||||
use netpod::{log::*, AggKind};
|
||||
use netpod::{AppendToUrl, Channel, Cluster, HostPort, NanoRange, PerfOpts, APP_JSON, APP_OCTET};
|
||||
use netpod::AggKind;
|
||||
use netpod::AppendToUrl;
|
||||
use netpod::Channel;
|
||||
use netpod::Cluster;
|
||||
use netpod::HostPort;
|
||||
use netpod::NanoRange;
|
||||
use netpod::PerfOpts;
|
||||
use netpod::APP_JSON;
|
||||
use netpod::APP_OCTET;
|
||||
use serde_json::Value as JsonValue;
|
||||
use std::fmt::Debug;
|
||||
use std::future::ready;
|
||||
@@ -42,7 +50,7 @@ async fn get_plain_events_binary_0_inner() -> Result<(), Error> {
|
||||
let rh = require_test_hosts_running()?;
|
||||
let cluster = &rh.cluster;
|
||||
if true {
|
||||
get_plain_events_binary::<i32>(
|
||||
get_plain_events_binary(
|
||||
"scalar-i32-be",
|
||||
"1970-01-01T00:20:10.000Z",
|
||||
"1970-01-01T00:20:50.000Z",
|
||||
@@ -60,17 +68,14 @@ fn get_plain_events_binary_0() {
|
||||
taskrun::run(get_plain_events_binary_0_inner()).unwrap();
|
||||
}
|
||||
|
||||
async fn get_plain_events_binary<NTY>(
|
||||
async fn get_plain_events_binary(
|
||||
channel_name: &str,
|
||||
beg_date: &str,
|
||||
end_date: &str,
|
||||
cluster: &Cluster,
|
||||
_expect_range_complete: bool,
|
||||
_expect_event_count: u64,
|
||||
) -> Result<EventsResponse, Error>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
) -> Result<EventsResponse, Error> {
|
||||
let t1 = Utc::now();
|
||||
let node0 = &cluster.nodes[0];
|
||||
let beg_date: DateTime<Utc> = beg_date.parse()?;
|
||||
@@ -110,7 +115,7 @@ where
|
||||
}
|
||||
let s1 = HttpBodyAsAsyncRead::new(res);
|
||||
let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
|
||||
let res = consume_plain_events_binary::<NTY, _>(s2).await?;
|
||||
let res = consume_plain_events_binary(s2).await?;
|
||||
let t2 = chrono::Utc::now();
|
||||
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
|
||||
// TODO add timeout
|
||||
@@ -122,6 +127,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
#[derive(Debug)]
|
||||
pub struct EventsResponse {
|
||||
event_count: u64,
|
||||
@@ -156,9 +162,8 @@ impl EventsResponse {
|
||||
}
|
||||
}
|
||||
|
||||
async fn consume_plain_events_binary<NTY, T>(inp: InMemoryFrameAsyncReadStream<T>) -> Result<EventsResponse, Error>
|
||||
async fn consume_plain_events_binary<T>(inp: InMemoryFrameAsyncReadStream<T>) -> Result<EventsResponse, Error>
|
||||
where
|
||||
NTY: NumOps,
|
||||
T: AsyncRead + Unpin,
|
||||
{
|
||||
let s1 = inp
|
||||
@@ -174,70 +179,16 @@ where
|
||||
debug!("Stats: {:?}", item);
|
||||
None
|
||||
}
|
||||
StreamItem::DataItem(frame) => {
|
||||
// TODO the non-data variants of Sitemty no longer carry frame type id:
|
||||
//if frame.tyid() != <Sitemty<ScalarEvents<NTY>> as FrameType>::FRAME_TYPE_ID {
|
||||
if frame.tyid() != err::todoval::<u32>() {
|
||||
error!("test receives unexpected tyid {:x}", frame.tyid());
|
||||
None
|
||||
} else {
|
||||
match rmp_serde::from_slice::<Sitemty<ScalarEvents<NTY>>>(frame.buf()) {
|
||||
Ok(item) => match item {
|
||||
Ok(item) => match item {
|
||||
StreamItem::Log(item) => {
|
||||
Streamlog::emit(&item);
|
||||
Some(Ok(StreamItem::Log(item)))
|
||||
}
|
||||
item => Some(Ok(item)),
|
||||
},
|
||||
Err(e) => {
|
||||
error!("TEST GOT ERROR FRAME: {:?}", e);
|
||||
Some(Err(e))
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
error!("{:?}", e);
|
||||
Some(Err(e.into()))
|
||||
}
|
||||
}
|
||||
}
|
||||
StreamItem::DataItem(_frame) => {
|
||||
err::todo();
|
||||
Some(Ok(()))
|
||||
}
|
||||
},
|
||||
Err(e) => Some(Err(Error::with_msg(format!("WEIRD EMPTY ERROR {:?}", e)))),
|
||||
};
|
||||
ready(g)
|
||||
})
|
||||
.fold(EventsResponse::new(), |mut a, k| {
|
||||
let g = match k {
|
||||
Ok(StreamItem::Log(_item)) => {
|
||||
a.log_item_count += 1;
|
||||
a
|
||||
}
|
||||
Ok(StreamItem::Stats(item)) => match item {
|
||||
StatsItem::EventDataReadStats(item) => {
|
||||
a.bytes_read += item.parsed_bytes;
|
||||
a
|
||||
}
|
||||
_ => a,
|
||||
},
|
||||
Ok(StreamItem::DataItem(item)) => match item {
|
||||
RangeCompletableItem::RangeComplete => {
|
||||
a.range_complete_count += 1;
|
||||
a
|
||||
}
|
||||
RangeCompletableItem::Data(item) => {
|
||||
a.data_item_count += 1;
|
||||
a.event_count += WithLen::len(&item) as u64;
|
||||
a
|
||||
}
|
||||
},
|
||||
Err(_e) => {
|
||||
a.err_item_count += 1;
|
||||
a
|
||||
}
|
||||
};
|
||||
ready(g)
|
||||
});
|
||||
.fold(EventsResponse::new(), |a, _x| ready(a));
|
||||
let ret = s1.await;
|
||||
debug!("result: {:?}", ret);
|
||||
Ok(ret)
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
pub mod enp;
|
||||
@@ -1,26 +0,0 @@
|
||||
use items::numops::NumOps;
|
||||
use items::scalarevents::ScalarEvents;
|
||||
use items::EventsNodeProcessor;
|
||||
use netpod::AggKind;
|
||||
use netpod::Shape;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
pub struct Identity<NTY> {
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> EventsNodeProcessor for Identity<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = ScalarEvents<NTY>;
|
||||
type Output = ScalarEvents<NTY>;
|
||||
|
||||
fn create(_shape: Shape, _agg_kind: AggKind) -> Self {
|
||||
Self { _m1: PhantomData }
|
||||
}
|
||||
|
||||
fn process(&self, inp: Self::Input) -> Self::Output {
|
||||
inp
|
||||
}
|
||||
}
|
||||
@@ -1,23 +1,8 @@
|
||||
use crate::agg::enp::Identity;
|
||||
use crate::eventblobs::EventChunkerMultifile;
|
||||
use err::Error;
|
||||
use futures_util::Stream;
|
||||
use futures_util::StreamExt;
|
||||
use items::eventfull::EventFull;
|
||||
use items::eventsitem::EventsItem;
|
||||
use items::numops::BoolNum;
|
||||
use items::numops::NumOps;
|
||||
use items::numops::StringNum;
|
||||
use items::plainevents::PlainEvents;
|
||||
use items::plainevents::ScalarPlainEvents;
|
||||
use items::scalarevents::ScalarEvents;
|
||||
use items::waveevents::WaveEvents;
|
||||
use items::waveevents::WaveNBinner;
|
||||
use items::waveevents::WavePlainProc;
|
||||
use items::waveevents::WaveXBinner;
|
||||
use items::Appendable;
|
||||
use items::EventAppendable;
|
||||
use items::EventsNodeProcessor;
|
||||
use items::RangeCompletableItem;
|
||||
use items::Sitemty;
|
||||
use items::StreamItem;
|
||||
@@ -32,7 +17,6 @@ use netpod::ScalarType;
|
||||
use netpod::Shape;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::mem::size_of;
|
||||
use std::pin::Pin;
|
||||
use std::task::Context;
|
||||
use std::task::Poll;
|
||||
@@ -58,87 +42,6 @@ pub enum Endian {
|
||||
Big,
|
||||
}
|
||||
|
||||
pub trait NumFromBytes<NTY, END> {
|
||||
fn convert(buf: &[u8], big_endian: bool) -> NTY;
|
||||
}
|
||||
|
||||
impl NumFromBytes<BoolNum, LittleEndian> for BoolNum {
|
||||
fn convert(buf: &[u8], _big_endian: bool) -> BoolNum {
|
||||
BoolNum(buf[0])
|
||||
}
|
||||
}
|
||||
|
||||
impl NumFromBytes<BoolNum, BigEndian> for BoolNum {
|
||||
fn convert(buf: &[u8], _big_endian: bool) -> BoolNum {
|
||||
BoolNum(buf[0])
|
||||
}
|
||||
}
|
||||
|
||||
impl NumFromBytes<StringNum, LittleEndian> for StringNum {
|
||||
fn convert(buf: &[u8], _big_endian: bool) -> StringNum {
|
||||
if false {
|
||||
// TODO remove
|
||||
netpod::log::error!("TODO NumFromBytes for StringNum buf len {}", buf.len());
|
||||
}
|
||||
let s = if buf.len() >= 250 {
|
||||
String::from_utf8_lossy(&buf[..250])
|
||||
} else {
|
||||
String::from_utf8_lossy(buf)
|
||||
};
|
||||
Self(s.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl NumFromBytes<StringNum, BigEndian> for StringNum {
|
||||
fn convert(buf: &[u8], _big_endian: bool) -> StringNum {
|
||||
if false {
|
||||
// TODO remove
|
||||
netpod::log::error!("TODO NumFromBytes for StringNum buf len {}", buf.len());
|
||||
}
|
||||
let s = if buf.len() >= 250 {
|
||||
String::from_utf8_lossy(&buf[..250])
|
||||
} else {
|
||||
String::from_utf8_lossy(buf)
|
||||
};
|
||||
Self(s.into())
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_num_from_bytes_end {
|
||||
($nty:ident, $nl:expr, $end:ident, $ec:ident) => {
|
||||
impl NumFromBytes<$nty, $end> for $nty {
|
||||
fn convert(buf: &[u8], big_endian: bool) -> $nty {
|
||||
// Error in data on disk:
|
||||
// Can not rely on byte order as stated in the channel config.
|
||||
//$nty::$ec(*arrayref::array_ref![buf, 0, $nl])
|
||||
if big_endian {
|
||||
$nty::from_be_bytes(*arrayref::array_ref![buf, 0, $nl])
|
||||
} else {
|
||||
$nty::from_le_bytes(*arrayref::array_ref![buf, 0, $nl])
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! impl_num_from_bytes {
|
||||
($nty:ident, $nl:expr) => {
|
||||
impl_num_from_bytes_end!($nty, $nl, LittleEndian, from_le_bytes);
|
||||
impl_num_from_bytes_end!($nty, $nl, BigEndian, from_be_bytes);
|
||||
};
|
||||
}
|
||||
|
||||
impl_num_from_bytes!(u8, 1);
|
||||
impl_num_from_bytes!(u16, 2);
|
||||
impl_num_from_bytes!(u32, 4);
|
||||
impl_num_from_bytes!(u64, 8);
|
||||
impl_num_from_bytes!(i8, 1);
|
||||
impl_num_from_bytes!(i16, 2);
|
||||
impl_num_from_bytes!(i32, 4);
|
||||
impl_num_from_bytes!(i64, 8);
|
||||
impl_num_from_bytes!(f32, 4);
|
||||
impl_num_from_bytes!(f64, 8);
|
||||
|
||||
pub trait ScalarValueFromBytes<STY> {
|
||||
fn convert(buf: &[u8], endian: Endian) -> Result<STY, Error>;
|
||||
fn convert_dim1(buf: &[u8], endian: Endian, n: usize) -> Result<Vec<STY>, Error>;
|
||||
@@ -380,231 +283,6 @@ fn make_scalar_conv(
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
pub trait EventValueFromBytes<NTY, END>
|
||||
where
|
||||
NTY: NumFromBytes<NTY, END>,
|
||||
{
|
||||
type Output;
|
||||
type Batch: Appendable + EventAppendable<Value = Self::Output>;
|
||||
// The written data on disk has errors:
|
||||
// The endian as stated in the channel config does not match written events.
|
||||
// Therefore, can not rely on that but have to check for each single event...
|
||||
fn convert(&self, buf: &[u8], big_endian: bool) -> Result<Self::Output, Error>;
|
||||
}
|
||||
|
||||
impl<NTY, END> EventValueFromBytes<NTY, END> for EventValuesDim0Case<NTY>
|
||||
where
|
||||
NTY: NumOps + NumFromBytes<NTY, END>,
|
||||
{
|
||||
type Output = NTY;
|
||||
type Batch = ScalarEvents<NTY>;
|
||||
|
||||
fn convert(&self, buf: &[u8], big_endian: bool) -> Result<Self::Output, Error> {
|
||||
Ok(NTY::convert(buf, big_endian))
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY, END> EventValueFromBytes<NTY, END> for EventValuesDim1Case<NTY>
|
||||
where
|
||||
NTY: NumOps + NumFromBytes<NTY, END>,
|
||||
{
|
||||
type Output = Vec<NTY>;
|
||||
type Batch = WaveEvents<NTY>;
|
||||
|
||||
fn convert(&self, buf: &[u8], big_endian: bool) -> Result<Self::Output, Error> {
|
||||
let es = size_of::<NTY>();
|
||||
let n1 = buf.len() / es;
|
||||
if n1 != self.n as usize {
|
||||
return Err(Error::with_msg(format!("ele count got {} exp {}", n1, self.n)));
|
||||
}
|
||||
let mut vals = vec![];
|
||||
// TODO could optimize using unsafe code..
|
||||
for n2 in 0..n1 {
|
||||
let i1 = es * n2;
|
||||
vals.push(<NTY as NumFromBytes<NTY, END>>::convert(
|
||||
&buf[i1..(i1 + es)],
|
||||
big_endian,
|
||||
));
|
||||
}
|
||||
Ok(vals)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait EventValueShape<NTY, END>: EventValueFromBytes<NTY, END> + Send + Unpin
|
||||
where
|
||||
NTY: NumFromBytes<NTY, END>,
|
||||
{
|
||||
type NumXAggToSingleBin: EventsNodeProcessor<Input = <Self as EventValueFromBytes<NTY, END>>::Batch>;
|
||||
type NumXAggToNBins: EventsNodeProcessor<Input = <Self as EventValueFromBytes<NTY, END>>::Batch>;
|
||||
type NumXAggPlain: EventsNodeProcessor<Input = <Self as EventValueFromBytes<NTY, END>>::Batch>;
|
||||
}
|
||||
|
||||
pub struct EventValuesDim0Case<NTY> {
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> EventValuesDim0Case<NTY> {
|
||||
pub fn new() -> Self {
|
||||
Self { _m1: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY, END> EventValueShape<NTY, END> for EventValuesDim0Case<NTY>
|
||||
where
|
||||
NTY: NumOps + NumFromBytes<NTY, END>,
|
||||
{
|
||||
type NumXAggToSingleBin = Identity<NTY>;
|
||||
// TODO is this sufficient?
|
||||
type NumXAggToNBins = Identity<NTY>;
|
||||
type NumXAggPlain = Identity<NTY>;
|
||||
}
|
||||
|
||||
pub struct EventValuesDim1Case<NTY> {
|
||||
n: u32,
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> EventValuesDim1Case<NTY> {
|
||||
pub fn new(n: u32) -> Self {
|
||||
Self { n, _m1: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY, END> EventValueShape<NTY, END> for EventValuesDim1Case<NTY>
|
||||
where
|
||||
NTY: NumOps + NumFromBytes<NTY, END>,
|
||||
{
|
||||
type NumXAggToSingleBin = WaveXBinner<NTY>;
|
||||
type NumXAggToNBins = WaveNBinner<NTY>;
|
||||
type NumXAggPlain = WavePlainProc<NTY>;
|
||||
}
|
||||
|
||||
pub struct EventsDecodedStream<NTY, END, EVS>
|
||||
where
|
||||
NTY: NumOps + NumFromBytes<NTY, END>,
|
||||
END: Endianness,
|
||||
EVS: EventValueShape<NTY, END>,
|
||||
{
|
||||
evs: EVS,
|
||||
event_blobs: EventChunkerMultifile,
|
||||
completed: bool,
|
||||
errored: bool,
|
||||
_m1: PhantomData<NTY>,
|
||||
_m2: PhantomData<END>,
|
||||
_m3: PhantomData<EVS>,
|
||||
}
|
||||
|
||||
impl<NTY, END, EVS> EventsDecodedStream<NTY, END, EVS>
|
||||
where
|
||||
NTY: NumOps + NumFromBytes<NTY, END>,
|
||||
END: Endianness,
|
||||
EVS: EventValueShape<NTY, END> + EventValueFromBytes<NTY, END>,
|
||||
{
|
||||
pub fn new(evs: EVS, event_blobs: EventChunkerMultifile) -> Self {
|
||||
Self {
|
||||
evs,
|
||||
event_blobs,
|
||||
completed: false,
|
||||
errored: false,
|
||||
_m1: PhantomData,
|
||||
_m2: PhantomData,
|
||||
_m3: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn decode(&mut self, ev: &EventFull) -> Result<Option<<EVS as EventValueFromBytes<NTY, END>>::Batch>, Error> {
|
||||
//let mut ret = <<EVS as EventValueFromBytes<NTY, END>>::Batch as Appendable>::empty();
|
||||
//let mut ret = EventValues::<<EVS as EventValueFromBytes<NTY, END>>::Output>::empty();
|
||||
let mut ret = None;
|
||||
//ret.tss.reserve(ev.tss.len());
|
||||
//ret.values.reserve(ev.tss.len());
|
||||
for i1 in 0..ev.tss.len() {
|
||||
// TODO check that dtype, event endianness and event shape match our static
|
||||
// expectation about the data in this channel.
|
||||
let _ty = &ev.scalar_types[i1];
|
||||
let be = ev.be[i1];
|
||||
// Too bad, data on disk is inconsistent, can not rely on endian as stated in channel config.
|
||||
if false && be != END::is_big() {
|
||||
return Err(Error::with_msg(format!(
|
||||
"endian mismatch in event got {} exp {}",
|
||||
be,
|
||||
END::is_big()
|
||||
)));
|
||||
}
|
||||
let decomp = ev.decomps[i1].as_ref().unwrap().as_ref();
|
||||
let val = self.evs.convert(decomp, be)?;
|
||||
let k = <<EVS as EventValueFromBytes<NTY, END>>::Batch as EventAppendable>::append_event(
|
||||
ret,
|
||||
ev.tss[i1],
|
||||
ev.pulses[i1],
|
||||
val,
|
||||
);
|
||||
ret = Some(k);
|
||||
}
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY, END, EVS> Stream for EventsDecodedStream<NTY, END, EVS>
|
||||
where
|
||||
NTY: NumOps + NumFromBytes<NTY, END>,
|
||||
END: Endianness,
|
||||
EVS: EventValueShape<NTY, END> + EventValueFromBytes<NTY, END>,
|
||||
{
|
||||
type Item = Result<StreamItem<RangeCompletableItem<<EVS as EventValueFromBytes<NTY, END>>::Batch>>, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
use Poll::*;
|
||||
loop {
|
||||
break if self.completed {
|
||||
panic!("poll_next on completed")
|
||||
} else if self.errored {
|
||||
self.completed = true;
|
||||
Ready(None)
|
||||
} else {
|
||||
match self.event_blobs.poll_next_unpin(cx) {
|
||||
Ready(item) => match item {
|
||||
Some(item) => match item {
|
||||
Ok(item) => match item {
|
||||
StreamItem::DataItem(item) => match item {
|
||||
RangeCompletableItem::RangeComplete => {
|
||||
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))))
|
||||
}
|
||||
RangeCompletableItem::Data(item) => match self.decode(&item) {
|
||||
Ok(res) => match res {
|
||||
Some(res) => {
|
||||
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(res)))))
|
||||
}
|
||||
None => {
|
||||
continue;
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
self.errored = true;
|
||||
Ready(Some(Err(e)))
|
||||
}
|
||||
},
|
||||
},
|
||||
StreamItem::Log(item) => Ready(Some(Ok(StreamItem::Log(item)))),
|
||||
StreamItem::Stats(item) => Ready(Some(Ok(StreamItem::Stats(item)))),
|
||||
},
|
||||
Err(e) => {
|
||||
self.errored = true;
|
||||
Ready(Some(Err(e)))
|
||||
}
|
||||
},
|
||||
None => {
|
||||
self.completed = true;
|
||||
Ready(None)
|
||||
}
|
||||
},
|
||||
Pending => Pending,
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventsDynStream {
|
||||
scalar_type: ScalarType,
|
||||
shape: Shape,
|
||||
@@ -749,211 +427,3 @@ impl Stream for EventsDynStream {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventsItemStream {
|
||||
inp: Pin<Box<dyn Stream<Item = Sitemty<EventFull>>>>,
|
||||
done: bool,
|
||||
complete: bool,
|
||||
}
|
||||
|
||||
impl EventsItemStream {
|
||||
pub fn new(inp: Pin<Box<dyn Stream<Item = Sitemty<EventFull>>>>) -> Self {
|
||||
Self {
|
||||
inp,
|
||||
done: false,
|
||||
complete: false,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO need some default expectation about the content type, because real world data does not
|
||||
// always contain that information per event, or even contains wrong information.
|
||||
fn decode(&mut self, ev: &EventFull) -> Result<Option<EventsItem>, Error> {
|
||||
// TODO define expected endian from parameters:
|
||||
let big_endian = false;
|
||||
// TODO:
|
||||
let mut tyi = None;
|
||||
let mut ret = None;
|
||||
for i1 in 0..ev.tss.len() {
|
||||
let ts = ev.tss[i1];
|
||||
let pulse = ev.pulses[i1];
|
||||
// TODO check that dtype, event endianness and event shape match our static
|
||||
// expectation about the data in this channel.
|
||||
let _ty = &ev.scalar_types[i1];
|
||||
let be = ev.be[i1];
|
||||
if be != big_endian {
|
||||
return Err(Error::with_msg(format!("big endian mismatch {} vs {}", be, big_endian)));
|
||||
}
|
||||
// TODO bad, data on disk is inconsistent, can not rely on endian as stated in channel config.
|
||||
let decomp = ev.decomp(i1);
|
||||
// If not done yet, infer the actual type from the (undocumented) combinations of channel
|
||||
// config parameters and values in the event data.
|
||||
// TODO
|
||||
match &tyi {
|
||||
Some(_) => {}
|
||||
None => {
|
||||
//let cont = EventValues::<f64>::empty();
|
||||
tyi = Some((ev.scalar_types[i1].clone(), ev.shapes[i1].clone()));
|
||||
match &tyi.as_ref().unwrap().1 {
|
||||
Shape::Scalar => match &tyi.as_ref().unwrap().0 {
|
||||
ScalarType::U8 => {
|
||||
// TODO
|
||||
let cont = ScalarEvents::<i8>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I8(cont))));
|
||||
}
|
||||
ScalarType::U16 => {
|
||||
// TODO
|
||||
let cont = ScalarEvents::<i16>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I16(cont))));
|
||||
}
|
||||
ScalarType::U32 => {
|
||||
// TODO
|
||||
let cont = ScalarEvents::<i32>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont))));
|
||||
}
|
||||
ScalarType::U64 => {
|
||||
// TODO
|
||||
let cont = ScalarEvents::<i32>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont))));
|
||||
}
|
||||
ScalarType::I8 => {
|
||||
let cont = ScalarEvents::<i8>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I8(cont))));
|
||||
}
|
||||
ScalarType::I16 => {
|
||||
let cont = ScalarEvents::<i16>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I16(cont))));
|
||||
}
|
||||
ScalarType::I32 => {
|
||||
let cont = ScalarEvents::<i32>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont))));
|
||||
}
|
||||
ScalarType::I64 => {
|
||||
// TODO
|
||||
let cont = ScalarEvents::<i32>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I32(cont))));
|
||||
}
|
||||
ScalarType::F32 => {
|
||||
let cont = ScalarEvents::<f32>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::F32(cont))));
|
||||
}
|
||||
ScalarType::F64 => {
|
||||
let cont = ScalarEvents::<f64>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::F64(cont))));
|
||||
}
|
||||
ScalarType::BOOL => {
|
||||
// TODO
|
||||
let cont = ScalarEvents::<i8>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::I8(cont))));
|
||||
}
|
||||
ScalarType::STRING => {
|
||||
// TODO
|
||||
let cont = ScalarEvents::<String>::empty();
|
||||
ret = Some(EventsItem::Plain(PlainEvents::Scalar(ScalarPlainEvents::String(cont))));
|
||||
}
|
||||
},
|
||||
Shape::Wave(_) => todo!(),
|
||||
Shape::Image(..) => todo!(),
|
||||
}
|
||||
}
|
||||
};
|
||||
// TODO here, I expect that we found the type.
|
||||
let tyi = tyi.as_ref().unwrap();
|
||||
match &tyi.1 {
|
||||
Shape::Scalar => match &tyi.0 {
|
||||
ScalarType::U8 => todo!(),
|
||||
ScalarType::U16 => todo!(),
|
||||
ScalarType::U32 => todo!(),
|
||||
ScalarType::U64 => todo!(),
|
||||
ScalarType::I8 => todo!(),
|
||||
ScalarType::I16 => todo!(),
|
||||
ScalarType::I32 => todo!(),
|
||||
ScalarType::I64 => todo!(),
|
||||
ScalarType::F32 => todo!(),
|
||||
ScalarType::F64 => {
|
||||
let conv = EventValuesDim0Case::<f64>::new();
|
||||
let val = EventValueFromBytes::<_, LittleEndian>::convert(&conv, decomp, big_endian)?;
|
||||
match &mut ret {
|
||||
Some(ret) => match ret {
|
||||
EventsItem::Plain(ret) => match ret {
|
||||
PlainEvents::Scalar(ret) => match ret {
|
||||
ScalarPlainEvents::F64(ret) => {
|
||||
ret.tss.push(ts);
|
||||
// TODO
|
||||
let _ = pulse;
|
||||
ret.values.push(val);
|
||||
}
|
||||
_ => panic!(),
|
||||
},
|
||||
},
|
||||
EventsItem::XBinnedEvents(_) => todo!(),
|
||||
},
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
ScalarType::BOOL => todo!(),
|
||||
ScalarType::STRING => todo!(),
|
||||
},
|
||||
Shape::Wave(_) => todo!(),
|
||||
Shape::Image(_, _) => todo!(),
|
||||
}
|
||||
//let val = self.evs.convert(decomp, be)?;
|
||||
//let k = <<EVS as EventValueFromBytes<NTY, END>>::Batch as EventAppendable>::append_event(ret, ev.tss[i1], val);
|
||||
}
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for EventsItemStream {
|
||||
type Item = Sitemty<EventsItem>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
use Poll::*;
|
||||
loop {
|
||||
break if self.complete {
|
||||
panic!("poll_next on complete")
|
||||
} else if self.done {
|
||||
self.complete = true;
|
||||
Ready(None)
|
||||
} else {
|
||||
match self.inp.poll_next_unpin(cx) {
|
||||
Ready(item) => match item {
|
||||
Some(item) => match item {
|
||||
Ok(item) => match item {
|
||||
StreamItem::DataItem(item) => match item {
|
||||
RangeCompletableItem::RangeComplete => {
|
||||
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))))
|
||||
}
|
||||
RangeCompletableItem::Data(item) => match self.decode(&item) {
|
||||
Ok(res) => match res {
|
||||
Some(res) => {
|
||||
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(res)))))
|
||||
}
|
||||
None => {
|
||||
continue;
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
self.done = true;
|
||||
Ready(Some(Err(e)))
|
||||
}
|
||||
},
|
||||
},
|
||||
StreamItem::Log(item) => Ready(Some(Ok(StreamItem::Log(item)))),
|
||||
StreamItem::Stats(item) => Ready(Some(Ok(StreamItem::Stats(item)))),
|
||||
},
|
||||
Err(e) => {
|
||||
self.done = true;
|
||||
Ready(Some(Err(e)))
|
||||
}
|
||||
},
|
||||
None => {
|
||||
self.done = true;
|
||||
Ready(None)
|
||||
}
|
||||
},
|
||||
Pending => Pending,
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
pub mod agg;
|
||||
#[cfg(test)]
|
||||
pub mod aggtest;
|
||||
pub mod binnedstream;
|
||||
|
||||
@@ -1,9 +1,17 @@
|
||||
use clap::{ArgAction, Parser};
|
||||
use clap::ArgAction;
|
||||
use clap::Parser;
|
||||
use err::Error;
|
||||
#[allow(unused)]
|
||||
use netpod::log::*;
|
||||
use netpod::{ByteOrder, ByteSize, Channel, ChannelConfig, NanoRange, Shape};
|
||||
use netpod::ByteOrder;
|
||||
use netpod::ByteSize;
|
||||
use netpod::Channel;
|
||||
use netpod::ChannelConfig;
|
||||
use netpod::NanoRange;
|
||||
use netpod::Shape;
|
||||
use std::path::PathBuf;
|
||||
use streams::eventchunker::{EventChunker, EventChunkerConf};
|
||||
use streams::eventchunker::EventChunker;
|
||||
use streams::eventchunker::EventChunkerConf;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
@@ -93,33 +101,9 @@ pub fn main() -> Result<(), Error> {
|
||||
let stats_conf = EventChunkerConf {
|
||||
disk_stats_every: ByteSize::mb(2),
|
||||
};
|
||||
let chunks =
|
||||
let _chunks =
|
||||
EventChunker::from_start(inp, channel_config.clone(), range, stats_conf, path, false, true);
|
||||
use futures_util::stream::StreamExt;
|
||||
use items::WithLen;
|
||||
//let evs = EventValuesDim0Case::<f64>::new();
|
||||
let mut stream = disk::decode::EventsItemStream::new(Box::pin(chunks));
|
||||
while let Some(item) = stream.next().await {
|
||||
let item = item?;
|
||||
match item {
|
||||
items::StreamItem::DataItem(item) => {
|
||||
match item {
|
||||
items::RangeCompletableItem::RangeComplete => {
|
||||
warn!("RangeComplete");
|
||||
}
|
||||
items::RangeCompletableItem::Data(item) => {
|
||||
info!("{:?} ({} events)", item, item.len());
|
||||
}
|
||||
};
|
||||
}
|
||||
items::StreamItem::Log(k) => {
|
||||
eprintln!("Log item {:?}", k);
|
||||
}
|
||||
items::StreamItem::Stats(k) => {
|
||||
eprintln!("Stats item {:?}", k);
|
||||
}
|
||||
}
|
||||
}
|
||||
err::todo();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
[package]
|
||||
name = "fsio"
|
||||
version = "0.0.2"
|
||||
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
path = "src/fsio.rs"
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
serde_cbor = "0.11.1"
|
||||
chrono = { version = "0.4.19", features = ["serde"] }
|
||||
tokio = { version = "1.11.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
|
||||
#tokio-stream = {version = "0.1.5", features = ["fs"]}
|
||||
#hyper = { version = "0.14", features = ["http1", "http2", "client", "server", "tcp", "stream"] }
|
||||
async-channel = "1.6"
|
||||
bytes = "1.0.1"
|
||||
crc32fast = "1.2.1"
|
||||
arrayref = "0.3.6"
|
||||
byteorder = "1.4.3"
|
||||
futures-core = "0.3.14"
|
||||
futures-util = "0.3.14"
|
||||
tracing = "0.1.25"
|
||||
tracing-futures = { version = "0.2.5", features = ["futures-01", "futures-03", "std-future"] }
|
||||
fs2 = "0.4.3"
|
||||
libc = "0.2.93"
|
||||
hex = "0.4.3"
|
||||
url = "2.2.2"
|
||||
tiny-keccak = { version = "2.0", features = ["sha3"] }
|
||||
err = { path = "../err" }
|
||||
taskrun = { path = "../taskrun" }
|
||||
netpod = { path = "../netpod" }
|
||||
bitshuffle = { path = "../bitshuffle" }
|
||||
items = { path = "../items" }
|
||||
streams = { path = "../streams" }
|
||||
187
fsio/src/fsio.rs
187
fsio/src/fsio.rs
@@ -1,187 +0,0 @@
|
||||
use err::Error;
|
||||
use items::plainevents::PlainEvents;
|
||||
use netpod::log::*;
|
||||
use netpod::Channel;
|
||||
#[allow(unused)]
|
||||
use std::os::unix::prelude::OpenOptionsExt;
|
||||
use std::os::unix::prelude::{AsRawFd, OsStrExt};
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs::OpenOptions;
|
||||
|
||||
const BASE: &str = "/data/daqbuffer-testdata";
|
||||
|
||||
fn fcntl_xlock(file: &mut std::fs::File, beg: i64, cmd: libc::c_int, ty: i32) -> i32 {
|
||||
unsafe {
|
||||
let p = libc::flock {
|
||||
l_type: ty as i16,
|
||||
l_whence: libc::SEEK_SET as i16,
|
||||
l_start: beg,
|
||||
l_len: 8,
|
||||
l_pid: 0,
|
||||
};
|
||||
libc::fcntl(file.as_raw_fd(), cmd, &p)
|
||||
}
|
||||
}
|
||||
|
||||
fn wlock(file: &mut std::fs::File, beg: i64) -> i32 {
|
||||
fcntl_xlock(file, beg, libc::F_OFD_SETLK, libc::F_WRLCK)
|
||||
}
|
||||
|
||||
fn rlock(file: &mut std::fs::File, beg: i64) -> i32 {
|
||||
fcntl_xlock(file, beg, libc::F_OFD_SETLK, libc::F_RDLCK)
|
||||
}
|
||||
|
||||
fn unlock(file: &mut std::fs::File, beg: i64) -> i32 {
|
||||
fcntl_xlock(file, beg, libc::F_OFD_SETLK, libc::F_UNLCK)
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
async fn lock_1() -> Result<(), Error> {
|
||||
let path = PathBuf::from(BASE).join("tmp-daq4-f1");
|
||||
let mut f1 = OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.open(path)
|
||||
.await?;
|
||||
f1.as_raw_fd();
|
||||
|
||||
let mx1 = std::sync::Arc::new(tokio::sync::Mutex::new(0usize));
|
||||
let mg1 = mx1.lock().await;
|
||||
|
||||
let (tx1, rx2) = std::sync::mpsc::channel();
|
||||
let (tx2, rx1) = std::sync::mpsc::channel();
|
||||
|
||||
let t1 = std::thread::spawn({
|
||||
move || {
|
||||
let path = PathBuf::from(BASE).join("tmp-daq4-f1");
|
||||
let mut f1 = std::fs::OpenOptions::new().read(true).write(true).open(&path).unwrap();
|
||||
info!("Thread 1 rlock...");
|
||||
let ec = rlock(&mut f1, 0);
|
||||
info!("Thread 1 rlock {}", ec);
|
||||
tx1.send(1u32).unwrap();
|
||||
rx1.recv().unwrap();
|
||||
info!("Thread 1 unlock...");
|
||||
let ec = unlock(&mut f1, 0);
|
||||
info!("Thread 1 unlock {}", ec);
|
||||
tx1.send(1u32).unwrap();
|
||||
rx1.recv().unwrap();
|
||||
info!("Thread 1 rlock...");
|
||||
let ec = rlock(&mut f1, 0);
|
||||
info!("Thread 1 rlock {}", ec);
|
||||
tx1.send(1u32).unwrap();
|
||||
rx1.recv().unwrap();
|
||||
info!("Thread 1 done");
|
||||
}
|
||||
});
|
||||
let t2 = std::thread::spawn({
|
||||
move || {
|
||||
let path = PathBuf::from(BASE).join("tmp-daq4-f1");
|
||||
let mut f1 = std::fs::OpenOptions::new().read(true).write(true).open(&path).unwrap();
|
||||
rx2.recv().unwrap();
|
||||
info!("Thread 2 wlock...");
|
||||
let ec = wlock(&mut f1, 0);
|
||||
info!("Thread 2 wlock {}", ec);
|
||||
tx2.send(1u32).unwrap();
|
||||
rx2.recv().unwrap();
|
||||
info!("Thread 2 rlock");
|
||||
let ec = rlock(&mut f1, 0);
|
||||
info!("Thread 2 rlock {}", ec);
|
||||
tx2.send(1u32).unwrap();
|
||||
rx2.recv().unwrap();
|
||||
tx2.send(1u32).unwrap();
|
||||
info!("Thread 2 done");
|
||||
}
|
||||
});
|
||||
tokio::task::spawn_blocking(move || {
|
||||
t1.join().map_err(|_| Error::with_msg_no_trace("join error"))?;
|
||||
t2.join().map_err(|_| Error::with_msg_no_trace("join error"))?;
|
||||
Ok::<_, Error>(())
|
||||
})
|
||||
.await
|
||||
.map_err(Error::from_string)??;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
async fn write_1() -> Result<(), Error> {
|
||||
let path = PathBuf::from(BASE).join("tmp-daq4-f2");
|
||||
let mut f1 = OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.open(path)
|
||||
.await?;
|
||||
unsafe {
|
||||
let path_d = PathBuf::from(BASE);
|
||||
let mut path_d_b = path_d.as_os_str().as_bytes().to_vec();
|
||||
//info!("path_d_b {:?}", path_d_b);
|
||||
path_d_b.push(0);
|
||||
let fdd = libc::open(path_d_b.as_ptr() as *const i8, libc::O_DIRECTORY | libc::O_RDONLY);
|
||||
if fdd < 0 {
|
||||
panic!();
|
||||
}
|
||||
let ec = libc::fsync(fdd);
|
||||
if ec != 0 {
|
||||
panic!();
|
||||
}
|
||||
let ec = libc::close(fdd);
|
||||
if ec != 0 {
|
||||
panic!();
|
||||
}
|
||||
let fd = f1.as_raw_fd();
|
||||
let lockparam = libc::flock {
|
||||
l_type: libc::F_RDLCK as i16,
|
||||
l_whence: libc::SEEK_SET as i16,
|
||||
l_start: 0,
|
||||
l_len: 8,
|
||||
l_pid: 0,
|
||||
};
|
||||
let ec = libc::fcntl(f1.as_raw_fd(), libc::F_OFD_SETLK, &lockparam);
|
||||
if ec != 0 {
|
||||
panic!();
|
||||
}
|
||||
let buf = b"world!";
|
||||
let n = libc::pwrite(fd, buf.as_ptr() as *const libc::c_void, buf.len(), 0);
|
||||
if n != buf.len() as isize {
|
||||
panic!();
|
||||
}
|
||||
let ec = libc::fsync(fd);
|
||||
if ec != 0 {
|
||||
panic!();
|
||||
}
|
||||
let lockparam = libc::flock {
|
||||
l_type: libc::F_UNLCK as i16,
|
||||
l_whence: libc::SEEK_SET as i16,
|
||||
l_start: 0,
|
||||
l_len: 8,
|
||||
l_pid: 0,
|
||||
};
|
||||
let ec = libc::fcntl(f1.as_raw_fd(), libc::F_OFD_SETLK, &lockparam);
|
||||
if ec == 0 {
|
||||
panic!();
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(unused)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
//#[test]
|
||||
fn t1() -> Result<(), Error> {
|
||||
Ok(taskrun::run(write_1()).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventSink {}
|
||||
|
||||
impl EventSink {
|
||||
pub fn sink(&self, _channel: &Channel, _events: PlainEvents) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,313 +0,0 @@
|
||||
use crate::eventsitem::EventsItem;
|
||||
use crate::plainevents::{PlainEvents, ScalarPlainEvents};
|
||||
use crate::xbinnedscalarevents::XBinnedScalarEvents;
|
||||
use crate::xbinnedwaveevents::XBinnedWaveEvents;
|
||||
use crate::{Appendable, Clearable, PushableIndex, WithLen, WithTimestamps};
|
||||
use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum SingleBinWaveEvents {
|
||||
U8(XBinnedScalarEvents<u8>),
|
||||
U16(XBinnedScalarEvents<u16>),
|
||||
U32(XBinnedScalarEvents<u32>),
|
||||
U64(XBinnedScalarEvents<u64>),
|
||||
I8(XBinnedScalarEvents<i8>),
|
||||
I16(XBinnedScalarEvents<i16>),
|
||||
I32(XBinnedScalarEvents<i32>),
|
||||
I64(XBinnedScalarEvents<i64>),
|
||||
F32(XBinnedScalarEvents<f32>),
|
||||
F64(XBinnedScalarEvents<f64>),
|
||||
String(XBinnedScalarEvents<String>),
|
||||
}
|
||||
|
||||
impl SingleBinWaveEvents {
|
||||
pub fn variant_name(&self) -> String {
|
||||
items_proc::tycases1!(self, Self, (k), { "$id".into() })
|
||||
}
|
||||
|
||||
// TODO possible to remove?
|
||||
fn x_aggregate(self, _: &AggKind) -> EventsItem {
|
||||
err::todoval()
|
||||
}
|
||||
}
|
||||
|
||||
impl Clearable for SingleBinWaveEvents {
|
||||
fn clear(&mut self) {
|
||||
items_proc::tycases1!(self, Self, (k), { k.clear() })
|
||||
}
|
||||
}
|
||||
|
||||
impl Appendable for SingleBinWaveEvents {
|
||||
fn empty_like_self(&self) -> Self {
|
||||
items_proc::tycases1!(self, Self, (k), { Self::$id(k.empty_like_self()) })
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
items_proc::tycases1!(self, Self, (k), {
|
||||
match src {
|
||||
Self::$id(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
|
||||
// TODO can this implement Appendable in a sane way? Do we need it?
|
||||
err::todo();
|
||||
}
|
||||
}
|
||||
|
||||
impl PushableIndex for SingleBinWaveEvents {
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
items_proc::tycases1!(self, Self, (k), {
|
||||
match src {
|
||||
Self::$id(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for SingleBinWaveEvents {
|
||||
fn len(&self) -> usize {
|
||||
items_proc::tycases1!(self, Self, (k), { k.len() })
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTimestamps for SingleBinWaveEvents {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
items_proc::tycases1!(self, Self, (k), { k.ts(ix) })
|
||||
}
|
||||
}
|
||||
|
||||
impl HasShape for SingleBinWaveEvents {
|
||||
fn shape(&self) -> Shape {
|
||||
Shape::Scalar
|
||||
}
|
||||
}
|
||||
|
||||
impl HasScalarType for SingleBinWaveEvents {
|
||||
fn scalar_type(&self) -> ScalarType {
|
||||
items_proc::tycases1!(self, Self, (k), { ScalarType::$id })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum MultiBinWaveEvents {
|
||||
U8(XBinnedWaveEvents<u8>),
|
||||
U16(XBinnedWaveEvents<u16>),
|
||||
U32(XBinnedWaveEvents<u32>),
|
||||
U64(XBinnedWaveEvents<u64>),
|
||||
I8(XBinnedWaveEvents<i8>),
|
||||
I16(XBinnedWaveEvents<i16>),
|
||||
I32(XBinnedWaveEvents<i32>),
|
||||
I64(XBinnedWaveEvents<i64>),
|
||||
F32(XBinnedWaveEvents<f32>),
|
||||
F64(XBinnedWaveEvents<f64>),
|
||||
String(XBinnedWaveEvents<String>),
|
||||
}
|
||||
|
||||
impl MultiBinWaveEvents {
|
||||
pub fn variant_name(&self) -> String {
|
||||
items_proc::tycases1!(self, Self, (k), { "$id".into() })
|
||||
}
|
||||
|
||||
// TODO remove
|
||||
fn x_aggregate(self, _: &AggKind) -> EventsItem {
|
||||
err::todoval()
|
||||
}
|
||||
}
|
||||
|
||||
impl Clearable for MultiBinWaveEvents {
|
||||
fn clear(&mut self) {
|
||||
items_proc::tycases1!(self, Self, (k), { k.clear() })
|
||||
}
|
||||
}
|
||||
|
||||
impl Appendable for MultiBinWaveEvents {
|
||||
fn empty_like_self(&self) -> Self {
|
||||
items_proc::tycases1!(self, Self, (k), { Self::$id(k.empty_like_self()) })
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
items_proc::tycases1!(self, Self, (k), {
|
||||
match src {
|
||||
Self::$id(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
|
||||
// TODO can this implement Appendable in a sane way? Do we need it?
|
||||
err::todo();
|
||||
}
|
||||
}
|
||||
|
||||
impl PushableIndex for MultiBinWaveEvents {
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
items_proc::tycases1!(self, Self, (k), {
|
||||
match src {
|
||||
Self::$id(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for MultiBinWaveEvents {
|
||||
fn len(&self) -> usize {
|
||||
items_proc::tycases1!(self, Self, (k), { k.len() })
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTimestamps for MultiBinWaveEvents {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
items_proc::tycases1!(self, Self, (k), { k.ts(ix) })
|
||||
}
|
||||
}
|
||||
|
||||
impl HasShape for MultiBinWaveEvents {
|
||||
fn shape(&self) -> Shape {
|
||||
Shape::Scalar
|
||||
}
|
||||
}
|
||||
|
||||
impl HasScalarType for MultiBinWaveEvents {
|
||||
fn scalar_type(&self) -> ScalarType {
|
||||
items_proc::tycases1!(self, Self, (k), { ScalarType::$id })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum XBinnedEvents {
|
||||
Scalar(ScalarPlainEvents),
|
||||
SingleBinWave(SingleBinWaveEvents),
|
||||
MultiBinWave(MultiBinWaveEvents),
|
||||
}
|
||||
|
||||
impl XBinnedEvents {
|
||||
pub fn variant_name(&self) -> String {
|
||||
use XBinnedEvents::*;
|
||||
match self {
|
||||
Scalar(h) => format!("Scalar({})", h.variant_name()),
|
||||
SingleBinWave(h) => format!("SingleBinWave({})", h.variant_name()),
|
||||
MultiBinWave(h) => format!("MultiBinWave({})", h.variant_name()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn x_aggregate(self, ak: &AggKind) -> EventsItem {
|
||||
use XBinnedEvents::*;
|
||||
match self {
|
||||
Scalar(k) => EventsItem::Plain(PlainEvents::Scalar(k)),
|
||||
SingleBinWave(k) => k.x_aggregate(ak),
|
||||
MultiBinWave(k) => k.x_aggregate(ak),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clearable for XBinnedEvents {
|
||||
fn clear(&mut self) {
|
||||
match self {
|
||||
XBinnedEvents::Scalar(k) => k.clear(),
|
||||
XBinnedEvents::SingleBinWave(k) => k.clear(),
|
||||
XBinnedEvents::MultiBinWave(k) => k.clear(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Appendable for XBinnedEvents {
|
||||
fn empty_like_self(&self) -> Self {
|
||||
match self {
|
||||
Self::Scalar(k) => Self::Scalar(k.empty_like_self()),
|
||||
Self::SingleBinWave(k) => Self::SingleBinWave(k.empty_like_self()),
|
||||
Self::MultiBinWave(k) => Self::MultiBinWave(k.empty_like_self()),
|
||||
}
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
match self {
|
||||
Self::Scalar(k) => match src {
|
||||
Self::Scalar(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::SingleBinWave(k) => match src {
|
||||
Self::SingleBinWave(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::MultiBinWave(k) => match src {
|
||||
Self::MultiBinWave(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
|
||||
// TODO can this implement Appendable in a sane way? Do we need it?
|
||||
err::todo();
|
||||
}
|
||||
}
|
||||
|
||||
impl PushableIndex for XBinnedEvents {
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
match self {
|
||||
Self::Scalar(k) => match src {
|
||||
Self::Scalar(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::SingleBinWave(k) => match src {
|
||||
Self::SingleBinWave(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::MultiBinWave(k) => match src {
|
||||
Self::MultiBinWave(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for XBinnedEvents {
|
||||
fn len(&self) -> usize {
|
||||
use XBinnedEvents::*;
|
||||
match self {
|
||||
Scalar(j) => j.len(),
|
||||
SingleBinWave(j) => j.len(),
|
||||
MultiBinWave(j) => j.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTimestamps for XBinnedEvents {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
use XBinnedEvents::*;
|
||||
match self {
|
||||
Scalar(j) => j.ts(ix),
|
||||
SingleBinWave(j) => j.ts(ix),
|
||||
MultiBinWave(j) => j.ts(ix),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasShape for XBinnedEvents {
|
||||
fn shape(&self) -> Shape {
|
||||
use XBinnedEvents::*;
|
||||
match self {
|
||||
Scalar(h) => h.shape(),
|
||||
SingleBinWave(h) => h.shape(),
|
||||
MultiBinWave(h) => h.shape(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasScalarType for XBinnedEvents {
|
||||
fn scalar_type(&self) -> ScalarType {
|
||||
use XBinnedEvents::*;
|
||||
match self {
|
||||
Scalar(h) => h.scalar_type(),
|
||||
SingleBinWave(h) => h.scalar_type(),
|
||||
MultiBinWave(h) => h.scalar_type(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,683 +0,0 @@
|
||||
use crate::frame::bincode_from_slice;
|
||||
use crate::numops::NumOps;
|
||||
use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult};
|
||||
use crate::Appendable;
|
||||
use crate::FilterFittingInside;
|
||||
use crate::Fits;
|
||||
use crate::FitsInside;
|
||||
use crate::FrameTypeInnerStatic;
|
||||
use crate::IsoDateTime;
|
||||
use crate::ReadPbv;
|
||||
use crate::ReadableFromFile;
|
||||
use crate::Sitemty;
|
||||
use crate::TimeBinnableDyn;
|
||||
use crate::{ts_offs_from_abs, FrameType};
|
||||
use crate::{NewEmpty, RangeOverlapInfo, WithLen};
|
||||
use crate::{TimeBinnableType, TimeBinnableTypeAggregator};
|
||||
use crate::{TimeBinned, TimeBinnerDyn, TimeBins};
|
||||
use chrono::{TimeZone, Utc};
|
||||
use err::Error;
|
||||
use items_0::subfr::SubFrId;
|
||||
use items_0::AsAnyRef;
|
||||
use netpod::log::*;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::{NanoRange, Shape};
|
||||
use num_traits::Zero;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use tokio::fs::File;
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct MinMaxAvgDim0Bins<NTY> {
|
||||
pub ts1s: Vec<u64>,
|
||||
pub ts2s: Vec<u64>,
|
||||
pub counts: Vec<u64>,
|
||||
pub mins: Vec<NTY>,
|
||||
pub maxs: Vec<NTY>,
|
||||
pub avgs: Vec<f32>,
|
||||
}
|
||||
|
||||
impl<NTY> FrameTypeInnerStatic for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
const FRAME_TYPE_ID: u32 = crate::MIN_MAX_AVG_DIM_0_BINS_FRAME_TYPE_ID + NTY::SUB;
|
||||
}
|
||||
|
||||
impl<NTY> FrameType for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> fmt::Debug for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
fmt,
|
||||
"MinMaxAvgDim0Bins count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
|
||||
self.ts1s.len(),
|
||||
self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
|
||||
self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
|
||||
self.counts,
|
||||
self.mins,
|
||||
self.maxs,
|
||||
self.avgs,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyRef for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> MinMaxAvgDim0Bins<NTY> {
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
ts1s: vec![],
|
||||
ts2s: vec![],
|
||||
counts: vec![],
|
||||
mins: vec![],
|
||||
maxs: vec![],
|
||||
avgs: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FitsInside for MinMaxAvgDim0Bins<NTY> {
|
||||
fn fits_inside(&self, range: NanoRange) -> Fits {
|
||||
if self.ts1s.is_empty() {
|
||||
Fits::Empty
|
||||
} else {
|
||||
let t1 = *self.ts1s.first().unwrap();
|
||||
let t2 = *self.ts2s.last().unwrap();
|
||||
if t2 <= range.beg {
|
||||
Fits::Lower
|
||||
} else if t1 >= range.end {
|
||||
Fits::Greater
|
||||
} else if t1 < range.beg && t2 > range.end {
|
||||
Fits::PartlyLowerAndGreater
|
||||
} else if t1 < range.beg {
|
||||
Fits::PartlyLower
|
||||
} else if t2 > range.end {
|
||||
Fits::PartlyGreater
|
||||
} else {
|
||||
Fits::Inside
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FilterFittingInside for MinMaxAvgDim0Bins<NTY> {
|
||||
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
|
||||
match self.fits_inside(fit_range) {
|
||||
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> RangeOverlapInfo for MinMaxAvgDim0Bins<NTY> {
|
||||
fn ends_before(&self, range: NanoRange) -> bool {
|
||||
match self.ts2s.last() {
|
||||
Some(&ts) => ts <= range.beg,
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn ends_after(&self, range: NanoRange) -> bool {
|
||||
match self.ts2s.last() {
|
||||
Some(&ts) => ts > range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn starts_after(&self, range: NanoRange) -> bool {
|
||||
match self.ts1s.first() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBins for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn ts1s(&self) -> &Vec<u64> {
|
||||
&self.ts1s
|
||||
}
|
||||
|
||||
fn ts2s(&self) -> &Vec<u64> {
|
||||
&self.ts2s
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for MinMaxAvgDim0Bins<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.ts1s.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> NewEmpty for MinMaxAvgDim0Bins<NTY> {
|
||||
fn empty(_shape: Shape) -> Self {
|
||||
Self {
|
||||
ts1s: Vec::new(),
|
||||
ts2s: Vec::new(),
|
||||
counts: Vec::new(),
|
||||
mins: Vec::new(),
|
||||
maxs: Vec::new(),
|
||||
avgs: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Appendable for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn empty_like_self(&self) -> Self {
|
||||
Self::empty()
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
self.ts1s.extend_from_slice(&src.ts1s);
|
||||
self.ts2s.extend_from_slice(&src.ts2s);
|
||||
self.counts.extend_from_slice(&src.counts);
|
||||
self.mins.extend_from_slice(&src.mins);
|
||||
self.maxs.extend_from_slice(&src.maxs);
|
||||
self.avgs.extend_from_slice(&src.avgs);
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, ts1: u64, ts2: u64) {
|
||||
self.ts1s.push(ts1);
|
||||
self.ts2s.push(ts2);
|
||||
self.counts.push(0);
|
||||
self.mins.push(NTY::zero());
|
||||
self.maxs.push(NTY::zero());
|
||||
self.avgs.push(0.);
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ReadableFromFile for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
// TODO this function is not needed in the trait:
|
||||
fn read_from_file(file: File) -> Result<ReadPbv<Self>, Error> {
|
||||
Ok(ReadPbv::new(file))
|
||||
}
|
||||
|
||||
fn from_buf(buf: &[u8]) -> Result<Self, Error> {
|
||||
let dec = bincode_from_slice(buf)?;
|
||||
Ok(dec)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableType for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Output = MinMaxAvgDim0Bins<NTY>;
|
||||
type Aggregator = MinMaxAvgDim0BinsAggregator<NTY>;
|
||||
|
||||
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
|
||||
debug!(
"TimeBinnableType for MinMaxAvgDim0Bins aggregator() range {:?} x_bin_count {} do_time_weight {}",
|
||||
range, x_bin_count, do_time_weight
|
||||
);
|
||||
Self::Aggregator::new(range, do_time_weight)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ToJsonResult for Sitemty<MinMaxAvgDim0Bins<NTY>>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn to_json_result(&self) -> Result<Box<dyn ToJsonBytes>, Error> {
|
||||
Ok(Box::new(serde_json::Value::String(format!(
|
||||
"MinMaxAvgBins/non-json-item"
|
||||
))))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MinMaxAvgBinsCollected<NTY> {
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> MinMaxAvgBinsCollected<NTY> {
|
||||
pub fn new() -> Self {
|
||||
Self { _m1: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct MinMaxAvgBinsCollectedResult<NTY> {
|
||||
#[serde(rename = "tsAnchor")]
|
||||
ts_anchor_sec: u64,
|
||||
#[serde(rename = "tsMs")]
|
||||
ts_off_ms: Vec<u64>,
|
||||
#[serde(rename = "tsNs")]
|
||||
ts_off_ns: Vec<u64>,
|
||||
counts: Vec<u64>,
|
||||
mins: Vec<NTY>,
|
||||
maxs: Vec<NTY>,
|
||||
avgs: Vec<f32>,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")]
|
||||
finalised_range: bool,
|
||||
#[serde(skip_serializing_if = "Zero::is_zero", rename = "missingBins")]
|
||||
missing_bins: u32,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename = "continueAt")]
|
||||
continue_at: Option<IsoDateTime>,
|
||||
}
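
For orientation, a sketch of the JSON object that MinMaxAvgBinsCollectedResult serializes to (field names follow the serde renames above; the values are purely illustrative, and rangeFinal, missingBins and continueAt are omitted when false, zero or None respectively):

{
  "tsAnchor": 1000,
  "tsMs": [0, 10000, 20000],
  "tsNs": [0, 0, 0],
  "counts": [1, 1],
  "mins": [3, 4],
  "maxs": [10, 9],
  "avgs": [7.0, 6.0],
  "rangeFinal": true
}
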
pub struct MinMaxAvgBinsCollector<NTY> {
|
||||
bin_count_exp: u32,
|
||||
timed_out: bool,
|
||||
range_complete: bool,
|
||||
vals: MinMaxAvgDim0Bins<NTY>,
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> MinMaxAvgBinsCollector<NTY> {
|
||||
pub fn new(bin_count_exp: u32) -> Self {
|
||||
Self {
|
||||
bin_count_exp,
|
||||
timed_out: false,
|
||||
range_complete: false,
|
||||
vals: MinMaxAvgDim0Bins::<NTY>::empty(),
|
||||
_m1: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for MinMaxAvgBinsCollector<NTY>
|
||||
where
|
||||
NTY: NumOps + Serialize,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.vals.ts1s.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collector for MinMaxAvgBinsCollector<NTY>
|
||||
where
|
||||
NTY: NumOps + Serialize,
|
||||
{
|
||||
type Input = MinMaxAvgDim0Bins<NTY>;
|
||||
type Output = MinMaxAvgBinsCollectedResult<NTY>;
|
||||
|
||||
fn ingest(&mut self, src: &Self::Input) {
|
||||
Appendable::append(&mut self.vals, src);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_complete = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
}
|
||||
|
||||
fn result(self) -> Result<Self::Output, Error> {
|
||||
let bin_count = self.vals.ts1s.len() as u32;
|
||||
// TODO could save the copy:
|
||||
let mut ts_all = self.vals.ts1s.clone();
|
||||
if self.vals.ts2s.len() > 0 {
|
||||
ts_all.push(*self.vals.ts2s.last().unwrap());
|
||||
}
|
||||
let continue_at = if self.vals.ts1s.len() < self.bin_count_exp as usize {
|
||||
match ts_all.last() {
|
||||
Some(&k) => {
|
||||
let iso = IsoDateTime(Utc.timestamp_nanos(k as i64));
|
||||
Some(iso)
|
||||
}
|
||||
None => Err(Error::with_msg("partial_content but no bin in result"))?,
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let tst = ts_offs_from_abs(&ts_all);
|
||||
let ret = MinMaxAvgBinsCollectedResult::<NTY> {
|
||||
ts_anchor_sec: tst.0,
|
||||
ts_off_ms: tst.1,
|
||||
ts_off_ns: tst.2,
|
||||
counts: self.vals.counts,
|
||||
mins: self.vals.mins,
|
||||
maxs: self.vals.maxs,
|
||||
avgs: self.vals.avgs,
|
||||
finalised_range: self.range_complete,
|
||||
missing_bins: self.bin_count_exp - bin_count,
|
||||
continue_at,
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collectable for MinMaxAvgDim0Bins<NTY>
|
||||
where
|
||||
NTY: NumOps + Serialize,
|
||||
{
|
||||
type Collector = MinMaxAvgBinsCollector<NTY>;
|
||||
|
||||
fn new_collector(bin_count_exp: u32) -> Self::Collector {
|
||||
Self::Collector::new(bin_count_exp)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MinMaxAvgDim0BinsAggregator<NTY> {
|
||||
range: NanoRange,
|
||||
count: u64,
|
||||
min: NTY,
|
||||
max: NTY,
|
||||
// Carry over to next bin:
|
||||
avg: f32,
|
||||
sumc: u64,
|
||||
sum: f32,
|
||||
}
|
||||
|
||||
impl<NTY: NumOps> MinMaxAvgDim0BinsAggregator<NTY> {
|
||||
pub fn new(range: NanoRange, _do_time_weight: bool) -> Self {
|
||||
Self {
|
||||
range,
|
||||
count: 0,
|
||||
min: NTY::zero(),
|
||||
max: NTY::zero(),
|
||||
avg: 0.,
|
||||
sumc: 0,
|
||||
sum: 0f32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableTypeAggregator for MinMaxAvgDim0BinsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = MinMaxAvgDim0Bins<NTY>;
|
||||
type Output = MinMaxAvgDim0Bins<NTY>;
|
||||
|
||||
fn range(&self) -> &NanoRange {
|
||||
&self.range
|
||||
}
|
||||
|
||||
fn ingest(&mut self, item: &Self::Input) {
|
||||
for i1 in 0..item.ts1s.len() {
|
||||
if item.counts[i1] == 0 {
|
||||
} else if item.ts2s[i1] <= self.range.beg {
|
||||
} else if item.ts1s[i1] >= self.range.end {
|
||||
} else {
|
||||
if item.mins[i1].as_prim_f32() < 1. {
|
||||
info!("small bin min {:?} counts {}", item.mins[i1], item.counts[i1]);
|
||||
}
|
||||
if self.count == 0 {
|
||||
self.min = item.mins[i1].clone();
|
||||
self.max = item.maxs[i1].clone();
|
||||
} else {
|
||||
if self.min > item.mins[i1] {
|
||||
self.min = item.mins[i1].clone();
|
||||
}
|
||||
if self.max < item.maxs[i1] {
|
||||
self.max = item.maxs[i1].clone();
|
||||
}
|
||||
}
|
||||
self.count += item.counts[i1];
|
||||
self.sum += item.avgs[i1];
|
||||
self.sumc += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output {
|
||||
if self.sumc > 0 {
|
||||
self.avg = self.sum / self.sumc as f32;
|
||||
}
|
||||
let ret = Self::Output {
|
||||
ts1s: vec![self.range.beg],
|
||||
ts2s: vec![self.range.end],
|
||||
counts: vec![self.count],
|
||||
mins: vec![self.min.clone()],
|
||||
maxs: vec![self.max.clone()],
|
||||
avgs: vec![self.avg],
|
||||
};
|
||||
self.range = range;
|
||||
self.count = 0;
|
||||
self.sum = 0f32;
|
||||
self.sumc = 0;
|
||||
ret
|
||||
}
|
||||
}
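
A minimal usage sketch for this aggregator (an illustration only, not part of the diff; it assumes the workspace crates items and netpod expose the paths used below and that the bin fields are public, as they are on the sibling MinMaxAvgDim1Bins further down):

use items::binsdim0::{MinMaxAvgDim0Bins, MinMaxAvgDim0BinsAggregator};
use items::TimeBinnableTypeAggregator;
use netpod::timeunits::SEC;
use netpod::NanoRange;

fn rebin_two_bins_sketch() {
    // Two adjacent 10 s source bins covering [1000 s, 1020 s).
    let inp = MinMaxAvgDim0Bins::<u32> {
        ts1s: vec![SEC * 1000, SEC * 1010],
        ts2s: vec![SEC * 1010, SEC * 1020],
        counts: vec![1, 1],
        mins: vec![3, 4],
        maxs: vec![10, 9],
        avgs: vec![7., 6.],
    };
    // Re-bin both source bins into a single output bin over the full range.
    let mut agg = MinMaxAvgDim0BinsAggregator::new(NanoRange { beg: SEC * 1000, end: SEC * 1020 }, false);
    agg.ingest(&inp);
    // result_reset emits the bin for the current range and re-arms for the next one.
    let out = agg.result_reset(NanoRange { beg: SEC * 1020, end: SEC * 1040 }, false);
    assert_eq!(out.counts, vec![2]);
    assert_eq!(out.mins, vec![3]);
    assert_eq!(out.maxs, vec![10]);
    // avgs are averaged over the contributing source bins: (7 + 6) / 2.
    assert_eq!(out.avgs, vec![6.5]);
}
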
impl<NTY: NumOps + 'static> TimeBinnableDyn for MinMaxAvgDim0Bins<NTY> {
|
||||
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinnerDyn> {
|
||||
eprintln!("MinMaxAvgDim0Bins time_binner_new");
|
||||
info!("MinMaxAvgDim0Bins time_binner_new");
|
||||
let ret = MinMaxAvgDim0BinsTimeBinner::<NTY>::new(edges.into(), do_time_weight);
|
||||
Box::new(ret)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MinMaxAvgDim0BinsTimeBinner<NTY: NumOps> {
|
||||
edges: VecDeque<u64>,
|
||||
do_time_weight: bool,
|
||||
agg: Option<MinMaxAvgDim0BinsAggregator<NTY>>,
|
||||
ready: Option<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
|
||||
}
|
||||
|
||||
impl<NTY: NumOps> MinMaxAvgDim0BinsTimeBinner<NTY> {
|
||||
fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
|
||||
Self {
|
||||
edges,
|
||||
do_time_weight,
|
||||
agg: None,
|
||||
ready: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn next_bin_range(&mut self) -> Option<NanoRange> {
|
||||
if self.edges.len() >= 2 {
|
||||
let ret = NanoRange {
|
||||
beg: self.edges[0],
|
||||
end: self.edges[1],
|
||||
};
|
||||
self.edges.pop_front();
|
||||
Some(ret)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn struct_name() -> &'static str {
|
||||
std::any::type_name::<Self>()
|
||||
}
|
||||
}
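
Since the edge bookkeeping above is easy to misread, here is a small self-contained sketch of the same pop-front semantics (std types only; a hypothetical stand-alone helper, not code from this crate): with edges [a, b, c] the first call yields [a, b) and consumes a, the second yields [b, c), and any further call returns None.

use std::collections::VecDeque;

fn next_bin_range(edges: &mut VecDeque<u64>) -> Option<(u64, u64)> {
    // Mirrors MinMaxAvgDim0BinsTimeBinner::next_bin_range: at least two edges
    // are needed to form a bin, and only the leading edge is consumed.
    if edges.len() >= 2 {
        let ret = (edges[0], edges[1]);
        edges.pop_front();
        Some(ret)
    } else {
        None
    }
}

fn main() {
    let mut edges: VecDeque<u64> = VecDeque::from(vec![1000, 1010, 1020]);
    assert_eq!(next_bin_range(&mut edges), Some((1000, 1010)));
    assert_eq!(next_bin_range(&mut edges), Some((1010, 1020)));
    assert_eq!(next_bin_range(&mut edges), None);
}
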
impl<NTY: NumOps + 'static> TimeBinnerDyn for MinMaxAvgDim0BinsTimeBinner<NTY> {
|
||||
fn ingest(&mut self, item: &dyn TimeBinnableDyn) {
|
||||
//const SELF: &str = "MinMaxAvgDim0BinsTimeBinner";
|
||||
#[allow(non_snake_case)]
|
||||
let SELF = Self::struct_name();
|
||||
if item.len() == 0 {
|
||||
// Return already here, RangeOverlapInfo would not give much sense.
|
||||
return;
|
||||
}
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} no more bin in edges A");
|
||||
return;
|
||||
}
|
||||
// TODO optimize by remembering at which event array index we have arrived.
|
||||
// That needs modified interfaces which can take and yield the start and latest index.
|
||||
loop {
|
||||
while item.starts_after(NanoRange {
|
||||
beg: 0,
|
||||
end: self.edges[1],
|
||||
}) {
|
||||
self.cycle();
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} no more bin in edges B");
|
||||
return;
|
||||
}
|
||||
}
|
||||
if item.ends_before(NanoRange {
|
||||
beg: self.edges[0],
|
||||
end: u64::MAX,
|
||||
}) {
|
||||
return;
|
||||
} else {
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} edge list exhausted");
|
||||
return;
|
||||
} else {
|
||||
let agg = if let Some(agg) = self.agg.as_mut() {
|
||||
agg
|
||||
} else {
|
||||
self.agg = Some(MinMaxAvgDim0BinsAggregator::new(
|
||||
// We know here that we have enough edges for another bin.
|
||||
// and `next_bin_range` will pop the first edge.
|
||||
self.next_bin_range().unwrap(),
|
||||
self.do_time_weight,
|
||||
));
|
||||
self.agg.as_mut().unwrap()
|
||||
};
|
||||
if let Some(item) = item
|
||||
.as_any_ref()
|
||||
// TODO make statically sure that we attempt to cast to the correct type here:
|
||||
.downcast_ref::<<MinMaxAvgDim0BinsAggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
|
||||
{
|
||||
agg.ingest(item);
|
||||
} else {
|
||||
let tyid_item = std::any::Any::type_id(item.as_any_ref());
|
||||
error!("not correct item type {:?}", tyid_item);
|
||||
};
|
||||
if item.ends_after(agg.range().clone()) {
|
||||
self.cycle();
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} no more bin in edges C");
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn bins_ready_count(&self) -> usize {
|
||||
match &self.ready {
|
||||
Some(k) => k.len(),
|
||||
None => 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn bins_ready(&mut self) -> Option<Box<dyn crate::TimeBinned>> {
|
||||
match self.ready.take() {
|
||||
Some(k) => Some(Box::new(k)),
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO there is too much common code between implementors:
|
||||
fn push_in_progress(&mut self, push_empty: bool) {
|
||||
// TODO expand should be derived from AggKind. Is it still required after all?
|
||||
let expand = true;
|
||||
if let Some(agg) = self.agg.as_mut() {
|
||||
let dummy_range = NanoRange { beg: 4, end: 5 };
|
||||
let mut bins = agg.result_reset(dummy_range, expand);
|
||||
self.agg = None;
|
||||
assert_eq!(bins.len(), 1);
|
||||
if push_empty || bins.counts[0] != 0 {
|
||||
match self.ready.as_mut() {
|
||||
Some(ready) => {
|
||||
ready.append(&mut bins);
|
||||
}
|
||||
None => {
|
||||
self.ready = Some(bins);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO there is too much common code between implementors:
|
||||
fn cycle(&mut self) {
|
||||
let n = self.bins_ready_count();
|
||||
self.push_in_progress(true);
|
||||
if self.bins_ready_count() == n {
|
||||
if let Some(range) = self.next_bin_range() {
|
||||
let mut bins = MinMaxAvgDim0Bins::<NTY>::empty();
|
||||
bins.append_zero(range.beg, range.end);
|
||||
match self.ready.as_mut() {
|
||||
Some(ready) => {
|
||||
ready.append(&mut bins);
|
||||
}
|
||||
None => {
|
||||
self.ready = Some(bins);
|
||||
}
|
||||
}
|
||||
if self.bins_ready_count() <= n {
|
||||
error!("failed to push a zero bin");
|
||||
}
|
||||
} else {
|
||||
warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: NumOps> TimeBinned for MinMaxAvgDim0Bins<NTY> {
|
||||
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
|
||||
self as &dyn TimeBinnableDyn
|
||||
}
|
||||
|
||||
fn edges_slice(&self) -> (&[u64], &[u64]) {
|
||||
(&self.ts1s[..], &self.ts2s[..])
|
||||
}
|
||||
|
||||
fn counts(&self) -> &[u64] {
|
||||
&self.counts[..]
|
||||
}
|
||||
|
||||
fn mins(&self) -> Vec<f32> {
|
||||
self.mins.iter().map(|x| x.clone().as_prim_f32()).collect()
|
||||
}
|
||||
|
||||
fn maxs(&self) -> Vec<f32> {
|
||||
self.maxs.iter().map(|x| x.clone().as_prim_f32()).collect()
|
||||
}
|
||||
|
||||
fn avgs(&self) -> Vec<f32> {
|
||||
self.avgs.clone()
|
||||
}
|
||||
|
||||
fn validate(&self) -> Result<(), String> {
|
||||
use std::fmt::Write;
|
||||
let mut msg = String::new();
|
||||
if self.ts1s.len() != self.ts2s.len() {
|
||||
write!(&mut msg, "ts1s ≠ ts2s\n").unwrap();
|
||||
}
|
||||
for (i, ((count, min), max)) in self.counts.iter().zip(&self.mins).zip(&self.maxs).enumerate() {
|
||||
if min.as_prim_f32() < 1. && *count != 0 {
|
||||
write!(&mut msg, "i {} count {} min {:?} max {:?}\n", i, count, min, max).unwrap();
|
||||
}
|
||||
}
|
||||
if msg.is_empty() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,617 +0,0 @@
|
||||
use crate::frame::bincode_from_slice;
|
||||
use crate::numops::NumOps;
|
||||
use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult};
|
||||
use crate::ts_offs_from_abs;
|
||||
use crate::waveevents::WaveEvents;
|
||||
use crate::Appendable;
|
||||
use crate::FilterFittingInside;
|
||||
use crate::FrameTypeInnerStatic;
|
||||
use crate::IsoDateTime;
|
||||
use crate::RangeOverlapInfo;
|
||||
use crate::ReadableFromFile;
|
||||
use crate::TimeBinnableDyn;
|
||||
use crate::TimeBinnableType;
|
||||
use crate::TimeBinnableTypeAggregator;
|
||||
use crate::TimeBins;
|
||||
use crate::{pulse_offs_from_abs, FrameType};
|
||||
use crate::{Fits, FitsInside, NewEmpty, ReadPbv, Sitemty, TimeBinned, WithLen};
|
||||
use chrono::{TimeZone, Utc};
|
||||
use err::Error;
|
||||
use items_0::subfr::SubFrId;
|
||||
use items_0::AsAnyRef;
|
||||
use netpod::log::*;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::{NanoRange, Shape};
|
||||
use num_traits::Zero;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use tokio::fs::File;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct MinMaxAvgDim1Bins<NTY> {
|
||||
pub ts1s: Vec<u64>,
|
||||
pub ts2s: Vec<u64>,
|
||||
pub counts: Vec<u64>,
|
||||
pub mins: Vec<Option<Vec<NTY>>>,
|
||||
pub maxs: Vec<Option<Vec<NTY>>>,
|
||||
pub avgs: Vec<Option<Vec<f32>>>,
|
||||
}
|
||||
|
||||
impl<NTY> FrameTypeInnerStatic for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
const FRAME_TYPE_ID: u32 = crate::MIN_MAX_AVG_DIM_1_BINS_FRAME_TYPE_ID + NTY::SUB;
|
||||
}
|
||||
|
||||
impl<NTY> FrameType for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> fmt::Debug for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
fmt,
|
||||
"MinMaxAvgDim1Bins count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
|
||||
self.ts1s.len(),
|
||||
self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
|
||||
self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
|
||||
self.counts,
|
||||
self.mins.first(),
|
||||
self.maxs.first(),
|
||||
self.avgs.first(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyRef for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> MinMaxAvgDim1Bins<NTY> {
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
ts1s: vec![],
|
||||
ts2s: vec![],
|
||||
counts: vec![],
|
||||
mins: vec![],
|
||||
maxs: vec![],
|
||||
avgs: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FitsInside for MinMaxAvgDim1Bins<NTY> {
|
||||
fn fits_inside(&self, range: NanoRange) -> Fits {
|
||||
if self.ts1s.is_empty() {
|
||||
Fits::Empty
|
||||
} else {
|
||||
let t1 = *self.ts1s.first().unwrap();
|
||||
let t2 = *self.ts2s.last().unwrap();
|
||||
if t2 <= range.beg {
|
||||
Fits::Lower
|
||||
} else if t1 >= range.end {
|
||||
Fits::Greater
|
||||
} else if t1 < range.beg && t2 > range.end {
|
||||
Fits::PartlyLowerAndGreater
|
||||
} else if t1 < range.beg {
|
||||
Fits::PartlyLower
|
||||
} else if t2 > range.end {
|
||||
Fits::PartlyGreater
|
||||
} else {
|
||||
Fits::Inside
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FilterFittingInside for MinMaxAvgDim1Bins<NTY> {
|
||||
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
|
||||
match self.fits_inside(fit_range) {
|
||||
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> RangeOverlapInfo for MinMaxAvgDim1Bins<NTY> {
|
||||
fn ends_before(&self, range: NanoRange) -> bool {
|
||||
match self.ts2s.last() {
|
||||
Some(&ts) => ts <= range.beg,
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn ends_after(&self, range: NanoRange) -> bool {
|
||||
match self.ts2s.last() {
|
||||
Some(&ts) => ts > range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn starts_after(&self, range: NanoRange) -> bool {
|
||||
match self.ts1s.first() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBins for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn ts1s(&self) -> &Vec<u64> {
|
||||
&self.ts1s
|
||||
}
|
||||
|
||||
fn ts2s(&self) -> &Vec<u64> {
|
||||
&self.ts2s
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for MinMaxAvgDim1Bins<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.ts1s.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> NewEmpty for MinMaxAvgDim1Bins<NTY> {
|
||||
fn empty(_shape: Shape) -> Self {
|
||||
Self {
|
||||
ts1s: Vec::new(),
|
||||
ts2s: Vec::new(),
|
||||
counts: Vec::new(),
|
||||
mins: Vec::new(),
|
||||
maxs: Vec::new(),
|
||||
avgs: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Appendable for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn empty_like_self(&self) -> Self {
|
||||
Self::empty()
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
self.ts1s.extend_from_slice(&src.ts1s);
|
||||
self.ts2s.extend_from_slice(&src.ts2s);
|
||||
self.counts.extend_from_slice(&src.counts);
|
||||
self.mins.extend_from_slice(&src.mins);
|
||||
self.maxs.extend_from_slice(&src.maxs);
|
||||
self.avgs.extend_from_slice(&src.avgs);
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, ts1: u64, ts2: u64) {
|
||||
self.ts1s.push(ts1);
|
||||
self.ts2s.push(ts2);
|
||||
self.counts.push(0);
|
||||
self.avgs.push(None);
|
||||
self.mins.push(None);
|
||||
self.maxs.push(None);
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ReadableFromFile for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
// TODO this function is not needed in the trait:
|
||||
fn read_from_file(file: File) -> Result<ReadPbv<Self>, Error> {
|
||||
Ok(ReadPbv::new(file))
|
||||
}
|
||||
|
||||
fn from_buf(buf: &[u8]) -> Result<Self, Error> {
|
||||
let dec = bincode_from_slice(buf)?;
|
||||
Ok(dec)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableType for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Output = MinMaxAvgDim1Bins<NTY>;
|
||||
type Aggregator = MinMaxAvgDim1BinsAggregator<NTY>;
|
||||
|
||||
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
|
||||
debug!(
|
||||
"TimeBinnableType for MinMaxAvgDim1Bins aggregator() range {:?} x_bin_count {} do_time_weight {}",
|
||||
range, x_bin_count, do_time_weight
|
||||
);
|
||||
Self::Aggregator::new(range, x_bin_count, do_time_weight)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ToJsonResult for Sitemty<MinMaxAvgDim1Bins<NTY>>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn to_json_result(&self) -> Result<Box<dyn ToJsonBytes>, Error> {
|
||||
Ok(Box::new(serde_json::Value::String(format!(
|
||||
"MinMaxAvgDim1Bins/non-json-item"
|
||||
))))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MinMaxAvgDim1BinsCollected<NTY> {
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> MinMaxAvgDim1BinsCollected<NTY> {
|
||||
pub fn new() -> Self {
|
||||
Self { _m1: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct MinMaxAvgDim1BinsCollectedResult<NTY> {
|
||||
ts_bin_edges: Vec<IsoDateTime>,
|
||||
counts: Vec<u64>,
|
||||
mins: Vec<Option<Vec<NTY>>>,
|
||||
maxs: Vec<Option<Vec<NTY>>>,
|
||||
avgs: Vec<Option<Vec<f32>>>,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")]
|
||||
finalised_range: bool,
|
||||
#[serde(skip_serializing_if = "Zero::is_zero", rename = "missingBins")]
|
||||
missing_bins: u32,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename = "continueAt")]
|
||||
continue_at: Option<IsoDateTime>,
|
||||
}
|
||||
|
||||
pub struct MinMaxAvgDim1BinsCollector<NTY> {
|
||||
bin_count_exp: u32,
|
||||
timed_out: bool,
|
||||
range_complete: bool,
|
||||
vals: MinMaxAvgDim1Bins<NTY>,
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> MinMaxAvgDim1BinsCollector<NTY> {
|
||||
pub fn new(bin_count_exp: u32) -> Self {
|
||||
Self {
|
||||
bin_count_exp,
|
||||
timed_out: false,
|
||||
range_complete: false,
|
||||
vals: MinMaxAvgDim1Bins::<NTY>::empty(),
|
||||
_m1: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for MinMaxAvgDim1BinsCollector<NTY>
|
||||
where
|
||||
NTY: NumOps + Serialize,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.vals.ts1s.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collector for MinMaxAvgDim1BinsCollector<NTY>
|
||||
where
|
||||
NTY: NumOps + Serialize,
|
||||
{
|
||||
type Input = MinMaxAvgDim1Bins<NTY>;
|
||||
type Output = MinMaxAvgDim1BinsCollectedResult<NTY>;
|
||||
|
||||
fn ingest(&mut self, src: &Self::Input) {
|
||||
Appendable::append(&mut self.vals, src);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_complete = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
}
|
||||
|
||||
fn result(self) -> Result<Self::Output, Error> {
|
||||
let bin_count = self.vals.ts1s.len() as u32;
|
||||
let mut tsa: Vec<_> = self
|
||||
.vals
|
||||
.ts1s
|
||||
.iter()
|
||||
.map(|&k| IsoDateTime(Utc.timestamp_nanos(k as i64)))
|
||||
.collect();
|
||||
if let Some(&z) = self.vals.ts2s.last() {
|
||||
tsa.push(IsoDateTime(Utc.timestamp_nanos(z as i64)));
|
||||
}
|
||||
let tsa = tsa;
|
||||
let continue_at = if self.vals.ts1s.len() < self.bin_count_exp as usize {
|
||||
match tsa.last() {
|
||||
Some(k) => Some(k.clone()),
|
||||
None => Err(Error::with_msg("partial_content but no bin in result"))?,
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let ret = MinMaxAvgDim1BinsCollectedResult::<NTY> {
|
||||
ts_bin_edges: tsa,
|
||||
counts: self.vals.counts,
|
||||
mins: self.vals.mins,
|
||||
maxs: self.vals.maxs,
|
||||
avgs: self.vals.avgs,
|
||||
finalised_range: self.range_complete,
|
||||
missing_bins: self.bin_count_exp - bin_count,
|
||||
continue_at,
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collectable for MinMaxAvgDim1Bins<NTY>
|
||||
where
|
||||
NTY: NumOps + Serialize,
|
||||
{
|
||||
type Collector = MinMaxAvgDim1BinsCollector<NTY>;
|
||||
|
||||
fn new_collector(bin_count_exp: u32) -> Self::Collector {
|
||||
Self::Collector::new(bin_count_exp)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MinMaxAvgDim1BinsAggregator<NTY> {
|
||||
range: NanoRange,
|
||||
count: u64,
|
||||
min: Option<Vec<NTY>>,
|
||||
max: Option<Vec<NTY>>,
|
||||
sumc: u64,
|
||||
sum: Option<Vec<f32>>,
|
||||
}
|
||||
|
||||
impl<NTY> MinMaxAvgDim1BinsAggregator<NTY> {
|
||||
pub fn new(range: NanoRange, _x_bin_count: usize, do_time_weight: bool) -> Self {
|
||||
if do_time_weight {
|
||||
err::todo();
|
||||
}
|
||||
Self {
|
||||
range,
|
||||
count: 0,
|
||||
// TODO get rid of Option
|
||||
min: err::todoval(),
|
||||
max: None,
|
||||
sumc: 0,
|
||||
sum: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableTypeAggregator for MinMaxAvgDim1BinsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = MinMaxAvgDim1Bins<NTY>;
|
||||
type Output = MinMaxAvgDim1Bins<NTY>;
|
||||
|
||||
fn range(&self) -> &NanoRange {
|
||||
&self.range
|
||||
}
|
||||
|
||||
fn ingest(&mut self, item: &Self::Input) {
|
||||
for i1 in 0..item.ts1s.len() {
|
||||
if item.ts2s[i1] <= self.range.beg {
|
||||
continue;
|
||||
} else if item.ts1s[i1] >= self.range.end {
|
||||
continue;
|
||||
} else {
|
||||
match self.min.as_mut() {
|
||||
None => self.min = item.mins[i1].clone(),
|
||||
Some(min) => match item.mins[i1].as_ref() {
|
||||
None => {}
|
||||
Some(v) => {
|
||||
for (a, b) in min.iter_mut().zip(v.iter()) {
|
||||
if b < a {
|
||||
*a = b.clone();
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
match self.max.as_mut() {
|
||||
None => self.max = item.maxs[i1].clone(),
|
||||
Some(max) => match item.maxs[i1].as_ref() {
|
||||
None => {}
|
||||
Some(v) => {
|
||||
for (a, b) in max.iter_mut().zip(v.iter()) {
|
||||
if b > a {
|
||||
*a = b.clone();
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
match self.sum.as_mut() {
|
||||
None => {
|
||||
self.sum = item.avgs[i1].clone();
|
||||
}
|
||||
Some(sum) => match item.avgs[i1].as_ref() {
|
||||
None => {}
|
||||
Some(v) => {
|
||||
for (a, b) in sum.iter_mut().zip(v.iter()) {
|
||||
if (*b).is_nan() {
|
||||
} else {
|
||||
*a += *b;
|
||||
}
|
||||
}
|
||||
self.sumc += 1;
|
||||
}
|
||||
},
|
||||
}
|
||||
self.count += item.counts[i1];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output {
|
||||
let avg = if self.sumc == 0 {
|
||||
None
|
||||
} else {
|
||||
let avg = self
|
||||
.sum
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|k| k / self.sumc as f32)
|
||||
.collect();
|
||||
Some(avg)
|
||||
};
|
||||
let ret = Self::Output {
|
||||
ts1s: vec![self.range.beg],
|
||||
ts2s: vec![self.range.end],
|
||||
counts: vec![self.count],
|
||||
// TODO replace with reset-value instead:
|
||||
mins: vec![self.min.clone()],
|
||||
maxs: vec![self.max.clone()],
|
||||
avgs: vec![avg],
|
||||
};
|
||||
self.range = range;
|
||||
self.count = 0;
|
||||
self.min = None;
|
||||
self.max = None;
|
||||
self.sum = None;
|
||||
self.sumc = 0;
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct WaveEventsCollectedResult<NTY> {
|
||||
#[serde(rename = "tsAnchor")]
|
||||
ts_anchor_sec: u64,
|
||||
#[serde(rename = "tsMs")]
|
||||
ts_off_ms: Vec<u64>,
|
||||
#[serde(rename = "tsNs")]
|
||||
ts_off_ns: Vec<u64>,
|
||||
#[serde(rename = "pulseAnchor")]
|
||||
pulse_anchor: u64,
|
||||
#[serde(rename = "pulseOff")]
|
||||
pulse_off: Vec<u64>,
|
||||
values: Vec<Vec<NTY>>,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")]
|
||||
range_complete: bool,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")]
|
||||
timed_out: bool,
|
||||
}
|
||||
|
||||
pub struct WaveEventsCollector<NTY> {
|
||||
vals: WaveEvents<NTY>,
|
||||
range_complete: bool,
|
||||
timed_out: bool,
|
||||
}
|
||||
|
||||
impl<NTY> WaveEventsCollector<NTY> {
|
||||
pub fn new(_bin_count_exp: u32) -> Self {
|
||||
info!("\n\nWaveEventsCollector\n\n");
|
||||
Self {
|
||||
vals: WaveEvents::empty(),
|
||||
range_complete: false,
|
||||
timed_out: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for WaveEventsCollector<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.vals.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collector for WaveEventsCollector<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = WaveEvents<NTY>;
|
||||
type Output = WaveEventsCollectedResult<NTY>;
|
||||
|
||||
fn ingest(&mut self, src: &Self::Input) {
|
||||
self.vals.append(src);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_complete = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
}
|
||||
|
||||
fn result(self) -> Result<Self::Output, Error> {
|
||||
let tst = ts_offs_from_abs(&self.vals.tss);
|
||||
let (pulse_anchor, pulse_off) = pulse_offs_from_abs(&self.vals.pulses);
|
||||
let ret = Self::Output {
|
||||
ts_anchor_sec: tst.0,
|
||||
ts_off_ms: tst.1,
|
||||
ts_off_ns: tst.2,
|
||||
pulse_anchor,
|
||||
pulse_off,
|
||||
values: self.vals.vals,
|
||||
range_complete: self.range_complete,
|
||||
timed_out: self.timed_out,
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collectable for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Collector = WaveEventsCollector<NTY>;
|
||||
|
||||
fn new_collector(bin_count_exp: u32) -> Self::Collector {
|
||||
Self::Collector::new(bin_count_exp)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: NumOps> crate::TimeBinnableDynStub for MinMaxAvgDim1Bins<NTY> {}
|
||||
|
||||
impl<NTY: NumOps> TimeBinned for MinMaxAvgDim1Bins<NTY> {
|
||||
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
|
||||
self as &dyn TimeBinnableDyn
|
||||
}
|
||||
|
||||
fn edges_slice(&self) -> (&[u64], &[u64]) {
|
||||
(&self.ts1s[..], &self.ts2s[..])
|
||||
}
|
||||
|
||||
fn counts(&self) -> &[u64] {
|
||||
&self.counts[..]
|
||||
}
|
||||
|
||||
fn avgs(&self) -> Vec<f32> {
|
||||
err::todoval()
|
||||
}
|
||||
|
||||
fn mins(&self) -> Vec<f32> {
|
||||
err::todoval()
|
||||
}
|
||||
|
||||
fn maxs(&self) -> Vec<f32> {
|
||||
err::todoval()
|
||||
}
|
||||
|
||||
fn validate(&self) -> Result<(), String> {
|
||||
err::todoval()
|
||||
}
|
||||
}
|
||||
@@ -1,146 +0,0 @@
|
||||
use crate::binnedevents::XBinnedEvents;
|
||||
use crate::plainevents::PlainEvents;
|
||||
use crate::{Appendable, Clearable, FrameTypeInnerDyn, PushableIndex, WithLen, WithTimestamps};
|
||||
use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// TODO remove
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum EventsItem {
|
||||
Plain(PlainEvents),
|
||||
XBinnedEvents(XBinnedEvents),
|
||||
}
|
||||
|
||||
impl FrameTypeInnerDyn for EventsItem {
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
crate::EVENTS_ITEM_FRAME_TYPE_ID
|
||||
}
|
||||
}
|
||||
|
||||
impl EventsItem {
|
||||
pub fn is_wave(&self) -> bool {
|
||||
use EventsItem::*;
|
||||
match self {
|
||||
Plain(h) => h.is_wave(),
|
||||
XBinnedEvents(h) => {
|
||||
if let Shape::Wave(_) = h.shape() {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn variant_name(&self) -> String {
|
||||
use EventsItem::*;
|
||||
match self {
|
||||
Plain(h) => format!("Plain({})", h.variant_name()),
|
||||
XBinnedEvents(h) => format!("XBinnedEvents({})", h.variant_name()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn x_aggregate(self, ak: &AggKind) -> Self {
|
||||
use EventsItem::*;
|
||||
match self {
|
||||
Plain(k) => k.x_aggregate(ak),
|
||||
XBinnedEvents(k) => k.x_aggregate(ak),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn type_info(&self) -> (ScalarType, Shape) {
|
||||
(self.scalar_type(), self.shape())
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for EventsItem {
|
||||
fn len(&self) -> usize {
|
||||
use EventsItem::*;
|
||||
match self {
|
||||
Plain(j) => j.len(),
|
||||
XBinnedEvents(j) => j.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTimestamps for EventsItem {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
use EventsItem::*;
|
||||
match self {
|
||||
Plain(j) => j.ts(ix),
|
||||
XBinnedEvents(j) => j.ts(ix),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Appendable for EventsItem {
|
||||
fn empty_like_self(&self) -> Self {
|
||||
match self {
|
||||
EventsItem::Plain(k) => EventsItem::Plain(k.empty_like_self()),
|
||||
EventsItem::XBinnedEvents(k) => EventsItem::XBinnedEvents(k.empty_like_self()),
|
||||
}
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
match self {
|
||||
Self::Plain(k) => match src {
|
||||
Self::Plain(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::XBinnedEvents(k) => match src {
|
||||
Self::XBinnedEvents(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
|
||||
// TODO can this implement Appendable in a sane way? Do we need it?
|
||||
// TODO can we remove EventsItem?
|
||||
err::todo();
|
||||
}
|
||||
}
|
||||
|
||||
impl PushableIndex for EventsItem {
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
match self {
|
||||
Self::Plain(k) => match src {
|
||||
Self::Plain(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::XBinnedEvents(k) => match src {
|
||||
Self::XBinnedEvents(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clearable for EventsItem {
|
||||
fn clear(&mut self) {
|
||||
match self {
|
||||
EventsItem::Plain(k) => k.clear(),
|
||||
EventsItem::XBinnedEvents(k) => k.clear(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasShape for EventsItem {
|
||||
fn shape(&self) -> Shape {
|
||||
use EventsItem::*;
|
||||
match self {
|
||||
Plain(h) => h.shape(),
|
||||
XBinnedEvents(h) => h.shape(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasScalarType for EventsItem {
|
||||
fn scalar_type(&self) -> ScalarType {
|
||||
use EventsItem::*;
|
||||
match self {
|
||||
Plain(h) => h.scalar_type(),
|
||||
XBinnedEvents(h) => h.scalar_type(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,40 +1,42 @@
|
||||
pub mod binnedevents;
|
||||
pub mod binsdim0;
|
||||
pub mod binsdim1;
|
||||
pub mod eventfull;
|
||||
pub mod eventsitem;
|
||||
pub mod frame;
|
||||
pub mod inmem;
|
||||
pub mod numops;
|
||||
pub mod plainevents;
|
||||
pub mod scalarevents;
|
||||
pub mod streams;
|
||||
pub mod waveevents;
|
||||
pub mod xbinnedscalarevents;
|
||||
pub mod xbinnedwaveevents;
|
||||
|
||||
use crate::frame::make_frame_2;
|
||||
use bytes::BytesMut;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use chrono::TimeZone;
|
||||
use chrono::Utc;
|
||||
use err::Error;
|
||||
use frame::{make_error_frame, make_log_frame, make_range_complete_frame, make_stats_frame};
|
||||
use frame::make_error_frame;
|
||||
use frame::make_log_frame;
|
||||
use frame::make_range_complete_frame;
|
||||
use frame::make_stats_frame;
|
||||
use items_0::AsAnyRef;
|
||||
use netpod::log::Level;
|
||||
#[allow(unused)]
|
||||
use netpod::log::*;
|
||||
use netpod::timeunits::{MS, SEC};
|
||||
use netpod::{log::Level, AggKind, EventDataReadStats, NanoRange, Shape};
|
||||
use netpod::{DiskStats, RangeFilterStats, ScalarType};
|
||||
use netpod::timeunits::MS;
|
||||
use netpod::timeunits::SEC;
|
||||
use netpod::DiskStats;
|
||||
use netpod::EventDataReadStats;
|
||||
use netpod::NanoRange;
|
||||
use netpod::RangeFilterStats;
|
||||
use netpod::Shape;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::{Deserialize, Serialize, Serializer};
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use serde::Serializer;
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::task::Context;
|
||||
use std::task::Poll;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::{AsyncRead, ReadBuf};
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio::io::ReadBuf;
|
||||
|
||||
pub const TERM_FRAME_TYPE_ID: u32 = 0xaa01;
|
||||
pub const ERROR_FRAME_TYPE_ID: u32 = 0xaa02;
|
||||
@@ -371,32 +373,6 @@ impl FrameType for EventQueryJsonStringFrame {
|
||||
}
|
||||
}
|
||||
|
||||
pub trait EventsNodeProcessorOutput:
|
||||
fmt::Debug + Send + Unpin + DeserializeOwned + WithTimestamps + TimeBinnableType + ByteEstimate
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any;
|
||||
fn into_parts(self) -> (Box<dyn Any>, VecDeque<u64>, VecDeque<u64>);
|
||||
}
|
||||
|
||||
pub trait EventsNodeProcessor: Send + Unpin {
|
||||
type Input;
|
||||
type Output: EventsNodeProcessorOutput;
|
||||
fn create(shape: Shape, agg_kind: AggKind) -> Self;
|
||||
fn process(&self, inp: Self::Input) -> Self::Output;
|
||||
}
|
||||
|
||||
pub trait EventsTypeAliases {
|
||||
type TimeBinOutput;
|
||||
}
|
||||
|
||||
impl<ENP> EventsTypeAliases for ENP
|
||||
where
|
||||
ENP: EventsNodeProcessor,
|
||||
<ENP as EventsNodeProcessor>::Output: TimeBinnableType,
|
||||
{
|
||||
type TimeBinOutput = <<ENP as EventsNodeProcessor>::Output as TimeBinnableType>::Output;
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct IsoDateTime(chrono::DateTime<Utc>);
|
||||
|
||||
@@ -631,7 +607,7 @@ impl<T> ReadPbv<T>
|
||||
where
|
||||
T: ReadableFromFile,
|
||||
{
|
||||
fn new(file: File) -> Self {
|
||||
pub fn new(file: File) -> Self {
|
||||
Self {
|
||||
// TODO make buffer size a parameter:
|
||||
buf: vec![0; 1024 * 32],
|
||||
@@ -743,178 +719,3 @@ pub trait TimeBinnerDyn: Send {
|
||||
/// The next call to `Self::bins_ready_count` must return one higher count than before.
|
||||
fn cycle(&mut self);
|
||||
}
|
||||
|
||||
pub fn empty_events_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box<dyn TimeBinnableDyn> {
|
||||
match shape {
|
||||
Shape::Scalar => match agg_kind {
|
||||
AggKind::TimeWeightedScalar => {
|
||||
use ScalarType::*;
|
||||
type K<T> = scalarevents::ScalarEvents<T>;
|
||||
match scalar_type {
|
||||
U8 => Box::new(K::<u8>::empty()),
|
||||
U16 => Box::new(K::<u16>::empty()),
|
||||
U32 => Box::new(K::<u32>::empty()),
|
||||
U64 => Box::new(K::<u64>::empty()),
|
||||
I8 => Box::new(K::<i8>::empty()),
|
||||
I16 => Box::new(K::<i16>::empty()),
|
||||
I32 => Box::new(K::<i32>::empty()),
|
||||
I64 => Box::new(K::<i64>::empty()),
|
||||
F32 => Box::new(K::<f32>::empty()),
|
||||
F64 => Box::new(K::<f64>::empty()),
|
||||
_ => {
|
||||
error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind);
|
||||
err::todoval()
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind);
|
||||
err::todoval()
|
||||
}
|
||||
},
|
||||
Shape::Wave(_n) => match agg_kind {
|
||||
AggKind::DimXBins1 => {
|
||||
use ScalarType::*;
|
||||
type K<T> = waveevents::WaveEvents<T>;
|
||||
match scalar_type {
|
||||
U8 => Box::new(K::<u8>::empty()),
|
||||
F32 => Box::new(K::<f32>::empty()),
|
||||
F64 => Box::new(K::<f64>::empty()),
|
||||
BOOL => Box::new(K::<bool>::empty()),
|
||||
_ => {
|
||||
error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind);
|
||||
err::todoval()
|
||||
}
|
||||
}
|
||||
}
|
||||
AggKind::Plain => {
|
||||
use ScalarType::*;
|
||||
type K<T> = waveevents::WaveEvents<T>;
|
||||
match scalar_type {
|
||||
U8 => Box::new(K::<u8>::empty()),
|
||||
F32 => Box::new(K::<f32>::empty()),
|
||||
F64 => Box::new(K::<f64>::empty()),
|
||||
BOOL => Box::new(K::<bool>::empty()),
|
||||
_ => {
|
||||
error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind);
|
||||
err::todoval()
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind);
|
||||
err::todoval()
|
||||
}
|
||||
},
|
||||
Shape::Image(..) => {
|
||||
error!("TODO for {:?} {:?} {:?}", scalar_type, shape, agg_kind);
|
||||
err::todoval()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn empty_binned_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggKind) -> Box<dyn TimeBinnableDyn> {
|
||||
match shape {
|
||||
Shape::Scalar => match agg_kind {
|
||||
AggKind::TimeWeightedScalar => {
|
||||
use ScalarType::*;
|
||||
type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
|
||||
match scalar_type {
|
||||
U8 => Box::new(K::<u8>::empty()),
|
||||
U16 => Box::new(K::<u16>::empty()),
|
||||
U32 => Box::new(K::<u32>::empty()),
|
||||
U64 => Box::new(K::<u64>::empty()),
|
||||
I8 => Box::new(K::<i8>::empty()),
|
||||
I16 => Box::new(K::<i16>::empty()),
|
||||
I32 => Box::new(K::<i32>::empty()),
|
||||
I64 => Box::new(K::<i64>::empty()),
|
||||
F32 => Box::new(K::<f32>::empty()),
|
||||
F64 => Box::new(K::<f64>::empty()),
|
||||
_ => err::todoval(),
|
||||
}
|
||||
}
|
||||
_ => err::todoval(),
|
||||
},
|
||||
Shape::Wave(_n) => match agg_kind {
|
||||
AggKind::DimXBins1 => {
|
||||
use ScalarType::*;
|
||||
type K<T> = binsdim0::MinMaxAvgDim0Bins<T>;
|
||||
match scalar_type {
|
||||
U8 => Box::new(K::<u8>::empty()),
|
||||
F32 => Box::new(K::<f32>::empty()),
|
||||
F64 => Box::new(K::<f64>::empty()),
|
||||
_ => err::todoval(),
|
||||
}
|
||||
}
|
||||
_ => err::todoval(),
|
||||
},
|
||||
Shape::Image(..) => err::todoval(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bin_binned_01() {
|
||||
use binsdim0::MinMaxAvgDim0Bins;
|
||||
let edges = vec![SEC * 1000, SEC * 1010, SEC * 1020, SEC * 1030];
|
||||
let inp0 = <MinMaxAvgDim0Bins<u32> as NewEmpty>::empty(Shape::Scalar);
|
||||
let mut time_binner = inp0.time_binner_new(edges, true);
|
||||
let inp1 = MinMaxAvgDim0Bins::<u32> {
|
||||
ts1s: vec![SEC * 1000, SEC * 1010],
|
||||
ts2s: vec![SEC * 1010, SEC * 1020],
|
||||
counts: vec![1, 1],
|
||||
mins: vec![3, 4],
|
||||
maxs: vec![10, 9],
|
||||
avgs: vec![7., 6.],
|
||||
};
|
||||
assert_eq!(time_binner.bins_ready_count(), 0);
|
||||
time_binner.ingest(&inp1);
|
||||
assert_eq!(time_binner.bins_ready_count(), 1);
|
||||
time_binner.push_in_progress(false);
|
||||
assert_eq!(time_binner.bins_ready_count(), 2);
|
||||
// From here on, pushing any more should not change the bin count:
|
||||
time_binner.push_in_progress(false);
|
||||
assert_eq!(time_binner.bins_ready_count(), 2);
|
||||
// On the other hand, cycling should add one more zero-bin:
|
||||
time_binner.cycle();
|
||||
assert_eq!(time_binner.bins_ready_count(), 3);
|
||||
time_binner.cycle();
|
||||
assert_eq!(time_binner.bins_ready_count(), 3);
|
||||
let bins = time_binner.bins_ready().expect("bins should be ready");
|
||||
eprintln!("bins: {:?}", bins);
|
||||
assert_eq!(time_binner.bins_ready_count(), 0);
|
||||
assert_eq!(bins.counts(), &[1, 1, 0]);
|
||||
// TODO use proper float-compare logic:
|
||||
assert_eq!(bins.mins(), &[3., 4., 0.]);
|
||||
assert_eq!(bins.maxs(), &[10., 9., 0.]);
|
||||
assert_eq!(bins.avgs(), &[7., 6., 0.]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bin_binned_02() {
|
||||
use binsdim0::MinMaxAvgDim0Bins;
|
||||
let edges = vec![SEC * 1000, SEC * 1020];
|
||||
let inp0 = <MinMaxAvgDim0Bins<u32> as NewEmpty>::empty(Shape::Scalar);
|
||||
let mut time_binner = inp0.time_binner_new(edges, true);
|
||||
let inp1 = MinMaxAvgDim0Bins::<u32> {
|
||||
ts1s: vec![SEC * 1000, SEC * 1010],
|
||||
ts2s: vec![SEC * 1010, SEC * 1020],
|
||||
counts: vec![1, 1],
|
||||
mins: vec![3, 4],
|
||||
maxs: vec![10, 9],
|
||||
avgs: vec![7., 6.],
|
||||
};
|
||||
assert_eq!(time_binner.bins_ready_count(), 0);
|
||||
time_binner.ingest(&inp1);
|
||||
assert_eq!(time_binner.bins_ready_count(), 0);
|
||||
time_binner.cycle();
|
||||
assert_eq!(time_binner.bins_ready_count(), 1);
|
||||
time_binner.cycle();
|
||||
//assert_eq!(time_binner.bins_ready_count(), 2);
|
||||
let bins = time_binner.bins_ready().expect("bins should be ready");
|
||||
eprintln!("bins: {:?}", bins);
|
||||
assert_eq!(time_binner.bins_ready_count(), 0);
|
||||
assert_eq!(bins.counts(), &[2]);
|
||||
assert_eq!(bins.mins(), &[3.]);
|
||||
assert_eq!(bins.maxs(), &[10.]);
|
||||
assert_eq!(bins.avgs(), &[13. / 2.]);
|
||||
}
|
||||
|
||||
@@ -1,282 +0,0 @@
|
||||
use items_0::subfr::SubFrId;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::cmp::Ordering;
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Add;
|
||||
|
||||
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct BoolNum(pub u8);
|
||||
|
||||
impl BoolNum {
|
||||
pub const MIN: Self = Self(0);
|
||||
pub const MAX: Self = Self(1);
|
||||
}
|
||||
|
||||
impl Add<BoolNum> for BoolNum {
|
||||
type Output = BoolNum;
|
||||
|
||||
fn add(self, rhs: BoolNum) -> Self::Output {
|
||||
Self(self.0 + rhs.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl num_traits::Zero for BoolNum {
|
||||
fn zero() -> Self {
|
||||
Self(0)
|
||||
}
|
||||
|
||||
fn is_zero(&self) -> bool {
|
||||
self.0 == 0
|
||||
}
|
||||
}
|
||||
|
||||
impl num_traits::AsPrimitive<f32> for BoolNum {
|
||||
fn as_(self) -> f32 {
|
||||
self.0 as f32
|
||||
}
|
||||
}
|
||||
|
||||
impl num_traits::Bounded for BoolNum {
|
||||
fn min_value() -> Self {
|
||||
Self(0)
|
||||
}
|
||||
|
||||
fn max_value() -> Self {
|
||||
Self(1)
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for BoolNum {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
PartialEq::eq(&self.0, &other.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for BoolNum {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
PartialOrd::partial_cmp(&self.0, &other.0)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct StringNum(pub String);
|
||||
|
||||
impl StringNum {
|
||||
pub const MIN: Self = Self(String::new());
|
||||
pub const MAX: Self = Self(String::new());
|
||||
}
|
||||
|
||||
impl Add<StringNum> for StringNum {
|
||||
type Output = StringNum;
|
||||
|
||||
fn add(self, _rhs: StringNum) -> Self::Output {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl num_traits::Zero for StringNum {
|
||||
fn zero() -> Self {
|
||||
Self(String::new())
|
||||
}
|
||||
|
||||
fn is_zero(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl num_traits::Bounded for StringNum {
|
||||
fn min_value() -> Self {
|
||||
Self(String::new())
|
||||
}
|
||||
|
||||
fn max_value() -> Self {
|
||||
Self(String::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for StringNum {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
PartialEq::eq(&self.0, &other.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for StringNum {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
PartialOrd::partial_cmp(&self.0, &other.0)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait NumOps:
|
||||
Sized
|
||||
+ Clone
|
||||
+ AsPrimF32
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static
|
||||
+ Unpin
|
||||
+ Debug
|
||||
//+ Zero
|
||||
//+ Bounded
|
||||
+ PartialOrd
|
||||
+ SubFrId
|
||||
+ Serialize
|
||||
+ DeserializeOwned
|
||||
+ items_0::scalar_ops::ScalarOps
|
||||
{
|
||||
fn min_or_nan() -> Self;
|
||||
fn max_or_nan() -> Self;
|
||||
fn is_nan(&self) -> bool;
|
||||
fn zero() -> Self;
|
||||
}
|
||||
|
||||
macro_rules! impl_num_ops {
|
||||
($ty:ident, $min_or_nan:ident, $max_or_nan:ident, $is_nan:ident, $zero:expr) => {
|
||||
impl NumOps for $ty {
|
||||
fn min_or_nan() -> Self {
|
||||
$ty::$min_or_nan
|
||||
}
|
||||
fn max_or_nan() -> Self {
|
||||
$ty::$max_or_nan
|
||||
}
|
||||
fn is_nan(&self) -> bool {
|
||||
$is_nan(self)
|
||||
}
|
||||
fn zero() -> Self {
|
||||
$zero
|
||||
}
|
||||
}
|
||||
};
|
||||
}
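
For readability, a hand-expansion sketch of one invocation of this macro (the impl_num_ops!(f32, NAN, NAN, is_nan_f32, 0.) call further below); this only illustrates what the macro emits and is not additional code:

impl NumOps for f32 {
    fn min_or_nan() -> Self {
        f32::NAN
    }
    fn max_or_nan() -> Self {
        f32::NAN
    }
    fn is_nan(&self) -> bool {
        is_nan_f32(self)
    }
    fn zero() -> Self {
        0.
    }
}
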
impl AsPrimF32 for bool {
|
||||
fn as_prim_f32(&self) -> f32 {
|
||||
if *self {
|
||||
1.
|
||||
} else {
|
||||
0.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl NumOps for bool {
|
||||
fn min_or_nan() -> Self {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn max_or_nan() -> Self {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn is_nan(&self) -> bool {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn zero() -> Self {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn is_nan_int<T>(_x: &T) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn is_nan_f32(x: &f32) -> bool {
|
||||
f32::is_nan(*x)
|
||||
}
|
||||
|
||||
fn is_nan_f64(x: &f64) -> bool {
|
||||
f64::is_nan(*x)
|
||||
}
|
||||
|
||||
pub trait AsPrimF32 {
|
||||
fn as_prim_f32(&self) -> f32;
|
||||
}
|
||||
|
||||
macro_rules! impl_as_prim_f32 {
|
||||
($ty:ident) => {
|
||||
impl AsPrimF32 for $ty {
|
||||
fn as_prim_f32(&self) -> f32 {
|
||||
*self as f32
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_as_prim_f32!(u8);
|
||||
impl_as_prim_f32!(u16);
|
||||
impl_as_prim_f32!(u32);
|
||||
impl_as_prim_f32!(u64);
|
||||
impl_as_prim_f32!(i8);
|
||||
impl_as_prim_f32!(i16);
|
||||
impl_as_prim_f32!(i32);
|
||||
impl_as_prim_f32!(i64);
|
||||
impl_as_prim_f32!(f32);
|
||||
impl_as_prim_f32!(f64);
|
||||
|
||||
impl AsPrimF32 for BoolNum {
|
||||
fn as_prim_f32(&self) -> f32 {
|
||||
self.0 as f32
|
||||
}
|
||||
}
|
||||
|
||||
impl AsPrimF32 for StringNum {
|
||||
fn as_prim_f32(&self) -> f32 {
|
||||
netpod::log::error!("TODO impl AsPrimF32 for StringNum");
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl_num_ops!(u8, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(u16, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(u32, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(u64, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(i8, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(i16, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(i32, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(i64, MIN, MAX, is_nan_int, 0);
|
||||
impl_num_ops!(f32, NAN, NAN, is_nan_f32, 0.);
|
||||
impl_num_ops!(f64, NAN, NAN, is_nan_f64, 0.);
|
||||
impl_num_ops!(BoolNum, MIN, MAX, is_nan_int, BoolNum(0));
|
||||
impl_num_ops!(StringNum, MIN, MAX, is_nan_int, StringNum(String::new()));
|
||||
|
||||
impl SubFrId for StringNum {
|
||||
const SUB: u32 = 0x0d;
|
||||
}
|
||||
|
||||
impl SubFrId for BoolNum {
|
||||
const SUB: u32 = 0x0e;
|
||||
}
|
||||
|
||||
impl items_0::scalar_ops::AsPrimF32 for BoolNum {
|
||||
fn as_prim_f32_b(&self) -> f32 {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl items_0::scalar_ops::AsPrimF32 for StringNum {
|
||||
fn as_prim_f32_b(&self) -> f32 {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl items_0::scalar_ops::ScalarOps for BoolNum {
|
||||
fn zero_b() -> Self {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn equal_slack(&self, _rhs: &Self) -> bool {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl items_0::scalar_ops::ScalarOps for StringNum {
|
||||
fn zero_b() -> Self {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn equal_slack(&self, _rhs: &Self) -> bool {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
@@ -1,190 +0,0 @@
|
||||
use crate::eventsitem::EventsItem;
|
||||
use crate::scalarevents::ScalarEvents;
|
||||
use crate::{Appendable, Clearable, PushableIndex, WithLen, WithTimestamps};
|
||||
use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum ScalarPlainEvents {
|
||||
U8(ScalarEvents<u8>),
|
||||
U16(ScalarEvents<u16>),
|
||||
U32(ScalarEvents<u32>),
|
||||
U64(ScalarEvents<u64>),
|
||||
I8(ScalarEvents<i8>),
|
||||
I16(ScalarEvents<i16>),
|
||||
I32(ScalarEvents<i32>),
|
||||
I64(ScalarEvents<i64>),
|
||||
F32(ScalarEvents<f32>),
|
||||
F64(ScalarEvents<f64>),
|
||||
String(ScalarEvents<String>),
|
||||
}
|
||||
|
||||
impl ScalarPlainEvents {
|
||||
pub fn variant_name(&self) -> String {
|
||||
items_proc::tycases1!(self, Self, (k), { "$id".into() })
|
||||
}
|
||||
}
|
||||
|
||||
impl Clearable for ScalarPlainEvents {
|
||||
fn clear(&mut self) {
|
||||
items_proc::tycases1!(self, Self, (k), { k.clear() })
|
||||
}
|
||||
}
|
||||
|
||||
impl Appendable for ScalarPlainEvents {
|
||||
fn empty_like_self(&self) -> Self {
|
||||
items_proc::tycases1!(self, Self, (k), { Self::$id(k.empty_like_self()) })
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
items_proc::tycases1!(self, Self, (k), {
|
||||
match src {
|
||||
Self::$id(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
|
||||
// TODO can this implement Appendable in a sane way? Do we need it?
|
||||
err::todo();
|
||||
}
|
||||
}
|
||||
|
||||
impl PushableIndex for ScalarPlainEvents {
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
items_proc::tycases1!(self, Self, (k), {
|
||||
match src {
|
||||
Self::$id(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for ScalarPlainEvents {
|
||||
fn len(&self) -> usize {
|
||||
items_proc::tycases1!(self, Self, (k), { k.len() })
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTimestamps for ScalarPlainEvents {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
items_proc::tycases1!(self, Self, (k), { k.ts(ix) })
|
||||
}
|
||||
}
|
||||
|
||||
impl HasShape for ScalarPlainEvents {
|
||||
fn shape(&self) -> Shape {
|
||||
Shape::Scalar
|
||||
}
|
||||
}
|
||||
|
||||
impl HasScalarType for ScalarPlainEvents {
|
||||
fn scalar_type(&self) -> ScalarType {
|
||||
items_proc::tycases1!(self, Self, (k), { ScalarType::$id })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum PlainEvents {
|
||||
Scalar(ScalarPlainEvents),
|
||||
}
|
||||
|
||||
impl PlainEvents {
|
||||
pub fn is_wave(&self) -> bool {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn variant_name(&self) -> String {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(h) => format!("Scalar({})", h.variant_name()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn x_aggregate(self, _: &AggKind) -> EventsItem {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(k) => EventsItem::Plain(PlainEvents::Scalar(k)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clearable for PlainEvents {
|
||||
fn clear(&mut self) {
|
||||
match self {
|
||||
PlainEvents::Scalar(k) => k.clear(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Appendable for PlainEvents {
|
||||
fn empty_like_self(&self) -> Self {
|
||||
match self {
|
||||
Self::Scalar(k) => Self::Scalar(k.empty_like_self()),
|
||||
}
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
match self {
|
||||
PlainEvents::Scalar(k) => match src {
|
||||
Self::Scalar(j) => k.append(j),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, _ts1: u64, _ts2: u64) {
|
||||
// TODO can this implement Appendable in a sane way? Do we need it?
|
||||
err::todo();
|
||||
}
|
||||
}
|
||||
|
||||
impl PushableIndex for PlainEvents {
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
match self {
|
||||
Self::Scalar(k) => match src {
|
||||
Self::Scalar(j) => k.push_index(j, ix),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for PlainEvents {
|
||||
fn len(&self) -> usize {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(j) => j.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTimestamps for PlainEvents {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(j) => j.ts(ix),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasShape for PlainEvents {
|
||||
fn shape(&self) -> Shape {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(h) => HasShape::shape(h),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasScalarType for PlainEvents {
|
||||
fn scalar_type(&self) -> ScalarType {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(h) => h.scalar_type(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,847 +0,0 @@
|
||||
use crate::binsdim0::MinMaxAvgDim0Bins;
use crate::numops::NumOps;
use crate::streams::{Collectable, Collector};
use crate::{
    pulse_offs_from_abs, ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn,
    EventsNodeProcessorOutput, FilterFittingInside, Fits, FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty,
    PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, TimeBinnableDyn, TimeBinnableType,
    TimeBinnableTypeAggregator, TimeBinnerDyn, WithLen, WithTimestamps,
};
use err::Error;
use items_0::AsAnyRef;
use netpod::log::*;
use netpod::{NanoRange, Shape};
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use tokio::fs::File;

// TODO in this module reduce clones.

#[derive(Serialize, Deserialize)]
pub struct ScalarEvents<NTY> {
    pub tss: Vec<u64>,
    pub pulses: Vec<u64>,
    pub values: Vec<NTY>,
}

impl<NTY> ScalarEvents<NTY> {
    pub fn empty() -> Self {
        Self {
            tss: vec![],
            pulses: vec![],
            values: vec![],
        }
    }

    #[inline(always)]
    pub fn push(&mut self, ts: u64, pulse: u64, value: NTY) {
        self.tss.push(ts);
        self.pulses.push(pulse);
        self.values.push(value);
    }

    // TODO should avoid the copies.
    #[inline(always)]
    pub fn extend_from_slice(&mut self, src: &Self)
    where
        NTY: Clone,
    {
        self.tss.extend_from_slice(&src.tss);
        self.pulses.extend_from_slice(&src.pulses);
        self.values.extend_from_slice(&src.values);
    }

    #[inline(always)]
    pub fn clearx(&mut self) {
        self.tss.clear();
        self.pulses.clear();
        self.values.clear();
    }
}

impl<NTY> FrameTypeInnerStatic for ScalarEvents<NTY>
where
    NTY: NumOps,
{
    const FRAME_TYPE_ID: u32 = crate::EVENTS_0D_FRAME_TYPE_ID + NTY::SUB;
}

impl<NTY> FrameType for ScalarEvents<NTY>
where
    NTY: NumOps,
{
    fn frame_type_id(&self) -> u32 {
        <Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
    }
}

impl<NTY> fmt::Debug for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
fmt,
|
||||
"count {} ts {:?} .. {:?} vals {:?} .. {:?}",
|
||||
self.tss.len(),
|
||||
self.tss.first(),
|
||||
self.tss.last(),
|
||||
self.values.first(),
|
||||
self.values.last(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyRef for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithTimestamps for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
self.tss[ix]
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ByteEstimate for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
if self.tss.len() == 0 {
|
||||
0
|
||||
} else {
|
||||
// TODO improve via a const fn on NTY
|
||||
self.tss.len() as u64 * 16
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> RangeOverlapInfo for ScalarEvents<NTY> {
|
||||
fn ends_before(&self, range: NanoRange) -> bool {
|
||||
match self.tss.last() {
|
||||
Some(&ts) => ts < range.beg,
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn ends_after(&self, range: NanoRange) -> bool {
|
||||
match self.tss.last() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn starts_after(&self, range: NanoRange) -> bool {
|
||||
match self.tss.first() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FitsInside for ScalarEvents<NTY> {
|
||||
fn fits_inside(&self, range: NanoRange) -> Fits {
|
||||
if self.tss.is_empty() {
|
||||
Fits::Empty
|
||||
} else {
|
||||
let t1 = *self.tss.first().unwrap();
|
||||
let t2 = *self.tss.last().unwrap();
|
||||
if t2 < range.beg {
|
||||
Fits::Lower
|
||||
} else if t1 > range.end {
|
||||
Fits::Greater
|
||||
} else if t1 < range.beg && t2 > range.end {
|
||||
Fits::PartlyLowerAndGreater
|
||||
} else if t1 < range.beg {
|
||||
Fits::PartlyLower
|
||||
} else if t2 > range.end {
|
||||
Fits::PartlyGreater
|
||||
} else {
|
||||
Fits::Inside
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FilterFittingInside for ScalarEvents<NTY> {
|
||||
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
|
||||
match self.fits_inside(fit_range) {
|
||||
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> PushableIndex for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
self.push(src.tss[ix], src.pulses[ix], src.values[ix].clone());
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> NewEmpty for ScalarEvents<NTY> {
|
||||
fn empty(_shape: Shape) -> Self {
|
||||
Self {
|
||||
tss: Vec::new(),
|
||||
pulses: Vec::new(),
|
||||
values: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Appendable for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn empty_like_self(&self) -> Self {
|
||||
Self::empty()
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
self.extend_from_slice(src);
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, ts1: u64, _ts2: u64) {
|
||||
self.tss.push(ts1);
|
||||
self.pulses.push(0);
|
||||
self.values.push(NTY::zero());
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Clearable for ScalarEvents<NTY> {
|
||||
fn clear(&mut self) {
|
||||
ScalarEvents::<NTY>::clearx(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ReadableFromFile for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn read_from_file(_file: File) -> Result<ReadPbv<Self>, Error> {
|
||||
// TODO refactor types such that this can be removed.
|
||||
panic!()
|
||||
}
|
||||
|
||||
fn from_buf(_buf: &[u8]) -> Result<Self, Error> {
|
||||
panic!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableType for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Output = MinMaxAvgDim0Bins<NTY>;
|
||||
type Aggregator = EventValuesAggregator<NTY>;
|
||||
|
||||
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
|
||||
debug!(
|
||||
"TimeBinnableType for EventValues aggregator() range {:?} x_bin_count {} do_time_weight {}",
|
||||
range, x_bin_count, do_time_weight
|
||||
);
|
||||
Self::Aggregator::new(range, do_time_weight)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventValuesCollector<NTY> {
|
||||
vals: ScalarEvents<NTY>,
|
||||
range_complete: bool,
|
||||
timed_out: bool,
|
||||
}
|
||||
|
||||
impl<NTY> EventValuesCollector<NTY> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
vals: ScalarEvents::empty(),
|
||||
range_complete: false,
|
||||
timed_out: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for EventValuesCollector<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.vals.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct EventValuesCollectorOutput<NTY> {
|
||||
#[serde(rename = "tsAnchor")]
|
||||
ts_anchor_sec: u64,
|
||||
#[serde(rename = "tsMs")]
|
||||
ts_off_ms: Vec<u64>,
|
||||
#[serde(rename = "tsNs")]
|
||||
ts_off_ns: Vec<u64>,
|
||||
#[serde(rename = "pulseAnchor")]
|
||||
pulse_anchor: u64,
|
||||
#[serde(rename = "pulseOff")]
|
||||
pulse_off: Vec<u64>,
|
||||
values: Vec<NTY>,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")]
|
||||
range_complete: bool,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")]
|
||||
timed_out: bool,
|
||||
}
|
||||
|
||||
impl<NTY> Collector for EventValuesCollector<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = ScalarEvents<NTY>;
|
||||
type Output = EventValuesCollectorOutput<NTY>;
|
||||
|
||||
fn ingest(&mut self, src: &Self::Input) {
|
||||
self.vals.append(src);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.range_complete = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
}
|
||||
|
||||
fn result(self) -> Result<Self::Output, Error> {
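        // The collected output carries a coarse anchor in seconds (tsAnchor) plus per-event
        // millisecond and nanosecond offsets (tsMs, tsNs) derived from the absolute
        // nanosecond timestamps, and the analogous pulse anchor/offset split.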
|
||||
let tst = ts_offs_from_abs(&self.vals.tss);
|
||||
let (pulse_anchor, pulse_off) = pulse_offs_from_abs(&self.vals.pulses);
|
||||
let ret = Self::Output {
|
||||
ts_anchor_sec: tst.0,
|
||||
ts_off_ms: tst.1,
|
||||
ts_off_ns: tst.2,
|
||||
pulse_anchor,
|
||||
pulse_off,
|
||||
values: self.vals.values,
|
||||
range_complete: self.range_complete,
|
||||
timed_out: self.timed_out,
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collectable for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Collector = EventValuesCollector<NTY>;
|
||||
|
||||
fn new_collector(_bin_count_exp: u32) -> Self::Collector {
|
||||
Self::Collector::new()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventValuesAggregator<NTY> {
    range: NanoRange,
    count: u64,
    min: NTY,
    max: NTY,
    sumc: u64,
    sum: f32,
    int_ts: u64,
    last_ts: u64,
    last_val: Option<NTY>,
    do_time_weight: bool,
    events_taken_count: u64,
    events_ignored_count: u64,
}

impl<NTY> Drop for EventValuesAggregator<NTY> {
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        trace!(
            "taken {} ignored {}",
            self.events_taken_count,
            self.events_ignored_count
        );
    }
}

impl<NTY> EventValuesAggregator<NTY>
where
    NTY: NumOps,
{
    pub fn new(range: NanoRange, do_time_weight: bool) -> Self {
        let int_ts = range.beg;
        Self {
            range,
            count: 0,
            min: NTY::zero(),
            max: NTY::zero(),
            sum: 0.,
            sumc: 0,
            int_ts,
            last_ts: 0,
            last_val: None,
            do_time_weight,
            events_taken_count: 0,
            events_ignored_count: 0,
        }
    }

    // TODO reduce clone.. optimize via more traits to factor the trade-offs?
    fn apply_min_max(&mut self, val: NTY) {
        if self.count == 0 {
            self.min = val.clone();
            self.max = val.clone();
        } else {
            if self.min > val {
                self.min = val.clone();
            }
            if self.max < val {
                self.max = val.clone();
            }
        }
    }

    fn apply_event_unweight(&mut self, val: NTY) {
        let vf = val.as_prim_f32();
        self.apply_min_max(val);
        if vf.is_nan() {
        } else {
            self.sum += vf;
            self.sumc += 1;
        }
    }
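
    // apply_event_time_weight integrates the last seen value over the time elapsed since
    // `int_ts`; result_reset_time_weight later divides the accumulated sum by the bin
    // length, which yields the time-weighted average for the bin.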
    fn apply_event_time_weight(&mut self, ts: u64) {
        if let Some(v) = &self.last_val {
            let vf = v.as_prim_f32();
            let v2 = v.clone();
            self.apply_min_max(v2);
            let w = if self.do_time_weight {
                (ts - self.int_ts) as f32 * 1e-9
            } else {
                1.
            };
            if vf.is_nan() {
            } else {
                self.sum += vf * w;
                self.sumc += 1;
            }
            self.int_ts = ts;
        } else {
            debug!(
                "apply_event_time_weight NO VALUE {}",
                ts as i64 - self.range.beg as i64
            );
        }
    }

    fn ingest_unweight(&mut self, item: &<Self as TimeBinnableTypeAggregator>::Input) {
        for i1 in 0..item.tss.len() {
            let ts = item.tss[i1];
            let val = item.values[i1].clone();
            if ts < self.range.beg {
                self.events_ignored_count += 1;
            } else if ts >= self.range.end {
                self.events_ignored_count += 1;
                return;
            } else {
                self.apply_event_unweight(val);
                self.count += 1;
                self.events_taken_count += 1;
            }
        }
    }

    fn ingest_time_weight(&mut self, item: &<Self as TimeBinnableTypeAggregator>::Input) {
        for i1 in 0..item.tss.len() {
            let ts = item.tss[i1];
            let val = item.values[i1].clone();
            if ts < self.int_ts {
                if self.last_val.is_none() {
                    info!(
                        "ingest_time_weight event before range, only set last ts {} val {:?}",
                        ts, val
                    );
                }
                self.events_ignored_count += 1;
                self.last_ts = ts;
                self.last_val = Some(val);
            } else if ts >= self.range.end {
                self.events_ignored_count += 1;
                return;
            } else {
                self.apply_event_time_weight(ts);
                if self.last_val.is_none() {
                    info!(
                        "call apply_min_max without last val, use current instead {} {:?}",
                        ts, val
                    );
                    self.apply_min_max(val.clone());
                }
                self.count += 1;
                self.last_ts = ts;
                self.last_val = Some(val);
                self.events_taken_count += 1;
            }
        }
    }

    fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins<NTY> {
        let (min, max, avg) = if self.sumc > 0 {
            let avg = self.sum / self.sumc as f32;
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let g = match &self.last_val {
                Some(x) => x.clone(),
                None => NTY::zero(),
            };
            (g.clone(), g.clone(), g.as_prim_f32())
        };
        let ret = MinMaxAvgDim0Bins {
            ts1s: vec![self.range.beg],
            ts2s: vec![self.range.end],
            counts: vec![self.count],
            mins: vec![min],
            maxs: vec![max],
            avgs: vec![avg],
        };
        self.int_ts = range.beg;
        self.range = range;
        self.count = 0;
        self.sum = 0f32;
        self.sumc = 0;
        ret
    }

    fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim0Bins<NTY> {
        // TODO check callsite for correct expand status.
        if expand {
            debug!("result_reset_time_weight calls apply_event_time_weight");
            self.apply_event_time_weight(self.range.end);
        } else {
            debug!("result_reset_time_weight NO EXPAND");
        }
        let (min, max, avg) = if self.sumc > 0 {
            let avg = self.sum / (self.range.delta() as f32 * 1e-9);
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let g = match &self.last_val {
                Some(x) => x.clone(),
                None => NTY::zero(),
            };
            (g.clone(), g.clone(), g.as_prim_f32())
        };
        let ret = MinMaxAvgDim0Bins {
            ts1s: vec![self.range.beg],
            ts2s: vec![self.range.end],
            counts: vec![self.count],
            mins: vec![min],
            maxs: vec![max],
            avgs: vec![avg],
        };
        self.int_ts = range.beg;
        self.range = range;
        self.count = 0;
        self.sum = 0f32;
        self.sumc = 0;
        ret
    }
}

impl<NTY> TimeBinnableTypeAggregator for EventValuesAggregator<NTY>
where
    NTY: NumOps,
{
    type Input = ScalarEvents<NTY>;
    type Output = MinMaxAvgDim0Bins<NTY>;

    fn range(&self) -> &NanoRange {
        &self.range
    }

    fn ingest(&mut self, item: &Self::Input) {
        debug!("ingest len {}", item.len());
        if self.do_time_weight {
            self.ingest_time_weight(item)
        } else {
            self.ingest_unweight(item)
        }
    }

    fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output {
        debug!("Produce for {:?} next {:?}", self.range, range);
        if self.do_time_weight {
            self.result_reset_time_weight(range, expand)
        } else {
            self.result_reset_unweight(range, expand)
        }
    }
}

impl<NTY> EventAppendable for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Value = NTY;
|
||||
|
||||
fn append_event(ret: Option<Self>, ts: u64, pulse: u64, value: Self::Value) -> Self {
|
||||
let mut ret = if let Some(ret) = ret { ret } else { Self::empty() };
|
||||
ret.push(ts, pulse, value);
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: NumOps + 'static> TimeBinnableDyn for ScalarEvents<NTY> {
|
||||
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinnerDyn> {
|
||||
let ret = ScalarEventsTimeBinner::<NTY>::new(edges.into(), do_time_weight);
|
||||
Box::new(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: NumOps + 'static> EventsDyn for ScalarEvents<NTY> {
|
||||
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
|
||||
self as &dyn TimeBinnableDyn
|
||||
}
|
||||
|
||||
fn verify(&self) {
|
||||
let mut ts_max = 0;
|
||||
for ts in &self.tss {
|
||||
let ts = *ts;
|
||||
if ts < ts_max {
|
||||
error!("unordered event data ts {} ts_max {}", ts, ts_max);
|
||||
}
|
||||
ts_max = ts_max.max(ts);
|
||||
}
|
||||
}
|
||||
|
||||
fn output_info(&self) {
|
||||
if false {
|
||||
info!("output_info len {}", self.tss.len());
|
||||
if self.tss.len() == 1 {
|
||||
info!(
|
||||
" only: ts {} pulse {} value {:?}",
|
||||
self.tss[0], self.pulses[0], self.values[0]
|
||||
);
|
||||
} else if self.tss.len() > 1 {
|
||||
info!(
|
||||
" first: ts {} pulse {} value {:?}",
|
||||
self.tss[0], self.pulses[0], self.values[0]
|
||||
);
|
||||
let n = self.tss.len() - 1;
|
||||
info!(
|
||||
" last: ts {} pulse {} value {:?}",
|
||||
self.tss[n], self.pulses[n], self.values[n]
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ScalarEventsTimeBinner<NTY: NumOps> {
|
||||
// The first two edges are used the next time that we create an aggregator, or push a zero bin.
|
||||
edges: VecDeque<u64>,
|
||||
do_time_weight: bool,
|
||||
agg: Option<EventValuesAggregator<NTY>>,
|
||||
ready: Option<<EventValuesAggregator<NTY> as TimeBinnableTypeAggregator>::Output>,
|
||||
}
|
||||
|
||||
impl<NTY: NumOps> ScalarEventsTimeBinner<NTY> {
|
||||
fn new(edges: VecDeque<u64>, do_time_weight: bool) -> Self {
|
||||
Self {
|
||||
edges,
|
||||
do_time_weight,
|
||||
agg: None,
|
||||
ready: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn next_bin_range(&mut self) -> Option<NanoRange> {
|
||||
if self.edges.len() >= 2 {
|
||||
let ret = NanoRange {
|
||||
beg: self.edges[0],
|
||||
end: self.edges[1],
|
||||
};
|
||||
self.edges.pop_front();
|
||||
Some(ret)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: NumOps + 'static> TimeBinnerDyn for ScalarEventsTimeBinner<NTY> {
|
||||
fn bins_ready_count(&self) -> usize {
|
||||
match &self.ready {
|
||||
Some(k) => k.len(),
|
||||
None => 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn bins_ready(&mut self) -> Option<Box<dyn crate::TimeBinned>> {
|
||||
match self.ready.take() {
|
||||
Some(k) => Some(Box::new(k)),
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn ingest(&mut self, item: &dyn TimeBinnableDyn) {
|
||||
const SELF: &str = "ScalarEventsTimeBinner";
|
||||
if item.len() == 0 {
|
||||
// Return already here, RangeOverlapInfo would not give much sense.
|
||||
return;
|
||||
}
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} no more bin in edges A");
|
||||
return;
|
||||
}
|
||||
// TODO optimize by remembering at which event array index we have arrived.
|
||||
// That needs modified interfaces which can take and yield the start and latest index.
|
||||
loop {
|
||||
while item.starts_after(NanoRange {
|
||||
beg: 0,
|
||||
end: self.edges[1],
|
||||
}) {
|
||||
self.cycle();
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} no more bin in edges B");
|
||||
return;
|
||||
}
|
||||
}
|
||||
if item.ends_before(NanoRange {
|
||||
beg: self.edges[0],
|
||||
end: u64::MAX,
|
||||
}) {
|
||||
return;
|
||||
} else {
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} edge list exhausted");
|
||||
return;
|
||||
} else {
|
||||
let agg = if let Some(agg) = self.agg.as_mut() {
|
||||
agg
|
||||
} else {
|
||||
self.agg = Some(EventValuesAggregator::new(
|
||||
// We know here that we have enough edges for another bin.
|
||||
// and `next_bin_range` will pop the first edge.
|
||||
self.next_bin_range().unwrap(),
|
||||
self.do_time_weight,
|
||||
));
|
||||
self.agg.as_mut().unwrap()
|
||||
};
|
||||
if let Some(item) = item
|
||||
.as_any_ref()
|
||||
// TODO make statically sure that we attempt to cast to the correct type here:
|
||||
.downcast_ref::<<EventValuesAggregator<NTY> as TimeBinnableTypeAggregator>::Input>()
|
||||
{
|
||||
// TODO collect statistics associated with this request:
|
||||
agg.ingest(item);
|
||||
} else {
|
||||
error!("not correct item type");
|
||||
};
|
||||
if item.ends_after(agg.range().clone()) {
|
||||
self.cycle();
|
||||
if self.edges.len() < 2 {
|
||||
warn!("TimeBinnerDyn for {SELF} no more bin in edges C");
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn push_in_progress(&mut self, push_empty: bool) {
|
||||
// TODO expand should be derived from AggKind. Is it still required after all?
|
||||
// TODO here, the expand means that agg will assume that the current value is kept constant during
|
||||
// the rest of the time range.
|
||||
let expand = true;
|
||||
let range_next = if self.agg.is_some() {
|
||||
if let Some(x) = self.next_bin_range() {
|
||||
Some(x)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
if let Some(agg) = self.agg.as_mut() {
|
||||
let mut bins;
|
||||
if let Some(range_next) = range_next {
|
||||
bins = agg.result_reset(range_next, expand);
|
||||
} else {
|
||||
let range_next = NanoRange { beg: 4, end: 5 };
|
||||
bins = agg.result_reset(range_next, expand);
|
||||
self.agg = None;
|
||||
}
|
||||
assert_eq!(bins.len(), 1);
|
||||
if push_empty || bins.counts[0] != 0 {
|
||||
match self.ready.as_mut() {
|
||||
Some(ready) => {
|
||||
ready.append(&mut bins);
|
||||
}
|
||||
None => {
|
||||
self.ready = Some(bins);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn cycle(&mut self) {
|
||||
let n = self.bins_ready_count();
|
||||
self.push_in_progress(true);
|
||||
if self.bins_ready_count() == n {
|
||||
if let Some(range) = self.next_bin_range() {
|
||||
let mut bins = MinMaxAvgDim0Bins::<NTY>::empty();
|
||||
bins.append_zero(range.beg, range.end);
|
||||
match self.ready.as_mut() {
|
||||
Some(ready) => {
|
||||
ready.append(&mut bins);
|
||||
}
|
||||
None => {
|
||||
self.ready = Some(bins);
|
||||
}
|
||||
}
|
||||
if self.bins_ready_count() <= n {
|
||||
error!("failed to push a zero bin");
|
||||
}
|
||||
} else {
|
||||
warn!("cycle: no in-progress bin pushed, but also no more bin to add as zero-bin");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> EventsNodeProcessorOutput for ScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn into_parts(self) -> (Box<dyn Any>, VecDeque<u64>, VecDeque<u64>) {
|
||||
(
|
||||
Box::new(VecDeque::from(self.values)),
|
||||
self.tss.into(),
|
||||
self.pulses.into(),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,561 +0,0 @@
|
||||
use crate::binsdim1::MinMaxAvgDim1Bins;
use crate::numops::NumOps;
use crate::xbinnedscalarevents::XBinnedScalarEvents;
use crate::xbinnedwaveevents::XBinnedWaveEvents;
use crate::{
    Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn, EventsNodeProcessor, EventsNodeProcessorOutput,
    FilterFittingInside, Fits, FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo,
    ReadPbv, ReadableFromFile, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use items_0::subfr::SubFrId;
use items_0::AsAnyRef;
use netpod::log::*;
use netpod::{x_bin_count, AggKind, NanoRange, Shape};
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::VecDeque;
use std::marker::PhantomData;
use tokio::fs::File;

#[derive(Debug, Serialize, Deserialize)]
pub struct WaveEvents<NTY> {
    pub tss: Vec<u64>,
    pub pulses: Vec<u64>,
    pub vals: Vec<Vec<NTY>>,
}

impl<NTY> WaveEvents<NTY> {
    pub fn push(&mut self, ts: u64, pulse: u64, value: Vec<NTY>) {
        self.tss.push(ts);
        self.pulses.push(pulse);
        self.vals.push(value);
    }
}

impl<NTY> WaveEvents<NTY> {
    pub fn empty() -> Self {
        Self {
            tss: Vec::new(),
            pulses: Vec::new(),
            vals: Vec::new(),
        }
    }

    pub fn shape(&self) -> Result<Shape, Error> {
        if let Some(k) = self.vals.first() {
            let ret = Shape::Wave(k.len() as u32);
            Ok(ret)
        } else {
            Err(Error::with_msg_no_trace("WaveEvents is empty, can not determine Shape"))
        }
    }
}

impl<NTY> FrameTypeInnerStatic for WaveEvents<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
const FRAME_TYPE_ID: u32 = crate::WAVE_EVENTS_FRAME_TYPE_ID + NTY::SUB;
|
||||
}
|
||||
|
||||
impl<NTY> FrameType for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> AsAnyRef for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn as_any_ref(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for WaveEvents<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithTimestamps for WaveEvents<NTY> {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
self.tss[ix]
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ByteEstimate for WaveEvents<NTY> {
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
if self.tss.len() == 0 {
|
||||
0
|
||||
} else {
|
||||
// TODO improve via a const fn on NTY
|
||||
self.tss.len() as u64 * 8 * self.vals[0].len() as u64
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> RangeOverlapInfo for WaveEvents<NTY> {
|
||||
fn ends_before(&self, range: NanoRange) -> bool {
|
||||
match self.tss.last() {
|
||||
Some(&ts) => ts < range.beg,
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn ends_after(&self, range: NanoRange) -> bool {
|
||||
match self.tss.last() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn starts_after(&self, range: NanoRange) -> bool {
|
||||
match self.tss.first() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FitsInside for WaveEvents<NTY> {
|
||||
fn fits_inside(&self, range: NanoRange) -> Fits {
|
||||
if self.tss.is_empty() {
|
||||
Fits::Empty
|
||||
} else {
|
||||
let t1 = *self.tss.first().unwrap();
|
||||
let t2 = *self.tss.last().unwrap();
|
||||
if t2 < range.beg {
|
||||
Fits::Lower
|
||||
} else if t1 > range.end {
|
||||
Fits::Greater
|
||||
} else if t1 < range.beg && t2 > range.end {
|
||||
Fits::PartlyLowerAndGreater
|
||||
} else if t1 < range.beg {
|
||||
Fits::PartlyLower
|
||||
} else if t2 > range.end {
|
||||
Fits::PartlyGreater
|
||||
} else {
|
||||
Fits::Inside
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FilterFittingInside for WaveEvents<NTY> {
|
||||
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
|
||||
match self.fits_inside(fit_range) {
|
||||
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> PushableIndex for WaveEvents<NTY>
where
    NTY: NumOps,
{
    fn push_index(&mut self, src: &Self, ix: usize) {
        self.tss.push(src.tss[ix]);
        // Keep the pulses column in step with tss and vals.
        self.pulses.push(src.pulses[ix]);
        // TODO trait should allow to move from source.
        self.vals.push(src.vals[ix].clone());
    }
}
|
||||
impl<NTY> NewEmpty for WaveEvents<NTY> {
|
||||
fn empty(_shape: Shape) -> Self {
|
||||
Self {
|
||||
tss: Vec::new(),
|
||||
pulses: Vec::new(),
|
||||
vals: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Appendable for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn empty_like_self(&self) -> Self {
|
||||
Self::empty()
|
||||
}
|
||||
|
||||
    fn append(&mut self, src: &Self) {
        self.tss.extend_from_slice(&src.tss);
        // Keep the pulses column in step with tss and vals.
        self.pulses.extend_from_slice(&src.pulses);
        self.vals.extend_from_slice(&src.vals);
    }

    fn append_zero(&mut self, ts1: u64, _ts2: u64) {
        self.tss.push(ts1);
        self.pulses.push(0);
        self.vals.push(Vec::new());
    }
}

impl<NTY> Clearable for WaveEvents<NTY> {
    fn clear(&mut self) {
        self.tss.clear();
        self.pulses.clear();
        self.vals.clear();
    }
}
|
||||
impl<NTY> ReadableFromFile for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn read_from_file(_file: File) -> Result<ReadPbv<Self>, Error> {
|
||||
// TODO refactor types such that this impl is not needed.
|
||||
panic!()
|
||||
}
|
||||
|
||||
fn from_buf(_buf: &[u8]) -> Result<Self, Error> {
|
||||
panic!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableType for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Output = MinMaxAvgDim1Bins<NTY>;
|
||||
type Aggregator = WaveEventsAggregator<NTY>;
|
||||
|
||||
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
|
||||
debug!(
|
||||
"TimeBinnableType for WaveEvents aggregator() range {:?} x_bin_count {} do_time_weight {}",
|
||||
range, x_bin_count, do_time_weight
|
||||
);
|
||||
Self::Aggregator::new(range, x_bin_count, do_time_weight)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WaveEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
range: NanoRange,
|
||||
count: u64,
|
||||
min: Option<Vec<NTY>>,
|
||||
max: Option<Vec<NTY>>,
|
||||
sumc: u64,
|
||||
sum: Option<Vec<f32>>,
|
||||
}
|
||||
|
||||
impl<NTY> WaveEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
pub fn new(range: NanoRange, _x_bin_count: usize, do_time_weight: bool) -> Self {
|
||||
if do_time_weight {
|
||||
err::todo();
|
||||
}
|
||||
Self {
|
||||
range,
|
||||
count: 0,
|
||||
// TODO create the right number of bins right here:
|
||||
min: err::todoval(),
|
||||
max: None,
|
||||
sumc: 0,
|
||||
sum: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableTypeAggregator for WaveEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = WaveEvents<NTY>;
|
||||
type Output = MinMaxAvgDim1Bins<NTY>;
|
||||
|
||||
fn range(&self) -> &NanoRange {
|
||||
&self.range
|
||||
}
|
||||
|
||||
fn ingest(&mut self, item: &Self::Input) {
|
||||
error!("time-weighted binning not available");
|
||||
err::todo();
|
||||
for i1 in 0..item.tss.len() {
|
||||
let ts = item.tss[i1];
|
||||
if ts < self.range.beg {
|
||||
continue;
|
||||
} else if ts >= self.range.end {
|
||||
continue;
|
||||
} else {
|
||||
match &mut self.min {
|
||||
None => self.min = Some(item.vals[i1].clone()),
|
||||
Some(min) => {
|
||||
for (a, b) in min.iter_mut().zip(item.vals[i1].iter()) {
|
||||
if b < a {
|
||||
*a = b.clone();
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
                match &mut self.max {
                    None => self.max = Some(item.vals[i1].clone()),
                    Some(max) => {
                        for (a, b) in max.iter_mut().zip(item.vals[i1].iter()) {
                            // The maximum branch must use a greater-than comparison.
                            if b > a {
                                *a = b.clone();
                            }
                        }
                    }
                };
match self.sum.as_mut() {
|
||||
None => {
|
||||
self.sum = Some(item.vals[i1].iter().map(|k| k.as_prim_f32()).collect());
|
||||
}
|
||||
Some(sum) => {
|
||||
for (a, b) in sum.iter_mut().zip(item.vals[i1].iter()) {
|
||||
let vf = b.as_prim_f32();
|
||||
if vf.is_nan() {
|
||||
} else {
|
||||
*a += vf;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
self.sumc += 1;
|
||||
self.count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn result_reset(&mut self, range: NanoRange, _expand: bool) -> Self::Output {
|
||||
let avg = if self.sumc == 0 {
|
||||
None
|
||||
} else {
|
||||
let avg = self
|
||||
.sum
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|item| item / self.sumc as f32)
|
||||
.collect();
|
||||
Some(avg)
|
||||
};
|
||||
let ret = Self::Output {
|
||||
ts1s: vec![self.range.beg],
|
||||
ts2s: vec![self.range.end],
|
||||
counts: vec![self.count],
|
||||
// TODO replace with reset-value instead.
|
||||
mins: vec![self.min.clone()],
|
||||
maxs: vec![self.max.clone()],
|
||||
avgs: vec![avg],
|
||||
};
|
||||
self.range = range;
|
||||
self.count = 0;
|
||||
self.min = None;
|
||||
self.max = None;
|
||||
self.sum = None;
|
||||
self.sumc = 0;
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> EventAppendable for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Value = Vec<NTY>;
|
||||
|
||||
fn append_event(ret: Option<Self>, ts: u64, pulse: u64, value: Self::Value) -> Self {
|
||||
let mut ret = if let Some(ret) = ret { ret } else { Self::empty() };
|
||||
ret.push(ts, pulse, value);
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WaveXBinner<NTY> {
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> EventsNodeProcessor for WaveXBinner<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = WaveEvents<NTY>;
|
||||
type Output = XBinnedScalarEvents<NTY>;
|
||||
|
||||
fn create(_shape: Shape, _agg_kind: AggKind) -> Self {
|
||||
Self { _m1: PhantomData }
|
||||
}
|
||||
|
||||
fn process(&self, inp: Self::Input) -> Self::Output {
|
||||
let nev = inp.tss.len();
|
||||
let mut ret = Self::Output {
|
||||
tss: inp.tss,
|
||||
mins: Vec::with_capacity(nev),
|
||||
maxs: Vec::with_capacity(nev),
|
||||
avgs: Vec::with_capacity(nev),
|
||||
};
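        // For each event, collapse the whole waveform into a single min/max/avg triple
        // (x-binning the wave down to one scalar bin per event).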
|
||||
for i1 in 0..nev {
|
||||
let mut min = NTY::max_or_nan();
|
||||
let mut max = NTY::min_or_nan();
|
||||
let mut sum = 0f32;
|
||||
let mut sumc = 0;
|
||||
let vals = &inp.vals[i1];
|
||||
for v in vals.iter() {
|
||||
if v < &min || min.is_nan() {
|
||||
min = v.clone();
|
||||
}
|
||||
if v > &max || max.is_nan() {
|
||||
max = v.clone();
|
||||
}
|
||||
let vf = v.as_prim_f32();
|
||||
if vf.is_nan() {
|
||||
} else {
|
||||
sum += vf;
|
||||
sumc += 1;
|
||||
}
|
||||
}
|
||||
ret.mins.push(min);
|
||||
ret.maxs.push(max);
|
||||
if sumc == 0 {
|
||||
ret.avgs.push(f32::NAN);
|
||||
} else {
|
||||
ret.avgs.push(sum / sumc as f32);
|
||||
}
|
||||
}
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WaveNBinner<NTY> {
|
||||
shape_bin_count: usize,
|
||||
x_bin_count: usize,
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
impl<NTY> EventsNodeProcessor for WaveNBinner<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = WaveEvents<NTY>;
|
||||
type Output = XBinnedWaveEvents<NTY>;
|
||||
|
||||
fn create(shape: Shape, agg_kind: AggKind) -> Self {
|
||||
// TODO get rid of panic potential
|
||||
let shape_bin_count = if let Shape::Wave(n) = shape { n } else { panic!() } as usize;
|
||||
let x_bin_count = x_bin_count(&shape, &agg_kind);
|
||||
Self {
|
||||
shape_bin_count,
|
||||
x_bin_count,
|
||||
_m1: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn process(&self, inp: Self::Input) -> Self::Output {
|
||||
let nev = inp.tss.len();
|
||||
let mut ret = Self::Output {
|
||||
// TODO get rid of this clone:
|
||||
tss: inp.tss.clone(),
|
||||
mins: Vec::with_capacity(nev),
|
||||
maxs: Vec::with_capacity(nev),
|
||||
avgs: Vec::with_capacity(nev),
|
||||
};
|
||||
for i1 in 0..nev {
|
||||
let mut min = vec![NTY::max_or_nan(); self.x_bin_count];
|
||||
let mut max = vec![NTY::min_or_nan(); self.x_bin_count];
|
||||
let mut sum = vec![0f32; self.x_bin_count];
|
||||
let mut sumc = vec![0u64; self.x_bin_count];
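            // The index mapping below, i3 = i2 * x_bin_count / shape_bin_count, distributes
            // the shape_bin_count waveform samples evenly over the coarser x bins.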
|
||||
for (i2, v) in inp.vals[i1].iter().enumerate() {
|
||||
let i3 = i2 * self.x_bin_count / self.shape_bin_count;
|
||||
if v < &min[i3] || min[i3].is_nan() {
|
||||
min[i3] = v.clone();
|
||||
}
|
||||
if v > &max[i3] || max[i3].is_nan() {
|
||||
max[i3] = v.clone();
|
||||
}
|
||||
if v.is_nan() {
|
||||
} else {
|
||||
sum[i3] += v.as_prim_f32();
|
||||
sumc[i3] += 1;
|
||||
}
|
||||
}
|
||||
// TODO
|
||||
if false && inp.tss[0] < 1300 {
|
||||
info!("WaveNBinner process push min {:?}", min);
|
||||
}
|
||||
ret.mins.push(min);
|
||||
ret.maxs.push(max);
|
||||
let avg = sum
|
||||
.into_iter()
|
||||
.zip(sumc.into_iter())
|
||||
.map(|(j, k)| if k > 0 { j / k as f32 } else { f32::NAN })
|
||||
.collect();
|
||||
ret.avgs.push(avg);
|
||||
}
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WavePlainProc<NTY> {
|
||||
_m1: PhantomData<NTY>,
|
||||
}
|
||||
|
||||
// TODO purpose?
|
||||
impl<NTY> EventsNodeProcessor for WavePlainProc<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = WaveEvents<NTY>;
|
||||
type Output = WaveEvents<NTY>;
|
||||
|
||||
fn create(_shape: Shape, _agg_kind: AggKind) -> Self {
|
||||
Self { _m1: PhantomData }
|
||||
}
|
||||
|
||||
fn process(&self, inp: Self::Input) -> Self::Output {
|
||||
if false {
|
||||
let n = if inp.vals.len() > 0 { inp.vals[0].len() } else { 0 };
|
||||
let n = if n > 5 { 5 } else { n };
|
||||
WaveEvents {
|
||||
tss: inp.tss,
|
||||
pulses: inp.pulses,
|
||||
vals: inp.vals.iter().map(|k| k[..n].to_vec()).collect(),
|
||||
}
|
||||
} else {
|
||||
WaveEvents {
|
||||
tss: inp.tss,
|
||||
pulses: inp.pulses,
|
||||
vals: inp.vals,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY: NumOps> crate::TimeBinnableDynStub for WaveEvents<NTY> {}
|
||||
|
||||
impl<NTY: NumOps> EventsDyn for WaveEvents<NTY> {
|
||||
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
|
||||
self as &dyn TimeBinnableDyn
|
||||
}
|
||||
|
||||
fn verify(&self) {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn output_info(&self) {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> EventsNodeProcessorOutput for WaveEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn into_parts(self) -> (Box<dyn Any>, VecDeque<u64>, VecDeque<u64>) {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
@@ -1,520 +0,0 @@
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
|
||||
use crate::binsdim0::MinMaxAvgDim0Bins;
|
||||
use crate::numops::NumOps;
|
||||
use crate::streams::{Collectable, Collector};
|
||||
use crate::{
|
||||
ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventsNodeProcessorOutput, FilterFittingInside, Fits,
|
||||
FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile,
|
||||
TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
|
||||
};
|
||||
use err::Error;
|
||||
use items_0::subfr::SubFrId;
|
||||
use netpod::log::*;
|
||||
use netpod::{NanoRange, Shape};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::fs::File;
|
||||
|
||||
// TODO in this module reduce clones
|
||||
|
||||
// TODO rename Scalar -> Dim0
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct XBinnedScalarEvents<NTY> {
|
||||
pub tss: Vec<u64>,
|
||||
pub mins: Vec<NTY>,
|
||||
pub maxs: Vec<NTY>,
|
||||
pub avgs: Vec<f32>,
|
||||
}
|
||||
|
||||
impl<NTY> FrameTypeInnerStatic for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
const FRAME_TYPE_ID: u32 = crate::X_BINNED_SCALAR_EVENTS_FRAME_TYPE_ID + NTY::SUB;
|
||||
}
|
||||
|
||||
impl<NTY> FrameType for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> XBinnedScalarEvents<NTY> {
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
tss: Vec::new(),
|
||||
mins: Vec::new(),
|
||||
maxs: Vec::new(),
|
||||
avgs: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for XBinnedScalarEvents<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithTimestamps for XBinnedScalarEvents<NTY> {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
self.tss[ix]
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ByteEstimate for XBinnedScalarEvents<NTY> {
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
if self.tss.len() == 0 {
|
||||
0
|
||||
} else {
|
||||
// TODO improve via a const fn on NTY
|
||||
self.tss.len() as u64 * 28
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> RangeOverlapInfo for XBinnedScalarEvents<NTY> {
|
||||
fn ends_before(&self, range: NanoRange) -> bool {
|
||||
match self.tss.last() {
|
||||
Some(&ts) => ts < range.beg,
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn ends_after(&self, range: NanoRange) -> bool {
|
||||
match self.tss.last() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn starts_after(&self, range: NanoRange) -> bool {
|
||||
match self.tss.first() {
|
||||
Some(&ts) => ts >= range.end,
|
||||
None => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FitsInside for XBinnedScalarEvents<NTY> {
|
||||
fn fits_inside(&self, range: NanoRange) -> Fits {
|
||||
if self.tss.is_empty() {
|
||||
Fits::Empty
|
||||
} else {
|
||||
let t1 = *self.tss.first().unwrap();
|
||||
let t2 = *self.tss.last().unwrap();
|
||||
if t2 < range.beg {
|
||||
Fits::Lower
|
||||
} else if t1 > range.end {
|
||||
Fits::Greater
|
||||
} else if t1 < range.beg && t2 > range.end {
|
||||
Fits::PartlyLowerAndGreater
|
||||
} else if t1 < range.beg {
|
||||
Fits::PartlyLower
|
||||
} else if t2 > range.end {
|
||||
Fits::PartlyGreater
|
||||
} else {
|
||||
Fits::Inside
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> FilterFittingInside for XBinnedScalarEvents<NTY> {
|
||||
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
|
||||
match self.fits_inside(fit_range) {
|
||||
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> PushableIndex for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
self.tss.push(src.tss[ix]);
|
||||
self.mins.push(src.mins[ix].clone());
|
||||
self.maxs.push(src.maxs[ix].clone());
|
||||
self.avgs.push(src.avgs[ix]);
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> NewEmpty for XBinnedScalarEvents<NTY> {
|
||||
fn empty(_shape: Shape) -> Self {
|
||||
Self {
|
||||
tss: Vec::new(),
|
||||
avgs: Vec::new(),
|
||||
mins: Vec::new(),
|
||||
maxs: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Appendable for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn empty_like_self(&self) -> Self {
|
||||
Self::empty()
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
self.tss.extend_from_slice(&src.tss);
|
||||
self.mins.extend_from_slice(&src.mins);
|
||||
self.maxs.extend_from_slice(&src.maxs);
|
||||
self.avgs.extend_from_slice(&src.avgs);
|
||||
}
|
||||
|
||||
fn append_zero(&mut self, ts1: u64, _ts2: u64) {
|
||||
self.tss.push(ts1);
|
||||
self.mins.push(NTY::zero());
|
||||
self.maxs.push(NTY::zero());
|
||||
self.avgs.push(0.);
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Clearable for XBinnedScalarEvents<NTY> {
|
||||
fn clear(&mut self) {
|
||||
self.tss.clear();
|
||||
self.avgs.clear();
|
||||
self.mins.clear();
|
||||
self.maxs.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ReadableFromFile for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn read_from_file(_file: File) -> Result<ReadPbv<Self>, Error> {
|
||||
// TODO refactor types such that this impl is not needed.
|
||||
panic!()
|
||||
}
|
||||
|
||||
fn from_buf(_buf: &[u8]) -> Result<Self, Error> {
|
||||
panic!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableType for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Output = MinMaxAvgDim0Bins<NTY>;
|
||||
type Aggregator = XBinnedScalarEventsAggregator<NTY>;
|
||||
|
||||
fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
|
||||
debug!(
|
||||
"TimeBinnableType for XBinnedScalarEvents aggregator() range {:?} x_bin_count {} do_time_weight {}",
|
||||
range, x_bin_count, do_time_weight
|
||||
);
|
||||
Self::Aggregator::new(range, do_time_weight)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct XBinnedScalarEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
range: NanoRange,
|
||||
count: u64,
|
||||
min: NTY,
|
||||
max: NTY,
|
||||
sumc: u64,
|
||||
sum: f32,
|
||||
int_ts: u64,
|
||||
last_ts: u64,
|
||||
last_avg: Option<f32>,
|
||||
last_min: Option<NTY>,
|
||||
last_max: Option<NTY>,
|
||||
do_time_weight: bool,
|
||||
}
|
||||
|
||||
impl<NTY> XBinnedScalarEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
pub fn new(range: NanoRange, do_time_weight: bool) -> Self {
|
||||
Self {
|
||||
int_ts: range.beg,
|
||||
range,
|
||||
count: 0,
|
||||
min: NTY::zero(),
|
||||
max: NTY::zero(),
|
||||
sumc: 0,
|
||||
sum: 0f32,
|
||||
last_ts: 0,
|
||||
last_avg: None,
|
||||
last_min: None,
|
||||
last_max: None,
|
||||
do_time_weight,
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_min_max(&mut self, min: NTY, max: NTY) {
|
||||
if self.count == 0 {
|
||||
self.min = min;
|
||||
self.max = max;
|
||||
} else {
|
||||
if min < self.min {
|
||||
self.min = min;
|
||||
}
|
||||
if max > self.max {
|
||||
self.max = max;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_event_unweight(&mut self, avg: f32, min: NTY, max: NTY) {
|
||||
//debug!("apply_event_unweight");
|
||||
self.apply_min_max(min, max);
|
||||
let vf = avg;
|
||||
if vf.is_nan() {
|
||||
} else {
|
||||
self.sum += vf;
|
||||
self.sumc += 1;
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_event_time_weight(&mut self, ts: u64) {
|
||||
//debug!("apply_event_time_weight");
|
||||
if let (Some(avg), Some(min), Some(max)) = (self.last_avg, &self.last_min, &self.last_max) {
|
||||
let min2 = min.clone();
|
||||
let max2 = max.clone();
|
||||
self.apply_min_max(min2, max2);
|
||||
let w = (ts - self.int_ts) as f32 / self.range.delta() as f32;
|
||||
if avg.is_nan() {
|
||||
} else {
|
||||
self.sum += avg * w;
|
||||
}
|
||||
self.sumc += 1;
|
||||
self.int_ts = ts;
|
||||
}
|
||||
}
|
||||
|
||||
fn ingest_unweight(&mut self, item: &XBinnedScalarEvents<NTY>) {
|
||||
for i1 in 0..item.tss.len() {
|
||||
let ts = item.tss[i1];
|
||||
let avg = item.avgs[i1];
|
||||
let min = item.mins[i1].clone();
|
||||
let max = item.maxs[i1].clone();
|
||||
if ts < self.range.beg {
|
||||
} else if ts >= self.range.end {
|
||||
} else {
|
||||
self.apply_event_unweight(avg, min, max);
|
||||
self.count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn ingest_time_weight(&mut self, item: &XBinnedScalarEvents<NTY>) {
|
||||
for i1 in 0..item.tss.len() {
|
||||
let ts = item.tss[i1];
|
||||
let avg = item.avgs[i1];
|
||||
let min = item.mins[i1].clone();
|
||||
let max = item.maxs[i1].clone();
|
||||
if ts < self.int_ts {
|
||||
self.last_ts = ts;
|
||||
self.last_avg = Some(avg);
|
||||
self.last_min = Some(min);
|
||||
self.last_max = Some(max);
|
||||
} else if ts >= self.range.end {
|
||||
return;
|
||||
} else {
|
||||
self.apply_event_time_weight(ts);
|
||||
self.count += 1;
|
||||
self.last_ts = ts;
|
||||
self.last_avg = Some(avg);
|
||||
self.last_min = Some(min);
|
||||
self.last_max = Some(max);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim0Bins<NTY> {
|
||||
let avg = if self.sumc == 0 {
|
||||
0f32
|
||||
} else {
|
||||
self.sum / self.sumc as f32
|
||||
};
|
||||
let ret = MinMaxAvgDim0Bins {
|
||||
ts1s: vec![self.range.beg],
|
||||
ts2s: vec![self.range.end],
|
||||
counts: vec![self.count],
|
||||
mins: vec![self.min.clone()],
|
||||
maxs: vec![self.max.clone()],
|
||||
avgs: vec![avg],
|
||||
};
|
||||
self.int_ts = range.beg;
|
||||
self.range = range;
|
||||
self.count = 0;
|
||||
self.min = NTY::zero();
|
||||
self.max = NTY::zero();
|
||||
self.sum = 0f32;
|
||||
self.sumc = 0;
|
||||
ret
|
||||
}
|
||||
|
||||
fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim0Bins<NTY> {
|
||||
// TODO check callsite for correct expand status.
|
||||
if true || expand {
|
||||
self.apply_event_time_weight(self.range.end);
|
||||
}
|
||||
let avg = {
|
||||
let sc = self.range.delta() as f32 * 1e-9;
|
||||
self.sum / sc
|
||||
};
|
||||
let ret = MinMaxAvgDim0Bins {
|
||||
ts1s: vec![self.range.beg],
|
||||
ts2s: vec![self.range.end],
|
||||
counts: vec![self.count],
|
||||
mins: vec![self.min.clone()],
|
||||
maxs: vec![self.max.clone()],
|
||||
avgs: vec![avg],
|
||||
};
|
||||
self.int_ts = range.beg;
|
||||
self.range = range;
|
||||
self.count = 0;
|
||||
self.min = NTY::zero();
|
||||
self.max = NTY::zero();
|
||||
self.sum = 0f32;
|
||||
self.sumc = 0;
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> TimeBinnableTypeAggregator for XBinnedScalarEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = XBinnedScalarEvents<NTY>;
|
||||
type Output = MinMaxAvgDim0Bins<NTY>;
|
||||
|
||||
fn range(&self) -> &NanoRange {
|
||||
&self.range
|
||||
}
|
||||
|
||||
fn ingest(&mut self, item: &Self::Input) {
|
||||
debug!("ingest");
|
||||
if self.do_time_weight {
|
||||
self.ingest_time_weight(item)
|
||||
} else {
|
||||
self.ingest_unweight(item)
|
||||
}
|
||||
}
|
||||
|
||||
fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output {
|
||||
if self.do_time_weight {
|
||||
self.result_reset_time_weight(range, expand)
|
||||
} else {
|
||||
self.result_reset_unweight(range, expand)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct XBinnedScalarEventsCollectedResult<NTY> {
|
||||
#[serde(rename = "tsAnchor")]
|
||||
ts_anchor_sec: u64,
|
||||
#[serde(rename = "tsMs")]
|
||||
ts_off_ms: Vec<u64>,
|
||||
#[serde(rename = "tsNs")]
|
||||
ts_off_ns: Vec<u64>,
|
||||
mins: Vec<NTY>,
|
||||
maxs: Vec<NTY>,
|
||||
avgs: Vec<f32>,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")]
|
||||
finalised_range: bool,
|
||||
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")]
|
||||
timed_out: bool,
|
||||
}
|
||||
|
||||
pub struct XBinnedScalarEventsCollector<NTY> {
|
||||
vals: XBinnedScalarEvents<NTY>,
|
||||
finalised_range: bool,
|
||||
timed_out: bool,
|
||||
#[allow(dead_code)]
|
||||
bin_count_exp: u32,
|
||||
}
|
||||
|
||||
impl<NTY> XBinnedScalarEventsCollector<NTY> {
|
||||
pub fn new(bin_count_exp: u32) -> Self {
|
||||
Self {
|
||||
finalised_range: false,
|
||||
timed_out: false,
|
||||
vals: XBinnedScalarEvents::empty(),
|
||||
bin_count_exp,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for XBinnedScalarEventsCollector<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.vals.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collector for XBinnedScalarEventsCollector<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Input = XBinnedScalarEvents<NTY>;
|
||||
type Output = XBinnedScalarEventsCollectedResult<NTY>;
|
||||
|
||||
fn ingest(&mut self, src: &Self::Input) {
|
||||
self.vals.append(src);
|
||||
}
|
||||
|
||||
fn set_range_complete(&mut self) {
|
||||
self.finalised_range = true;
|
||||
}
|
||||
|
||||
fn set_timed_out(&mut self) {
|
||||
self.timed_out = true;
|
||||
}
|
||||
|
||||
fn result(self) -> Result<Self::Output, Error> {
|
||||
let tst = ts_offs_from_abs(&self.vals.tss);
|
||||
let ret = Self::Output {
|
||||
ts_anchor_sec: tst.0,
|
||||
ts_off_ms: tst.1,
|
||||
ts_off_ns: tst.2,
|
||||
mins: self.vals.mins,
|
||||
maxs: self.vals.maxs,
|
||||
avgs: self.vals.avgs,
|
||||
finalised_range: self.finalised_range,
|
||||
timed_out: self.timed_out,
|
||||
};
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> Collectable for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
type Collector = XBinnedScalarEventsCollector<NTY>;
|
||||
|
||||
fn new_collector(bin_count_exp: u32) -> Self::Collector {
|
||||
Self::Collector::new(bin_count_exp)
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> EventsNodeProcessorOutput for XBinnedScalarEvents<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn into_parts(self) -> (Box<dyn Any>, VecDeque<u64>, VecDeque<u64>) {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
@@ -1,550 +0,0 @@
|
||||
use crate::binsdim1::MinMaxAvgDim1Bins;
|
||||
use crate::numops::NumOps;
|
||||
use crate::streams::{Collectable, Collector};
|
||||
use crate::{
|
||||
Appendable, ByteEstimate, Clearable, EventsNodeProcessorOutput, FilterFittingInside, Fits, FitsInside, FrameType,
|
||||
FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, TimeBinnableType,
|
||||
TimeBinnableTypeAggregator, WithLen, WithTimestamps,
|
||||
};
|
||||
use err::Error;
|
||||
use items_0::subfr::SubFrId;
|
||||
use netpod::log::*;
|
||||
use netpod::timeunits::*;
|
||||
use netpod::{NanoRange, Shape};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::mem;
|
||||
use tokio::fs::File;
|
||||
|
||||
// TODO rename Wave -> Dim1
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct XBinnedWaveEvents<NTY> {
|
||||
pub tss: Vec<u64>,
|
||||
pub mins: Vec<Vec<NTY>>,
|
||||
pub maxs: Vec<Vec<NTY>>,
|
||||
pub avgs: Vec<Vec<f32>>,
|
||||
}
|
||||
|
||||
impl<NTY> FrameTypeInnerStatic for XBinnedWaveEvents<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
const FRAME_TYPE_ID: u32 = crate::X_BINNED_WAVE_EVENTS_FRAME_TYPE_ID + NTY::SUB;
|
||||
}
|
||||
|
||||
impl<NTY> FrameType for XBinnedWaveEvents<NTY>
|
||||
where
|
||||
NTY: SubFrId,
|
||||
{
|
||||
fn frame_type_id(&self) -> u32 {
|
||||
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> XBinnedWaveEvents<NTY> {
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
tss: vec![],
|
||||
mins: vec![],
|
||||
maxs: vec![],
|
||||
avgs: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithLen for XBinnedWaveEvents<NTY> {
|
||||
fn len(&self) -> usize {
|
||||
self.tss.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> WithTimestamps for XBinnedWaveEvents<NTY> {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
self.tss[ix]
|
||||
}
|
||||
}
|
||||
|
||||
impl<NTY> ByteEstimate for XBinnedWaveEvents<NTY> {
|
||||
fn byte_estimate(&self) -> u64 {
|
||||
if self.tss.len() == 0 {
|
||||
0
|
||||
} else {
|
||||
// TODO improve via a const fn on NTY
self.tss.len() as u64 * 20 * self.avgs[0].len() as u64
}
}
}

impl<NTY> RangeOverlapInfo for XBinnedWaveEvents<NTY> {
fn ends_before(&self, range: NanoRange) -> bool {
match self.tss.last() {
Some(&ts) => ts < range.beg,
None => true,
}
}

fn ends_after(&self, range: NanoRange) -> bool {
match self.tss.last() {
Some(&ts) => ts >= range.end,
None => panic!(),
}
}

fn starts_after(&self, range: NanoRange) -> bool {
match self.tss.first() {
Some(&ts) => ts >= range.end,
None => panic!(),
}
}
}

impl<NTY> FitsInside for XBinnedWaveEvents<NTY> {
fn fits_inside(&self, range: NanoRange) -> Fits {
if self.tss.is_empty() {
Fits::Empty
} else {
let t1 = *self.tss.first().unwrap();
let t2 = *self.tss.last().unwrap();
if t2 < range.beg {
Fits::Lower
} else if t1 > range.end {
Fits::Greater
} else if t1 < range.beg && t2 > range.end {
Fits::PartlyLowerAndGreater
} else if t1 < range.beg {
Fits::PartlyLower
} else if t2 > range.end {
Fits::PartlyGreater
} else {
Fits::Inside
}
}
}
}

impl<NTY> FilterFittingInside for XBinnedWaveEvents<NTY> {
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
match self.fits_inside(fit_range) {
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
_ => None,
}
}
}

impl<NTY> PushableIndex for XBinnedWaveEvents<NTY>
where
NTY: NumOps,
{
fn push_index(&mut self, src: &Self, ix: usize) {
self.tss.push(src.tss[ix]);
// TODO not nice.
self.mins.push(src.mins[ix].clone());
self.maxs.push(src.maxs[ix].clone());
self.avgs.push(src.avgs[ix].clone());
}
}

impl<NTY> NewEmpty for XBinnedWaveEvents<NTY> {
fn empty(_shape: Shape) -> Self {
Self {
tss: Vec::new(),
avgs: Vec::new(),
mins: Vec::new(),
maxs: Vec::new(),
}
}
}

impl<NTY> Appendable for XBinnedWaveEvents<NTY>
where
NTY: NumOps,
{
fn empty_like_self(&self) -> Self {
Self::empty()
}

fn append(&mut self, src: &Self) {
self.tss.extend_from_slice(&src.tss);
self.mins.extend_from_slice(&src.mins);
self.maxs.extend_from_slice(&src.maxs);
self.avgs.extend_from_slice(&src.avgs);
}

fn append_zero(&mut self, ts1: u64, _ts2: u64) {
self.tss.push(ts1);
self.mins.push(Vec::new());
self.maxs.push(Vec::new());
self.avgs.push(Vec::new());
}
}

impl<NTY> Clearable for XBinnedWaveEvents<NTY> {
fn clear(&mut self) {
self.tss.clear();
self.mins.clear();
self.maxs.clear();
self.avgs.clear();
}
}

impl<NTY> ReadableFromFile for XBinnedWaveEvents<NTY>
where
NTY: NumOps,
{
fn read_from_file(_file: File) -> Result<ReadPbv<Self>, Error> {
// TODO refactor types such that this impl is not needed.
panic!()
}

fn from_buf(_buf: &[u8]) -> Result<Self, Error> {
panic!()
}
}

impl<NTY> TimeBinnableType for XBinnedWaveEvents<NTY>
where
NTY: NumOps,
{
type Output = MinMaxAvgDim1Bins<NTY>;
type Aggregator = XBinnedWaveEventsAggregator<NTY>;

fn aggregator(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self::Aggregator {
debug!(
"TimeBinnableType for XBinnedWaveEvents aggregator() range {:?} x_bin_count {} do_time_weight {}",
range, x_bin_count, do_time_weight
);
Self::Aggregator::new(range, x_bin_count, do_time_weight)
}
}
|
||||
|
||||
pub struct XBinnedWaveEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
range: NanoRange,
|
||||
count: u64,
|
||||
min: Option<Vec<NTY>>,
|
||||
max: Option<Vec<NTY>>,
|
||||
sumc: u64,
|
||||
sum: Vec<f32>,
|
||||
int_ts: u64,
|
||||
last_ts: u64,
|
||||
last_avg: Option<Vec<f32>>,
|
||||
last_min: Option<Vec<NTY>>,
|
||||
last_max: Option<Vec<NTY>>,
|
||||
do_time_weight: bool,
|
||||
}
|
||||
|
||||
impl<NTY> XBinnedWaveEventsAggregator<NTY>
|
||||
where
|
||||
NTY: NumOps,
|
||||
{
|
||||
pub fn new(range: NanoRange, x_bin_count: usize, do_time_weight: bool) -> Self {
|
||||
Self {
|
||||
int_ts: range.beg,
|
||||
range,
|
||||
count: 0,
|
||||
min: None,
|
||||
max: None,
|
||||
sumc: 0,
|
||||
sum: vec![0f32; x_bin_count],
|
||||
last_ts: 0,
|
||||
last_avg: None,
|
||||
last_min: None,
|
||||
last_max: None,
|
||||
do_time_weight,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO get rid of clones.
|
||||
fn apply_min_max(&mut self, min: &Vec<NTY>, max: &Vec<NTY>) {
|
||||
self.min = match self.min.take() {
|
||||
None => Some(min.clone()),
|
||||
Some(cmin) => {
|
||||
let a = cmin
|
||||
.into_iter()
|
||||
.zip(min)
|
||||
.map(|(a, b)| if a < *b { a } else { b.clone() })
|
||||
.collect();
|
||||
Some(a)
|
||||
}
|
||||
};
|
||||
self.max = match self.max.take() {
|
||||
None => Some(max.clone()),
|
||||
Some(cmax) => {
|
||||
let a = cmax
|
||||
.into_iter()
|
||||
.zip(max)
.map(|(a, b)| if a > *b { a } else { b.clone() })
.collect();
Some(a)
}
};
}

fn apply_event_unweight(&mut self, avg: &Vec<f32>, min: &Vec<NTY>, max: &Vec<NTY>) {
//debug!("apply_event_unweight");
self.apply_min_max(&min, &max);
let sum = mem::replace(&mut self.sum, vec![]);
self.sum = sum
.into_iter()
.zip(avg)
.map(|(a, &b)| if b.is_nan() { a } else { a + b })
.collect();
self.sumc += 1;
}
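
// Time-weighted accumulation: the event held in `last_*` is weighted by the
// fraction of the bin it covered, w = (ts - int_ts) / range.delta().
// Sketch with hypothetical numbers: in a 10 s bin, if last_avg = [2.0] was in
// effect for 4 s before `ts`, this adds 2.0 * 0.4 to sum[0] and moves
// `int_ts` forward to `ts`.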
fn apply_event_time_weight(&mut self, ts: u64) {
//debug!("apply_event_time_weight");
if let (Some(avg), Some(min), Some(max)) = (self.last_avg.take(), self.last_min.take(), self.last_max.take()) {
self.apply_min_max(&min, &max);
let w = (ts - self.int_ts) as f32 / self.range.delta() as f32;
let sum = mem::replace(&mut self.sum, vec![]);
self.sum = sum
.into_iter()
.zip(&avg)
.map(|(a, &b)| if b.is_nan() { a } else { a + b * w })
.collect();
self.sumc += 1;
self.int_ts = ts;
self.last_avg = Some(avg);
self.last_min = Some(min);
self.last_max = Some(max);
}
}

fn ingest_unweight(&mut self, item: &XBinnedWaveEvents<NTY>) {
for i1 in 0..item.tss.len() {
let ts = item.tss[i1];
let avg = &item.avgs[i1];
let min = &item.mins[i1];
let max = &item.maxs[i1];
if ts < self.range.beg {
} else if ts >= self.range.end {
} else {
self.apply_event_unweight(avg, min, max);
self.count += 1;
}
}
}
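
// Time-weighted ingest: events with ts < int_ts only update the `last_*`
// state (they define the value in effect when the bin starts), events inside
// the range first close out the previous value via apply_event_time_weight
// and then become the new `last_*`, and an event at or past range.end stops
// the scan.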
fn ingest_time_weight(&mut self, item: &XBinnedWaveEvents<NTY>) {
for i1 in 0..item.tss.len() {
let ts = item.tss[i1];
let avg = &item.avgs[i1];
let min = &item.mins[i1];
let max = &item.maxs[i1];
if ts < self.int_ts {
self.last_ts = ts;
self.last_avg = Some(avg.clone());
self.last_min = Some(min.clone());
self.last_max = Some(max.clone());
} else if ts >= self.range.end {
return;
} else {
self.apply_event_time_weight(ts);
self.count += 1;
self.last_ts = ts;
self.last_avg = Some(avg.clone());
self.last_min = Some(min.clone());
self.last_max = Some(max.clone());
}
}
}

fn result_reset_unweight(&mut self, range: NanoRange, _expand: bool) -> MinMaxAvgDim1Bins<NTY> {
let avg = if self.sumc == 0 {
None
} else {
Some(self.sum.iter().map(|k| *k / self.sumc as f32).collect())
};
let min = mem::replace(&mut self.min, None);
let max = mem::replace(&mut self.max, None);
let ret = MinMaxAvgDim1Bins {
ts1s: vec![self.range.beg],
ts2s: vec![self.range.end],
counts: vec![self.count],
mins: vec![min],
maxs: vec![max],
avgs: vec![avg],
};
self.int_ts = range.beg;
self.range = range;
self.count = 0;
self.min = None;
self.max = None;
self.sumc = 0;
// Keep the x-bin count: reset the sum to zeros of its current length.
self.sum = vec![0f32; self.sum.len()];
ret
}

fn result_reset_time_weight(&mut self, range: NanoRange, expand: bool) -> MinMaxAvgDim1Bins<NTY> {
// TODO check callsite for correct expand status.
if true || expand {
self.apply_event_time_weight(self.range.end);
}
let avg = if self.sumc == 0 {
None
} else {
let n = self.sum.len();
Some(mem::replace(&mut self.sum, vec![0f32; n]))
};
let min = mem::replace(&mut self.min, None);
let max = mem::replace(&mut self.max, None);
let ret = MinMaxAvgDim1Bins {
ts1s: vec![self.range.beg],
ts2s: vec![self.range.end],
counts: vec![self.count],
mins: vec![min],
maxs: vec![max],
avgs: vec![avg],
};
self.int_ts = range.beg;
self.range = range;
self.count = 0;
//self.min = None;
//self.max = None;
//self.sum = vec![0f32; ret.avgs.len()];
self.sumc = 0;
ret
}
}

impl<NTY> TimeBinnableTypeAggregator for XBinnedWaveEventsAggregator<NTY>
where
NTY: NumOps,
{
type Input = XBinnedWaveEvents<NTY>;
type Output = MinMaxAvgDim1Bins<NTY>;

fn range(&self) -> &NanoRange {
&self.range
}

fn ingest(&mut self, item: &Self::Input) {
if self.do_time_weight {
self.ingest_time_weight(item)
} else {
self.ingest_unweight(item)
}
}

fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output {
if self.do_time_weight {
self.result_reset_time_weight(range, expand)
} else {
self.result_reset_unweight(range, expand)
}
}
}

#[derive(Serialize, Deserialize)]
pub struct XBinnedWaveEventsCollectedResult<NTY> {
#[serde(rename = "tsAnchor")]
ts_anchor_sec: u64,
#[serde(rename = "tsMs")]
ts_off_ms: Vec<u64>,
#[serde(rename = "tsNs")]
ts_off_ns: Vec<u64>,
mins: Vec<Vec<NTY>>,
maxs: Vec<Vec<NTY>>,
avgs: Vec<Vec<f32>>,
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "rangeFinal")]
finalised_range: bool,
#[serde(skip_serializing_if = "crate::bool_is_false", rename = "timedOut")]
timed_out: bool,
}
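
// With the serde renames above, a collected result serializes roughly like
// this (hypothetical values):
//   {"tsAnchor": 1600000000, "tsMs": [0, 500], "tsNs": [0, 0],
//    "mins": [[1, 2], [0, 3]], "maxs": [[4, 5], [2, 7]],
//    "avgs": [[2.5, 3.0], [1.0, 5.0]], "rangeFinal": true}
// `rangeFinal` and `timedOut` are omitted while they are false.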

pub struct XBinnedWaveEventsCollector<NTY> {
vals: XBinnedWaveEvents<NTY>,
finalised_range: bool,
timed_out: bool,
#[allow(dead_code)]
bin_count_exp: u32,
}

impl<NTY> XBinnedWaveEventsCollector<NTY> {
pub fn new(bin_count_exp: u32) -> Self {
Self {
finalised_range: false,
timed_out: false,
vals: XBinnedWaveEvents::empty(),
bin_count_exp,
}
}
}

impl<NTY> WithLen for XBinnedWaveEventsCollector<NTY> {
fn len(&self) -> usize {
self.vals.tss.len()
}
}

impl<NTY> Collector for XBinnedWaveEventsCollector<NTY>
where
NTY: NumOps,
{
type Input = XBinnedWaveEvents<NTY>;
type Output = XBinnedWaveEventsCollectedResult<NTY>;

fn ingest(&mut self, src: &Self::Input) {
self.vals.append(src);
}

fn set_range_complete(&mut self) {
self.finalised_range = true;
}

fn set_timed_out(&mut self) {
self.timed_out = true;
}

fn result(self) -> Result<Self::Output, Error> {
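// The anchor is the whole-second part of the first event's timestamp (0 if
// there are no events); each event then stores millisecond and nanosecond
// offsets, so ts = ts_anchor_sec * SEC + ts_off_ms[i] * MS + ts_off_ns[i].
// Hypothetical example, assuming SEC = 1_000_000_000 and MS = 1_000_000:
// ts = 1_600_000_000_123_456_789 gives anchor 1_600_000_000, ms offset 123,
// ns offset 456_789.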
let ts_anchor_sec = self.vals.tss.first().map_or(0, |&k| k) / SEC;
let ts_anchor_ns = ts_anchor_sec * SEC;
let ts_off_ms: Vec<_> = self.vals.tss.iter().map(|&k| (k - ts_anchor_ns) / MS).collect();
let ts_off_ns = self
.vals
.tss
.iter()
.zip(ts_off_ms.iter().map(|&k| k * MS))
.map(|(&j, k)| (j - ts_anchor_ns - k))
.collect();
let ret = Self::Output {
finalised_range: self.finalised_range,
timed_out: self.timed_out,
ts_anchor_sec,
ts_off_ms,
ts_off_ns,
mins: self.vals.mins,
maxs: self.vals.maxs,
avgs: self.vals.avgs,
};
Ok(ret)
}
}

impl<NTY> Collectable for XBinnedWaveEvents<NTY>
where
NTY: NumOps,
{
type Collector = XBinnedWaveEventsCollector<NTY>;

fn new_collector(bin_count_exp: u32) -> Self::Collector {
Self::Collector::new(bin_count_exp)
}
}

impl<NTY> EventsNodeProcessorOutput for XBinnedWaveEvents<NTY>
where
NTY: NumOps,
{
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}

fn into_parts(self) -> (Box<dyn Any>, VecDeque<u64>, VecDeque<u64>) {
todo!()
}
}

@@ -1146,3 +1146,73 @@ mod test_frame {
assert_eq!(item.tss(), &[123]);
}
}

/*
TODO adapt and enable
#[test]
fn bin_binned_01() {
use binsdim0::MinMaxAvgDim0Bins;
let edges = vec![SEC * 1000, SEC * 1010, SEC * 1020, SEC * 1030];
let inp0 = <MinMaxAvgDim0Bins<u32> as NewEmpty>::empty(Shape::Scalar);
let mut time_binner = inp0.time_binner_new(edges, true);
let inp1 = MinMaxAvgDim0Bins::<u32> {
ts1s: vec![SEC * 1000, SEC * 1010],
ts2s: vec![SEC * 1010, SEC * 1020],
counts: vec![1, 1],
mins: vec![3, 4],
maxs: vec![10, 9],
avgs: vec![7., 6.],
};
assert_eq!(time_binner.bins_ready_count(), 0);
time_binner.ingest(&inp1);
assert_eq!(time_binner.bins_ready_count(), 1);
time_binner.push_in_progress(false);
assert_eq!(time_binner.bins_ready_count(), 2);
// From here on, pushing any more should not change the bin count:
time_binner.push_in_progress(false);
assert_eq!(time_binner.bins_ready_count(), 2);
// On the other hand, cycling should add one more zero-bin:
time_binner.cycle();
assert_eq!(time_binner.bins_ready_count(), 3);
time_binner.cycle();
assert_eq!(time_binner.bins_ready_count(), 3);
let bins = time_binner.bins_ready().expect("bins should be ready");
eprintln!("bins: {:?}", bins);
assert_eq!(time_binner.bins_ready_count(), 0);
assert_eq!(bins.counts(), &[1, 1, 0]);
// TODO use proper float-compare logic:
assert_eq!(bins.mins(), &[3., 4., 0.]);
assert_eq!(bins.maxs(), &[10., 9., 0.]);
assert_eq!(bins.avgs(), &[7., 6., 0.]);
}

#[test]
fn bin_binned_02() {
use binsdim0::MinMaxAvgDim0Bins;
let edges = vec![SEC * 1000, SEC * 1020];
let inp0 = <MinMaxAvgDim0Bins<u32> as NewEmpty>::empty(Shape::Scalar);
let mut time_binner = inp0.time_binner_new(edges, true);
let inp1 = MinMaxAvgDim0Bins::<u32> {
ts1s: vec![SEC * 1000, SEC * 1010],
ts2s: vec![SEC * 1010, SEC * 1020],
counts: vec![1, 1],
mins: vec![3, 4],
maxs: vec![10, 9],
avgs: vec![7., 6.],
};
assert_eq!(time_binner.bins_ready_count(), 0);
time_binner.ingest(&inp1);
assert_eq!(time_binner.bins_ready_count(), 0);
time_binner.cycle();
assert_eq!(time_binner.bins_ready_count(), 1);
time_binner.cycle();
//assert_eq!(time_binner.bins_ready_count(), 2);
let bins = time_binner.bins_ready().expect("bins should be ready");
eprintln!("bins: {:?}", bins);
assert_eq!(time_binner.bins_ready_count(), 0);
assert_eq!(bins.counts(), &[2]);
assert_eq!(bins.mins(), &[3.]);
assert_eq!(bins.maxs(), &[10.]);
assert_eq!(bins.avgs(), &[13. / 2.]);
}
*/