TMP WIP
@@ -28,7 +28,7 @@ pub trait TimeBinnableType:
 {
     type Output: TimeBinnableType;
     type Aggregator: TimeBinnableTypeAggregator<Input = Self, Output = Self::Output> + Send + Unpin;
-    fn aggregator(range: NanoRange) -> Self::Aggregator;
+    fn aggregator(range: NanoRange, bin_count: usize) -> Self::Aggregator;
 }

 pub struct TBinnerStream<S, TBT>
@@ -38,6 +38,7 @@ where
 {
     inp: Pin<Box<S>>,
     spec: BinnedRange,
+    bin_count: usize,
     curbin: u32,
     left: Option<Poll<Option<Sitemty<TBT>>>>,
     aggtor: Option<<TBT as TimeBinnableType>::Aggregator>,
@@ -55,14 +56,15 @@ where
     S: Stream<Item = Sitemty<TBT>> + Send + Unpin + 'static,
     TBT: TimeBinnableType,
 {
-    pub fn new(inp: S, spec: BinnedRange) -> Self {
+    pub fn new(inp: S, spec: BinnedRange, bin_count: usize) -> Self {
         let range = spec.get_range(0);
         Self {
             inp: Box::pin(inp),
             spec,
+            bin_count,
             curbin: 0,
             left: None,
-            aggtor: Some(<TBT as TimeBinnableType>::aggregator(range)),
+            aggtor: Some(<TBT as TimeBinnableType>::aggregator(range, bin_count)),
             tmp_agg_results: VecDeque::new(),
             inp_completed: false,
             all_bins_emitted: false,
@@ -90,7 +92,7 @@ where
         let range = self.spec.get_range(self.curbin);
         let ret = self
             .aggtor
-            .replace(<TBT as TimeBinnableType>::aggregator(range))
+            .replace(<TBT as TimeBinnableType>::aggregator(range, self.bin_count))
             .unwrap()
             .result();
         // TODO should we accumulate bins before emit? Maybe not, we want to stay responsive.
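For orientation, here is a minimal, self-contained sketch of the call shape this change introduces: the aggregator factory now receives an x-bin count alongside the time range, and the stream keeps that count so it can construct a fresh aggregator per time bin. All types below are toy stand-ins, not the crate's real definitions.

// Toy stand-ins to illustrate the new `aggregator(range, bin_count)` shape;
// the real NanoRange / TimeBinnableType live in this repository.
#[derive(Clone, Debug)]
pub struct NanoRange {
    pub beg: u64,
    pub end: u64,
}

pub trait TimeBinnableType: Sized {
    type Aggregator;
    // The factory now also receives the requested number of x bins.
    fn aggregator(range: NanoRange, bin_count: usize) -> Self::Aggregator;
}

pub struct ScalarEvent(pub f32);

pub struct ScalarAggregator {
    pub range: NanoRange,
}

impl TimeBinnableType for ScalarEvent {
    type Aggregator = ScalarAggregator;

    // Scalar channels have no x dimension, so the count is ignored here;
    // a waveform type would size its per-x-bin state from it.
    fn aggregator(range: NanoRange, _bin_count: usize) -> Self::Aggregator {
        ScalarAggregator { range }
    }
}

fn main() {
    let agg = ScalarEvent::aggregator(NanoRange { beg: 0, end: 1_000_000_000 }, 13);
    println!("aggregating {:?}", agg.range);
}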
@@ -3,12 +3,12 @@ use crate::agg::streams::Appendable;
 use crate::agg::{Fits, FitsInside};
 use crate::binned::dim1::MinMaxAvgDim1Bins;
 use crate::binned::{
-    EventsNodeProcessor, FilterFittingInside, MinMaxAvgBins, NumOps, PushableIndex, RangeOverlapInfo, ReadPbv,
-    ReadableFromFile, WithLen, WithTimestamps,
+    EventsNodeProcessor, FilterFittingInside, MinMaxAvgBins, MinMaxAvgWaveBins, NumOps, PushableIndex,
+    RangeOverlapInfo, ReadPbv, ReadableFromFile, WithLen, WithTimestamps,
 };
 use crate::decode::EventValues;
 use err::Error;
-use netpod::NanoRange;
+use netpod::{NanoRange, Shape};
 use serde::{Deserialize, Serialize};
 use std::marker::PhantomData;
 use tokio::fs::File;
@@ -24,11 +24,16 @@ where
     type Input = NTY;
     type Output = EventValues<NTY>;

-    fn process(inp: EventValues<Self::Input>) -> Self::Output {
+    fn create(shape: Shape) -> Self {
+        Self { _m1: PhantomData }
+    }
+
+    fn process(&self, inp: EventValues<Self::Input>) -> Self::Output {
         inp
     }
 }

+// TODO rename Scalar -> Dim0
 #[derive(Serialize, Deserialize)]
 pub struct XBinnedScalarEvents<NTY> {
     tss: Vec<u64>,
@@ -169,7 +174,7 @@ where
     type Output = MinMaxAvgBins<NTY>;
     type Aggregator = XBinnedScalarEventsAggregator<NTY>;

-    fn aggregator(range: NanoRange) -> Self::Aggregator {
+    fn aggregator(range: NanoRange, bin_count: usize) -> Self::Aggregator {
         Self::Aggregator::new(range)
     }
 }
@@ -269,6 +274,241 @@ where
     }
 }

+// TODO rename Wave -> Dim1
+#[derive(Serialize, Deserialize)]
+pub struct XBinnedWaveEvents<NTY> {
+    tss: Vec<u64>,
+    mins: Vec<Vec<NTY>>,
+    maxs: Vec<Vec<NTY>>,
+    avgs: Vec<Vec<f32>>,
+}
+
+impl<NTY> XBinnedWaveEvents<NTY> {
+    pub fn empty() -> Self {
+        Self {
+            tss: vec![],
+            mins: vec![],
+            maxs: vec![],
+            avgs: vec![],
+        }
+    }
+}
+
+impl<NTY> WithLen for XBinnedWaveEvents<NTY> {
+    fn len(&self) -> usize {
+        self.tss.len()
+    }
+}
+
+impl<NTY> WithTimestamps for XBinnedWaveEvents<NTY> {
+    fn ts(&self, ix: usize) -> u64 {
+        self.tss[ix]
+    }
+}
+
+impl<NTY> RangeOverlapInfo for XBinnedWaveEvents<NTY> {
+    fn ends_before(&self, range: NanoRange) -> bool {
+        match self.tss.last() {
+            Some(&ts) => ts < range.beg,
+            None => true,
+        }
+    }
+
+    fn ends_after(&self, range: NanoRange) -> bool {
+        match self.tss.last() {
+            Some(&ts) => ts >= range.end,
+            None => panic!(),
+        }
+    }
+
+    fn starts_after(&self, range: NanoRange) -> bool {
+        match self.tss.first() {
+            Some(&ts) => ts >= range.end,
+            None => panic!(),
+        }
+    }
+}
+
+impl<NTY> FitsInside for XBinnedWaveEvents<NTY> {
+    fn fits_inside(&self, range: NanoRange) -> Fits {
+        if self.tss.is_empty() {
+            Fits::Empty
+        } else {
+            let t1 = *self.tss.first().unwrap();
+            let t2 = *self.tss.last().unwrap();
+            if t2 < range.beg {
+                Fits::Lower
+            } else if t1 > range.end {
+                Fits::Greater
+            } else if t1 < range.beg && t2 > range.end {
+                Fits::PartlyLowerAndGreater
+            } else if t1 < range.beg {
+                Fits::PartlyLower
+            } else if t2 > range.end {
+                Fits::PartlyGreater
+            } else {
+                Fits::Inside
+            }
+        }
+    }
+}
+
+impl<NTY> FilterFittingInside for XBinnedWaveEvents<NTY> {
+    fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
+        match self.fits_inside(fit_range) {
+            Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
+            _ => None,
+        }
+    }
+}
+
+impl<NTY> PushableIndex for XBinnedWaveEvents<NTY>
+where
+    NTY: NumOps,
+{
+    fn push_index(&mut self, src: &Self, ix: usize) {
+        self.tss.push(src.tss[ix]);
+        self.mins.push(src.mins[ix].clone());
+        self.maxs.push(src.maxs[ix].clone());
+        self.avgs.push(src.avgs[ix].clone());
+    }
+}
+
+impl<NTY> Appendable for XBinnedWaveEvents<NTY>
+where
+    NTY: NumOps,
+{
+    fn empty() -> Self {
+        Self::empty()
+    }
+
+    fn append(&mut self, src: &Self) {
+        self.tss.extend_from_slice(&src.tss);
+        self.mins.extend_from_slice(&src.mins);
+        self.maxs.extend_from_slice(&src.maxs);
+        self.avgs.extend_from_slice(&src.avgs);
+    }
+}
+
+impl<NTY> ReadableFromFile for XBinnedWaveEvents<NTY>
+where
+    NTY: NumOps,
+{
+    fn read_from_file(_file: File) -> Result<ReadPbv<Self>, Error> {
+        // TODO refactor types such that this impl is not needed.
+        panic!()
+    }
+
+    fn from_buf(_buf: &[u8]) -> Result<Self, Error> {
+        panic!()
+    }
+}
+
+impl<NTY> TimeBinnableType for XBinnedWaveEvents<NTY>
+where
+    NTY: NumOps,
+{
+    type Output = MinMaxAvgWaveBins<NTY>;
+    type Aggregator = XBinnedWaveEventsAggregator<NTY>;
+
+    fn aggregator(range: NanoRange, bin_count: usize) -> Self::Aggregator {
+        Self::Aggregator::new(range, bin_count)
+    }
+}
+
+pub struct XBinnedWaveEventsAggregator<NTY>
+where
+    NTY: NumOps,
+{
+    range: NanoRange,
+    count: u64,
+    min: Vec<NTY>,
+    max: Vec<NTY>,
+    sum: Vec<f32>,
+    sumc: u64,
+}
+
+impl<NTY> XBinnedWaveEventsAggregator<NTY>
+where
+    NTY: NumOps,
+{
+    pub fn new(range: NanoRange, bin_count: usize) -> Self {
+        Self {
+            range,
+            count: 0,
+            min: vec![NTY::min_or_nan(); bin_count],
+            max: vec![NTY::max_or_nan(); bin_count],
+            sum: vec![0f32; bin_count],
+            sumc: 0,
+        }
+    }
+}
+
+impl<NTY> TimeBinnableTypeAggregator for XBinnedWaveEventsAggregator<NTY>
+where
+    NTY: NumOps,
+{
+    type Input = XBinnedWaveEvents<NTY>;
+    type Output = MinMaxAvgWaveBins<NTY>;
+
+    fn range(&self) -> &NanoRange {
+        &self.range
+    }
+
+    fn ingest(&mut self, item: &Self::Input) {
+        for i1 in 0..item.tss.len() {
+            let ts = item.tss[i1];
+            if ts < self.range.beg {
+                continue;
+            } else if ts >= self.range.end {
+                continue;
+            } else {
+                for (i2, v) in item.mins[i1].iter().enumerate() {
+                    if *v < self.min[i2] || self.min[i2].is_nan() {
+                        self.min[i2] = *v;
+                    }
+                }
+                for (i2, v) in item.maxs[i1].iter().enumerate() {
+                    if *v > self.max[i2] || self.max[i2].is_nan() {
+                        self.max[i2] = *v;
+                    }
+                }
+                for (i2, v) in item.avgs[i1].iter().enumerate() {
+                    if !v.is_nan() {
+                        self.sum[i2] += v;
+                    }
+                }
+                self.sumc += 1;
+                self.count += 1;
+            }
+        }
+    }
+
+    fn result(self) -> Self::Output {
+        if self.sumc == 0 {
+            Self::Output {
+                ts1s: vec![self.range.beg],
+                ts2s: vec![self.range.end],
+                counts: vec![self.count],
+                mins: vec![None],
+                maxs: vec![None],
+                avgs: vec![None],
+            }
+        } else {
+            let avg = self.sum.iter().map(|k| *k / self.sumc as f32).collect();
+            Self::Output {
+                ts1s: vec![self.range.beg],
+                ts2s: vec![self.range.end],
+                counts: vec![self.count],
+                mins: vec![Some(self.min)],
+                maxs: vec![Some(self.max)],
+                avgs: vec![Some(avg)],
+            }
+        }
+    }
+}
+
 #[derive(Serialize, Deserialize)]
 pub struct WaveEvents<NTY> {
     pub tss: Vec<u64>,
@@ -398,8 +638,8 @@ where
     type Output = MinMaxAvgDim1Bins<NTY>;
     type Aggregator = WaveEventsAggregator<NTY>;

-    fn aggregator(range: NanoRange) -> Self::Aggregator {
-        Self::Aggregator::new(range)
+    fn aggregator(range: NanoRange, bin_count: usize) -> Self::Aggregator {
+        Self::Aggregator::new(range, bin_count)
     }
 }
@@ -419,11 +659,12 @@ impl<NTY> WaveEventsAggregator<NTY>
 where
     NTY: NumOps,
 {
-    pub fn new(range: NanoRange) -> Self {
+    pub fn new(range: NanoRange, bin_count: usize) -> Self {
         Self {
             range,
             count: 0,
-            min: None,
+            // TODO create the right number of bins right here:
+            min: err::todoval(),
             max: None,
             sumc: 0,
             sum: None,
@@ -525,9 +766,13 @@ where
     type Input = Vec<NTY>;
     type Output = XBinnedScalarEvents<NTY>;

-    fn process(inp: EventValues<Self::Input>) -> Self::Output {
+    fn create(shape: Shape) -> Self {
+        Self { _m1: PhantomData }
+    }
+
+    fn process(&self, inp: EventValues<Self::Input>) -> Self::Output {
         let nev = inp.tss.len();
-        let mut ret = XBinnedScalarEvents {
+        let mut ret = Self::Output {
             tss: inp.tss,
             xbincount: Vec::with_capacity(nev),
             mins: Vec::with_capacity(nev),
@@ -535,6 +780,8 @@ where
             avgs: Vec::with_capacity(nev),
         };
         for i1 in 0..nev {
+            // TODO why do I work here with Option?
+            err::todo();
             let mut min = None;
             let mut max = None;
             let mut sum = 0f32;
@@ -584,6 +831,7 @@ where
 }

 pub struct WaveNBinner<NTY> {
+    bin_count: usize,
     _m1: PhantomData<NTY>,
 }

@@ -592,11 +840,60 @@ where
     NTY: NumOps,
 {
     type Input = Vec<NTY>;
-    // TODO need new container type for this case:
-    type Output = XBinnedScalarEvents<NTY>;
+    type Output = XBinnedWaveEvents<NTY>;

-    fn process(_inp: EventValues<Self::Input>) -> Self::Output {
-        err::todoval()
+    fn create(shape: Shape) -> Self {
+        // TODO get rid of panic potential
+        let bin_count = (if let Shape::Wave(n) = shape { n } else { panic!() }) as usize;
+        Self {
+            bin_count,
+            _m1: PhantomData,
+        }
+    }
+
+    fn process(&self, inp: EventValues<Self::Input>) -> Self::Output {
+        let nev = inp.tss.len();
+        let mut ret = Self::Output {
+            tss: inp.tss,
+            mins: Vec::with_capacity(nev),
+            maxs: Vec::with_capacity(nev),
+            avgs: Vec::with_capacity(nev),
+        };
+        for i1 in 0..nev {
+            let mut min = vec![NTY::min_or_nan(); self.bin_count];
+            let mut max = vec![NTY::max_or_nan(); self.bin_count];
+            let mut sum = vec![0f32; self.bin_count];
+            let mut sumc = vec![0; self.bin_count];
+            for (i2, &v) in inp.values[i1].iter().enumerate() {
+                let i3 = i2 * self.bin_count / inp.values[i1].len();
+                if v < min[i3] {
+                    min[i3] = v;
+                }
+                if v > max[i3] {
+                    max[i3] = v;
+                }
+                if !v.is_nan() {
+                    sum[i3] += v.as_();
+                    sumc[i3] += 1;
+                }
+            }
+            ret.mins.push(min);
+            ret.maxs.push(max);
+            let avg = sum
+                .iter()
+                .enumerate()
+                .map(|(i3, _)| {
+                    if sumc[i3] > 0 {
+                        sum[i3] / sumc[i3] as f32
+                    } else {
+                        f32::NAN
+                    }
+                })
+                .collect();
+            ret.avgs.push(avg);
+        }
+        ret
     }
 }
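The index arithmetic in WaveNBinner::process above distributes the samples of one waveform over the requested x bins. A small standalone demo of that mapping:

// Demo of the mapping used above: sample i2 of a waveform of length `len`
// lands in x bin i2 * bin_count / len (integer division).
fn main() {
    let len = 8usize;
    let bin_count = 3usize;
    for i2 in 0..len {
        let i3 = i2 * bin_count / len;
        println!("sample {} -> x bin {}", i2, i3);
    }
    // Output: bins 0,0,0,1,1,1,2,2 -- when len is not a multiple of
    // bin_count, the earlier bins receive the extra samples.
}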
@@ -611,7 +908,11 @@ where
     type Input = Vec<NTY>;
     type Output = WaveEvents<NTY>;

-    fn process(inp: EventValues<Self::Input>) -> Self::Output {
+    fn create(shape: Shape) -> Self {
+        Self { _m1: PhantomData }
+    }
+
+    fn process(&self, inp: EventValues<Self::Input>) -> Self::Output {
         if false {
             let n = if inp.values.len() > 0 { inp.values[0].len() } else { 0 };
             let n = if n > 5 { 5 } else { n };
@@ -23,13 +23,14 @@ use futures_util::{FutureExt, StreamExt};
 use netpod::log::*;
 use netpod::timeunits::SEC;
 use netpod::{
-    BinnedRange, ByteOrder, NanoRange, NodeConfigCached, PerfOpts, PreBinnedPatchIterator, PreBinnedPatchRange,
-    ScalarType, Shape,
+    AggKind, BinnedRange, ByteOrder, NanoRange, NodeConfigCached, PerfOpts, PreBinnedPatchIterator,
+    PreBinnedPatchRange, ScalarType, Shape,
 };
-use num_traits::{AsPrimitive, Bounded, Zero};
+use num_traits::{AsPrimitive, Bounded, Float, Zero};
 use parse::channelconfig::{extract_matching_config_entry, read_local_config, MatchingConfigEntry};
 use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize, Serializer};
 use std::fmt;
 use std::fmt::Debug;
 use std::future::Future;
 use std::marker::PhantomData;
@@ -122,8 +123,13 @@ where
         range: query.range().clone(),
         agg_kind: query.agg_kind().clone(),
     };
+    let x_bin_count = if let AggKind::DimXBinsN(n) = query.agg_kind() {
+        *n as usize
+    } else {
+        0
+    };
     let s = MergedFromRemotes::<ENP>::new(evq, perf_opts, node_config.node_config.cluster.clone());
-    let s = TBinnerStream::<_, <ENP as EventsNodeProcessor>::Output>::new(s, range);
+    let s = TBinnerStream::<_, <ENP as EventsNodeProcessor>::Output>::new(s, range, x_bin_count);
     let ret = BinnedResponseStat {
         stream: Box::pin(s),
         bin_count,
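As a side note, the x_bin_count extraction above yields a nonzero count only for AggKind::DimXBinsN. A standalone sketch with a stand-in enum:

// Stand-in AggKind; only DimXBinsN carries a requested x-bin count,
// all other kinds fall back to 0 here, mirroring the hunk above.
enum AggKind {
    Plain,
    DimXBins1,
    DimXBinsN(u32),
}

fn x_bin_count(agg_kind: &AggKind) -> usize {
    if let AggKind::DimXBinsN(n) = agg_kind {
        *n as usize
    } else {
        0
    }
}

fn main() {
    assert_eq!(x_bin_count(&AggKind::DimXBinsN(16)), 16);
    assert_eq!(x_bin_count(&AggKind::DimXBins1), 0);
    assert_eq!(x_bin_count(&AggKind::Plain), 0);
}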
@@ -770,13 +776,47 @@ pub trait NumOps:
     + Serialize
     + DeserializeOwned
 {
+    fn min_or_nan() -> Self;
+    fn max_or_nan() -> Self;
+    fn is_nan(&self) -> bool;
 }

-impl<T> NumOps for T where
-    T: Send + Unpin + Debug + Zero + AsPrimitive<f32> + Bounded + PartialOrd + SubFrId + Serialize + DeserializeOwned
-{
-    fn tmp() {}
-}
+macro_rules! impl_num_ops {
+    ($ty:ident, $min_or_nan:ident, $max_or_nan:ident, $is_nan:ident) => {
+        impl NumOps for $ty {
+            fn min_or_nan() -> Self {
+                $ty::$min_or_nan
+            }
+            fn max_or_nan() -> Self {
+                $ty::$max_or_nan
+            }
+            fn is_nan(&self) -> bool {
+                $is_nan(self)
+            }
+        }
+    };
+}
+
+fn is_nan_int<T>(_x: &T) -> bool {
+    false
+}
+
+fn is_nan_float<T: Float>(x: &T) -> bool {
+    x.is_nan()
+}
+
+impl_num_ops!(u8, MIN, MAX, is_nan_int);
+impl_num_ops!(u16, MIN, MAX, is_nan_int);
+impl_num_ops!(u32, MIN, MAX, is_nan_int);
+impl_num_ops!(u64, MIN, MAX, is_nan_int);
+impl_num_ops!(i8, MIN, MAX, is_nan_int);
+impl_num_ops!(i16, MIN, MAX, is_nan_int);
+impl_num_ops!(i32, MIN, MAX, is_nan_int);
+impl_num_ops!(i64, MIN, MAX, is_nan_int);
+impl_num_ops!(f32, NAN, NAN, is_nan_float);
+impl_num_ops!(f64, NAN, NAN, is_nan_float);

 pub trait EventsDecoder {
     type Output;
     fn ingest(&mut self, event: &[u8]);
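For reference, one invocation of the macro above, impl_num_ops!(f32, NAN, NAN, is_nan_float), expands to roughly the following (a sketch; it assumes the NumOps trait and the is_nan_float helper from this hunk):

// Hand-expanded form of `impl_num_ops!(f32, NAN, NAN, is_nan_float);`.
// Integer types substitute their MIN/MAX constants for the two sentinel
// values; floats start from NAN and rely on the `|| is_nan()` checks in
// the aggregators above.
impl NumOps for f32 {
    fn min_or_nan() -> Self {
        f32::NAN
    }
    fn max_or_nan() -> Self {
        f32::NAN
    }
    fn is_nan(&self) -> bool {
        is_nan_float(self)
    }
}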
@@ -786,7 +826,8 @@ pub trait EventsDecoder {
 pub trait EventsNodeProcessor: Send + Unpin {
     type Input;
     type Output: Send + Unpin + DeserializeOwned + WithTimestamps + TimeBinnableType;
-    fn process(inp: EventValues<Self::Input>) -> Self::Output;
+    fn create(shape: Shape) -> Self;
+    fn process(&self, inp: EventValues<Self::Input>) -> Self::Output;
 }

 pub trait TimeBins: Send + Unpin + WithLen + Appendable + FilterFittingInside {
@@ -799,16 +840,17 @@ pub struct MinMaxAvgBins<NTY> {
     pub ts1s: Vec<u64>,
     pub ts2s: Vec<u64>,
     pub counts: Vec<u64>,
+    // TODO get rid of Option:
     pub mins: Vec<Option<NTY>>,
     pub maxs: Vec<Option<NTY>>,
     pub avgs: Vec<Option<f32>>,
 }

-impl<NTY> std::fmt::Debug for MinMaxAvgBins<NTY>
+impl<NTY> fmt::Debug for MinMaxAvgBins<NTY>
 where
-    NTY: std::fmt::Debug,
+    NTY: fmt::Debug,
 {
-    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         write!(
             fmt,
             "MinMaxAvgBins count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
@@ -951,7 +993,7 @@ where
     type Output = MinMaxAvgBins<NTY>;
     type Aggregator = MinMaxAvgBinsAggregator<NTY>;

-    fn aggregator(range: NanoRange) -> Self::Aggregator {
+    fn aggregator(range: NanoRange, bin_count: usize) -> Self::Aggregator {
         Self::Aggregator::new(range)
     }
 }
@@ -1103,7 +1145,8 @@ impl<NTY> EventValuesAggregator<NTY> {
         Self {
             range,
             count: 0,
-            min: None,
+            // TODO get rid of Option
+            min: err::todoval(),
             max: None,
             sumc: 0,
             sum: 0f32,
@@ -1282,3 +1325,400 @@ pub enum RangeCompletableItem<T> {
     RangeComplete,
     Data(T),
 }
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct MinMaxAvgWaveBins<NTY> {
+    pub ts1s: Vec<u64>,
+    pub ts2s: Vec<u64>,
+    pub counts: Vec<u64>,
+    pub mins: Vec<Option<Vec<NTY>>>,
+    pub maxs: Vec<Option<Vec<NTY>>>,
+    pub avgs: Vec<Option<Vec<f32>>>,
+}
+
+impl<NTY> fmt::Debug for MinMaxAvgWaveBins<NTY>
+where
+    NTY: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            fmt,
+            "MinMaxAvgWaveBins count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
+            self.ts1s.len(),
+            self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
+            self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
+            self.counts,
+            self.mins,
+            self.maxs,
+            self.avgs,
+        )
+    }
+}
+
+impl<NTY> MinMaxAvgWaveBins<NTY> {
+    pub fn empty() -> Self {
+        Self {
+            ts1s: vec![],
+            ts2s: vec![],
+            counts: vec![],
+            mins: vec![],
+            maxs: vec![],
+            avgs: vec![],
+        }
+    }
+}
+
+impl<NTY> FitsInside for MinMaxAvgWaveBins<NTY> {
+    fn fits_inside(&self, range: NanoRange) -> Fits {
+        if self.ts1s.is_empty() {
+            Fits::Empty
+        } else {
+            let t1 = *self.ts1s.first().unwrap();
+            let t2 = *self.ts2s.last().unwrap();
+            if t2 <= range.beg {
+                Fits::Lower
+            } else if t1 >= range.end {
+                Fits::Greater
+            } else if t1 < range.beg && t2 > range.end {
+                Fits::PartlyLowerAndGreater
+            } else if t1 < range.beg {
+                Fits::PartlyLower
+            } else if t2 > range.end {
+                Fits::PartlyGreater
+            } else {
+                Fits::Inside
+            }
+        }
+    }
+}
+
+impl<NTY> FilterFittingInside for MinMaxAvgWaveBins<NTY> {
+    fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
+        match self.fits_inside(fit_range) {
+            Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
+            _ => None,
+        }
+    }
+}
+
+impl<NTY> RangeOverlapInfo for MinMaxAvgWaveBins<NTY> {
+    fn ends_before(&self, range: NanoRange) -> bool {
+        match self.ts2s.last() {
+            Some(&ts) => ts <= range.beg,
+            None => true,
+        }
+    }
+
+    fn ends_after(&self, range: NanoRange) -> bool {
+        match self.ts2s.last() {
+            Some(&ts) => ts > range.end,
+            None => panic!(),
+        }
+    }
+
+    fn starts_after(&self, range: NanoRange) -> bool {
+        match self.ts1s.first() {
+            Some(&ts) => ts >= range.end,
+            None => panic!(),
+        }
+    }
+}
+
+impl<NTY> TimeBins for MinMaxAvgWaveBins<NTY>
+where
+    NTY: NumOps,
+{
+    fn ts1s(&self) -> &Vec<u64> {
+        &self.ts1s
+    }
+
+    fn ts2s(&self) -> &Vec<u64> {
+        &self.ts2s
+    }
+}
+
+impl<NTY> WithLen for MinMaxAvgWaveBins<NTY> {
+    fn len(&self) -> usize {
+        self.ts1s.len()
+    }
+}
+
+impl<NTY> Appendable for MinMaxAvgWaveBins<NTY>
+where
+    NTY: NumOps,
+{
+    fn empty() -> Self {
+        Self::empty()
+    }
+
+    fn append(&mut self, src: &Self) {
+        self.ts1s.extend_from_slice(&src.ts1s);
+        self.ts2s.extend_from_slice(&src.ts2s);
+        self.counts.extend_from_slice(&src.counts);
+        self.mins.extend_from_slice(&src.mins);
+        self.maxs.extend_from_slice(&src.maxs);
+        self.avgs.extend_from_slice(&src.avgs);
+    }
+}
+
+impl<NTY> ReadableFromFile for MinMaxAvgWaveBins<NTY>
+where
+    NTY: NumOps,
+{
+    // TODO this function is not needed in the trait:
+    fn read_from_file(file: File) -> Result<ReadPbv<Self>, Error> {
+        Ok(ReadPbv::new(file))
+    }
+
+    fn from_buf(buf: &[u8]) -> Result<Self, Error> {
+        let dec = serde_cbor::from_slice(&buf)?;
+        Ok(dec)
+    }
+}
+
+impl<NTY> TimeBinnableType for MinMaxAvgWaveBins<NTY>
+where
+    NTY: NumOps,
+{
+    type Output = MinMaxAvgWaveBins<NTY>;
+    type Aggregator = MinMaxAvgWaveBinsAggregator<NTY>;
+
+    fn aggregator(range: NanoRange, x_bin_count: usize) -> Self::Aggregator {
+        Self::Aggregator::new(range, x_bin_count)
+    }
+}
+
+impl<NTY> ToJsonResult for Sitemty<MinMaxAvgWaveBins<NTY>>
+where
+    NTY: NumOps,
+{
+    fn to_json_result(&self) -> Result<Box<dyn ToJsonBytes>, Error> {
+        Ok(Box::new(serde_json::Value::String(format!(
+            "MinMaxAvgWaveBins/non-json-item"
+        ))))
+    }
+}
+
+pub struct MinMaxAvgWaveBinsCollected<NTY> {
+    _m1: PhantomData<NTY>,
+}
+
+impl<NTY> MinMaxAvgWaveBinsCollected<NTY> {
+    pub fn new() -> Self {
+        Self { _m1: PhantomData }
+    }
+}
+
+#[derive(Serialize)]
+pub struct MinMaxAvgWaveBinsCollectedResult<NTY> {
+    ts0: u64,
+    tsoff: Vec<u64>,
+    //ts_bin_edges: Vec<IsoDateTime>,
+    counts: Vec<u64>,
+    mins: Vec<Option<Vec<NTY>>>,
+    maxs: Vec<Option<Vec<NTY>>>,
+    avgs: Vec<Option<Vec<f32>>>,
+    #[serde(skip_serializing_if = "Bool::is_false", rename = "finalisedRange")]
+    finalised_range: bool,
+    #[serde(skip_serializing_if = "Zero::is_zero", rename = "missingBins")]
+    missing_bins: u32,
+    #[serde(skip_serializing_if = "Option::is_none", rename = "continueAt")]
+    //continue_at: Option<IsoDateTime>,
+    continue_at: Option<u64>,
+}
+
+pub struct MinMaxAvgWaveBinsCollector<NTY> {
+    bin_count_exp: u32,
+    timed_out: bool,
+    range_complete: bool,
+    vals: MinMaxAvgWaveBins<NTY>,
+    _m1: PhantomData<NTY>,
+}
+
+impl<NTY> MinMaxAvgWaveBinsCollector<NTY> {
+    pub fn new(bin_count_exp: u32) -> Self {
+        Self {
+            bin_count_exp,
+            timed_out: false,
+            range_complete: false,
+            vals: MinMaxAvgWaveBins::<NTY>::empty(),
+            _m1: PhantomData,
+        }
+    }
+}
+
+impl<NTY> WithLen for MinMaxAvgWaveBinsCollector<NTY>
+where
+    NTY: NumOps + Serialize,
+{
+    fn len(&self) -> usize {
+        self.vals.ts1s.len()
+    }
+}
+
+impl<NTY> Collector for MinMaxAvgWaveBinsCollector<NTY>
+where
+    NTY: NumOps + Serialize,
+{
+    type Input = MinMaxAvgWaveBins<NTY>;
+    type Output = MinMaxAvgWaveBinsCollectedResult<NTY>;
+
+    fn ingest(&mut self, src: &Self::Input) {
+        Appendable::append(&mut self.vals, src);
+    }
+
+    fn set_range_complete(&mut self) {
+        self.range_complete = true;
+    }
+
+    fn set_timed_out(&mut self) {
+        self.timed_out = true;
+    }
+
+    fn result(self) -> Result<Self::Output, Error> {
+        let ts0 = self.vals.ts1s.first().map_or(0, |k| *k / SEC);
+        let bin_count = self.vals.ts1s.len() as u32;
+        let mut tsoff: Vec<_> = self.vals.ts1s.iter().map(|k| *k - ts0 * SEC).collect();
+        if let Some(&k) = self.vals.ts2s.last() {
+            tsoff.push(k - ts0 * SEC);
+        }
+        let tsoff = tsoff;
+        let _iso: Vec<_> = tsoff
+            .iter()
+            .map(|&k| IsoDateTime(Utc.timestamp_nanos(k as i64)))
+            .collect();
+        let continue_at = if self.vals.ts1s.len() < self.bin_count_exp as usize {
+            match tsoff.last() {
+                Some(k) => Some(k.clone()),
+                None => Err(Error::with_msg("partial_content but no bin in result"))?,
+            }
+        } else {
+            None
+        };
+        let ret = MinMaxAvgWaveBinsCollectedResult {
+            ts0,
+            tsoff,
+            counts: self.vals.counts,
+            mins: self.vals.mins,
+            maxs: self.vals.maxs,
+            avgs: self.vals.avgs,
+            finalised_range: self.range_complete,
+            missing_bins: self.bin_count_exp - bin_count,
+            continue_at,
+        };
+        Ok(ret)
+    }
+}
+
+impl<NTY> Collectable for MinMaxAvgWaveBins<NTY>
+where
+    NTY: NumOps + Serialize,
+{
+    type Collector = MinMaxAvgWaveBinsCollector<NTY>;
+
+    fn new_collector(bin_count_exp: u32) -> Self::Collector {
+        Self::Collector::new(bin_count_exp)
+    }
+}
+
+pub struct MinMaxAvgWaveBinsAggregator<NTY> {
+    range: NanoRange,
+    count: u64,
+    min: Vec<NTY>,
+    max: Vec<NTY>,
+    sum: Vec<f32>,
+    sumc: u64,
+}
+
+impl<NTY> MinMaxAvgWaveBinsAggregator<NTY>
+where
+    NTY: NumOps,
+{
+    pub fn new(range: NanoRange, x_bin_count: usize) -> Self {
+        Self {
+            range,
+            count: 0,
+            min: vec![NTY::min_or_nan(); x_bin_count],
+            max: vec![NTY::max_or_nan(); x_bin_count],
+            sum: vec![0f32; x_bin_count],
+            sumc: 0,
+        }
+    }
+}
+
+impl<NTY> TimeBinnableTypeAggregator for MinMaxAvgWaveBinsAggregator<NTY>
+where
+    NTY: NumOps,
+{
+    type Input = MinMaxAvgWaveBins<NTY>;
+    type Output = MinMaxAvgWaveBins<NTY>;
+
+    fn range(&self) -> &NanoRange {
+        &self.range
+    }
+
+    fn ingest(&mut self, item: &Self::Input) {
+        for i1 in 0..item.ts1s.len() {
+            if item.ts2s[i1] <= self.range.beg {
+                continue;
+            } else if item.ts1s[i1] >= self.range.end {
+                continue;
+            } else {
+                // The input can contain bins where no events did fall into.
+                match &item.mins[i1] {
+                    None => {}
+                    Some(inp) => {
+                        for (a, b) in self.min.iter_mut().zip(inp.iter()) {
+                            if *b < *a {
+                                *a = *b;
+                            }
+                        }
+                    }
+                }
+                match &item.maxs[i1] {
+                    None => {}
+                    Some(inp) => {
+                        for (a, b) in self.max.iter_mut().zip(inp.iter()) {
+                            if *b > *a {
+                                *a = *b;
+                            }
+                        }
+                    }
+                }
+                match &item.avgs[i1] {
+                    None => {}
+                    Some(inp) => {
+                        for (a, b) in self.sum.iter_mut().zip(inp.iter()) {
+                            *a += *b;
+                        }
+                    }
+                }
+                self.sumc += 1;
+                self.count += item.counts[i1];
+            }
+        }
+    }
+
+    fn result(self) -> Self::Output {
+        if self.sumc == 0 {
+            Self::Output {
+                ts1s: vec![self.range.beg],
+                ts2s: vec![self.range.end],
+                counts: vec![self.count],
+                mins: vec![None],
+                maxs: vec![None],
+                avgs: vec![None],
+            }
+        } else {
+            let avg = self.sum.iter().map(|j| *j / self.sumc as f32).collect();
+            Self::Output {
+                ts1s: vec![self.range.beg],
+                ts2s: vec![self.range.end],
+                counts: vec![self.count],
+                mins: vec![Some(self.min)],
+                maxs: vec![Some(self.max)],
+                avgs: vec![Some(avg)],
+            }
+        }
+    }
+}
@@ -173,7 +173,7 @@ where
     type Output = MinMaxAvgDim1Bins<NTY>;
     type Aggregator = MinMaxAvgDim1BinsAggregator<NTY>;

-    fn aggregator(range: NanoRange) -> Self::Aggregator {
+    fn aggregator(range: NanoRange, bin_count: usize) -> Self::Aggregator {
         Self::Aggregator::new(range)
     }
 }
@@ -317,11 +317,12 @@ pub struct MinMaxAvgDim1BinsAggregator<NTY> {
 }

 impl<NTY> MinMaxAvgDim1BinsAggregator<NTY> {
-    pub fn new(range: NanoRange) -> Self {
+    pub fn new(range: NanoRange, bin_count: usize) -> Self {
         Self {
             range,
             count: 0,
-            min: None,
+            // TODO get rid of Option
+            min: err::todoval(),
             max: None,
             sumc: 0,
             sum: None,
@@ -57,11 +57,7 @@ impl PreBinnedQuery {
             .map_err(|e| Error::with_msg(format!("can not parse diskStatsEveryKb {:?}", e)))?;
         let ret = Self {
             patch: PreBinnedPatchCoord::new(bin_t_len, patch_t_len, patch_ix),
-            agg_kind: params
-                .get("aggKind")
-                .map_or(&format!("{}", AggKind::DimXBins1), |k| k)
-                .parse()
-                .map_err(|e| Error::with_msg(format!("can not parse aggKind {:?}", e)))?,
+            agg_kind: agg_kind_from_binning_scheme(&params).unwrap_or(AggKind::DimXBins1),
             channel: channel_from_params(&params)?,
             cache_usage: CacheUsage::from_params(&params)?,
             disk_stats_every: ByteSize::kb(disk_stats_every),
@@ -76,11 +72,11 @@ impl PreBinnedQuery {

     pub fn make_query_string(&self) -> String {
         format!(
-            "{}&channelBackend={}&channelName={}&aggKind={}&cacheUsage={}&diskStatsEveryKb={}&reportError={}",
+            "{}&channelBackend={}&channelName={}&binningScheme={}&cacheUsage={}&diskStatsEveryKb={}&reportError={}",
            self.patch.to_url_params_strings(),
            self.channel.backend,
            self.channel.name,
-            self.agg_kind,
+            binning_scheme_string(&self.agg_kind),
            self.cache_usage,
            self.disk_stats_every.bytes() / 1024,
            self.report_error(),
@@ -201,6 +197,7 @@ impl BinnedQuery {
             .parse()
             .map_err(|e| Error::with_msg(format!("can not parse diskStatsEveryKb {:?}", e)))?;
         let ret = Self {
+            channel: channel_from_params(&params)?,
             range: NanoRange {
                 beg: beg_date.parse::<DateTime<Utc>>()?.to_nanos(),
                 end: end_date.parse::<DateTime<Utc>>()?.to_nanos(),
@@ -210,12 +207,7 @@ impl BinnedQuery {
                 .ok_or(Error::with_msg("missing binCount"))?
                 .parse()
                 .map_err(|e| Error::with_msg(format!("can not parse binCount {:?}", e)))?,
-            agg_kind: params
-                .get("aggKind")
-                .map_or(&format!("{}", AggKind::DimXBins1), |k| k)
-                .parse()
-                .map_err(|e| Error::with_msg(format!("can not parse aggKind {:?}", e)))?,
-            channel: channel_from_params(&params)?,
+            agg_kind: agg_kind_from_binning_scheme(&params).unwrap_or(AggKind::DimXBins1),
             cache_usage: CacheUsage::from_params(&params)?,
             disk_stats_every: ByteSize::kb(disk_stats_every),
             report_error: params
@@ -292,7 +284,7 @@ impl BinnedQuery {
     pub fn url(&self, host: &HostPort) -> String {
         let date_fmt = "%Y-%m-%dT%H:%M:%S.%3fZ";
         format!(
-            "http://{}:{}/api/4/binned?cacheUsage={}&channelBackend={}&channelName={}&binCount={}&begDate={}&endDate={}&diskStatsEveryKb={}&timeout={}&abortAfterBinCount={}",
+            "http://{}:{}/api/4/binned?cacheUsage={}&channelBackend={}&channelName={}&binCount={}&begDate={}&endDate={}&binningScheme={}&diskStatsEveryKb={}&timeout={}&abortAfterBinCount={}",
             host.host,
             host.port,
             self.cache_usage,
@@ -301,9 +293,35 @@ impl BinnedQuery {
             self.bin_count,
             Utc.timestamp_nanos(self.range.beg as i64).format(date_fmt),
             Utc.timestamp_nanos(self.range.end as i64).format(date_fmt),
+            binning_scheme_string(&self.agg_kind),
             self.disk_stats_every.bytes() / 1024,
             self.timeout.as_millis(),
             self.abort_after_bin_count,
         )
     }
 }

+fn binning_scheme_string(agg_kind: &AggKind) -> String {
+    match agg_kind {
+        AggKind::Plain => "fullValue".into(),
+        AggKind::DimXBins1 => "toScalarX".into(),
+        AggKind::DimXBinsN(n) => format!("binnedXcount{}", n),
+    }
+}
+
+fn agg_kind_from_binning_scheme(params: &BTreeMap<String, String>) -> Result<AggKind, Error> {
+    let key = "binningScheme";
+    let s = params
+        .get(key)
+        .map_or(Err(Error::with_msg(format!("can not find {}", key))), |k| Ok(k))?;
+    let ret = if s == "fullValue" {
+        AggKind::Plain
+    } else if s == "toScalarX" {
+        AggKind::DimXBins1
+    } else if s.starts_with("binnedXcount") {
+        AggKind::DimXBinsN(s[12..].parse()?)
+    } else {
+        return Err(Error::with_msg("can not extract binningScheme"));
+    };
+    Ok(ret)
+}
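A standalone round-trip sketch of the new binningScheme parameter (simplified stand-ins: Option instead of this crate's Error type, and a local AggKind):

use std::collections::BTreeMap;

// Stand-in AggKind mirroring the mapping above:
// Plain <-> "fullValue", DimXBins1 <-> "toScalarX",
// DimXBinsN(n) <-> "binnedXcount{n}".
#[derive(Debug, PartialEq)]
enum AggKind {
    Plain,
    DimXBins1,
    DimXBinsN(u32),
}

fn binning_scheme_string(agg_kind: &AggKind) -> String {
    match agg_kind {
        AggKind::Plain => "fullValue".into(),
        AggKind::DimXBins1 => "toScalarX".into(),
        AggKind::DimXBinsN(n) => format!("binnedXcount{}", n),
    }
}

fn agg_kind_from_binning_scheme(params: &BTreeMap<String, String>) -> Option<AggKind> {
    let s = params.get("binningScheme")?;
    if s == "fullValue" {
        Some(AggKind::Plain)
    } else if s == "toScalarX" {
        Some(AggKind::DimXBins1)
    } else if let Some(rest) = s.strip_prefix("binnedXcount") {
        rest.parse().ok().map(AggKind::DimXBinsN)
    } else {
        None
    }
}

fn main() {
    let k = AggKind::DimXBinsN(7);
    let mut params = BTreeMap::new();
    params.insert("binningScheme".to_string(), binning_scheme_string(&k));
    assert_eq!(agg_kind_from_binning_scheme(&params), Some(k));
    println!("binningScheme round-trip ok");
}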
@@ -295,7 +295,7 @@ where
     type Output = MinMaxAvgBins<NTY>;
     type Aggregator = EventValuesAggregator<NTY>;

-    fn aggregator(range: NanoRange) -> Self::Aggregator {
+    fn aggregator(range: NanoRange, _bin_count: usize) -> Self::Aggregator {
         Self::Aggregator::new(range)
     }
 }
@@ -1,4 +1,4 @@
-use crate::agg::enp::{WaveEvents, XBinnedScalarEvents};
+use crate::agg::enp::{WaveEvents, XBinnedScalarEvents, XBinnedWaveEvents};
 use crate::agg::eventbatch::MinMaxAvgScalarEventBatch;
 use crate::agg::scalarbinbatch::MinMaxAvgScalarBinBatch;
 use crate::agg::streams::StreamItem;
@@ -104,6 +104,13 @@ where
     const FRAME_TYPE_ID: u32 = 0x800 + NTY::SUB;
 }

+impl<NTY> FrameType for Sitemty<XBinnedWaveEvents<NTY>>
+where
+    NTY: SubFrId,
+{
+    const FRAME_TYPE_ID: u32 = 0x800 + NTY::SUB;
+}
+
 pub trait ProvidesFrameType {
     fn frame_type_id(&self) -> u32;
 }
@@ -160,6 +167,15 @@ where
     }
 }

+impl<NTY> Framable for Sitemty<XBinnedWaveEvents<NTY>>
+where
+    NTY: NumOps + Serialize,
+{
+    fn make_frame(&self) -> Result<BytesMut, Error> {
+        make_frame(self)
+    }
+}
+
 pub fn make_frame<FT>(item: &FT) -> Result<BytesMut, Error>
 where
     FT: FrameType + Serialize,
@@ -96,18 +96,14 @@ impl<E: Into<Error>> From<(E, OwnedWriteHalf)> for ConnErr {
     }
 }

-// returns Pin<Box<dyn Stream<Item = Sitemty<<ENP as EventsNodeProcessor>::Output>> + Send>>
-
 fn make_num_pipeline_stream_evs<NTY, END, EVS, ENP>(
     event_value_shape: EVS,
+    events_node_proc: ENP,
     event_blobs: EventBlobsComplete,
 ) -> Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>
 where
     NTY: NumOps + NumFromBytes<NTY, END> + 'static,
     END: Endianness + 'static,
-    // TODO
-    // Can this work?
     EVS: EventValueShape<NTY, END> + EventValueFromBytes<NTY, END> + 'static,
     ENP: EventsNodeProcessor<Input = <EVS as EventValueFromBytes<NTY, END>>::Output>,
     Sitemty<<ENP as EventsNodeProcessor>::Output>: Framable + 'static,
@@ -118,7 +114,7 @@ where
             Ok(item) => match item {
                 StreamItem::DataItem(item) => match item {
                     RangeCompletableItem::Data(item) => {
-                        let item = <ENP as EventsNodeProcessor>::process(item);
+                        let item = events_node_proc.process(item);
                         Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))
                     }
                     RangeCompletableItem::RangeComplete => Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)),
@@ -133,30 +129,44 @@ where
 }

 macro_rules! pipe4 {
-    ($nty:ident, $end:ident, $evs:ident, $evsv:expr, $agg_kind:expr, $event_blobs:expr) => {
+    ($nty:ident, $end:ident, $shape:expr, $evs:ident, $evsv:expr, $agg_kind:expr, $event_blobs:expr) => {
         match $agg_kind {
             AggKind::DimXBins1 => make_num_pipeline_stream_evs::<
                 $nty,
                 $end,
                 $evs<$nty>,
-                <$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggToSingleBin,
-            >($evsv, $event_blobs),
+                //<$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggToSingleBin,
+                _,
+                //Identity<$nty>,
+            >(
+                $evsv,
+                <$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggToSingleBin::create($shape),
+                $event_blobs,
+            ),
             AggKind::DimXBinsN(_) => make_num_pipeline_stream_evs::<
                 $nty,
                 $end,
                 $evs<$nty>,
-                // TODO must pass on the requested number of bins:
-                <$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggToNBins,
-            >($evsv, $event_blobs),
+                //<$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggToNBins,
+                _,
+                //WaveXBinner<$nty>,
+            >(
+                $evsv,
+                <$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggToNBins::create($shape),
+                $event_blobs,
+            ),
             AggKind::Plain => make_num_pipeline_stream_evs::<
                 $nty,
                 $end,
                 $evs<$nty>,
-                <$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggPlain,
-            >($evsv, $event_blobs),
+                //<$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggPlain,
+                _,
+                //WaveXBinner<$nty>,
+            >(
+                $evsv,
+                <$evs<$nty> as EventValueShape<$nty, $end>>::NumXAggPlain::create($shape),
+                $event_blobs,
+            ),
         }
     };
 }
@@ -168,6 +178,7 @@ macro_rules! pipe3 {
         pipe4!(
             $nty,
             $end,
+            $shape,
             EventValuesDim0Case,
             EventValuesDim0Case::<$nty>::new(),
             $agg_kind,
@@ -181,6 +192,7 @@ macro_rules! pipe3 {
         pipe4!(
             $nty,
             $end,
+            $shape,
             EventValuesDim1Case,
             EventValuesDim1Case::<$nty>::new(n),
             $agg_kind,
@@ -258,6 +270,7 @@ async fn events_conn_handler_inner_try(
             return Err((Error::with_msg("json parse error"), netout))?;
         }
     };
+    info!("---------------------------------------------------\nevq {:?}", evq);
     match dbconn::channel_exists(&evq.channel, &node_config).await {
         Ok(_) => (),
         Err(e) => return Err((e, netout))?,