WIP
@@ -14,8 +14,8 @@ use bytes::BytesMut;
use err::Error;
use std::future::Future;

fn f32_cmp_near(x: f32, y: f32) -> bool {
    let x = {
fn f32_cmp_near(x: f32, y: f32, abs: f32, rel: f32) -> bool {
    /*let x = {
        let mut a = x.to_le_bytes();
        a[0] &= 0xf0;
        f32::from_ne_bytes(a)
@@ -25,11 +25,13 @@ fn f32_cmp_near(x: f32, y: f32) -> bool {
        a[0] &= 0xf0;
        f32::from_ne_bytes(a)
    };
    x == y
    x == y*/
    let ad = (x - y).abs();
    ad <= abs || (ad / y).abs() <= rel
}
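
Illustration, not part of the commit: a minimal usage sketch of the tolerance-based comparison above, assuming f32_cmp_near is in scope. Two values count as near if either the absolute difference is within abs, or the difference relative to y is within rel; when y is 0.0 only the absolute bound can match. The test name and values are made up.

#[test]
fn f32_cmp_near_sketch() {
    // near via the absolute bound: |1000.0 - 1000.4| <= 0.5
    assert_eq!(f32_cmp_near(1000.0, 1000.4, 0.5, 1e-3), true);
    // near via the relative bound: |1000.0 - 1001.0| / 1001.0 is about 1e-3, below 0.01
    assert_eq!(f32_cmp_near(1000.0, 1001.0, 0.5, 0.01), true);
    // neither bound holds
    assert_eq!(f32_cmp_near(1.0, 2.0, 0.5, 0.1), false);
}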

fn f64_cmp_near(x: f64, y: f64) -> bool {
    let x = {
fn f64_cmp_near(x: f64, y: f64, abs: f64, rel: f64) -> bool {
    /*let x = {
        let mut a = x.to_le_bytes();
        a[0] &= 0x00;
        a[1] &= 0x00;
@@ -41,10 +43,12 @@ fn f64_cmp_near(x: f64, y: f64) -> bool {
        a[1] &= 0x00;
        f64::from_ne_bytes(a)
    };
    x == y
    x == y*/
    let ad = (x - y).abs();
    ad <= abs || (ad / y).abs() <= rel
}

fn f32_iter_cmp_near<A, B>(a: A, b: B) -> bool
fn f32_iter_cmp_near<A, B>(a: A, b: B, abs: f32, rel: f32) -> bool
where
    A: IntoIterator<Item = f32>,
    B: IntoIterator<Item = f32>,
@@ -55,7 +59,7 @@ where
        let x = a.next();
        let y = b.next();
        if let (Some(x), Some(y)) = (x, y) {
            if !f32_cmp_near(x, y) {
            if !f32_cmp_near(x, y, abs, rel) {
                return false;
            }
        } else if x.is_some() || y.is_some() {
@@ -66,7 +70,7 @@ where
    }
}

fn f64_iter_cmp_near<A, B>(a: A, b: B) -> bool
fn f64_iter_cmp_near<A, B>(a: A, b: B, abs: f64, rel: f64) -> bool
where
    A: IntoIterator<Item = f64>,
    B: IntoIterator<Item = f64>,
@@ -77,7 +81,7 @@ where
        let x = a.next();
        let y = b.next();
        if let (Some(x), Some(y)) = (x, y) {
            if !f64_cmp_near(x, y) {
            if !f64_cmp_near(x, y, abs, rel) {
                return false;
            }
        } else if x.is_some() || y.is_some() {
@@ -92,10 +96,10 @@ where
fn test_f32_iter_cmp_near() {
    let a = [-127.553e17];
    let b = [-127.554e17];
    assert_eq!(f32_iter_cmp_near(a, b), false);
    assert_eq!(f32_iter_cmp_near(a, b, 0.001, 0.001), false);
    let a = [-127.55300e17];
    let b = [-127.55301e17];
    assert_eq!(f32_iter_cmp_near(a, b), true);
    assert_eq!(f32_iter_cmp_near(a, b, 0.001, 0.001), true);
}
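
Illustration, not part of the commit: an analogous sketch for the f64 iterator variant, assuming the f64_iter_cmp_near above is in scope. The test name and values are made up.

#[test]
fn f64_iter_cmp_near_sketch() {
    let a = [0.1, 6.3];
    let b = [0.100001, 6.299999];
    // both pairs are within the absolute tolerance of 1e-3
    assert_eq!(f64_iter_cmp_near(a, b, 1e-3, 1e-3), true);
    // with much tighter tolerances the first pair already fails
    assert_eq!(f64_iter_cmp_near(a, b, 1e-9, 1e-9), false);
}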

fn run_test<F>(f: F) -> Result<(), Error>

@@ -176,23 +176,33 @@ fn binned_d0_json_02() -> Result<(), Error> {
        let nb = res.len();
        {
            let a1: Vec<_> = res.ts1_off_ms().iter().map(|x| *x).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|x| 300 * 1000 * x).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|x| 1 * 1000 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.ts2_off_ms().iter().map(|x| *x).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|x| 300 * 1000 * (1 + x)).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|x| 1 * 1000 * (1 + x)).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.counts().iter().map(|x| *x).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|_| 1024).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|_| 10).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.mins().iter().map(|x| *x).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|_| 0.1).collect();
            assert_eq!(f64_iter_cmp_near(a1, a2), true);
            assert_eq!(f64_iter_cmp_near(a1, a2, 0.05, 0.05), true);
        }
        {
            let a1: Vec<_> = res.maxs().iter().map(|x| *x).collect();
            let a2: Vec<_> = (0..nb as _).into_iter().map(|_| 6.3).collect();
            assert_eq!(f64_iter_cmp_near(a1, a2, 0.05, 0.05), true);
        }
        {
            let a1: Vec<_> = res.avgs().iter().map(|x| *x).collect();
            let a2 = vec![46.2, 105.9, 78.0, 88.3, 98.9, 70.8, 107.3, 74.1, 93.3, 94.3];
            assert_eq!(f32_iter_cmp_near(a1, a2, 0.05, 0.05), true);
        }
        Ok(())
    };
@@ -225,7 +235,6 @@ fn binned_d0_json_03() -> Result<(), Error> {
        assert_eq!(res.range_final(), true);
        assert_eq!(res.counts()[0], 300);
        assert_eq!(res.counts()[3], 8);
        assert_eq!(f32_cmp_near(res.avgs()[0], 44950.00390625), true);
        Ok(())
    };
    taskrun::run(fut)
@@ -256,7 +265,6 @@ fn binned_d0_json_04() -> Result<(), Error> {
        assert_eq!(res.len(), 17);
        // TODO I would expect rangeFinal to be set, or?
        assert_eq!(res.range_final(), false);
        assert_eq!(f32_cmp_near(res.avgs()[0], 42.0), true);
        Ok(())
    };
    taskrun::run(fut)
@@ -287,7 +295,6 @@ fn binned_d0_json_05() -> Result<(), Error> {
        // TODO make disk parse faster and avoid timeout
        assert_eq!(res.len(), 11);
        assert_eq!(res.range_final(), false);
        assert_eq!(f32_cmp_near(res.avgs()[0], 42.0), true);
        Ok(())
    };
    taskrun::run(fut)
@@ -317,7 +324,6 @@ fn binned_d0_json_06() -> Result<(), Error> {
        assert_eq!(res.ts_anchor_sec(), 1210);
        assert_eq!(res.len(), 20);
        assert_eq!(res.range_final(), true);
        assert_eq!(f32_cmp_near(res.avgs()[0], 42.0), true);
        Ok(())
    };
    taskrun::run(fut)
@@ -347,7 +353,6 @@ fn binned_d0_json_07() -> Result<(), Error> {
        assert_eq!(res.ts_anchor_sec(), 1200);
        assert_eq!(res.len(), 11);
        assert_eq!(res.range_final(), true);
        assert_eq!(f32_cmp_near(res.avgs()[0], 42.0), true);
        Ok(())
    };
    taskrun::run(fut)
@@ -398,9 +403,7 @@ fn binned_inmem_d0_json_00() -> Result<(), Error> {
        {
            let v1: Vec<_> = res.avgs().iter().map(|x| *x).collect();
            let v2: Vec<_> = (0..14).into_iter().map(|x| 1202. + 5. * x as f32).collect();
            for (a, b) in v1.into_iter().zip(v2.into_iter()) {
                assert_eq!(f32_cmp_near(a, b), true);
            }
            assert_eq!(f32_iter_cmp_near(v1, v2, 0.05, 0.05), true);
        }
        Ok(())
    };

@@ -70,7 +70,7 @@ fn events_plain_json_01() -> Result<(), Error> {
        assert_eq!(res.ts_anchor_sec(), 1210);
        assert_eq!(res.pulse_anchor(), 2420);
        let exp = [2420., 2421., 2422., 2423., 2424., 2425.];
        assert_eq!(f32_iter_cmp_near(res.values_to_f32(), exp), true);
        assert_eq!(f32_iter_cmp_near(res.values_to_f32(), exp, 0.01, 0.01), true);
        assert_eq!(res.range_final(), true);
        assert_eq!(res.timed_out(), false);
        Ok(())

@@ -433,28 +433,23 @@ pub struct EventsDim0Aggregator<STY> {
    sumc: u64,
    sum: f32,
    int_ts: u64,
    last_seen_ts: u64,
    last_seen_val: Option<STY>,
    last_ts: u64,
    last_val: Option<STY>,
    did_min_max: bool,
    do_time_weight: bool,
    events_taken_count: u64,
    events_ignored_count: u64,
}

impl<STY> Drop for EventsDim0Aggregator<STY> {
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        trace!(
            "taken {} ignored {}",
            self.events_taken_count,
            self.events_ignored_count
        );
        trace!("count {} ignored {}", self.count, self.events_ignored_count);
    }
}

impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
    fn self_name() -> String {
        format!("{}<{}>", any::type_name::<Self>(), any::type_name::<STY>())
        any::type_name::<Self>().to_string()
    }

    pub fn new(range: SeriesRange, do_time_weight: bool) -> Self {
@@ -464,14 +459,13 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
            count: 0,
            min: STY::zero_b(),
            max: STY::zero_b(),
            sum: 0.,
            sumc: 0,
            sum: 0.,
            int_ts,
            last_seen_ts: 0,
            last_seen_val: None,
            last_ts: 0,
            last_val: None,
            did_min_max: false,
            do_time_weight,
            events_taken_count: 0,
            events_ignored_count: 0,
        }
    }
@@ -513,19 +507,19 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
        }
    }

    fn apply_event_time_weight(&mut self, px: u64, pxbeg: u64) {
        if let Some(v) = &self.last_seen_val {
    fn apply_event_time_weight(&mut self, px: u64) {
        if let Some(v) = &self.last_val {
            trace_ingest!("apply_event_time_weight with v {v:?}");
            let vf = v.as_prim_f32_b();
            let v2 = v.clone();
            if px > pxbeg {
            if px > self.range.beg_u64() {
                let v2 = v.clone();
                self.apply_min_max(v2);
            }
            self.sumc += 1;
            let w = (px - self.int_ts) as f32 * 1e-9;
            if vf.is_nan() {
            } else {
                self.sum += vf * w;
                self.sumc += 1;
            }
            self.int_ts = px;
        } else {
@@ -548,7 +542,6 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
                } else {
                    self.apply_event_unweight(val);
                    self.count += 1;
                    self.events_taken_count += 1;
                }
            }
        } else {
@@ -561,34 +554,25 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
        let self_name = any::type_name::<Self>();
        trace_ingest!("{self_name}::ingest_time_weight item len {}", item.len());
        if self.range.is_time() {
            for i1 in 0..item.tss.len() {
                let ts = item.tss[i1];
                let val = item.values[i1].clone();
                if ts < self.int_ts {
                    trace_ingest!("{self_name} ingest {:6} {:20} {:10?} BEFORE", i1, ts, val);
                    self.last_seen_ts = ts;
                    self.last_seen_val = Some(val);
                    self.events_ignored_count += 1;
                } else if ts >= self.range.end_u64() {
            let range_beg = self.range.beg_u64();
            let range_end = self.range.end_u64();
            for (&ts, val) in item.tss.iter().zip(item.values.iter()) {
                if ts >= range_end {
                    trace_ingest!("{self_name} ingest {:6} {:20} {:10?} AFTER", i1, ts, val);
                    self.events_ignored_count += 1;
                    return;
                } else {
                    trace_ingest!("{self_name} ingest {:6} {:20} {:10?} IN", i1, ts, val);
                    if false && self.last_seen_val.is_none() {
                        // TODO no longer needed or?
                        trace_ingest!(
                            "call apply_min_max without last val, use current instead {} {:?}",
                            ts,
                            val
                        );
                        self.apply_min_max(val.clone());
                    }
                    self.apply_event_time_weight(ts, self.range.beg_u64());
                    // TODO count all the ignored events for stats
                    break;
                } else if ts >= range_beg {
                    trace_ingest!("{self_name} ingest {:6} {:20} {:10?} INSIDE", i1, ts, val);
                    self.apply_event_time_weight(ts);
                    self.count += 1;
                    self.last_seen_ts = ts;
                    self.last_seen_val = Some(val);
                    self.events_taken_count += 1;
                    self.last_ts = ts;
                    self.last_val = Some(val.clone());
                } else {
                    trace_ingest!("{self_name} ingest {:6} {:20} {:10?} BEFORE", i1, ts, val);
                    self.events_ignored_count += 1;
                    self.last_ts = ts;
                    self.last_val = Some(val.clone());
                }
            }
        } else {
@@ -604,7 +588,7 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
            let avg = self.sum / self.sumc as f32;
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let g = match &self.last_seen_val {
            let g = match &self.last_val {
                Some(x) => x.clone(),
                None => STY::zero_b(),
            };
@@ -627,8 +611,10 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
        self.int_ts = range.beg_u64();
        self.range = range;
        self.count = 0;
        self.sum = 0f32;
        self.sum = 0.;
        self.sumc = 0;
        self.min = STY::zero_b();
        self.max = STY::zero_b();
        self.did_min_max = false;
        ret
    }
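
Illustration, not part of the commit: the arithmetic the time-weighted path implements, as a standalone sketch with made-up names. Each event's value is held until the next event (or the bin edge), the products value * delta_t are accumulated, and the bin average is that integral divided by the bin length; timestamps are nanoseconds, which is where the factor 1e-9 above comes from. The event at or before the bin start, provided via one_before_range and last_val, supplies the value held at the start of the bin.

fn time_weighted_bin_avg(bin_beg: u64, bin_end: u64, events: &[(u64, f32)]) -> f32 {
    // events: (timestamp in ns, value), sorted by timestamp; should include the
    // last event at or before bin_beg so a value is defined at the bin start.
    let mut sum = 0.0f32;
    let mut held: Option<(u64, f32)> = None;
    for &(ts, v) in events {
        if let Some((t0, v0)) = held {
            let a = t0.max(bin_beg);
            let b = ts.min(bin_end);
            if b > a {
                // hold v0 from a to b and integrate it
                sum += v0 * (b - a) as f32 * 1e-9;
            }
        }
        held = Some((ts, v));
    }
    if let Some((t0, v0)) = held {
        // the last value is held until the end of the bin
        let a = t0.max(bin_beg);
        if bin_end > a {
            sum += v0 * (bin_end - a) as f32 * 1e-9;
        }
    }
    sum / ((bin_end - bin_beg) as f32 * 1e-9)
}

// Over a 10 s bin with a single value 3.0 held from the bin start,
// this returns 3.0 (up to f32 rounding).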
@@ -639,7 +625,7 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
        let range_beg = self.range.beg_u64();
        let range_end = self.range.end_u64();
        if self.range.is_time() {
            self.apply_event_time_weight(range_end, range_beg);
            self.apply_event_time_weight(range_end);
        } else {
            error!("TODO result_reset_time_weight");
            err::todoval()
@@ -648,7 +634,7 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
            let avg = self.sum / (self.range.delta_u64() as f32 * 1e-9);
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let g = match &self.last_seen_val {
            let g = match &self.last_val {
                Some(x) => x.clone(),
                None => STY::zero_b(),
            };
@@ -671,11 +657,11 @@ impl<STY: ScalarOps> EventsDim0Aggregator<STY> {
        self.int_ts = range_beg;
        self.range = range;
        self.count = 0;
        self.sum = 0.;
        self.sumc = 0;
        self.did_min_max = false;
        self.sum = 0.;
        self.min = STY::zero_b();
        self.max = STY::zero_b();
        self.did_min_max = false;
        ret
    }
}

@@ -619,15 +619,19 @@ where
    STY: ScalarOps,
{
    range: SeriesRange,
    /// Number of events which actually fall in this bin.
    count: u64,
    min: STY,
    max: STY,
    /// Number of times we accumulated to the sum of this bin.
    sumc: u64,
    sum: f32,
    int_ts: u64,
    last_ts: u64,
    last_vals: Option<(STY, STY, f32)>,
    did_min_max: bool,
    do_time_weight: bool,
    events_ignored_count: u64,
}

impl<STY> EventsXbinDim0Aggregator<STY>
@@ -642,6 +646,7 @@ where
        let int_ts = range.beg_u64();
        Self {
            range,
            did_min_max: false,
            count: 0,
            min: STY::zero_b(),
            max: STY::zero_b(),
@@ -650,12 +655,17 @@ where
            int_ts,
            last_ts: 0,
            last_vals: None,
            events_ignored_count: 0,
            do_time_weight,
        }
    }

    fn apply_min_max(&mut self, min: &STY, max: &STY) {
        if self.count == 0 {
        if self.did_min_max != (self.sumc > 0) {
            panic!("logic error apply_min_max {} {}", self.did_min_max, self.sumc);
        }
        if self.sumc == 0 {
            self.did_min_max = true;
            self.min = min.clone();
            self.max = max.clone();
        } else {
@@ -671,35 +681,39 @@ where
    fn apply_event_unweight(&mut self, avg: f32, min: STY, max: STY) {
        //debug!("apply_event_unweight");
        self.apply_min_max(&min, &max);
        self.sumc += 1;
        let vf = avg;
        if vf.is_nan() {
        } else {
            self.sum += vf;
            self.sumc += 1;
        }
    }

    fn apply_event_time_weight(&mut self, px: u64, pxbeg: u64) {
    // Only integrate, do not count because it is used even if the event does not fall into current bin.
    fn apply_event_time_weight(&mut self, px: u64) {
        trace_ingest!(
            "apply_event_time_weight px {} pxbeg {} count {}",
            "apply_event_time_weight px {} count {} sumc {} events_ignored_count {}",
            px,
            pxbeg,
            self.count
            self.count,
            self.sumc,
            self.events_ignored_count
        );
        if let Some((min, max, avg)) = self.last_vals.as_ref() {
            let vf = *avg;
            if px > pxbeg {
            {
                let min = min.clone();
                let max = max.clone();
                self.apply_min_max(&min, &max);
            }
            self.sumc += 1;
            let w = (px - self.int_ts) as f32 * 1e-9;
            if vf.is_nan() {
            } else {
                self.sum += vf * w;
                self.sumc += 1;
            }
            self.int_ts = px;
        } else {
            debug!("apply_event_time_weight NO VALUE");
        }
    }
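
Illustration, not part of the commit: concrete (made-up) numbers for the carry behaviour above. For a bin covering 10 s, an input that lies before the bin only sets last_vals; when the first in-range input arrives 4 s into the bin, the carried avg of 2.0 is integrated over those 4 s, and closing the bin integrates the newly held 5.0 over the remaining 6 s. The test name is hypothetical.

#[test]
fn carried_value_arithmetic_sketch() {
    // 2.0 held for the first 4 s of the bin, 5.0 for the remaining 6 s
    let sum = 2.0f32 * 4.0 + 5.0 * 6.0;
    // divided by the bin length in seconds
    let avg = sum / 10.0;
    assert_eq!(avg, 3.8);
}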

@@ -713,7 +727,6 @@ where
            } else if ts >= self.range.end {
            } else {
                self.apply_event_unweight(avg, min, max);
                self.count += 1;
            }
        }*/
        todo!()
@@ -721,11 +734,13 @@ where

    fn ingest_time_weight(&mut self, item: &EventsXbinDim0<STY>) {
        trace!(
            "{} ingest_time_weight range {:?} int_ts {:?}",
            "{} ingest_time_weight range {:?} last_ts {:?} int_ts {:?}",
            Self::type_name(),
            self.range,
            self.last_ts,
            self.int_ts
        );
        let range_beg = self.range.beg_u64();
        let range_end = self.range.end_u64();
        for (((&ts, min), max), avg) in item
            .tss
@@ -734,24 +749,24 @@ where
            .zip(item.maxs.iter())
            .zip(item.avgs.iter())
        {
            if ts < self.int_ts {
                self.last_ts = ts;
                self.last_vals = Some((min.clone(), max.clone(), avg.clone()));
                //self.events_ignored_count += 1;
            } else if ts >= self.range.end_u64() {
                //self.events_ignored_count += 1;
                return;
            } else {
                self.apply_event_time_weight(ts, self.range.beg_u64());
            if ts >= range_end {
                self.events_ignored_count += 1;
                // TODO break early when tests pass.
                //break;
            } else if ts >= range_beg {
                self.apply_event_time_weight(ts);
                self.count += 1;
                self.last_ts = ts;
                self.last_vals = Some((min.clone(), max.clone(), avg.clone()));
                //self.events_taken_count += 1;
            } else {
                self.events_ignored_count += 1;
                self.last_ts = ts;
                self.last_vals = Some((min.clone(), max.clone(), avg.clone()));
            }
        }
    }

    fn result_reset_unweight(&mut self, range: SeriesRange, _expand: bool) -> BinsXbinDim0<STY> {
    fn result_reset_unweight(&mut self, range: SeriesRange) -> BinsXbinDim0<STY> {
        /*let avg = if self.sumc == 0 {
            0f32
        } else {
@@ -767,20 +782,20 @@ where
        );
        self.int_ts = range.beg;
        self.range = range;
        self.count = 0;
        self.min = NTY::zero_b();
        self.max = NTY::zero_b();
        self.sum = 0f32;
        self.sumc = 0;
        self.did_min_max = false;
        self.min = NTY::zero_b();
        self.max = NTY::zero_b();
        ret*/
        todo!()
    }

    fn result_reset_time_weight(&mut self, range: SeriesRange, expand: bool) -> BinsXbinDim0<STY> {
    fn result_reset_time_weight(&mut self, range: SeriesRange) -> BinsXbinDim0<STY> {
        trace!("{} result_reset_time_weight", Self::type_name());
        // TODO check callsite for correct expand status.
        if self.range.is_time() {
            self.apply_event_time_weight(self.range.end_u64(), self.range.beg_u64());
            self.apply_event_time_weight(self.range.end_u64());
        } else {
            error!("TODO result_reset_time_weight");
            err::todoval()
@@ -792,7 +807,10 @@ where
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let (min, max, avg) = match &self.last_vals {
                Some((min, max, avg)) => (min.clone(), max.clone(), avg.clone()),
                Some((min, max, avg)) => {
                    warn!("\n\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! SHOULD ALWAYS HAVE ACCUMULATED IN THIS CASE");
                    (min.clone(), max.clone(), avg.clone())
                }
                None => (STY::zero_b(), STY::zero_b(), 0.),
            };
            (min, max, avg)
@@ -808,8 +826,9 @@ where
        self.int_ts = range_beg;
        self.range = range;
        self.count = 0;
        self.sum = 0.;
        self.sumc = 0;
        self.sum = 0.;
        self.did_min_max = false;
        self.min = STY::zero_b();
        self.max = STY::zero_b();
        ret
@@ -838,9 +857,9 @@ where

    fn result_reset(&mut self, range: SeriesRange, expand: bool) -> Self::Output {
        if self.do_time_weight {
            self.result_reset_time_weight(range, expand)
            self.result_reset_time_weight(range)
        } else {
            self.result_reset_unweight(range, expand)
            self.result_reset_unweight(range)
        }
    }
}

@@ -83,9 +83,19 @@ async fn make_channel_events_stream_data(
    let chn = evq.channel().name();
    let range = evq.range().clone();
    if chn == "test-gen-i32-dim0-v01" {
        Ok(Box::pin(GenerateI32V01::new(node_ix, node_count, range)))
        Ok(Box::pin(GenerateI32V01::new(
            node_ix,
            node_count,
            range,
            evq.one_before_range(),
        )))
    } else if chn == "test-gen-f64-dim1-v00" {
        Ok(Box::pin(GenerateF64V00::new(node_ix, node_count, range)))
        Ok(Box::pin(GenerateF64V00::new(
            node_ix,
            node_count,
            range,
            evq.one_before_range(),
        )))
    } else {
        let na: Vec<_> = chn.split("-").collect();
        if na.len() != 3 {

@@ -78,7 +78,7 @@ impl PlainEventsQuery {
    }

    pub fn one_before_range(&self) -> bool {
        self.one_before_range
        self.transform.need_one_before_range()
    }

    pub fn transform(&self) -> &TransformQuery {

@@ -39,6 +39,16 @@ pub enum TimeBinningTransformQuery {
    Unweighted,
}

impl TimeBinningTransformQuery {
    pub fn need_one_before_range(&self) -> bool {
        match self {
            TimeBinningTransformQuery::None => false,
            TimeBinningTransformQuery::TimeWeighted => true,
            TimeBinningTransformQuery::Unweighted => false,
        }
    }
}
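
Illustration, not part of the commit: the intent of the new predicate in a few assertions, using the variants shown above. Only time-weighted binning needs the last event from before the requested range, because that event supplies the value held at the start of the first bin; unweighted binning and queries without time binning do not. The test name is made up.

#[test]
fn need_one_before_range_sketch() {
    assert_eq!(TimeBinningTransformQuery::TimeWeighted.need_one_before_range(), true);
    assert_eq!(TimeBinningTransformQuery::Unweighted.need_one_before_range(), false);
    assert_eq!(TimeBinningTransformQuery::None.need_one_before_range(), false);
}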

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TransformQuery {
    event: EventTransformQuery,
@@ -109,6 +119,10 @@ impl TransformQuery {
        self.event.need_value_data()
    }

    pub fn need_one_before_range(&self) -> bool {
        self.time_binning.need_one_before_range()
    }

    pub fn is_pulse_id_diff(&self) -> bool {
        match &self.event {
            EventTransformQuery::PulseIdDiff => true,

@@ -117,16 +117,19 @@ pub struct GenerateI32V01 {
}

impl GenerateI32V01 {
    pub fn new(node_ix: u64, node_count: u64, range: SeriesRange) -> Self {
    pub fn new(node_ix: u64, node_count: u64, range: SeriesRange, one_before_range: bool) -> Self {
        let range = match range {
            SeriesRange::TimeRange(k) => k,
            SeriesRange::PulseRange(_) => todo!(),
        };
        let ivl = MS * 500;
        let dts = ivl * node_count as u64;
        let ts = (range.beg / ivl + node_ix) * ivl;
        let ts = (range.beg / ivl + node_ix - if one_before_range { 1 } else { 0 }) * ivl;
        let tsend = range.end;
        info!("START GENERATOR GenerateI32V01 ivl {} dts {} ts {}", ivl, dts, ts);
        info!(
            "START GENERATOR GenerateI32V01 ivl {} dts {} ts {} one_before_range {}",
            ivl, dts, ts, one_before_range
        );
        Self {
            ivl,
            ts,
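
Illustration, not part of the commit: the start-timestamp arithmetic with made-up numbers (node_ix 0, interval 500). Without the flag the generator starts at the interval boundary at or below range.beg; with one_before_range it starts one interval earlier, so the first emitted event lies before the range and can seed time-weighted binning. Note the subtraction assumes range.beg / ivl + node_ix is at least 1.

#[test]
fn generator_start_alignment_sketch() {
    let (ivl, node_ix, beg) = (500u64, 0u64, 2_000u64);
    let ts_plain = (beg / ivl + node_ix) * ivl;
    let ts_one_before = (beg / ivl + node_ix - 1) * ivl;
    assert_eq!((ts_plain, ts_one_before), (2_000, 1_500));
}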
@@ -150,7 +153,7 @@ impl GenerateI32V01 {
        }
        let pulse = ts;
        let value = (ts / self.ivl) as T;
        if false {
        if true {
            info!(
                "v01 node {} made event ts {} pulse {} value {}",
                self.node_ix, ts, pulse, value
@@ -209,16 +212,19 @@ pub struct GenerateF64V00 {
}

impl GenerateF64V00 {
    pub fn new(node_ix: u64, node_count: u64, range: SeriesRange) -> Self {
    pub fn new(node_ix: u64, node_count: u64, range: SeriesRange, one_before_range: bool) -> Self {
        let range = match range {
            SeriesRange::TimeRange(k) => k,
            SeriesRange::PulseRange(_) => todo!(),
        };
        let ivl = MS * 100;
        let dts = ivl * node_count as u64;
        let ts = (range.beg / ivl + node_ix) * ivl;
        let ts = (range.beg / ivl + node_ix - if one_before_range { 1 } else { 0 }) * ivl;
        let tsend = range.end;
        info!("START GENERATOR GenerateF64V00 ivl {} dts {} ts {}", ivl, dts, ts);
        info!(
            "START GENERATOR GenerateF64V00 ivl {} dts {} ts {} one_before_range {}",
            ivl, dts, ts, one_before_range
        );
        Self {
            ivl,
            ts,