Fix warnings
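The warnings addressed here are unused-import, unused-variable, dead-code, and unreachable-code lints: unused imports are trimmed, unused parameters get a leading underscore, MinMaxAvgScalarBinBatch gains an #[allow(dead_code)] attribute, bare todo!() placeholders become err::todoval() calls, and one free-standing todo!(); is apparently wrapped in if true { ... } so the statements after it are no longer reported as unreachable. Below is a minimal, self-contained sketch of the two recurring patterns; the generic signature shown for todoval is an assumption for illustration, not taken from the err crate.

// Sketch of the two warning-silencing patterns this commit applies.

// 1) Unused parameters keep the signature a trait requires, but get a leading
//    underscore so `unused_variables` stays quiet while the body is todo!():
trait Ingest {
    fn ingest(&mut self, v: u64);
}

struct Stub;

impl Ingest for Stub {
    fn ingest(&mut self, _v: u64) {
        todo!()
    }
}

// 2) Assumed shape of `err::todoval`: a generic placeholder that, unlike a
//    bare `todo!()`, is an ordinary expression of whatever type is needed, so
//    the surrounding expression is not reported as unreachable code.
fn todoval<T>() -> T {
    todo!()
}

fn placeholder_poll() -> Result<u32, String> {
    // Expression position, as in `Ready(Some(Ok(err::todoval())))` in the diff below.
    Ok(todoval())
}

fn main() {
    let _ = Stub;
    let _ = placeholder_poll; // neither stub is called; both would panic via todo!()
}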
disk/src/agg.rs
@@ -1,12 +1,10 @@
-use crate::merge::MergeDim1F32Stream;
 use crate::EventFull;
 use err::Error;
 use futures_core::Stream;
-use futures_util::{future::ready, pin_mut, StreamExt};
+use futures_util::StreamExt;
-use netpod::BinSpecDimT;
-use netpod::{timeunits::*, Channel, ChannelConfig, Node, ScalarType, Shape};
+use netpod::{Node, ScalarType};
 use std::pin::Pin;
 use std::sync::Arc;
 use std::task::{Context, Poll};
 #[allow(unused_imports)]
 use tracing::{debug, error, info, trace, warn};
@@ -42,7 +40,7 @@ impl AggregatableXdim1Bin for () {
 impl AggregatableTdim for () {
     type Output = ();
     type Aggregator = ();
-    fn aggregator_new(&self, ts1: u64, ts2: u64) -> Self::Aggregator {
+    fn aggregator_new(&self, _ts1: u64, _ts2: u64) -> Self::Aggregator {
         todo!()
     }
 }
@@ -50,19 +48,19 @@ impl AggregatorTdim for () {
     type InputValue = ();
     type OutputValue = ();

-    fn ends_before(&self, inp: &Self::InputValue) -> bool {
+    fn ends_before(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn ends_after(&self, inp: &Self::InputValue) -> bool {
+    fn ends_after(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn starts_after(&self, inp: &Self::InputValue) -> bool {
+    fn starts_after(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn ingest(&mut self, v: &Self::InputValue) {
+    fn ingest(&mut self, _v: &Self::InputValue) {
         todo!()
     }
     fn result(self) -> Self::OutputValue {
@@ -305,6 +303,7 @@ impl AggregatorTdim for MinMaxAvgScalarEventBatchAggregator {
     }
 }

+#[allow(dead_code)]
 pub struct MinMaxAvgScalarBinBatch {
     ts1s: Vec<u64>,
     ts2s: Vec<u64>,
@@ -330,7 +329,7 @@ impl AggregatableXdim1Bin for MinMaxAvgScalarBinBatch {
 impl AggregatableTdim for MinMaxAvgScalarBinBatch {
     type Output = MinMaxAvgScalarBinSingle;
     type Aggregator = MinMaxAvgScalarBinBatchAggregator;
-    fn aggregator_new(&self, ts1: u64, ts2: u64) -> Self::Aggregator {
+    fn aggregator_new(&self, _ts1: u64, _ts2: u64) -> Self::Aggregator {
         todo!()
     }
 }
@@ -341,19 +340,19 @@ impl AggregatorTdim for MinMaxAvgScalarBinBatchAggregator {
     type InputValue = MinMaxAvgScalarBinBatch;
     type OutputValue = MinMaxAvgScalarBinSingle;

-    fn ends_before(&self, inp: &Self::InputValue) -> bool {
+    fn ends_before(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn ends_after(&self, inp: &Self::InputValue) -> bool {
+    fn ends_after(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn starts_after(&self, inp: &Self::InputValue) -> bool {
+    fn starts_after(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn ingest(&mut self, v: &Self::InputValue) {
+    fn ingest(&mut self, _v: &Self::InputValue) {
         todo!()
     }

@@ -384,7 +383,7 @@ impl std::fmt::Debug for MinMaxAvgScalarBinSingle {
 impl AggregatableTdim for MinMaxAvgScalarBinSingle {
     type Output = MinMaxAvgScalarBinSingle;
     type Aggregator = MinMaxAvgScalarBinSingleAggregator;
-    fn aggregator_new(&self, ts1: u64, ts2: u64) -> Self::Aggregator {
+    fn aggregator_new(&self, _ts1: u64, _ts2: u64) -> Self::Aggregator {
         todo!()
     }
 }
@@ -402,19 +401,19 @@ impl AggregatorTdim for MinMaxAvgScalarBinSingleAggregator {
     type InputValue = MinMaxAvgScalarBinSingle;
     type OutputValue = MinMaxAvgScalarBinSingle;

-    fn ends_before(&self, inp: &Self::InputValue) -> bool {
+    fn ends_before(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn ends_after(&self, inp: &Self::InputValue) -> bool {
+    fn ends_after(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn starts_after(&self, inp: &Self::InputValue) -> bool {
+    fn starts_after(&self, _inp: &Self::InputValue) -> bool {
         todo!()
     }

-    fn ingest(&mut self, v: &Self::InputValue) {
+    fn ingest(&mut self, _v: &Self::InputValue) {
         todo!()
     }

@@ -455,7 +454,9 @@ where
             // do the conversion

             // TODO only a scalar!
-            todo!();
+            if true {
+                todo!();
+            }

             let n1 = decomp.len();
             assert!(n1 % ty.bytes() as usize == 0);
@@ -479,10 +480,10 @@ where
                         ret.tss.push(k.tss[i1]);
                         ret.values.push(j);
                     }
-                    _ => todo!(),
+                    _ => err::todoval(),
                 }
             }
-            Ready(Some(Ok(todo!())))
+            Ready(Some(Ok(err::todoval())))
         }
         Ready(Some(Err(e))) => Ready(Some(Err(e))),
         Ready(None) => Ready(None),
@@ -744,193 +745,9 @@ pub fn make_test_node(id: u32) -> Node {
         id,
         host: "localhost".into(),
         port: 8800 + id as u16,
         port_raw: 8800 + id as u16 + 100,
         data_base_path: format!("../tmpdata/node{:02}", id).into(),
         split: id,
         ksprefix: "ks".into(),
     }
 }
-
-#[test]
-fn agg_x_dim_0() {
-    taskrun::run(async {
-        agg_x_dim_0_inner().await;
-        Ok(())
-    })
-    .unwrap();
-}
-
-async fn agg_x_dim_0_inner() {
-    let node = make_test_node(0);
-    let node = Arc::new(node);
-    let query = netpod::AggQuerySingleChannel {
-        channel_config: ChannelConfig {
-            channel: Channel {
-                backend: "sf-databuffer".into(),
-                name: "S10BC01-DBAM070:EOM1_T1".into(),
-            },
-            keyspace: 2,
-            time_bin_size: DAY,
-            array: false,
-            shape: Shape::Scalar,
-            scalar_type: ScalarType::F64,
-            big_endian: true,
-            compression: true,
-        },
-        timebin: 18723,
-        tb_file_count: 1,
-        buffer_size: 1024 * 4,
-    };
-    let bin_count = 20;
-    let ts1 = query.timebin as u64 * query.channel_config.time_bin_size;
-    let ts2 = ts1 + HOUR * 24;
-    let fut1 = crate::EventBlobsComplete::new(&query, query.channel_config.clone(), node)
-        .into_dim_1_f32_stream()
-        //.take(1000)
-        .map(|q| {
-            if let Ok(ref k) = q {
-                //info!("vals: {:?}", k);
-            }
-            q
-        })
-        .into_binned_x_bins_1()
-        .map(|k| {
-            //info!("after X binning {:?}", k.as_ref().unwrap());
-            k
-        })
-        .into_binned_t(BinSpecDimT::over_range(bin_count, ts1, ts2))
-        .map(|k| {
-            info!("after T binning {:?}", k.as_ref().unwrap());
-            k
-        })
-        .for_each(|k| ready(()));
-    fut1.await;
-}
-
-#[test]
-fn agg_x_dim_1() {
-    taskrun::run(async {
-        agg_x_dim_1_inner().await;
-        Ok(())
-    })
-    .unwrap();
-}
-
-async fn agg_x_dim_1_inner() {
-    // sf-databuffer
-    // /data/sf-databuffer/daq_swissfel/daq_swissfel_3/byTime/S10BC01-DBAM070\:BAM_CH1_NORM/*
-    // S10BC01-DBAM070:BAM_CH1_NORM
-    let node = make_test_node(0);
-    let node = Arc::new(node);
-    let query = netpod::AggQuerySingleChannel {
-        channel_config: ChannelConfig {
-            channel: Channel {
-                backend: "ks".into(),
-                name: "wave1".into(),
-            },
-            keyspace: 3,
-            time_bin_size: DAY,
-            array: true,
-            shape: Shape::Wave(1024),
-            scalar_type: ScalarType::F64,
-            big_endian: true,
-            compression: true,
-        },
-        timebin: 0,
-        tb_file_count: 1,
-        buffer_size: 17,
-    };
-    let bin_count = 10;
-    let ts1 = query.timebin as u64 * query.channel_config.time_bin_size;
-    let ts2 = ts1 + HOUR * 24;
-    let fut1 = crate::EventBlobsComplete::new(&query, query.channel_config.clone(), node)
-        .into_dim_1_f32_stream()
-        //.take(1000)
-        .map(|q| {
-            if let Ok(ref k) = q {
-                //info!("vals: {:?}", k);
-            }
-            q
-        })
-        .into_binned_x_bins_1()
-        .map(|k| {
-            //info!("after X binning {:?}", k.as_ref().unwrap());
-            k
-        })
-        .into_binned_t(BinSpecDimT::over_range(bin_count, ts1, ts2))
-        .map(|k| {
-            info!("after T binning {:?}", k.as_ref().unwrap());
-            k
-        })
-        .for_each(|k| ready(()));
-    fut1.await;
-}
-
-#[test]
-fn merge_0() {
-    taskrun::run(async {
-        merge_0_inner().await;
-        Ok(())
-    })
-    .unwrap();
-}
-
-async fn merge_0_inner() {
-    let query = netpod::AggQuerySingleChannel {
-        channel_config: ChannelConfig {
-            channel: Channel {
-                backend: "ks".into(),
-                name: "wave1".into(),
-            },
-            keyspace: 3,
-            time_bin_size: DAY,
-            array: true,
-            shape: Shape::Wave(17),
-            scalar_type: ScalarType::F64,
-            big_endian: true,
-            compression: true,
-        },
-        timebin: 0,
-        tb_file_count: 1,
-        buffer_size: 1024 * 8,
-    };
-    let streams = (0..13)
-        .into_iter()
-        .map(|k| make_test_node(k))
-        .map(|node| {
-            let node = Arc::new(node);
-            crate::EventBlobsComplete::new(&query, query.channel_config.clone(), node).into_dim_1_f32_stream()
-        })
-        .collect();
-    MergeDim1F32Stream::new(streams)
-        .map(|k| {
-            //info!("NEXT MERGED ITEM ts {:?}", k.as_ref().unwrap().tss);
-        })
-        .fold(0, |k, q| ready(0))
-        .await;
-}
-
-pub fn tmp_some_older_things() {
-    let vals = ValuesDim1 {
-        tss: vec![0, 1, 2, 3],
-        values: vec![vec![0., 0., 0.], vec![1., 1., 1.], vec![2., 2., 2.], vec![3., 3., 3.]],
-    };
-    // I want to distinguish already in the outer part between dim-0 and dim-1 and generate
-    // separate code for these cases...
-    // That means that also the reading chain itself needs to be typed on that.
-    // Need to supply some event-payload converter type which has that type as Output type.
-    let vals2 = vals.into_agg();
-    // Now the T-binning:
-
-    /*
-    T-aggregator must be able to produce empty-values of correct type even if we never get
-    a single value of input data.
-    Therefore, it needs the bin range definition.
-    How do I want to drive the system?
-    If I write the T-binner as a Stream, then I also need to pass it the input!
-    Meaning, I need to pass the Stream which produces the actual numbers from disk.
-
-    readchannel() -> Stream of timestamped byte blobs
-    .to_f32() -> Stream ? indirection to branch on the underlying shape
-    .agg_x_bins_1() -> Stream ? can I keep it at the single indirection on the top level?
-    */
-}
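The block comment deleted at the end of this diff asks how a time-bin aggregator should be driven when it must emit a value of the correct output type even for bins that receive no input, which is why aggregator_new takes the bin range (ts1, ts2). As a loose illustration only: the trait shapes below are inferred from the impls shown in this diff and may not match the real definitions in disk/src/agg.rs, and TsVal and MeanBin are hypothetical stand-ins, not types from this crate.

// Trait shapes inferred from the impls shown in this diff; the real
// definitions in disk/src/agg.rs may carry extra methods and bounds.
pub trait AggregatableTdim {
    type Output;
    type Aggregator: AggregatorTdim;
    fn aggregator_new(&self, ts1: u64, ts2: u64) -> Self::Aggregator;
}

pub trait AggregatorTdim {
    type InputValue;
    type OutputValue;
    fn ends_before(&self, inp: &Self::InputValue) -> bool;
    fn ends_after(&self, inp: &Self::InputValue) -> bool;
    fn starts_after(&self, inp: &Self::InputValue) -> bool;
    fn ingest(&mut self, v: &Self::InputValue);
    fn result(self) -> Self::OutputValue;
}

// Hypothetical stand-in: one timestamped scalar event.
struct TsVal {
    ts: u64,
    val: f32,
}

// A bin aggregator gets its (ts1, ts2) range up front, so it can still emit a
// value of the right output type when no event lands in the bin.
struct MeanBin {
    ts1: u64,
    ts2: u64,
    sum: f32,
    count: u32,
}

impl AggregatorTdim for MeanBin {
    type InputValue = TsVal;
    type OutputValue = (u64, u64, Option<f32>);
    fn ends_before(&self, inp: &Self::InputValue) -> bool {
        inp.ts < self.ts1
    }
    fn ends_after(&self, inp: &Self::InputValue) -> bool {
        inp.ts >= self.ts2
    }
    fn starts_after(&self, inp: &Self::InputValue) -> bool {
        inp.ts >= self.ts2
    }
    fn ingest(&mut self, v: &Self::InputValue) {
        self.sum += v.val;
        self.count += 1;
    }
    fn result(self) -> Self::OutputValue {
        let avg = if self.count > 0 { Some(self.sum / self.count as f32) } else { None };
        (self.ts1, self.ts2, avg)
    }
}

impl AggregatableTdim for TsVal {
    type Output = (u64, u64, Option<f32>);
    type Aggregator = MeanBin;
    fn aggregator_new(&self, ts1: u64, ts2: u64) -> Self::Aggregator {
        MeanBin { ts1, ts2, sum: 0.0, count: 0 }
    }
}

fn main() {
    // Two bins driven over a small event list; the second bin receives nothing
    // and still produces an output of the correct type.
    let events = [TsVal { ts: 1, val: 2.0 }, TsVal { ts: 3, val: 4.0 }];
    for (ts1, ts2) in [(0u64, 5u64), (5, 10)] {
        let mut agg = events[0].aggregator_new(ts1, ts2);
        for ev in &events {
            if !agg.ends_before(ev) && !agg.ends_after(ev) {
                agg.ingest(ev);
            }
        }
        println!("{:?}", agg.result());
    }
}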