WIP checks but has many todo panics
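Scope of the change: stream stages stop carrying side-channel state on the data batches themselves (`event_data_read_stats` and `end_of_range_observed` on `EventFull` / `ValuesDim1`) and instead yield item enums (`EventChunkerItem`, `Dim1F32StreamItem`, `MinMaxAvgScalarEventBatchStreamItem`, `MinMaxAvgScalarBinBatchStreamItem`) whose `Values`, `RangeComplete`, and `EventDataReadStats` variants travel in-band through the pipeline. Several of the new match arms are still stubbed with `todo!()` / `err::todoval()`, hence the WIP title.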
disk/src/agg.rs (285 changes)
@@ -4,11 +4,13 @@ Aggregation and binning support.
-use super::eventchunker::EventFull;
+use crate::agg::eventbatch::MinMaxAvgScalarEventBatch;
+use crate::agg::scalarbinbatch::MinMaxAvgScalarBinBatch;
+use crate::eventchunker::EventChunkerItem;
 use err::Error;
 use futures_core::Stream;
 use futures_util::StreamExt;
+use netpod::ScalarType;
 use netpod::{EventDataReadStats, NanoRange};
-use netpod::{Node, ScalarType};
 use serde::{Deserialize, Serialize};
 use std::pin::Pin;
 use std::task::{Context, Poll};
@@ -332,19 +334,13 @@ where
     }
 }

-pub struct Dim1F32Stream<S>
-where
-    S: Stream,
-{
+pub struct Dim1F32Stream<S> {
     inp: S,
     errored: bool,
     completed: bool,
 }

-impl<S> Dim1F32Stream<S>
-where
-    S: Stream,
-{
+impl<S> Dim1F32Stream<S> {
     pub fn new(inp: S) -> Self {
         Self {
             inp,
@@ -352,13 +348,82 @@ where
             completed: false,
         }
     }
+
+    fn process_event_data(&mut self, k: &EventFull) -> Result<ValuesDim1, Error> {
+        let mut ret = ValuesDim1::empty();
+        use ScalarType::*;
+        for i1 in 0..k.tss.len() {
+            // TODO iterate sibling arrays after single bounds check
+            let ty = &k.scalar_types[i1];
+            let decomp = k.decomps[i1].as_ref().unwrap();
+            match ty {
+                U16 => {
+                    const BY: usize = 2;
+                    // do the conversion
+                    let n1 = decomp.len();
+                    assert!(n1 % ty.bytes() as usize == 0);
+                    let ele_count = n1 / ty.bytes() as usize;
+                    let mut j = Vec::with_capacity(ele_count);
+                    let mut p1 = 0;
+                    for _ in 0..ele_count {
+                        let u = unsafe {
+                            let mut r = [0u8; BY];
+                            std::ptr::copy_nonoverlapping(&decomp[p1], r.as_mut_ptr(), BY);
+                            u16::from_be_bytes(r)
+                        };
+                        j.push(u as f32);
+                        p1 += BY;
+                    }
+                    ret.tss.push(k.tss[i1]);
+                    ret.values.push(j);
+                }
+                F64 => {
+                    const BY: usize = 8;
+                    // do the conversion
+                    let n1 = decomp.len();
+                    assert!(n1 % ty.bytes() as usize == 0);
+                    let ele_count = n1 / ty.bytes() as usize;
+                    let mut j = Vec::with_capacity(ele_count);
+                    unsafe {
+                        j.set_len(ele_count);
+                    }
+                    let mut p1 = 0;
+                    for i1 in 0..ele_count {
+                        let u = unsafe {
+                            let mut r = [0u8; BY];
+                            std::ptr::copy_nonoverlapping(&decomp[p1], r.as_mut_ptr(), BY);
+                            f64::from_be_bytes(r)
+                            //f64::from_be_bytes(std::mem::transmute::<_, [u8; 8]>(&decomp[p1]))
+                        };
+                        j[i1] = u as f32;
+                        p1 += BY;
+                    }
+                    ret.tss.push(k.tss[i1]);
+                    ret.values.push(j);
+                }
+                _ => {
+                    let e = Error::with_msg(format!("Dim1F32Stream unhandled scalar type: {:?}", ty));
+                    self.errored = true;
+                    return Err(e);
+                }
+            }
+        }
+        Ok(ret)
+    }
 }

+#[derive(Debug)]
+pub enum Dim1F32StreamItem {
+    Values(ValuesDim1),
+    RangeComplete,
+    EventDataReadStats(EventDataReadStats),
+}
+
 impl<S> Stream for Dim1F32Stream<S>
 where
-    S: Stream<Item = Result<EventFull, Error>> + Unpin,
+    S: Stream<Item = Result<EventChunkerItem, Error>> + Unpin,
 {
-    type Item = Result<ValuesDim1, Error>;
+    type Item = Result<Dim1F32StreamItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;
@@ -372,72 +437,28 @@ where
             match self.inp.poll_next_unpin(cx) {
                 Ready(Some(Ok(k))) => {
                     let inst1 = Instant::now();
-                    let mut ret = ValuesDim1::empty();
-                    use ScalarType::*;
-                    if k.end_of_range_observed {
-                        ret.range_complete_observed = true;
-                    }
-                    for i1 in 0..k.tss.len() {
-                        // TODO iterate sibling arrays after single bounds check
-                        let ty = &k.scalar_types[i1];
-                        let decomp = k.decomps[i1].as_ref().unwrap();
-                        match ty {
-                            U16 => {
-                                const BY: usize = 2;
-                                // do the conversion
-                                let n1 = decomp.len();
-                                assert!(n1 % ty.bytes() as usize == 0);
-                                let ele_count = n1 / ty.bytes() as usize;
-                                let mut j = Vec::with_capacity(ele_count);
-                                let mut p1 = 0;
-                                for _ in 0..ele_count {
-                                    let u = unsafe {
-                                        let mut r = [0u8; BY];
-                                        std::ptr::copy_nonoverlapping(&decomp[p1], r.as_mut_ptr(), BY);
-                                        u16::from_be_bytes(r)
-                                    };
-                                    j.push(u as f32);
-                                    p1 += BY;
-                                }
-                                ret.tss.push(k.tss[i1]);
-                                ret.values.push(j);
-                            }
-                            F64 => {
-                                const BY: usize = 8;
-                                // do the conversion
-                                let n1 = decomp.len();
-                                assert!(n1 % ty.bytes() as usize == 0);
-                                let ele_count = n1 / ty.bytes() as usize;
-                                let mut j = Vec::with_capacity(ele_count);
-                                unsafe {
-                                    j.set_len(ele_count);
-                                }
-                                let mut p1 = 0;
-                                for i1 in 0..ele_count {
-                                    let u = unsafe {
-                                        let mut r = [0u8; BY];
-                                        std::ptr::copy_nonoverlapping(&decomp[p1], r.as_mut_ptr(), BY);
-                                        f64::from_be_bytes(r)
-                                        //f64::from_be_bytes(std::mem::transmute::<_, [u8; 8]>(&decomp[p1]))
-                                    };
-                                    j[i1] = u as f32;
-                                    p1 += BY;
-                                }
-                                ret.tss.push(k.tss[i1]);
-                                ret.values.push(j);
-                            }
-                            _ => {
-                                let e = Error::with_msg(format!("Dim1F32Stream unhandled scalar type: {:?}", ty));
-                                self.errored = true;
-                                return Ready(Some(Err(e)));
-                            }
-                        }
-                    }
+                    let u = match &k {
+                        EventChunkerItem::Events(events) => match self.process_event_data(events) {
+                            Ok(k) => {
+                                let ret = Dim1F32StreamItem::Values(k);
+                                Ready(Some(Ok(ret)))
+                            }
+                            Err(e) => {
+                                self.errored = true;
+                                Ready(Some(Err(e)))
+                            }
+                        },
+                        EventChunkerItem::RangeComplete => err::todoval(),
+                        EventChunkerItem::EventDataReadStats(_stats) => {
+                            // TODO ret.event_data_read_stats.trans(&mut k.event_data_read_stats);
+                            // TODO ret.values_extract_stats.dur += inst2.duration_since(inst1);
+                            err::todoval()
+                        }
+                    };
                     let inst2 = Instant::now();
-                    let mut k = k;
-                    ret.event_data_read_stats.trans(&mut k.event_data_read_stats);
-                    ret.values_extract_stats.dur += inst2.duration_since(inst1);
-                    Ready(Some(Ok(ret)))
+                    // TODO do something with the measured time.
+                    let _ = inst2.duration_since(inst1);
+                    u
                 }
                 Ready(Some(Err(e))) => {
                     self.errored = true;
@@ -455,26 +476,124 @@ where
 pub trait IntoDim1F32Stream {
     fn into_dim_1_f32_stream(self) -> Dim1F32Stream<Self>
     where
-        Self: Stream<Item = Result<EventFull, Error>> + Sized;
+        Self: Stream<Item = Result<EventChunkerItem, Error>> + Sized;
 }

 impl<T> IntoDim1F32Stream for T
 where
-    T: Stream<Item = Result<EventFull, Error>>,
+    T: Stream<Item = Result<EventChunkerItem, Error>>,
 {
     fn into_dim_1_f32_stream(self) -> Dim1F32Stream<T> {
         Dim1F32Stream::new(self)
     }
 }

-pub fn make_test_node(id: u32) -> Node {
-    Node {
-        host: "localhost".into(),
-        listen: "0.0.0.0".into(),
-        port: 8800 + id as u16,
-        port_raw: 8800 + id as u16 + 100,
-        data_base_path: format!("../tmpdata/node{:02}", id).into(),
-        split: id,
-        ksprefix: "ks".into(),
-    }
-}
+#[derive(Debug, Serialize, Deserialize)]
+pub enum MinMaxAvgScalarEventBatchStreamItem {
+    Values(MinMaxAvgScalarEventBatch),
+    RangeComplete,
+    EventDataReadStats(EventDataReadStats),
+}
+
+impl AggregatableXdim1Bin for Dim1F32StreamItem {
+    type Output = MinMaxAvgScalarEventBatchStreamItem;
+
+    fn into_agg(self) -> Self::Output {
+        todo!()
+    }
+}
+
+#[derive(Debug)]
+pub enum MinMaxAvgScalarBinBatchStreamItem {
+    Values(MinMaxAvgScalarBinBatch),
+    RangeComplete,
+    EventDataReadStats(EventDataReadStats),
+}
+
+pub struct MinMaxAvgScalarEventBatchStreamItemAggregator {}
+
+impl AggregatorTdim for MinMaxAvgScalarEventBatchStreamItemAggregator {
+    type InputValue = MinMaxAvgScalarEventBatchStreamItem;
+    type OutputValue = MinMaxAvgScalarBinBatchStreamItem;
+
+    fn ends_before(&self, _inp: &Self::InputValue) -> bool {
+        todo!()
+    }
+
+    fn ends_after(&self, _inp: &Self::InputValue) -> bool {
+        todo!()
+    }
+
+    fn starts_after(&self, _inp: &Self::InputValue) -> bool {
+        todo!()
+    }
+
+    fn ingest(&mut self, _inp: &mut Self::InputValue) {
+        todo!()
+    }
+
+    fn result(self) -> Self::OutputValue {
+        todo!()
+    }
+}
+
+impl AggregatableTdim for MinMaxAvgScalarEventBatchStreamItem {
+    type Output = MinMaxAvgScalarBinBatchStreamItem;
+    type Aggregator = MinMaxAvgScalarEventBatchStreamItemAggregator;
+
+    fn aggregator_new_static(_ts1: u64, _ts2: u64) -> Self::Aggregator {
+        todo!()
+    }
+}
+
+impl AggregatableXdim1Bin for MinMaxAvgScalarEventBatchStreamItem {
+    type Output = MinMaxAvgScalarEventBatchStreamItem;
+
+    fn into_agg(self) -> Self::Output {
+        self
+    }
+}
+
+pub struct MinMaxAvgScalarBinBatchStreamItemAggregator {}
+
+impl AggregatorTdim for MinMaxAvgScalarBinBatchStreamItemAggregator {
+    type InputValue = MinMaxAvgScalarBinBatchStreamItem;
+    type OutputValue = MinMaxAvgScalarBinBatchStreamItem;
+
+    fn ends_before(&self, _inp: &Self::InputValue) -> bool {
+        todo!()
+    }
+
+    fn ends_after(&self, _inp: &Self::InputValue) -> bool {
+        todo!()
+    }
+
+    fn starts_after(&self, _inp: &Self::InputValue) -> bool {
+        todo!()
+    }
+
+    fn ingest(&mut self, _inp: &mut Self::InputValue) {
+        todo!()
+    }
+
+    fn result(self) -> Self::OutputValue {
+        todo!()
+    }
+}
+
+impl AggregatableTdim for MinMaxAvgScalarBinBatchStreamItem {
+    type Output = MinMaxAvgScalarBinBatchStreamItem;
+    type Aggregator = MinMaxAvgScalarBinBatchStreamItemAggregator;
+
+    fn aggregator_new_static(_ts1: u64, _ts2: u64) -> Self::Aggregator {
+        todo!()
+    }
+}
+
+impl AggregatableXdim1Bin for MinMaxAvgScalarBinBatchStreamItem {
+    type Output = MinMaxAvgScalarBinBatchStreamItem;
+
+    fn into_agg(self) -> Self::Output {
+        self
+    }
+}
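For orientation, a minimal sketch (not part of the diff) of how a consumer is expected to drive one of the new item streams; the `drain` helper is hypothetical, while the enum and its variants are the ones defined above.

use futures_util::StreamExt;

// Hypothetical consumer: data batches, the range-complete marker, and read
// statistics all arrive as ordinary stream items now.
async fn drain<S>(mut inp: S) -> Result<(), err::Error>
where
    S: futures_core::Stream<Item = Result<Dim1F32StreamItem, err::Error>> + Unpin,
{
    while let Some(item) = inp.next().await {
        match item? {
            // the actual event data, converted to f32
            Dim1F32StreamItem::Values(_vals) => { /* aggregate or forward */ }
            // the requested range was fully covered; no more data will come
            Dim1F32StreamItem::RangeComplete => {}
            // I/O statistics travel in-band instead of on the batch
            Dim1F32StreamItem::EventDataReadStats(_stats) => { /* account */ }
        }
    }
    Ok(())
}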
@@ -1,14 +1,26 @@
-use super::agg::{make_test_node, IntoDim1F32Stream};
+use super::agg::IntoDim1F32Stream;
 use super::merge::MergeDim1F32Stream;
 use crate::agg::binnedt::IntoBinnedT;
 use crate::agg::binnedx::IntoBinnedXBins1;
 use futures_util::StreamExt;
 use netpod::timeunits::*;
-use netpod::{BinnedRange, Channel, ChannelConfig, NanoRange, Nanos, ScalarType, Shape};
+use netpod::{BinnedRange, Channel, ChannelConfig, NanoRange, Nanos, Node, ScalarType, Shape};
 use std::future::ready;
 #[allow(unused_imports)]
 use tracing::{debug, error, info, trace, warn};

+pub fn make_test_node(id: u32) -> Node {
+    Node {
+        host: "localhost".into(),
+        listen: "0.0.0.0".into(),
+        port: 8800 + id as u16,
+        port_raw: 8800 + id as u16 + 100,
+        data_base_path: format!("../tmpdata/node{:02}", id).into(),
+        split: id,
+        ksprefix: "ks".into(),
+    }
+}
+
 #[test]
 fn agg_x_dim_0() {
     taskrun::run(async {

@@ -49,15 +61,6 @@ async fn agg_x_dim_0_inner() {
         query.buffer_size as usize,
     )
     .into_dim_1_f32_stream()
-    //.take(1000)
-    .map(|q| {
-        if false {
-            if let Ok(ref k) = q {
-                trace!("vals: {:?}", k);
-            }
-        }
-        q
-    })
     .into_binned_x_bins_1()
     .map(|k| {
         if false {
@@ -50,7 +50,7 @@ impl BinnedStream {
         let range = range.clone();
         move |k| {
             let fit_range = range.full_range();
-            let g = match k.0 {
+            let g = match k {
                 Ok(PreBinnedItem::Batch(k)) => {
                     use super::agg::{Fits, FitsInside};
                     match k.fits_inside(fit_range) {
|
||||
use crate::agg::eventbatch::MinMaxAvgScalarEventBatch;
|
||||
use crate::agg::MinMaxAvgScalarEventBatchStreamItem;
|
||||
use crate::binnedstream::BinnedStream;
|
||||
use crate::cache::pbv::PreBinnedValueByteStream;
|
||||
use crate::channelconfig::{extract_matching_config_entry, read_local_config};
|
||||
@@ -308,7 +308,7 @@ impl AsyncRead for HttpBodyAsAsyncRead {
|
||||
}
|
||||
}
|
||||
|
||||
type T001 = Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarEventBatch, Error>> + Send>>;
|
||||
type T001 = Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>> + Send>>;
|
||||
type T002 = Pin<Box<dyn Future<Output = Result<T001, Error>> + Send>>;
|
||||
pub struct MergedFromRemotes {
|
||||
tcp_establish_futs: Vec<T002>,
|
||||
@@ -339,7 +339,7 @@ impl MergedFromRemotes {
|
||||
|
||||
impl Stream for MergedFromRemotes {
|
||||
// TODO need this generic for scalar and array (when wave is not binned down to a single scalar point)
|
||||
type Item = Result<MinMaxAvgScalarEventBatch, Error>;
|
||||
type Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
use Poll::*;
|
||||
|
||||
disk/src/cache/pbv.rs (25 changes, vendored)
@@ -1,5 +1,6 @@
 use crate::agg::binnedt::IntoBinnedT;
-use crate::cache::pbvfs::{PreBinnedFrame, PreBinnedItem, PreBinnedValueFetchedStream};
+use crate::agg::MinMaxAvgScalarBinBatchStreamItem;
+use crate::cache::pbvfs::{PreBinnedItem, PreBinnedValueFetchedStream};
 use crate::cache::{node_ix_for_patch, MergedFromRemotes, PreBinnedQuery};
 use crate::frame::makeframe::make_frame;
 use crate::raw::EventsQuery;

@@ -35,7 +36,7 @@ impl Stream for PreBinnedValueByteStreamInner {
     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;
         match self.inp.poll_next_unpin(cx) {
-            Ready(Some(item)) => match make_frame::<PreBinnedFrame>(&item) {
+            Ready(Some(item)) => match make_frame::<Result<PreBinnedItem, Error>>(&item) {
                 Ok(buf) => Ready(Some(Ok(buf.freeze()))),
                 Err(e) => Ready(Some(Err(e.into()))),
             },

@@ -49,7 +50,7 @@ pub struct PreBinnedValueStream {
     query: PreBinnedQuery,
     node_config: NodeConfigCached,
     open_check_local_file: Option<Pin<Box<dyn Future<Output = Result<tokio::fs::File, std::io::Error>> + Send>>>,
-    fut2: Option<Pin<Box<dyn Stream<Item = PreBinnedFrame> + Send>>>,
+    fut2: Option<Pin<Box<dyn Stream<Item = Result<PreBinnedItem, Error>> + Send>>>,
     errored: bool,
     completed: bool,
 }

@@ -141,8 +142,9 @@ impl PreBinnedValueStream {
         let range = BinnedRange::covering_range(evq.range.clone(), count).unwrap();
         let s1 = MergedFromRemotes::new(evq, self.node_config.node_config.cluster.clone());
         let s2 = s1.into_binned_t(range).map(|k| match k {
-            Ok(k) => PreBinnedFrame(Ok(PreBinnedItem::Batch(k))),
-            Err(e) => PreBinnedFrame(Err(e)),
+            Ok(MinMaxAvgScalarBinBatchStreamItem::Values(k)) => Ok(PreBinnedItem::Batch(k)),
+            Err(e) => Err(e),
+            _ => todo!(),
         });
         self.fut2 = Some(Box::pin(s2));
     }

@@ -152,7 +154,7 @@ impl PreBinnedValueStream {

 impl Stream for PreBinnedValueStream {
     // TODO need this generic for scalar and array (when wave is not binned down to a single scalar point)
-    type Item = PreBinnedFrame;
+    type Item = Result<PreBinnedItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;

@@ -166,11 +168,11 @@ impl Stream for PreBinnedValueStream {
         'outer: loop {
             break if let Some(fut) = self.fut2.as_mut() {
                 match fut.poll_next_unpin(cx) {
-                    Ready(Some(k)) => match k.0 {
-                        Ok(k) => Ready(Some(PreBinnedFrame(Ok(k)))),
+                    Ready(Some(k)) => match k {
+                        Ok(k) => Ready(Some(Ok(k))),
                         Err(e) => {
                             self.errored = true;
-                            Ready(Some(PreBinnedFrame(Err(e))))
+                            Ready(Some(Err(e)))
                         }
                     },
                     Ready(None) => Ready(None),

@@ -180,7 +182,6 @@ impl Stream for PreBinnedValueStream {
                 match fut.poll_unpin(cx) {
                     Ready(Ok(_file)) => {
                         let e = Err(Error::with_msg(format!("TODO use the cached data from file")));
-                        let e = PreBinnedFrame(e);
                         self.errored = true;
                         Ready(Some(e))
                     }

@@ -190,7 +191,6 @@ impl Stream for PreBinnedValueStream {
                     self.try_setup_fetch_prebinned_higher_res();
                     if self.fut2.is_none() {
                         let e = Err(Error::with_msg(format!("try_setup_fetch_prebinned_higher_res failed")));
-                        let e = PreBinnedFrame(e);
                         self.errored = true;
                         Ready(Some(e))
                     } else {

@@ -200,8 +200,7 @@ impl Stream for PreBinnedValueStream {
                 _ => {
                     error!("File I/O error: {:?}", e);
                     self.errored = true;
-                    let e = PreBinnedFrame(Err(e.into()));
-                    Ready(Some(e))
+                    Ready(Some(Err(e.into())))
                 }
             },
             Pending => Pending,
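Reviewer note: the `_ => todo!()` arm in `try_setup_fetch_prebinned_higher_res` means any `RangeComplete` or `EventDataReadStats` item coming out of `into_binned_t` currently panics instead of being forwarded as a `PreBinnedItem`; this is one of the todo panics the commit title warns about.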
disk/src/cache/pbvfs.rs (36 changes, vendored)
@@ -44,18 +44,6 @@ impl PreBinnedValueFetchedStream {
     }
 }

-#[derive(Serialize, Deserialize)]
-pub struct PreBinnedFrame(pub Result<PreBinnedItem, Error>);
-
-impl<T> From<T> for PreBinnedFrame
-where
-    T: Into<Error>,
-{
-    fn from(k: T) -> Self {
-        PreBinnedFrame(Err(k.into()))
-    }
-}
-
 #[derive(Serialize, Deserialize)]
 pub enum PreBinnedItem {
     Batch(MinMaxAvgScalarBinBatch),

@@ -63,7 +51,7 @@ pub enum PreBinnedItem {

 impl Stream for PreBinnedValueFetchedStream {
     // TODO need this generic for scalar and array (when wave is not binned down to a single scalar point)
-    type Item = PreBinnedFrame;
+    type Item = Result<PreBinnedItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;

@@ -78,22 +66,20 @@ impl Stream for PreBinnedValueFetchedStream {
             break if let Some(res) = self.res.as_mut() {
                 pin_mut!(res);
                 match res.poll_next(cx) {
-                    Ready(Some(Ok(frame))) => match decode_frame::<PreBinnedFrame>(&frame) {
-                        Ok(item) => match item.0 {
-                            Ok(item) => Ready(Some(PreBinnedFrame(Ok(item)))),
-                            Err(e) => {
-                                self.errored = true;
-                                Ready(Some(PreBinnedFrame(Err(e))))
-                            }
-                        },
+                    Ready(Some(Ok(frame))) => match decode_frame::<Result<PreBinnedItem, Error>>(&frame) {
+                        Ok(Ok(item)) => Ready(Some(Ok(item))),
+                        Ok(Err(e)) => {
+                            self.errored = true;
+                            Ready(Some(Err(e)))
+                        }
                         Err(e) => {
                             self.errored = true;
-                            Ready(Some(e.into()))
+                            Ready(Some(Err(e)))
                         }
                     },
                     Ready(Some(Err(e))) => {
                         self.errored = true;
-                        Ready(Some(e.into()))
+                        Ready(Some(Err(e)))
                     }
                     Ready(None) => {
                         self.completed = true;

@@ -114,7 +100,7 @@ impl Stream for PreBinnedValueFetchedStream {
                     Err(e) => {
                         error!("PreBinnedValueStream error in stream {:?}", e);
                         self.errored = true;
-                        Ready(Some(PreBinnedFrame(Err(e.into()))))
+                        Ready(Some(Err(e.into())))
                     }
                 },
                 Pending => Pending,

@@ -133,7 +119,7 @@ impl Stream for PreBinnedValueFetchedStream {
                 }
                 Err(e) => {
                     self.errored = true;
-                    Ready(Some(e.into()))
+                    Ready(Some(Err(e.into())))
                 }
             }
         };
@@ -1,5 +1,5 @@
 use crate::dataopen::open_files;
-use crate::eventchunker::{EventChunker, EventFull};
+use crate::eventchunker::{EventChunker, EventChunkerItem};
 use crate::file_content_stream;
 use err::Error;
 use futures_core::Stream;

@@ -34,7 +34,7 @@ impl EventBlobsComplete {
 }

 impl Stream for EventBlobsComplete {
-    type Item = Result<EventFull, Error>;
+    type Item = Result<EventChunkerItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         use Poll::*;

@@ -84,7 +84,7 @@ impl Stream for EventBlobsComplete {
 pub fn event_blobs_complete(
     query: &netpod::AggQuerySingleChannel,
     node: Node,
-) -> impl Stream<Item = Result<EventFull, Error>> + Send {
+) -> impl Stream<Item = Result<EventChunkerItem, Error>> + Send {
     let query = query.clone();
     let node = node.clone();
     async_stream::stream! {
@@ -20,6 +20,7 @@ pub struct EventChunker {
     completed: bool,
     range: NanoRange,
     seen_beyond_range: bool,
+    sent_beyond_range: bool,
 }

 enum DataFileState {

@@ -27,6 +28,10 @@ enum DataFileState {
     Event,
 }

+struct ParseResult {
+    events: EventFull,
+}
+
 impl EventChunker {
     pub fn from_start(
         inp: Pin<Box<dyn Stream<Item = Result<FileChunkRead, Error>> + Send>>,

@@ -44,6 +49,7 @@ impl EventChunker {
             completed: false,
             range,
             seen_beyond_range: false,
+            sent_beyond_range: false,
         }
     }

@@ -63,6 +69,7 @@ impl EventChunker {
             completed: false,
             range,
             seen_beyond_range: false,
+            sent_beyond_range: false,
         }
     }

@@ -100,7 +107,7 @@ impl EventChunker {
                     self.state = DataFileState::Event;
                     self.need_min = 4;
                     buf.advance(totlen);
-                    ret.event_data_read_stats.parsed_bytes += totlen as u64;
+                    // TODO ret.event_data_read_stats.parsed_bytes += totlen as u64;
                 }
             }
             DataFileState::Event => {

@@ -120,7 +127,7 @@ impl EventChunker {
                 let pulse = sl.read_i64::<BE>().unwrap() as u64;
                 if ts >= self.range.end {
                     self.seen_beyond_range = true;
-                    ret.end_of_range_observed = true;
+                    // TODO ret.end_of_range_observed = true;
                     info!("END OF RANGE OBSERVED");
                     break;
                 }

@@ -223,7 +230,7 @@ impl EventChunker {
                 }
                 trace!("advance and reset need_min");
                 buf.advance(len as usize);
-                ret.event_data_read_stats.parsed_bytes += len as u64;
+                // TODO ret.event_data_read_stats.parsed_bytes += len as u64;
                 self.need_min = 4;
             }
         }

@@ -234,12 +241,39 @@ impl EventChunker {
     }
 }

-struct ParseResult {
-    events: EventFull,
+pub struct EventFull {
+    pub tss: Vec<u64>,
+    pub pulses: Vec<u64>,
+    pub decomps: Vec<Option<BytesMut>>,
+    pub scalar_types: Vec<ScalarType>,
 }

+impl EventFull {
+    pub fn empty() -> Self {
+        Self {
+            tss: vec![],
+            pulses: vec![],
+            decomps: vec![],
+            scalar_types: vec![],
+        }
+    }
+
+    fn add_event(&mut self, ts: u64, pulse: u64, decomp: Option<BytesMut>, scalar_type: ScalarType) {
+        self.tss.push(ts);
+        self.pulses.push(pulse);
+        self.decomps.push(decomp);
+        self.scalar_types.push(scalar_type);
+    }
+}
+
+pub enum EventChunkerItem {
+    Events(EventFull),
+    RangeComplete,
+    EventDataReadStats(EventDataReadStats),
+}
+
 impl Stream for EventChunker {
-    type Item = Result<EventFull, Error>;
+    type Item = Result<EventChunkerItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;

@@ -251,8 +285,13 @@ impl Stream for EventChunker {
             return Ready(None);
         }
         if self.seen_beyond_range {
-            self.completed = true;
-            return Ready(None);
+            if self.sent_beyond_range {
+                self.completed = true;
+                return Ready(None);
+            } else {
+                self.sent_beyond_range = true;
+                return Ready(Some(Ok(EventChunkerItem::RangeComplete)));
+            }
         }
         match self.inp.poll_next_unpin(cx) {
             Ready(Some(Ok(mut fcr))) => {

@@ -271,7 +310,9 @@ impl Stream for EventChunker {
                 } else {
                     let x = self.need_min;
                     self.inp.set_need_min(x);
-                    Ready(Some(Ok(res.events)))
+                    let ret = EventChunkerItem::Events(res.events);
+                    let ret = Ok(ret);
+                    Ready(Some(ret))
                 }
             }
             Err(e) => {

@@ -293,32 +334,3 @@ impl Stream for EventChunker {
             }
         }
     }
 }
-
-pub struct EventFull {
-    pub tss: Vec<u64>,
-    pub pulses: Vec<u64>,
-    pub decomps: Vec<Option<BytesMut>>,
-    pub scalar_types: Vec<ScalarType>,
-    pub event_data_read_stats: EventDataReadStats,
-    pub end_of_range_observed: bool,
-}
-
-impl EventFull {
-    pub fn empty() -> Self {
-        Self {
-            tss: vec![],
-            pulses: vec![],
-            decomps: vec![],
-            scalar_types: vec![],
-            event_data_read_stats: EventDataReadStats::new(),
-            end_of_range_observed: false,
-        }
-    }
-
-    fn add_event(&mut self, ts: u64, pulse: u64, decomp: Option<BytesMut>, scalar_type: ScalarType) {
-        self.tss.push(ts);
-        self.pulses.push(pulse);
-        self.decomps.push(decomp);
-        self.scalar_types.push(scalar_type);
-    }
-}
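The `seen_beyond_range` / `sent_beyond_range` pair encodes a small termination protocol: once an event timestamp at or past `range.end` is seen, the chunker emits exactly one `RangeComplete` item and only then ends the stream. A standalone model of just that flag logic (simplified from the diff; `EndOfRange` and `Item` are stand-ins, not real types):

// Simplified model of EventChunker's end-of-range handling. `Item` stands in
// for EventChunkerItem; the real code also pulls data from the input stream.
struct EndOfRange {
    seen_beyond_range: bool,
    sent_beyond_range: bool,
}

enum Item {
    Events,        // EventChunkerItem::Events(EventFull)
    RangeComplete, // EventChunkerItem::RangeComplete
}

impl EndOfRange {
    fn next(&mut self) -> Option<Item> {
        if self.seen_beyond_range {
            if self.sent_beyond_range {
                // marker already delivered: the stream is done
                None
            } else {
                // deliver the range-complete marker exactly once
                self.sent_beyond_range = true;
                Some(Item::RangeComplete)
            }
        } else {
            // before the end of the range, keep producing event batches
            Some(Item::Events)
        }
    }
}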
@@ -1,4 +1,4 @@
-use crate::cache::pbvfs::PreBinnedFrame;
+use crate::cache::pbvfs::PreBinnedItem;
 use crate::cache::BinnedBytesForHttpStreamFrame;
 use crate::frame::inmem::InMemoryFrame;
 use crate::raw::conn::RawConnOut;

@@ -27,7 +27,7 @@ impl FrameType for RawConnOut {
     const FRAME_TYPE_ID: u32 = 0x04;
 }

-impl FrameType for PreBinnedFrame {
+impl FrameType for Result<PreBinnedItem, Error> {
     const FRAME_TYPE_ID: u32 = 0x05;
 }
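Note on the pattern: implementing `FrameType` directly for `Result<PreBinnedItem, Error>` is what lets the `PreBinnedFrame` newtype be deleted (see the pbvfs.rs hunks above). The whole `Result` is serialized per frame, so a remote error arrives as an ordinary frame and `decode_frame::<Result<PreBinnedItem, Error>>` hands the receiver an `Ok(Err(e))` it can forward directly, without re-wrapping.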
@@ -344,8 +344,9 @@ pub fn parsed1(query: &netpod::AggQuerySingleChannel, node: &Node) -> impl Strea
     let range = err::todoval();
     let mut chunker = eventchunker::EventChunker::from_event_boundary(inp, err::todoval(), range);
     while let Some(evres) = chunker.next().await {
+        use eventchunker::EventChunkerItem;
         match evres {
-            Ok(evres) => {
+            Ok(EventChunkerItem::Events(evres)) => {
                 //let mut buf = BytesMut::with_capacity(16);
                 // TODO put some interesting information to test
                 //buf.put_u64_le(0xcafecafe);

@@ -359,6 +360,7 @@ pub fn parsed1(query: &netpod::AggQuerySingleChannel, node: &Node) -> impl Strea
             Err(e) => {
                 yield Err(e)
             }
+            _ => todo!(),
         }
     }
 }
@@ -1,6 +1,6 @@
 use crate::agg::eventbatch::MinMaxAvgScalarEventBatch;
-use crate::agg::{Dim1F32Stream, ValuesDim1};
-use crate::eventchunker::EventFull;
+use crate::agg::{Dim1F32Stream, Dim1F32StreamItem, MinMaxAvgScalarEventBatchStreamItem, ValuesDim1};
+use crate::eventchunker::EventChunkerItem;
 use err::Error;
 use futures_core::Stream;
 use futures_util::StreamExt;

@@ -9,21 +9,17 @@ use std::task::{Context, Poll};
 #[allow(unused_imports)]
 use tracing::{debug, error, info, trace, warn};

-pub struct MergeDim1F32Stream<S>
-where
-    S: Stream<Item = Result<EventFull, Error>>,
-{
+pub struct MergeDim1F32Stream<S> {
+    // yields Dim1F32StreamItem
     inps: Vec<Dim1F32Stream<S>>,
     current: Vec<CurVal>,
     ixs: Vec<usize>,
     emitted_complete: bool,
     errored: bool,
     completed: bool,
     batch: ValuesDim1,
 }

-impl<S> MergeDim1F32Stream<S>
-where
-    S: Stream<Item = Result<EventFull, Error>>,
-{
+impl<S> MergeDim1F32Stream<S> {
     pub fn new(inps: Vec<Dim1F32Stream<S>>) -> Self {
         let n = inps.len();
         let mut current = vec![];

@@ -34,32 +30,49 @@ where
             inps,
             current: current,
             ixs: vec![0; n],
             emitted_complete: false,
             batch: ValuesDim1::empty(),
             errored: false,
             completed: false,
         }
     }
 }

 impl<S> Stream for MergeDim1F32Stream<S>
 where
-    S: Stream<Item = Result<EventFull, Error>> + Unpin,
+    S: Stream<Item = Result<EventChunkerItem, Error>> + Unpin,
 {
     //type Item = <Dim1F32Stream as Stream>::Item;
-    type Item = Result<ValuesDim1, Error>;
+    type Item = Result<Dim1F32StreamItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;
         // TODO rewrite making the break the default and explicit continue.
         'outer: loop {
-            if self.emitted_complete {
+            if self.completed {
                 panic!("poll on complete stream");
             }
+            if self.errored {
+                self.completed = true;
+                return Ready(None);
+            }
             // can only run logic if all streams are either finished, errored or have some current value.
             for i1 in 0..self.inps.len() {
                 match self.current[i1] {
                     CurVal::None => {
                         match self.inps[i1].poll_next_unpin(cx) {
                             Ready(Some(Ok(k))) => {
-                                self.current[i1] = CurVal::Val(k);
+                                // TODO do I keep only the values as "current" or also the other kinds of items?
+                                // Can I process the other kinds instantly?
+                                match k {
+                                    Dim1F32StreamItem::Values(vals) => {
+                                        self.current[i1] = CurVal::Val(vals);
+                                    }
+                                    Dim1F32StreamItem::RangeComplete => {
+                                        todo!();
+                                    }
+                                    Dim1F32StreamItem::EventDataReadStats(_stats) => {
+                                        todo!();
+                                    }
+                                }
                             }
                             Ready(Some(Err(e))) => {
                                 self.current[i1] = CurVal::Err(Error::with_msg(format!(

@@ -120,7 +133,8 @@ where
             }
             if self.batch.tss.len() >= 64 {
                 let k = std::mem::replace(&mut self.batch, ValuesDim1::empty());
-                break Ready(Some(Ok(k)));
+                let ret = Dim1F32StreamItem::Values(k);
+                break Ready(Some(Ok(ret)));
             }
         }
     }

@@ -135,7 +149,7 @@ enum CurVal {

 pub struct MergedMinMaxAvgScalarStream<S>
 where
-    S: Stream<Item = Result<MinMaxAvgScalarEventBatch, Error>>,
+    S: Stream<Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>>,
 {
     inps: Vec<S>,
     current: Vec<MergedMinMaxAvgScalarStreamCurVal>,

@@ -150,7 +164,7 @@ where

 impl<S> MergedMinMaxAvgScalarStream<S>
 where
-    S: Stream<Item = Result<MinMaxAvgScalarEventBatch, Error>>,
+    S: Stream<Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>>,
 {
     pub fn new(inps: Vec<S>) -> Self {
         let n = inps.len();

@@ -174,9 +188,9 @@ where

 impl<S> Stream for MergedMinMaxAvgScalarStream<S>
 where
-    S: Stream<Item = Result<MinMaxAvgScalarEventBatch, Error>> + Unpin,
+    S: Stream<Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>> + Unpin,
 {
-    type Item = Result<MinMaxAvgScalarEventBatch, Error>;
+    type Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;

@@ -193,20 +207,27 @@ where
             match self.current[i1] {
                 MergedMinMaxAvgScalarStreamCurVal::None => {
                     match self.inps[i1].poll_next_unpin(cx) {
-                        Ready(Some(Ok(mut k))) => {
-                            self.batch.event_data_read_stats.trans(&mut k.event_data_read_stats);
-                            self.batch.values_extract_stats.trans(&mut k.values_extract_stats);
-                            if k.range_complete_observed {
-                                self.range_complete_observed[i1] = true;
-                                let d = self.range_complete_observed.iter().filter(|&&k| k).count();
-                                if d == self.range_complete_observed.len() {
-                                    self.range_complete_observed_all = true;
-                                    info!("\n\n:::::: range_complete d {} COMPLETE", d);
-                                } else {
-                                    info!("\n\n:::::: range_complete d {}", d);
-                                }
-                            }
-                            self.current[i1] = MergedMinMaxAvgScalarStreamCurVal::Val(k);
+                        Ready(Some(Ok(k))) => {
+                            match k {
+                                MinMaxAvgScalarEventBatchStreamItem::Values(vals) => {
+                                    self.current[i1] = MergedMinMaxAvgScalarStreamCurVal::Val(vals);
+                                }
+                                MinMaxAvgScalarEventBatchStreamItem::RangeComplete => {
+                                    self.range_complete_observed[i1] = true;
+                                    let d = self.range_complete_observed.iter().filter(|&&k| k).count();
+                                    if d == self.range_complete_observed.len() {
+                                        self.range_complete_observed_all = true;
+                                        info!("\n\n:::::: range_complete d {} COMPLETE", d);
+                                    } else {
+                                        info!("\n\n:::::: range_complete d {}", d);
+                                    }
+                                    // TODO what else to do here?
+                                    todo!();
+                                }
+                                MinMaxAvgScalarEventBatchStreamItem::EventDataReadStats(_stats) => {
+                                    todo!();
+                                }
+                            }
                         }
                         Ready(Some(Err(e))) => {
                             // TODO emit this error, consider this stream as done, anything more to do here?

@@ -255,7 +276,8 @@ where
                 k.range_complete_observed = true;
             }
             info!("MergedMinMaxAvgScalarStream no more lowest emit Ready(Some( current batch ))");
-            break Ready(Some(Ok(k)));
+            let ret = MinMaxAvgScalarEventBatchStreamItem::Values(k);
+            break Ready(Some(Ok(ret)));
         } else {
             info!("MergedMinMaxAvgScalarStream no more lowest emit Ready(None)");
             self.completed = true;

@@ -281,7 +303,8 @@ where
             if self.range_complete_observed_all {
                 k.range_complete_observed = true;
             }
-            break Ready(Some(Ok(k)));
+            let ret = MinMaxAvgScalarEventBatchStreamItem::Values(k);
+            break Ready(Some(Ok(ret)));
         }
     }
 }
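The merge stage only declares the range complete once every input stream has delivered its `RangeComplete` marker. A self-contained sketch of the counting check used above (the helper name is made up for illustration):

// Mirrors `d == self.range_complete_observed.len()` in the merge code: the
// range counts as complete only when all inputs reported RangeComplete.
fn all_inputs_complete(range_complete_observed: &[bool]) -> bool {
    let d = range_complete_observed.iter().filter(|&&k| k).count();
    d == range_complete_observed.len()
}

fn main() {
    // e.g. with three merged inputs:
    assert_eq!(all_inputs_complete(&[true, false, true]), false);
    assert_eq!(all_inputs_complete(&[true, true, true]), true);
}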
@@ -5,7 +5,7 @@ Delivers event data (not yet time-binned) from local storage and provides client
 to request such data from nodes.
 */

-use crate::agg::eventbatch::MinMaxAvgScalarEventBatch;
+use crate::agg::MinMaxAvgScalarEventBatchStreamItem;
 use crate::frame::inmem::InMemoryFrameAsyncReadStream;
 use crate::frame::makeframe::{make_frame, make_term_frame};
 use crate::raw::bffr::MinMaxAvgScalarEventBatchStreamFromFrames;

@@ -38,7 +38,7 @@ pub struct EventQueryJsonStringFrame(String);
 pub async fn x_processed_stream_from_node(
     query: EventsQuery,
     node: Node,
-) -> Result<Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarEventBatch, Error>> + Send>>, Error> {
+) -> Result<Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>> + Send>>, Error> {
     let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
     let qjs = serde_json::to_string(&query)?;
     let (netin, mut netout) = net.into_split();
@@ -1,4 +1,4 @@
-use crate::agg::eventbatch::MinMaxAvgScalarEventBatch;
+use crate::agg::MinMaxAvgScalarEventBatchStreamItem;
 use crate::frame::inmem::InMemoryFrameAsyncReadStream;
 use crate::frame::makeframe::decode_frame;
 use crate::raw::conn::RawConnOut;

@@ -37,7 +37,7 @@ impl<T> Stream for MinMaxAvgScalarEventBatchStreamFromFrames<T>
 where
     T: AsyncRead + Unpin,
 {
-    type Item = Result<MinMaxAvgScalarEventBatch, Error>;
+    type Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;
@@ -1,6 +1,6 @@
 use crate::agg::binnedx::IntoBinnedXBins1;
 use crate::agg::eventbatch::MinMaxAvgScalarEventBatch;
-use crate::agg::IntoDim1F32Stream;
+use crate::agg::{IntoDim1F32Stream, MinMaxAvgScalarEventBatchStreamItem};
 use crate::channelconfig::{extract_matching_config_entry, read_local_config};
 use crate::eventblobs::EventBlobsComplete;
 use crate::frame::inmem::InMemoryFrameAsyncReadStream;

@@ -46,7 +46,7 @@ async fn raw_conn_handler(stream: TcpStream, addr: SocketAddr, node_config: Node
     }
 }

-pub type RawConnOut = Result<MinMaxAvgScalarEventBatch, Error>;
+pub type RawConnOut = Result<MinMaxAvgScalarEventBatchStreamItem, Error>;

 async fn raw_conn_handler_inner(
     stream: TcpStream,

@@ -178,18 +178,21 @@ async fn raw_conn_handler_inner_try(
         .into_binned_x_bins_1();
     let mut e = 0;
     while let Some(item) = s1.next().await {
-        if let Ok(k) = &item {
-            e += 1;
-            if false {
-                trace!(
-                    "emit items sp {:2} e {:3} len {:3} {:10?} {:10?}",
-                    node_config.node.split,
-                    e,
-                    k.tss.len(),
-                    k.tss.first().map(|k| k / SEC),
-                    k.tss.last().map(|k| k / SEC),
-                );
+        match &item {
+            Ok(MinMaxAvgScalarEventBatchStreamItem::Values(k)) => {
+                e += 1;
+                if false {
+                    trace!(
+                        "emit items sp {:2} e {:3} len {:3} {:10?} {:10?}",
+                        node_config.node.split,
+                        e,
+                        k.tss.len(),
+                        k.tss.first().map(|k| k / SEC),
+                        k.tss.last().map(|k| k / SEC),
+                    );
+                }
             }
+            _ => (),
         }
         match make_frame::<RawConnOut>(&item) {
             Ok(buf) => match netout.write_all(&buf).await {

@@ -212,6 +215,7 @@ async fn raw_conn_handler_inner_try(
     batch.maxs.push(8.4);
     batch.avgs.push(9.5);
     batch.avgs.push(9.6);
+    let batch = MinMaxAvgScalarEventBatchStreamItem::Values(batch);
     let mut s1 = futures_util::stream::iter(vec![batch]).map(Result::Ok);
     while let Some(item) = s1.next().await {
         match make_frame::<RawConnOut>(&item) {