Author: Dominik Werder
Date: 2023-02-28 09:47:27 +01:00
Parent: 9c1522f9bb
Commit: c54eaa6fcb
48 changed files with 909 additions and 940 deletions

View File

@@ -1,10 +1,18 @@
 use err::Error;
-use futures_util::{Stream, StreamExt};
-use items::{RangeCompletableItem, Sitemty, StreamItem};
+use futures_util::Stream;
+use futures_util::StreamExt;
 use items_0::collect_c::Collectable;
-use netpod::{log::*, BinnedRange, NanoRange};
+use items_0::streamitem::RangeCompletableItem;
+use items_0::streamitem::Sitemty;
+use items_0::streamitem::StatsItem;
+use items_0::streamitem::StreamItem;
+use netpod::log::*;
+use netpod::BinnedRange;
+use netpod::DiskStats;
+use netpod::NanoRange;
 use std::fmt;
-use std::time::{Duration, Instant};
+use std::time::Duration;
+use std::time::Instant;
 use tracing::Instrument;
 #[allow(unused)]
@@ -88,8 +96,6 @@ where
 }
 StreamItem::Stats(item) => {
 trace!("Stats {:?}", item);
-use items::StatsItem;
-use netpod::DiskStats;
 match item {
 // TODO factor and simplify the stats collection:
 StatsItem::EventDataReadStats(_) => {}

View File

@@ -1,19 +1,30 @@
 use crate::filechunkread::FileChunkRead;
 use crate::needminbuffer::NeedMinBuffer;
 use bitshuffle::bitshuffle_decompress;
-use bytes::{Buf, BytesMut};
+use bytes::Buf;
+use bytes::BytesMut;
 use err::Error;
-use futures_util::{Stream, StreamExt};
-use items::eventfull::EventFull;
-use items::{RangeCompletableItem, StatsItem, StreamItem, WithLen};
+use futures_util::Stream;
+use futures_util::StreamExt;
+use items_0::streamitem::RangeCompletableItem;
+use items_0::streamitem::StatsItem;
+use items_0::streamitem::StreamItem;
+use items_0::WithLen;
+use items_2::eventfull::EventFull;
 use netpod::histo::HistoLog2;
 use netpod::log::*;
 use netpod::timeunits::SEC;
-use netpod::{ByteSize, ChannelConfig, EventDataReadStats, NanoRange, ScalarType, Shape};
+use netpod::ByteSize;
+use netpod::ChannelConfig;
+use netpod::EventDataReadStats;
+use netpod::NanoRange;
+use netpod::ScalarType;
+use netpod::Shape;
 use parse::channelconfig::CompressionMethod;
 use std::path::PathBuf;
 use std::pin::Pin;
-use std::task::{Context, Poll};
+use std::task::Context;
+use std::task::Poll;
 use std::time::Instant;
 pub struct EventChunker {

View File

@@ -1,11 +1,11 @@
 use err::Error;
 use futures_util::Stream;
 use futures_util::StreamExt;
-use items::frame::decode_frame;
-use items::inmem::InMemoryFrame;
-use items::FrameTypeInnerStatic;
-use items::Sitemty;
-use items::StreamItem;
+use items_0::framable::FrameTypeInnerStatic;
+use items_0::streamitem::Sitemty;
+use items_0::streamitem::StreamItem;
+use items_2::frame::decode_frame;
+use items_2::inmem::InMemoryFrame;
 use netpod::log::*;
 use serde::de::DeserializeOwned;
 use std::marker::PhantomData;

View File

@@ -1,15 +1,20 @@
 use crate::slidebuf::SlideBuf;
 use bytes::Bytes;
 use err::Error;
-use futures_util::{pin_mut, Stream};
-use items::inmem::InMemoryFrame;
-use items::{StreamItem, TERM_FRAME_TYPE_ID};
-use items::{INMEM_FRAME_FOOT, INMEM_FRAME_HEAD, INMEM_FRAME_MAGIC};
+use futures_util::pin_mut;
+use futures_util::Stream;
+use items_0::streamitem::StreamItem;
+use items_0::streamitem::TERM_FRAME_TYPE_ID;
+use items_2::framable::INMEM_FRAME_FOOT;
+use items_2::framable::INMEM_FRAME_HEAD;
+use items_2::framable::INMEM_FRAME_MAGIC;
+use items_2::inmem::InMemoryFrame;
 use netpod::log::*;
 use std::pin::Pin;
-use std::task::{Context, Poll};
-use tokio::io::{AsyncRead, ReadBuf};
-use tracing::Instrument;
+use std::task::Context;
+use std::task::Poll;
+use tokio::io::AsyncRead;
+use tokio::io::ReadBuf;
 #[allow(unused)]
 macro_rules! trace2 {
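
Background note: the INMEM_FRAME_MAGIC, INMEM_FRAME_HEAD and INMEM_FRAME_FOOT constants imported above (now from items_2::framable) suggest a length-prefixed in-memory frame layout. The sketch below shows one common way such framing is parsed; the constant values and header fields (magic, frame type id, payload length) are assumptions for illustration only, not the actual items_2 wire layout.

    // Hypothetical framing sketch; constants and header layout are assumptions,
    // not the real items_2::framable definitions.
    const FRAME_MAGIC: u32 = 0xc0de_f00d;
    const FRAME_HEAD: usize = 12; // magic (4) + frame type id (4) + payload len (4)
    const FRAME_FOOT: usize = 4; // e.g. a trailing checksum

    struct Frame<'a> {
        tyid: u32,
        payload: &'a [u8],
    }

    /// Try to split one frame off the front of `buf`.
    /// Returns the frame and the bytes consumed, or None if more input is needed.
    fn try_parse_frame(buf: &[u8]) -> Result<Option<(Frame<'_>, usize)>, String> {
        if buf.len() < FRAME_HEAD {
            return Ok(None);
        }
        let magic = u32::from_le_bytes(buf[0..4].try_into().unwrap());
        if magic != FRAME_MAGIC {
            return Err(format!("bad magic {magic:08x}"));
        }
        let tyid = u32::from_le_bytes(buf[4..8].try_into().unwrap());
        let len = u32::from_le_bytes(buf[8..12].try_into().unwrap()) as usize;
        let total = FRAME_HEAD + len + FRAME_FOOT;
        if buf.len() < total {
            return Ok(None);
        }
        let payload = &buf[FRAME_HEAD..FRAME_HEAD + len];
        Ok(Some((Frame { tyid, payload }, total)))
    }

    fn main() {
        let mut buf = Vec::new();
        buf.extend_from_slice(&FRAME_MAGIC.to_le_bytes());
        buf.extend_from_slice(&7u32.to_le_bytes()); // frame type id
        buf.extend_from_slice(&3u32.to_le_bytes()); // payload length
        buf.extend_from_slice(b"abc"); // payload
        buf.extend_from_slice(&[0; 4]); // footer
        let (frame, used) = try_parse_frame(&buf).unwrap().unwrap();
        assert_eq!((frame.tyid, frame.payload, used), (7, b"abc".as_slice(), buf.len()));
    }
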

View File

@@ -1,7 +1,9 @@
+use crate::rangefilter2::RangeFilter2;
 use crate::tcprawclient::open_tcp_streams;
 use err::Error;
 use futures_util::stream;
 use futures_util::StreamExt;
+use items_0::streamitem::sitem_data;
 use items_2::channelevents::ChannelEvents;
 use netpod::log::*;
 use netpod::query::PlainEventsQuery;
@@ -38,7 +40,7 @@ pub async fn plain_events_json(
 let empty = items_2::empty_events_dyn_ev(&chconf.scalar_type, &chconf.shape, &ev_agg_kind)?;
 info!("plain_events_json with empty item {}", empty.type_name());
 let empty = ChannelEvents::Events(empty);
-let empty = items::sitem_data(empty);
+let empty = sitem_data(empty);
 // TODO should be able to ask for data-events only, instead of mixed data and status events.
 let inps = open_tcp_streams::<_, items_2::channelevents::ChannelEvents>(&evquery, cluster).await?;
 //let inps = open_tcp_streams::<_, Box<dyn items_2::Events>>(&query, cluster).await?;
@@ -48,7 +50,7 @@ pub async fn plain_events_json(
 info!("item after merge: {item:?}");
 item
 });
-let stream = crate::rangefilter2::RangeFilter2::new(stream, query.range().clone(), evquery.one_before_range());
+let stream = RangeFilter2::new(stream, query.range().clone(), evquery.one_before_range());
 let stream = stream.map(|item| {
 info!("item after rangefilter: {item:?}");
 item
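
Note on the sitem_data change above: the helper moved from the items crate to items_0::streamitem, so the call site only drops the crate prefix once it is imported. For readers without the crates at hand, the sketch below shows the general shape such a helper has, using local stand-in types rather than the real items_0::streamitem and err definitions (the actual nesting may differ). With a wrapper like this, the seeded empty item and network-delivered items flow through the same Stream of Sitemty values.

    // Stand-in types only; the real definitions live in items_0::streamitem and err.
    #[derive(Debug)]
    struct Error(String);

    #[derive(Debug)]
    enum RangeCompletableItem<T> {
        RangeComplete,
        Data(T),
    }

    #[derive(Debug)]
    enum StreamItem<T> {
        DataItem(T),
        Log(String),
        Stats(String),
    }

    // A "stream item of T": fallible, and able to carry data, logs, stats
    // or a range-complete marker through the same pipeline.
    type Sitemty<T> = Result<StreamItem<RangeCompletableItem<T>>, Error>;

    // Wrap a plain value as a data item, which is what the call sites above
    // use to seed a stream with a typed empty container.
    fn sitem_data<T>(x: T) -> Sitemty<T> {
        Ok(StreamItem::DataItem(RangeCompletableItem::Data(x)))
    }

    fn main() {
        let seeded: Sitemty<Vec<f32>> = sitem_data(Vec::new());
        println!("{seeded:?}");
    }
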

View File

@@ -1,12 +1,22 @@
 use err::Error;
-use futures_util::{Stream, StreamExt};
-use items::StatsItem;
-use items::{Appendable, Clearable, PushableIndex, RangeCompletableItem, Sitemty, StreamItem, WithTimestamps};
-use netpod::{log::*, RangeFilterStats};
-use netpod::{NanoRange, Nanos};
+use futures_util::Stream;
+use futures_util::StreamExt;
+use items::Appendable;
+use items::Clearable;
+use items::PushableIndex;
+use items::WithTimestamps;
+use items_0::streamitem::RangeCompletableItem;
+use items_0::streamitem::Sitemty;
+use items_0::streamitem::StatsItem;
+use items_0::streamitem::StreamItem;
+use netpod::log::*;
+use netpod::NanoRange;
+use netpod::Nanos;
+use netpod::RangeFilterStats;
 use std::fmt;
 use std::pin::Pin;
-use std::task::{Context, Poll};
+use std::task::Context;
+use std::task::Poll;
 pub struct RangeFilter<S, ITY>
 where

View File

@@ -1,10 +1,10 @@
 use err::Error;
 use futures_util::Stream;
 use futures_util::StreamExt;
-use items::RangeCompletableItem;
-use items::Sitemty;
-use items::StatsItem;
-use items::StreamItem;
+use items_0::streamitem::RangeCompletableItem;
+use items_0::streamitem::Sitemty;
+use items_0::streamitem::StatsItem;
+use items_0::streamitem::StreamItem;
 use items_2::merger::MergeError;
 use items_2::merger::Mergeable;
 use netpod::log::*;

View File

@@ -10,19 +10,22 @@ use crate::frames::inmem::InMemoryFrameAsyncReadStream;
 use err::Error;
 use futures_util::Stream;
 use futures_util::StreamExt;
-use items::eventfull::EventFull;
-use items::frame::make_frame;
-use items::frame::make_term_frame;
-use items::sitem_data;
-use items::EventQueryJsonStringFrame;
-use items::RangeCompletableItem;
-use items::Sitemty;
-use items::StreamItem;
+use items_0::framable::FrameTypeInnerStatic;
+use items_0::streamitem::sitem_data;
+use items_0::streamitem::RangeCompletableItem;
+use items_0::streamitem::Sitemty;
+use items_0::streamitem::StreamItem;
+use items_2::eventfull::EventFull;
+use items_2::framable::EventQueryJsonStringFrame;
+use items_2::frame::make_frame;
+use items_2::frame::make_term_frame;
 use netpod::log::*;
 use netpod::query::PlainEventsQuery;
 use netpod::Cluster;
 use netpod::Node;
 use netpod::PerfOpts;
+use serde::de::DeserializeOwned;
+use serde::Serialize;
 use std::fmt;
 use std::pin::Pin;
 use tokio::io::AsyncWriteExt;
@@ -59,9 +62,9 @@ pub type BoxedStream<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
 pub async fn open_tcp_streams<Q, T>(query: Q, cluster: &Cluster) -> Result<Vec<BoxedStream<T>>, Error>
 where
-Q: serde::Serialize,
+Q: Serialize,
 // Group bounds in new trait
-T: items::FrameTypeInnerStatic + serde::de::DeserializeOwned + Send + Unpin + fmt::Debug + 'static,
+T: FrameTypeInnerStatic + DeserializeOwned + Send + Unpin + fmt::Debug + 'static,
 {
 // TODO when unit tests established, change to async connect:
 let mut streams = Vec::new();
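
The "Group bounds in new trait" comment kept in the hunk above points at a possible follow-up: collapsing the repeated bound list on T into one umbrella trait with a blanket impl. Below is a minimal sketch of that refactoring, with stand-in traits in place of FrameTypeInnerStatic and DeserializeOwned and a hypothetical name FrameStreamItem for the grouped trait.

    use std::fmt;

    // Stand-ins for the real bounds; FrameTypeInnerStatic lives in items_0::framable
    // and DeserializeOwned in serde::de.
    trait FrameTypeInnerStatic {}
    trait DeserializeOwnedLike {}

    // Hypothetical umbrella trait carrying the whole bound list once.
    trait FrameStreamItem:
        FrameTypeInnerStatic + DeserializeOwnedLike + Send + Unpin + fmt::Debug + 'static
    {
    }

    // Blanket impl: any type satisfying the bounds gets the umbrella trait for free.
    impl<T> FrameStreamItem for T where
        T: FrameTypeInnerStatic + DeserializeOwnedLike + Send + Unpin + fmt::Debug + 'static
    {
    }

    // The generic signature then shrinks to a single bound.
    fn open_streams_sketch<T: FrameStreamItem>(_inputs: Vec<T>) {}
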

View File

@@ -4,8 +4,10 @@ mod collect;
 mod timebin;
 use err::Error;
-use futures_util::{stream, Stream};
-use items::{sitem_data, Sitemty};
+use futures_util::stream;
+use futures_util::Stream;
+use items_0::streamitem::sitem_data;
+use items_0::streamitem::Sitemty;
 use items_0::Empty;
 use items_2::channelevents::ChannelEvents;
 use items_2::eventsdim0::EventsDim0;

View File

@@ -1,11 +1,12 @@
 use crate::test::runfut;
 use err::Error;
 use futures_util::stream;
-use items::sitem_data;
+use items_0::streamitem::sitem_data;
 use items_2::eventsdim0::EventsDim0CollectorOutput;
 use items_2::testgen::make_some_boxed_d0_f32;
 use netpod::timeunits::SEC;
-use std::time::{Duration, Instant};
+use std::time::Duration;
+use std::time::Instant;
 #[test]
 fn collect_channel_events() -> Result<(), Error> {

View File

@@ -1,14 +1,21 @@
 use crate::test::runfut;
 use err::Error;
-use futures_util::{stream, StreamExt};
-use items::{sitem_data, RangeCompletableItem, StreamItem};
+use futures_util::stream;
+use futures_util::StreamExt;
+use items_0::streamitem::sitem_data;
+use items_0::streamitem::RangeCompletableItem;
+use items_0::streamitem::StreamItem;
 use items_0::Empty;
 use items_2::binsdim0::BinsDim0;
-use items_2::channelevents::{ChannelEvents, ConnStatus, ConnStatusEvent};
+use items_2::channelevents::ChannelEvents;
+use items_2::channelevents::ConnStatus;
+use items_2::channelevents::ConnStatusEvent;
 use items_2::testgen::make_some_boxed_d0_f32;
-use netpod::timeunits::{MS, SEC};
+use netpod::timeunits::MS;
+use netpod::timeunits::SEC;
 use std::collections::VecDeque;
-use std::time::{Duration, Instant};
+use std::time::Duration;
+use std::time::Instant;
 #[test]
 fn time_bin_00() {

View File

@@ -1,12 +1,19 @@
 use err::Error;
-use futures_util::{Future, FutureExt, Stream, StreamExt};
-use items::{sitem_data, RangeCompletableItem, Sitemty, StreamItem};
-//use items_0::{TimeBinnable, TimeBinner};
-use items_2::timebin::{TimeBinnable, TimeBinner};
+use futures_util::Future;
+use futures_util::FutureExt;
+use futures_util::Stream;
+use futures_util::StreamExt;
+use items_0::streamitem::sitem_data;
+use items_0::streamitem::RangeCompletableItem;
+use items_0::streamitem::Sitemty;
+use items_0::streamitem::StreamItem;
+use items_2::timebin::TimeBinnable;
+use items_2::timebin::TimeBinner;
 use netpod::log::*;
 use std::fmt;
 use std::pin::Pin;
-use std::task::{Context, Poll};
+use std::task::Context;
+use std::task::Poll;
 use std::time::Instant;
 #[allow(unused)]

View File

@@ -1,8 +1,13 @@
+use crate::rangefilter2::RangeFilter2;
 use crate::tcprawclient::open_tcp_streams;
+use crate::timebin::TimeBinnedStream;
 use err::Error;
 use futures_util::stream;
 use futures_util::StreamExt;
+use items_0::streamitem::sitem_data;
+use items_0::streamitem::Sitemty;
 use items_2::channelevents::ChannelEvents;
+use items_2::merger::Merger;
 #[allow(unused)]
 use netpod::log::*;
 use netpod::query::BinnedQuery;
@@ -20,7 +25,7 @@ pub async fn timebinned_json(query: &BinnedQuery, chconf: &ChConf, cluster: &Clu
 let deadline = Instant::now() + query.timeout_value();
 let empty = items_2::empty_events_dyn_ev(&chconf.scalar_type, &chconf.shape, &query.agg_kind())?;
 let empty = ChannelEvents::Events(empty);
-let empty = items::sitem_data(empty);
+let empty = sitem_data(empty);
 let evquery = PlainEventsQuery::new(
 query.channel().clone(),
 query.range().clone(),
@@ -31,14 +36,14 @@ pub async fn timebinned_json(query: &BinnedQuery, chconf: &ChConf, cluster: &Clu
 let inps = open_tcp_streams::<_, items_2::channelevents::ChannelEvents>(&evquery, cluster).await?;
 // TODO propagate also the max-buf-len for the first stage event reader:
 info!("timebinned_json with empty item {empty:?}");
-let stream = items_2::merger::Merger::new(inps, 128);
+let stream = Merger::new(inps, 128);
 let stream = stream::iter([empty]).chain(stream);
-let stream = crate::rangefilter2::RangeFilter2::new(stream, query.range().clone(), evquery.one_before_range());
+let stream = RangeFilter2::new(stream, query.range().clone(), evquery.one_before_range());
 let stream = Box::pin(stream);
-let stream = crate::timebin::TimeBinnedStream::new(stream, binned_range.edges(), do_time_weight, deadline);
+let stream = TimeBinnedStream::new(stream, binned_range.edges(), do_time_weight, deadline);
 if false {
 let mut stream = stream;
-let _: Option<items::Sitemty<Box<dyn items_0::TimeBinned>>> = stream.next().await;
+let _: Option<Sitemty<Box<dyn items_0::TimeBinned>>> = stream.next().await;
 panic!()
 }
 let collected = crate::collect::collect(stream, deadline, bins_max, None, Some(binned_range.clone())).await?;
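
Both plain_events_json and timebinned_json prepend a typed empty item via stream::iter([empty]).chain(stream) before filtering and binning, presumably so the downstream stages always see at least one item of the concrete event type even when the merged inputs yield nothing. Below is a stripped-down sketch of that seeding pattern using only futures_util and tokio, with plain strings standing in for ChannelEvents.

    use futures_util::stream;
    use futures_util::StreamExt;

    #[tokio::main]
    async fn main() {
        // Stand-in for the merged per-node streams; it can legitimately be empty.
        let merged = stream::iter(Vec::<String>::new());
        // Stand-in for the typed `empty` item built from the channel config.
        let empty = String::from("empty seed");
        // Prepend the seed so consumers always observe at least one typed item.
        let seeded = stream::iter([empty]).chain(merged);
        let collected: Vec<String> = seeded.collect().await;
        assert_eq!(collected, vec![String::from("empty seed")]);
    }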