Reenable test for plain event json data

This commit is contained in:
Dominik Werder
2022-12-01 16:10:43 +01:00
parent 8082271c2a
commit 74af61f7fb
18 changed files with 432 additions and 260 deletions

View File

@@ -5,6 +5,7 @@ use items_0::collect_c::{Collectable, Collector};
use netpod::log::*;
use std::fmt;
use std::time::{Duration, Instant};
use tracing::Instrument;
#[allow(unused)]
macro_rules! trace2 {
@@ -33,88 +34,90 @@ where
S: Stream<Item = Sitemty<T>> + Unpin,
T: Collectable + fmt::Debug,
{
let mut collector: Option<<T as Collectable>::Collector> = None;
let mut stream = stream;
let deadline = deadline.into();
let mut range_complete = false;
let mut total_duration = Duration::ZERO;
loop {
let item = match tokio::time::timeout_at(deadline, stream.next()).await {
Ok(Some(k)) => k,
Ok(None) => break,
Err(_e) => {
if let Some(coll) = collector.as_mut() {
coll.set_timed_out();
} else {
eprintln!("TODO [861a95813]");
err::todo();
}
break;
}
};
match item {
Ok(item) => match item {
StreamItem::DataItem(item) => match item {
RangeCompletableItem::RangeComplete => {
range_complete = true;
if let Some(coll) = collector.as_mut() {
coll.set_range_complete();
} else {
eprintln!("TODO [7cc0fca8f]");
err::todo();
}
let span = tracing::span!(tracing::Level::TRACE, "collect");
let fut = async {
let mut collector: Option<<T as Collectable>::Collector> = None;
let mut stream = stream;
let deadline = deadline.into();
let mut range_complete = false;
let mut total_duration = Duration::ZERO;
loop {
let item = match tokio::time::timeout_at(deadline, stream.next()).await {
Ok(Some(k)) => k,
Ok(None) => break,
Err(_e) => {
if let Some(coll) = collector.as_mut() {
coll.set_timed_out();
} else {
warn!("Timeout but no collector yet");
}
RangeCompletableItem::Data(mut item) => {
eprintln!("COLLECTOR INGEST ITEM");
if collector.is_none() {
let c = item.new_collector();
collector = Some(c);
break;
}
};
match item {
Ok(item) => match item {
StreamItem::DataItem(item) => match item {
RangeCompletableItem::RangeComplete => {
range_complete = true;
if let Some(coll) = collector.as_mut() {
coll.set_range_complete();
} else {
warn!("Received RangeComplete but no collector yet");
}
}
let coll = collector.as_mut().unwrap();
coll.ingest(&mut item);
if coll.len() as u64 >= events_max {
break;
RangeCompletableItem::Data(mut item) => {
if collector.is_none() {
let c = item.new_collector();
collector = Some(c);
}
let coll = collector.as_mut().unwrap();
coll.ingest(&mut item);
if coll.len() as u64 >= events_max {
warn!("Reached events_max {} abort", events_max);
break;
}
}
},
StreamItem::Log(item) => {
trace!("Log {:?}", item);
}
StreamItem::Stats(item) => {
trace!("Stats {:?}", item);
use items::StatsItem;
use netpod::DiskStats;
match item {
// TODO factor and simplify the stats collection:
StatsItem::EventDataReadStats(_) => {}
StatsItem::RangeFilterStats(_) => {}
StatsItem::DiskStats(item) => match item {
DiskStats::OpenStats(k) => {
total_duration += k.duration;
}
DiskStats::SeekStats(k) => {
total_duration += k.duration;
}
DiskStats::ReadStats(k) => {
total_duration += k.duration;
}
DiskStats::ReadExactStats(k) => {
total_duration += k.duration;
}
},
}
}
},
StreamItem::Log(item) => {
trace!("Log {:?}", item);
Err(e) => {
// TODO Need to use some flags to get good enough error message for remote user.
return Err(e);
}
StreamItem::Stats(item) => {
trace!("Stats {:?}", item);
use items::StatsItem;
use netpod::DiskStats;
match item {
// TODO factor and simplify the stats collection:
StatsItem::EventDataReadStats(_) => {}
StatsItem::RangeFilterStats(_) => {}
StatsItem::DiskStats(item) => match item {
DiskStats::OpenStats(k) => {
total_duration += k.duration;
}
DiskStats::SeekStats(k) => {
total_duration += k.duration;
}
DiskStats::ReadStats(k) => {
total_duration += k.duration;
}
DiskStats::ReadExactStats(k) => {
total_duration += k.duration;
}
},
}
}
},
Err(e) => {
// TODO Need to use some flags to get good enough error message for remote user.
Err(e)?;
}
}
}
let _ = range_complete;
let res = collector
.ok_or_else(|| Error::with_msg_no_trace(format!("no collector created")))?
.result()?;
debug!("Total duration: {:?}", total_duration);
Ok(res)
let _ = range_complete;
let res = collector
.ok_or_else(|| Error::with_msg_no_trace(format!("no result because no collector was created")))?
.result()?;
debug!("Total duration: {:?}", total_duration);
Ok(res)
};
fut.instrument(span).await
}

View File

@@ -3,13 +3,19 @@ use bytes::Bytes;
use err::Error;
use futures_util::{pin_mut, Stream};
use items::inmem::InMemoryFrame;
use items::StreamItem;
use items::{StreamItem, TERM_FRAME_TYPE_ID};
use items::{INMEM_FRAME_FOOT, INMEM_FRAME_HEAD, INMEM_FRAME_MAGIC};
use netpod::log::*;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
#[allow(unused)]
macro_rules! trace2 {
($($arg:tt)*) => ();
($($arg:tt)*) => (trace!($($arg)*));
}
impl err::ToErr for crate::slidebuf::Error {
fn to_err(self) -> Error {
Error::with_msg_no_trace(format!("{self}"))
@@ -29,7 +35,6 @@ where
done: bool,
complete: bool,
inp_bytes_consumed: u64,
npoll: u64,
}
impl<T> InMemoryFrameAsyncReadStream<T>
@@ -44,12 +49,11 @@ where
done: false,
complete: false,
inp_bytes_consumed: 0,
npoll: 0,
}
}
fn poll_upstream(&mut self, cx: &mut Context) -> Poll<Result<usize, Error>> {
trace!("poll_upstream");
trace2!("poll_upstream");
use Poll::*;
let mut buf = ReadBuf::new(self.buf.available_writable_area(self.need_min - self.buf.len())?);
let inp = &mut self.inp;
@@ -58,7 +62,7 @@ where
Ready(Ok(())) => {
let n = buf.filled().len();
self.buf.wadv(n)?;
trace!("InMemoryFrameAsyncReadStream READ {} FROM UPSTREAM", n);
trace!("recv bytes {}", n);
Ready(Ok(n))
}
Ready(Err(e)) => Ready(Err(e.into())),
@@ -66,12 +70,10 @@ where
}
}
// Try to parse a frame.
// Try to consume bytes to parse a frame.
// Update the need_min to the most current state.
// If successful, return item and number of bytes consumed.
// Must only be called when at least `need_min` bytes are available.
fn parse(&mut self) -> Result<Option<InMemoryFrame>, Error> {
trace!("parse");
let buf = self.buf.data();
if buf.len() < self.need_min {
return Err(Error::with_msg_no_trace("expect at least need_min"));
@@ -116,9 +118,6 @@ where
h.update(&buf[INMEM_FRAME_HEAD..p1]);
let payload_crc = h.finalize();
let frame_crc_ind = u32::from_le_bytes(buf[p1..p1 + 4].try_into()?);
//info!("len {}", len);
//info!("payload_crc_ind {}", payload_crc_ind);
//info!("frame_crc_ind {}", frame_crc_ind);
let payload_crc_match = payload_crc_exp == payload_crc;
let frame_crc_match = frame_crc_ind == frame_crc;
if !frame_crc_match || !payload_crc_match {
@@ -152,11 +151,8 @@ where
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
trace!("poll");
self.npoll += 1;
if self.npoll > 2000 {
panic!()
}
let span = span!(Level::TRACE, "inmem");
let _spanguard = span.enter();
loop {
break if self.complete {
panic!("poll_next on complete")
@@ -171,11 +167,17 @@ where
let e = Error::with_msg_no_trace("enough bytes but nothing parsed");
Ready(Some(Err(e)))
} else {
debug!("not enouh for parse, need to wait for more");
continue;
}
}
Ok(Some(item)) => Ready(Some(Ok(StreamItem::DataItem(item)))),
Ok(Some(item)) => {
if item.tyid() == TERM_FRAME_TYPE_ID {
self.done = true;
continue;
} else {
Ready(Some(Ok(StreamItem::DataItem(item))))
}
}
Err(e) => {
self.done = true;
Ready(Some(Err(e)))
@@ -184,7 +186,6 @@ where
} else {
match self.poll_upstream(cx) {
Ready(Ok(n1)) => {
debug!("read {n1}");
if n1 == 0 {
self.done = true;
continue;
@@ -197,10 +198,7 @@ where
self.done = true;
Ready(Some(Err(e)))
}
Pending => {
debug!("PENDING");
Pending
}
Pending => Pending,
}
};
}

View File

@@ -1,51 +1 @@
pub mod mergedstream;
use crate::frames::eventsfromframes::EventsFromFrames;
use crate::frames::inmem::InMemoryFrameAsyncReadStream;
use err::Error;
use futures_util::Stream;
use futures_util::StreamExt;
use items::frame::make_frame;
use items::frame::make_term_frame;
use items::sitem_data;
use items::EventQueryJsonStringFrame;
use items::Sitemty;
use netpod::log::*;
use netpod::Cluster;
use std::pin::Pin;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
// Boxed, Send-able stream of `Sitemty<T>` items as received from one remote node.
pub type BoxedStream<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
// Opens one TCP connection per node in `cluster`, sends the JSON-serialized
// `query` as a single frame followed by a terminator frame, and returns one
// frame-decoding event stream per node (same order as `cluster.nodes`).
//
// Errors: propagates connect failures, JSON serialization errors, frame
// construction errors, and socket write/flush errors.
pub async fn open_tcp_streams<Q, T>(query: Q, cluster: &Cluster) -> Result<Vec<BoxedStream<T>>, Error>
where
Q: serde::Serialize,
// Group bounds in new trait
T: items::FrameTypeInnerStatic + serde::de::DeserializeOwned + Send + Unpin + 'static,
{
// TODO when unit tests established, change to async connect:
let mut streams = Vec::new();
for node in &cluster.nodes {
debug!("open_tcp_streams to: {}:{}", node.host, node.port_raw);
let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
let qjs = serde_json::to_string(&query)?;
// Split the socket so the write half can be released after the query is
// sent while the read half keeps feeding the frame decoder below.
let (netin, mut netout) = net.into_split();
let item = EventQueryJsonStringFrame(qjs);
let item = sitem_data(item);
let buf = make_frame(&item)?;
netout.write_all(&buf).await?;
// Terminator frame tells the peer that no further request frames follow.
let buf = make_term_frame()?;
netout.write_all(&buf).await?;
netout.flush().await?;
// forget() drops the write half without shutting down the connection,
// so the peer can still respond on the read half — TODO confirm intent.
netout.forget();
// TODO for images, we need larger buffer capacity
let frames = InMemoryFrameAsyncReadStream::new(netin, 1024 * 128);
let stream = EventsFromFrames::<_, T>::new(frames);
// Diagnostic tap: warn whenever a RangeComplete item passes through the
// received stream (observability only; items are forwarded unchanged).
let stream = stream.inspect(|x| {
items::on_sitemty_range_complete!(x, warn!("RangeComplete SEEN IN RECEIVED TCP STREAM"));
});
streams.push(Box::pin(stream) as _);
}
Ok(streams)
}

View File

@@ -1,4 +1,4 @@
use crate::merge::open_tcp_streams;
use crate::tcprawclient::open_tcp_streams;
use bytes::Bytes;
use err::Error;
use futures_util::{Stream, StreamExt};
@@ -41,7 +41,7 @@ where
stream
};
let stream = { items_2::merger::Merger::new(inps, 1) };
let deadline = Instant::now() + Duration::from_millis(2000);
let deadline = Instant::now() + Duration::from_millis(8000);
let events_max = 100;
let collected = crate::collect::collect(stream, deadline, events_max).await?;
let jsval = serde_json::to_value(&collected)?;

View File

@@ -11,9 +11,11 @@ use err::Error;
use futures_util::Stream;
use items::eventfull::EventFull;
use items::frame::{make_frame, make_term_frame};
use items::sitem_data;
use items::{EventQueryJsonStringFrame, EventsNodeProcessor, RangeCompletableItem, Sitemty, StreamItem};
use netpod::log::*;
use netpod::query::RawEventsQuery;
use netpod::Cluster;
use netpod::{Node, PerfOpts};
use std::pin::Pin;
use tokio::io::AsyncWriteExt;
@@ -71,3 +73,34 @@ pub async fn x_processed_event_blobs_stream_from_node(
let items = EventsFromFrames::new(frames);
Ok(Box::pin(items))
}
// Boxed, Send-able stream of `Sitemty<T>` items as received from one remote node.
pub type BoxedStream<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
// Opens one TCP connection per node in `cluster`, sends the JSON-serialized
// `query` as a single frame followed by a terminator frame, and returns one
// frame-decoding event stream per node (same order as `cluster.nodes`).
//
// Errors: propagates connect failures, JSON serialization errors, frame
// construction errors, and socket write/flush errors.
pub async fn open_tcp_streams<Q, T>(query: Q, cluster: &Cluster) -> Result<Vec<BoxedStream<T>>, Error>
where
Q: serde::Serialize,
// Group bounds in new trait
T: items::FrameTypeInnerStatic + serde::de::DeserializeOwned + Send + Unpin + 'static,
{
// TODO when unit tests established, change to async connect:
let mut streams = Vec::new();
for node in &cluster.nodes {
debug!("open_tcp_streams to: {}:{}", node.host, node.port_raw);
let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
let qjs = serde_json::to_string(&query)?;
// Split the socket so the write half can be released after the query is
// sent while the read half keeps feeding the frame decoder below.
let (netin, mut netout) = net.into_split();
let item = EventQueryJsonStringFrame(qjs);
let item = sitem_data(item);
let buf = make_frame(&item)?;
netout.write_all(&buf).await?;
// Terminator frame tells the peer that no further request frames follow.
let buf = make_term_frame()?;
netout.write_all(&buf).await?;
netout.flush().await?;
// forget() drops the write half without shutting down the connection,
// so the peer can still respond on the read half — TODO confirm intent.
netout.forget();
// TODO for images, we need larger buffer capacity
let frames = InMemoryFrameAsyncReadStream::new(netin, 1024 * 128);
let stream = EventsFromFrames::<_, T>::new(frames);
streams.push(Box::pin(stream) as _);
}
Ok(streams)
}