Begin refactor frame handling, update clap

This commit is contained in:
Dominik Werder
2022-11-09 15:50:41 +01:00
parent fc22d1ebaf
commit 9036160253
37 changed files with 466 additions and 326 deletions
+78 -44
View File
@@ -1,18 +1,17 @@
#[cfg(test)]
mod test;
use dbconn::events_scylla::make_scylla_stream;
use disk::frame::inmem::InMemoryFrameAsyncReadStream;
use err::Error;
use futures_core::Stream;
use futures_util::StreamExt;
use items::frame::{decode_frame, make_term_frame};
use items::{Framable, StreamItem};
use items::{EventQueryJsonStringFrame, Framable, RangeCompletableItem, Sitemty, StreamItem};
use netpod::histo::HistoLog2;
use netpod::log::*;
use netpod::query::RawEventsQuery;
use netpod::query::{PlainEventsQuery, RawEventsQuery};
use netpod::AggKind;
use netpod::{EventQueryJsonStringFrame, NodeConfigCached, PerfOpts};
use netpod::{NodeConfigCached, PerfOpts};
use std::net::SocketAddr;
use std::pin::Pin;
use tokio::io::AsyncWriteExt;
@@ -33,20 +32,6 @@ pub async fn events_service(node_config: NodeConfigCached) -> Result<(), Error>
}
}
/// Top-level per-connection entry point: runs the inner handler inside a
/// tracing span and logs any error before propagating it to the caller.
async fn events_conn_handler(stream: TcpStream, addr: SocketAddr, node_config: NodeConfigCached) -> Result<(), Error> {
    let span = span!(Level::INFO, "events_conn_handler");
    events_conn_handler_inner(stream, addr, &node_config)
        .instrument(span)
        .await
        .map_err(|e| {
            // Log at the connection boundary; the error is still returned unchanged.
            error!("events_conn_handler sees error: {:?}", e);
            e
        })
}
struct ConnErr {
err: Error,
#[allow(dead_code)]
@@ -91,8 +76,18 @@ async fn events_conn_handler_inner_try(
error!("missing command frame");
return Err((Error::with_msg("missing command frame"), netout))?;
}
let qitem: EventQueryJsonStringFrame = match decode_frame(&frames[0]) {
Ok(k) => k,
// TODO this does not need all variants of Sitemty.
let qitem = match decode_frame::<Sitemty<EventQueryJsonStringFrame>>(&frames[0]) {
Ok(k) => match k {
Ok(k) => match k {
StreamItem::DataItem(k) => match k {
RangeCompletableItem::Data(k) => k,
RangeCompletableItem::RangeComplete => panic!(),
},
_ => panic!(),
},
Err(e) => return Err((e, netout).into()),
},
Err(e) => return Err((e, netout).into()),
};
let res: Result<RawEventsQuery, _> = serde_json::from_str(&qitem.0);
@@ -113,31 +108,53 @@ async fn events_conn_handler_inner_try(
let mut p1: Pin<Box<dyn Stream<Item = Box<dyn Framable + Send>> + Send>> =
if let Some(conf) = &node_config.node_config.cluster.scylla {
// TODO depends in general on the query
// TODO why both in PlainEventsQuery and as separate parameter? Check other usages.
let do_one_before_range = false;
// TODO use better builder pattern with shortcuts for production and dev defaults
let qu = PlainEventsQuery::new(evq.channel, evq.range, 1024 * 8, None, true);
let scyco = conf;
let dbconf = node_config.node_config.cluster.database.clone();
match make_scylla_stream(&evq, scyco, dbconf, evq.do_test_stream_error).await {
Ok(s) => {
//
let s = s.map(|item| {
//
/*match item {
Ok(StreamItem::Data(RangeCompletableItem::Data(k))) => {
let b = Box::new(b);
Ok(StreamItem::Data(RangeCompletableItem::Data(b)))
}
Ok(StreamItem::Data(RangeCompletableItem::Complete)) => {
Ok(StreamItem::Data(RangeCompletableItem::Complete))
}
Ok(StreamItem::Log(k)) => Ok(StreamItem::Log(k)),
Ok(StreamItem::Stats(k)) => Ok(StreamItem::Stats(k)),
Err(e) => Err(e),
}*/
Box::new(item) as Box<dyn Framable + Send>
});
Box::pin(s)
}
let _dbconf = node_config.node_config.cluster.database.clone();
let scy = match scyllaconn::create_scy_session(scyco).await {
Ok(k) => k,
Err(e) => return Err((e, netout))?,
}
};
let series = err::todoval();
let scalar_type = err::todoval();
let shape = err::todoval();
let do_test_stream_error = false;
let stream = match scyllaconn::events::make_scylla_stream(
&qu,
do_one_before_range,
series,
scalar_type,
shape,
scy,
do_test_stream_error,
)
.await
{
Ok(k) => k,
Err(e) => return Err((e, netout))?,
};
let s = stream.map(|item| {
let item = match item {
Ok(item) => match item {
items_2::ChannelEvents::Events(_item) => {
// TODO
let item = items::scalarevents::ScalarEvents::<f64>::empty();
Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))
}
items_2::ChannelEvents::RangeComplete => {
Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))
}
items_2::ChannelEvents::Status(_item) => todo!(),
},
Err(e) => Err(e),
};
Box::new(item) as Box<dyn Framable + Send>
});
Box::pin(s)
} else if let Some(_) = &node_config.node.channel_archiver {
let e = Error::with_msg_no_trace("archapp not built");
return Err((e, netout))?;
@@ -172,7 +189,10 @@ async fn events_conn_handler_inner_try(
}
}
}
let buf = make_term_frame();
let buf = match make_term_frame() {
Ok(k) => k,
Err(e) => return Err((e, netout))?,
};
match netout.write_all(&buf).await {
Ok(_) => (),
Err(e) => return Err((e, netout))?,
@@ -205,3 +225,17 @@ async fn events_conn_handler_inner(
}
Ok(())
}
/// Handle a single incoming TCP connection for the events service.
///
/// Delegates all work to `events_conn_handler_inner`, instrumented with an
/// INFO-level span; an error result is logged here and then passed through.
async fn events_conn_handler(stream: TcpStream, addr: SocketAddr, node_config: NodeConfigCached) -> Result<(), Error> {
    let span = span!(Level::INFO, "events_conn_handler");
    let res = events_conn_handler_inner(stream, addr, &node_config)
        .instrument(span)
        .await;
    if let Err(e) = &res {
        // Surface the failure in the logs at the connection boundary.
        error!("events_conn_handler sees error: {:?}", e);
    }
    res
}
+62 -6
View File
@@ -1,20 +1,23 @@
use netpod::{Cluster, Database, FileIoBufferSize, Node, NodeConfig, SfDatabuffer};
use tokio::net::TcpListener;
use super::*;
use disk::eventchunker::EventFull;
use items::frame::make_frame;
use items::Sitemty;
use netpod::timeunits::SEC;
use netpod::{Channel, Cluster, Database, DiskIoTune, FileIoBufferSize, NanoRange, Node, NodeConfig, SfDatabuffer};
use tokio::net::TcpListener;
#[test]
fn raw_data_00() {
//taskrun::run(disk::gen::gen_test_data()).unwrap();
let fut = async {
let lis = TcpListener::bind("127.0.0.1:0").await.unwrap();
let con = TcpStream::connect(lis.local_addr().unwrap()).await.unwrap();
let mut con = TcpStream::connect(lis.local_addr().unwrap()).await.unwrap();
let (client, addr) = lis.accept().await.unwrap();
let cfg = NodeConfigCached {
node_config: NodeConfig {
name: "node_name_dummy".into(),
cluster: Cluster {
backend: "backend_dummy".into(),
backend: "testbackend".into(),
nodes: vec![],
database: Database {
name: "".into(),
@@ -47,7 +50,60 @@ fn raw_data_00() {
},
ix: 0,
};
events_conn_handler(client, addr, cfg).await.unwrap();
let qu = RawEventsQuery {
channel: Channel {
series: None,
backend: "testbackend".into(),
name: "scalar-i32-be".into(),
},
range: NanoRange {
beg: SEC,
end: SEC * 10,
},
agg_kind: AggKind::Plain,
disk_io_tune: DiskIoTune {
read_sys: netpod::ReadSys::TokioAsyncRead,
read_buffer_len: 1024 * 4,
read_queue_len: 1,
},
do_decompress: true,
do_test_main_error: false,
do_test_stream_error: false,
};
let query = EventQueryJsonStringFrame(serde_json::to_string(&qu).unwrap());
let item = Ok(StreamItem::DataItem(RangeCompletableItem::Data(query)));
let frame = make_frame(&item).unwrap();
let jh = taskrun::spawn(events_conn_handler(client, addr, cfg));
con.write_all(&frame).await.unwrap();
eprintln!("written");
con.shutdown().await.unwrap();
eprintln!("shut down");
let mut frames = InMemoryFrameAsyncReadStream::new(con, 1024 * 128);
while let Some(frame) = frames.next().await {
match frame {
Ok(frame) => match frame {
StreamItem::DataItem(k) => {
eprintln!("{k:?}");
if k.tyid() == items::EVENT_FULL_FRAME_TYPE_ID {
} else if k.tyid() == items::ERROR_FRAME_TYPE_ID {
} else if k.tyid() == items::LOG_FRAME_TYPE_ID {
} else if k.tyid() == items::STATS_FRAME_TYPE_ID {
} else {
panic!("unexpected frame type id {:x}", k.tyid());
}
let item: Result<Sitemty<EventFull>, Error> = decode_frame(&k);
eprintln!("decoded: {:?}", item);
}
StreamItem::Log(_) => todo!(),
StreamItem::Stats(_) => todo!(),
},
Err(e) => {
panic!("{e:?}");
}
}
}
jh.await.unwrap().unwrap();
Ok(())
};
taskrun::run(fut).unwrap();