WIP checks

This commit is contained in:
Dominik Werder
2023-06-17 23:07:59 +02:00
parent 3cd74601a4
commit 44dd43240b
24 changed files with 492 additions and 368 deletions

View File

@@ -1,5 +1,3 @@
pub mod configquorum;
use crate::err::Error;
use crate::gather::gather_get_json_generic;
use crate::gather::SubRes;
@@ -34,13 +32,16 @@ use netpod::query::api1::Api1Query;
use netpod::range::evrange::NanoRange;
use netpod::timeunits::SEC;
use netpod::ByteSize;
use netpod::ChConf;
use netpod::ChannelSearchQuery;
use netpod::ChannelSearchResult;
use netpod::ChannelTypeConfigGen;
use netpod::DiskIoTune;
use netpod::NodeConfigCached;
use netpod::PerfOpts;
use netpod::ProxyConfig;
use netpod::ScalarType;
use netpod::SfChFetchInfo;
use netpod::SfDbChannel;
use netpod::Shape;
use netpod::ACCEPT_ALL;
@@ -639,13 +640,19 @@ impl Api1ChannelHeader {
}
}
/// Resolve the fetch configuration (`SfChFetchInfo`) for the given channel on this node.
///
/// NOTE(review): stub — currently panics via `todo!()`. The commented call below
/// suggests it is intended to delegate to a quorum-based config lookup; confirm
/// before wiring this into the request path.
async fn find_ch_conf(channel: SfDbChannel, ncc: NodeConfigCached) -> Result<SfChFetchInfo, Error> {
//find_sf_channel_config_basics_quorum()
todo!()
}
pub struct DataApiPython3DataStream {
range: NanoRange,
channels: VecDeque<SfDbChannel>,
current_channel: Option<SfDbChannel>,
node_config: NodeConfigCached,
chan_stream: Option<Pin<Box<dyn Stream<Item = Result<BytesMut, Error>> + Send>>>,
config_fut: Option<Pin<Box<dyn Future<Output = Result<ChannelConfigs, Error>> + Send>>>,
config_fut: Option<Pin<Box<dyn Future<Output = Result<SfChFetchInfo, Error>> + Send>>>,
ch_conf: Option<SfChFetchInfo>,
disk_io_tune: DiskIoTune,
do_decompress: bool,
#[allow(unused)]
@@ -674,6 +681,7 @@ impl DataApiPython3DataStream {
node_config,
chan_stream: None,
config_fut: None,
ch_conf: None,
disk_io_tune,
do_decompress,
event_count: 0,
@@ -688,10 +696,11 @@ impl DataApiPython3DataStream {
fn convert_item(
b: EventFull,
channel: &SfDbChannel,
entry: &ConfigEntry,
fetch_info: &SfChFetchInfo,
header_out: &mut bool,
count_events: &mut usize,
) -> Result<BytesMut, Error> {
let shape = fetch_info.shape();
let mut d = BytesMut::new();
for i1 in 0..b.len() {
const EVIMAX: usize = 6;
@@ -727,7 +736,7 @@ impl DataApiPython3DataStream {
},
// The shape is inconsistent on the events.
// Seems like the config is to be trusted in this case.
shape: shape_to_api3proto(&entry.shape),
shape: shape.to_u32_vec(),
compression,
};
let h = serde_json::to_string(&head)?;
@@ -787,86 +796,66 @@ impl DataApiPython3DataStream {
}
}
fn handle_config_fut_ready(&mut self, item: Result<ChannelConfigs, Error>) -> Result<(), Error> {
match item {
Ok(config) => {
self.config_fut = None;
let res = extract_matching_config_entry(&self.range, &config)?;
let entry = match res.best() {
Some(k) => k,
None => {
warn!("DataApiPython3DataStream no config entry found for {:?}", config);
self.chan_stream = Some(Box::pin(stream::empty()));
// TODO remember the issue for status and metrics
return Ok(());
}
};
let entry = entry.clone();
let channel = self.current_channel.as_ref().unwrap();
debug!("found channel_config for {}: {:?}", channel.name(), entry);
let evq = PlainEventsQuery::new(channel.clone(), self.range.clone()).for_event_blobs();
debug!("query for event blobs retrieval: evq {evq:?}");
// TODO important TODO
debug!("TODO fix magic inmem_bufcap");
debug!("TODO add timeout option to data api3 download");
let perf_opts = PerfOpts::default();
// TODO is this a good to place decide this?
let s = if self.node_config.node_config.cluster.is_central_storage {
info!("Set up central storage stream");
// TODO pull up this config
let event_chunker_conf = EventChunkerConf::new(ByteSize::kb(1024));
let s = make_local_event_blobs_stream(
evq.range().try_into()?,
evq.channel().clone(),
&entry,
evq.one_before_range(),
self.do_decompress,
event_chunker_conf,
self.disk_io_tune.clone(),
&self.node_config,
)?;
Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
} else {
if let Some(sh) = &entry.shape {
if sh.len() > 1 {
warn!("Remote stream fetch for shape {sh:?}");
}
}
debug!("Set up merged remote stream");
let s = MergedBlobsFromRemotes::new(evq, perf_opts, self.node_config.node_config.cluster.clone());
Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
};
let s = s.map({
let mut header_out = false;
let mut count_events = 0;
let channel = self.current_channel.clone().unwrap();
move |b| {
let ret = match b {
Ok(b) => {
let f = match b {
StreamItem::DataItem(RangeCompletableItem::Data(b)) => {
Self::convert_item(b, &channel, &entry, &mut header_out, &mut count_events)?
}
_ => BytesMut::new(),
};
Ok(f)
// TODO this stream can currently only handle sf-databuffer type backend anyway.
fn handle_config_fut_ready(&mut self, fetch_info: SfChFetchInfo) -> Result<(), Error> {
self.config_fut = None;
debug!("found channel_config {:?}", fetch_info);
let channel = SfDbChannel::from_name(fetch_info.backend(), fetch_info.name());
let evq = PlainEventsQuery::new(channel.clone(), self.range.clone()).for_event_blobs();
debug!("query for event blobs retrieval: evq {evq:?}");
// TODO important TODO
debug!("TODO fix magic inmem_bufcap");
debug!("TODO add timeout option to data api3 download");
let perf_opts = PerfOpts::default();
// TODO is this a good to place decide this?
let s = if self.node_config.node_config.cluster.is_central_storage {
info!("Set up central storage stream");
// TODO pull up this config
let event_chunker_conf = EventChunkerConf::new(ByteSize::kb(1024));
let s = make_local_event_blobs_stream(
evq.range().try_into()?,
&fetch_info,
evq.one_before_range(),
self.do_decompress,
event_chunker_conf,
self.disk_io_tune.clone(),
&self.node_config,
)?;
Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
} else {
debug!("Set up merged remote stream");
let ch_conf: ChannelTypeConfigGen = fetch_info.clone().into();
let s = MergedBlobsFromRemotes::new(evq, perf_opts, ch_conf, self.node_config.node_config.cluster.clone());
Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
};
let s = s.map({
let mut header_out = false;
let mut count_events = 0;
let channel = self.current_channel.clone().unwrap();
move |b| {
let ret = match b {
Ok(b) => {
let f = match b {
StreamItem::DataItem(RangeCompletableItem::Data(b)) => {
Self::convert_item(b, &channel, &fetch_info, &mut header_out, &mut count_events)?
}
Err(e) => Err(e),
_ => BytesMut::new(),
};
ret
Ok(f)
}
});
//let _ = Box::new(s) as Box<dyn Stream<Item = Result<BytesMut, Error>> + Unpin>;
let evm = if self.events_max == 0 {
usize::MAX
} else {
self.events_max as usize
Err(e) => Err(e),
};
self.chan_stream = Some(Box::pin(s.map_err(Error::from).take(evm)));
Ok(())
ret
}
Err(e) => Err(Error::with_msg_no_trace(format!("can not parse channel config {e}"))),
}
});
//let _ = Box::new(s) as Box<dyn Stream<Item = Result<BytesMut, Error>> + Unpin>;
let evm = if self.events_max == 0 {
usize::MAX
} else {
self.events_max as usize
};
self.chan_stream = Some(Box::pin(s.map_err(Error::from).take(evm)));
Ok(())
}
}
@@ -893,7 +882,7 @@ impl Stream for DataApiPython3DataStream {
}
} else if let Some(fut) = &mut self.config_fut {
match fut.poll_unpin(cx) {
Ready(k) => match self.handle_config_fut_ready(k) {
Ready(Ok(k)) => match self.handle_config_fut_ready(k) {
Ok(()) => continue,
Err(e) => {
self.config_fut = None;
@@ -902,13 +891,16 @@ impl Stream for DataApiPython3DataStream {
Ready(Some(Err(e)))
}
},
Ready(Err(e)) => {
self.data_done = true;
Ready(Some(Err(e)))
}
Pending => Pending,
}
} else {
if let Some(channel) = self.channels.pop_front() {
self.current_channel = Some(channel.clone());
let fut = read_local_config(channel, self.node_config.clone()).map_err(Error::from);
self.config_fut = Some(Box::pin(fut));
self.config_fut = Some(Box::pin(find_ch_conf(channel, self.node_config.clone())));
continue;
} else {
self.data_done = true;
@@ -987,9 +979,11 @@ impl Api1EventsBinaryHandler {
}
};
let span = if qu.log_level() == "trace" {
tracing::span!(tracing::Level::TRACE, "log_span_t")
debug!("enable trace for handler");
tracing::span!(tracing::Level::TRACE, "log_span_trace")
} else if qu.log_level() == "debug" {
tracing::span!(tracing::Level::DEBUG, "log_span_d")
debug!("enable debug for handler");
tracing::span!(tracing::Level::DEBUG, "log_span_debug")
} else {
tracing::Span::none()
};

View File

@@ -1,4 +0,0 @@
pub async fn find_config_quorum() {
// TODO create new endpoint which only returns the most matching config entry
// for some given channel and time range.
}

View File

@@ -1,6 +1,6 @@
use crate::bodystream::response;
use crate::bodystream::ToPublicResponse;
use crate::channelconfig::chconf_from_binned;
use crate::channelconfig::ch_conf_from_binned;
use crate::err::Error;
use crate::response_err;
use err::anyhow::Context;
@@ -28,13 +28,7 @@ async fn binned_json(url: Url, req: Request<Body>, node_config: &NodeConfigCache
let msg = format!("can not parse query: {}", e.msg());
e.add_public_msg(msg)
})?;
let chconf = chconf_from_binned(&query, node_config).await?;
// Update the series id since we don't require some unique identifier yet.
let query = {
let mut query = query;
query.set_series_id(chconf.try_series().context("binned_json")?);
query
};
let ch_conf = ch_conf_from_binned(&query, node_config).await?;
let span1 = span!(
Level::INFO,
"httpret::binned",
@@ -45,7 +39,7 @@ async fn binned_json(url: Url, req: Request<Body>, node_config: &NodeConfigCache
span1.in_scope(|| {
debug!("begin");
});
let item = streams::timebinnedjson::timebinned_json(query, chconf, node_config.node_config.cluster.clone())
let item = streams::timebinnedjson::timebinned_json(query, &ch_conf, node_config.node_config.cluster.clone())
.instrument(span1)
.await?;
let buf = serde_json::to_vec(&item)?;

View File

@@ -4,7 +4,6 @@ use crate::response;
use crate::response_err;
use crate::BodyStream;
use crate::ToPublicResponse;
use err::anyhow::Context;
use futures_util::stream;
use futures_util::TryStreamExt;
use http::Method;
@@ -75,14 +74,8 @@ async fn plain_events_binary(
) -> Result<Response<Body>, Error> {
debug!("plain_events_binary req: {:?}", req);
let query = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let chconf = chconf_from_events_v1(&query, node_config).await?;
info!("plain_events_binary chconf_from_events_v1: {chconf:?}");
// Update the series id since we don't require some unique identifier yet.
let mut query = query;
query.set_series_id(chconf.try_series().context("plain_events_binary")?);
let query = query;
// ---
let _ = query;
let ch_conf = chconf_from_events_v1(&query, node_config).await?;
info!("plain_events_binary chconf_from_events_v1: {ch_conf:?}");
let s = stream::iter([Ok::<_, Error>(String::from("TODO_PREBINNED_BINARY_STREAM"))]);
let ret = response(StatusCode::OK).body(BodyStream::wrapped(
s.map_err(Error::from),
@@ -100,21 +93,9 @@ async fn plain_events_json(
let (_head, _body) = req.into_parts();
let query = PlainEventsQuery::from_url(&url)?;
info!("plain_events_json query {query:?}");
let chconf = chconf_from_events_v1(&query, node_config).await.map_err(Error::from)?;
info!("plain_events_json chconf_from_events_v1: {chconf:?}");
// Update the series id since we don't require some unique identifier yet.
let mut query = query;
let kk = chconf.try_series();
let kk = kk.context("plain_events_json");
if let Err(e) = &kk {
warn!("kk ctx debug {kk:?}");
warn!("kk e ctx display {e}");
}
query.set_series_id(kk?);
let query = query;
// ---
//let query = RawEventsQuery::new(query.channel().clone(), query.range().clone(), AggKind::Plain);
let item = streams::plaineventsjson::plain_events_json(&query, &chconf, &node_config.node_config.cluster).await;
let ch_conf = chconf_from_events_v1(&query, node_config).await.map_err(Error::from)?;
info!("plain_events_json chconf_from_events_v1: {ch_conf:?}");
let item = streams::plaineventsjson::plain_events_json(&query, &ch_conf, &node_config.node_config.cluster).await;
let item = match item {
Ok(item) => item,
Err(e) => {

View File

@@ -15,6 +15,7 @@ use netpod::timeunits::*;
use netpod::ChConf;
use netpod::ChannelConfigQuery;
use netpod::ChannelConfigResponse;
use netpod::ChannelTypeConfigGen;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ScalarType;
@@ -33,8 +34,12 @@ use serde::Serialize;
use std::collections::BTreeMap;
use url::Url;
pub async fn chconf_from_events_v1(q: &PlainEventsQuery, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
let ret = nodenet::channelconfig::channel_config(q.range().try_into()?, q.channel().clone(), ncc).await?;
/// Look up the channel type configuration for a plain-events (API v1) query.
///
/// Delegates to the quorum-based lookup `find_config_basics_quorum`; the former
/// per-node `channel_config` call is kept commented out for reference during
/// this transition. Errors from the lookup are propagated to the caller.
pub async fn chconf_from_events_v1(
q: &PlainEventsQuery,
ncc: &NodeConfigCached,
) -> Result<ChannelTypeConfigGen, Error> {
// let ret = nodenet::channelconfig::channel_config(q.range().try_into()?, q.channel().clone(), ncc).await?;
let ret = nodenet::configquorum::find_config_basics_quorum(q.channel(), ncc).await?;
Ok(ret)
}
@@ -49,8 +54,9 @@ pub async fn chconf_from_prebinned(q: &PreBinnedQuery, _ncc: &NodeConfigCached)
Ok(ret)
}
pub async fn chconf_from_binned(q: &BinnedQuery, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
let ret = nodenet::channelconfig::channel_config(q.range().try_into()?, q.channel().clone(), ncc).await?;
/// Look up the channel type configuration for a binned query.
///
/// Same mechanism as `chconf_from_events_v1`: delegates to the quorum-based
/// `find_config_basics_quorum` lookup; the former per-node `channel_config`
/// call is kept commented out for reference. Errors are propagated.
pub async fn ch_conf_from_binned(q: &BinnedQuery, ncc: &NodeConfigCached) -> Result<ChannelTypeConfigGen, Error> {
// let ret = nodenet::channelconfig::channel_config(q.range().try_into()?, q.channel().clone(), ncc).await?;
let ret = nodenet::configquorum::find_config_basics_quorum(q.channel(), ncc).await?;
Ok(ret)
}