Restructure scylla caching
@@ -1,9 +1,130 @@
use crate::ErrConv;
use err::Error;
use futures_util::{Future, Stream, StreamExt};
use items::TimeBinned;
use netpod::log::*;
use netpod::{ChannelTyped, NanoRange, PreBinnedPatchCoord, PreBinnedPatchIterator, PreBinnedPatchRange, ScyllaConfig};
use scylla::Session as ScySession;
use std::pin::Pin;

pub async fn search_channel_scylla<BINC>(_scy: &ScySession) -> Result<(), Error>
where
    BINC: Clone,
{
    todo!()
}

pub async fn read_cached_scylla(
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    scy: &ScySession,
) -> Result<Option<Box<dyn TimeBinned>>, Error> {
    let _ = coord;
    let series = chn.series_id()?;
    let res = scy.query_iter("", (series as i64,)).await.err_conv()?;
    let _ = res;
    // TODO look for the data. Based on the ChannelTyped we know what type the caller expects.
    err::todoval()
}

pub async fn write_cached_scylla(
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    data: &dyn TimeBinned,
    scy: &ScySession,
) -> Result<(), Error> {
    let _ = coord;
    let _ = data;
    let series = chn.series_id()?;
    let res = scy.query_iter("", (series as i64,)).await.err_conv()?;
    let _ = res;
    // TODO write the data.
    err::todoval()
}

// TODO must indicate to the caller whether it is safe to cache this (complete).
pub async fn fetch_uncached_data(
    chn: ChannelTyped,
    coord: PreBinnedPatchCoord,
    scy: &ScySession,
) -> Result<Option<Box<dyn TimeBinned>>, Error> {
    info!("fetch_uncached_data");
    let range = coord.patch_range();
    // TODO why the extra plus one?
    let bin = match PreBinnedPatchRange::covering_range(range, coord.bin_count() + 1) {
        Ok(Some(range)) => fetch_uncached_higher_res_prebinned(&chn, &range, scy).await,
        Ok(None) => fetch_uncached_binned_events(&chn, &coord.patch_range(), scy).await,
        Err(e) => Err(e),
    }?;
    err::todoval()
}

pub fn fetch_uncached_data_box(
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    scy: &ScySession,
) -> Pin<Box<dyn Future<Output = Result<Option<Box<dyn TimeBinned>>, Error>> + Send>> {
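    // Note: this unsafe cast erases the borrow lifetime so the returned boxed future can be
    // Send + 'static; the caller must keep the Session alive until the future completes.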
    let scy = unsafe { &*(scy as *const _) };
    Box::pin(fetch_uncached_data(chn.clone(), coord.clone(), scy))
}

pub async fn fetch_uncached_higher_res_prebinned(
    chn: &ChannelTyped,
    range: &PreBinnedPatchRange,
    scy: &ScySession,
) -> Result<Box<dyn TimeBinned>, Error> {
    let mut aggt = None;
    let patch_it = PreBinnedPatchIterator::from_range(range.clone());
    for patch in patch_it {
        let coord = PreBinnedPatchCoord::new(patch.bin_t_len(), patch.patch_t_len(), patch.ix());
        let mut stream = pre_binned_value_stream_with_scy(chn, &coord, scy).await?;
        while let Some(item) = stream.next().await {
            let item = item?;
            // TODO here I will need some new API to aggregate (time-bin) trait objects.
            // Each TimeBinned must provide some way to do that...
            // I also need an Aggregator which does not know before the first item what output type it will produce.
            let _ = item;
            if aggt.is_none() {
                aggt = Some(item.aggregator_new());
            }
            let aggt = aggt.as_mut().unwrap();
            aggt.ingest(item.as_time_binnable_dyn());
        }
    }
    let mut aggt = aggt.unwrap();
    let res = aggt.result();
    Ok(res)
}

pub async fn fetch_uncached_binned_events(
    chn: &ChannelTyped,
    range: &NanoRange,
    scy: &ScySession,
) -> Result<Box<dyn TimeBinned>, Error> {
    // TODO ask Scylla directly, do not go through HTTP.
    // Refactor the event fetch stream code such that I can use that easily here.
    err::todoval()
}

pub async fn pre_binned_value_stream_with_scy(
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    scy: &ScySession,
) -> Result<Pin<Box<dyn Stream<Item = Result<Box<dyn TimeBinned>, Error>> + Send>>, Error> {
    info!("pre_binned_value_stream_with_scy {chn:?} {coord:?}");
    let range = err::todoval();
    if let Some(item) = read_cached_scylla(chn, &range, scy).await? {
        Ok(Box::pin(futures_util::stream::iter([Ok(item)])))
    } else {
        let bin = fetch_uncached_data_box(chn, coord, scy).await?;
        Ok(Box::pin(futures_util::stream::empty()))
    }
}

pub async fn pre_binned_value_stream(
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    scyconf: &ScyllaConfig,
) -> Result<Pin<Box<dyn Stream<Item = Result<Box<dyn TimeBinned>, Error>> + Send>>, Error> {
    info!("pre_binned_value_stream {chn:?} {coord:?} {scyconf:?}");
    let scy = scylla::SessionBuilder::new()
        .known_nodes(&scyconf.hosts)
        .use_keyspace(&scyconf.keyspace, true)
        .build()
        .await
        .err_conv()?;
    pre_binned_value_stream_with_scy(chn, coord, &scy).await
}
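
A minimal sketch of how the functions above are intended to compose once the TODOs are filled in; `patch_binned` is a hypothetical helper, and per the TODO above, write-back must be skipped while a patch is still incomplete:

    async fn patch_binned(
        chn: &ChannelTyped,
        coord: &PreBinnedPatchCoord,
        scy: &ScySession,
    ) -> Result<Option<Box<dyn TimeBinned>>, Error> {
        if let Some(cached) = read_cached_scylla(chn, coord, scy).await? {
            return Ok(Some(cached));
        }
        // Cache miss: recurse into finer pre-binned patches, or fall back to raw events.
        let fresh = fetch_uncached_data(chn.clone(), coord.clone(), scy).await?;
        if let Some(data) = &fresh {
            // Assumed: only complete patches get written back.
            write_cached_scylla(chn, coord, data.as_ref(), scy).await?;
        }
        Ok(fresh)
    }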

@@ -336,7 +336,7 @@ impl ChannelExecFunction for BinnedJsonChannelExec {
        let perf_opts = PerfOpts { inmem_bufcap: 512 };
        let souter = match PreBinnedPatchRange::covering_range(self.query.range().clone(), self.query.bin_count()) {
            Ok(Some(pre_range)) => {
                debug!("BinnedJsonChannelExec found pre_range: {pre_range:?}");
                info!("BinnedJsonChannelExec found pre_range: {pre_range:?}");
                if range.grid_spec.bin_t_len() < pre_range.grid_spec.bin_t_len() {
                    let msg = format!(
                        "BinnedJsonChannelExec incompatible ranges:\npre_range: {pre_range:?}\nrange: {range:?}"
@@ -364,7 +364,7 @@ impl ChannelExecFunction for BinnedJsonChannelExec {
                Ok(Box::pin(s) as Pin<Box<dyn Stream<Item = Result<Bytes, Error>> + Send>>)
            }
            Ok(None) => {
                debug!("BinnedJsonChannelExec no covering range for prebinned, merge from remotes instead {range:?}");
                info!("BinnedJsonChannelExec no covering range for prebinned, merge from remotes instead {range:?}");
                // TODO let BinnedQuery provide the DiskIoTune and pass to RawEventsQuery:
                let evq = RawEventsQuery::new(
                    self.query.channel().clone(),
@@ -392,6 +392,7 @@ impl ChannelExecFunction for BinnedJsonChannelExec {
    }

    fn empty() -> Self::Output {
        info!("BinnedJsonChannelExec fn empty");
        Box::pin(futures_util::stream::empty())
    }
}

@@ -32,14 +32,9 @@ pub struct FetchedPreBinned<TBT> {
}

impl<TBT> FetchedPreBinned<TBT> {
    pub fn new(query: &PreBinnedQuery, node_config: &NodeConfigCached) -> Result<Self, Error> {
        let nodeix = node_ix_for_patch(&query.patch(), &query.channel(), &node_config.node_config.cluster);
        let node = &node_config.node_config.cluster.nodes[nodeix as usize];
        let mut url = {
            let host = &node.host;
            let port = node.port;
            Url::parse(&format!("http://{host}:{port}/api/4/prebinned"))?
        };
    pub fn new(query: &PreBinnedQuery, host: String, port: u16) -> Result<Self, Error> {
        // TODO should not assume http:
        let mut url = Url::parse(&format!("http://{host}:{port}/api/4/prebinned"))?;
        query.append_to_url(&mut url);
        let ret = Self {
            uri: Uri::from_str(&url.to_string()).map_err(Error::from_string)?,
@@ -201,8 +196,10 @@ where
            disk_stats_every.clone(),
            report_error,
        );
        let nodeix = node_ix_for_patch(&query.patch(), &query.channel(), &node_config.node_config.cluster);
        let node = &node_config.node_config.cluster.nodes[nodeix as usize];
        let ret: Pin<Box<dyn Stream<Item = _> + Send>> =
            match FetchedPreBinned::<TBT>::new(&query, &node_config) {
            match FetchedPreBinned::<TBT>::new(&query, node.host.clone(), node.port.clone()) {
                Ok(stream) => Box::pin(stream.map(move |q| (pix, q))),
                Err(e) => {
                    error!("error from PreBinnedValueFetchedStream::new {e:?}");

@@ -185,10 +185,17 @@ where
        disk_stats_every.clone(),
        report_error,
    );
    let nodeix = crate::cache::node_ix_for_patch(
        &query.patch(),
        &query.channel(),
        &node_config.node_config.cluster,
    );
    let node = &node_config.node_config.cluster.nodes[nodeix as usize];
    let ret =
        FetchedPreBinned::<<<ENP as EventsNodeProcessor>::Output as TimeBinnableType>::Output>::new(
            &query,
            &node_config,
            node.host.clone(),
            node.port.clone(),
        )?;
    Ok(ret)
}

@@ -6,6 +6,7 @@ use crate::decode::{
    LittleEndian, NumFromBytes,
};
use bytes::Bytes;
use dbconn::bincache::pre_binned_value_stream;
use err::Error;
use futures_core::Stream;
use futures_util::StreamExt;
@@ -13,19 +14,21 @@ use items::numops::{BoolNum, NumOps, StringNum};
use items::{
    Appendable, Clearable, EventsNodeProcessor, Framable, FrameType, PushableIndex, Sitemty, TimeBinnableType,
};
use netpod::{AggKind, ByteOrder, NodeConfigCached, ScalarType, Shape};
use netpod::log::*;
use netpod::{AggKind, ByteOrder, ChannelTyped, NodeConfigCached, ScalarType, Shape};
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::pin::Pin;

fn make_num_pipeline_nty_end_evs_enp<NTY, END, EVS, ENP>(
    _shape: Shape,
async fn make_num_pipeline_nty_end_evs_enp<NTY, END, EVS, ENP>(
    scalar_type: ScalarType,
    shape: Shape,
    agg_kind: AggKind,
    _event_value_shape: EVS,
    _events_node_proc: ENP,
    query: PreBinnedQuery,
    node_config: &NodeConfigCached,
) -> Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>
) -> Result<Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>, Error>
where
    NTY: NumOps + NumFromBytes<NTY, END> + Serialize + 'static,
    END: Endianness + 'static,
@@ -36,17 +39,43 @@ where
    Sitemty<<<ENP as EventsNodeProcessor>::Output as TimeBinnableType>::Output>:
        Framable + FrameType + DeserializeOwned,
{
    let ret = PreBinnedValueStream::<NTY, END, EVS, ENP>::new(query, agg_kind, node_config);
    let ret = StreamExt::map(ret, |item| Box::new(item) as Box<dyn Framable>);
    Box::pin(ret)
    if let Some(scyconf) = &node_config.node_config.cluster.cache_scylla {
        info!("~~~~~~~~~~~~~~~ make_num_pipeline_nty_end_evs_enp using scylla as cache");
        let chn = ChannelTyped {
            channel: query.channel().clone(),
            scalar_type,
            shape,
        };
        let stream = pre_binned_value_stream(&chn, query.patch(), scyconf).await?;
        let stream = stream.map(|x| {
            // Type-erase each item; an error is wrapped into an arbitrary concrete Sitemty so it can still be framed.
            match x {
                Ok(k) => {
                    let g = Box::new(k) as Box<dyn Framable>;
                    g
                }
                Err(e) => {
                    let u: Sitemty<items::scalarevents::ScalarEvents<f32>> = Err(e);
                    Box::new(u) as Box<dyn Framable>
                }
            }
        });
        let stream = Box::pin(stream) as Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>;
        Ok(stream)
    } else {
        let ret = PreBinnedValueStream::<NTY, END, EVS, ENP>::new(query, agg_kind, node_config);
        let ret = StreamExt::map(ret, |item| Box::new(item) as Box<dyn Framable>);
        Ok(Box::pin(ret))
    }
}

fn make_num_pipeline_nty_end<NTY, END>(
async fn make_num_pipeline_nty_end<NTY, END>(
    scalar_type: ScalarType,
    shape: Shape,
    agg_kind: AggKind,
    query: PreBinnedQuery,
    node_config: &NodeConfigCached,
) -> Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>
) -> Result<Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>, Error>
where
    NTY: NumOps + NumFromBytes<NTY, END> + Serialize + 'static,
    END: Endianness + 'static,
@@ -59,6 +88,7 @@ where
        AggKind::TimeWeightedScalar | AggKind::DimXBins1 => {
            let events_node_proc = <<EventValuesDim0Case<NTY> as EventValueShape<NTY, END>>::NumXAggToSingleBin as EventsNodeProcessor>::create(shape.clone(), agg_kind.clone());
            make_num_pipeline_nty_end_evs_enp::<NTY, END, _, _>(
                scalar_type,
                shape,
                agg_kind,
                evs,
@@ -66,10 +96,12 @@ where
                query,
                node_config,
            )
            .await
        }
        AggKind::DimXBinsN(_) => {
            let events_node_proc = <<EventValuesDim0Case<NTY> as EventValueShape<NTY, END>>::NumXAggToNBins as EventsNodeProcessor>::create(shape.clone(), agg_kind.clone());
            make_num_pipeline_nty_end_evs_enp::<NTY, END, _, _>(
                scalar_type,
                shape,
                agg_kind,
                evs,
@@ -77,6 +109,7 @@ where
                query,
                node_config,
            )
            .await
        }
        AggKind::Plain => {
            panic!();
@@ -94,6 +127,7 @@ where
        AggKind::TimeWeightedScalar | AggKind::DimXBins1 => {
            let events_node_proc = <<EventValuesDim1Case<NTY> as EventValueShape<NTY, END>>::NumXAggToSingleBin as EventsNodeProcessor>::create(shape.clone(), agg_kind.clone());
            make_num_pipeline_nty_end_evs_enp::<NTY, END, _, _>(
                scalar_type,
                shape,
                agg_kind,
                evs,
@@ -101,10 +135,12 @@ where
                query,
                node_config,
            )
            .await
        }
        AggKind::DimXBinsN(_) => {
            let events_node_proc = <<EventValuesDim1Case<NTY> as EventValueShape<NTY, END>>::NumXAggToNBins as EventsNodeProcessor>::create(shape.clone(), agg_kind.clone());
            make_num_pipeline_nty_end_evs_enp::<NTY, END, _, _>(
                scalar_type,
                shape,
                agg_kind,
                evs,
@@ -112,6 +148,7 @@ where
                query,
                node_config,
            )
            .await
        }
        AggKind::Plain => {
            panic!();
@@ -130,35 +167,41 @@ where
}

macro_rules! match_end {
    ($nty:ident, $end:expr, $shape:expr, $agg_kind:expr, $query:expr, $node_config:expr) => {
    ($nty:ident, $end:expr, $scalar_type:expr, $shape:expr, $agg_kind:expr, $query:expr, $node_config:expr) => {
        match $end {
            ByteOrder::LE => make_num_pipeline_nty_end::<$nty, LittleEndian>($shape, $agg_kind, $query, $node_config),
            ByteOrder::BE => make_num_pipeline_nty_end::<$nty, BigEndian>($shape, $agg_kind, $query, $node_config),
            ByteOrder::LE => {
                make_num_pipeline_nty_end::<$nty, LittleEndian>($scalar_type, $shape, $agg_kind, $query, $node_config)
                    .await
            }
            ByteOrder::BE => {
                make_num_pipeline_nty_end::<$nty, BigEndian>($scalar_type, $shape, $agg_kind, $query, $node_config)
                    .await
            }
        }
    };
}
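
As a reading aid, one arm of the match below expands by direct substitution of the new macro roughly to (not generated code from this commit):

    // match_end!(u8, byte_order, scalar_type, shape, agg_kind, query, node_config) expands to:
    match byte_order {
        ByteOrder::LE => {
            make_num_pipeline_nty_end::<u8, LittleEndian>(scalar_type, shape, agg_kind, query, node_config).await
        }
        ByteOrder::BE => {
            make_num_pipeline_nty_end::<u8, BigEndian>(scalar_type, shape, agg_kind, query, node_config).await
        }
    }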

fn make_num_pipeline(
async fn make_num_pipeline(
    scalar_type: ScalarType,
    byte_order: ByteOrder,
    shape: Shape,
    agg_kind: AggKind,
    query: PreBinnedQuery,
    node_config: &NodeConfigCached,
) -> Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>> {
) -> Result<Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>, Error> {
    match scalar_type {
        ScalarType::U8 => match_end!(u8, byte_order, shape, agg_kind, query, node_config),
        ScalarType::U16 => match_end!(u16, byte_order, shape, agg_kind, query, node_config),
        ScalarType::U32 => match_end!(u32, byte_order, shape, agg_kind, query, node_config),
        ScalarType::U64 => match_end!(u64, byte_order, shape, agg_kind, query, node_config),
        ScalarType::I8 => match_end!(i8, byte_order, shape, agg_kind, query, node_config),
        ScalarType::I16 => match_end!(i16, byte_order, shape, agg_kind, query, node_config),
        ScalarType::I32 => match_end!(i32, byte_order, shape, agg_kind, query, node_config),
        ScalarType::I64 => match_end!(i64, byte_order, shape, agg_kind, query, node_config),
        ScalarType::F32 => match_end!(f32, byte_order, shape, agg_kind, query, node_config),
        ScalarType::F64 => match_end!(f64, byte_order, shape, agg_kind, query, node_config),
        ScalarType::BOOL => match_end!(BoolNum, byte_order, shape, agg_kind, query, node_config),
        ScalarType::STRING => match_end!(StringNum, byte_order, shape, agg_kind, query, node_config),
        ScalarType::U8 => match_end!(u8, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::U16 => match_end!(u16, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::U32 => match_end!(u32, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::U64 => match_end!(u64, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::I8 => match_end!(i8, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::I16 => match_end!(i16, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::I32 => match_end!(i32, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::I64 => match_end!(i64, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::F32 => match_end!(f32, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::F64 => match_end!(f64, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::BOOL => match_end!(BoolNum, byte_order, scalar_type, shape, agg_kind, query, node_config),
        ScalarType::STRING => match_end!(StringNum, byte_order, scalar_type, shape, agg_kind, query, node_config),
    }
}

@@ -191,6 +234,7 @@ pub async fn pre_binned_bytes_for_http(
        query.clone(),
        node_config,
    )
    .await?
    .map(|item| match item.make_frame() {
        Ok(item) => Ok(item.freeze()),
        Err(e) => Err(e),

@@ -1,6 +1,7 @@
use err::Error;
use http::request::Parts;
use netpod::query::{agg_kind_from_binning_scheme, binning_scheme_append_to_url, CacheUsage};
use netpod::timeunits::SEC;
use netpod::{
    channel_append_to_url, channel_from_pairs, AggKind, AppendToUrl, ByteSize, Channel, PreBinnedPatchCoord,
    ScalarType, Shape,
@@ -52,11 +53,11 @@ impl PreBinnedQuery {
            pairs.insert(j.to_string(), k.to_string());
        }
        let pairs = pairs;
        let bin_t_len = pairs
        let bin_t_len: u64 = pairs
            .get("binTlen")
            .ok_or_else(|| Error::with_msg("missing binTlen"))?
            .parse()?;
        let patch_t_len = pairs
        let patch_t_len: u64 = pairs
            .get("patchTlen")
            .ok_or_else(|| Error::with_msg("missing patchTlen"))?
            .parse()?;
@@ -79,7 +80,7 @@ impl PreBinnedQuery {
            .ok_or_else(|| Error::with_msg("missing shape"))
            .map(|x| Shape::from_url_str(&x))??;
        let ret = Self {
            patch: PreBinnedPatchCoord::new(bin_t_len, patch_t_len, patch_ix),
            patch: PreBinnedPatchCoord::new(bin_t_len * SEC, patch_t_len * SEC, patch_ix),
            channel: channel_from_pairs(&pairs)?,
            scalar_type,
            shape,
@@ -148,9 +149,10 @@ impl AppendToUrl for PreBinnedQuery {
        self.patch.append_to_url(url);
        binning_scheme_append_to_url(&self.agg_kind, url);
        channel_append_to_url(url, &self.channel);
        self.shape.append_to_url(url);
        self.scalar_type.append_to_url(url);
        let mut g = url.query_pairs_mut();
        g.append_pair("scalarType", &format!("{:?}", self.scalar_type));
        g.append_pair("shape", &format!("{:?}", self.shape));
        // TODO add also impl AppendToUrl for these if applicable:
        g.append_pair("cacheUsage", &format!("{}", self.cache_usage.query_param_value()));
        g.append_pair("diskIoBufferSize", &format!("{}", self.disk_io_buffer_size));
        g.append_pair("diskStatsEveryKb", &format!("{}", self.disk_stats_every.bytes() / 1024));

@@ -24,66 +24,71 @@ pub struct ChConf {
    pub shape: Shape,
}

pub async fn chconf_from_events_binary(_q: &PlainEventsQuery, _conf: &NodeConfigCached) -> Result<ChConf, Error> {
    err::todoval()
pub async fn chconf_from_database(channel: &Channel, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
    if channel.backend != ncc.node_config.cluster.backend {
        warn!(
            "mismatched backend {} vs {}",
            channel.backend, ncc.node_config.cluster.backend
        );
    }
    // This requires the series id.
    let series = channel.series.ok_or_else(|| {
        Error::with_msg_no_trace(format!("needs a series id {:?}", channel))
            .add_public_msg(format!("series id of channel not supplied"))
    })?;
    // TODO use a common already running worker pool for these queries:
    let dbconf = &ncc.node_config.cluster.database;
    let dburl = format!(
        "postgresql://{}:{}@{}:{}/{}",
        dbconf.user, dbconf.pass, dbconf.host, dbconf.port, dbconf.name
    );
    let (pgclient, pgconn) = tokio_postgres::connect(&dburl, tokio_postgres::NoTls)
        .await
        .err_conv()?;
    tokio::spawn(pgconn);
    let res = pgclient
        .query(
            "select scalar_type, shape_dims from series_by_channel where series = $1",
            &[&(series as i64)],
        )
        .await
        .err_conv()?;
    if res.len() == 0 {
        warn!("can not find channel information for series {series}");
        let e = Error::with_public_msg_no_trace(format!("can not find channel information for series {series}"));
        Err(e)
    } else if res.len() > 1 {
        error!("multiple channel information for series {series}");
        let e = Error::with_public_msg_no_trace(format!("multiple channel information for series {series}"));
        Err(e)
    } else {
        let row = res.first().unwrap();
        let scalar_type = ScalarType::from_dtype_index(row.get::<_, i32>(0) as u8)?;
        // TODO can I get a slice from psql driver?
        let shape = Shape::from_scylla_shape_dims(&row.get::<_, Vec<i32>>(1))?;
        let ret = ChConf { scalar_type, shape };
        Ok(ret)
    }
}

pub async fn chconf_from_events_binary(q: &PlainEventsQuery, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
    chconf_from_database(q.channel(), ncc).await
}

pub async fn chconf_from_events_json(q: &PlainEventsQuery, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
    if q.channel().backend != ncc.node_config.cluster.backend {
        warn!(
            "Mismatched backend {} VS {}",
            q.channel().backend,
            ncc.node_config.cluster.backend
        );
    }
    if let Some(_conf) = &ncc.node_config.cluster.scylla {
        // This requires the series id.
        let series = q
            .channel()
            .series
            .ok_or_else(|| Error::with_msg_no_trace(format!("needs a series id")))?;
        // TODO use a common already running worker pool for these queries:
        let dbconf = &ncc.node_config.cluster.database;
        let dburl = format!(
            "postgresql://{}:{}@{}:{}/{}",
            dbconf.user, dbconf.pass, dbconf.host, dbconf.port, dbconf.name
        );
        let (pgclient, pgconn) = tokio_postgres::connect(&dburl, tokio_postgres::NoTls)
            .await
            .err_conv()?;
        tokio::spawn(pgconn);
        let res = pgclient
            .query(
                "select scalar_type, shape_dims from series_by_channel where series = $1",
                &[&(series as i64)],
            )
            .await
            .err_conv()?;
        if res.len() == 0 {
            error!("can not find channel for series {series}");
            err::todoval()
        } else if res.len() > 1 {
            error!("can not find channel for series {series}");
            err::todoval()
        } else {
            let row = res.first().unwrap();
            let scalar_type = ScalarType::from_dtype_index(row.get::<_, i32>(0) as u8)?;
            // TODO can I get a slice from psql driver?
            let shape = Shape::from_scylla_shape_dims(&row.get::<_, Vec<i32>>(1))?;
            let ret = ChConf { scalar_type, shape };
            Ok(ret)
        }
    } else {
        err::todoval()
    }
    chconf_from_database(q.channel(), ncc).await
}

pub async fn chconf_from_prebinned(_q: &PreBinnedQuery, _conf: &NodeConfigCached) -> Result<ChConf, Error> {
    err::todoval()
pub async fn chconf_from_prebinned(q: &PreBinnedQuery, _ncc: &NodeConfigCached) -> Result<ChConf, Error> {
    let ret = ChConf {
        scalar_type: q.scalar_type().clone(),
        shape: q.shape().clone(),
    };
    Ok(ret)
}

pub async fn chconf_from_binned(_q: &BinnedQuery, _conf: &NodeConfigCached) -> Result<ChConf, Error> {
    err::todoval()
pub async fn chconf_from_binned(q: &BinnedQuery, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
    chconf_from_database(q.channel(), ncc).await
}

pub struct ChannelConfigHandler {}

@@ -32,6 +32,10 @@ impl Error {
    pub fn public_msg(&self) -> Option<&Vec<String>> {
        self.0.public_msg()
    }

    pub fn add_public_msg(self, msg: impl Into<String>) -> Self {
        Error(self.0.add_public_msg(msg))
    }
}

impl fmt::Debug for Error {

@@ -419,7 +419,10 @@ async fn binned(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Re
async fn binned_inner(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
    let (head, _body) = req.into_parts();
    let url = Url::parse(&format!("dummy:{}", head.uri))?;
    let query = BinnedQuery::from_url(&url)?;
    let query = BinnedQuery::from_url(&url).map_err(|e| {
        let msg = format!("can not parse query: {}", e.msg());
        e.add_public_msg(msg)
    })?;
    let chconf = chconf_from_binned(&query, node_config).await?;
    let desc = format!("binned-BEG-{}-END-{}", query.range().beg / SEC, query.range().end / SEC);
    let span1 = span!(Level::INFO, "httpret::binned", desc = &desc.as_str());
@@ -463,7 +466,10 @@ async fn binned_json(
async fn prebinned(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
    match prebinned_inner(req, node_config).await {
        Ok(ret) => Ok(ret),
        Err(e) => Ok(response(StatusCode::BAD_REQUEST).body(Body::from(e.msg().to_string()))?),
        Err(e) => {
            error!("fn prebinned: {e:?}");
            Ok(response(StatusCode::BAD_REQUEST).body(Body::from(e.msg().to_string()))?)
        }
    }
}

@@ -291,6 +291,19 @@ where
    }
}

impl<T> Framable for Box<T>
where
    T: Framable + ?Sized,
{
    fn typeid(&self) -> u32 {
        self.as_ref().typeid()
    }

    fn make_frame(&self) -> Result<BytesMut, Error> {
        self.as_ref().make_frame()
    }
}
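
The blanket impl above is what lets the new cache path frame a Box<dyn TimeBinned> directly, since TimeBinned has Framable as a supertrait. A sketch of the call it enables (frame_boxed is a hypothetical helper):

    fn frame_boxed(b: Box<dyn TimeBinned>) -> Result<BytesMut, Error> {
        b.make_frame()
    }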

pub trait EventsNodeProcessor: Send + Unpin {
    type Input;
    type Output: Send + Unpin + DeserializeOwned + WithTimestamps + TimeBinnableType + ByteEstimate;
@@ -399,6 +412,20 @@ pub trait TimeBinnableType:
    fn aggregator(range: NanoRange, bin_count: usize, do_time_weight: bool) -> Self::Aggregator;
}

/// Provides a time-binned representation of the implementing type.
/// In contrast to `TimeBinnableType` this is meant for trait objects.
pub trait TimeBinnableDyn {}

pub trait TimeBinnableDynAggregator: Send {
    fn ingest(&mut self, item: &dyn TimeBinnableDyn);
    fn result(&mut self) -> Box<dyn TimeBinned>;
}

pub trait TimeBinned: Framable + Send + TimeBinnableDyn {
    fn aggregator_new(&self) -> Box<dyn TimeBinnableDynAggregator>;
    fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn;
}
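
To make the intended dyn-binning wiring concrete, a hedged sketch of a minimal implementor; the type name, fields, and frame id are hypothetical, and ingest still needs the downcasting API that the TODO in fetch_uncached_higher_res_prebinned asks for:

    struct ScalarBins {
        avgs: Vec<f32>,
    }

    struct ScalarBinsAggregator {
        out: ScalarBins,
    }

    impl TimeBinnableDyn for ScalarBins {}

    impl Framable for ScalarBins {
        fn typeid(&self) -> u32 {
            // Placeholder frame type id for this sketch.
            0x0100
        }

        fn make_frame(&self) -> Result<BytesMut, Error> {
            // Placeholder: a real container would serialize its bins here.
            Ok(BytesMut::new())
        }
    }

    impl TimeBinned for ScalarBins {
        fn aggregator_new(&self) -> Box<dyn TimeBinnableDynAggregator> {
            Box::new(ScalarBinsAggregator {
                out: ScalarBins { avgs: Vec::new() },
            })
        }

        fn as_time_binnable_dyn(&self) -> &dyn TimeBinnableDyn {
            self
        }
    }

    impl TimeBinnableDynAggregator for ScalarBinsAggregator {
        fn ingest(&mut self, item: &dyn TimeBinnableDyn) {
            // A real implementation would downcast `item` to its concrete type
            // and fold its bins into `self.out`.
            let _ = item;
        }

        fn result(&mut self) -> Box<dyn TimeBinned> {
            Box::new(std::mem::replace(&mut self.out, ScalarBins { avgs: Vec::new() }))
        }
    }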

// TODO should get I/O and tokio dependence out of this crate
pub trait ReadableFromFile: Sized {
    fn read_from_file(file: File) -> Result<ReadPbv<Self>, Error>;

@@ -319,6 +319,13 @@ impl ScalarType {
    }
}

impl AppendToUrl for ScalarType {
    fn append_to_url(&self, url: &mut Url) {
        let mut g = url.query_pairs_mut();
        g.append_pair("scalarType", self.to_variant_str());
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SfDatabuffer {
    pub data_base_path: PathBuf,
@@ -450,6 +457,7 @@ pub struct Cluster {
    #[serde(rename = "fileIoBufferSize", default)]
    pub file_io_buffer_size: FileIoBufferSize,
    pub scylla: Option<ScyllaConfig>,
    pub cache_scylla: Option<ScyllaConfig>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
@@ -519,12 +527,49 @@ pub struct NodeStatus {
    pub archiver_appliance_status: Option<NodeStatusArchiverAppliance>,
}

/**
Describes a "channel", which is a time-series with a unique name within a "backend".
In the near future, each channel should be assigned a unique id within its "backend".
Also, the concept of "backend" should be split into "facility" and some optional other identifier
for cases like post-mortem, or to differentiate between channel-access and bsread for cases where
the same channel-name is delivered via different methods.
*/
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Channel {
    // TODO ideally, all channels would have a unique id. For scylla backends, we require the id.
    // In the near future, we should also require a unique id for "databuffer" backends, indexed in postgres.
    pub series: Option<u64>,
    // "backend" is currently used in the existing systems for multiple purposes:
    // it can indicate the facility (eg. sf-databuffer, hipa, ...) but also some special subsystem (eg. sf-rf-databuffer).
    pub backend: String,
    pub name: String,
    // TODO ideally, all channels would have a unique id. For scylla backends, we require the id.
    pub series: Option<u64>,
}

impl Channel {
    pub fn name(&self) -> &str {
        &self.name
    }

    pub fn series_id(&self) -> Result<u64, Error> {
        self.series
            .ok_or_else(|| Error::with_msg_no_trace(format!("no series id in channel")))
    }
}

/**
A channel annotated with its scalar type and shape.
*/
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChannelTyped {
    pub channel: Channel,
    pub scalar_type: ScalarType,
    pub shape: Shape,
}

impl ChannelTyped {
    pub fn series_id(&self) -> Result<u64, Error> {
        self.channel.series_id()
    }
}

pub struct HostPort {
@@ -548,12 +593,6 @@ impl HostPort {
    }
}

impl Channel {
    pub fn name(&self) -> &str {
        &self.name
    }
}

#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct FilePos {
    pub pos: u64,
@@ -925,6 +964,13 @@ impl Shape {
    }
}

impl AppendToUrl for Shape {
    fn append_to_url(&self, url: &mut Url) {
        let mut g = url.query_pairs_mut();
        g.append_pair("shape", &format!("{:?}", self.to_scylla_vec()));
    }
}

#[test]
fn test_shape_serde() {
    let s = serde_json::to_string(&Shape::Image(42, 43)).unwrap();
@@ -1074,16 +1120,14 @@ impl PreBinnedPatchGridSpec {

impl std::fmt::Debug for PreBinnedPatchGridSpec {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            fmt,
            "PreBinnedPatchGridSpec {{ bin_t_len: {:?}, patch_t_len(): {:?} }}",
            self.bin_t_len / SEC,
            self.patch_t_len() / SEC,
        )
        fmt.debug_struct("PreBinnedPatchGridSpec")
            .field("bin_t_len", &(self.bin_t_len / SEC))
            .field("patch_t_len", &(self.patch_t_len() / SEC))
            .finish_non_exhaustive()
    }
}
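
With finish_non_exhaustive, the formatter appends a trailing `..` to mark unshown fields; illustrative output (values made up):

    PreBinnedPatchGridSpec { bin_t_len: 10, patch_t_len: 100, .. }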

#[derive(Clone, Debug)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PreBinnedPatchRange {
    pub grid_spec: PreBinnedPatchGridSpec,
    pub offset: u64,
@@ -1218,8 +1262,8 @@ impl PreBinnedPatchCoord {
impl AppendToUrl for PreBinnedPatchCoord {
    fn append_to_url(&self, url: &mut Url) {
        let mut g = url.query_pairs_mut();
        g.append_pair("patchTlen", &format!("{}", self.spec.patch_t_len()));
        g.append_pair("binTlen", &format!("{}", self.spec.bin_t_len()));
        g.append_pair("patchTlen", &format!("{}", self.spec.patch_t_len() / SEC));
        g.append_pair("binTlen", &format!("{}", self.spec.bin_t_len() / SEC));
        g.append_pair("patchIx", &format!("{}", self.ix()));
    }
}
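
This pairs with the PreBinnedQuery::from_url change above (bin_t_len * SEC, patch_t_len * SEC): the URL now carries seconds while the in-memory values stay in nanoseconds. A sketch of the round trip, assuming SEC from netpod::timeunits is nanoseconds per second:

    let bin_t_len: u64 = 10 * SEC;        // in-memory, nanoseconds
    let on_wire = bin_t_len / SEC;        // serialized as binTlen=10
    assert_eq!(on_wire * SEC, bin_t_len); // from_url multiplies by SEC again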

@@ -1932,6 +1976,7 @@ pub struct EventQueryJsonStringFrame(pub String);

/**
Provide basic information about a channel, especially its shape.
Also, byte-order is important for clients that process the raw databuffer event data (python data_api3).
*/
#[derive(Serialize, Deserialize)]
pub struct ChannelInfo {
@@ -1990,6 +2035,7 @@ pub fn test_cluster() -> Cluster {
            pass: "testingdaq".into(),
        },
        scylla: None,
        cache_scylla: None,
        run_map_pulse_task: false,
        is_central_storage: false,
        file_io_buffer_size: Default::default(),
@@ -2023,6 +2069,7 @@ pub fn sls_test_cluster() -> Cluster {
            pass: "testingdaq".into(),
        },
        scylla: None,
        cache_scylla: None,
        run_map_pulse_task: false,
        is_central_storage: false,
        file_io_buffer_size: Default::default(),
@@ -2056,6 +2103,7 @@ pub fn archapp_test_cluster() -> Cluster {
            pass: "testingdaq".into(),
        },
        scylla: None,
        cache_scylla: None,
        run_map_pulse_task: false,
        is_central_storage: false,
        file_io_buffer_size: Default::default(),