Update deps, wip on bin read index
@@ -2,11 +2,11 @@
 name = "daqbuf-redis"
 version = "0.0.1"
 authors = ["Dominik Werder <dominik.werder@gmail.com>"]
-edition = "2021"
+edition = "2024"
 
 [dependencies]
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 autoerr = "0.0.3"
 taskrun = { path = "../taskrun" }
-redis = { version = "0.30.0", features = [] }
+redis = { version = "0.31.0", features = [] }

@@ -1,6 +1,7 @@
 pub mod accounting;
 pub mod backend;
 pub mod binned;
+pub mod binned_v2;
 pub mod binwriteindex;
 pub mod databuffer_tools;
 pub mod docs;

302 crates/httpret/src/api4/binned_v2.rs Normal file
@@ -0,0 +1,302 @@
use crate::bodystream::response;
use crate::channelconfig::ch_conf_from_binned;
use crate::requests::accepts_cbor_framed;
use crate::requests::accepts_json_framed;
use crate::requests::accepts_json_or_all;
use crate::requests::accepts_octets;
use crate::ServiceSharedResources;
use daqbuf_err as err;
use dbconn::worker::PgQueue;
use futures_util::StreamExt;
use futures_util::TryStreamExt;
use http::header::CONTENT_TYPE;
use http::request::Parts;
use http::Method;
use http::StatusCode;
use httpclient::bad_request_response;
use httpclient::body_empty;
use httpclient::body_stream;
use httpclient::error_response;
use httpclient::error_status_response;
use httpclient::not_found_response;
use httpclient::IntoBody;
use httpclient::Requ;
use httpclient::StreamResponse;
use httpclient::ToJsonBody;
use netpod::log;
use netpod::req_uri_to_url;
use netpod::timeunits::SEC;
use netpod::ttl::RetentionTime;
use netpod::ChannelTypeConfigGen;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ReqCtx;
use netpod::APP_CBOR_FRAMED;
use netpod::APP_JSON;
use netpod::APP_JSON_FRAMED;
use netpod::HEADER_NAME_REQUEST_ID;
use nodenet::client::OpenBoxedBytesViaHttp;
use nodenet::scylla::ScyllaEventReadProvider;
use query::api4::binned::BinWriteIndexQuery;
use query::api4::binned::BinnedQuery;
use scyllaconn::worker::ScyllaQueue;
use series::msp::PrebinnedPartitioning;
use series::SeriesId;
use std::pin::Pin;
use std::sync::Arc;
use streams::eventsplainreader::DummyCacheReadProvider;
use streams::eventsplainreader::SfDatabufferEventReadProvider;
use streams::streamtimeout::StreamTimeout2;
use streams::timebin::cached::reader::EventsReadProvider;
use streams::timebin::CacheReadProvider;
use tracing::Instrument;
use tracing::Span;
use url::Url;

macro_rules! error { ($($arg:expr),*) => ( if true { log::error!($($arg),*); } ); }
macro_rules! info { ($($arg:expr),*) => ( if true { log::info!($($arg),*); } ); }
macro_rules! debug { ($($arg:expr),*) => ( if true { log::debug!($($arg),*); } ); }
macro_rules! trace { ($($arg:expr),*) => ( if true { log::trace!($($arg),*); } ); }

autoerr::create_error_v1!(
    name(Error, "Api4BinnedV2"),
    enum variants {
        ChannelNotFound,
        BadQuery(String),
        HttpLib(#[from] http::Error),
        ChannelConfig(crate::channelconfig::Error),
        Retrieval(#[from] crate::RetrievalError),
        EventsCbor(#[from] streams::plaineventscbor::Error),
        EventsJson(#[from] streams::plaineventsjson::Error),
        ServerError,
        BinnedStream(err::Error),
        TimebinnedJson(#[from] streams::timebinnedjson::Error),
        ReadAllCoarse(#[from] scyllaconn::binwriteindex::read_all_coarse::Error),
    },
);

impl From<crate::channelconfig::Error> for Error {
    fn from(value: crate::channelconfig::Error) -> Self {
        use crate::channelconfig::Error::*;
        match value {
            NotFound(_) => Self::ChannelNotFound,
            _ => Self::ChannelConfig(value),
        }
    }
}

impl From<Error> for crate::RetrievalError {
    fn from(value: Error) -> Self {
        crate::RetrievalError::TextError(value.to_string())
    }
}

pub struct BinnedV2Handler {}

impl BinnedV2Handler {
    pub fn handler(req: &Requ) -> Option<Self> {
        if req.uri().path() == "/api/4/private/binnedv2" {
            Some(Self {})
        } else {
            None
        }
    }

    pub async fn handle(
        &self,
        req: Requ,
        ctx: &ReqCtx,
        shared_res: &ServiceSharedResources,
        ncc: &NodeConfigCached,
    ) -> Result<StreamResponse, Error> {
        if req.method() != Method::GET {
            return Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(body_empty())?);
        }
        match handle_request(req, ctx, &shared_res.pgqueue, shared_res.scyqueue.clone(), ncc).await {
            Ok(ret) => Ok(ret),
            Err(e) => match e {
                Error::ChannelNotFound => {
                    let res = not_found_response("channel not found".into(), ctx.reqid());
                    Ok(res)
                }
                Error::BadQuery(msg) => {
                    let res = bad_request_response(msg, ctx.reqid());
                    Ok(res)
                }
                _ => {
                    error!("BinnedV2Handler sees: {}", e);
                    Ok(error_response(e.to_string(), ctx.reqid()))
                }
            },
        }
    }
}

async fn handle_request(
    req: Requ,
    ctx: &ReqCtx,
    pgqueue: &PgQueue,
    scyqueue: Option<ScyllaQueue>,
    ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
    let url = req_uri_to_url(req.uri()).map_err(|e| Error::BadQuery(e.to_string()))?;
    if req
        .uri()
        .path_and_query()
        .map_or(false, |x| x.as_str().contains("DOERR"))
    {
        Err(Error::ServerError)?;
    }
    let reqid = ctx.reqid();
    let (head, _body) = req.into_parts();
    let query = BinnedQuery::from_url(&url).map_err(|e| {
        error!("handle_request: {}", e);
        Error::BadQuery(e.to_string())
    })?;
    info!("{:?}", query);
    let logspan = if query.log_level() == "trace" {
        trace!("enable trace for handler");
        tracing::span!(tracing::Level::INFO, "log_span_trace")
    } else if query.log_level() == "debug" {
        debug!("enable debug for handler");
        tracing::span!(tracing::Level::INFO, "log_span_debug")
    } else {
        tracing::Span::none()
    };
    let span1 = tracing::span!(
        tracing::Level::INFO,
        "binwriteindex",
        reqid,
        beg = query.range().beg_u64() / SEC,
        end = query.range().end_u64() / SEC,
        ch = query.channel().name(),
    );
    span1.in_scope(|| {
        debug!("binned begin {:?}", query);
    });
    binned_instrumented(head, ctx, url, query, pgqueue, scyqueue, ncc, logspan.clone())
        .instrument(logspan)
        .instrument(span1)
        .await
}

async fn binned_instrumented(
    head: Parts,
    ctx: &ReqCtx,
    url: Url,
    query: BinnedQuery,
    pgqueue: &PgQueue,
    scyqueue: Option<ScyllaQueue>,
    ncc: &NodeConfigCached,
    logspan: Span,
) -> Result<StreamResponse, Error> {
    let res2 = HandleRes2::new(ctx, logspan, url, query.clone(), pgqueue, scyqueue, ncc).await?;
    if accepts_json_framed(&head.headers) {
        Ok(binned_json_framed(res2, ctx, ncc).await?)
    } else {
        let ret = error_response(format!("unsupported accept: {:?}", &head.headers), ctx.reqid());
        Ok(ret)
    }
}

fn make_read_provider(
    chname: &str,
    scyqueue: Option<ScyllaQueue>,
    open_bytes: Pin<Arc<OpenBoxedBytesViaHttp>>,
    ctx: &ReqCtx,
    ncc: &NodeConfigCached,
) -> (Arc<dyn EventsReadProvider>, Arc<dyn CacheReadProvider>) {
    let events_read_provider = if chname.starts_with("unittest") {
        let x = streams::teststream::UnitTestStream::new();
        Arc::new(x)
    } else if ncc.node_config.cluster.scylla_lt().is_some() {
        scyqueue
            .clone()
            .map(|qu| ScyllaEventReadProvider::new(qu))
            .map(|x| Arc::new(x) as Arc<dyn EventsReadProvider>)
            .expect("scylla queue")
    } else if ncc.node.sf_databuffer.is_some() {
        // TODO do not clone the request. Pass an Arc up to here.
        let x = SfDatabufferEventReadProvider::new(Arc::new(ctx.clone()), open_bytes);
        Arc::new(x)
    } else {
        panic!("unexpected backend")
    };
    let cache_read_provider = if ncc.node_config.cluster.scylla_lt().is_some() {
        scyqueue
            .clone()
            .map(|qu| scyllaconn::bincache::ScyllaPrebinnedReadProvider::new(qu))
            .map(|x| Arc::new(x) as Arc<dyn CacheReadProvider>)
            .expect("scylla queue")
    } else if ncc.node.sf_databuffer.is_some() {
        let x = DummyCacheReadProvider::new();
        Arc::new(x)
    } else {
        panic!("unexpected backend")
    };
    (events_read_provider, cache_read_provider)
}

async fn binned_json_framed(
    res2: HandleRes2<'_>,
    ctx: &ReqCtx,
    _ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
    let series = SeriesId::new(res2.ch_conf.series().unwrap());
    let range = res2.query.range().to_time().unwrap();
    let scyqueue = res2.scyqueue.as_ref().unwrap();
    let res = scyllaconn::binwriteindex::read_all_coarse::read_all_coarse(series, range, scyqueue).await?;
    let mut strings = Vec::new();
    for e in res {
        strings.push(format!("{:?}", e));
    }
    let ret = response(StatusCode::OK)
        .header(CONTENT_TYPE, APP_JSON)
        .header(HEADER_NAME_REQUEST_ID, ctx.reqid())
        .body(ToJsonBody::from(&strings).into_body())?;
    Ok(ret)
}

struct HandleRes2<'a> {
    logspan: Span,
    url: Url,
    query: BinnedQuery,
    ch_conf: ChannelTypeConfigGen,
    events_read_provider: Arc<dyn EventsReadProvider>,
    cache_read_provider: Arc<dyn CacheReadProvider>,
    timeout_provider: Box<dyn StreamTimeout2>,
    pgqueue: &'a PgQueue,
    scyqueue: Option<ScyllaQueue>,
}

impl<'a> HandleRes2<'a> {
    async fn new(
        ctx: &ReqCtx,
        logspan: Span,
        url: Url,
        query: BinnedQuery,
        pgqueue: &'a PgQueue,
        scyqueue: Option<ScyllaQueue>,
        ncc: &NodeConfigCached,
    ) -> Result<Self, Error> {
        let ch_conf = ch_conf_from_binned(&query, ctx, pgqueue, ncc)
            .await?
            .ok_or_else(|| Error::ChannelNotFound)?;
        let open_bytes = Arc::pin(OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone()));
        let (events_read_provider, cache_read_provider) =
            make_read_provider(ch_conf.name(), scyqueue.clone(), open_bytes, ctx, ncc);
        let timeout_provider = streamio::streamtimeout::StreamTimeout::boxed();
        let ret = Self {
            logspan,
            url,
            query,
            ch_conf,
            events_read_provider,
            cache_read_provider,
            timeout_provider,
            pgqueue,
            scyqueue,
        };
        Ok(ret)
    }
}
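
For orientation, here is a minimal sketch of how this endpoint could be exercised. Only the path `/api/4/private/binnedv2` and the GET-only check come from the handler above; host, port, and the query parameter names are assumptions, since `BinnedQuery::from_url` defines the real parameter set.

```rust
// Hypothetical request URL for the handler above. Only the path is taken
// from BinnedV2Handler::handler; host, port, and the parameter names
// (channelName, begDate, endDate) are assumptions.
use url::Url;

fn main() {
    let u = Url::parse(
        "http://localhost:8080/api/4/private/binnedv2?channelName=unittest-demo&begDate=2025-01-01T00:00:00Z&endDate=2025-01-02T00:00:00Z",
    )
    .unwrap();
    // The server side parses a URL of this shape via BinnedQuery::from_url.
    assert_eq!(u.path(), "/api/4/private/binnedv2");
}
```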

@@ -68,6 +68,7 @@ autoerr::create_error_v1!(
     Async(#[from] netpod::AsyncChannelError),
     ChannelConfig(#[from] dbconn::channelconfig::Error),
     Netpod(#[from] netpod::Error),
+    ScyllaConn(#[from] scyllaconn::conn::Error),
     ScyllaExecution(#[from] scyllaconn::scylla::errors::ExecutionError),
     ScyllaPagerExecution(#[from] scyllaconn::scylla::errors::PagerExecutionError),
     ScyllanextRow(#[from] scyllaconn::scylla::errors::NextRowError),
@@ -484,9 +485,7 @@ impl ScyllaChannelsActive {
             .cluster
             .scylla_st()
             .ok_or_else(|| Error::ExpectScyllaBackend)?;
-        let scy = scyllaconn::conn::create_scy_session(scyco)
-            .await
-            .map_err(other_err_error)?;
+        let scy = scyllaconn::conn::create_scy_session(scyco).await?;
         // Database stores tsedge/ts_msp in units of (10 sec), and we additionally map to the grid.
         let tsedge = q.tsedge / 10 / (6 * 2) * (6 * 2);
         info!(
@@ -875,9 +874,7 @@ impl GenerateScyllaTestData {
 
     async fn process(&self, node_config: &NodeConfigCached) -> Result<(), Error> {
         let scyconf = node_config.node_config.cluster.scylla_st().unwrap();
-        let scy = scyllaconn::conn::create_scy_session(scyconf)
-            .await
-            .map_err(other_err_error)?;
+        let scy = scyllaconn::conn::create_scy_session(scyconf).await?;
         let series: u64 = 42001;
         // TODO query `ts_msp` for all MSP values and use that to delete from the event table first.
         // Only later also delete from the `ts_msp` table.

@@ -115,3 +115,4 @@ impl Convable for query::api4::Error {}
 impl Convable for query::api4::events::Error {}
 impl Convable for netpod::Error {}
 impl Convable for crate::http3::Error {}
+impl Convable for scyllaconn::conn::Error {}

@@ -381,6 +381,8 @@ async fn http_service_inner(
         }
     } else if let Some(h) = api4::binwriteindex::BinWriteIndexHandler::handler(&req) {
         Ok(h.handle(req, ctx, &shared_res, &node_config).await?)
+    } else if let Some(h) = api4::binned_v2::BinnedV2Handler::handler(&req) {
+        Ok(h.handle(req, ctx, &shared_res, &node_config).await?)
     } else if let Some(h) = api4::eventdata::EventDataHandler::handler(&req) {
         Ok(h.handle(req, ctx, &node_config, shared_res)
             .await

@@ -1,6 +1,4 @@
 use daqbuf_err as err;
-use err::thiserror;
-use err::ThisError;
 use futures_util::Future;
 use futures_util::FutureExt;
 use futures_util::Stream;
@@ -25,11 +23,12 @@ use std::task::Poll;
 use streams::timebin::cached::reader::EventsReadProvider;
 use taskrun::tokio;
 
-#[derive(Debug, ThisError)]
-#[cstm(name = "ScyllaChannelEventStream")]
-pub enum Error {
-    MergeRt(#[from] mergert::Error),
-}
+autoerr::create_error_v1!(
+    name(Error, "ScyllaChannelEventStream"),
+    enum variants {
+        MergeRt(#[from] mergert::Error),
+    },
+);
 
 pub async fn scylla_channel_event_stream(
     evq: EventsSubQuery,

@@ -1,11 +1,8 @@
 [package]
 name = "scyllaconn"
-version = "0.0.1"
+version = "0.0.2"
 authors = ["Dominik Werder <dominik.werder@gmail.com>"]
-edition = "2021"
-
-[lib]
-path = "src/scyllaconn.rs"
+edition = "2024"
 
 [dependencies]
 futures-util = "0.3.31"

@@ -9,10 +9,10 @@ use items_0::Empty;
 use items_0::Extendable;
 use items_0::WithLen;
 use items_2::accounting::AccountingEvents;
-use netpod::EMIT_ACCOUNTING_SNAP;
 use netpod::log::*;
 use netpod::range::evrange::NanoRange;
 use netpod::timeunits;
+use netpod::EMIT_ACCOUNTING_SNAP;
 use scylla::statement::prepared::PreparedStatement;
 use std::collections::VecDeque;
 use std::pin::Pin;
@@ -215,7 +215,7 @@ impl Stream for AccountingStreamScylla {
                         continue;
                     }
                 }
-                FrState::ReadValues(ref mut st) => match st.fut.poll_unpin(cx) {
+                FrState::ReadValues(st) => match st.fut.poll_unpin(cx) {
                     Ready(Ok(mut item)) => {
                         if !st.next() {
                             self.state = FrState::Done;

4 crates/scyllaconn/src/binned2.rs Normal file
@@ -0,0 +1,4 @@
pub mod binnedrtbinlen;
pub mod binnedrtmsplsps;
pub mod intraday;
pub mod msplspiter;

47 crates/scyllaconn/src/binned2/binnedrtbinlen.rs Normal file
@@ -0,0 +1,47 @@
use crate::worker::ScyllaQueue;
use daqbuf_series::SeriesId;
use daqbuf_series::msp::PrebinnedPartitioning;
use netpod::BinnedRange;
use netpod::TsNano;
use netpod::ttl::RetentionTime;

/*
Given RT, PBP and range, loop over all the bins to retrieve.

When there is no content, skip over.
*/

pub struct BinnedRtBinlenStream {
    series: SeriesId,
    rt: RetentionTime,
    pbp: PrebinnedPartitioning,
    range: BinnedRange<TsNano>,
    scyqueue: ScyllaQueue,
}

impl BinnedRtBinlenStream {
    pub fn new(
        series: SeriesId,
        rt: RetentionTime,
        pbp: PrebinnedPartitioning,
        range: BinnedRange<TsNano>,
        scyqueue: ScyllaQueue,
    ) -> Self {
        Self {
            series,
            rt,
            pbp,
            range,
            scyqueue,
        }
    }

    fn make_next_fut(&mut self) -> Option<()> {
        let series = self.series.clone();
        let rt = self.rt.clone();
        let msp = todo!();
        let binlen = todo!();
        let lsps = todo!();
        super::binnedrtmsplsps::BinnedRtMspLsps::new(series, rt, msp, binlen, lsps, self.scyqueue.clone());
    }
}
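
The `todo!()` placeholders above still need a source for `msp`, `binlen`, and `lsps`. A minimal sketch of one plausible way to derive per-MSP lsp windows from the `MspLspIter` added below in `msplspiter.rs`; the half-open `(LspU32, LspU32)` window convention and this grouping are assumptions, not part of the commit.

```rust
use daqbuf_series::msp::LspU32;
use daqbuf_series::msp::MspU32;

// Fold consecutive (msp, lsp) items into (msp, [lsp_beg, lsp_end)) windows,
// one window per MSP. Assumes the iterator yields items in ascending order,
// as MspLspIter below does.
fn msp_lsp_windows(items: impl Iterator<Item = (MspU32, LspU32)>) -> Vec<(MspU32, (LspU32, LspU32))> {
    let mut ret: Vec<(MspU32, (LspU32, LspU32))> = Vec::new();
    for (msp, lsp) in items {
        let extended = match ret.last_mut() {
            // Same MSP as the previous item: extend the current window.
            Some((m, win)) if m.0 == msp.0 => {
                win.1 = LspU32(lsp.0 + 1);
                true
            }
            _ => false,
        };
        if !extended {
            // New MSP: open a fresh half-open window.
            ret.push((msp, (LspU32(lsp.0), LspU32(lsp.0 + 1))));
        }
    }
    ret
}
```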

82 crates/scyllaconn/src/binned2/binnedrtmsplsps.rs Normal file
@@ -0,0 +1,82 @@
/*
Fetches the bins for a given RT, binlen and MSP.
Issues the scylla commands.
Assembles the results.
Does basic sanity checks.
May re-chunk the result if too large.
*/

use crate::worker::ScyllaQueue;
use daqbuf_series::SeriesId;
use daqbuf_series::msp::LspU32;
use daqbuf_series::msp::MspU32;
use items_2::binning::container_bins::ContainerBins;
use netpod::DtMs;
use netpod::ttl::RetentionTime;
use std::fmt;
use std::pin::Pin;

macro_rules! info { ($($arg:expr),*) => ( if true { log::info!($($arg),*); } ); }
macro_rules! debug { ($($arg:expr),*) => ( if true { log::debug!($($arg),*); } ); }

autoerr::create_error_v1!(
    name(Error, "BinnedRtMsp"),
    enum variants {
        ReadJob(#[from] streams::timebin::cached::reader::Error),
    },
);

type Fut =
    Pin<Box<dyn Future<Output = Result<ContainerBins<f32, f32>, streams::timebin::cached::reader::Error>> + Send>>;

struct FutW(Fut);

impl fmt::Debug for FutW {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_tuple("Fut").finish()
    }
}

pub struct BinnedRtMspLsps {
    series: SeriesId,
    rt: RetentionTime,
    msp: MspU32,
    lsps: (LspU32, LspU32),
    binlen: DtMs,
    scyqueue: ScyllaQueue,
    fut: Option<FutW>,
}

impl BinnedRtMspLsps {
    pub fn new(
        series: SeriesId,
        rt: RetentionTime,
        msp: MspU32,
        binlen: DtMs,
        lsps: (LspU32, LspU32),
        scyqueue: ScyllaQueue,
    ) -> Self {
        Self {
            series,
            rt,
            msp,
            lsps,
            binlen,
            scyqueue,
            fut: None,
        }
    }

    fn make_next_fut(&mut self) -> Option<Fut> {
        let rt = self.rt.clone();
        let series = self.series.id();
        let binlen = self.binlen.clone();
        let msp = self.msp.to_u64();
        let offs = self.lsps.0.to_u32()..self.lsps.1.to_u32();
        // SAFETY: we only use scyqueue while self is alive.
        let scyqueue = unsafe { &mut *(&mut self.scyqueue as *mut ScyllaQueue) };
        let fut = scyqueue.read_prebinned_f32(rt, series, binlen, msp, offs);
        let fut = Box::pin(fut);
        Some(fut)
    }
}
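
The raw-pointer cast in `make_next_fut` exists because the future returned by `read_prebinned_f32` borrows `self.scyqueue`, and storing it next to the other fields would make the struct self-referential. A sketch of a borrow-free alternative, assuming `ScyllaQueue` is cheap to clone (it is cloned freely elsewhere in this commit) and `Send`:

```rust
impl BinnedRtMspLsps {
    // Hypothetical variant of make_next_fut: move an owned clone of the
    // queue into the future instead of aliasing self.scyqueue through a
    // raw pointer, so the returned future is 'static.
    fn make_next_fut_owned(&mut self) -> Option<Fut> {
        let rt = self.rt.clone();
        let series = self.series.id();
        let binlen = self.binlen.clone();
        let msp = self.msp.to_u64();
        let offs = self.lsps.0.to_u32()..self.lsps.1.to_u32();
        let mut scyqueue = self.scyqueue.clone();
        let fut = async move {
            // Borrows only the locally owned clone, never self.
            scyqueue.read_prebinned_f32(rt, series, binlen, msp, offs).await
        };
        Some(Box::pin(fut))
    }
}
```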

1 crates/scyllaconn/src/binned2/intraday.rs Normal file
@@ -0,0 +1 @@

51 crates/scyllaconn/src/binned2/msplspiter.rs Normal file
@@ -0,0 +1,51 @@
use daqbuf_series::msp::LspU32;
use daqbuf_series::msp::MspU32;
use daqbuf_series::msp::PrebinnedPartitioning;
use netpod::TsMs;
use netpod::range::evrange::NanoRange;

#[derive(Debug, Clone)]
pub struct MspLspItem {
    pub msp: MspU32,
    pub lsp: LspU32,
}

#[derive(Debug)]
pub struct MspLspIter {
    range: NanoRange,
    pbp: PrebinnedPartitioning,
    ts: TsMs,
}

impl MspLspIter {
    pub fn new(range: NanoRange, pbp: PrebinnedPartitioning) -> Self {
        let ts = range.beg_ts().to_ts_ms();
        Self { range, pbp, ts }
    }
}

impl Iterator for MspLspIter {
    type Item = (MspU32, LspU32);

    fn next(&mut self) -> Option<Self::Item> {
        if self.ts >= self.range.end_ts().to_ts_ms() {
            None
        } else {
            let x = self.pbp.msp_lsp(self.ts);
            let msp = MspU32(x.0);
            let lsp = LspU32(x.1);
            self.ts = self.ts.add_dt_ms(self.pbp.bin_len());
            Some((msp, lsp))
        }
    }
}

#[test]
fn test_iter_00() {
    let range = NanoRange::from_strings("", "").unwrap();
    let pbp = PrebinnedPartitioning::Sec1;
    let it = MspLspIter::new(range, pbp);
    for x in it {
        eprintln!("{:?}", x);
    }
}

@@ -1,4 +1,5 @@
 pub mod bwxcmb;
+pub mod read_all_coarse;
 
 use crate::worker::ScyllaQueue;
 use daqbuf_series::msp::MspU32;
@@ -10,6 +11,7 @@ use futures_util::Stream;
 use netpod::log;
 use netpod::range::evrange::NanoRange;
 use netpod::ttl::RetentionTime;
+use netpod::DtMs;
 use std::collections::VecDeque;
 use std::fmt;
 use std::pin::Pin;
@@ -45,7 +47,6 @@ pub struct BinWriteIndexEntry {
 
 #[derive(Debug)]
 pub struct BinWriteIndexSet {
     pub rt: RetentionTime,
     pub msp: MspU32,
     pub entries: VecDeque<BinWriteIndexEntry>,
 }
@@ -78,7 +79,12 @@ impl BinWriteIndexRtStream {
         info!("{}::new INFO/DEBUG test", Self::type_name());
         debug!("{}::new", Self::type_name());
         let (msp_beg, lsp_beg) = pbp.msp_lsp(range.beg_ts().to_ts_ms());
-        let (msp_end, lsp_end) = pbp.msp_lsp(range.end_ts().add_dt_nano(pbp.bin_len().dt_ns()).to_ts_ms());
+        let (msp_end, lsp_end) = pbp.msp_lsp(
+            range
+                .end_ts()
+                .add_dt_nano(DtMs::from_ms_u64(pbp.bin_len().ms() - 1).dt_ns())
+                .to_ts_ms(),
+        );
         BinWriteIndexRtStream {
             rt1,
             series,
@@ -148,7 +154,6 @@ impl Stream for BinWriteIndexRtStream {
             Ready(Ok(x)) => {
                 self.fut1 = None;
                 let item = BinWriteIndexSet {
                     rt: self.rt1.clone(),
                     msp: MspU32(x.0),
                     entries: x.3,
                 };

54 crates/scyllaconn/src/binwriteindex/read_all_coarse.rs Normal file
@@ -0,0 +1,54 @@
use super::BinWriteIndexRtStream;
use crate::worker::ScyllaQueue;
use daqbuf_series::msp::MspU32;
use daqbuf_series::msp::PrebinnedPartitioning;
use daqbuf_series::SeriesId;
use futures_util::TryStreamExt;
use netpod::log;
use netpod::range::evrange::NanoRange;
use netpod::ttl::RetentionTime;
use netpod::DtMs;
use std::collections::VecDeque;

macro_rules! info { ($($arg:expr),*) => ( if true { log::info!($($arg),*); } ); }
macro_rules! debug { ($($arg:expr),*) => ( if true { log::debug!($($arg),*); } ); }

autoerr::create_error_v1!(
    name(Error, "BinIndexReadAllCoarse"),
    enum variants {
        Worker(#[from] crate::worker::Error),
        BinWriteIndexRead(#[from] super::Error),
    },
);

pub async fn read_all_coarse(
    series: SeriesId,
    range: NanoRange,
    scyqueue: &ScyllaQueue,
) -> Result<VecDeque<(RetentionTime, MspU32, u32, DtMs)>, Error> {
    let rts = {
        use RetentionTime::*;
        [Long, Medium, Short]
    };
    let mut ret = VecDeque::new();
    for rt in rts {
        let pbp = PrebinnedPartitioning::Day1;
        let mut stream = BinWriteIndexRtStream::new(rt.clone(), series, pbp, range.clone(), scyqueue.clone());
        while let Some(x) = stream.try_next().await? {
            for e in x.entries {
                let binlen = DtMs::from_ms_u64(e.binlen as u64);
                let item = (rt.clone(), x.msp.clone(), e.lsp, binlen);
                ret.push_back(item);
            }
        }
    }
    Ok(ret)
}

pub fn select_potential_binlen(options: VecDeque<(RetentionTime, MspU32, u32, DtMs)>) -> Result<(), Error> {
    // Check first if there are common binlen over all the range.
    // If not, filter out the options which could build content from finer resolution.
    // Then heuristically select the best match.
    // PrebinnedPartitioning::Day1.msp_lsp(val)
    todo!()
}
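
The comments in `select_potential_binlen` outline the intended heuristic. Below is a self-contained sketch of that selection idea, with bin lengths modeled as plain millisecond values instead of the `(RetentionTime, MspU32, u32, DtMs)` tuples; preferring the coarsest common bin length is an assumption about what "best match" means here.

```rust
use std::collections::BTreeSet;

// Each inner Vec holds the bin lengths (in ms) that one retention-time
// section of the range can serve. Pick a bin length common to all sections
// if one exists, otherwise fall back to the finest available one so the
// caller can re-bin from finer data.
fn select_common_binlen(options: &[Vec<u64>]) -> Option<u64> {
    let mut common: Option<BTreeSet<u64>> = None;
    for section in options {
        let s: BTreeSet<u64> = section.iter().copied().collect();
        common = Some(match common {
            None => s,
            Some(acc) => acc.intersection(&s).copied().collect(),
        });
    }
    // The coarsest common bin length needs the fewest bins over the range.
    common
        .as_ref()
        .and_then(|c| c.iter().next_back().copied())
        .or_else(|| options.iter().flatten().copied().min())
}

fn main() {
    let options = vec![vec![1000, 10_000], vec![10_000, 60_000]];
    assert_eq!(select_common_binlen(&options), Some(10_000));
}
```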

@@ -1,19 +1,23 @@
-use crate::errconv::ErrConv;
-use daqbuf_err as err;
-use err::Error;
 use netpod::log::*;
 use netpod::ScyllaConfig;
 use scylla::client::execution_profile::ExecutionProfileBuilder;
 use scylla::client::session::Session;
 use scylla::client::session_builder::SessionBuilder;
+use scylla::errors::NewSessionError;
 use scylla::statement::Consistency;
 use std::sync::Arc;
 
+autoerr::create_error_v1!(
+    name(Error, "ScyllaSessionCreate"),
+    enum variants {
+        ScyllaSessionNew(#[from] NewSessionError),
+        ScyllaUseKeyspace(#[from] scylla::errors::UseKeyspaceError),
+    },
+);
+
 pub async fn create_scy_session(scyconf: &ScyllaConfig) -> Result<Arc<Session>, Error> {
     let scy = create_scy_session_no_ks(scyconf).await?;
-    scy.use_keyspace(&scyconf.keyspace, true)
-        .await
-        .map_err(Error::from_string)?;
+    scy.use_keyspace(&scyconf.keyspace, true).await?;
     let ret = Arc::new(scy);
     Ok(ret)
 }
@@ -24,12 +28,11 @@ pub async fn create_scy_session_no_ks(scyconf: &ScyllaConfig) -> Result<Session,
         .known_nodes(&scyconf.hosts)
         .default_execution_profile_handle(
             ExecutionProfileBuilder::default()
-                .consistency(Consistency::LocalOne)
+                .consistency(Consistency::Quorum)
                 .build()
                 .into_handle(),
         )
         .build()
-        .await
-        .err_conv()?;
+        .await?;
     Ok(scy)
 }

@@ -9,18 +9,16 @@ use futures_util::FutureExt;
 use futures_util::Stream;
 use futures_util::StreamExt;
 use futures_util::TryStreamExt;
-use items_0::Appendable;
-use items_0::Empty;
-use items_0::WithLen;
 use items_0::container::ByteEstimate;
 use items_0::merge::DrainIntoNewDynResult;
 use items_0::merge::MergeableDyn;
 use items_0::scalar_ops::ScalarOps;
 use items_0::timebin::BinningggContainerEventsDyn;
+use items_0::Appendable;
+use items_0::Empty;
+use items_0::WithLen;
 use items_2::binning::container_events::ContainerEvents;
 use items_2::channelevents::ChannelEvents;
-use netpod::log;
-use netpod::ttl::RetentionTime;
 use netpod::ChConf;
 use netpod::DtNano;
 use netpod::EnumVariant;
@@ -29,6 +27,8 @@ use netpod::Shape;
 use netpod::TsMs;
 use netpod::TsMsVecFmt;
 use netpod::TsNano;
+use netpod::log;
+use netpod::ttl::RetentionTime;
 use scylla::client::session::Session;
 use std::collections::VecDeque;
 use std::fmt;
@@ -70,7 +70,7 @@ impl EventReadOpts {
         Self {
             one_before,
             with_values,
-            qucap: qucap.unwrap_or(3),
+            qucap: qucap.unwrap_or(6),
         }
     }
 

@@ -1,5 +1,6 @@
 pub mod accounting;
 pub mod bincache;
+pub mod binned2;
 pub mod binwriteindex;
 pub mod conn;
 pub mod errconv;
@@ -5,7 +5,6 @@ use crate::events2::prepare::StmtsEvents;
 use crate::range::ScyllaSeriesRange;
 use async_channel::Receiver;
 use async_channel::Sender;
-use daqbuf_err as err;
 use daqbuf_series::msp::MspU32;
 use daqbuf_series::msp::PrebinnedPartitioning;
 use daqbuf_series::SeriesId;
@@ -34,7 +33,7 @@ const SCYLLA_WORKER_QUEUE_LEN: usize = 200;
 autoerr::create_error_v1!(
     name(Error, "ScyllaWorker"),
     enum variants {
-        ScyllaConnection(err::Error),
+        ScyllaConnection(#[from] crate::conn::Error),
         Prepare(#[from] crate::events2::prepare::Error),
         Events(#[from] crate::events2::events::Error),
         Msp(#[from] crate::events2::msp::Error),
@@ -326,9 +325,7 @@ impl ScyllaWorker {
     }
 
     pub async fn work(self) -> Result<(), Error> {
-        let scy = create_scy_session_no_ks(&self.scyconf_st)
-            .await
-            .map_err(Error::ScyllaConnection)?;
+        let scy = create_scy_session_no_ks(&self.scyconf_st).await?;
         let scy = Arc::new(scy);
         let kss = [
             self.scyconf_st.keyspace.as_str(),