Update deps, backend list for plain node

Dominik Werder
2024-06-12 13:43:28 +02:00
parent 437e6d0d76
commit 03e8ac7a70
35 changed files with 1459 additions and 343 deletions

File diff suppressed because it is too large

View File

@@ -64,7 +64,7 @@ async fn go() -> Result<(), Error> {
};
match opts.subcmd {
SubCmd::Retrieval(subcmd) => {
info!("daqbuffer version {} +0003", clap::crate_version!());
info!("daqbuffer version {} +0005", clap::crate_version!());
info!(" service_version {}", service_version);
if false {
#[allow(non_snake_case)]

View File

@@ -19,7 +19,7 @@ pub struct ChannelInfo {
pub kind: u16,
}
pub async fn info_for_series_ids(series_ids: &[u64], pg: &Client) -> Result<Vec<ChannelInfo>, Error> {
pub async fn info_for_series_ids(series_ids: &[u64], pg: &Client) -> Result<Vec<(u64, Option<ChannelInfo>)>, Error> {
let (ord, seriess) = series_ids
.iter()
.enumerate()
@@ -34,33 +34,38 @@ pub async fn info_for_series_ids(series_ids: &[u64], pg: &Client) -> Result<Vec<
")",
"select q1.ord, q1.series, t.facility, t.channel, t.scalar_type, t.shape_dims, t.kind",
" from q1",
" join series_by_channel t on t.series = q1.series",
" left join series_by_channel t",
" on t.series = q1.series",
" and t.kind = 2",
" order by q1.ord",
);
use crate::pg::Type;
let st = pg.prepare_typed(sql, &[Type::INT4_ARRAY, Type::INT8_ARRAY]).await?;
use crate::pg::Type as PgType;
let st = pg.prepare_typed(sql, &[PgType::INT4_ARRAY, PgType::INT8_ARRAY]).await?;
let rows = pg.query(&st, &[&ord, &seriess]).await?;
let mut ret = Vec::new();
for row in rows {
let series: i64 = row.get(1);
let backend: String = row.get(2);
let channel: String = row.get(3);
let scalar_type: i32 = row.get(4);
let shape_dims: Vec<i32> = row.get(5);
let kind: i16 = row.get(6);
let series = series as u64;
let scalar_type = ScalarType::from_scylla_i32(scalar_type).map_err(|_| Error::BadValue)?;
let shape = Shape::from_scylla_shape_dims(&shape_dims).map_err(|_| Error::BadValue)?;
let kind = kind as u16;
let e = ChannelInfo {
series,
backend,
name: channel,
scalar_type,
shape,
kind,
};
ret.push(e);
let series = row.get::<_, i64>(1) as u64;
let backend: Option<String> = row.get(2);
if let Some(backend) = backend {
let channel: String = row.get(3);
let scalar_type: i32 = row.get(4);
let shape_dims: Vec<i32> = row.get(5);
let kind: i16 = row.get(6);
let scalar_type = ScalarType::from_scylla_i32(scalar_type).map_err(|_| Error::BadValue)?;
let shape = Shape::from_scylla_shape_dims(&shape_dims).map_err(|_| Error::BadValue)?;
let kind = kind as u16;
let e = ChannelInfo {
series,
backend,
name: channel,
scalar_type,
shape,
kind,
};
ret.push((series, Some(e)));
} else {
ret.push((series, None));
}
}
Ok(ret)
}
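Since the join is now a left join restricted to kind = 2, every requested series id yields a row, and callers receive (u64, Option<ChannelInfo>) instead of a bare ChannelInfo list. A minimal consumer sketch (caller-side names assumed, not part of this commit):

let infos = dbconn::channelinfo::info_for_series_ids(&series_ids, &pg).await?;
for (series, info) in infos {
    match info {
        Some(ci) => debug!("series {} is channel {}", series, ci.name),
        None => debug!("series {} has no series_by_channel row of kind 2", series),
    }
}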

View File

@@ -269,6 +269,7 @@ fn make_scalar_conv(
ScalarType::F64 => ValueDim0FromBytesImpl::<f64>::boxed(),
ScalarType::BOOL => ValueDim0FromBytesImpl::<bool>::boxed(),
ScalarType::STRING => ValueDim0FromBytesImpl::<String>::boxed(),
ScalarType::Enum => ValueDim0FromBytesImpl::<String>::boxed(),
ScalarType::ChannelStatus => ValueDim0FromBytesImpl::<u32>::boxed(),
},
Shape::Wave(_) => {
@@ -286,6 +287,7 @@ fn make_scalar_conv(
ScalarType::F64 => ValueDim1FromBytesImpl::<f64>::boxed(shape),
ScalarType::BOOL => ValueDim1FromBytesImpl::<bool>::boxed(shape),
ScalarType::STRING => ValueDim1FromBytesImpl::<String>::boxed(shape),
ScalarType::Enum => ValueDim1FromBytesImpl::<String>::boxed(shape),
ScalarType::ChannelStatus => ValueDim1FromBytesImpl::<u32>::boxed(shape),
}
}

View File

@@ -766,7 +766,8 @@ impl DataApiPython3DataStream {
self.range.clone().into(),
TransformQuery::for_event_blobs(),
);
let subq = EventsSubQuery::from_parts(select, self.settings.clone(), self.ctx.reqid().into());
let log_level = String::new();
let subq = EventsSubQuery::from_parts(select, self.settings.clone(), self.ctx.reqid().into(), log_level);
debug!("query for event blobs retrieval subq {subq:?}");
// TODO important TODO
debug!("TODO fix magic inmem_bufcap");
@@ -922,12 +923,12 @@ impl Api1EventsBinaryHandler {
return Err(Error::with_msg_no_trace("can not parse query"));
}
};
let span = if qu.log_level() == "trace" {
debug!("enable trace for handler");
tracing::span!(tracing::Level::TRACE, "log_span_trace")
let span = if false {
tracing::Span::none()
} else if qu.log_level() == "trace" {
tracing::span!(tracing::Level::INFO, "log_span_trace")
} else if qu.log_level() == "debug" {
debug!("enable debug for handler");
tracing::span!(tracing::Level::DEBUG, "log_span_debug")
tracing::span!(tracing::Level::INFO, "log_span_debug")
} else {
tracing::Span::none()
};
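The selected span only takes effect once the handler future is instrumented with it, the same pattern this commit applies in the other handlers; a sketch (handler name hypothetical):

use tracing::Instrument;
let resp = handle_inner(req, ctx, ncc).instrument(span).await?;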

View File

@@ -1,4 +1,5 @@
pub mod accounting;
pub mod backend;
pub mod binned;
pub mod databuffer_tools;
pub mod docs;

View File

@@ -19,10 +19,12 @@ use netpod::log::*;
use netpod::req_uri_to_url;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::Shape;
use query::api4::AccountingIngestedBytesQuery;
use query::api4::AccountingToplistQuery;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
pub struct AccountingIngestedBytes {}
@@ -87,7 +89,33 @@ impl AccountingIngestedBytes {
#[derive(Debug, Serialize, Deserialize)]
pub struct Toplist {
toplist: Vec<(String, u64, u64)>,
dim0: Vec<(String, u64, u64)>,
dim1: Vec<(String, u64, u64)>,
infos_count_total: usize,
infos_missing_count: usize,
top1_usage_len: usize,
scalar_count: usize,
wave_count: usize,
found: usize,
incomplete_count: usize,
mismatch_count: usize,
}
impl Toplist {
fn new() -> Self {
Self {
dim0: Vec::new(),
dim1: Vec::new(),
infos_count_total: 0,
infos_missing_count: 0,
top1_usage_len: 0,
scalar_count: 0,
wave_count: 0,
found: 0,
incomplete_count: 0,
mismatch_count: 0,
}
}
}
pub struct AccountingToplistCounts {}
@@ -135,6 +163,7 @@ impl AccountingToplistCounts {
_ctx: &ReqCtx,
ncc: &NodeConfigCached,
) -> Result<Toplist, Error> {
let list_len_max = qu.limit() as usize;
// TODO assumes that accounting data is in the LT keyspace
let scyco = ncc
.node_config
@@ -145,22 +174,74 @@ impl AccountingToplistCounts {
let pgconf = &ncc.node_config.cluster.database;
let (pg, pgjh) = dbconn::create_connection(&pgconf).await?;
let mut top1 = scyllaconn::accounting::toplist::read_ts(qu.ts().ns(), scy).await?;
top1.sort_by_bytes();
let mut ret = Toplist { toplist: Vec::new() };
let series_ids: Vec<_> = top1.usage().iter().take(qu.limit() as _).map(|x| x.0).collect();
let infos = dbconn::channelinfo::info_for_series_ids(&series_ids, &pg)
.await
.map_err(Error::from_to_string)?;
let mut it = top1.usage().iter();
for info in infos {
let h = it.next().ok_or_else(|| Error::with_msg_no_trace("logic error"))?;
if info.series != h.0 {
let e = Error::with_msg_no_trace(format!("mismatch {} != {}", info.series, h.0));
warn!("{e}");
return Err(e);
top1.sort_by_counts();
let mut ret = Toplist::new();
let top1_usage = top1.usage();
ret.top1_usage_len = top1_usage.len();
let usage_map_0: BTreeMap<u64, (u64, u64)> = top1_usage.iter().map(|x| (x.0, (x.1, x.2))).collect();
let mut usage_it = usage_map_0.iter();
loop {
let mut series_ids = Vec::new();
let mut usages = Vec::new();
while let Some(u) = usage_it.next() {
series_ids.push(*u.0);
usages.push(u.1.clone());
if series_ids.len() >= 200 {
break;
}
}
if series_ids.len() == 0 {
break;
}
let infos = dbconn::channelinfo::info_for_series_ids(&series_ids, &pg)
.await
.map_err(Error::from_to_string)?;
for (_series, info_res) in &infos {
if let Some(info) = info_res {
match &info.shape {
Shape::Scalar => {
ret.scalar_count += 1;
}
Shape::Wave(_) => {
ret.wave_count += 1;
}
_ => {}
}
}
}
if usages.len() > infos.len() {
ret.incomplete_count += usages.len() - infos.len();
}
if infos.len() > usages.len() {
ret.incomplete_count += infos.len() - usages.len();
}
for ((series2, info_res), usage) in infos.into_iter().zip(usages.into_iter()) {
if let Some(info) = info_res {
if series2 != info.series {
ret.mismatch_count += 1;
}
ret.infos_count_total += 1;
// if info.name == "SINSB04-RMOD:PULSE-I-WF" {
// ret.found += 1;
// }
match &info.shape {
Shape::Scalar => {
ret.dim0.push((info.name, usage.0, usage.1));
}
Shape::Wave(_) => {
ret.dim1.push((info.name, usage.0, usage.1));
}
Shape::Image(_, _) => {}
}
} else {
ret.infos_missing_count += 1;
}
}
ret.toplist.push((info.name, h.1, h.2));
}
ret.dim0.sort_by_cached_key(|x| u64::MAX - x.1);
ret.dim1.sort_by_cached_key(|x| u64::MAX - x.1);
ret.dim0.truncate(list_len_max);
ret.dim1.truncate(list_len_max);
Ok(ret)
}
}
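The loop batches series-id lookups in chunks of at most 200 before classifying each hit by shape; stripped of the counters, the pattern is (sketch using the names from above):

let ids: Vec<u64> = usage_map_0.keys().copied().collect();
for chunk in ids.chunks(200) {
    let infos = dbconn::channelinfo::info_for_series_ids(chunk, &pg)
        .await
        .map_err(Error::from_to_string)?;
    // Shape::Scalar feeds dim0, Shape::Wave(_) feeds dim1, None counts as missing.
}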

View File

@@ -0,0 +1,55 @@
use crate::bodystream::response;
use crate::err::Error;
use crate::requests::accepts_json_or_all;
use http::Method;
use http::StatusCode;
use httpclient::body_empty;
use httpclient::body_string;
use httpclient::Requ;
use httpclient::StreamResponse;
use netpod::NodeConfigCached;
use netpod::ReqCtx;
use netpod::ServiceVersion;
use std::collections::BTreeMap;
pub struct BackendListHandler {}
impl BackendListHandler {
pub fn handler(req: &Requ) -> Option<Self> {
if req.uri().path() == "/api/4/backend/list" {
Some(Self {})
} else {
None
}
}
pub async fn handle(
&self,
req: Requ,
_ctx: &ReqCtx,
ncc: &NodeConfigCached,
_service_version: &ServiceVersion,
) -> Result<StreamResponse, Error> {
if req.method() == Method::GET {
if accepts_json_or_all(req.headers()) {
let mut list = Vec::new();
if let Some(g) = &ncc.node_config.cluster.announce_backends {
for j in g {
let mut map = BTreeMap::new();
map.insert("name", j.clone());
list.push(map);
}
}
let res = serde_json::json!({
"backends_available": list,
});
let body = serde_json::to_string(&res)?;
Ok(response(StatusCode::OK).body(body_string(body))?)
} else {
Ok(response(StatusCode::BAD_REQUEST).body(body_empty())?)
}
} else {
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(body_empty())?)
}
}
}
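With announce_backends set in the cluster config, a GET against the new route returns the configured names; illustrative exchange (backend names made up):

GET /api/4/backend/list
{"backends_available":[{"name":"sf-databuffer"},{"name":"sf-imagebuffer"}]}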

View File

@@ -16,6 +16,7 @@ use httpclient::StreamResponse;
use netpod::log::*;
use netpod::NodeConfigCached;
use std::sync::Arc;
use tracing::Instrument;
#[derive(Debug, ThisError)]
pub enum EventDataError {
@@ -84,7 +85,19 @@ impl EventDataHandler {
.await
.map_err(|_| EventDataError::InternalError)?;
let (evsubq,) = nodenet::conn::events_parse_input_query(frames).map_err(|_| EventDataError::QueryParse)?;
let logspan = if false {
tracing::Span::none()
} else if evsubq.log_level() == "trace" {
trace!("enable trace for handler");
tracing::span!(tracing::Level::INFO, "log_span_trace")
} else if evsubq.log_level() == "debug" {
debug!("enable debug for handler");
tracing::span!(tracing::Level::INFO, "log_span_debug")
} else {
tracing::Span::none()
};
let stream = nodenet::conn::create_response_bytes_stream(evsubq, shared_res.scyqueue.as_ref(), ncc)
.instrument(logspan)
.await
.map_err(|e| EventDataError::Error(Box::new(e)))?;
let ret = response(StatusCode::OK)

View File

@@ -29,7 +29,7 @@ use netpod::NodeConfigCached;
use netpod::ReqCtx;
use nodenet::client::OpenBoxedBytesViaHttp;
use query::api4::events::PlainEventsQuery;
use url::Url;
use tracing::Instrument;
pub struct EventsHandler {}
@@ -52,7 +52,26 @@ impl EventsHandler {
if req.method() != Method::GET {
return Ok(response(StatusCode::NOT_ACCEPTABLE).body(body_empty())?);
}
match plain_events(req, ctx, &shared_res.pgqueue, ncc).await {
let self_name = "handle";
let url = req_uri_to_url(req.uri())?;
let evq =
PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
debug!("{self_name} evq {evq:?}");
let logspan = if false {
tracing::Span::none()
} else if evq.log_level() == "trace" {
trace!("enable trace for handler");
tracing::span!(tracing::Level::INFO, "log_span_trace")
} else if evq.log_level() == "debug" {
debug!("enable debug for handler");
tracing::span!(tracing::Level::INFO, "log_span_debug")
} else {
tracing::Span::none()
};
match plain_events(req, evq, ctx, &shared_res.pgqueue, ncc)
.instrument(logspan)
.await
{
Ok(ret) => Ok(ret),
Err(e) => {
error!("EventsHandler sees: {e}");
@@ -64,17 +83,17 @@ impl EventsHandler {
async fn plain_events(
req: Requ,
evq: PlainEventsQuery,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let url = req_uri_to_url(req.uri())?;
if accepts_cbor_framed(req.headers()) {
Ok(plain_events_cbor_framed(url, req, ctx, pgqueue, ncc).await?)
Ok(plain_events_cbor_framed(req, evq, ctx, pgqueue, ncc).await?)
} else if accepts_json_framed(req.headers()) {
Ok(plain_events_json_framed(url, req, ctx, pgqueue, ncc).await?)
Ok(plain_events_json_framed(req, evq, ctx, pgqueue, ncc).await?)
} else if accepts_json_or_all(req.headers()) {
Ok(plain_events_json(url, req, ctx, pgqueue, ncc).await?)
Ok(plain_events_json(req, evq, ctx, pgqueue, ncc).await?)
} else {
let ret = response_err_msg(StatusCode::NOT_ACCEPTABLE, format!("unsupported accept {:?}", req))?;
Ok(ret)
@@ -82,17 +101,16 @@ async fn plain_events(
}
async fn plain_events_cbor_framed(
url: Url,
req: Requ,
evq: PlainEventsQuery,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let evq = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let ch_conf = chconf_from_events_quorum(&evq, ctx, pgqueue, ncc)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("plain_events_cbor_framed chconf_from_events_quorum: {ch_conf:?} {req:?}");
debug!("plain_events_cbor_framed chconf_from_events_quorum: {ch_conf:?} {req:?}");
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let stream = streams::plaineventscbor::plain_events_cbor_stream(&evq, ch_conf, ctx, Box::pin(open_bytes)).await?;
use future::ready;
@@ -121,17 +139,16 @@ async fn plain_events_cbor_framed(
}
async fn plain_events_json_framed(
url: Url,
req: Requ,
evq: PlainEventsQuery,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let evq = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let ch_conf = chconf_from_events_quorum(&evq, ctx, pgqueue, ncc)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("plain_events_json_framed chconf_from_events_quorum: {ch_conf:?} {req:?}");
debug!("plain_events_json_framed chconf_from_events_quorum: {ch_conf:?} {req:?}");
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let stream = streams::plaineventsjson::plain_events_json_stream(&evq, ch_conf, ctx, Box::pin(open_bytes)).await?;
let stream = bytes_chunks_to_framed(stream);
@@ -140,33 +157,26 @@ async fn plain_events_json_framed(
}
async fn plain_events_json(
url: Url,
req: Requ,
evq: PlainEventsQuery,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let self_name = "plain_events_json";
info!("{self_name} req: {:?}", req);
debug!("{self_name} req: {:?}", req);
let (_head, _body) = req.into_parts();
let query = PlainEventsQuery::from_url(&url)?;
info!("{self_name} query {query:?}");
// TODO handle None case better and return 404
let ch_conf = chconf_from_events_quorum(&query, ctx, pgqueue, ncc)
let ch_conf = chconf_from_events_quorum(&evq, ctx, pgqueue, ncc)
.await
.map_err(Error::from)?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("{self_name} chconf_from_events_quorum: {ch_conf:?}");
debug!("{self_name} chconf_from_events_quorum: {ch_conf:?}");
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let item = streams::plaineventsjson::plain_events_json(
&query,
ch_conf,
ctx,
&ncc.node_config.cluster,
Box::pin(open_bytes),
)
.await;
info!("{self_name} returned {}", item.is_ok());
let item =
streams::plaineventsjson::plain_events_json(&evq, ch_conf, ctx, &ncc.node_config.cluster, Box::pin(open_bytes))
.await;
debug!("{self_name} returned {}", item.is_ok());
let item = match item {
Ok(item) => item,
Err(e) => {
@@ -175,7 +185,7 @@ async fn plain_events_json(
}
};
let ret = response(StatusCode::OK).body(ToJsonBody::from(&item).into_body())?;
info!("{self_name} response created");
debug!("{self_name} response created");
Ok(ret)
}

View File

@@ -127,10 +127,12 @@ pub async fn host(ncc: NodeConfigCached, service_version: ServiceVersion) -> Res
ncc.node_config.cluster.scylla_mt(),
ncc.node_config.cluster.scylla_lt(),
) {
let (scyqueue, scylla_worker) = ScyllaWorker::new(st, mt, lt).await.map_err(|e| {
error!("{e}");
RetrievalError::TextError(e.to_string())
})?;
let (scyqueue, scylla_worker) = ScyllaWorker::new(st.clone(), mt.clone(), lt.clone())
.await
.map_err(|e| {
error!("{e}");
RetrievalError::TextError(e.to_string())
})?;
let scylla_worker_jh = taskrun::spawn(scylla_worker.work());
Some(scyqueue)
} else {
@@ -342,6 +344,8 @@ async fn http_service_inner(
Ok(h.handle(req, ctx, &node_config, shared_res)
.await
.map_err(|e| Error::with_msg_no_trace(e.to_string()))?)
} else if let Some(h) = api4::backend::BackendListHandler::handler(&req) {
Ok(h.handle(req, ctx, &node_config, service_version).await?)
} else if let Some(h) = api4::status::StatusNodesRecursive::handler(&req) {
Ok(h.handle(req, ctx, &node_config, service_version).await?)
} else if let Some(h) = StatusBoardAllHandler::handler(&req) {

View File

@@ -44,6 +44,7 @@ use netpod::ChannelSearchSingleResult;
use netpod::FromUrl;
use netpod::HasBackend;
use netpod::HasTimeout;
use netpod::MapQuery;
use netpod::ProxyConfig;
use netpod::ReqCtx;
use netpod::ServiceVersion;
@@ -196,6 +197,8 @@ async fn proxy_http_service_inner(
h.handle(req, ctx, &proxy_config).await
} else if let Some(h) = api4::events::EventsHandler::handler(&req) {
h.handle(req, ctx, &proxy_config).await
} else if path == "/api/4/accounting/toplist/counts" {
Ok(proxy_backend_query::<MapQuery>(req, ctx, proxy_config).await?)
} else if path == "/api/4/status/connection/events" {
Ok(proxy_backend_query::<ChannelStateEventsQuery>(req, ctx, proxy_config).await?)
} else if path == "/api/4/status/channel/events" {

View File

@@ -32,6 +32,9 @@ impl BackendListHandler {
{
"name": "sf-imagebuffer",
},
{
"name": "sf-archiver",
},
]
});
let body = serde_json::to_string(&res)?;

View File

@@ -128,7 +128,7 @@ pub trait Events:
fn as_time_binnable_ref(&self) -> &dyn TimeBinnable;
fn as_time_binnable_mut(&mut self) -> &mut dyn TimeBinnable;
fn verify(&self) -> bool;
fn output_info(&self);
fn output_info(&self) -> String;
fn as_collectable_mut(&mut self) -> &mut dyn Collectable;
fn as_collectable_with_default_ref(&self) -> &dyn Collectable;
fn as_collectable_with_default_mut(&mut self) -> &mut dyn Collectable;
@@ -190,7 +190,7 @@ impl Events for Box<dyn Events> {
Events::verify(self.as_ref())
}
fn output_info(&self) {
fn output_info(&self) -> String {
Events::output_info(self.as_ref())
}

View File

@@ -1,5 +1,7 @@
use crate::container::ByteEstimate;
use crate::subfr::SubFrId;
use netpod::EnumVariant;
use netpod::StringFix;
use serde::Serialize;
use std::fmt;
use std::ops;
@@ -247,3 +249,49 @@ impl_scalar_ops!(
"string",
16
);
impl ByteEstimate for EnumVariant {
fn byte_estimate(&self) -> u64 {
12
}
}
impl AsPrimF32 for EnumVariant {
fn as_prim_f32_b(&self) -> f32 {
0.
}
}
impl ScalarOps for EnumVariant {
fn scalar_type_name() -> &'static str {
"enumvariant"
}
fn zero_b() -> Self {
EnumVariant::empty()
}
fn equal_slack(&self, rhs: &Self) -> bool {
self == rhs
}
fn add(&mut self, _rhs: &Self) {
// undefined so far
}
fn div(&mut self, _n: usize) {
// undefined so far
}
fn find_vec_min(a: &Vec<Self>) -> Option<Self> {
todo!()
}
fn find_vec_max(a: &Vec<Self>) -> Option<Self> {
todo!()
}
fn avg_vec(a: &Vec<Self>) -> Option<Self> {
todo!()
}
}

View File

@@ -1,3 +1,5 @@
use netpod::EnumVariant;
pub trait SubFrId {
const SUB: u32;
}
@@ -49,3 +51,7 @@ impl SubFrId for bool {
impl SubFrId for String {
const SUB: u32 = 0x0e;
}
impl SubFrId for EnumVariant {
const SUB: u32 = 0x0f;
}

View File

@@ -832,7 +832,7 @@ impl Events for ChannelEvents {
todo!()
}
fn output_info(&self) {
fn output_info(&self) -> String {
todo!()
}
@@ -1220,11 +1220,15 @@ impl Collector for ChannelEventsCollector {
) -> Result<Box<dyn Collected>, err::Error> {
match self.coll.as_mut() {
Some(coll) => {
coll.set_continue_at_here();
if self.needs_continue_at {
debug!("ChannelEventsCollector set_continue_at_here");
coll.set_continue_at_here();
}
if self.range_complete {
coll.set_range_complete();
}
if self.timed_out {
debug!("ChannelEventsCollector set_timed_out");
coll.set_timed_out();
}
let res = coll.result(range, binrange)?;

View File

@@ -4,6 +4,7 @@ use crate::Error;
use items_0::Empty;
use items_0::Events;
use netpod::log::*;
use netpod::EnumVariant;
use netpod::ScalarType;
use netpod::Shape;
@@ -25,6 +26,7 @@ pub fn empty_events_dyn_ev(scalar_type: &ScalarType, shape: &Shape) -> Result<Bo
F64 => Box::new(K::<f64>::empty()),
BOOL => Box::new(K::<bool>::empty()),
STRING => Box::new(K::<String>::empty()),
Enum => Box::new(K::<EnumVariant>::empty()),
ChannelStatus => Box::new(K::<u32>::empty()),
}
}
@@ -44,6 +46,7 @@ pub fn empty_events_dyn_ev(scalar_type: &ScalarType, shape: &Shape) -> Result<Bo
F64 => Box::new(K::<f64>::empty()),
BOOL => Box::new(K::<bool>::empty()),
STRING => Box::new(K::<String>::empty()),
Enum => Box::new(K::<EnumVariant>::empty()),
ChannelStatus => Box::new(K::<u32>::empty()),
}
}
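With the new arms, an enum channel materializes as a container of EnumVariant values. Usage sketch (assumes the Result<Box<dyn Events>, Error> return seen in the signature above):

let ev = empty_events_dyn_ev(&ScalarType::Enum, &Shape::Scalar)?;
assert_eq!(ev.len(), 0);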

View File

@@ -812,38 +812,25 @@ impl<STY: ScalarOps> Events for EventsDim0<STY> {
fn verify(&self) -> bool {
let mut good = true;
let mut ts_max = 0;
for ts in &self.tss {
let ts = *ts;
if ts < ts_max {
let n = self.tss.len();
for (&ts1, &ts2) in self.tss.iter().zip(self.tss.range(n.min(1)..n)) {
if ts1 > ts2 {
good = false;
error!("unordered event data ts {} ts_max {}", ts, ts_max);
error!("unordered event data ts1 {} ts2 {}", ts1, ts2);
break;
}
ts_max = ts_max.max(ts);
}
good
}
fn output_info(&self) {
if false {
info!("output_info len {}", self.tss.len());
if self.tss.len() == 1 {
info!(
" only: ts {} pulse {} value {:?}",
self.tss[0], self.pulses[0], self.values[0]
);
} else if self.tss.len() > 1 {
info!(
" first: ts {} pulse {} value {:?}",
self.tss[0], self.pulses[0], self.values[0]
);
let n = self.tss.len() - 1;
info!(
" last: ts {} pulse {} value {:?}",
self.tss[n], self.pulses[n], self.values[n]
);
}
}
fn output_info(&self) -> String {
let n2 = self.tss.len().max(1) - 1;
format!(
"EventsDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
self.tss.len(),
self.tss.get(0).map_or(-1i64, |&x| x as i64),
self.tss.get(n2).map_or(-1i64, |&x| x as i64),
)
}
fn as_collectable_mut(&mut self) -> &mut dyn Collectable {
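The rewritten verify compares adjacent timestamps pairwise instead of tracking a running maximum; the same idea as a standalone function (assumes tss is a VecDeque<u64>, as the range() call suggests):

use std::collections::VecDeque;

fn is_nondecreasing(tss: &VecDeque<u64>) -> bool {
    let n = tss.len();
    // zip pairs tss[i] with tss[i + 1]; deques with fewer than two elements pass trivially
    tss.iter().zip(tss.range(n.min(1)..n)).all(|(&a, &b)| a <= b)
}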

View File

@@ -777,26 +777,14 @@ impl<STY: ScalarOps> Events for EventsDim1<STY> {
good
}
fn output_info(&self) {
if false {
info!("output_info len {}", self.tss.len());
if self.tss.len() == 1 {
info!(
" only: ts {} pulse {} value {:?}",
self.tss[0], self.pulses[0], self.values[0]
);
} else if self.tss.len() > 1 {
info!(
" first: ts {} pulse {} value {:?}",
self.tss[0], self.pulses[0], self.values[0]
);
let n = self.tss.len() - 1;
info!(
" last: ts {} pulse {} value {:?}",
self.tss[n], self.pulses[n], self.values[n]
);
}
}
fn output_info(&self) -> String {
let n2 = self.tss.len().max(1) - 1;
format!(
"EventsDim1OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
self.tss.len(),
self.tss.get(0).map_or(-1i64, |&x| x as i64),
self.tss.get(n2).map_or(-1i64, |&x| x as i64),
)
}
fn as_collectable_mut(&mut self) -> &mut dyn Collectable {

View File

@@ -217,26 +217,14 @@ impl<STY: ScalarOps> Events for EventsXbinDim0<STY> {
good
}
fn output_info(&self) {
if false {
info!("output_info len {}", self.tss.len());
if self.tss.len() == 1 {
info!(
" only: ts {} pulse {} value {:?}",
self.tss[0], self.pulses[0], self.avgs[0]
);
} else if self.tss.len() > 1 {
info!(
" first: ts {} pulse {} value {:?}",
self.tss[0], self.pulses[0], self.avgs[0]
);
let n = self.tss.len() - 1;
info!(
" last: ts {} pulse {} value {:?}",
self.tss[n], self.pulses[n], self.avgs[n]
);
}
}
fn output_info(&self) -> String {
let n2 = self.tss.len().max(1) - 1;
format!(
"EventsXbinDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
self.tss.len(),
self.tss.get(0).map_or(-1i64, |&x| x as i64),
self.tss.get(n2).map_or(-1i64, |&x| x as i64),
)
}
fn as_collectable_mut(&mut self) -> &mut dyn Collectable {

View File

@@ -121,7 +121,7 @@ pub struct BodyStream {
pub inner: Box<dyn Stream<Item = Result<Bytes, Error>> + Send + Unpin>,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq)]
pub enum SeriesKind {
ChannelStatus,
ChannelData,
@@ -160,6 +160,7 @@ pub enum ScalarType {
F64,
BOOL,
STRING,
Enum,
ChannelStatus,
}
@@ -194,6 +195,7 @@ impl Serialize for ScalarType {
F64 => ser.serialize_str("f64"),
BOOL => ser.serialize_str("bool"),
STRING => ser.serialize_str("string"),
Enum => ser.serialize_str("enum"),
ChannelStatus => ser.serialize_str("ChannelStatus"),
}
}
@@ -223,6 +225,7 @@ impl<'de> serde::de::Visitor<'de> for ScalarTypeVis {
"f64" => ScalarType::F64,
"bool" => ScalarType::BOOL,
"string" => ScalarType::STRING,
"enum" => ScalarType::Enum,
"channelstatus" => ScalarType::ChannelStatus,
k => return Err(E::custom(format!("can not understand variant {k:?}"))),
};
@@ -261,7 +264,7 @@ impl ScalarType {
12 => F64,
13 => STRING,
14 => ChannelStatus,
//13 => return Err(Error::with_msg(format!("STRING not supported"))),
15 => Enum,
6 => return Err(Error::with_msg(format!("CHARACTER not supported"))),
_ => return Err(Error::with_msg(format!("unknown dtype code: {:?}", ix))),
};
@@ -283,6 +286,7 @@ impl ScalarType {
F64 => "f64",
BOOL => "bool",
STRING => "string",
Enum => "enum",
ChannelStatus => "ChannelStatus",
}
}
@@ -302,6 +306,7 @@ impl ScalarType {
"f64" => F64,
"bool" => BOOL,
"string" => STRING,
"enum" => Enum,
"ChannelStatus" => ChannelStatus,
_ => {
return Err(Error::with_msg_no_trace(format!(
@@ -328,6 +333,7 @@ impl ScalarType {
F64 => "float64",
BOOL => "bool",
STRING => "string",
Enum => "enum",
ChannelStatus => "ChannelStatus",
}
}
@@ -347,8 +353,9 @@ impl ScalarType {
"double" => F64,
"float32" => F32,
"float64" => F64,
"string" => STRING,
"bool" => BOOL,
"string" => STRING,
"enum" => Enum,
"ChannelStatus" => ChannelStatus,
_ => {
return Err(Error::with_msg_no_trace(format!(
@@ -366,7 +373,7 @@ impl ScalarType {
0 => STRING,
1 => I16,
2 => F32,
3 => I16,
3 => Enum,
4 => I8,
5 => I32,
6 => F64,
@@ -389,6 +396,7 @@ impl ScalarType {
F32 => 2,
F64 => 6,
STRING => 0,
Enum => 3,
_ => return Err(Error::with_msg_no_trace(format!("can not represent {self:?} as CA id"))),
};
Ok(ret)
@@ -420,6 +428,7 @@ impl ScalarType {
Self::from_dtype_index(k as u8)
}
// TODO this is useless for strings and enums.
pub fn bytes(&self) -> u8 {
use ScalarType::*;
match self {
@@ -435,6 +444,7 @@ impl ScalarType {
F64 => 8,
BOOL => 1,
STRING => 1,
Enum => 2,
ChannelStatus => 4,
}
}
@@ -455,6 +465,7 @@ impl ScalarType {
BOOL => 0,
STRING => 13,
ChannelStatus => 14,
Enum => 15,
}
}
@@ -468,6 +479,77 @@ impl ScalarType {
}
}
#[derive(Debug, Clone, PartialOrd, PartialEq)]
pub struct StringFix<const N: usize> {
data: [char; N],
}
impl<const N: usize> StringFix<N> {
pub fn new() -> Self {
Self {
data: [char::REPLACEMENT_CHARACTER; N],
}
}
}
mod string_fix_impl_serde {
use crate::StringFix;
use serde::de::Visitor;
use serde::Deserialize;
use serde::Serialize;
use std::fmt;
impl<const N: usize> Serialize for StringFix<N> {
fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
ser.serialize_unit()
}
}
impl<'de, const N: usize> Deserialize<'de> for StringFix<N> {
fn deserialize<D>(de: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
de.deserialize_unit(Vis::<N>)
}
}
struct Vis<const N: usize>;
impl<'de, const N: usize> Visitor<'de> for Vis<N> {
type Value = StringFix<N>;
fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "deserialize enum error")
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Self::Value::new())
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialOrd, PartialEq)]
pub struct EnumVariant {
ix: u16,
name: StringFix<26>,
}
impl EnumVariant {
pub fn empty() -> Self {
Self {
ix: u16::MAX,
name: StringFix::new(),
}
}
}
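Note that the StringFix serde impls above write the fixed buffer as a unit value and restore it as empty on deserialize, so only ix round-trips for now; with serde_json that looks like (illustrative):

let v = EnumVariant::empty();
assert_eq!(serde_json::to_string(&v).unwrap(), r#"{"ix":65535,"name":null}"#);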
impl AppendToUrl for ScalarType {
fn append_to_url(&self, url: &mut Url) {
let mut g = url.query_pairs_mut();
@@ -650,6 +732,7 @@ pub struct Cluster {
#[serde(rename = "scylla_lt")]
scylla_lt: Option<ScyllaConfig>,
cache_scylla: Option<ScyllaConfig>,
pub announce_backends: Option<Vec<String>>,
}
impl Cluster {
@@ -692,6 +775,7 @@ impl Cluster {
scylla_mt: None,
scylla_lt: None,
cache_scylla: None,
announce_backends: None,
}
}
}
@@ -2879,6 +2963,50 @@ pub trait AppendToUrl {
fn append_to_url(&self, url: &mut Url);
}
pub type MapQuery = BTreeMap<String, String>;
impl AppendToUrl for MapQuery {
fn append_to_url(&self, url: &mut Url) {
let mut g = url.query_pairs_mut();
for (k, v) in self {
g.append_pair(k, v);
}
}
}
impl FromUrl for MapQuery {
fn from_url(url: &Url) -> Result<Self, Error> {
let pairs = get_url_query_pairs(url);
Self::from_pairs(&pairs)
}
fn from_pairs(pairs: &BTreeMap<String, String>) -> Result<Self, Error> {
Ok(pairs.clone())
}
}
impl HasBackend for MapQuery {
fn backend(&self) -> &str {
self.get("backend").map_or("NOBACKEND", AsRef::as_ref)
}
}
impl HasTimeout for MapQuery {
fn timeout(&self) -> Duration {
let x: Option<u32> = if let Some(v) = self.get("timeout") {
v.parse::<u32>().ok()
} else {
None
};
let x = x.unwrap_or(5000);
Duration::from_millis(x as _)
}
fn set_timeout(&mut self, timeout: Duration) {
self.insert("timeout".into(), format!("{:.0}", 1e3 * timeout.as_secs_f32()));
}
}
pub fn get_url_query_pairs(url: &Url) -> BTreeMap<String, String> {
BTreeMap::from_iter(url.query_pairs().map(|(j, k)| (j.to_string(), k.to_string())))
}
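MapQuery forwards arbitrary query pairs unchanged, which is exactly what the new proxy route for /api/4/accounting/toplist/counts relies on; a round trip (hypothetical URL):

let url = url::Url::parse("http://proxy/api/4/accounting/toplist/counts?backend=sf-databuffer&limit=20").unwrap();
let q = MapQuery::from_url(&url).unwrap();
assert_eq!(q.backend(), "sf-databuffer");
assert_eq!(q.timeout(), std::time::Duration::from_millis(5000)); // default when no timeout pair is present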
@@ -3291,6 +3419,7 @@ pub fn test_cluster() -> Cluster {
run_map_pulse_task: false,
is_central_storage: false,
file_io_buffer_size: Default::default(),
announce_backends: None,
}
}
@@ -3328,6 +3457,7 @@ pub fn sls_test_cluster() -> Cluster {
run_map_pulse_task: false,
is_central_storage: false,
file_io_buffer_size: Default::default(),
announce_backends: None,
}
}
@@ -3365,6 +3495,7 @@ pub fn archapp_test_cluster() -> Cluster {
run_map_pulse_task: false,
is_central_storage: false,
file_io_buffer_size: Default::default(),
announce_backends: None,
}
}

View File

@@ -84,7 +84,8 @@ fn raw_data_00() {
);
let select = EventsSubQuerySelect::new(fetch_info.into(), range.into(), TransformQuery::default_events());
let settings = EventsSubQuerySettings::default();
let qu = EventsSubQuery::from_parts(select, settings, "dummy".into());
let log_level = String::new();
let qu = EventsSubQuery::from_parts(select, settings, "dummy".into(), log_level);
let frame1 = Frame1Parts::new(qu.clone());
let query = EventQueryJsonStringFrame(serde_json::to_string(&frame1).unwrap());
let frame = sitem_data(query).make_frame()?;

View File

@@ -110,6 +110,8 @@ impl From<&ScalarType> for Api1ScalarType {
A::F64 => B::F64,
A::BOOL => B::BOOL,
A::STRING => B::STRING,
// TODO treat enum as number only
A::Enum => B::U16,
A::ChannelStatus => todo!("ChannelStatus not in Api1ScalarType"),
}
}

View File

@@ -148,6 +148,11 @@ impl BinnedQuery {
None => None,
}
}
pub fn log_level(&self) -> &str {
// TODO take from query
""
}
}
impl HasBackend for BinnedQuery {

View File

@@ -54,6 +54,8 @@ pub struct PlainEventsQuery {
merger_out_len_max: Option<usize>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
create_errors: Vec<String>,
#[serde(default)]
log_level: String,
}
impl PlainEventsQuery {
@@ -78,6 +80,7 @@ impl PlainEventsQuery {
test_do_wasm: None,
merger_out_len_max: None,
create_errors: Vec::new(),
log_level: String::new(),
}
}
@@ -199,6 +202,10 @@ impl PlainEventsQuery {
self.range()
)
}
pub fn log_level(&self) -> &str {
&self.log_level
}
}
impl HasBackend for PlainEventsQuery {
@@ -275,6 +282,7 @@ impl FromUrl for PlainEventsQuery {
.get("create_errors")
.map(|x| x.split(",").map(|x| x.to_string()).collect())
.unwrap_or(Vec::new()),
log_level: pairs.get("log_level").map_or(String::new(), String::from),
};
Ok(ret)
}
@@ -331,6 +339,9 @@ impl AppendToUrl for PlainEventsQuery {
if self.create_errors.len() != 0 {
g.append_pair("create_errors", &self.create_errors.join(","));
}
if self.log_level.len() != 0 {
g.append_pair("log_level", &self.log_level);
}
}
}
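End to end, a client opts a single request into verbose server-side logging by appending the new pair; e.g. (all parameter names except log_level are illustrative):

/api/4/events?backend=sf-databuffer&channelName=SOME-CHANNEL&begDate=2024-06-12T00:00:00Z&endDate=2024-06-12T01:00:00Z&log_level=debug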
@@ -447,15 +458,22 @@ pub struct EventsSubQuery {
settings: EventsSubQuerySettings,
ty: String,
reqid: String,
log_level: String,
}
impl EventsSubQuery {
pub fn from_parts(select: EventsSubQuerySelect, settings: EventsSubQuerySettings, reqid: String) -> Self {
pub fn from_parts(
select: EventsSubQuerySelect,
settings: EventsSubQuerySettings,
reqid: String,
log_level: String,
) -> Self {
Self {
select,
settings,
ty: "EventsSubQuery".into(),
reqid,
log_level,
}
}
@@ -529,6 +547,10 @@ impl EventsSubQuery {
pub fn wasm1(&self) -> Option<&str> {
self.select.wasm1()
}
pub fn log_level(&self) -> &str {
&self.log_level
}
}
#[derive(Debug, Serialize, Deserialize)]

View File

@@ -43,14 +43,14 @@ pub async fn read_ts(ts: u64, scy: Arc<ScySession>) -> Result<UsageData, Error>
let snap = EMIT_ACCOUNTING_SNAP.ms() / 1000;
info!("ts {ts} snap {snap:?}");
let ts = ts / timeunits::SEC / snap * snap;
let cql = concat!("select series, count, bytes from lt_account_00 where part = ? and ts = ?");
let qu = prep(cql, scy.clone()).await?;
let ret = read_ts_inner(ts, qu, scy).await?;
let ret = read_ts_inner(ts, scy).await?;
Ok(ret)
}
async fn read_ts_inner(ts: u64, qu: PreparedStatement, scy: Arc<ScySession>) -> Result<UsageData, Error> {
async fn read_ts_inner(ts: u64, scy: Arc<ScySession>) -> Result<UsageData, Error> {
type RowType = (i64, i64, i64);
let cql = concat!("select series, count, bytes from lt_account_00 where part = ? and ts = ?");
let qu = prep(cql, scy.clone()).await?;
let mut ret = UsageData::new(ts);
for part in 0..255_u32 {
let mut res = scy

View File

@@ -224,6 +224,36 @@ macro_rules! impl_scaty_array {
};
}
impl ValTy for Vec<String> {
type ScaTy = String;
type ScyTy = Vec<String>;
type Container = EventsDim1<String>;
fn from_scyty(inp: Self::ScyTy) -> Self {
inp
}
fn from_valueblob(inp: Vec<u8>) -> Self {
todo!()
}
fn table_name() -> &'static str {
"st_events_array_enum"
}
fn default() -> Self {
Vec::new()
}
fn is_valueblob() -> bool {
false
}
fn st_name() -> &'static str {
"enum"
}
}
impl_scaty_scalar!(u8, i8, "u8", "st_events_scalar_u8");
impl_scaty_scalar!(u16, i16, "u16", "st_events_scalar_u16");
impl_scaty_scalar!(u32, i32, "u32", "st_events_scalar_u32");
@@ -248,7 +278,6 @@ impl_scaty_array!(Vec<i64>, i64, Vec<i64>, "i64", "st_events_array_i64");
impl_scaty_array!(Vec<f32>, f32, Vec<f32>, "f32", "st_events_array_f32");
impl_scaty_array!(Vec<f64>, f64, Vec<f64>, "f64", "st_events_array_f64");
impl_scaty_array!(Vec<bool>, bool, Vec<bool>, "bool", "st_events_array_bool");
// impl_scaty_array!(Vec<String>, String, Vec<String>, "string", "st_events_array_string");
struct ReadNextValuesOpts {
series: u64,
@@ -307,14 +336,19 @@ where
ts_lsp_max,
table_name,
);
let dir = "fwd";
let qu_name = if opts.with_values {
if ST::is_valueblob() {
format!("array_{}_valueblobs_fwd", ST::st_name())
format!("array_{}_valueblobs_{}", ST::st_name(), dir)
} else {
format!("array_{}_values_fwd", ST::st_name())
format!("scalar_{}_values_{}", ST::st_name(), dir)
}
} else {
format!("array_{}_timestamps_fwd", ST::st_name())
if ST::is_valueblob() {
format!("array_{}_timestamps_{}", ST::st_name(), dir)
} else {
format!("scalar_{}_timestamps_{}", ST::st_name(), dir)
}
};
let qu = stmts.read_value_queries.get(&qu_name).ok_or_else(|| {
let e = Error::with_msg_no_trace(format!("can not find query name {}", qu_name));
@@ -343,14 +377,19 @@ where
DtNano::from_ns(0)
};
trace!("BCK ts_msp {} ts_lsp_max {} {}", ts_msp, ts_lsp_max, table_name,);
let dir = "bck";
let qu_name = if opts.with_values {
if ST::is_valueblob() {
format!("array_{}_valueblobs_bck", ST::st_name())
format!("array_{}_valueblobs_{}", ST::st_name(), dir)
} else {
format!("array_{}_values_bck", ST::st_name())
format!("scalar_{}_values_{}", ST::st_name(), dir)
}
} else {
format!("array_{}_timestamps_bck", ST::st_name())
if ST::is_valueblob() {
format!("array_{}_timestamps_{}", ST::st_name(), dir)
} else {
format!("scalar_{}_timestamps_{}", ST::st_name(), dir)
}
};
let qu = stmts.read_value_queries.get(&qu_name).ok_or_else(|| {
let e = Error::with_msg_no_trace(format!("can not find query name {}", qu_name));
@@ -512,6 +551,7 @@ impl ReadValues {
ScalarType::F64 => read_next_values::<f64>(opts).await,
ScalarType::BOOL => read_next_values::<bool>(opts).await,
ScalarType::STRING => read_next_values::<String>(opts).await,
ScalarType::Enum => read_next_values::<String>(opts).await,
ScalarType::ChannelStatus => {
warn!("read scalar channel status not yet supported");
err::todoval()
@@ -533,6 +573,7 @@ impl ReadValues {
warn!("read array string not yet supported");
err::todoval()
}
ScalarType::Enum => read_next_values::<Vec<String>>(opts).await,
ScalarType::ChannelStatus => {
warn!("read array channel status not yet supported");
err::todoval()
@@ -572,6 +613,7 @@ pub struct EventsStreamScylla {
found_one_after: bool,
with_values: bool,
outqueue: VecDeque<Box<dyn Events>>,
ts_seen_max: u64,
}
impl EventsStreamScylla {
@@ -600,6 +642,7 @@ impl EventsStreamScylla {
found_one_after: false,
with_values,
outqueue: VecDeque::new(),
ts_seen_max: 0,
}
}
@@ -618,7 +661,7 @@ impl EventsStreamScylla {
trace!("ts_msp_bck {:?}", self.ts_msp_bck);
trace!("ts_msp_fwd {:?}", self.ts_msp_fwd);
if let Some(msp) = self.ts_msp_bck.pop_back() {
trace!("Try ReadBack1");
trace!("start ReadBack1 msp {}", msp);
let st = ReadValues::new(
self.series,
self.scalar_type.clone(),
@@ -631,7 +674,7 @@ impl EventsStreamScylla {
);
self.state = FrState::ReadBack1(st);
} else if self.ts_msp_fwd.len() > 0 {
trace!("Go straight for forward read");
trace!("begin immediately with forward read");
let st = ReadValues::new(
self.series,
self.scalar_type.clone(),
@@ -653,6 +696,7 @@ impl EventsStreamScylla {
if item.len() > 0 {
self.outqueue.push_back(item);
if self.ts_msp_fwd.len() > 0 {
trace!("start forward read after back1");
let st = ReadValues::new(
self.series,
self.scalar_type.clone(),
@@ -669,7 +713,7 @@ impl EventsStreamScylla {
}
} else {
if let Some(msp) = self.ts_msp_bck.pop_back() {
trace!("Try ReadBack2");
trace!("start ReadBack2 msp {}", msp);
let st = ReadValues::new(
self.series,
self.scalar_type.clone(),
@@ -682,7 +726,7 @@ impl EventsStreamScylla {
);
self.state = FrState::ReadBack2(st);
} else if self.ts_msp_fwd.len() > 0 {
trace!("No 2nd back MSP, go for forward read");
trace!("no 2nd back MSP, go for forward read");
let st = ReadValues::new(
self.series,
self.scalar_type.clone(),
@@ -695,18 +739,19 @@ impl EventsStreamScylla {
);
self.state = FrState::ReadValues(st);
} else {
trace!("No 2nd back MSP, but also nothing to go forward");
trace!("no 2nd back msp, but also nothing to go forward");
self.state = FrState::DataDone;
}
}
}
fn back_2_done(&mut self, item: Box<dyn Events>) {
trace!("back_1_done item len {}", item.len());
trace!("back_2_done item len {}", item.len());
if item.len() > 0 {
self.outqueue.push_back(item);
}
if self.ts_msp_fwd.len() > 0 {
trace!("start forward read after back2");
let st = ReadValues::new(
self.series,
self.scalar_type.clone(),
@@ -719,6 +764,7 @@ impl EventsStreamScylla {
);
self.state = FrState::ReadValues(st);
} else {
trace!("nothing to forward read after back 2");
self.state = FrState::DataDone;
}
}
@@ -745,7 +791,19 @@ impl Stream for EventsStreamScylla {
loop {
if let Some(item) = self.outqueue.pop_front() {
item.verify();
item.output_info();
if let Some(item_min) = item.ts_min() {
if item_min < self.ts_seen_max {
debug!("ordering error A {} {}", item_min, self.ts_seen_max);
}
}
if let Some(item_max) = item.ts_max() {
if item_max < self.ts_seen_max {
debug!("ordering error B {} {}", item_max, self.ts_seen_max);
} else {
self.ts_seen_max = item_max;
}
}
debug!("deliver item {}", item.output_info());
break Ready(Some(Ok(ChannelEvents::Events(item))));
}
break match self.state {
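The new ts_seen_max bookkeeping flags batches that step backwards in time without interrupting delivery; the check in isolation (sketch mirroring the logic above):

fn note_batch(ts_seen_max: &mut u64, item_min: Option<u64>, item_max: Option<u64>) {
    if let Some(min) = item_min {
        if min < *ts_seen_max {
            // ordering error A: batch begins before already-delivered data
        }
    }
    if let Some(max) = item_max {
        if max < *ts_seen_max {
            // ordering error B: batch ends before already-delivered data
        } else {
            *ts_seen_max = max;
        }
    }
}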

View File

@@ -100,28 +100,34 @@ impl ScyllaQueue {
#[derive(Debug)]
pub struct ScyllaWorker {
rx: Receiver<Job>,
scy: Arc<Session>,
stmts_st: Arc<StmtsEventsRt>,
scyconf_st: ScyllaConfig,
scyconf_mt: ScyllaConfig,
scyconf_lt: ScyllaConfig,
}
impl ScyllaWorker {
pub async fn new(
scyconf_st: &ScyllaConfig,
scyconf_mt: &ScyllaConfig,
scyconf_lt: &ScyllaConfig,
scyconf_st: ScyllaConfig,
scyconf_mt: ScyllaConfig,
scyconf_lt: ScyllaConfig,
) -> Result<(ScyllaQueue, Self), Error> {
let (tx, rx) = async_channel::bounded(64);
let scy = create_scy_session_no_ks(scyconf_st).await?;
let scy = Arc::new(scy);
let rtpre = format!("{}.st_", scyconf_st.keyspace);
let stmts_st = StmtsEventsRt::new(&rtpre, &scy).await?;
let stmts_st = Arc::new(stmts_st);
let queue = ScyllaQueue { tx };
let worker = Self { rx, scy, stmts_st };
let worker = Self {
rx,
scyconf_st,
scyconf_mt,
scyconf_lt,
};
Ok((queue, worker))
}
pub async fn work(self) -> Result<(), Error> {
let scy = create_scy_session_no_ks(&self.scyconf_st).await?;
let scy = Arc::new(scy);
let rtpre = format!("{}.st_", self.scyconf_st.keyspace);
let stmts_st = StmtsEventsRt::new(&rtpre, &scy).await?;
let stmts_st = Arc::new(stmts_st);
loop {
let x = self.rx.recv().await;
let job = match x {
@@ -133,13 +139,13 @@ impl ScyllaWorker {
};
match job {
Job::FindTsMsp(series, range, tx) => {
let res = crate::events::find_ts_msp_worker(series, range, &self.stmts_st, &self.scy).await;
let res = crate::events::find_ts_msp_worker(series, range, &stmts_st, &scy).await;
if tx.send(res.map_err(Into::into)).await.is_err() {
// TODO count for stats
}
}
Job::ReadNextValues(job) => {
let fut = (job.futgen)(self.scy.clone(), self.stmts_st.clone());
let fut = (job.futgen)(scy.clone(), stmts_st.clone());
let res = fut.await;
if job.tx.send(res.map_err(Into::into)).await.is_err() {
// TODO count for stats

View File

@@ -166,6 +166,7 @@ impl Future for Collect {
break if self.done_input {
if self.timeout {
if let Some(coll) = self.collector.as_mut() {
info!("Collect call set_timed_out");
coll.set_timed_out();
} else {
warn!("collect timeout but no collector yet");
@@ -199,7 +200,7 @@ impl Future for Collect {
continue;
}
Err(e) => {
error!("{e}");
error!("Collect {e}");
Ready(Err(e))
}
},
@@ -241,6 +242,7 @@ where
warn!("collect timeout");
timed_out = true;
if let Some(coll) = collector.as_mut() {
info!("collect_in_span call set_timed_out");
coll.set_timed_out();
} else {
warn!("collect timeout but no collector yet");
@@ -269,6 +271,7 @@ where
coll.ingest(&mut item);
if coll.len() as u64 >= events_max {
warn!("span reached events_max {}", events_max);
info!("collect_in_span call set_continue_at_here");
coll.set_continue_at_here();
break;
}

View File

@@ -24,7 +24,7 @@ pub async fn plain_events_json(
_cluster: &Cluster,
open_bytes: OpenBoxedBytesStreamsBox,
) -> Result<JsonValue, Error> {
info!("plain_events_json evquery {:?}", evq);
debug!("plain_events_json evquery {:?}", evq);
let deadline = Instant::now() + evq.timeout();
let stream = dyn_events_stream(evq, ch_conf, ctx, open_bytes).await?;
@@ -40,7 +40,7 @@ pub async fn plain_events_json(
//let stream = EventsToTimeBinnable::new(stream);
//let stream = TimeBinnableToCollectable::new(stream);
let stream = Box::pin(stream);
info!("plain_events_json boxed stream created");
debug!("plain_events_json boxed stream created");
let collected = Collect::new(
stream,
deadline,
@@ -50,9 +50,9 @@ pub async fn plain_events_json(
None,
)
.await?;
info!("plain_events_json collected");
debug!("plain_events_json collected");
let jsval = serde_json::to_value(&collected)?;
info!("plain_events_json json serialized");
debug!("plain_events_json json serialized");
Ok(jsval)
}

View File

@@ -33,6 +33,7 @@ pub async fn dyn_events_stream(
evq.transform().clone(),
evq.test_do_wasm(),
evq,
evq.log_level().into(),
ctx,
);
let inmem_bufcap = subq.inmem_bufcap();

View File

@@ -203,6 +203,7 @@ pub fn make_sub_query<SUB>(
transform: TransformQuery,
test_do_wasm: Option<&str>,
sub: SUB,
log_level: String,
ctx: &ReqCtx,
) -> EventsSubQuery
where
@@ -213,6 +214,6 @@ where
select.set_wasm1(wasm1.into());
}
let settings = sub.into();
let subq = EventsSubQuery::from_parts(select, settings, ctx.reqid().into());
let subq = EventsSubQuery::from_parts(select, settings, ctx.reqid().into(), log_level);
subq
}

View File

@@ -50,6 +50,7 @@ async fn timebinnable_stream(
query.transform().clone(),
query.test_do_wasm(),
&query,
query.log_level().into(),
ctx,
);
let inmem_bufcap = subq.inmem_bufcap();

View File

@@ -8,6 +8,7 @@ use err::Error;
use std::fmt;
use std::future::Future;
use std::io;
use std::marker::PhantomData;
use std::panic;
use std::sync::Arc;
use std::sync::Mutex;
@@ -40,7 +41,7 @@ fn on_thread_start() {
format!("unknown payload type")
};
error!(
"✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗ panicking\n{:?}\nLOCATION: {:?}\nPAYLOAD: {:?}\ninfo object: {:?}\nerr: {:?}",
"panicking\n{:?}\nLOCATION: {:?}\nPAYLOAD: {:?}\ninfo object: {:?}\nerr: {:?}",
Error::with_msg("catched panic in taskrun::run"),
info.location(),
info.payload(),
@@ -105,6 +106,37 @@ where
}
}
struct LogFilterLayer<S, L>
where
L: tracing_subscriber::Layer<S>,
S: tracing::Subscriber,
{
name: String,
inner: L,
_ph1: PhantomData<S>,
}
impl<S, L> LogFilterLayer<S, L>
where
L: tracing_subscriber::Layer<S>,
S: tracing::Subscriber,
{
fn new(name: String, inner: L) -> Self {
Self {
name,
inner,
_ph1: PhantomData,
}
}
}
impl<S, L> tracing_subscriber::Layer<S> for LogFilterLayer<S, L>
where
L: tracing_subscriber::Layer<S>,
S: tracing::Subscriber,
{
}
fn tracing_init_inner(mode: TracingMode) -> Result<(), Error> {
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
@@ -131,6 +163,19 @@ fn tracing_init_inner(mode: TracingMode) -> Result<(), Error> {
.with_default_directive(tracing::metadata::LevelFilter::INFO.into())
.from_env()
.map_err(|e| Error::with_msg_no_trace(format!("can not build tracing env filter {e}")))?;
let filter_2 = tracing_subscriber::EnvFilter::builder()
.with_env_var("RUST_LOG_2")
.with_default_directive(tracing::metadata::LevelFilter::INFO.into())
.from_env()
.map_err(|e| Error::with_msg_no_trace(format!("can not build tracing env filter {e}")))?;
// let filter_3 = tracing_subscriber::filter::dynamic_filter_fn(|meta, ctx| {
// //
// if ["scyllaconn"].contains(&meta.target()) {
// true
// } else {
// true
// }
// });
let fmt_layer = tracing_subscriber::fmt::Layer::new()
.with_writer(io::stderr)
.with_timer(timer)
@@ -138,7 +183,12 @@ fn tracing_init_inner(mode: TracingMode) -> Result<(), Error> {
.with_ansi(false)
.with_thread_names(true)
.event_format(formatter::FormatTxt)
.with_filter(filter);
.with_filter(filter_2)
.with_filter(filter)
// .and_then(LogFilterLayer::new("lay1".into()))
// .and_then(LogFilterLayer::new("lay2".into()))
;
// let layer_2 = LogFilterLayer::new("lay1".into(), fmt_layer);
let reg = tracing_subscriber::registry();
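Because with_filter is applied twice, the fmt layer emits an event only if both env filters pass, so RUST_LOG_2 can further restrict, but never widen, what RUST_LOG allows; e.g. (illustrative invocation):

RUST_LOG=debug RUST_LOG_2=info,scyllaconn=debug daqbuffer retrieval ...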