Rework support for vec string

This commit is contained in:
Dominik Werder
2024-12-11 16:57:20 +01:00
parent c52de68418
commit 53dc4b7361
5 changed files with 213 additions and 318 deletions

View File

@@ -20,6 +20,7 @@ use netpod::req_uri_to_url;
use netpod::ttl::RetentionTime;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ScalarType;
use netpod::Shape;
use netpod::TsMs;
use query::api4::AccountingIngestedBytesQuery;
@@ -28,6 +29,71 @@ use scyllaconn::accounting::toplist::UsageData;
use serde::Deserialize;
use serde::Serialize;
// Column-oriented accounting record: element i of every vector describes the
// same channel (names[i], counts[i], bytes[i], scalar_types[i], shapes[i]).
// All mutating methods keep the five columns in lockstep.
#[derive(Debug, Serialize, Deserialize)]
pub struct AccountedIngested {
    names: Vec<String>,
    counts: Vec<u64>,
    bytes: Vec<u64>,
    scalar_types: Vec<ScalarType>,
    shapes: Vec<Shape>,
}

impl AccountedIngested {
    /// Creates an empty record set.
    fn new() -> Self {
        Self {
            names: Vec::new(),
            counts: Vec::new(),
            bytes: Vec::new(),
            scalar_types: Vec::new(),
            shapes: Vec::new(),
        }
    }

    /// Appends one entry; every column grows by exactly one element.
    fn push(&mut self, name: String, counts: u64, bytes: u64, scalar_type: ScalarType, shape: Shape) {
        self.names.push(name);
        self.counts.push(counts);
        self.bytes.push(bytes);
        self.scalar_types.push(scalar_type);
        self.shapes.push(shape);
    }

    /// Reorders all columns so that `counts` is descending.
    fn sort_by_counts(&mut self) {
        let order = Self::desc_index_order(&self.counts);
        self.reorder_by_index_list(&order);
    }

    /// Reorders all columns so that `bytes` is descending.
    fn sort_by_bytes(&mut self) {
        let order = Self::desc_index_order(&self.bytes);
        self.reorder_by_index_list(&order);
    }

    /// Returns the indices of `vals` ordered by descending value.
    /// Ties are broken by descending original index, which matches the
    /// previous sort-ascending-then-reverse of `(value, index)` pairs.
    fn desc_index_order(vals: &[u64]) -> Vec<usize> {
        let mut tmp: Vec<(u64, usize)> = vals.iter().copied().enumerate().map(|(i, x)| (x, i)).collect();
        tmp.sort_unstable();
        tmp.into_iter().rev().map(|(_, i)| i).collect()
    }

    /// Rebuilds every column in the order given by `tmp`.
    /// Panics if any index in `tmp` is out of bounds; for a pure reorder
    /// `tmp` is expected to be a permutation of `0..len`.
    fn reorder_by_index_list(&mut self, tmp: &[usize]) {
        self.names = tmp.iter().map(|&x| self.names[x].clone()).collect();
        self.counts = tmp.iter().map(|&x| self.counts[x]).collect();
        self.bytes = tmp.iter().map(|&x| self.bytes[x]).collect();
        self.scalar_types = tmp.iter().map(|&x| self.scalar_types[x].clone()).collect();
        self.shapes = tmp.iter().map(|&x| self.shapes[x].clone()).collect();
    }

    /// Keeps only the first `len` entries of every column.
    fn truncate(&mut self, len: usize) {
        self.names.truncate(len);
        self.counts.truncate(len);
        self.bytes.truncate(len);
        self.scalar_types.truncate(len);
        self.shapes.truncate(len);
    }
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Toplist {
dim0: AccountedIngested,
@@ -57,61 +123,6 @@ impl Toplist {
}
}
// Column-oriented accounting record: element i of every vector describes the
// same channel (names[i], counts[i], bytes[i]). All mutating methods keep the
// three columns in lockstep.
#[derive(Debug, Serialize, Deserialize)]
pub struct AccountedIngested {
    names: Vec<String>,
    counts: Vec<u64>,
    bytes: Vec<u64>,
}

impl AccountedIngested {
    /// Creates an empty record set.
    fn new() -> Self {
        Self {
            names: Vec::new(),
            counts: Vec::new(),
            bytes: Vec::new(),
        }
    }

    /// Appends one entry; every column grows by exactly one element.
    fn push(&mut self, name: String, counts: u64, bytes: u64) {
        self.names.push(name);
        self.counts.push(counts);
        self.bytes.push(bytes);
    }

    /// Reorders all columns so that `counts` is descending.
    fn sort_by_counts(&mut self) {
        let order = Self::desc_index_order(&self.counts);
        self.reorder_by_index_list(&order);
    }

    /// Reorders all columns so that `bytes` is descending.
    fn sort_by_bytes(&mut self) {
        let order = Self::desc_index_order(&self.bytes);
        self.reorder_by_index_list(&order);
    }

    /// Returns the indices of `vals` ordered by descending value.
    /// Ties are broken by descending original index, which matches the
    /// previous sort-ascending-then-reverse of `(value, index)` pairs.
    fn desc_index_order(vals: &[u64]) -> Vec<usize> {
        let mut tmp: Vec<(u64, usize)> = vals.iter().copied().enumerate().map(|(i, x)| (x, i)).collect();
        tmp.sort_unstable();
        tmp.into_iter().rev().map(|(_, i)| i).collect()
    }

    /// Rebuilds every column in the order given by `tmp`.
    /// Panics if any index in `tmp` is out of bounds; for a pure reorder
    /// `tmp` is expected to be a permutation of `0..len`.
    fn reorder_by_index_list(&mut self, tmp: &[usize]) {
        self.names = tmp.iter().map(|&x| self.names[x].clone()).collect();
        self.counts = tmp.iter().map(|&x| self.counts[x]).collect();
        self.bytes = tmp.iter().map(|&x| self.bytes[x]).collect();
    }

    /// Keeps only the first `len` entries of every column.
    fn truncate(&mut self, len: usize) {
        self.names.truncate(len);
        self.counts.truncate(len);
        self.bytes.truncate(len);
    }
}
pub struct AccountingIngested {}
impl AccountingIngested {
@@ -169,6 +180,12 @@ impl AccountingIngested {
for e in res.dim0.bytes {
ret.bytes.push(e)
}
for e in res.dim0.scalar_types {
ret.scalar_types.push(e)
}
for e in res.dim0.shapes {
ret.shapes.push(e)
}
for e in res.dim1.names {
ret.names.push(e)
}
@@ -178,6 +195,12 @@ impl AccountingIngested {
for e in res.dim1.bytes {
ret.bytes.push(e)
}
for e in res.dim1.scalar_types {
ret.scalar_types.push(e)
}
for e in res.dim1.shapes {
ret.shapes.push(e)
}
if let Some(sort) = qu.sort() {
if sort == "counts" {
// ret.sort_by_counts();
@@ -309,17 +332,23 @@ async fn resolve_usages(usage: UsageData, pgqu: &PgQueue) -> Result<Toplist, Err
match &info.shape {
Shape::Scalar => {
ret.scalar_count += 1;
ret.dim0.push(info.name, counts, bytes);
ret.dim0.push(info.name, counts, bytes, info.scalar_type, info.shape);
}
Shape::Wave(_) => {
ret.wave_count += 1;
ret.dim1.push(info.name, counts, bytes);
ret.dim1.push(info.name, counts, bytes, info.scalar_type, info.shape);
}
Shape::Image(_, _) => {}
}
} else {
ret.infos_missing_count += 1;
ret.dim0.push("UNRESOLVEDSERIES".into(), counts, bytes);
ret.dim0.push(
"UNRESOLVEDSERIES".into(),
counts,
bytes,
ScalarType::BOOL,
Shape::Scalar,
);
}
}
usage_skip += nn;

View File

@@ -1,5 +1,6 @@
use crate::bodystream::response;
use crate::channelconfig::ch_conf_from_binned;
use crate::requests::accepts_cbor_framed;
use crate::requests::accepts_json_framed;
use crate::requests::accepts_json_or_all;
use crate::requests::accepts_octets;
@@ -26,6 +27,7 @@ use netpod::timeunits::SEC;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ReqCtx;
use netpod::APP_CBOR_FRAMED;
use netpod::APP_JSON;
use netpod::APP_JSON_FRAMED;
use netpod::HEADER_NAME_REQUEST_ID;
@@ -38,7 +40,6 @@ use std::sync::Arc;
use streams::collect::CollectResult;
use streams::eventsplainreader::DummyCacheReadProvider;
use streams::eventsplainreader::SfDatabufferEventReadProvider;
use streams::lenframe::bytes_chunks_to_len_framed_str;
use streams::timebin::cached::reader::EventsReadProvider;
use streams::timebin::CacheReadProvider;
use tracing::Instrument;
@@ -125,7 +126,9 @@ async fn binned(
{
Err(Error::ServerError)?;
}
if accepts_json_framed(req.headers()) {
if accepts_cbor_framed(req.headers()) {
Ok(binned_cbor_framed(url, req, ctx, pgqueue, scyqueue, ncc).await?)
} else if accepts_json_framed(req.headers()) {
Ok(binned_json_framed(url, req, ctx, pgqueue, scyqueue, ncc).await?)
} else if accepts_json_or_all(req.headers()) {
Ok(binned_json_single(url, req, ctx, pgqueue, scyqueue, ncc).await?)
@@ -253,7 +256,7 @@ async fn binned_json_framed(
let reqid = crate::status_board().map_err(|_e| Error::ServerError)?.new_status_id();
let (_head, _body) = req.into_parts();
let query = BinnedQuery::from_url(&url).map_err(|e| {
error!("binned_json: {e:?}");
error!("binned_json_framed: {e:?}");
Error::BadQuery(e.to_string())
})?;
// TODO handle None case better and return 404
@@ -285,10 +288,62 @@ async fn binned_json_framed(
)
.instrument(span1)
.await?;
let stream = bytes_chunks_to_len_framed_str(stream);
let stream = streams::lenframe::bytes_chunks_to_len_framed_str(stream);
let ret = response(StatusCode::OK)
.header(CONTENT_TYPE, APP_JSON_FRAMED)
.header(HEADER_NAME_REQUEST_ID, ctx.reqid())
.body(body_stream(stream))?;
Ok(ret)
}
/// Serves a binned-data request for clients that accept the CBOR-framed
/// content type: parses a `BinnedQuery` from the URL, resolves the channel
/// configuration, and streams length-framed CBOR with `APP_CBOR_FRAMED`.
///
/// Errors: `BadQuery` when the URL does not parse as a `BinnedQuery`,
/// `ChannelNotFound` when no channel config is resolved, `ServerError` when
/// the status board is unavailable; other errors propagate via `?`.
async fn binned_cbor_framed(
url: Url,
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
scyqueue: Option<ScyllaQueue>,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
debug!("binned_cbor_framed {:?}", req);
// Register this request with the status board to obtain a tracking id.
let reqid = crate::status_board().map_err(|_e| Error::ServerError)?.new_status_id();
let (_head, _body) = req.into_parts();
let query = BinnedQuery::from_url(&url).map_err(|e| {
error!("binned_cbor_framed: {e:?}");
Error::BadQuery(e.to_string())
})?;
// TODO handle None case better and return 404
let ch_conf = ch_conf_from_binned(&query, ctx, pgqueue, ncc)
.await?
.ok_or_else(|| Error::ChannelNotFound)?;
// Tracing span carrying the request id, the queried time range (in
// seconds) and the channel name for all downstream log output.
let span1 = span!(
Level::INFO,
"httpret::binned_cbor_framed",
reqid,
beg = query.range().beg_u64() / SEC,
end = query.range().end_u64() / SEC,
ch = query.channel().name(),
);
span1.in_scope(|| {
debug!("begin");
});
// Build the read providers (event source plus cache) and a stream timeout,
// then produce the time-binned CBOR stream, instrumented with the span.
let open_bytes = Arc::pin(OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone()));
let (events_read_provider, cache_read_provider) =
make_read_provider(ch_conf.name(), scyqueue, open_bytes, ctx, ncc);
let timeout_provider = streamio::streamtimeout::StreamTimeout::boxed();
let stream = streams::timebinnedjson::timebinned_cbor_framed(
query,
ch_conf,
ctx,
cache_read_provider,
events_read_provider,
timeout_provider,
)
.instrument(span1)
.await?;
// Wrap the raw byte chunks into length-delimited frames for the response.
let stream = streams::lenframe::bytes_chunks_to_framed(stream);
let ret = response(StatusCode::OK)
.header(CONTENT_TYPE, APP_CBOR_FRAMED)
.header(HEADER_NAME_REQUEST_ID, ctx.reqid())
.body(body_stream(stream))?;
Ok(ret)
}