Better accounting data retrieval

This commit is contained in:
Dominik Werder
2024-06-27 11:03:57 +02:00
parent 8e286b455d
commit 21259e6591
14 changed files with 456 additions and 220 deletions

View File

@@ -8,6 +8,7 @@ use err::Error;
use netpod::get_url_query_pairs;
use netpod::log::*;
use netpod::range::evrange::SeriesRange;
use netpod::ttl::RetentionTime;
use netpod::AppendToUrl;
use netpod::FromUrl;
use netpod::HasBackend;
@@ -82,12 +83,18 @@ impl AppendToUrl for AccountingIngestedBytesQuery {
/// Query parameters for the per-retention-tier accounting "toplist" endpoint.
/// Constructed from URL query pairs by the `FromUrl` impl further down.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountingToplistQuery {
    // Retention tier to read from; parsed from the `retentionTime` pair.
    rt: RetentionTime,
    // Backend name; taken verbatim from the `backend` pair.
    backend: String,
    // Point in time (nanoseconds) the toplist is evaluated at.
    ts: TsNano,
    // Maximum number of entries; the `limit` pair, falling back to 20.
    limit: u32,
    // Optional sort key; present only when a `sort` pair was given.
    sort: Option<String>,
}
impl AccountingToplistQuery {
pub fn rt(&self) -> RetentionTime {
self.rt.clone()
}
/// Timestamp (nanoseconds) the query is evaluated at.
pub fn ts(&self) -> TsNano {
    self.ts.to_owned()
}
@@ -95,6 +102,10 @@ impl AccountingToplistQuery {
/// Maximum number of toplist entries to return.
/// Defaults to 20 when the `limit` URL pair is absent or unparsable (see `FromUrl`).
pub fn limit(&self) -> u32 {
    self.limit
}
/// Optional sort key from the `sort` URL pair, if one was given.
pub fn sort(&self) -> Option<&str> {
    // `as_deref` is the idiomatic form of `as_ref().map(|x| x.as_str())`.
    self.sort.as_deref()
}
}
impl HasBackend for AccountingToplistQuery {
@@ -135,12 +146,20 @@ impl FromUrl for AccountingToplistQuery {
Ok::<_, Error>(TsNano::from_ns(w.to_nanos()))
};
let ret = Self {
// Parse the mandatory `retentionTime` pair. The parse-failure message previously
// duplicated the missing-key message; report the actual cause instead, matching
// the wording used for `useRt` elsewhere in this commit.
rt: pairs
    .get("retentionTime")
    .ok_or_else(|| Error::with_public_msg_no_trace("missing retentionTime"))
    .and_then(|x| {
        x.parse()
            .map_err(|_| Error::with_public_msg_no_trace("can not parse retentionTime"))
    })?,
backend: pairs
.get("backend")
.ok_or_else(|| Error::with_public_msg_no_trace("missing backend"))?
.to_string(),
ts: fn1(pairs)?,
// `and_then` (was `map_or(None, ..)`, clippy `option_map_or_none`); an absent
// or unparsable `limit` falls back to 20.
limit: pairs.get("limit").and_then(|x| x.parse().ok()).unwrap_or(20),
sort: pairs.get("sort").map(ToString::to_string),
};
Ok(ret)
}

View File

@@ -4,6 +4,7 @@ use netpod::get_url_query_pairs;
use netpod::log::*;
use netpod::query::CacheUsage;
use netpod::range::evrange::SeriesRange;
use netpod::ttl::RetentionTime;
use netpod::AppendToUrl;
use netpod::ByteSize;
use netpod::FromUrl;
@@ -40,6 +41,10 @@ pub struct BinnedQuery {
pub merger_out_len_max: Option<usize>,
#[serde(default, skip_serializing_if = "Option::is_none")]
test_do_wasm: Option<String>,
#[serde(default)]
log_level: String,
#[serde(default)]
use_rt: Option<RetentionTime>,
}
impl BinnedQuery {
@@ -56,6 +61,8 @@ impl BinnedQuery {
timeout: None,
merger_out_len_max: None,
test_do_wasm: None,
log_level: String::new(),
use_rt: None,
}
}
@@ -150,8 +157,11 @@ impl BinnedQuery {
}
pub fn log_level(&self) -> &str {
// TODO take from query
""
&self.log_level
}
/// Optional retention-time override, parsed from the `useRt` URL pair.
pub fn use_rt(&self) -> Option<RetentionTime> {
    self.use_rt.as_ref().cloned()
}
}
@@ -211,6 +221,12 @@ impl FromUrl for BinnedQuery {
.get("mergerOutLenMax")
.map_or(Ok(None), |k| k.parse().map(|k| Some(k)))?,
test_do_wasm: pairs.get("testDoWasm").map(|x| String::from(x)),
log_level: pairs.get("log_level").map_or(String::new(), String::from),
use_rt: pairs.get("useRt").map_or(Ok(None), |k| {
k.parse()
.map(Some)
.map_err(|_| Error::with_public_msg_no_trace(format!("can not parse useRt: {}", k)))
})?,
};
debug!("BinnedQuery::from_url {:?}", ret);
Ok(ret)
@@ -248,5 +264,11 @@ impl AppendToUrl for BinnedQuery {
if let Some(x) = &self.test_do_wasm {
g.append_pair("testDoWasm", &x);
}
// Only emit the pair when a log level was actually supplied
// (`!is_empty()` instead of `len() != 0`, clippy `len_zero`).
if !self.log_level.is_empty() {
    g.append_pair("log_level", &self.log_level);
}
if let Some(x) = self.use_rt.as_ref() {
g.append_pair("useRt", &x.to_string());
}
}
}

View File

@@ -450,7 +450,7 @@ impl From<&BinnedQuery> for EventsSubQuerySettings {
// TODO add to query
queue_len_disk_io: None,
create_errors: Vec::new(),
use_rt: None,
use_rt: value.use_rt(),
}
}
}