Clean up, collect with timeout

This commit is contained in:
Dominik Werder
2022-12-19 14:09:37 +01:00
parent 64233b0ccb
commit 646ec38b3c
32 changed files with 622 additions and 321 deletions

View File

@@ -11,7 +11,7 @@ use err::Error;
use futures_util::{Stream, StreamExt};
use serde::{Deserialize, Serialize};
use serde_json::Value as JsVal;
use std::collections::BTreeMap;
use std::collections::{BTreeMap, VecDeque};
use std::fmt;
use std::iter::FromIterator;
use std::net::SocketAddr;
@@ -543,11 +543,28 @@ pub struct TableSizes {
pub sizes: Vec<(String, String)>,
}
// Status report for a single downstream (sub) node, keyed by the URL it was
// queried at. The whole query result is stored, so a failed sub-node probe
// is carried as `Err` instead of being dropped.
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeStatusSub {
    // URL of the sub node that was queried.
    pub url: String,
    // Full status of that node, or the error produced while fetching it.
    // NOTE(review): serializing a `Result` relies on `err::Error` implementing
    // Serialize/Deserialize — confirm against the `err` crate.
    pub status: Result<NodeStatus, Error>,
}
/// Returns `true` when the borrowed value is `false`.
///
/// Intended for `#[serde(skip_serializing_if = "is_false")]`, which passes
/// `&bool`; the `Borrow<bool>` bound also accepts a plain `bool`.
fn is_false<T>(x: T) -> bool
where
    T: std::borrow::Borrow<bool>,
{
    // Idiomatic negation instead of `== false` (clippy: bool_comparison).
    !*x.borrow()
}
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeStatus {
pub name: String,
pub version: String,
#[serde(default, skip_serializing_if = "is_false")]
pub is_sf_databuffer: bool,
#[serde(default, skip_serializing_if = "is_false")]
pub is_archiver_engine: bool,
#[serde(default, skip_serializing_if = "is_false")]
pub is_archiver_appliance: bool,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub database_size: Option<Result<u64, String>>,
@@ -555,8 +572,8 @@ pub struct NodeStatus {
pub table_sizes: Option<Result<TableSizes, Error>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub archiver_appliance_status: Option<NodeStatusArchiverAppliance>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subs: Option<BTreeMap<String, Result<NodeStatus, Error>>>,
#[serde(default, skip_serializing_if = "VecDeque::is_empty")]
pub subs: VecDeque<NodeStatusSub>,
}
// Describes a "channel" which is a time-series with a unique name within a "backend".
@@ -601,12 +618,19 @@ impl FromUrl for Channel {
.into(),
name: pairs
.get("channelName")
.ok_or(Error::with_public_msg("missing channelName"))?
//.ok_or(Error::with_public_msg("missing channelName"))?
.map(String::from)
.unwrap_or(String::new())
.into(),
series: pairs
.get("seriesId")
.and_then(|x| x.parse::<u64>().map_or(None, |x| Some(x))),
};
if ret.name.is_empty() && ret.series.is_none() {
return Err(Error::with_public_msg(format!(
"Missing one of channelName or seriesId parameters."
)));
}
Ok(ret)
}
}
@@ -615,7 +639,9 @@ impl AppendToUrl for Channel {
fn append_to_url(&self, url: &mut Url) {
let mut g = url.query_pairs_mut();
g.append_pair("backend", &self.backend);
g.append_pair("channelName", &self.name);
if self.name().len() > 0 {
g.append_pair("channelName", &self.name);
}
if let Some(series) = self.series {
g.append_pair("seriesId", &series.to_string());
}
@@ -2088,7 +2114,7 @@ pub struct ChannelConfigResponse {
Provide basic information about a channel, especially its shape.
Also, byte-order is important for clients that process the raw databuffer event data (python data_api3).
*/
#[derive(Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChannelInfo {
pub scalar_type: ScalarType,
pub byte_order: Option<ByteOrder>,
@@ -2096,6 +2122,15 @@ pub struct ChannelInfo {
pub msg: serde_json::Value,
}
// Resolved configuration of a single channel: identity (backend, series id,
// name) together with the data layout (scalar type and shape) needed to
// decode its events.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChConf {
    // Backend the channel belongs to.
    pub backend: String,
    // Numeric series identifier of the channel within the backend.
    pub series: u64,
    // Human-readable channel name.
    pub name: String,
    // Element type of the stored values.
    pub scalar_type: ScalarType,
    // Dimensionality of each event (scalar, wave, image, ...).
    pub shape: Shape,
}
pub fn f32_close(a: f32, b: f32) -> bool {
if (a - b).abs() < 1e-4 || (a / b > 0.999 && a / b < 1.001) {
true

View File

@@ -3,11 +3,23 @@ pub mod datetime;
pub mod prebinned;
use crate::get_url_query_pairs;
use crate::is_false;
use crate::log::*;
use crate::{AggKind, AppendToUrl, ByteSize, Channel, FromUrl, HasBackend, HasTimeout, NanoRange, ToNanos};
use chrono::{DateTime, TimeZone, Utc};
use crate::AggKind;
use crate::AppendToUrl;
use crate::ByteSize;
use crate::Channel;
use crate::FromUrl;
use crate::HasBackend;
use crate::HasTimeout;
use crate::NanoRange;
use crate::ToNanos;
use chrono::DateTime;
use chrono::TimeZone;
use chrono::Utc;
use err::Error;
use serde::{Deserialize, Serialize};
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
use std::fmt;
use std::time::Duration;
@@ -71,11 +83,19 @@ pub struct PlainEventsQuery {
range: NanoRange,
agg_kind: AggKind,
timeout: Duration,
#[serde(default, skip_serializing_if = "Option::is_none")]
events_max: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none", with = "humantime_serde")]
event_delay: Option<Duration>,
#[serde(default, skip_serializing_if = "Option::is_none")]
stream_batch_len: Option<usize>,
#[serde(default, skip_serializing_if = "is_false")]
report_error: bool,
#[serde(default, skip_serializing_if = "is_false")]
do_log: bool,
#[serde(default, skip_serializing_if = "is_false")]
do_test_main_error: bool,
#[serde(default, skip_serializing_if = "is_false")]
do_test_stream_error: bool,
}
@@ -94,6 +114,7 @@ impl PlainEventsQuery {
agg_kind,
timeout,
events_max,
event_delay: None,
stream_batch_len: None,
report_error: false,
do_log,
@@ -126,8 +147,12 @@ impl PlainEventsQuery {
self.timeout
}
pub fn events_max(&self) -> Option<u64> {
self.events_max
pub fn events_max(&self) -> u64 {
self.events_max.unwrap_or(1024 * 1024)
}
pub fn event_delay(&self) -> &Option<Duration> {
&self.event_delay
}
pub fn do_log(&self) -> bool {
@@ -196,6 +221,9 @@ impl FromUrl for PlainEventsQuery {
events_max: pairs
.get("eventsMax")
.map_or(Ok(None), |k| k.parse().map(|k| Some(k)))?,
event_delay: pairs.get("eventDelay").map_or(Ok(None), |k| {
k.parse::<u64>().map(|x| Duration::from_millis(x)).map(|k| Some(k))
})?,
stream_batch_len: pairs
.get("streamBatchLen")
.map_or(Ok(None), |k| k.parse().map(|k| Some(k)))?,
@@ -242,10 +270,15 @@ impl AppendToUrl for PlainEventsQuery {
if let Some(x) = self.events_max.as_ref() {
g.append_pair("eventsMax", &format!("{}", x));
}
if let Some(x) = self.event_delay.as_ref() {
g.append_pair("eventDelay", &format!("{:.0}", x.as_secs_f64() * 1e3));
}
if let Some(x) = self.stream_batch_len.as_ref() {
g.append_pair("streamBatchLen", &format!("{}", x));
}
g.append_pair("doLog", &format!("{}", self.do_log));
if self.do_log {
g.append_pair("doLog", &format!("{}", self.do_log));
}
}
}
@@ -427,7 +460,12 @@ impl AppendToUrl for BinnedQuery {
{
self.channel.append_to_url(url);
let mut g = url.query_pairs_mut();
g.append_pair("cacheUsage", &self.cache_usage.to_string());
match &self.cache_usage {
CacheUsage::Use => {}
_ => {
g.append_pair("cacheUsage", &self.cache_usage.to_string());
}
}
g.append_pair("binCount", &format!("{}", self.bin_count));
g.append_pair(
"begDate",
@@ -443,11 +481,16 @@ impl AppendToUrl for BinnedQuery {
}
{
let mut g = url.query_pairs_mut();
g.append_pair("diskIoBufferSize", &format!("{}", self.disk_io_buffer_size));
g.append_pair("diskStatsEveryKb", &format!("{}", self.disk_stats_every.bytes() / 1024));
// TODO
//g.append_pair("diskIoBufferSize", &format!("{}", self.disk_io_buffer_size));
//g.append_pair("diskStatsEveryKb", &format!("{}", self.disk_stats_every.bytes() / 1024));
g.append_pair("timeout", &format!("{}", self.timeout.as_millis()));
g.append_pair("abortAfterBinCount", &format!("{}", self.abort_after_bin_count));
g.append_pair("doLog", &format!("{}", self.do_log));
if self.abort_after_bin_count > 0 {
g.append_pair("abortAfterBinCount", &format!("{}", self.abort_after_bin_count));
}
if self.do_log {
g.append_pair("doLog", &format!("{}", self.do_log));
}
}
}
}