diff --git a/crates/commonio/Cargo.toml b/crates/commonio/Cargo.toml
index bbf9d0b..0aa1f75 100644
--- a/crates/commonio/Cargo.toml
+++ b/crates/commonio/Cargo.toml
@@ -18,7 +18,7 @@ async-channel = "1.9.0"
 parking_lot = "0.12"
 crc32fast = "1.2"
 daqbuf-err = { path = "../../../daqbuf-err" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 taskrun = { path = "../taskrun" }
-netpod = { path = "../netpod" }
 items_0 = { path = "../items_0" }
 items_proc = { path = "../items_proc" }
diff --git a/crates/daqbuffer/Cargo.toml b/crates/daqbuffer/Cargo.toml
index 7541c47..a41169c 100644
--- a/crates/daqbuffer/Cargo.toml
+++ b/crates/daqbuffer/Cargo.toml
@@ -16,8 +16,11 @@ url = "2.5.0"
 clap = { version = "4.5.7", features = ["derive", "cargo"] }
 daqbuf-err = { path = "../../../daqbuf-err" }
 taskrun = { path = "../taskrun" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 disk = { path = "../disk" }
 httpclient = { path = "../httpclient" }
 streams = { path = "../streams" }
 daqbufp2 = { path = "../daqbufp2" }
+
+[features]
+DISABLED = []
diff --git a/crates/daqbuffer/src/bin/daqbuffer.rs b/crates/daqbuffer/src/bin/daqbuffer.rs
index 3ada2f4..021850f 100644
--- a/crates/daqbuffer/src/bin/daqbuffer.rs
+++ b/crates/daqbuffer/src/bin/daqbuffer.rs
@@ -169,7 +169,7 @@ async fn test_log() {
 // TODO use httpclient for the request: need to add binary POST.
 //#[test]
 #[allow(unused)]
-#[cfg(DISABLED)]
+#[cfg(feature = "DISABLED")]
 fn simple_fetch() {
     use daqbuffer::err::ErrConv;
     use netpod::timeunits::*;
diff --git a/crates/daqbufp2/Cargo.toml b/crates/daqbufp2/Cargo.toml
index d667c6f..b298d61 100644
--- a/crates/daqbufp2/Cargo.toml
+++ b/crates/daqbufp2/Cargo.toml
@@ -23,7 +23,7 @@ url = "2.2.2"
 lazy_static = "1.4.0"
 daqbuf-err = { path = "../../../daqbuf-err" }
 taskrun = { path = "../taskrun" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 query = { path = "../query" }
 httpret = { path = "../httpret" }
 httpclient = { path = "../httpclient" }
diff --git a/crates/dbconn/Cargo.toml b/crates/dbconn/Cargo.toml
index 2fd6417..06410c5 100644
--- a/crates/dbconn/Cargo.toml
+++ b/crates/dbconn/Cargo.toml
@@ -21,6 +21,6 @@ async-channel = "1.9.0"
 chrono = "0.4.38"
 regex = "1.10.4"
 daqbuf-err = { path = "../../../daqbuf-err" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 parse = { path = "../parse" }
 taskrun = { path = "../taskrun" }
diff --git a/crates/disk/Cargo.toml b/crates/disk/Cargo.toml
index 4fcc0f4..dddfe2b 100644
--- a/crates/disk/Cargo.toml
+++ b/crates/disk/Cargo.toml
@@ -32,7 +32,7 @@ url = "2.5.0"
 tiny-keccak = { version = "2.0", features = ["sha3"] }
 daqbuf-err = { path = "../../../daqbuf-err" }
 taskrun = { path = "../taskrun" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 query = { path = "../query" }
 dbconn = { path = "../dbconn" }
 parse = { path = "../parse" }
diff --git a/crates/dq/Cargo.toml b/crates/dq/Cargo.toml
index 58c8ff3..3c5f0b7 100644
--- a/crates/dq/Cargo.toml
+++ b/crates/dq/Cargo.toml
@@ -15,7 +15,7 @@ chrono = "0.4.19"
 bytes = "1.7"
 daqbuf-err = { path = "../../../daqbuf-err" }
 taskrun = { path = "../taskrun" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 parse = { path = "../parse" }
 disk = { path = "../disk" }
 streams = { path = "../streams" }
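The `[features] DISABLED = []` addition above pairs with the `#[cfg(DISABLED)]` to `#[cfg(feature = "DISABLED")]` fix in daqbuffer.rs: a bare `cfg(DISABLED)` names a compiler cfg that cargo never sets (and newer toolchains warn about such unexpected cfgs), while a declared-but-empty feature is a supported way to park code. A minimal sketch of the pattern (standalone example, not code from this repository):

#[cfg(feature = "DISABLED")]
fn simple_fetch_like_code() {
    // Compiled only when built with: cargo build --features DISABLED
    println!("feature-gated path");
}

fn main() {
    // In a default build the gated item does not exist, so guard the call too.
    #[cfg(feature = "DISABLED")]
    simple_fetch_like_code();
}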
diff --git a/crates/httpclient/Cargo.toml b/crates/httpclient/Cargo.toml
index 9ebf83f..df507e5 100644
--- a/crates/httpclient/Cargo.toml
+++ b/crates/httpclient/Cargo.toml
@@ -20,7 +20,7 @@ hyper-util = { version = "0.1.1", features = ["full"] }
 bytes = "1.5.0"
 async-channel = "1.9.0"
 daqbuf-err = { path = "../../../daqbuf-err" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 parse = { path = "../parse" }
 streams = { path = "../streams" }
 thiserror = "0.0.1"
diff --git a/crates/httpret/Cargo.toml b/crates/httpret/Cargo.toml
index f93d0cb..db52481 100644
--- a/crates/httpret/Cargo.toml
+++ b/crates/httpret/Cargo.toml
@@ -29,7 +29,7 @@ ciborium = "0.2.1"
 flate2 = "1"
 brotli = "3.4.0"
 daqbuf-err = { path = "../../../daqbuf-err" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 query = { path = "../query" }
 dbconn = { path = "../dbconn" }
 disk = { path = "../disk" }
diff --git a/crates/items_0/Cargo.toml b/crates/items_0/Cargo.toml
index 1533b40..c3a5022 100644
--- a/crates/items_0/Cargo.toml
+++ b/crates/items_0/Cargo.toml
@@ -16,5 +16,5 @@ bincode = "1.3.3"
 bytes = "1.2.1"
 futures-util = "0.3.24"
 chrono = { version = "0.4.19", features = ["serde"] }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 daqbuf-err = { path = "../../../daqbuf-err" }
diff --git a/crates/items_2/Cargo.toml b/crates/items_2/Cargo.toml
index f71723e..1e80da4 100644
--- a/crates/items_2/Cargo.toml
+++ b/crates/items_2/Cargo.toml
@@ -26,7 +26,7 @@
 thiserror = "0.0.1"
 daqbuf-err = { path = "../../../daqbuf-err" }
 items_0 = { path = "../items_0" }
 items_proc = { path = "../items_proc" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 parse = { path = "../parse" }
 bitshuffle = { path = "../bitshuffle" }
diff --git a/crates/netpod/Cargo.toml b/crates/netpod/Cargo.toml
deleted file mode 100644
index e92f10c..0000000
--- a/crates/netpod/Cargo.toml
+++ /dev/null
@@ -1,28 +0,0 @@
-[package]
-name = "netpod"
-version = "0.0.2"
-authors = ["Dominik Werder "]
-edition = "2021"
-
-[lib]
-path = "src/netpod.rs"
-
-[dependencies]
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
-http = "1.0.0"
-humantime = "2.1.0"
-humantime-serde = "1.1.1"
-bytes = "1.4.0"
-chrono = { version = "0.4.19", features = ["serde"] }
-futures-util = "0.3.14"
-tracing = "0.1.37"
-url = "2.5.0"
-num-traits = "0.2.16"
-hex = "0.4.3"
-rand = "0.8.5"
-thiserror = "0.0.1"
-daqbuf-err = { path = "../../../daqbuf-err" }
-
-[patch.crates-io]
-thiserror = { git = "https://github.com/dominikwerder/thiserror.git", branch = "cstm" }
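Note the `thiserror = "0.0.1"` pins together with the `[patch.crates-io]` entry in the deleted manifest: the build uses the `cstm` fork of thiserror rather than the upstream crate, and that fork provides the `ThisError` derive with the `#[cstm(name = ...)]` attribute used throughout the deleted netpod sources below. A rough sketch of the pattern as this codebase uses it (hypothetical enum; assumes the fork's derive re-exported through daqbuf-err, as the sources below do with `use err::ThisError;`):

use daqbuf_err::ThisError;

#[derive(Debug, ThisError)]
#[cstm(name = "Example")]
pub enum ExampleError {
    MissingBackend,
    BadInt(#[from] std::num::ParseIntError),
}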
diff --git a/crates/netpod/src/channelstatus.rs b/crates/netpod/src/channelstatus.rs
deleted file mode 100644
index dfbe362..0000000
--- a/crates/netpod/src/channelstatus.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-use daqbuf_err as err;
-
-#[derive(Debug, Clone)]
-pub enum ChannelStatusClosedReason {
-    ShutdownCommand,
-    ChannelRemove,
-    ProtocolError,
-    FrequencyQuota,
-    BandwidthQuota,
-    InternalError,
-    IocTimeout,
-    NoProtocol,
-    ProtocolDone,
-    ConnectFail,
-    IoError,
-}
-
-#[derive(Debug, Clone)]
-pub enum ChannelStatus {
-    AssignedToAddress,
-    Opened,
-    Closed(ChannelStatusClosedReason),
-    Pong,
-    MonitoringSilenceReadStart,
-    MonitoringSilenceReadTimeout,
-    MonitoringSilenceReadUnchanged,
-    HaveStatusId,
-    HaveAddress,
-}
-
-impl ChannelStatus {
-    pub fn to_kind(&self) -> u32 {
-        use ChannelStatus::*;
-        use ChannelStatusClosedReason::*;
-        match self {
-            AssignedToAddress => 24,
-            Opened => 1,
-            Closed(x) => match x {
-                ShutdownCommand => 2,
-                ChannelRemove => 3,
-                ProtocolError => 4,
-                FrequencyQuota => 5,
-                BandwidthQuota => 6,
-                InternalError => 7,
-                IocTimeout => 8,
-                NoProtocol => 9,
-                ProtocolDone => 10,
-                ConnectFail => 11,
-                IoError => 12,
-            },
-            Pong => 25,
-            MonitoringSilenceReadStart => 26,
-            MonitoringSilenceReadTimeout => 27,
-            MonitoringSilenceReadUnchanged => 28,
-            HaveStatusId => 29,
-            HaveAddress => 30,
-        }
-    }
-
-    pub fn from_kind(kind: u32) -> Result<Self, err::Error> {
-        use ChannelStatus::*;
-        use ChannelStatusClosedReason::*;
-        let ret = match kind {
-            1 => Opened,
-            2 => Closed(ShutdownCommand),
-            3 => Closed(ChannelRemove),
-            4 => Closed(ProtocolError),
-            5 => Closed(FrequencyQuota),
-            6 => Closed(BandwidthQuota),
-            7 => Closed(InternalError),
-            8 => Closed(IocTimeout),
-            9 => Closed(NoProtocol),
-            10 => Closed(ProtocolDone),
-            11 => Closed(ConnectFail),
-            12 => Closed(IoError),
-            24 => AssignedToAddress,
-            25 => Pong,
-            26 => MonitoringSilenceReadStart,
-            27 => MonitoringSilenceReadTimeout,
-            28 => MonitoringSilenceReadUnchanged,
-            29 => HaveStatusId,
-            30 => HaveAddress,
-            _ => {
-                return Err(err::Error::with_msg_no_trace(format!(
-                    "unknown ChannelStatus kind {kind}"
-                )));
-            }
-        };
-        Ok(ret)
-    }
-
-    pub fn to_u64(&self) -> u64 {
-        self.to_kind() as u64
-    }
-
-    pub fn to_user_variant_string(&self) -> String {
-        use ChannelStatus::*;
-        let ret = match self {
-            AssignedToAddress => "Located",
-            Opened => "Opened",
-            Closed(_) => "Closed",
-            Pong => "Pongg",
-            MonitoringSilenceReadStart => "MSRS",
-            MonitoringSilenceReadTimeout => "MSRT",
-            MonitoringSilenceReadUnchanged => "MSRU",
-            HaveStatusId => "HaveStatusId",
-            HaveAddress => "HaveAddress",
-        };
-        ret.into()
-    }
-}
diff --git a/crates/netpod/src/hex.rs b/crates/netpod/src/hex.rs
deleted file mode 100644
index 604a107..0000000
--- a/crates/netpod/src/hex.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-/// Input may also contain whitespace.
-pub fn decode_hex<INP: AsRef<str>>(inp: INP) -> Result<Vec<u8>, ()> {
-    let a: Vec<_> = inp
-        .as_ref()
-        .bytes()
-        .filter(|&x| (x >= b'0' && x <= b'9') || (x >= b'a' && x <= b'f'))
-        .collect();
-    let ret = hex::decode(a).map_err(|_| ())?;
-    Ok(ret)
-}
diff --git a/crates/netpod/src/histo.rs b/crates/netpod/src/histo.rs
deleted file mode 100644
index b9d144a..0000000
--- a/crates/netpod/src/histo.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-#[derive(Debug)]
-pub struct HistoLog2 {
-    histo: [u64; 16],
-    sub: usize,
-}
-
-impl HistoLog2 {
-    pub fn new(sub: usize) -> Self {
-        Self { histo: [0; 16], sub }
-    }
-
-    #[inline]
-    pub fn ingest(&mut self, mut v: u32) {
-        let mut po = 0;
-        while v != 0 && po < 15 {
-            v = v >> 1;
-            po += 1;
-        }
-        let po = if po >= self.histo.len() + self.sub {
-            self.histo.len() - 1
-        } else {
-            if po > self.sub {
-                po - self.sub
-            } else {
-                0
-            }
-        };
-        self.histo[po] += 1;
-    }
-}
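A usage sketch for the deleted HistoLog2 (assuming the type as defined above is in scope): `ingest` buckets a value by the position of its highest set bit, shifts the bucket down by `sub`, and clamps to the 16 available buckets.

fn main() {
    let mut h = HistoLog2::new(0);
    h.ingest(0); // no set bit: bucket 0
    h.ingest(1); // one shift: bucket 1
    h.ingest(1000); // ~2^10: bucket 10
    h.ingest(u32::MAX); // capped by `po < 15`: bucket 15
    println!("{h:?}");
}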
diff --git a/crates/netpod/src/netpod.rs b/crates/netpod/src/netpod.rs
deleted file mode 100644
index 0e50943..0000000
--- a/crates/netpod/src/netpod.rs
+++ /dev/null
@@ -1,4331 +0,0 @@
-pub mod channelstatus;
-pub mod hex;
-pub mod histo;
-pub mod query;
-pub mod range;
-pub mod status;
-pub mod stream_impl_tracer;
-pub mod streamext;
-pub mod ttl;
-
-pub mod log_macros {
-    #[allow(unused)]
-    #[macro_export]
-    macro_rules! trace {
-        ($($arg:tt)*) => {
-            eprintln!($($arg)*);
-        };
-    }
-
-    #[allow(unused)]
-    #[macro_export]
-    macro_rules! debug {
-        ($($arg:tt)*) => {
-            eprintln!($($arg)*);
-        };
-    }
-
-    #[allow(unused)]
-    #[macro_export]
-    macro_rules! info {
-        ($($arg:tt)*) => {
-            eprintln!($($arg)*);
-        };
-    }
-
-    #[allow(unused)]
-    #[macro_export]
-    macro_rules! warn {
-        ($($arg:tt)*) => {
-            eprintln!($($arg)*);
-        };
-    }
-
-    #[allow(unused)]
-    #[macro_export]
-    macro_rules! error {
-        ($($arg:tt)*) => {
-            eprintln!($($arg)*);
-        };
-    }
-}
-
-pub mod log {
-    pub use tracing::{self, event, span, Level};
-    pub use tracing::{debug, error, info, trace, warn};
-}
-
-pub mod log_ {
-    pub use crate::{debug, error, info, trace, warn};
-    pub use tracing::{self, event, span, Level};
-}
-
-use daqbuf_err as err;
-
-use bytes::Bytes;
-use chrono::DateTime;
-use chrono::TimeZone;
-use chrono::Utc;
-use err::thiserror;
-use err::Error;
-use err::ThisError;
-use futures_util::Stream;
-use futures_util::StreamExt;
-use http::Request;
-use http::Uri;
-use range::evrange::NanoRange;
-use range::evrange::PulseRange;
-use range::evrange::SeriesRange;
-use serde::Deserialize;
-use serde::Serialize;
-use serde_json::Value as JsVal;
-use std::collections::BTreeMap;
-use std::collections::VecDeque;
-use std::fmt;
-use std::iter::FromIterator;
-use std::net::SocketAddr;
-use std::path::PathBuf;
-use std::pin::Pin;
-use std::str::FromStr;
-use std::sync::atomic;
-use std::sync::atomic::AtomicPtr;
-use std::sync::Once;
-use std::sync::RwLock;
-use std::sync::RwLockWriteGuard;
-use std::task::Context;
-use std::task::Poll;
-use std::time::Duration;
-use std::time::Instant;
-use std::time::SystemTime;
-use std::time::UNIX_EPOCH;
-use timeunits::*;
-use url::Url;
-
-pub const APP_JSON: &str = "application/json";
-pub const APP_JSON_LINES: &str = "application/jsonlines";
-pub const APP_OCTET: &str = "application/octet-stream";
-pub const APP_CBOR_FRAMED: &str = "application/cbor-framed";
-pub const APP_JSON_FRAMED: &str = "application/json-framed";
-pub const ACCEPT_ALL: &str = "*/*";
-pub const X_DAQBUF_REQID: &str = "x-daqbuffer-request-id";
-pub const HEADER_NAME_REQUEST_ID: &str = "requestid";
-
-pub const CONNECTION_STATUS_DIV: DtMs = DtMs::from_ms_u64(1000 * 60 * 60);
-// pub const TS_MSP_GRID_UNIT: DtMs = DtMs::from_ms_u64(1000 * 10);
-// pub const TS_MSP_GRID_SPACING: u64 = 6 * 2;
-
-pub const EMIT_ACCOUNTING_SNAP: DtMs = DtMs::from_ms_u64(1000 * 60 * 10);
-
-pub const DATETIME_FMT_0MS: &str = "%Y-%m-%dT%H:%M:%SZ";
-pub const DATETIME_FMT_3MS: &str = "%Y-%m-%dT%H:%M:%S.%3fZ";
-pub const DATETIME_FMT_6MS: &str = "%Y-%m-%dT%H:%M:%S.%6fZ";
-pub const DATETIME_FMT_9MS: &str = "%Y-%m-%dT%H:%M:%S.%9fZ";
-
-const TEST_BACKEND: &str = "testbackend-00";
-
-#[allow(non_upper_case_globals)]
-pub const trigger: [&'static str; 0] = [
-    //
-    // "S30CB05-VMCP-A010:PRESSURE",
-    // "ATSRF-CAV:TUN-DETUNING-REL-ACT",
-    // "S30CB14-KBOC-HPPI1:PI-OUT",
-];
-
-pub const TRACE_SERIES_ID: [u64; 1] = [
-    //
-    4985969403507503043,
-];
-
-pub struct OnDrop<F>
-where
-    F: FnOnce() -> (),
-{
-    f: Option<F>,
-}
-
-impl<F> OnDrop<F>
-where
-    F: FnOnce() -> (),
-{
-    pub fn new(f: F) -> Self {
-        Self { f: Some(f) }
-    }
-}
-
-impl<F> Drop for OnDrop<F>
-where
-    F: FnOnce() -> (),
-{
-    fn drop(&mut self) {
-        self.f.take().map(|x| x());
-    }
-}
-
-pub fn is_false<T>(x: T) -> bool
-where
-    T: std::borrow::Borrow<bool>,
-{
-    *x.borrow() == false
-}
-
-pub trait CmpZero {
-    fn is_zero(&self) -> bool;
-}
-
-impl CmpZero for u32 {
-    fn is_zero(&self) -> bool {
-        *self == 0
-    }
-}
-impl CmpZero for usize {
-    fn is_zero(&self) -> bool {
-        *self == 0
-    }
-}
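// Illustrative sketch (assuming the OnDrop guard defined above): the stored
// closure runs exactly once, when the guard goes out of scope.
fn on_drop_demo() {
    let _guard = OnDrop::new(|| println!("cleanup ran"));
    println!("guard still alive");
} // "guard still alive" prints first, then "cleanup ran"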
-#[derive(Debug, ThisError)]
-#[cstm(name = "Netpod")]
-pub enum NetpodError {
-    UnknownSeriesKind(i64),
-    BadInt(#[from] std::num::ParseIntError),
-    ChronoParse(#[from] chrono::ParseError),
-    HumantimeDurationParse(#[from] humantime::DurationError),
-    MissingQueryParameters,
-    MissingSeries,
-    MissingBackend,
-    MissingTimerange,
-    BadTimerange,
-    NoSeriesNoName,
-    BadScalarTypeIndex(i64),
-    UnsupportedDtype(u8),
-    JsonParse(#[from] serde_json::Error),
-    BadScalarTypeVariant(String),
-    BadScalarTypeCaId(u16),
-    ScalarTypeNotInCa,
-    MissingScalarType,
-    MissingShape,
-    MissingBinningScheme,
-    BadCacheUsage(String),
-    TimelikeBinWidthImpossibleForPulseRange,
-    BinCountTooLarge,
-    BinCountTooSmall,
-    BinnedNoGridMatch,
-    NotTimerange,
-}
-
-#[derive(Debug, ThisError)]
-#[cstm(name = "AsyncChannelError")]
-pub enum AsyncChannelError {
-    Send,
-    Recv,
-}
-
-pub struct BodyStream<E>
-where
-    E: std::error::Error,
-{
-    pub inner: Box<dyn Stream<Item = Result<Bytes, E>> + Send + Unpin>,
-}
-
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)]
-pub enum SeriesKind {
-    ChannelStatus,
-    ChannelData,
-    CaStatus,
-}
-
-impl SeriesKind {
-    pub fn to_db_i16(&self) -> i16 {
-        use SeriesKind::*;
-        match self {
-            ChannelStatus => 1,
-            ChannelData => 2,
-            CaStatus => 3,
-        }
-    }
-
-    pub fn from_db_i16(x: i16) -> Result<Self, NetpodError> {
-        let ret = match x {
-            1 => Self::ChannelStatus,
-            2 => Self::ChannelData,
-            3 => Self::CaStatus,
-            _ => return Err(NetpodError::UnknownSeriesKind(x as i64)),
-        };
-        Ok(ret)
-    }
-}
-
-impl Default for SeriesKind {
-    fn default() -> Self {
-        SeriesKind::ChannelData
-    }
-}
-
-impl FromUrl for SeriesKind {
-    type Error = NetpodError;
-
-    fn from_url(url: &Url) -> Result<Self, Self::Error> {
-        let pairs = get_url_query_pairs(url);
-        Self::from_pairs(&pairs)
-    }
-
-    fn from_pairs(pairs: &BTreeMap<String, String>) -> Result<Self, Self::Error> {
-        let ret = pairs
-            .get("seriesKind")
-            .and_then(|x| match x.as_str() {
-                "channelStatus" => Some(Self::ChannelStatus),
-                "channelData" => Some(Self::ChannelData),
-                "caStatus" => Some(Self::CaStatus),
-                _ => None,
-            })
-            .unwrap_or(Self::default());
-        Ok(ret)
-    }
-}
-
-impl AppendToUrl for SeriesKind {
-    fn append_to_url(&self, url: &mut Url) {
-        let s = match self {
-            SeriesKind::ChannelStatus => "channelStatus",
-            SeriesKind::ChannelData => "channelData",
-            SeriesKind::CaStatus => "caStatus",
-        };
-        let mut g = url.query_pairs_mut();
-        g.append_pair("seriesKind", &s);
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
-pub enum ScalarType {
-    U8,
-    U16,
-    U32,
-    U64,
-    I8,
-    I16,
-    I32,
-    I64,
-    F32,
-    F64,
-    BOOL,
-    STRING,
-    Enum,
-}
-
-impl fmt::Debug for ScalarType {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(fmt, "{}", self.to_variant_str())
-    }
-}
-
-impl fmt::Display for ScalarType {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(fmt, "{}", self.to_variant_str())
-    }
-}
-
-impl Serialize for ScalarType {
-    fn serialize<S: serde::Serializer>(&self, ser: S) -> Result<S::Ok, S::Error>
-    where
-        S::Error: serde::ser::Error,
-    {
-        use ScalarType::*;
-        match self {
-            U8 => ser.serialize_str("u8"),
-            U16 => ser.serialize_str("u16"),
-            U32 => ser.serialize_str("u32"),
-            U64 => ser.serialize_str("u64"),
-            I8 => ser.serialize_str("i8"),
-            I16 => ser.serialize_str("i16"),
-            I32 => ser.serialize_str("i32"),
-            I64 => ser.serialize_str("i64"),
-            F32 => ser.serialize_str("f32"),
-            F64 => ser.serialize_str("f64"),
-            BOOL => ser.serialize_str("bool"),
-            STRING => ser.serialize_str("string"),
-            Enum => ser.serialize_str("enum"),
-        }
-    }
-}
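// Round-trip sketch for the SeriesKind URL coding above (assumes the FromUrl
// and AppendToUrl traits are in scope; uses the url crate already imported
// by this file).
fn series_kind_url_roundtrip() {
    let mut url: Url = "http://localhost:8080/api/4/events".parse().unwrap();
    SeriesKind::ChannelStatus.append_to_url(&mut url);
    // url now carries ?seriesKind=channelStatus
    assert_eq!(SeriesKind::from_url(&url).unwrap(), SeriesKind::ChannelStatus);
}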
-
-struct ScalarTypeVis;
-
-impl<'de> serde::de::Visitor<'de> for ScalarTypeVis {
-    type Value = ScalarType;
-
-    fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        fmt.write_str("a string describing the ScalarType variant")
-    }
-
-    fn visit_str<E: serde::de::Error>(self, value: &str) -> Result<Self::Value, E> {
-        use ScalarType::*;
-        let s = value.to_lowercase();
-        let ret = match s.as_str() {
-            "u8" => U8,
-            "u16" => U16,
-            "u32" => U32,
-            "u64" => U64,
-            "i8" => I8,
-            "i16" => I16,
-            "i32" => I32,
-            "i64" => I64,
-            "f32" => F32,
-            "f64" => F64,
-            "bool" => BOOL,
-            "string" => STRING,
-            "enum" => Enum,
-            k => return Err(E::custom(format!("can not understand variant {k:?}"))),
-        };
-        Ok(ret)
-    }
-}
-
-impl<'de> Deserialize<'de> for ScalarType {
-    fn deserialize<D>(de: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        de.deserialize_str(ScalarTypeVis)
-    }
-}
-
-pub trait HasScalarType {
-    fn scalar_type(&self) -> ScalarType;
-}
-
-impl ScalarType {
-    pub fn from_dtype_index(ix: u8) -> Result<Self, NetpodError> {
-        use ScalarType::*;
-        let g = match ix {
-            0 => BOOL,
-            1 => BOOL,
-            3 => U8,
-            5 => U16,
-            8 => U32,
-            10 => U64,
-            2 => I8,
-            4 => I16,
-            7 => I32,
-            9 => I64,
-            11 => F32,
-            12 => F64,
-            13 => STRING,
-            15 => Enum,
-            _ => return Err(NetpodError::UnsupportedDtype(ix)),
-        };
-        Ok(g)
-    }
-
-    pub fn to_variant_str(&self) -> &'static str {
-        use ScalarType::*;
-        match self {
-            U8 => "u8",
-            U16 => "u16",
-            U32 => "u32",
-            U64 => "u64",
-            I8 => "i8",
-            I16 => "i16",
-            I32 => "i32",
-            I64 => "i64",
-            F32 => "f32",
-            F64 => "f64",
-            BOOL => "bool",
-            STRING => "string",
-            Enum => "enum",
-        }
-    }
-
-    pub fn from_variant_str(s: &str) -> Result<Self, NetpodError> {
-        use ScalarType::*;
-        let ret = match s {
-            "u8" => U8,
-            "u16" => U16,
-            "u32" => U32,
-            "u64" => U64,
-            "i8" => I8,
-            "i16" => I16,
-            "i32" => I32,
-            "i64" => I64,
-            "f32" => F32,
-            "f64" => F64,
-            "bool" => BOOL,
-            "string" => STRING,
-            "enum" => Enum,
-            _ => return Err(NetpodError::BadScalarTypeVariant(s.into())),
-        };
-        Ok(ret)
-    }
-
-    pub fn to_bsread_str(&self) -> &'static str {
-        use ScalarType::*;
-        match self {
-            U8 => "uint8",
-            U16 => "uint16",
-            U32 => "uint32",
-            U64 => "uint64",
-            I8 => "int8",
-            I16 => "int16",
-            I32 => "int32",
-            I64 => "int64",
-            F32 => "float32",
-            F64 => "float64",
-            BOOL => "bool",
-            STRING => "string",
-            Enum => "enum",
-        }
-    }
-
-    pub fn from_bsread_str(s: &str) -> Result<Self, NetpodError> {
-        use ScalarType::*;
-        let ret = match s {
-            "uint8" => U8,
-            "uint16" => U16,
-            "uint32" => U32,
-            "uint64" => U64,
-            "int8" => I8,
-            "int16" => I16,
-            "int32" => I32,
-            "int64" => I64,
-            "float" => F32,
-            "double" => F64,
-            "float32" => F32,
-            "float64" => F64,
-            "bool" => BOOL,
-            "string" => STRING,
-            "enum" => Enum,
-            _ => return Err(NetpodError::BadScalarTypeVariant(s.into())),
-        };
-        Ok(ret)
-    }
-
-    pub fn from_ca_id(k: u16) -> Result<Self, NetpodError> {
-        use ScalarType::*;
-        let ret = match k {
-            0 => STRING,
-            1 => I16,
-            2 => F32,
-            3 => Enum,
-            4 => I8,
-            5 => I32,
-            6 => F64,
-            _ => return Err(NetpodError::BadScalarTypeCaId(k)),
-        };
-        Ok(ret)
-    }
-
-    pub fn to_ca_id(&self) -> Result<u16, NetpodError> {
-        use ScalarType::*;
-        let ret = match self {
-            I8 => 4,
-            I16 => 1,
-            I32 => 5,
-            F32 => 2,
-            F64 => 6,
-            STRING => 0,
-            Enum => 3,
-            _ => return Err(NetpodError::ScalarTypeNotInCa),
-        };
-        Ok(ret)
-    }
-
-    pub fn from_archeng_db_str(s: &str) -> Result<Self, NetpodError> {
-        use ScalarType::*;
-        let ret = match s {
-            "I8" => I8,
-            "I16" => I16,
-            "I32" => I32,
-            "I64" => I64,
-            "F32" => F32,
-            "F64" => F64,
-            _ => return Err(NetpodError::BadScalarTypeVariant(s.into())),
-        };
-        Ok(ret)
-    }
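// Consistency sketch for the dtype mapping above: ScalarType::index() and
// ScalarType::from_dtype_index() are mutual inverses for every variant.
fn scalar_type_index_roundtrip() {
    for st in [ScalarType::U8, ScalarType::F64, ScalarType::STRING, ScalarType::Enum] {
        assert_eq!(ScalarType::from_dtype_index(st.index()).unwrap(), st);
    }
}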
-
-    pub fn from_scylla_i32(k: i32) -> Result<Self, NetpodError> {
-        if k < 0 || k > u8::MAX as i32 {
-            return Err(NetpodError::BadScalarTypeIndex(k as i64));
-        }
-        Self::from_dtype_index(k as u8)
-    }
-
-    // TODO this is useless for strings and enums.
-    pub fn bytes(&self) -> u8 {
-        use ScalarType::*;
-        match self {
-            U8 => 1,
-            U16 => 2,
-            U32 => 4,
-            U64 => 8,
-            I8 => 1,
-            I16 => 2,
-            I32 => 4,
-            I64 => 8,
-            F32 => 4,
-            F64 => 8,
-            BOOL => 1,
-            STRING => 1,
-            Enum => 2,
-        }
-    }
-
-    pub fn index(&self) -> u8 {
-        use ScalarType::*;
-        match self {
-            U8 => 3,
-            U16 => 5,
-            U32 => 8,
-            U64 => 10,
-            I8 => 2,
-            I16 => 4,
-            I32 => 7,
-            I64 => 9,
-            F32 => 11,
-            F64 => 12,
-            BOOL => 0,
-            STRING => 13,
-            Enum => 15,
-        }
-    }
-
-    pub fn to_scylla_table_name_id(&self) -> &'static str {
-        self.to_variant_str()
-    }
-
-    pub fn to_scylla_i32(&self) -> i32 {
-        self.index() as i32
-    }
-
-    pub fn from_url_str(s: &str) -> Result<Self, NetpodError> {
-        let ret = serde_json::from_str(&format!("\"{s}\""))?;
-        Ok(ret)
-    }
-}
-
-#[derive(Debug, Clone, PartialOrd, PartialEq)]
-pub struct StringFix<const N: usize> {
-    data: [char; N],
-    len: u8,
-}
-
-impl<const N: usize> StringFix<N> {
-    pub fn new() -> Self {
-        Self {
-            data: [char::REPLACEMENT_CHARACTER; N],
-            len: 0,
-        }
-    }
-
-    pub fn string(&self) -> String {
-        self.data[..self.len as usize].iter().map(|x| *x).collect()
-    }
-}
-
-impl<T, const N: usize> From<T> for StringFix<N>
-where
-    T: AsRef<str>,
-{
-    fn from(x: T) -> Self {
-        let sl = x.as_ref();
-        let sl = &sl[0..sl.len().min(N)];
-        let mut ret = Self::new();
-        for (i, ch) in sl.chars().enumerate() {
-            ret.data[i] = ch;
-        }
-        ret.len = sl.len() as u8;
-        ret
-    }
-}
-
-impl<const N: usize> From<StringFix<N>> for String {
-    fn from(x: StringFix<N>) -> Self {
-        x.data[0..x.len as _].iter().collect()
-    }
-}
-
-mod string_fix_impl_serde {
-    use crate::StringFix;
-    use serde::de::Visitor;
-    use serde::Deserialize;
-    use serde::Serialize;
-    use std::fmt;
-
-    impl<const N: usize> Serialize for StringFix<N> {
-        fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
-        where
-            S: serde::Serializer,
-        {
-            ser.serialize_str(todo!("StringFix Serialize"))
-        }
-    }
-
-    impl<'de, const N: usize> Deserialize<'de> for StringFix<N> {
-        fn deserialize<D>(de: D) -> Result<Self, D::Error>
-        where
-            D: serde::Deserializer<'de>,
-        {
-            todo!("StringFix Deserialize")
-            // de.deserialize_unit(Vis::<N>)
-        }
-    }
-
-    struct Vis<const N: usize>;
-
-    impl<'de, const N: usize> Visitor<'de> for Vis<N> {
-        type Value = StringFix<N>;
-
-        fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-            write!(fmt, "deserialize enum error")
-        }
-
-        fn visit_unit(self) -> Result<Self::Value, E>
-        where
-            E: serde::de::Error,
-        {
-            todo!("StringFix Visitor")
-            // Ok(Self::Value::new())
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, PartialOrd, PartialEq)]
-pub struct EnumVariant {
-    ix: u16,
-    name: String,
-}
-
-impl EnumVariant {
-    pub fn new(ix: u16, name: impl Into<String>) -> Self {
-        Self { ix, name: name.into() }
-    }
-
-    pub fn ix(&self) -> u16 {
-        self.ix
-    }
-
-    pub fn name_string(&self) -> String {
-        self.name.clone()
-    }
-
-    pub fn into_parts(self) -> (u16, String) {
-        (self.ix, self.name)
-    }
-}
-
-impl Default for EnumVariant {
-    fn default() -> Self {
-        Self {
-            ix: u16::MAX,
-            name: String::new(),
-        }
-    }
-}
-
-impl fmt::Display for EnumVariant {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(fmt, "{}({})", self.ix, self.name)
-    }
-}
-
-impl AppendToUrl for ScalarType {
-    fn append_to_url(&self, url: &mut Url) {
-        let mut g = url.query_pairs_mut();
-        g.append_pair("scalarType", self.to_variant_str());
-    }
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct SfDatabuffer {
-    pub data_base_path: PathBuf,
-    pub ksprefix: String,
-    pub splits: Option<Vec<u64>>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ArchiverAppliance {
-    pub data_base_paths: Vec<PathBuf>,
-}
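// Behavior sketch for StringFix<N> above: ASCII input longer than the fixed
// capacity N is truncated to N chars; shorter input round-trips unchanged.
fn string_fix_demo() {
    let s: StringFix<4> = StringFix::from("abcdef");
    assert_eq!(s.string(), "abcd");
    let s: StringFix<8> = StringFix::from("abc");
    assert_eq!(String::from(s), "abc");
}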
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ChannelArchiver {
-    pub data_base_paths: Vec<PathBuf>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct Node {
-    pub host: String,
-    // TODO for `listen` and the ports, would be great to allow a default on Cluster level.
-    pub listen: Option<String>,
-    #[serde(deserialize_with = "serde_port::port_from_any")]
-    pub port: u16,
-    #[serde(deserialize_with = "serde_port::port_from_any", default)]
-    pub port_raw: u16,
-    pub sf_databuffer: Option<SfDatabuffer>,
-    pub archiver_appliance: Option<ArchiverAppliance>,
-    pub channel_archiver: Option<ChannelArchiver>,
-    pub prometheus_api_bind: Option<SocketAddr>,
-}
-
-mod serde_port {
-    use super::*;
-
-    struct Vis;
-
-    impl<'de> serde::de::Visitor<'de> for Vis {
-        type Value = u16;
-
-        fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-            write!(fmt, "a tcp port number, in numeric or string form.")
-        }
-
-        fn visit_u64<E>(self, val: u64) -> Result<Self::Value, E>
-        where
-            E: serde::de::Error,
-        {
-            if val > u16::MAX as u64 {
-                Err(serde::de::Error::invalid_type(
-                    serde::de::Unexpected::Unsigned(val),
-                    &self,
-                ))
-            } else {
-                self.visit_i64(val as i64)
-            }
-        }
-
-        fn visit_i64<E>(self, val: i64) -> Result<Self::Value, E>
-        where
-            E: serde::de::Error,
-        {
-            if val < 1 || val > u16::MAX as i64 {
-                Err(serde::de::Error::invalid_type(
-                    serde::de::Unexpected::Signed(val),
-                    &self,
-                ))
-            } else {
-                Ok(val as u16)
-            }
-        }
-
-        fn visit_str<E>(self, val: &str) -> Result<Self::Value, E>
-        where
-            E: serde::de::Error,
-        {
-            match val.parse::<u16>() {
-                Err(_) => Err(serde::de::Error::invalid_type(serde::de::Unexpected::Str(val), &self)),
-                Ok(v) => Ok(v),
-            }
-        }
-    }
-
-    pub fn port_from_any<'de, D>(de: D) -> Result<u16, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        // We expect to use json or yaml only.
-        de.deserialize_any(Vis)
-    }
-
-    #[test]
-    fn test_port_from_any() {
-        #[derive(Deserialize)]
-        struct Conf {
-            #[serde(deserialize_with = "port_from_any")]
-            port: u16,
-        }
-        let conf: Conf = serde_json::from_str(r#"{"port":"9192"}"#).unwrap();
-        assert_eq!(conf.port, 9192);
-        let conf: Conf = serde_json::from_str(r#"{"port":9194}"#).unwrap();
-        assert_eq!(conf.port, 9194);
-    }
-}
-
-impl Node {
-    // TODO needed? Could `sf_databuffer` be None?
-    pub fn dummy() -> Self {
-        Self {
-            host: "dummy".into(),
-            listen: None,
-            port: 4444,
-            port_raw: 4444,
-            sf_databuffer: Some(SfDatabuffer {
-                data_base_path: PathBuf::new(),
-                ksprefix: "daqlocal".into(),
-                splits: None,
-            }),
-            archiver_appliance: None,
-            channel_archiver: None,
-            prometheus_api_bind: None,
-        }
-    }
-
-    // TODO should a node know how to reach itself? Because, depending on network
-    // topology (proxies etc.) the way to reach a node depends on the tuple `(node, client)`.
-    pub fn baseurl(&self) -> Url {
-        // TODO should be able to decide whether we are reachable via tls.
-        // So far this does not matter because this `baseurl` is used for internal communication
-        // and is always non-tls.
-        format!("http://{}:{}", self.host, self.port).parse().unwrap()
-    }
-
-    pub fn listen(&self) -> String {
-        match &self.listen {
-            Some(x) => x.into(),
-            None => "0.0.0.0".into(),
-        }
-    }
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct Database {
-    pub host: String,
-    pub port: u16,
-    pub user: String,
-    pub pass: String,
-    pub name: String,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ScyllaConfig {
-    pub hosts: Vec<String>,
-    pub keyspace: String,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct Cluster {
-    pub backend: String,
-    pub nodes: Vec<Node>,
-    pub database: Database,
-    #[serde(rename = "runMapPulse", default)]
-    pub run_map_pulse_task: bool,
-    #[serde(rename = "isCentralStorage", default)]
-    pub is_central_storage: bool,
-    #[serde(rename = "fileIoBufferSize", default)]
-    pub file_io_buffer_size: FileIoBufferSize,
-    scylla: Option<ScyllaConfig>,
-    #[serde(rename = "scylla_st")]
-    scylla_st: Option<ScyllaConfig>,
-    #[serde(rename = "scylla_mt")]
-    scylla_mt: Option<ScyllaConfig>,
-    #[serde(rename = "scylla_lt")]
-    scylla_lt: Option<ScyllaConfig>,
-    cache_scylla: Option<ScyllaConfig>,
-    pub announce_backends: Option<Vec<String>>,
-}
-
-impl Cluster {
-    pub fn decompress_default(&self) -> bool {
-        if self.is_central_storage {
-            false
-        } else {
-            true
-        }
-    }
-
-    pub fn scylla_st(&self) -> Option<&ScyllaConfig> {
-        self.scylla_st.as_ref().map_or_else(|| self.scylla.as_ref(), Some)
-    }
-
-    pub fn scylla_mt(&self) -> Option<&ScyllaConfig> {
-        self.scylla_mt.as_ref()
-    }
-
-    pub fn scylla_lt(&self) -> Option<&ScyllaConfig> {
-        self.scylla_lt.as_ref()
-    }
-
-    pub fn test_00() -> Self {
-        Self {
-            backend: "testbackend-00".into(),
-            nodes: Vec::new(),
-            database: Database {
-                name: "".into(),
-                host: "".into(),
-                port: 5432,
-                user: "".into(),
-                pass: "".into(),
-            },
-            run_map_pulse_task: false,
-            is_central_storage: false,
-            file_io_buffer_size: FileIoBufferSize(1024 * 8),
-            scylla: None,
-            scylla_st: None,
-            scylla_mt: None,
-            scylla_lt: None,
-            cache_scylla: None,
-            announce_backends: None,
-        }
-    }
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct NodeConfig {
-    pub name: String,
-    pub cluster: Cluster,
-}
-
-impl NodeConfig {
-    pub fn get_node(&self) -> Option<(&Node, usize)> {
-        if self.name.contains(":") {
-            let mut i1 = 0;
-            for n in &self.cluster.nodes {
-                if self.name == format!("{}:{}", n.host, n.port) {
-                    return Some((n, i1));
-                }
-                i1 += 1;
-            }
-        } else {
-            let mut i1 = 0;
-            for n in &self.cluster.nodes {
-                if self.name == format!("{}", n.host) {
-                    return Some((n, i1));
-                }
-                i1 += 1;
-            }
-        }
-        None
-    }
-}
-
-#[derive(Clone, Debug)]
-pub struct ServiceVersion {
-    pub major: u32,
-    pub minor: u32,
-    pub patch: u32,
-    pub pre: Option<String>,
-}
-
-impl fmt::Display for ServiceVersion {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        match &self.pre {
-            Some(pre) => write!(fmt, "{}.{}.{}-{}", self.major, self.minor, self.patch, pre),
-            None => write!(fmt, "{}.{}.{}", self.major, self.minor, self.patch),
-        }
-    }
-}
-
-#[derive(Clone, Debug)]
-pub struct NodeConfigCached {
-    pub node_config: NodeConfig,
-    pub node: Node,
-    pub ix: usize,
-}
-
-impl From<NodeConfig> for Result<NodeConfigCached, Error> {
-    fn from(k: NodeConfig) -> Self {
-        match k.get_node() {
-            Some((node, ix)) => {
-                let ret = NodeConfigCached {
-                    node: node.clone(),
-                    node_config: k,
-                    ix,
-                };
-                Ok(ret)
-            }
-            None => Err(Error::with_msg(format!("can not find node {:?}", k))),
-        }
-    }
-}
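// Resolution sketch for NodeConfig::get_node above: the node name may be
// given as "host" or "host:port". Assumes same-module visibility, since the
// struct-update syntax touches the private scylla fields of Cluster.
fn get_node_demo() {
    let mut node = Node::dummy();
    node.host = "data-api".into();
    let nc = NodeConfig {
        name: "data-api:4444".into(),
        cluster: Cluster {
            nodes: vec![node],
            ..Cluster::test_00()
        },
    };
    let (n, ix) = nc.get_node().unwrap();
    assert_eq!((n.port, ix), (4444, 0));
}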
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct NodeStatusArchiverAppliance {
-    pub readable: Vec<(PathBuf, bool)>,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct TableSizes {
-    pub sizes: Vec<(String, String)>,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct NodeStatusSub {
-    pub url: String,
-    pub status: Result<NodeStatus, Error>,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct NodeStatus {
-    pub name: String,
-    pub version: String,
-    #[serde(default, skip_serializing_if = "is_false")]
-    pub is_sf_databuffer: bool,
-    #[serde(default, skip_serializing_if = "is_false")]
-    pub is_archiver_engine: bool,
-    #[serde(default, skip_serializing_if = "is_false")]
-    pub is_archiver_appliance: bool,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub database_size: Option<Result<u64, Error>>,
-    //#[serde(default, skip_serializing_if = "Option::is_none")]
-    //pub table_sizes: Option<Result<TableSizes, Error>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub archiver_appliance_status: Option<NodeStatusArchiverAppliance>,
-    #[serde(default, skip_serializing_if = "VecDeque::is_empty")]
-    pub subs: VecDeque<NodeStatusSub>,
-}
-
-// Describes a swissfel-databuffer style "channel" which is a time-series with a unique name within a "backend".
-// Also the concept of "backend" could be split into "facility" and some optional other identifier
-// for cases like e.g. post-mortem, or to differentiate between channel-access and bsread for cases where
-// the same channel-name is delivered via different methods.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct SfDbChannel {
-    series: Option<u64>,
-    // "backend" is currently used in the existing systems for multiple purposes:
-    // it can indicate the facility (eg. sf-databuffer, hipa, ...) but also
-    // some special subsystem (eg. sf-rf-databuffer).
-    backend: String,
-    name: String,
-    kind: SeriesKind,
-}
-
-impl SfDbChannel {
-    pub fn from_full<T: Into<String>, U: Into<String>>(
-        backend: T,
-        series: Option<u64>,
-        name: U,
-        kind: SeriesKind,
-    ) -> Self {
-        Self {
-            backend: backend.into(),
-            series,
-            name: name.into(),
-            kind,
-        }
-    }
-
-    pub fn from_name<T: Into<String>, U: Into<String>>(backend: T, name: U) -> Self {
-        Self {
-            backend: backend.into(),
-            series: None,
-            name: name.into(),
-            kind: SeriesKind::default(),
-        }
-    }
-
-    pub fn backend(&self) -> &str {
-        &self.backend
-    }
-
-    pub fn series(&self) -> Option<u64> {
-        self.series
-    }
-
-    pub fn name(&self) -> &str {
-        &self.name
-    }
-
-    pub fn kind(&self) -> SeriesKind {
-        self.kind.clone()
-    }
-
-    pub fn set_series(&mut self, series: u64) {
-        self.series = Some(series);
-    }
-}
-
-impl fmt::Display for SfDbChannel {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            fmt,
-            "SfDbChannel {{ series: {:?}, backend: {:?}, name: {:?}, kind: {:?} }}",
-            self.series, self.backend, self.name, self.kind
-        )
-    }
-}
-
-impl FromUrl for SfDbChannel {
-    type Error = NetpodError;
-
-    fn from_url(url: &Url) -> Result<Self, Self::Error> {
-        let pairs = get_url_query_pairs(url);
-        Self::from_pairs(&pairs)
-    }
-
-    fn from_pairs(pairs: &BTreeMap<String, String>) -> Result<Self, Self::Error> {
-        let ret = SfDbChannel {
-            backend: pairs.get("backend").ok_or_else(|| NetpodError::MissingBackend)?.into(),
-            name: pairs
-                .get("channelName")
-                .map(String::from)
-                .unwrap_or(String::new())
-                .into(),
-            series: pairs
-                .get("seriesId")
-                .and_then(|x| x.parse::<u64>().map_or(None, |x| Some(x))),
-            kind: SeriesKind::from_pairs(pairs)?,
-        };
-        if ret.name.is_empty() && ret.series.is_none() {
-            return Err(NetpodError::NoSeriesNoName);
-        }
-        Ok(ret)
-    }
-}
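// Validation sketch for SfDbChannel::from_pairs above (uses the BTreeMap
// import at the top of this file): a query naming a backend but neither a
// channelName nor a seriesId is rejected.
fn sf_db_channel_needs_name_or_series() {
    let mut pairs = BTreeMap::new();
    pairs.insert("backend".to_string(), "sf-databuffer".to_string());
    assert!(matches!(
        SfDbChannel::from_pairs(&pairs),
        Err(NetpodError::NoSeriesNoName)
    ));
}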
-
-impl AppendToUrl for SfDbChannel {
-    fn append_to_url(&self, url: &mut Url) {
-        let mut g = url.query_pairs_mut();
-        g.append_pair("backend", &self.backend);
-        if self.name().len() > 0 {
-            g.append_pair("channelName", &self.name);
-        }
-        if let Some(series) = self.series {
-            g.append_pair("seriesId", &series.to_string());
-        }
-        drop(g);
-        self.kind.append_to_url(url);
-    }
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ChannelTyped {
-    pub channel: SfDbChannel,
-    pub scalar_type: ScalarType,
-    pub shape: Shape,
-}
-
-impl ChannelTyped {
-    pub fn channel(&self) -> &SfDbChannel {
-        &self.channel
-    }
-}
-
-// Describes a Scylla-based "daqbuffer" style time series.
-// The tuple `(backend, series)` is supposed to be unique.
-// Contains also the name because it is so useful.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct DaqbufSeries {
-    pub series: u64,
-    // "backend" is currently used in the existing systems for multiple purposes:
-    // it can indicate the facility (eg. sf-databuffer, hipa, ...) but also
-    // some special subsystem (eg. sf-rf-databuffer).
-    pub backend: String,
-    // This name is only for better user-facing messages. The (backend, series-id) is the identifier.
-    pub name: String,
-}
-
-impl DaqbufSeries {
-    pub fn series(&self) -> u64 {
-        self.series
-    }
-
-    pub fn backend(&self) -> &str {
-        &self.backend
-    }
-
-    pub fn name(&self) -> &str {
-        &self.name
-    }
-}
-
-impl FromUrl for DaqbufSeries {
-    type Error = NetpodError;
-
-    fn from_url(url: &Url) -> Result<Self, Self::Error> {
-        let pairs = get_url_query_pairs(url);
-        Self::from_pairs(&pairs)
-    }
-
-    fn from_pairs(pairs: &BTreeMap<String, String>) -> Result<Self, Self::Error> {
-        let ret = DaqbufSeries {
-            series: pairs
-                .get("seriesId")
-                .ok_or_else(|| NetpodError::MissingSeries)
-                .map(|x| x.parse::<u64>())??,
-            backend: pairs.get("backend").ok_or_else(|| NetpodError::MissingBackend)?.into(),
-            name: pairs
-                .get("channelName")
-                .map(String::from)
-                .unwrap_or(String::new())
-                .into(),
-        };
-        Ok(ret)
-    }
-}
-
-impl AppendToUrl for DaqbufSeries {
-    fn append_to_url(&self, url: &mut Url) {
-        let mut g = url.query_pairs_mut();
-        g.append_pair("backend", &self.backend);
-        if self.name().len() > 0 {
-            g.append_pair("channelName", &self.name);
-        }
-        g.append_pair("seriesId", &self.series.to_string());
-    }
-}
-
-pub struct HostPort {
-    pub host: String,
-    pub port: u16,
-}
-
-impl HostPort {
-    pub fn new<S: Into<String>>(host: S, port: u16) -> Self {
-        Self {
-            host: host.into(),
-            port,
-        }
-    }
-
-    pub fn from_node(node: &Node) -> Self {
-        Self {
-            host: node.host.clone(),
-            port: node.port,
-        }
-    }
-}
-
-#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
-pub struct FilePos {
-    pub pos: u64,
-}
-
-impl From<FilePos> for u64 {
-    fn from(k: FilePos) -> Self {
-        k.pos
-    }
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, PartialOrd, Eq, Ord)]
-pub enum ByteOrder {
-    Little,
-    Big,
-}
-
-impl ByteOrder {
-    pub fn from_dtype_flags(flags: u8) -> Self {
-        if flags & 0x20 == 0 {
-            Self::Little
-        } else {
-            Self::Big
-        }
-    }
-
-    pub fn from_bsread_str(s: &str) -> Result<Self, Error> {
-        match s {
-            "little" => Ok(ByteOrder::Little),
-            "big" => Ok(ByteOrder::Big),
-            _ => Err(Error::with_msg_no_trace(format!(
-                "ByteOrder::from_bsread_str can not understand {}",
-                s
-            ))),
-        }
-    }
-
-    pub fn is_le(&self) -> bool {
-        if let Self::Little = self {
-            true
-        } else {
-            false
-        }
-    }
-
-    pub fn is_be(&self) -> bool {
-        if let Self::Big = self {
-            true
-        } else {
-            false
-        }
-    }
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum GenVar {
-    Default,
-    TimeWeight,
-    ConstRegular,
-}
-
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Serialize, Deserialize)]
-pub enum ShapeOld {
-    Scalar,
-    Wave(u32),
-    Image(u32, u32),
-}
-
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
-pub enum Shape {
-    Scalar,
-    Wave(u32),
-    Image(u32, u32),
-}
Image(u32, u32), -} - -impl fmt::Display for Shape { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self, fmt) - } -} - -mod serde_shape { - use super::*; - - impl Serialize for Shape { - fn serialize(&self, ser: S) -> Result - where - S::Error: serde::ser::Error, - { - use Shape::*; - match self { - Scalar => ser.collect_seq(std::iter::empty::()), - Wave(a) => ser.collect_seq([*a].iter()), - Image(a, b) => ser.collect_seq([*a, *b].iter()), - } - } - } - - struct ShapeVis; - - impl<'de> serde::de::Visitor<'de> for ShapeVis { - type Value = Shape; - - fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("a vector describing the shape") - } - - // TODO unused, do not support deser from any for Shape - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - if v == "Scalar" { - Ok(Shape::Scalar) - } else { - Err(E::custom(format!("unexpected value: {v:?}"))) - } - } - - // TODO unused, do not support deser from any for Shape - fn visit_map(self, mut map: A) -> Result - where - A: serde::de::MapAccess<'de>, - { - use serde::de::Error; - if let Some(key) = map.next_key::()? { - if key == "Wave" { - let n: u32 = map.next_value()?; - Ok(Shape::Wave(n)) - } else if key == "Image" { - let a = map.next_value::<[u32; 2]>()?; - Ok(Shape::Image(a[0], a[1])) - } else { - Err(A::Error::custom(format!("unexpected key {key:?}"))) - } - } else { - Err(A::Error::custom(format!("invalid shape format"))) - } - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'de>, - { - let mut a = vec![]; - while let Some(item) = seq.next_element()? { - let n: u32 = item; - a.push(n); - } - if a.len() == 0 { - Ok(Shape::Scalar) - } else if a.len() == 1 { - Ok(Shape::Wave(a[0])) - } else if a.len() == 2 { - Ok(Shape::Image(a[0], a[1])) - } else { - use serde::de::Error; - Err(A::Error::custom(format!("bad shape"))) - } - } - } - - impl<'de> Deserialize<'de> for Shape { - fn deserialize(de: D) -> Result - where - D: serde::Deserializer<'de>, - { - let res = de.deserialize_seq(ShapeVis); - res - } - } -} - -impl Shape { - pub fn from_sf_databuffer_raw(v: &Option>) -> Result { - let ret = match v { - Some(a) => match a.len() { - 0 => Shape::Scalar, - 1 => Shape::Wave(a[0]), - 2 => Shape::Image(a[0], a[1]), - _ => return Err(Error::with_msg_no_trace("can not understand sf databuffer shape spec")), - }, - None => Shape::Scalar, - }; - Ok(ret) - } - - pub fn from_bsread_jsval(v: &JsVal) -> Result { - match v { - JsVal::Array(v) => match v.len() { - 0 => Ok(Shape::Scalar), - 1 => match &v[0] { - JsVal::Number(v) => match v.as_u64() { - Some(0) | Some(1) => Ok(Shape::Scalar), - Some(v) => Ok(Shape::Wave(v as u32)), - None => Err(Error::with_msg_no_trace(format!( - "Shape from_bsread_jsval can not understand {:?}", - v - ))), - }, - _ => Err(Error::with_msg_no_trace(format!( - "Shape from_bsread_jsval can not understand {:?}", - v - ))), - }, - _ => Err(Error::with_msg_no_trace(format!( - "Shape from_bsread_jsval can not understand {:?}", - v - ))), - }, - _ => Err(Error::with_msg_no_trace(format!( - "Shape from_bsread_jsval can not understand {:?}", - v - ))), - } - } - - // TODO use simply a list to represent all shapes: empty, or with 1 or 2 entries. 
-    // TODO use simply a list to represent all shapes: empty, or with 1 or 2 entries.
-    pub fn from_db_jsval(v: &JsVal) -> Result<Self, Error> {
-        match v {
-            JsVal::String(s) => {
-                if s == "Scalar" {
-                    Ok(Shape::Scalar)
-                } else {
-                    Err(Error::with_msg_no_trace(format!(
-                        "Shape from_db_jsval can not understand {:?}",
-                        v
-                    )))
-                }
-            }
-            JsVal::Object(j) => match j.get("Wave") {
-                Some(JsVal::Number(j)) => Ok(Shape::Wave(j.as_u64().ok_or_else(|| {
-                    Error::with_msg_no_trace(format!("Shape from_db_jsval can not understand {:?}", v))
-                })? as u32)),
-                _ => Err(Error::with_msg_no_trace(format!(
-                    "Shape from_db_jsval can not understand {:?}",
-                    v
-                ))),
-            },
-            _ => Err(Error::with_msg_no_trace(format!(
-                "Shape from_db_jsval can not understand {:?}",
-                v
-            ))),
-        }
-    }
-
-    pub fn from_dims_str(s: &str) -> Result<Self, Error> {
-        let a: Vec<u32> = serde_json::from_str(s)?;
-        if a.len() == 0 {
-            Ok(Shape::Scalar)
-        } else if a.len() == 1 {
-            Ok(Shape::Wave(a[0]))
-        } else if a.len() == 2 {
-            Ok(Shape::Image(a[0], a[1]))
-        } else {
-            Err(Error::with_public_msg_no_trace("only scalar, 1d and 2d supported"))
-        }
-    }
-
-    pub fn from_scylla_shape_dims(v: &[i32]) -> Result<Self, Error> {
-        let res = if v.len() == 0 {
-            Shape::Scalar
-        } else if v.len() == 1 {
-            Shape::Wave(v[0] as u32)
-        } else if v.len() == 2 {
-            Shape::Image(v[0] as u32, v[1] as u32)
-        } else {
-            return Err(Error::with_public_msg_no_trace(format!("bad shape_dims {v:?}")));
-        };
-        Ok(res)
-    }
-
-    pub fn from_ca_count(k: u32) -> Result<Self, Error> {
-        if k == 0 {
-            Err(Error::with_public_msg_no_trace(format!(
-                "zero sized ca data count {k:?}"
-            )))
-        } else if k == 1 {
-            Ok(Shape::Scalar)
-        } else if k <= 1024 * 3000 {
-            Ok(Shape::Wave(k))
-        } else {
-            Err(Error::with_public_msg_no_trace(format!(
-                "too large ca data count {k:?}"
-            )))
-        }
-    }
-
-    pub fn to_ca_count(&self) -> Result<u32, Error> {
-        use Shape::*;
-        let res = match self {
-            Scalar => 1,
-            Wave(n) => *n as u32,
-            _ => {
-                return Err(Error::with_msg_no_trace(format!(
-                    "can not represent {self:?} as CA count"
-                )))
-            }
-        };
-        Ok(res)
-    }
-
-    pub fn to_scylla_vec(&self) -> Vec<i32> {
-        use Shape::*;
-        match self {
-            Scalar => Vec::new(),
-            Wave(n) => vec![*n as i32],
-            Image(n, m) => vec![*n as i32, *m as i32],
-        }
-    }
-
-    pub fn to_u32_vec(&self) -> Vec<u32> {
-        use Shape::*;
-        match self {
-            Scalar => Vec::new(),
-            Wave(n) => vec![*n as u32],
-            Image(n, m) => vec![*n as u32, *m as u32],
-        }
-    }
-
-    pub fn to_json_value(&self) -> JsVal {
-        use serde_json::Number;
-        match self {
-            Shape::Scalar => JsVal::Array(Vec::new()),
-            Shape::Wave(n) => JsVal::Array(vec![JsVal::Number(Number::from(*n))]),
-            Shape::Image(n, m) => JsVal::Array(vec![JsVal::Number(Number::from(*n)), JsVal::Number(Number::from(*m))]),
-        }
-    }
-
-    pub fn from_url_str(s: &str) -> Result<Self, NetpodError> {
-        let ret = serde_json::from_str(s)?;
-        Ok(ret)
-    }
-
-    pub fn ele_count(&self) -> u64 {
-        match self {
-            Shape::Scalar => 1,
-            Shape::Wave(n) => *n as u64,
-            Shape::Image(n, m) => *n as u64 * *m as u64,
-        }
-    }
-}
-
-impl AppendToUrl for Shape {
-    fn append_to_url(&self, url: &mut Url) {
-        let mut g = url.query_pairs_mut();
-        g.append_pair("shape", &format!("{:?}", self.to_scylla_vec()));
-    }
-}
-
-#[test]
-fn test_shape_serde() {
-    let s = serde_json::to_string(&Shape::Image(42, 43)).unwrap();
-    assert_eq!(s, r#"[42,43]"#);
-    let s = serde_json::to_string(&ShapeOld::Scalar).unwrap();
-    assert_eq!(s, r#""Scalar""#);
-    let s = serde_json::to_string(&ShapeOld::Wave(8)).unwrap();
-    assert_eq!(s, r#"{"Wave":8}"#);
-    let s = serde_json::to_string(&ShapeOld::Image(42, 43)).unwrap();
-    assert_eq!(s, r#"{"Image":[42,43]}"#);
-    let s: ShapeOld = serde_json::from_str(r#""Scalar""#).unwrap();
-    assert_eq!(s, ShapeOld::Scalar);
-    let s: ShapeOld = serde_json::from_str(r#"{"Wave": 123}"#).unwrap();
-    assert_eq!(s, ShapeOld::Wave(123));
-    let s: ShapeOld = serde_json::from_str(r#"{"Image":[77, 78]}"#).unwrap();
-    assert_eq!(s, ShapeOld::Image(77, 78));
-    let s: Shape = serde_json::from_str(r#"[]"#).unwrap();
-    assert_eq!(s, Shape::Scalar);
-    let s: Shape = serde_json::from_str(r#"[12]"#).unwrap();
-    assert_eq!(s, Shape::Wave(12));
-    let s: Shape = serde_json::from_str(r#"[12, 13]"#).unwrap();
-    assert_eq!(s, Shape::Image(12, 13));
-}
-
-pub mod timeunits {
-    pub const MU: u64 = 1000;
-    pub const MS: u64 = MU * 1000;
-    pub const SEC: u64 = MS * 1000;
-    pub const MIN: u64 = SEC * 60;
-    pub const HOUR: u64 = MIN * 60;
-    pub const DAY: u64 = HOUR * 24;
-}
-
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-pub enum Dim0Kind {
-    Time,
-    Pulse,
-}
-
-pub trait Dim0Index: Clone + fmt::Debug + PartialOrd {
-    fn add(&self, v: &Self) -> Self;
-    fn sub(&self, v: &Self) -> Self;
-    fn sub_n(&self, v: u64) -> Self;
-    fn times(&self, x: u64) -> Self;
-    fn div_n(&self, n: u64) -> Self;
-    fn div_v(&self, v: &Self) -> u64;
-    fn as_u64(&self) -> u64;
-    fn series_range(a: Self, b: Self) -> SeriesRange;
-    fn prebin_bin_len_opts() -> Vec<Self>;
-    fn prebin_patch_len_for(i: usize) -> Self;
-    fn to_pre_binned_patch_range_enum(
-        &self,
-        bin_count: u64,
-        patch_offset: u64,
-        patch_count: u64,
-    ) -> PreBinnedPatchRangeEnum;
-    fn binned_bin_len_opts() -> Vec<Self>;
-    fn to_binned_range_enum(&self, bin_off: u64, bin_cnt: u64) -> BinnedRangeEnum;
-}
-
-pub trait Dim0Range: Clone + fmt::Debug + PartialOrd {}
-
-pub struct Dim0RangeValue<T>
-where
-    T: Dim0Index,
-{
-    pub ix: [T; 2],
-}
-
-#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct DtNano(u64);
-
-impl DtNano {
-    pub const fn from_ns(ns: u64) -> Self {
-        Self(ns)
-    }
-
-    pub const fn from_ms(ms: u64) -> Self {
-        Self(1000000 * ms)
-    }
-
-    pub const fn ns(&self) -> u64 {
-        self.0
-    }
-
-    pub const fn ms_u64(&self) -> u64 {
-        self.0 / 1000000
-    }
-
-    pub fn to_i64(&self) -> i64 {
-        self.0 as i64
-    }
-
-    pub fn add(self, rhs: Self) -> Self {
-        Self(self.0 + rhs.0)
-    }
-
-    pub fn fraction_of(self, rhs: Self) -> f32 {
-        self.0 as f32 / rhs.0 as f32
-    }
-}
-
-impl fmt::Display for DtNano {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        let sec = self.0 / SEC;
-        let ms = (self.0 - SEC * sec) / MS;
-        let ns = self.0 - SEC * sec - MS * ms;
-        // fmt.debug_tuple("DtNano").field(&sec).field(&ms).field(&ns).finish()
-        write!(fmt, "DtNano {{ sec {} ms {} ns {} }}", sec, ms, ns)
-    }
-}
-
-mod dt_nano_serde {
-    use super::DtNano;
-    use de::Visitor;
-    use serde::de;
-    use serde::Deserialize;
-    use serde::Serialize;
-    use std::fmt;
-
-    impl Serialize for DtNano {
-        fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
-        where
-            S: serde::Serializer,
-        {
-            ser.serialize_u64(self.ns())
-        }
-    }
-
-    struct Vis1;
-
-    impl<'de> Visitor<'de> for Vis1 {
-        type Value = DtNano;
-
-        fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-            write!(fmt, "an integer of nanoseconds")
-        }
-
-        fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
-        where
-            E: de::Error,
-        {
-            Ok(DtNano::from_ns(v))
-        }
-    }
-
-    impl<'de> Deserialize<'de> for DtNano {
-        fn deserialize<D>(de: D) -> Result<Self, D::Error>
-        where
-            D: serde::Deserializer<'de>,
-        {
-            de.deserialize_u64(Vis1)
-        }
-    }
-}
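// Unit sketch: the timeunits constants above are nanosecond multipliers
// (MU is one microsecond expressed in ns), so DtNano arithmetic stays in
// integer nanoseconds throughout.
fn timeunit_demo() {
    use timeunits::{DAY, MS, SEC};
    assert_eq!(SEC, 1_000_000_000);
    let dt = DtNano::from_ns(3 * SEC + 250 * MS);
    assert_eq!(dt.ms_u64(), 3250);
    assert_eq!(DtNano::from_ms(DAY / MS).ns(), DAY);
}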
-
-#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct DtMs(u64);
-
-impl DtMs {
-    pub const fn from_nano_u64(x: u64) -> Self {
-        Self(x / 1000000)
-    }
-
-    pub const fn from_ms_u64(x: u64) -> Self {
-        Self(x)
-    }
-
-    pub const fn ms(&self) -> u64 {
-        self.0
-    }
-
-    pub const fn ns(&self) -> u64 {
-        1000000 * self.0
-    }
-
-    pub const fn dt_ns(&self) -> DtNano {
-        DtNano::from_ms(self.0)
-    }
-
-    pub const fn to_i64(&self) -> i64 {
-        self.0 as i64
-    }
-}
-
-impl fmt::Display for DtMs {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        let dur = Duration::from_millis(self.ms());
-        write!(fmt, "{}", humantime::format_duration(dur))
-    }
-}
-
-#[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct TsNano(u64);
-
-mod ts_nano_ser {
-    use super::TsNano;
-    use crate::timeunits::SEC;
-    use chrono::TimeZone;
-    use chrono::Utc;
-    use de::Visitor;
-    use serde::de;
-    use serde::Deserialize;
-    use serde::Serialize;
-    use std::fmt;
-
-    impl Serialize for TsNano {
-        fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
-        where
-            S: serde::Serializer,
-        {
-            if false {
-                let ts = Utc.timestamp_opt((self.0 / SEC) as i64, (self.0 % SEC) as u32);
-                let value = format!("{}", ts.earliest().unwrap());
-                ser.serialize_newtype_struct("TsNano", &value)
-            } else {
-                ser.serialize_u64(self.ns())
-            }
-        }
-    }
-
-    struct Vis1;
-
-    impl<'de> Visitor<'de> for Vis1 {
-        type Value = TsNano;
-
-        fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-            write!(fmt, "integer nanoseconds since unix epoch")
-        }
-
-        fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
-        where
-            E: de::Error,
-        {
-            Ok(TsNano::from_ns(v))
-        }
-    }
-
-    impl<'de> Deserialize<'de> for TsNano {
-        fn deserialize<D>(de: D) -> Result<Self, D::Error>
-        where
-            D: serde::Deserializer<'de>,
-        {
-            de.deserialize_u64(Vis1)
-        }
-    }
-}
-
-impl TsNano {
-    pub const fn from_ns(ns: u64) -> Self {
-        Self(ns)
-    }
-
-    pub const fn from_ms(ms: u64) -> Self {
-        Self(1000000 * ms)
-    }
-
-    pub const fn ns(&self) -> u64 {
-        self.0
-    }
-
-    pub const fn ms(&self) -> u64 {
-        self.0 / 1000000
-    }
-
-    pub const fn add_dt_nano(self, v: DtNano) -> Self {
-        Self(self.0 + v.0)
-    }
-
-    pub const fn sub(self, v: DtNano) -> Self {
-        Self(self.0 - v.0)
-    }
-
-    pub const fn delta(self, v: Self) -> DtNano {
-        DtNano(self.0 - v.0)
-    }
-
-    pub const fn add_ns(self, v: u64) -> Self {
-        Self(self.0 + v)
-    }
-
-    pub const fn mul(self, v: u64) -> Self {
-        Self(self.0 * v)
-    }
-
-    pub const fn div(self, v: u64) -> Self {
-        Self(self.0 / v)
-    }
-
-    pub const fn to_ts_ms(self) -> TsMs {
-        TsMs::from_ms_u64(self.ms())
-    }
-
-    pub const fn to_dt_nano(self) -> DtNano {
-        DtNano::from_ns(self.0)
-    }
-
-    pub const fn to_dt_ms(self) -> DtMs {
-        DtMs::from_ms_u64(self.ms())
-    }
-
-    pub fn from_system_time(st: SystemTime) -> Self {
-        let tsunix = st.duration_since(UNIX_EPOCH).unwrap_or(Duration::ZERO);
-        let x = tsunix.as_secs() * 1_000_000_000 + tsunix.subsec_nanos() as u64;
-        Self::from_ns(x)
-    }
-
-    pub fn fmt(&self) -> TsNanoFmt {
-        TsNanoFmt { ts: self.clone() }
-    }
-}
-
-impl fmt::Debug for TsNano {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        let ts = Utc
-            .timestamp_opt((self.0 / SEC) as i64, (self.0 % SEC) as u32)
-            .earliest()
-            .unwrap_or(Default::default());
-        write!(fmt, "TsNano {{ {} }}", ts.format(DATETIME_FMT_3MS))
-    }
-}
-
-impl fmt::Display for TsNano {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        let ts = Utc
-            .timestamp_opt((self.0 / SEC) as i64, (self.0 % SEC) as u32)
-            .earliest()
-            .unwrap_or(Default::default());
-        ts.format(DATETIME_FMT_3MS).fmt(fmt)
-    }
-}
-
-pub struct TsNanoFmt {
-    ts: TsNano,
-}
-
-impl fmt::Display for TsNanoFmt {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        chrono::DateTime::from_timestamp_millis(self.ts.ms() as i64)
-            .unwrap()
-            .format(DATETIME_FMT_3MS)
-            .fmt(fmt)
-    }
-}
-
-impl fmt::Debug for TsNanoFmt {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Display::fmt(self, fmt)
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
-pub struct PulseId(u64);
-
-impl PulseId {
-    pub fn from_id(id: u64) -> Self {
-        Self(id)
-    }
-}
-
-impl Dim0Index for TsNano {
-    fn add(&self, v: &Self) -> Self {
-        Self(self.0 + v.0)
-    }
-
-    fn sub(&self, v: &Self) -> Self {
-        Self(self.0 - v.0)
-    }
-
-    fn sub_n(&self, v: u64) -> Self {
-        Self(self.0 - v)
-    }
-
-    fn times(&self, x: u64) -> Self {
-        Self(self.0 * x)
-    }
-
-    fn div_n(&self, n: u64) -> Self {
-        Self(self.0 / n)
-    }
-
-    fn div_v(&self, v: &Self) -> u64 {
-        self.0 / v.0
-    }
-
-    fn as_u64(&self) -> u64 {
-        self.0
-    }
-
-    fn series_range(a: Self, b: Self) -> SeriesRange {
-        SeriesRange::TimeRange(NanoRange { beg: a.0, end: b.0 })
-    }
-
-    fn prebin_bin_len_opts() -> Vec<Self> {
-        PREBIN_TIME_BIN_LEN_VAR0.iter().map(|&x| Self(x)).collect()
-    }
-
-    fn prebin_patch_len_for(i: usize) -> Self {
-        let _ = i;
-        todo!()
-    }
-
-    fn to_pre_binned_patch_range_enum(
-        &self,
-        bin_count: u64,
-        patch_offset: u64,
-        patch_count: u64,
-    ) -> PreBinnedPatchRangeEnum {
-        PreBinnedPatchRangeEnum::Time(PreBinnedPatchRange {
-            first: PreBinnedPatchCoord {
-                bin_len: self.clone(),
-                bin_count,
-                patch_offset,
-            },
-            patch_count,
-        })
-    }
-
-    fn binned_bin_len_opts() -> Vec<Self> {
-        TIME_BIN_THRESHOLDS.iter().map(|&x| Self(x)).collect()
-    }
-
-    fn to_binned_range_enum(&self, bin_off: u64, bin_cnt: u64) -> BinnedRangeEnum {
-        BinnedRangeEnum::Time(BinnedRange {
-            bin_len: self.clone(),
-            bin_off,
-            bin_cnt,
-        })
-    }
-}
-
-impl Dim0Index for PulseId {
-    fn add(&self, v: &Self) -> Self {
-        Self(self.0 + v.0)
-    }
-
-    fn sub(&self, v: &Self) -> Self {
-        Self(self.0 - v.0)
-    }
-
-    fn sub_n(&self, v: u64) -> Self {
-        Self(self.0 - v)
-    }
-
-    fn times(&self, x: u64) -> Self {
-        Self(self.0 * x)
-    }
-
-    fn div_n(&self, n: u64) -> Self {
-        Self(self.0 / n)
-    }
-
-    fn div_v(&self, v: &Self) -> u64 {
-        self.0 / v.0
-    }
-
-    fn as_u64(&self) -> u64 {
-        self.0
-    }
-
-    fn series_range(a: Self, b: Self) -> SeriesRange {
-        SeriesRange::PulseRange(PulseRange { beg: a.0, end: b.0 })
-    }
-
-    fn prebin_bin_len_opts() -> Vec<Self> {
-        PREBIN_PULSE_BIN_LEN_VAR0.iter().map(|&x| Self(x)).collect()
-    }
-
-    fn prebin_patch_len_for(i: usize) -> Self {
-        let _ = i;
-        todo!()
-    }
-
-    fn to_pre_binned_patch_range_enum(
-        &self,
-        bin_count: u64,
-        patch_offset: u64,
-        patch_count: u64,
-    ) -> PreBinnedPatchRangeEnum {
-        PreBinnedPatchRangeEnum::Pulse(PreBinnedPatchRange {
-            first: PreBinnedPatchCoord {
-                bin_len: self.clone(),
-                bin_count,
-                patch_offset,
-            },
-            patch_count,
-        })
-    }
-
-    fn binned_bin_len_opts() -> Vec<Self> {
-        PULSE_BIN_THRESHOLDS.iter().map(|&x| Self(x)).collect()
-    }
-
-    fn to_binned_range_enum(&self, bin_off: u64, bin_cnt: u64) -> BinnedRangeEnum {
-        BinnedRangeEnum::Pulse(BinnedRange {
-            bin_len: self.clone(),
-            bin_off,
-            bin_cnt,
-        })
-    }
-}
-
-const PREBIN_TIME_BIN_LEN_VAR0: [u64; 3] = [MIN * 1, HOUR * 1, DAY];
-
-const PREBIN_PULSE_BIN_LEN_VAR0: [u64; 4] = [100, 10000, 1000000, 100000000];
-
-#[allow(unused)]
-const PATCH_T_LEN_OPTIONS_SCALAR: [u64; 3] = [
-    //
-    //MIN * 60,
-    HOUR * 6,
-    DAY * 16,
-    DAY * 64,
-];
-
-#[allow(unused)]
-const PATCH_T_LEN_OPTIONS_WAVE: [u64; 3] = [
-    //
-    //MIN * 10,
-    HOUR * 6,
-    DAY * 8,
-    DAY * 32,
-];
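// Generic-axis sketch for the Dim0Index impls above: the same bin arithmetic
// serves the time axis (TsNano) and the pulse axis (PulseId).
fn bins_between<T: Dim0Index>(beg: T, end: T, bin_len: T) -> u64 {
    end.sub(&beg).div_v(&bin_len)
}

fn dim0_demo() {
    let n = bins_between(TsNano::from_ms(0), TsNano::from_ms(60_000), TsNano::from_ms(1_000));
    assert_eq!(n, 60);
    let n = bins_between(PulseId::from_id(100), PulseId::from_id(500), PulseId::from_id(100));
    assert_eq!(n, 4);
}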
-
-const TIME_BIN_THRESHOLDS: [u64; 26] = [
-    MS * 20,
-    MS * 50,
-    MS * 100,
-    MS * 200,
-    MS * 500,
-    SEC,
-    SEC * 2,
-    SEC * 5,
-    SEC * 10,
-    SEC * 20,
-    MIN,
-    MIN * 2,
-    MIN * 5,
-    MIN * 10,
-    MIN * 20,
-    HOUR,
-    HOUR * 2,
-    HOUR * 4,
-    HOUR * 12,
-    DAY,
-    DAY * 2,
-    DAY * 4,
-    DAY * 8,
-    DAY * 16,
-    DAY * 32,
-    DAY * 64,
-];
-
-const TIME_BIN_LEN_CACHE_OPTS: [DtMs; 0] = [
-    //
-    // DtMs(1000 * 10),
-    // DtMs(1000 * 60 * 60),
-];
-
-pub fn time_bin_len_cache_opts() -> &'static [DtMs] {
-    &TIME_BIN_LEN_CACHE_OPTS
-}
-
-const PULSE_BIN_THRESHOLDS: [u64; 25] = [
-    10, 20, 40, 80, 100, 200, 400, 800, 1000, 2000, 4000, 8000, 10000, 20000, 40000, 80000, 100000, 200000, 400000,
-    800000, 1000000, 2000000, 4000000, 8000000, 10000000,
-];
-
-#[allow(unused)]
-const fn time_bin_threshold_at(i: usize) -> TsNano {
-    TsNano(TIME_BIN_THRESHOLDS[i])
-}
-
-#[allow(unused)]
-const fn pulse_bin_threshold_at(i: usize) -> PulseId {
-    PulseId(PULSE_BIN_THRESHOLDS[i])
-}
-
-/// Identifies one patch on the binning grid at a certain resolution.
-/// A patch consists of `bin_count` consecutive bins.
-/// In total, a given `PreBinnedPatchCoord` spans a time range from `patch_beg` to `patch_end`.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct PreBinnedPatchCoord<T>
-where
-    T: Dim0Index,
-{
-    bin_len: T,
-    bin_count: u64,
-    patch_offset: u64,
-}
-
-impl<T> PreBinnedPatchCoord<T>
-where
-    T: Dim0Index,
-{
-    pub fn new(bin_len: T, bin_count: u64, patch_offset: u64) -> Self {
-        Self {
-            bin_len,
-            bin_count,
-            patch_offset,
-        }
-    }
-
-    pub fn bin_len(&self) -> T {
-        self.bin_len.clone()
-    }
-
-    pub fn patch_len(&self) -> T {
-        self.bin_len().times(self.bin_count)
-    }
-
-    pub fn patch_beg(&self) -> T {
-        self.bin_len().times(self.bin_count).times(self.patch_offset)
-    }
-
-    pub fn patch_end(&self) -> T {
-        self.bin_len().times(self.bin_count).times(1 + self.patch_offset)
-    }
-
-    pub fn series_range(&self) -> SeriesRange {
-        T::series_range(self.patch_beg(), self.patch_end())
-    }
-
-    pub fn bin_count(&self) -> u64 {
-        self.bin_count
-    }
-
-    pub fn patch_offset(&self) -> u64 {
-        self.patch_offset
-    }
-
-    pub fn edges(&self) -> Vec<T> {
-        let mut ret = Vec::new();
-        let mut t = self.patch_beg();
-        ret.push(t.clone());
-        for _ in 0..self.bin_count() {
-            t = t.add(&self.bin_len);
-            ret.push(t.clone());
-        }
-        ret
-    }
-
-    pub fn next(&self) -> Self {
-        Self::new(self.bin_len.clone(), self.bin_count, 1 + self.patch_offset)
-    }
-}
-
-impl<T> AppendToUrl for PreBinnedPatchCoord<T>
-where
-    T: Dim0Index,
-{
-    fn append_to_url(&self, url: &mut Url) {
-        error!("TODO AppendToUrl for PreBinnedPatchCoord");
-        err::todo();
-        // TODO must also emit the type of the series index
-        let mut g = url.query_pairs_mut();
-        g.append_pair("patchTlen", &format!("{}", 4242));
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub enum PreBinnedPatchCoordEnum {
-    Time(PreBinnedPatchCoord<TsNano>),
-    Pulse(PreBinnedPatchCoord<PulseId>),
-}
-
-impl PreBinnedPatchCoordEnum {
-    pub fn bin_count(&self) -> u64 {
-        todo!()
-    }
-
-    pub fn patch_range(&self) -> SeriesRange {
-        match self {
-            PreBinnedPatchCoordEnum::Time(k) => k.series_range(),
-            PreBinnedPatchCoordEnum::Pulse(k) => k.series_range(),
-        }
-    }
-
-    pub fn span_desc(&self) -> String {
-        match self {
-            PreBinnedPatchCoordEnum::Time(k) => {
-                format!("pre-W-{}-B-{}", k.bin_len.0 * k.bin_count / SEC, k.patch_offset / SEC)
-            }
-            PreBinnedPatchCoordEnum::Pulse(k) => {
-                format!("pre-W-{}-B-{}", k.bin_len.0 * k.bin_count / SEC, k.patch_offset / SEC)
-            }
-        }
-    }
-}
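// Coordinate sketch for PreBinnedPatchCoord above: a patch of 60 one-minute
// bins at patch_offset 2 covers [2h, 3h) and yields bin_count + 1 edges.
fn patch_coord_demo() {
    use timeunits::{HOUR, MIN};
    let c = PreBinnedPatchCoord::new(TsNano::from_ns(MIN), 60, 2);
    assert_eq!(c.patch_beg().ns(), 2 * HOUR);
    assert_eq!(c.patch_end().ns(), 3 * HOUR);
    assert_eq!(c.edges().len(), 61);
}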
todo!() - } - - fn from_pairs(_pairs: &BTreeMap) -> Result { - todo!() - } -} - -impl AppendToUrl for PreBinnedPatchCoordEnum { - fn append_to_url(&self, _url: &mut Url) { - todo!() - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct PreBinnedPatchRange -where - T: Dim0Index, -{ - first: PreBinnedPatchCoord, - patch_count: u64, -} - -impl PreBinnedPatchRange -where - T: Dim0Index, -{ - pub fn edges(&self) -> Vec { - err::todo(); - let ret = Vec::new(); - ret - } - - pub fn series_range(&self) -> SeriesRange { - T::series_range(err::todoval(), err::todoval()) - } - - pub fn patch_count(&self) -> u64 { - self.patch_count - } - - pub fn bin_count(&self) -> u64 { - err::todoval() - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum PreBinnedPatchRangeEnum { - Time(PreBinnedPatchRange), - Pulse(PreBinnedPatchRange), -} - -impl PreBinnedPatchRangeEnum { - fn covering_range_ty(a: T, b: T, min_bin_count: u32) -> Result - where - T: Dim0Index + 'static, - { - let opts = T::prebin_bin_len_opts(); - if min_bin_count < 1 { - Err(Error::with_msg("min_bin_count < 1"))?; - } - if min_bin_count > 20000 { - Err(Error::with_msg(format!("min_bin_count > 20000: {}", min_bin_count)))?; - } - let du = b.sub(&a); - let max_bin_len = du.div_n(min_bin_count as u64); - for (i1, bl) in opts.iter().enumerate().rev() { - if bl <= &max_bin_len { - let patch_len = ::prebin_patch_len_for(i1); - let bin_count = patch_len.div_v(bl); - let patch_off_1 = a.div_v(&patch_len); - let patch_off_2 = (b.add(&patch_len).sub_n(1)).div_v(&patch_len); - let patch_count = patch_off_2 - patch_off_1; - let ret = T::to_pre_binned_patch_range_enum(&bl, bin_count, patch_off_1, patch_count); - return Ok(ret); - } - } - Err(Error::with_msg_no_trace("can not find matching pre-binned grid")) - } - - /// Cover at least the given range with at least as many as the requested number of bins. - pub fn covering_range(range: SeriesRange, min_bin_count: u32) -> Result { - match range { - SeriesRange::TimeRange(k) => Self::covering_range_ty(TsNano(k.beg), TsNano(k.end), min_bin_count), - SeriesRange::PulseRange(k) => Self::covering_range_ty(PulseId(k.beg), PulseId(k.end), min_bin_count), - } - } -} - -#[derive(Clone, Serialize, Deserialize)] -pub struct BinnedRange -where - T: Dim0Index, -{ - // TODO remove pub, which is currently used in tests - pub bin_len: T, - pub bin_off: u64, - pub bin_cnt: u64, -} - -impl fmt::Debug for BinnedRange { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let beg = self.bin_len.times(self.bin_off); - let end = self.bin_len.times(self.bin_off + self.bin_cnt); - write!(fmt, "BinnedRange {{ {}, {}, {} }}", beg, end, self.bin_len.to_dt_ms()) - } -} - -impl fmt::Debug for BinnedRange { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "BinnedRange {{ .. 
}}") - } -} - -impl fmt::Display for BinnedRange { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self, fmt) - } -} - -impl BinnedRange { - pub fn to_nano_range(&self) -> NanoRange { - let beg = self.bin_len.times(self.bin_off).as_u64(); - let end = self.bin_len.times(self.bin_off + self.bin_cnt).as_u64(); - NanoRange { beg, end } - } - - pub fn from_nano_range(range: NanoRange, bin_len: DtMs) -> Self { - let off1 = range.beg() / bin_len.ns(); - let off2 = (bin_len.ns() - 1 + range.end()) / bin_len.ns(); - Self { - bin_len: TsNano::from_ns(bin_len.ns()), - bin_off: off1, - bin_cnt: off2 - off1, - } - } - - pub fn covering_range_time(range: NanoRange, bin_len_req: DtMs) -> Result { - let opts = ::binned_bin_len_opts(); - let bin_len_req = if bin_len_req.ms() < opts[0].ms() { - DtMs::from_ms_u64(opts[0].ms()) - } else { - bin_len_req - }; - let bin_len_req = if bin_len_req.ms() > opts.last().unwrap().ms() { - DtMs::from_ms_u64(opts.last().unwrap().ms()) - } else { - bin_len_req - }; - let pv = TsNano::from_ns(bin_len_req.ns()); - let pi = opts.partition_point(|&x| x < pv); - let bin_len = if pi == 0 { - DtMs::from_ms_u64(opts[0].ms()) - } else { - let v1 = DtMs::from_ms_u64(opts[pi - 1].ms()); - if let Some(&v2) = opts.get(pi) { - let v2 = DtMs::from_ms_u64(v2.ms()); - if v1 >= bin_len_req || v2 < bin_len_req { - panic!("logic covering_range_time"); - } else { - let f1 = (bin_len_req.ms() - v1.ms()) / bin_len_req.ms(); - let f2 = (v2.ms() - bin_len_req.ms()) / bin_len_req.ms(); - if f1 < f2 { - v1 - } else { - v2 - } - } - } else { - DtMs::from_ms_u64(v1.ms()) - } - }; - let bin_off = range.beg() / bin_len.ns(); - let off2 = (range.end() + bin_len.ns() - 1) / bin_len.ns(); - let bin_cnt = off2 - bin_off; - let ret = Self { - bin_len: TsNano::from_ns(bin_len.ns()), - bin_off, - bin_cnt, - }; - Ok(ret) - } - - pub fn nano_beg(&self) -> TsNano { - self.bin_len.times(self.bin_off) - } - - pub fn nano_end(&self) -> TsNano { - self.bin_len.times(self.bin_off + self.bin_cnt) - } - - pub fn one_before_bin(&self) -> Self { - Self { - bin_len: self.bin_len, - bin_off: self.bin_off - 1, - bin_cnt: self.bin_cnt + 1, - } - } - - pub fn bin_len_dt_ms(&self) -> DtMs { - self.bin_len.to_dt_ms() - } -} - -impl BinnedRange -where - T: Dim0Index, -{ - pub fn bin_count(&self) -> u64 { - self.bin_cnt - } - - pub fn get_range(&self, ix: u32) -> NanoRange { - let _ = ix; - /*NanoRange { - beg: (self.offset + ix as u64) * self.grid_spec.bin_t_len, - end: (self.offset + ix as u64 + 1) * self.grid_spec.bin_t_len, - }*/ - err::todoval() - } - - pub fn full_range(&self) -> NanoRange { - /*NanoRange { - beg: self.offset * self.grid_spec.bin_t_len, - end: (self.offset + self.bin_count) * self.grid_spec.bin_t_len, - }*/ - let beg = self.bin_len.times(self.bin_off).as_u64(); - let end = self.bin_len.times(self.bin_off + self.bin_cnt).as_u64(); - panic!("TODO make generic for pulse"); - NanoRange { beg, end } - } - - pub fn edges_u64(&self) -> Vec { - let mut ret = Vec::new(); - let mut t = self.bin_len.times(self.bin_off); - let end = self.bin_len.times(self.bin_off + self.bin_cnt); - while t <= end { - ret.push(t.as_u64()); - t = t.add(&self.bin_len); - } - ret - } - - pub fn edges(&self) -> Vec { - let mut ret = Vec::new(); - let mut t = self.bin_len.times(self.bin_off); - let end = self.bin_len.times(self.bin_off + self.bin_cnt); - while t <= end { - ret.push(t.clone()); - t = t.add(&self.bin_len); - } - ret - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum BinnedRangeEnum 
{
-    Time(BinnedRange<TsNano>),
-    Pulse(BinnedRange<PulseId>),
-}
-
-impl BinnedRangeEnum {
-    fn covering_range_ty<T>(a: T, b: T, min_bin_count: u32) -> Result<Self, NetpodError>
-    where
-        T: Dim0Index + 'static,
-    {
-        let opts = T::binned_bin_len_opts();
-        if min_bin_count < 1 {
-            Err(NetpodError::BinCountTooSmall)?;
-        }
-        let bin_count_max = i32::MAX as u32;
-        if min_bin_count > bin_count_max {
-            Err(NetpodError::BinCountTooLarge)?;
-        }
-        let du = b.sub(&a);
-        let max_bin_len = du.div_n(min_bin_count as u64);
-        for bl in opts.iter().rev() {
-            if bl <= &max_bin_len {
-                let off_1 = a.div_v(&bl);
-                let off_2 = (b.add(&bl).sub_n(1)).div_v(&bl);
-                eprintln!("off_1 {off_1:?} off_2 {off_2:?}");
-                let bin_cnt = off_2 - off_1;
-                let ret = T::to_binned_range_enum(bl, off_1, bin_cnt);
-                return Ok(ret);
-            }
-        }
-        Err(NetpodError::BinnedNoGridMatch)
-    }
-
-    /// Cover at least the given range while selecting the bin width which best fits the requested bin width.
-    pub fn covering_range_time(range: SeriesRange, bin_len_req: DtMs) -> Result<Self, NetpodError> {
-        match range {
-            SeriesRange::TimeRange(k) => Ok(Self::Time(BinnedRange::covering_range_time(k, bin_len_req)?)),
-            SeriesRange::PulseRange(_) => Err(NetpodError::TimelikeBinWidthImpossibleForPulseRange),
-        }
-    }
-
-    /// Cover at least the given range with at least as many as the requested number of bins.
-    pub fn covering_range(range: SeriesRange, min_bin_count: u32) -> Result<Self, NetpodError> {
-        match range {
-            SeriesRange::TimeRange(k) => Self::covering_range_ty(TsNano(k.beg), TsNano(k.end), min_bin_count),
-            SeriesRange::PulseRange(k) => Self::covering_range_ty(PulseId(k.beg), PulseId(k.end), min_bin_count),
-        }
-    }
-
-    pub fn bin_count(&self) -> u64 {
-        match self {
-            BinnedRangeEnum::Time(k) => k.bin_count(),
-            BinnedRangeEnum::Pulse(k) => k.bin_count(),
-        }
-    }
-
-    pub fn range_at(&self, i: usize) -> Option<SeriesRange> {
-        match self {
-            BinnedRangeEnum::Time(k) => {
-                if (i as u64) < k.bin_cnt {
-                    let beg = k.bin_len.0 * (k.bin_off + i as u64);
-                    let x = SeriesRange::TimeRange(NanoRange {
-                        beg,
-                        end: beg + k.bin_len.0,
-                    });
-                    Some(x)
-                } else {
-                    None
-                }
-            }
-            BinnedRangeEnum::Pulse(k) => {
-                if (i as u64) < k.bin_cnt {
-                    let beg = k.bin_len.0 * (k.bin_off + i as u64);
-                    let x = SeriesRange::PulseRange(PulseRange {
-                        beg,
-                        end: beg + k.bin_len.0,
-                    });
-                    Some(x)
-                } else {
-                    None
-                }
-            }
-        }
-    }
-
-    pub fn dim0kind(&self) -> Dim0Kind {
-        match self {
-            BinnedRangeEnum::Time(_) => Dim0Kind::Time,
-            BinnedRangeEnum::Pulse(_) => Dim0Kind::Pulse,
-        }
-    }
-
-    pub fn binned_range_time(&self) -> BinnedRange<TsNano> {
-        match self {
-            BinnedRangeEnum::Time(x) => x.clone(),
-            BinnedRangeEnum::Pulse(_) => panic!(),
-        }
-    }
-
-    // Only a helper for unit tests.
- pub fn from_custom(len: TsNano, off: u64, cnt: u64) -> BinnedRangeEnum { - let rng = BinnedRange { - bin_len: len, - bin_off: off, - bin_cnt: cnt, - }; - BinnedRangeEnum::Time(rng) - } -} - -#[cfg(test)] -mod test_binned_range { - use super::*; - - #[test] - fn binned_range_00() { - let range = NanoRange { - beg: HOUR * 72, - end: HOUR * 73, - }; - let range = BinnedRangeEnum::covering_range(range.into(), 10).unwrap(); - assert_eq!(range.bin_count(), 12); - match range { - BinnedRangeEnum::Time(range) => { - assert_eq!(range.edges_u64()[0], HOUR * 72); - assert_eq!(range.edges_u64()[2], HOUR * 72 + MIN * 5 * 2); - } - BinnedRangeEnum::Pulse(_) => panic!(), - } - } - - #[test] - fn binned_range_01() { - let range = NanoRange { - beg: MIN * 20 + SEC * 10, - end: HOUR * 10 + MIN * 20 + SEC * 30, - }; - let range = BinnedRangeEnum::covering_range(range.into(), 10).unwrap(); - assert_eq!(range.bin_count(), 11); - match range { - BinnedRangeEnum::Time(range) => { - assert_eq!(range.edges_u64()[0], HOUR * 0); - assert_eq!(range.edges_u64()[1], HOUR * 1); - assert_eq!(range.edges_u64()[11], HOUR * 11); - } - BinnedRangeEnum::Pulse(_) => panic!(), - } - } -} - -#[derive(Clone, Serialize, Deserialize)] -pub enum AggKind { - EventBlobs, - DimXBins1, - DimXBinsN(u32), - Plain, - TimeWeightedScalar, - PulseIdDiff, -} - -impl AggKind { - pub fn do_time_weighted(&self) -> bool { - match self { - Self::EventBlobs => false, - Self::TimeWeightedScalar => true, - Self::DimXBins1 => false, - Self::DimXBinsN(_) => false, - Self::Plain => false, - Self::PulseIdDiff => false, - } - } - - pub fn need_expand(&self) -> bool { - match self { - Self::EventBlobs => false, - Self::TimeWeightedScalar => true, - Self::DimXBins1 => false, - Self::DimXBinsN(_) => false, - Self::Plain => false, - Self::PulseIdDiff => false, - } - } -} - -pub fn x_bin_count(shape: &Shape, agg_kind: &AggKind) -> usize { - match agg_kind { - AggKind::EventBlobs => 0, - AggKind::TimeWeightedScalar => 0, - AggKind::DimXBins1 => 0, - AggKind::DimXBinsN(n) => { - if *n == 0 { - match shape { - Shape::Scalar => 0, - Shape::Wave(n) => *n as usize, - Shape::Image(j, k) => *j as usize * *k as usize, - } - } else { - *n as usize - } - } - AggKind::Plain => match shape { - Shape::Scalar => 0, - Shape::Wave(n) => *n as usize, - Shape::Image(j, k) => *j as usize * *k as usize, - }, - AggKind::PulseIdDiff => 0, - } -} - -impl fmt::Display for AggKind { - fn fmt(&self, fmt: &mut fmt::Formatter) -> std::fmt::Result { - match self { - Self::EventBlobs => { - write!(fmt, "EventBlobs") - } - Self::DimXBins1 => { - write!(fmt, "DimXBins1") - } - Self::DimXBinsN(n) => { - write!(fmt, "DimXBinsN{}", n) - } - Self::Plain => { - write!(fmt, "Plain") - } - Self::TimeWeightedScalar => { - write!(fmt, "TimeWeightedScalar") - } - Self::PulseIdDiff => { - write!(fmt, "PulseIdDiff") - } - } - } -} - -impl fmt::Debug for AggKind { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} - -impl FromStr for AggKind { - type Err = Error; - - fn from_str(s: &str) -> Result { - let nmark = "DimXBinsN"; - if s == "EventBlobs" { - Ok(AggKind::EventBlobs) - } else if s == "DimXBins1" { - Ok(AggKind::DimXBins1) - } else if s == "TimeWeightedScalar" { - Ok(AggKind::TimeWeightedScalar) - } else if s.starts_with(nmark) { - let nbins: u32 = s[nmark.len()..].parse()?; - Ok(AggKind::DimXBinsN(nbins)) - } else if s == "PulseIdDiff" { - Ok(AggKind::PulseIdDiff) - } else { - Err(Error::with_msg(format!("can not parse {} as AggKind", s))) - } - } -} 
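The covering-range selection that the `test_binned_range` cases above exercise is plain integer arithmetic, so it can also be checked in isolation. A standalone sketch mirroring `covering_range_ty` on bare `u64` nanoseconds, with an abbreviated threshold table (the function and constant names here are illustrative, not part of netpod):

```rust
// Mirrors the selection rule of covering_range_ty: pick the largest
// threshold bin length that still yields at least `min_bin_count` bins,
// then widen the range outward to whole-bin boundaries.

const SEC: u64 = 1_000_000_000;
const MIN: u64 = 60 * SEC;
const HOUR: u64 = 60 * MIN;

// Abbreviated stand-in for TIME_BIN_THRESHOLDS, ascending.
const THRESHOLDS: [u64; 6] = [SEC, MIN, 5 * MIN, 10 * MIN, 30 * MIN, HOUR];

fn covering_range(beg: u64, end: u64, min_bin_count: u64) -> Option<(u64, u64, u64)> {
    let max_bin_len = (end - beg) / min_bin_count;
    // Walk from the coarsest grid down, like opts.iter().rev().
    for &bl in THRESHOLDS.iter().rev() {
        if bl <= max_bin_len {
            let off_1 = beg / bl; // round beg down to a bin edge
            let off_2 = (end + bl - 1) / bl; // round end up to a bin edge
            return Some((bl, off_1, off_2 - off_1));
        }
    }
    None
}

fn main() {
    // Same input as test binned_range_00: [72 h, 73 h) with at least 10 bins.
    let (bin_len, bin_off, bin_cnt) = covering_range(HOUR * 72, HOUR * 73, 10).unwrap();
    assert_eq!(bin_len, 5 * MIN); // 1 h / 10 = 6 min max; next threshold below is 5 min
    assert_eq!(bin_cnt, 12); // the hour is covered by twelve 5-minute bins
    assert_eq!(bin_off * bin_len, HOUR * 72); // beg already sits on a bin edge
    println!("bin_len={bin_len} off={bin_off} cnt={bin_cnt}");
}
```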
- -pub trait ToNanos { - fn to_nanos(&self) -> u64; -} - -impl ToNanos for DateTime { - fn to_nanos(&self) -> u64 { - self.timestamp() as u64 * timeunits::SEC + self.timestamp_subsec_nanos() as u64 - } -} - -#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord)] -pub struct TsMs(pub u64); - -impl TsMs { - pub const fn from_ms_u64(x: u64) -> Self { - Self(x) - } - - pub const fn from_ns_u64(x: u64) -> Self { - Self(x / 1000000) - } - - pub fn from_system_time(st: SystemTime) -> Self { - let tsunix = st.duration_since(UNIX_EPOCH).unwrap_or(Duration::ZERO); - let x = tsunix.as_secs() * 1000 + tsunix.subsec_millis() as u64; - Self::from_ms_u64(x) - } - - pub const fn ms(self) -> u64 { - self.0 - } - - pub const fn ns(self) -> TsNano { - TsNano::from_ms(self.0) - } - - pub const fn ns_u64(self) -> u64 { - 1000000 * self.0 - } - - pub const fn sec(self) -> u64 { - self.0 / 1000 - } - - pub const fn to_u64(self) -> u64 { - self.0 - } - - pub const fn to_i64(self) -> i64 { - self.0 as i64 - } - - pub const fn to_grid_02(self, grid: DtMs) -> (Self, DtMs) { - let msp = TsMs(self.0 / grid.0 * grid.0); - let lsp = DtMs(self.0 - msp.0); - (msp, lsp) - } - - pub fn bump_epsilon(&self) -> TsMs { - Self(self.0 + 1) - } - - pub fn fmt(&self) -> TsMsFmt { - TsMsFmt { ts: self.clone() } - } -} - -impl AsRef for TsMs { - fn as_ref(&self) -> &TsMs { - &self - } -} - -impl fmt::Display for TsMs { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "TsMs {{ {} }}", self.0) - } -} - -impl core::ops::Sub for TsMs { - type Output = DtMs; - - fn sub(self, rhs: Self) -> Self::Output { - DtMs(self.0.saturating_sub(rhs.0)) - } -} - -pub struct TsMsFmt { - ts: TsMs, -} - -impl fmt::Debug for TsMsFmt { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - chrono::DateTime::from_timestamp_millis(self.ts.ms() as i64) - .unwrap() - .format(DATETIME_FMT_3MS) - .fmt(fmt) - } -} - -impl fmt::Display for TsMsFmt { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - chrono::DateTime::from_timestamp_millis(self.ts.ms() as i64) - .unwrap() - .format(DATETIME_FMT_3MS) - .fmt(fmt) - } -} - -pub struct TsMsVecFmt(pub I); - -impl fmt::Display for TsMsVecFmt -where - I: Clone + IntoIterator, - T: AsRef, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "[")?; - for ts in self.0.clone().into_iter() { - write!(fmt, " {}", ts.as_ref().fmt())?; - } - write!(fmt, " ]")?; - Ok(()) - } -} - -pub trait RetStreamExt: Stream { - fn only_first_error(self) -> OnlyFirstError - where - Self: Sized; -} - -pub struct OnlyFirstError { - inp: T, - errored: bool, - complete: bool, -} - -impl OnlyFirstError { - pub fn type_name() -> &'static str { - std::any::type_name::() - } -} - -impl Stream for OnlyFirstError -where - T: Stream> + Unpin, -{ - type Item = ::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - use Poll::*; - if self.complete { - panic!("{} poll_next on complete", Self::type_name()) - } - if self.errored { - self.complete = true; - return Ready(None); - } - match self.inp.poll_next_unpin(cx) { - Ready(Some(Ok(k))) => Ready(Some(Ok(k))), - Ready(Some(Err(e))) => { - self.errored = true; - Ready(Some(Err(e))) - } - Ready(None) => { - self.complete = true; - Ready(None) - } - Pending => Pending, - } - } -} - -impl RetStreamExt for T -where - T: Stream, -{ - fn only_first_error(self) -> OnlyFirstError { - OnlyFirstError { - inp: self, - errored: false, - complete: false, - } - } -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 
-pub struct EventDataReadStats { - pub parsed_bytes: u64, -} - -impl EventDataReadStats { - pub fn new() -> Self { - Self { parsed_bytes: 0 } - } - pub fn trans(&mut self, k: &mut Self) { - self.parsed_bytes += k.parsed_bytes; - k.parsed_bytes = 0; - } -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct RangeFilterStats { - pub items_no_prune_high: u64, - pub items_all_prune_high: u64, - pub items_part_prune_high: u64, -} - -impl RangeFilterStats { - pub fn new() -> Self { - Self { - items_no_prune_high: 0, - items_all_prune_high: 0, - items_part_prune_high: 0, - } - } -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum DiskStats { - OpenStats(OpenStats), - SeekStats(SeekStats), - ReadStats(ReadStats), - ReadExactStats(ReadExactStats), -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct OpenStats { - pub duration: Duration, -} - -impl OpenStats { - pub fn new(duration: Duration) -> Self { - Self { duration } - } -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct SeekStats { - pub duration: Duration, -} - -impl SeekStats { - pub fn new(duration: Duration) -> Self { - Self { duration } - } -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct ReadStats { - pub duration: Duration, -} - -impl ReadStats { - pub fn new(duration: Duration) -> Self { - Self { duration } - } -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct ReadExactStats { - pub duration: Duration, -} - -impl ReadExactStats { - pub fn new(duration: Duration) -> Self { - Self { duration } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct Api1WarningStats { - pub subreq_fail: usize, -} - -impl Api1WarningStats { - pub fn new() -> Self { - Self { subreq_fail: 0 } - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ByteSize(pub u32); - -impl ByteSize { - pub fn from_bytes(b: u32) -> Self { - Self(b) - } - - pub fn from_kb(kb: u32) -> Self { - Self(1024 * kb) - } - - pub fn from_mb(mb: u32) -> Self { - Self(1024 * 1024 * mb) - } - - pub fn bytes(&self) -> u32 { - self.0 - } -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct FileIoBufferSize(pub usize); - -impl FileIoBufferSize { - pub fn new(k: usize) -> Self { - Self(k) - } - pub fn bytes(&self) -> usize { - self.0 - } -} - -impl Default for FileIoBufferSize { - fn default() -> Self { - Self(1024 * 4) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum ReadSys { - TokioAsyncRead, - Read2, - Read3, - Read4, - Read5, - BlockingTaskIntoChannel, -} - -impl ReadSys { - pub fn default() -> Self { - Self::BlockingTaskIntoChannel - } -} - -impl From<&str> for ReadSys { - fn from(k: &str) -> Self { - if k == "TokioAsyncRead" { - Self::TokioAsyncRead - } else if k == "Read2" { - Self::Read2 - } else if k == "Read3" { - Self::Read3 - } else if k == "Read4" { - Self::Read4 - } else if k == "Read5" { - Self::Read5 - } else if k == "BlockingTaskIntoChannel" { - Self::BlockingTaskIntoChannel - } else { - Self::default() - } - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct DiskIoTune { - pub read_sys: ReadSys, - pub read_buffer_len: usize, - pub read_queue_len: usize, -} - -impl DiskIoTune { - pub fn default_for_testing() -> Self { - Self { - read_sys: ReadSys::default(), - read_buffer_len: 1024 * 8, - read_queue_len: 4, - } - } - - pub fn default() -> Self { - Self { - read_sys: ReadSys::default(), - read_buffer_len: 1024 * 16, - read_queue_len: 4, - } - } - - 
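Worth noting while reading the `FromUrl` impl further below: the fallbacks it applies when a query parameter is absent (4 KiB read buffer, queue length 8) are not the same as `DiskIoTune::default()` above (16 KiB, queue length 4). A standalone sketch of the buffer-length lookup, with the parameter name taken from that impl:

```rust
// Standalone check of the DiskIoTune::from_pairs fallback behavior;
// the pairs map matches what get_url_query_pairs produces.
use std::collections::BTreeMap;

fn read_buffer_len_from(pairs: &BTreeMap<String, String>) -> usize {
    pairs
        .get("ReadBufferLen")
        .and_then(|x| x.parse().ok()) // same effect as map(parse().map_or(None, Some)).unwrap_or(None)
        .unwrap_or(1024 * 4) // from_pairs fallback, not the 16 KiB of default()
}

fn main() {
    let mut pairs = BTreeMap::new();
    assert_eq!(read_buffer_len_from(&pairs), 4096);
    pairs.insert("ReadBufferLen".to_string(), "65536".to_string());
    assert_eq!(read_buffer_len_from(&pairs), 65536);
}
```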
pub fn with_read_buffer_len(mut self, x: usize) -> Self { - self.read_buffer_len = x; - self - } -} - -impl Default for DiskIoTune { - fn default() -> Self { - Self::default() - } -} - -impl FromUrl for DiskIoTune { - type Error = NetpodError; - - fn from_url(url: &Url) -> Result { - Self::from_pairs(&get_url_query_pairs(url)) - } - - fn from_pairs(pairs: &BTreeMap) -> Result { - let read_sys = pairs - .get("ReadSys") - .map(|x| x.as_str().into()) - .unwrap_or_else(|| ReadSys::default()); - let read_buffer_len = pairs - .get("ReadBufferLen") - .map(|x| x.parse().map_or(None, Some)) - .unwrap_or(None) - .unwrap_or(1024 * 4); - let read_queue_len = pairs - .get("ReadQueueLen") - .map(|x| x.parse().map_or(None, Some)) - .unwrap_or(None) - .unwrap_or(8); - let ret = DiskIoTune { - read_sys, - read_buffer_len, - read_queue_len, - }; - Ok(ret) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ChannelSearchQuery { - pub backend: Option, - pub name_regex: String, - pub source_regex: String, - pub description_regex: String, - #[serde(default)] - pub icase: bool, - #[serde(default)] - pub kind: SeriesKind, -} - -impl ChannelSearchQuery { - pub fn from_url(url: &Url) -> Result { - let pairs = get_url_query_pairs(url); - let ret = Self { - backend: pairs.get("backend").map(Into::into), - name_regex: pairs.get("nameRegex").map_or(String::new(), |k| k.clone()), - source_regex: pairs.get("sourceRegex").map_or(String::new(), |k| k.clone()), - description_regex: pairs.get("descriptionRegex").map_or(String::new(), |k| k.clone()), - icase: pairs.get("icase").map_or(None, |x| x.parse().ok()).unwrap_or(false), - kind: SeriesKind::from_pairs(&pairs)?, - }; - Ok(ret) - } - - pub fn append_to_url(&self, url: &mut Url) { - let mut qp = url.query_pairs_mut(); - if let Some(v) = &self.backend { - qp.append_pair("backend", v); - } - qp.append_pair("nameRegex", &self.name_regex); - qp.append_pair("sourceRegex", &self.source_regex); - qp.append_pair("descriptionRegex", &self.description_regex); - qp.append_pair("icase", &self.icase.to_string()); - drop(qp); - self.kind.append_to_url(url); - } -} - -#[cfg(test)] -mod test { - #[test] - fn parse_url_1() { - let mut url = url::Url::parse("http://host/123").unwrap(); - url.query_pairs_mut().append_pair("text", "jo jo • yo"); - assert_eq!(url.to_string(), "http://host/123?text=jo+jo+%E2%80%A2+yo"); - } - - #[test] - fn parse_url_2() { - let url = url::Url::parse("dummy:?123").unwrap(); - assert_eq!(url.query().unwrap(), "123") - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ChannelSearchSingleResult { - pub backend: String, - pub name: String, - #[serde(rename = "seriesId")] - pub series: u64, - pub source: String, - #[serde(rename = "type")] - pub ty: String, - pub shape: Vec, - pub unit: String, - pub description: String, - #[serde(rename = "isApi0", skip_serializing_if = "Option::is_none")] - pub is_api_0: Option, -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ChannelSearchResult { - pub channels: Vec, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ProxyBackend { - pub name: String, - pub url: String, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct StatusSub { - pub url: String, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ProxyConfig { - pub name: String, - pub listen: String, - pub port: u16, - pub backends: Vec, - pub status_subs: Vec, - pub announce_backends: Option>, -} - -pub trait HasBackend { - fn backend(&self) -> &str; -} - -// TODO change into Option, why do I 
need to set a timeout using this trait? -pub trait HasTimeout { - fn timeout(&self) -> Option; -} - -pub trait FromUrl: Sized { - type Error; - fn from_url(url: &Url) -> Result; - // TODO put this in separate trait, because some implementors need url path segments to construct. - fn from_pairs(pairs: &BTreeMap) -> Result; -} - -pub trait AppendToUrl { - fn append_to_url(&self, url: &mut Url); -} - -pub type MapQuery = BTreeMap; - -impl AppendToUrl for MapQuery { - fn append_to_url(&self, url: &mut Url) { - let mut g = url.query_pairs_mut(); - for (k, v) in self { - g.append_pair(k, v); - } - } -} - -impl FromUrl for MapQuery { - type Error = NetpodError; - - fn from_url(url: &Url) -> Result { - let pairs = get_url_query_pairs(url); - Self::from_pairs(&pairs) - } - - fn from_pairs(pairs: &BTreeMap) -> Result { - Ok(pairs.clone()) - } -} - -impl HasBackend for MapQuery { - fn backend(&self) -> &str { - self.get("backend").map_or("NOBACKEND", AsRef::as_ref) - } -} - -impl HasTimeout for MapQuery { - fn timeout(&self) -> Option { - let x: Option = if let Some(v) = self.get("timeout") { - v.parse::().ok() - } else { - None - }; - x.map(|x| Duration::from_millis(x as _)) - } -} - -pub fn get_url_query_pairs(url: &Url) -> BTreeMap { - BTreeMap::from_iter(url.query_pairs().map(|(j, k)| (j.to_string(), k.to_string()))) -} - -// Request type of the channel/config api. -// At least on some backends the channel configuration may change depending on the queried range. -// Therefore, the query includes the range. -// The presence of a configuration in some range does not imply that there is any data available. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ChannelConfigQuery { - pub channel: SfDbChannel, - pub range: NanoRange, - pub expand: bool, -} - -impl HasBackend for ChannelConfigQuery { - fn backend(&self) -> &str { - &self.channel.backend - } -} - -impl HasTimeout for ChannelConfigQuery { - fn timeout(&self) -> Option { - None - } -} - -impl FromUrl for ChannelConfigQuery { - type Error = NetpodError; - - fn from_url(url: &Url) -> Result { - let pairs = get_url_query_pairs(url); - Self::from_pairs(&pairs) - } - - fn from_pairs(pairs: &BTreeMap) -> Result { - let beg_date = pairs - .get("begDate") - .map(String::from) - .unwrap_or_else(|| String::from("1970-01-01T00:00:00Z")); - let end_date = pairs - .get("endDate") - .map(String::from) - .unwrap_or_else(|| String::from("3000-01-01T00:00:00Z")); - let expand = pairs.get("expand").map(|s| s == "true").unwrap_or(false); - let ret = Self { - channel: SfDbChannel::from_pairs(&pairs)?, - range: NanoRange { - beg: beg_date.parse::>()?.to_nanos(), - end: end_date.parse::>()?.to_nanos(), - }, - expand, - }; - Ok(ret) - } -} - -impl AppendToUrl for ChannelConfigQuery { - fn append_to_url(&self, url: &mut Url) { - self.channel.append_to_url(url); - let mut g = url.query_pairs_mut(); - g.append_pair( - "begDate", - &Utc.timestamp_nanos(self.range.beg as i64) - .format(DATETIME_FMT_3MS) - .to_string(), - ); - g.append_pair( - "endDate", - &Utc.timestamp_nanos(self.range.end as i64) - .format(DATETIME_FMT_3MS) - .to_string(), - ); - if self.expand { - g.append_pair("expand", "true"); - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename = "SfDatabuffer")] -pub struct SfChannelConfigResponse { - #[serde(rename = "backend")] - pub backend: String, - #[serde(rename = "name")] - pub name: String, - #[serde(rename = "keyspace")] - pub keyspace: u8, - #[serde(rename = "timeBinSize")] - pub timebinsize: u64, - #[serde(rename = 
"scalarType")] - pub scalar_type: ScalarType, - #[serde(rename = "shape")] - pub shape: Shape, - #[serde(rename = "byteOrder")] - pub byte_order: ByteOrder, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename = "Daqbuf")] -pub struct DaqbufChannelConfig { - #[serde(rename = "backend")] - pub backend: String, - #[serde(rename = "seriesId")] - pub series: u64, - #[serde(rename = "seriesKind")] - pub kind: SeriesKind, - #[serde(rename = "scalarType")] - pub scalar_type: ScalarType, - #[serde(rename = "shape")] - pub shape: Shape, - #[serde(rename = "name")] - pub name: String, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum ChannelConfigResponse { - SfDatabuffer(SfChannelConfigResponse), - Daqbuf(DaqbufChannelConfig), -} - -impl From for ChannelConfigResponse { - fn from(value: SfChFetchInfo) -> Self { - Self::SfDatabuffer(SfChannelConfigResponse { - backend: value.backend().into(), - name: value.name().into(), - keyspace: value.ks(), - timebinsize: value.bs().ms_u64(), - scalar_type: value.scalar_type().clone(), - shape: value.shape().clone(), - byte_order: value.byte_order().clone(), - }) - } -} - -impl From for ChannelConfigResponse { - fn from(value: ChConf) -> Self { - Self::Daqbuf(DaqbufChannelConfig { - backend: value.backend().into(), - series: value.series(), - kind: value.kind(), - scalar_type: value.scalar_type().clone(), - shape: value.shape().clone(), - name: value.name().into(), - }) - } -} - -impl From for ChannelConfigResponse { - fn from(value: ChannelTypeConfigGen) -> Self { - match value { - ChannelTypeConfigGen::Scylla(k) => k.into(), - ChannelTypeConfigGen::SfDatabuffer(k) => k.into(), - } - } -} - -/** -Provide basic information about a channel, especially it's shape. -Also, byte-order is important for clients that process the raw databuffer event data (python data_api3). -*/ -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ChannelInfo { - pub scalar_type: ScalarType, - pub byte_order: Option, - pub shape: Shape, - pub msg: serde_json::Value, -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd, Eq, Ord)] -pub struct ChConf { - backend: String, - series: u64, - kind: SeriesKind, - scalar_type: ScalarType, - shape: Shape, - name: String, -} - -impl ChConf { - pub fn new( - backend: S1, - series: u64, - kind: SeriesKind, - scalar_type: ScalarType, - shape: Shape, - name: S2, - ) -> Self - where - S1: Into, - S2: Into, - { - Self { - backend: backend.into(), - series, - kind, - scalar_type, - shape, - name: name.into(), - } - } - - pub fn backend(&self) -> &str { - &self.backend - } - - pub fn series(&self) -> u64 { - self.series - } - - pub fn kind(&self) -> SeriesKind { - self.kind.clone() - } - - pub fn scalar_type(&self) -> &ScalarType { - &self.scalar_type - } - - pub fn shape(&self) -> &Shape { - &self.shape - } - - pub fn name(&self) -> &str { - &self.name - } -} - -// Includes the necessary information to know where to localize datafiles for sf-databuffer -// and what (approximate) types to expect. 
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd, Eq, Ord)] -pub struct SfChFetchInfo { - backend: String, - name: String, - ks: u8, - bs: DtNano, - scalar_type: ScalarType, - shape: Shape, - compression: bool, - byte_order: ByteOrder, - array: bool, -} - -impl SfChFetchInfo { - pub fn new( - backend: S1, - name: S2, - ks: u8, - bs: DtNano, - byte_order: ByteOrder, - scalar_type: ScalarType, - shape: Shape, - ) -> Self - where - S1: Into, - S2: Into, - { - Self { - backend: backend.into(), - name: name.into(), - ks, - bs, - scalar_type, - shape, - byte_order, - compression: false, - array: false, - } - } - - pub fn with_compression(mut self, x: bool) -> Self { - self.compression = x; - self - } - - pub fn with_array(mut self, x: bool) -> Self { - self.array = x; - self - } - - pub fn backend(&self) -> &str { - &self.backend - } - - pub fn name(&self) -> &str { - &self.name - } - - pub fn ks(&self) -> u8 { - self.ks - } - - pub fn bs(&self) -> DtNano { - self.bs.clone() - } - - pub fn scalar_type(&self) -> &ScalarType { - &self.scalar_type - } - - pub fn shape(&self) -> &Shape { - &self.shape - } - - pub fn byte_order(&self) -> &ByteOrder { - &self.byte_order - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd, Eq, Ord)] -pub enum ChannelTypeConfigGen { - Scylla(ChConf), - SfDatabuffer(SfChFetchInfo), -} - -impl ChannelTypeConfigGen { - pub fn to_scylla(&self) -> Result { - if let ChannelTypeConfigGen::Scylla(k) = self { - Ok(k.clone()) - } else { - Err(Error::with_msg_no_trace("this ChannelTypeConfigGen is not for scylla")) - } - } - - pub fn to_sf_databuffer(&self) -> Result { - if let ChannelTypeConfigGen::SfDatabuffer(k) = self { - Ok(k.clone()) - } else { - Err(Error::with_msg_no_trace("this ChannelTypeConfigGen is not for scylla")) - } - } - - pub fn backend(&self) -> &str { - match self { - ChannelTypeConfigGen::Scylla(x) => x.backend(), - ChannelTypeConfigGen::SfDatabuffer(x) => x.backend(), - } - } - - pub fn name(&self) -> &str { - match self { - ChannelTypeConfigGen::Scylla(x) => x.name(), - ChannelTypeConfigGen::SfDatabuffer(x) => x.name(), - } - } - - pub fn scalar_type(&self) -> &ScalarType { - match self { - ChannelTypeConfigGen::Scylla(x) => &x.scalar_type, - ChannelTypeConfigGen::SfDatabuffer(x) => x.scalar_type(), - } - } - - pub fn shape(&self) -> &Shape { - match self { - ChannelTypeConfigGen::Scylla(x) => &x.shape, - ChannelTypeConfigGen::SfDatabuffer(x) => x.shape(), - } - } - - pub fn series(&self) -> Option { - match self { - ChannelTypeConfigGen::Scylla(ch_conf) => Some(ch_conf.series()), - ChannelTypeConfigGen::SfDatabuffer(sf_ch_fetch_info) => None, - } - } -} - -impl From for ChannelTypeConfigGen { - fn from(value: SfChFetchInfo) -> Self { - Self::SfDatabuffer(value) - } -} - -impl From for ChannelTypeConfigGen { - fn from(value: ChConf) -> Self { - Self::Scylla(value) - } -} - -pub fn f32_close(a: f32, b: f32) -> bool { - if (a - b).abs() < 1e-4 || (a / b > 0.999 && a / b < 1.001) { - true - } else { - false - } -} - -pub fn f64_close(a: f64, b: f64) -> bool { - if (a - b).abs() < 1e-5 || (a / b > 0.9999 && a / b < 1.0001) { - true - } else { - false - } -} - -pub fn test_cluster() -> Cluster { - let nodes = (0..3) - .into_iter() - .map(|id| Node { - host: "localhost".into(), - listen: None, - port: 6170 + id as u16, - port_raw: 6170 + id as u16 + 100, - sf_databuffer: Some(SfDatabuffer { - data_base_path: test_data_base_path_databuffer().join(format!("node{:02}", id)), - ksprefix: "ks".into(), - splits: None, - 
}), - archiver_appliance: None, - channel_archiver: None, - prometheus_api_bind: None, - }) - .collect(); - Cluster { - backend: TEST_BACKEND.into(), - nodes, - database: Database { - host: "127.0.0.1".into(), - port: 5432, - name: "testingdaq".into(), - user: "testingdaq".into(), - pass: "testingdaq".into(), - }, - scylla: None, - scylla_st: None, - scylla_mt: None, - scylla_lt: None, - cache_scylla: None, - run_map_pulse_task: false, - is_central_storage: false, - file_io_buffer_size: Default::default(), - announce_backends: None, - } -} - -pub fn sls_test_cluster() -> Cluster { - let nodes = (0..1) - .into_iter() - .map(|id| Node { - host: "localhost".into(), - listen: None, - port: 6190 + id as u16, - port_raw: 6190 + id as u16 + 100, - sf_databuffer: None, - archiver_appliance: None, - channel_archiver: Some(ChannelArchiver { - data_base_paths: vec![test_data_base_path_channel_archiver_sls()], - }), - prometheus_api_bind: None, - }) - .collect(); - Cluster { - backend: "sls-archive".into(), - nodes, - database: Database { - host: "127.0.0.1".into(), - port: 5432, - name: "testingdaq".into(), - user: "testingdaq".into(), - pass: "testingdaq".into(), - }, - scylla: None, - scylla_st: None, - scylla_mt: None, - scylla_lt: None, - cache_scylla: None, - run_map_pulse_task: false, - is_central_storage: false, - file_io_buffer_size: Default::default(), - announce_backends: None, - } -} - -pub fn archapp_test_cluster() -> Cluster { - let nodes = (0..1) - .into_iter() - .map(|id| Node { - host: "localhost".into(), - listen: None, - port: 6200 + id as u16, - port_raw: 6200 + id as u16 + 100, - sf_databuffer: None, - channel_archiver: None, - archiver_appliance: Some(ArchiverAppliance { - data_base_paths: vec![test_data_base_path_archiver_appliance()], - }), - prometheus_api_bind: None, - }) - .collect(); - Cluster { - backend: "sf-archive".into(), - nodes, - database: Database { - host: "127.0.0.1".into(), - port: 5432, - name: "testingdaq".into(), - user: "testingdaq".into(), - pass: "testingdaq".into(), - }, - scylla: None, - scylla_st: None, - scylla_mt: None, - scylla_lt: None, - cache_scylla: None, - run_map_pulse_task: false, - is_central_storage: false, - file_io_buffer_size: Default::default(), - announce_backends: None, - } -} - -pub fn test_data_base_path_databuffer() -> PathBuf { - let homedir = std::env::var("HOME").unwrap(); - let data_base_path = PathBuf::from(homedir).join("daqbuffer-testdata").join("databuffer"); - data_base_path -} - -pub fn test_data_base_path_channel_archiver_sls() -> PathBuf { - let homedir = std::env::var("HOME").unwrap(); - let data_base_path = PathBuf::from(homedir) - .join("daqbuffer-testdata") - .join("sls") - .join("gfa03"); - data_base_path -} - -pub fn test_data_base_path_archiver_appliance() -> PathBuf { - let homedir = std::env::var("HOME").unwrap(); - let data_base_path = PathBuf::from(homedir) - .join("daqbuffer-testdata") - .join("archappdata") - .join("lts") - .join("ArchiverStore"); - data_base_path -} - -#[cfg(test)] -mod test_parse { - use super::*; - - #[test] - fn parse_scalar_type_shape() { - let mut url: Url = "http://test/path".parse().unwrap(); - { - let mut g = url.query_pairs_mut(); - g.append_pair("scalarType", &format!("{:?}", ScalarType::F32)); - g.append_pair("shape", &format!("{:?}", Shape::Image(3, 4))); - } - let url = url; - let urls = format!("{}", url); - let url: Url = urls.parse().unwrap(); - let mut a = BTreeMap::new(); - for (k, v) in url.query_pairs() { - let k = k.to_string(); - let v = v.to_string(); - info!("k {k:?} 
v {v:?}"); - a.insert(k, v); - } - assert_eq!(a.get("scalarType").unwrap(), "f32"); - assert_eq!(a.get("shape").unwrap(), "Image(3, 4)"); - } -} - -pub const PSI_DAQBUFFER_SERVICE_MARK: &'static str = "PSI-Daqbuffer-Service-Mark"; -pub const PSI_DAQBUFFER_SEEN_URL: &'static str = "PSI-Daqbuffer-Seen-Url"; - -#[derive(Debug, Clone)] -pub struct ReqCtx { - ts_ctor: Instant, - reqid: String, - reqid_this: String, - marks: Vec, - mark: String, -} - -impl ReqCtx { - pub fn new_with_node(req: &Request, nc: &NodeConfigCached) -> Self { - let reqid_this = status_board().unwrap().new_status_id(); - let reqid = if let Some(reqid_parent) = req.headers().get(X_DAQBUF_REQID) { - let parent = reqid_parent.to_str().unwrap_or("badid"); - format!("{}-{}", parent, reqid_this) - } else { - reqid_this.clone() - }; - let mark = format!("{}:{}", nc.node_config.name, nc.node.port); - let mut marks = Vec::new(); - for (n, v) in req.headers().iter() { - if n == PSI_DAQBUFFER_SERVICE_MARK { - marks.push(String::from_utf8_lossy(v.as_bytes()).to_string()); - } - } - Self { - ts_ctor: Instant::now(), - reqid, - reqid_this, - marks, - mark, - } - } - - pub fn new_with_proxy(req: &Request, proxy: &ProxyConfig) -> Self { - let reqid_this = status_board().unwrap().new_status_id(); - let reqid = if let Some(reqid_parent) = req.headers().get(X_DAQBUF_REQID) { - let parent = reqid_parent.to_str().unwrap_or("badid"); - format!("{}-{}", parent, reqid_this) - } else { - reqid_this.clone() - }; - let mark = format!("{}:{}", proxy.name, proxy.port); - let mut marks = Vec::new(); - for (n, v) in req.headers().iter() { - if n == PSI_DAQBUFFER_SERVICE_MARK { - marks.push(String::from_utf8_lossy(v.as_bytes()).to_string()); - } - } - Self { - ts_ctor: Instant::now(), - reqid, - reqid_this, - marks, - mark, - } - } - - pub fn new_from_single_reqid(reqid: String) -> Self { - Self { - ts_ctor: Instant::now(), - reqid_this: reqid.clone(), - reqid, - marks: Vec::new(), - mark: String::new(), - } - } - - pub fn for_test() -> Self { - Self { - ts_ctor: Instant::now(), - reqid: "PARENTID-TESTID".into(), - reqid_this: "TESTID".into(), - marks: Vec::new(), - mark: String::new(), - } - } - - pub fn ts_ctor(&self) -> Instant { - self.ts_ctor.clone() - } - - pub fn reqid(&self) -> &str { - &self.reqid - } - - pub fn reqid_this(&self) -> &str { - &self.reqid_this - } - - pub fn mark(&self) -> &str { - &self.mark - } - - pub fn marks(&self) -> &[String] { - &self.marks - } - - pub fn header_name(&self) -> &'static str { - X_DAQBUF_REQID - } - - pub fn header_value(&self) -> &str { - &self.reqid - } -} - -pub type ReqCtxArc = std::sync::Arc; - -static STATUS_BOARD: AtomicPtr> = AtomicPtr::new(std::ptr::null_mut()); - -#[derive(Debug, Serialize)] -pub struct StatusBoardEntry { - #[allow(unused)] - #[serde(serialize_with = "instant_serde::ser")] - ts_created: SystemTime, - #[serde(serialize_with = "instant_serde::ser")] - ts_updated: SystemTime, - // #[serde(skip_serializing_if = "is_false")] - done: bool, - // #[serde(skip_serializing_if = "Vec::is_empty")] - errors: Vec, - // TODO make this a better Stats container and remove pub access. 
- // #[serde(default, skip_serializing_if = "CmpZero::is_zero")] - error_count: usize, - // #[serde(default, skip_serializing_if = "CmpZero::is_zero")] - warn_count: usize, - // #[serde(default, skip_serializing_if = "CmpZero::is_zero")] - channel_not_found: usize, - // #[serde(default, skip_serializing_if = "CmpZero::is_zero")] - subreq_fail: usize, -} - -mod instant_serde { - use super::DATETIME_FMT_3MS; - use serde::Serializer; - use std::time::SystemTime; - - pub fn ser(x: &SystemTime, ser: S) -> Result { - use chrono::LocalResult; - let dur = x.duration_since(std::time::UNIX_EPOCH).unwrap(); - let res = chrono::TimeZone::timestamp_opt(&chrono::Utc, dur.as_secs() as i64, dur.subsec_nanos()); - match res { - LocalResult::None => Err(serde::ser::Error::custom(format!("Bad local instant conversion"))), - LocalResult::Single(dt) => { - let s = dt.format(DATETIME_FMT_3MS).to_string(); - ser.serialize_str(&s) - } - LocalResult::Ambiguous(dt, _dt2) => { - let s = dt.format(DATETIME_FMT_3MS).to_string(); - ser.serialize_str(&s) - } - } - } -} - -impl StatusBoardEntry { - pub fn new() -> Self { - Self { - ts_created: SystemTime::now(), - ts_updated: SystemTime::now(), - done: false, - errors: Vec::new(), - error_count: 0, - warn_count: 0, - channel_not_found: 0, - subreq_fail: 0, - } - } - - pub fn warn_inc(&mut self) { - self.warn_count += 1; - } - - pub fn channel_not_found_inc(&mut self) { - self.channel_not_found += 1; - } -} - -#[derive(Debug, Serialize)] -pub struct StatusBoardEntryUser { - // #[serde(default, skip_serializing_if = "CmpZero::is_zero")] - error_count: usize, - // #[serde(default, skip_serializing_if = "CmpZero::is_zero")] - warn_count: usize, - // #[serde(default, skip_serializing_if = "CmpZero::is_zero")] - channel_not_found: usize, - #[serde(skip_serializing_if = "Vec::is_empty")] - errors: Vec, -} - -impl StatusBoardEntryUser { - pub fn new_all_good() -> Self { - Self { - error_count: 0, - warn_count: 0, - channel_not_found: 0, - errors: Vec::new(), - } - } -} - -impl From<&StatusBoardEntry> for StatusBoardEntryUser { - fn from(e: &StatusBoardEntry) -> Self { - Self { - error_count: e.error_count, - warn_count: e.warn_count, - channel_not_found: e.channel_not_found, - errors: e - .errors - .iter() - .map(|e| err::ToPublicError::to_public_error(e)) - .collect(), - } - } -} - -#[derive(Debug, Serialize)] -pub struct StatusBoard { - entries: BTreeMap, -} - -impl StatusBoard { - pub fn new() -> Self { - Self { - entries: BTreeMap::new(), - } - } - - pub fn new_status_id(&mut self) -> String { - self.clean_if_needed(); - let n: u32 = rand::random(); - let s = format!("{:08x}", n); - self.entries.insert(s.clone(), StatusBoardEntry::new()); - s - } - - pub fn clean_if_needed(&mut self) { - if self.entries.len() > 15000 { - let mut tss: Vec<_> = self.entries.values().map(|e| e.ts_updated).collect(); - tss.sort_unstable(); - let tss = tss; - let tsm = tss[tss.len() / 3]; - let a = std::mem::replace(&mut self.entries, BTreeMap::new()); - self.entries = a.into_iter().filter(|(_k, v)| v.ts_updated >= tsm).collect(); - } - } - - pub fn get_entry(&mut self, status_id: &str) -> Option<&mut StatusBoardEntry> { - self.entries.get_mut(status_id) - } - - pub fn mark_alive(&mut self, status_id: &str) { - match self.entries.get_mut(status_id) { - Some(e) => { - e.ts_updated = SystemTime::now(); - } - None => { - error!("can not find status id {}", status_id); - } - } - } - - pub fn mark_done(&mut self, status_id: &str) { - match self.entries.get_mut(status_id) { - Some(e) => { - 
e.ts_updated = SystemTime::now(); - e.done = true; - } - None => { - error!("can not find status id {}", status_id); - } - } - } - - pub fn add_error(&mut self, status_id: &str, err: err::Error) { - match self.entries.get_mut(status_id) { - Some(e) => { - e.ts_updated = SystemTime::now(); - if e.errors.len() < 100 { - e.errors.push(err); - e.error_count += 1; - } - } - None => { - error!("can not find status id {}", status_id); - } - } - } - - pub fn status_as_json(&self, status_id: &str) -> Option { - match self.entries.get(status_id) { - Some(e) => Some(e.into()), - None => None, - } - } -} - -#[derive(Debug)] -pub enum StatusBoardError { - CantAcquire, -} - -impl fmt::Display for StatusBoardError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{self:?}") - } -} - -pub fn status_board() -> Result, StatusBoardError> { - let x = unsafe { &*STATUS_BOARD.load(atomic::Ordering::SeqCst) }.write(); - match x { - Ok(x) => Ok(x), - Err(e) => { - error!("{e}"); - Err(StatusBoardError::CantAcquire) - } - } -} - -pub fn status_board_init() { - static STATUS_BOARD_INIT: Once = Once::new(); - STATUS_BOARD_INIT.call_once(|| { - let b = StatusBoard::new(); - let a = RwLock::new(b); - let x = Box::new(a); - STATUS_BOARD.store(Box::into_raw(x), atomic::Ordering::SeqCst); - }); -} - -#[derive(Debug, ThisError)] -#[cstm(name = "UriError")] -pub enum UriError { - ParseError(Uri), -} - -pub fn req_uri_to_url(uri: &Uri) -> Result { - if uri.scheme().is_none() { - format!("dummy:{uri}") - .parse() - .map_err(|_| UriError::ParseError(uri.clone())) - } else { - uri.to_string().parse().map_err(|_| UriError::ParseError(uri.clone())) - } -} diff --git a/crates/netpod/src/query.rs b/crates/netpod/src/query.rs deleted file mode 100644 index a8b2e47..0000000 --- a/crates/netpod/src/query.rs +++ /dev/null @@ -1,376 +0,0 @@ -pub mod api1; -pub mod datetime; -pub mod prebinned; - -use daqbuf_err as err; - -use crate::get_url_query_pairs; -use crate::log::*; -use crate::AggKind; -use crate::AppendToUrl; -use crate::FromUrl; -use crate::HasBackend; -use crate::HasTimeout; -use crate::NanoRange; -use crate::NetpodError; -use crate::PulseRange; -use crate::SfDbChannel; -use crate::ToNanos; -use crate::DATETIME_FMT_6MS; -use chrono::DateTime; -use chrono::TimeZone; -use chrono::Utc; -use err::Error; -use serde::Deserialize; -use serde::Serialize; -use std::collections::BTreeMap; -use std::fmt; -use std::time::Duration; -use url::Url; - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum CacheUsage { - Use, - Ignore, - Recreate, - V0NoCache, -} - -impl CacheUsage { - pub fn query_param_value(&self) -> String { - match self { - CacheUsage::Use => "use", - CacheUsage::Ignore => "ignore", - CacheUsage::Recreate => "recreate", - CacheUsage::V0NoCache => "v0nocache", - } - .into() - } - - // Missing query parameter is not an error - pub fn from_pairs(pairs: &BTreeMap) -> Result, NetpodError> { - pairs - .get("cacheUsage") - .map(|k| { - if k == "use" { - Ok(Some(CacheUsage::Use)) - } else if k == "ignore" { - Ok(Some(CacheUsage::Ignore)) - } else if k == "recreate" { - Ok(Some(CacheUsage::Recreate)) - } else if k == "v0nocache" { - Ok(Some(CacheUsage::V0NoCache)) - } else { - Err(NetpodError::BadCacheUsage(k.clone()))? 
- } - }) - .unwrap_or(Ok(None)) - } - - pub fn from_string(s: &str) -> Result { - let ret = if s == "ignore" { - CacheUsage::Ignore - } else if s == "recreate" { - CacheUsage::Recreate - } else if s == "use" { - CacheUsage::Use - } else if s == "v0nocache" { - CacheUsage::V0NoCache - } else { - return Err(Error::with_msg(format!("can not interpret cache usage string: {}", s))); - }; - Ok(ret) - } - - pub fn is_cache_write(&self) -> bool { - match self { - CacheUsage::Use => true, - CacheUsage::Ignore => false, - CacheUsage::Recreate => true, - CacheUsage::V0NoCache => false, - } - } - - pub fn is_cache_read(&self) -> bool { - match self { - CacheUsage::Use => true, - CacheUsage::Ignore => false, - CacheUsage::Recreate => false, - CacheUsage::V0NoCache => false, - } - } -} - -impl fmt::Display for CacheUsage { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{}", self.query_param_value()) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct TimeRangeQuery { - range: NanoRange, -} - -fn parse_time(v: &str) -> Result, NetpodError> { - if let Ok(x) = v.parse() { - Ok(x) - } else { - if v.ends_with("ago") { - let d = humantime::parse_duration(&v[..v.len() - 3]).map_err(|_| NetpodError::BadTimerange)?; - Ok(Utc::now() - d) - } else { - Err(NetpodError::BadTimerange) - } - } -} - -impl FromUrl for TimeRangeQuery { - type Error = NetpodError; - - fn from_url(url: &Url) -> Result { - let pairs = get_url_query_pairs(url); - Self::from_pairs(&pairs) - } - - fn from_pairs(pairs: &BTreeMap) -> Result { - if let (Some(beg), Some(end)) = (pairs.get("begDate"), pairs.get("endDate")) { - let ret = Self { - range: NanoRange { - beg: parse_time(beg)?.to_nanos(), - end: parse_time(end)?.to_nanos(), - }, - }; - Ok(ret) - } else if let (Some(beg), Some(end)) = (pairs.get("begNs"), pairs.get("endNs")) { - let ret = Self { - range: NanoRange { - beg: beg.parse()?, - end: end.parse()?, - }, - }; - Ok(ret) - } else { - Err(NetpodError::MissingTimerange) - } - } -} - -impl AppendToUrl for TimeRangeQuery { - fn append_to_url(&self, url: &mut Url) { - let date_fmt = DATETIME_FMT_6MS; - let mut g = url.query_pairs_mut(); - g.append_pair( - "begDate", - &Utc.timestamp_nanos(self.range.beg as i64).format(date_fmt).to_string(), - ); - g.append_pair( - "endDate", - &Utc.timestamp_nanos(self.range.end as i64).format(date_fmt).to_string(), - ); - } -} - -impl From for NanoRange { - fn from(k: TimeRangeQuery) -> Self { - Self { - beg: k.range.beg, - end: k.range.end, - } - } -} - -impl From<&NanoRange> for TimeRangeQuery { - fn from(k: &NanoRange) -> Self { - Self { - range: NanoRange { beg: k.beg, end: k.end }, - } - } -} - -impl From<&PulseRange> for PulseRangeQuery { - fn from(k: &PulseRange) -> Self { - Self { - range: PulseRange { beg: k.beg, end: k.end }, - } - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct PulseRangeQuery { - range: PulseRange, -} - -impl FromUrl for PulseRangeQuery { - type Error = NetpodError; - - fn from_url(url: &Url) -> Result { - let pairs = get_url_query_pairs(url); - Self::from_pairs(&pairs) - } - - fn from_pairs(pairs: &BTreeMap) -> Result { - if let (Some(beg), Some(end)) = (pairs.get("begPulse"), pairs.get("endPulse")) { - let ret = Self { - range: PulseRange { - beg: beg.parse()?, - end: end.parse()?, - }, - }; - Ok(ret) - } else { - Err(NetpodError::MissingQueryParameters) - } - } -} - -impl AppendToUrl for PulseRangeQuery { - fn append_to_url(&self, url: &mut Url) { - let mut g = url.query_pairs_mut(); - 
g.append_pair("begPulse", &self.range.beg.to_string()); - g.append_pair("endPulse", &self.range.end.to_string()); - } -} - -impl From for PulseRange { - fn from(k: PulseRangeQuery) -> Self { - Self { - beg: k.range.beg, - end: k.range.end, - } - } -} - -pub fn binning_scheme_append_to_url(agg_kind: &AggKind, url: &mut Url) { - let mut g = url.query_pairs_mut(); - match agg_kind { - AggKind::EventBlobs => { - g.append_pair("binningScheme", "eventBlobs"); - } - AggKind::TimeWeightedScalar => { - g.append_pair("binningScheme", "timeWeightedScalar"); - } - AggKind::Plain => { - g.append_pair("binningScheme", "fullValue"); - } - AggKind::DimXBins1 => { - g.append_pair("binningScheme", "unweightedScalar"); - } - AggKind::DimXBinsN(n) => { - g.append_pair("binningScheme", "binnedX"); - g.append_pair("binnedXcount", &format!("{}", n)); - } - AggKind::PulseIdDiff => { - g.append_pair("binningScheme", "pulseIdDiff"); - } - } -} - -// Absent AggKind is not considered an error. -pub fn agg_kind_from_binning_scheme(pairs: &BTreeMap) -> Result, NetpodError> { - let key = "binningScheme"; - if let Some(s) = pairs.get(key) { - let ret = if s == "eventBlobs" { - AggKind::EventBlobs - } else if s == "fullValue" { - AggKind::Plain - } else if s == "timeWeightedScalar" { - AggKind::TimeWeightedScalar - } else if s == "unweightedScalar" { - AggKind::DimXBins1 - } else if s == "binnedX" { - let u = pairs.get("binnedXcount").map_or("1", |k| k).parse()?; - AggKind::DimXBinsN(u) - } else if s == "pulseIdDiff" { - AggKind::PulseIdDiff - } else { - return Err(NetpodError::MissingBinningScheme); - }; - Ok(Some(ret)) - } else { - Ok(None) - } -} - -#[derive(Clone, Debug)] -pub struct ChannelStateEventsQuery { - channel: SfDbChannel, - range: NanoRange, -} - -impl ChannelStateEventsQuery { - pub fn new(channel: SfDbChannel, range: NanoRange) -> Self { - Self { channel, range } - } - - pub fn range(&self) -> &NanoRange { - &self.range - } - - pub fn channel(&self) -> &SfDbChannel { - &self.channel - } - - pub fn set_series_id(&mut self, series: u64) { - self.channel.series = Some(series); - } - - pub fn channel_mut(&mut self) -> &mut SfDbChannel { - &mut self.channel - } -} - -impl HasBackend for ChannelStateEventsQuery { - fn backend(&self) -> &str { - &self.channel.backend - } -} - -impl HasTimeout for ChannelStateEventsQuery { - fn timeout(&self) -> Option { - None - } -} - -impl FromUrl for ChannelStateEventsQuery { - type Error = NetpodError; - - fn from_url(url: &Url) -> Result { - let pairs = get_url_query_pairs(url); - Self::from_pairs(&pairs) - } - - fn from_pairs(pairs: &BTreeMap) -> Result { - let beg_date = pairs.get("begDate").ok_or_else(|| NetpodError::MissingTimerange)?; - let end_date = pairs.get("endDate").ok_or_else(|| NetpodError::MissingTimerange)?; - let ret = Self { - channel: SfDbChannel::from_pairs(&pairs)?, - range: NanoRange { - beg: beg_date.parse::>()?.to_nanos(), - end: end_date.parse::>()?.to_nanos(), - }, - }; - let self_name = std::any::type_name::(); - debug!("{self_name}::from_url {ret:?}"); - Ok(ret) - } -} - -impl AppendToUrl for ChannelStateEventsQuery { - fn append_to_url(&self, url: &mut Url) { - self.channel.append_to_url(url); - let mut g = url.query_pairs_mut(); - g.append_pair( - "begDate", - &Utc.timestamp_nanos(self.range.beg as i64) - .format(DATETIME_FMT_6MS) - .to_string(), - ); - g.append_pair( - "endDate", - &Utc.timestamp_nanos(self.range.end as i64) - .format(DATETIME_FMT_6MS) - .to_string(), - ); - } -} diff --git a/crates/netpod/src/query/api1.rs 
b/crates/netpod/src/query/api1.rs deleted file mode 100644 index 7fed861..0000000 --- a/crates/netpod/src/query/api1.rs +++ /dev/null @@ -1,317 +0,0 @@ -use crate::query::datetime::Datetime; -use crate::DiskIoTune; -use crate::FileIoBufferSize; -use crate::ReadSys; -use daqbuf_err as err; -use err::Error; -use serde::Deserialize; -use serde::Serialize; -use std::fmt; -use std::time::Duration; - -#[derive(Debug, PartialEq, Serialize, Deserialize)] -pub struct Api1Range { - #[serde(rename = "type", default, skip_serializing_if = "String::is_empty")] - ty: String, - #[serde(rename = "startDate")] - beg: Datetime, - #[serde(rename = "endDate")] - end: Datetime, -} - -impl Api1Range { - pub fn new(beg: Datetime, end: Datetime) -> Result { - let ret = Self { - ty: String::new(), - beg, - end, - }; - Ok(ret) - } - - pub fn beg(&self) -> &Datetime { - &self.beg - } - - pub fn end(&self) -> &Datetime { - &self.end - } -} - -#[test] -fn serde_de_range_zulu() { - let s = r#"{"startDate": "2022-11-22T10:15:12.412Z", "endDate": "2022-11-22T10:15:12.413556Z"}"#; - let range: Api1Range = serde_json::from_str(s).unwrap(); - assert_eq!(range.beg().offset().local_minus_utc(), 0); - assert_eq!(range.end().offset().local_minus_utc(), 0); - assert_eq!(range.beg().timestamp_subsec_micros(), 412000); - assert_eq!(range.end().timestamp_subsec_micros(), 413556); -} - -#[test] -fn serde_de_range_offset() { - let s = r#"{"startDate": "2022-11-22T10:15:12.412Z", "endDate": "2022-11-22T10:15:12.413556Z"}"#; - let range: Api1Range = serde_json::from_str(s).unwrap(); - assert_eq!(range.beg().offset().local_minus_utc(), 0); - assert_eq!(range.end().offset().local_minus_utc(), 0); - assert_eq!(range.beg().timestamp_subsec_micros(), 412000); - assert_eq!(range.end().timestamp_subsec_micros(), 413556); -} - -#[test] -fn serde_ser_range_offset() { - use chrono::{FixedOffset, NaiveDate, TimeZone}; - let beg = FixedOffset::east_opt(60 * 60 * 3) - .unwrap() - .from_local_datetime( - &NaiveDate::from_ymd_opt(2022, 11, 22) - .unwrap() - .and_hms_milli_opt(13, 14, 15, 16) - .unwrap(), - ) - .earliest() - .unwrap(); - let end = FixedOffset::east_opt(-60 * 60 * 1) - .unwrap() - .from_local_datetime( - &NaiveDate::from_ymd_opt(2022, 11, 22) - .unwrap() - .and_hms_milli_opt(13, 14, 15, 800) - .unwrap(), - ) - .earliest() - .unwrap(); - let range = Api1Range::new(beg.into(), end.into()).unwrap(); - let js = serde_json::to_string(&range).unwrap(); - let exp = r#"{"startDate":"2022-11-22T13:14:15.016+03:00","endDate":"2022-11-22T13:14:15.800-01:00"}"#; - assert_eq!(js, exp); -} - -#[test] -fn serde_ser_range_01() -> Result<(), Error> { - let beg = Datetime::try_from("2022-11-22T02:03:04Z")?; - let end = Datetime::try_from("2022-11-22T02:03:04.123Z")?; - let range = Api1Range::new(beg, end).unwrap(); - let js = serde_json::to_string(&range).unwrap(); - let exp = r#"{"startDate":"2022-11-22T02:03:04Z","endDate":"2022-11-22T02:03:04.123Z"}"#; - assert_eq!(js, exp); - Ok(()) -} - -#[test] -fn serde_ser_range_02() -> Result<(), Error> { - let beg = Datetime::try_from("2022-11-22T02:03:04.987654Z")?; - let end = Datetime::try_from("2022-11-22T02:03:04.777000Z")?; - let range = Api1Range::new(beg, end).unwrap(); - let js = serde_json::to_string(&range).unwrap(); - let exp = r#"{"startDate":"2022-11-22T02:03:04.987654Z","endDate":"2022-11-22T02:03:04.777Z"}"#; - assert_eq!(js, exp); - Ok(()) -} - -/// In Api1, the list of channels consists of either `BACKEND/CHANNELNAME` -/// or just `CHANNELNAME`. 
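The split rule that the serde `Visitor` below implements can be stated compactly: the first `/` separates an optional backend from the channel name. A standalone sketch of that logic (behavior copied from the visitor, including its quirk that a second `/` and anything after it is dropped):

```rust
// Mirrors ChannelTuple's Visitor::visit_str: split on '/', first segment is
// the backend if a second segment exists, otherwise the whole string is the name.
fn split_channel(val: &str) -> (Option<&str>, &str) {
    let mut it = val.split('/');
    // Even the empty string splits into one (empty) segment.
    let s0 = it.next().unwrap();
    match it.next() {
        // Note: with this rule "a/b/c" parses as backend "a", name "b",
        // silently dropping "/c", exactly as the visitor does.
        Some(s1) => (Some(s0), s1),
        None => (None, s0),
    }
}

fn main() {
    assert_eq!(split_channel("beach/temperature"), (Some("beach"), "temperature"));
    assert_eq!(split_channel("temperature"), (None, "temperature"));
}
```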
-#[derive(Debug, PartialEq)]
-pub struct ChannelTuple {
-    backend: Option<String>,
-    name: String,
-}
-
-impl ChannelTuple {
-    pub fn new(backend: String, name: String) -> Self {
-        Self {
-            backend: Some(backend),
-            name,
-        }
-    }
-
-    pub fn from_name(name: String) -> Self {
-        Self { backend: None, name }
-    }
-
-    pub fn backend(&self) -> Option<&String> {
-        self.backend.as_ref()
-    }
-
-    pub fn name(&self) -> &str {
-        &self.name
-    }
-}
-
-mod serde_channel_tuple {
-    use super::*;
-    use serde::de::{Deserialize, Deserializer, Visitor};
-    use serde::ser::{Serialize, Serializer};
-
-    impl Serialize for ChannelTuple {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: Serializer,
-        {
-            if let Some(backend) = self.backend.as_ref() {
-                serializer.serialize_str(&format!("{}/{}", backend, self.name))
-            } else {
-                serializer.serialize_str(&self.name)
-            }
-        }
-    }
-
-    struct Vis;
-
-    impl<'de> Visitor<'de> for Vis {
-        type Value = ChannelTuple;
-
-        fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-            write!(fmt, "[Backendname/]Channelname")
-        }
-
-        fn visit_str<E>(self, val: &str) -> Result<Self::Value, E>
-        where
-            E: serde::de::Error,
-        {
-            let mut it = val.split("/");
-            // Even empty string splits into one element of empty string
-            let s0 = it.next().unwrap();
-            if let Some(s1) = it.next() {
-                let ret = ChannelTuple {
-                    backend: Some(s0.into()),
-                    name: s1.into(),
-                };
-                Ok(ret)
-            } else {
-                let ret = ChannelTuple {
-                    backend: None,
-                    name: s0.into(),
-                };
-                Ok(ret)
-            }
-        }
-    }
-
-    impl<'de> Deserialize<'de> for ChannelTuple {
-        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-        where
-            D: Deserializer<'de>,
-        {
-            deserializer.deserialize_str(Vis)
-        }
-    }
-
-    #[test]
-    fn ser_name() {
-        let x = ChannelTuple {
-            backend: None,
-            name: "temperature".into(),
-        };
-        let js = serde_json::to_string(&x).unwrap();
-        assert_eq!(js, r#""temperature""#);
-    }
-
-    #[test]
-    fn ser_backend_name() {
-        let x = ChannelTuple {
-            backend: Some("beach".into()),
-            name: "temperature".into(),
-        };
-        let js = serde_json::to_string(&x).unwrap();
-        assert_eq!(js, r#""beach/temperature""#);
-    }
-}
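
The `[BACKEND/]CHANNELNAME` split that the visitor above implements is easy to misread, so here is the same rule restated without serde: only the first `/` separates backend from name, and everything after a second `/` is dropped by the iterator. A standalone sketch, independent of the deleted types:

```rust
// Same splitting rule as Vis::visit_str above, without the serde machinery.
fn split_channel(val: &str) -> (Option<&str>, &str) {
    let mut it = val.split('/');
    // Even an empty string yields one (empty) element.
    let s0 = it.next().unwrap();
    match it.next() {
        Some(s1) => (Some(s0), s1),
        None => (None, s0),
    }
}

fn main() {
    assert_eq!(split_channel("beach/temperature"), (Some("beach"), "temperature"));
    assert_eq!(split_channel("temperature"), (None, "temperature"));
    // Only the first two segments are consumed; the rest is silently ignored.
    assert_eq!(split_channel("a/b/c"), (Some("a"), "b"));
}
```
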
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
-pub struct Api1Query {
-    range: Api1Range,
-    channels: Vec<ChannelTuple>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    timeout: Option<Duration>,
-    // All following parameters are private and not to be used
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    file_io_buffer_size: Option<FileIoBufferSize>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    decompress: Option<bool>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    events_max: Option<u64>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    io_queue_len: Option<u32>,
-    #[serde(default, skip_serializing_if = "String::is_empty")]
-    log_level: String,
-    #[serde(default, skip_serializing_if = "String::is_empty")]
-    read_sys: String,
-}
-
-impl Api1Query {
-    pub fn new(range: Api1Range, channels: Vec<ChannelTuple>) -> Self {
-        Self {
-            range,
-            channels,
-            timeout: None,
-            decompress: None,
-            events_max: None,
-            file_io_buffer_size: None,
-            io_queue_len: None,
-            log_level: String::new(),
-            read_sys: String::new(),
-        }
-    }
-
-    pub fn disk_io_tune(&self) -> DiskIoTune {
-        let mut k = DiskIoTune::default();
-        if let Some(x) = &self.file_io_buffer_size {
-            k.read_buffer_len = x.0;
-        }
-        if let Some(x) = self.io_queue_len {
-            k.read_queue_len = x as usize;
-        }
-        let read_sys: ReadSys = self.read_sys.as_str().into();
-        k.read_sys = read_sys;
-        k
-    }
-
-    pub fn range(&self) -> &Api1Range {
-        &self.range
-    }
-
-    pub fn channels(&self) -> &[ChannelTuple] {
-        &self.channels
-    }
-
-    pub fn timeout(&self) -> Option<Duration> {
-        self.timeout
-    }
-
-    pub fn timeout_or_default(&self) -> Duration {
-        Duration::from_secs(60 * 30)
-    }
-
-    pub fn log_level(&self) -> &str {
-        &self.log_level
-    }
-
-    pub fn decompress(&self) -> Option<bool> {
-        self.decompress
-    }
-
-    pub fn events_max(&self) -> Option<u64> {
-        self.events_max
-    }
-
-    pub fn set_decompress(&mut self, v: Option<bool>) {
-        self.decompress = v;
-    }
-}
-
-#[test]
-fn serde_api1_query() -> Result<(), Error> {
-    let beg = Datetime::try_from("2022-11-22T08:09:10Z")?;
-    let end = Datetime::try_from("2022-11-23T08:11:05.455009+02:00")?;
-    let range = Api1Range::new(beg, end).unwrap();
-    let ch0 = ChannelTuple::from_name("nameonly".into());
-    let ch1 = ChannelTuple::new("somebackend".into(), "somechan".into());
-    let qu = Api1Query::new(range, vec![ch0, ch1]);
-    let js = serde_json::to_string(&qu).unwrap();
-    assert_eq!(
-        js,
-        r#"{"range":{"startDate":"2022-11-22T08:09:10Z","endDate":"2022-11-23T08:11:05.455009+02:00"},"channels":["nameonly","somebackend/somechan"]}"#
-    );
-    Ok(())
-}
diff --git a/crates/netpod/src/query/datetime.rs b/crates/netpod/src/query/datetime.rs
deleted file mode 100644
index c3d3d2c..0000000
--- a/crates/netpod/src/query/datetime.rs
+++ /dev/null
@@ -1,192 +0,0 @@
-use chrono::DateTime;
-use chrono::FixedOffset;
-use daqbuf_err as err;
-use err::Error;
-use serde::de::Visitor;
-use serde::Deserialize;
-use serde::Serialize;
-use std::fmt;
-use std::ops;
-
-#[derive(Clone, Debug, PartialEq)]
-pub struct Datetime(DateTime<FixedOffset>);
-
-impl From<DateTime<FixedOffset>> for Datetime {
-    fn from(x: DateTime<FixedOffset>) -> Self {
-        Datetime(x)
-    }
-}
-
-impl TryFrom<&str> for Datetime {
-    type Error = Error;
-
-    fn try_from(val: &str) -> Result<Self, Self::Error> {
-        let dt =
-            DateTime::<FixedOffset>::parse_from_rfc3339(val).map_err(|e| Error::with_msg_no_trace(format!("{e}")))?;
-        Ok(Datetime(dt))
-    }
-}
-
-impl ops::Deref for Datetime {
-    type Target = DateTime<FixedOffset>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-// RFC 3339 (subset of ISO 8601)
-
-impl Serialize for Datetime {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        use fmt::Write;
-        use serde::ser::Error;
-        let val = &self.0;
-        let mut s = String::with_capacity(64);
-        write!(&mut s, "{}", val.format("%Y-%m-%dT%H:%M:%S")).map_err(|_| Error::custom("fmt"))?;
-        let ns = val.timestamp_subsec_nanos();
-        let mus = val.timestamp_subsec_micros();
-        if ns % 1000 != 0 {
-            write!(&mut s, "{}", val.format(".%9f")).map_err(|_| Error::custom("fmt"))?;
-        } else if mus % 1000 != 0 {
-            write!(&mut s, "{}", val.format(".%6f")).map_err(|_| Error::custom("fmt"))?;
-        } else if mus != 0 {
-            write!(&mut s, "{}", val.format(".%3f")).map_err(|_| Error::custom("fmt"))?;
-        }
-        if val.offset().local_minus_utc() == 0 {
-            write!(&mut s, "Z").map_err(|_| Error::custom("fmt"))?;
-        } else {
-            write!(&mut s, "{}", val.format("%:z")).map_err(|_| Error::custom("fmt"))?;
-        }
-        serializer.collect_str(&s)
-    }
-}
-
-mod ser_impl_2 {
-    use super::Datetime;
-    use crate::DATETIME_FMT_0MS;
-    use crate::DATETIME_FMT_3MS;
-    use crate::DATETIME_FMT_6MS;
-    use crate::DATETIME_FMT_9MS;
-    use fmt::Write;
-    use serde::ser::Error;
-    use std::fmt;
-
-    #[allow(unused)]
-    fn serialize<S>(obj: &Datetime, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        let val = &obj.0;
-        let mut s = String::with_capacity(64);
-        write!(&mut s, "{}", val.format("%Y-%m-%dT%H:%M:%S")).map_err(|_| Error::custom("fmt"))?;
-        let ns = val.timestamp_subsec_nanos();
-        let s = if ns % 1000 != 0 {
-            val.format(DATETIME_FMT_9MS)
-        } else {
-            let mus = val.timestamp_subsec_micros();
-            if mus % 1000 != 0 {
-                val.format(DATETIME_FMT_6MS)
-            } else {
-                let ms = val.timestamp_subsec_millis();
-                if ms != 0 {
-                    val.format(DATETIME_FMT_3MS)
-                } else {
-                    val.format(DATETIME_FMT_0MS)
-                }
-            }
-        };
-        serializer.collect_str(&s)
-    }
-}
-
-struct Vis1;
-
-impl<'de> Visitor<'de> for Vis1 {
-    type Value = Datetime;
-
-    fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(fmt, "Datetime")
-    }
-
-    fn visit_str<E>(self, val: &str) -> Result<Self::Value, E>
-    where
-        E: serde::de::Error,
-    {
-        Datetime::try_from(val).map_err(|e| serde::de::Error::custom(format!("{e}")))
-    }
-}
-
-impl<'de> Deserialize<'de> for Datetime {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        deserializer.deserialize_str(Vis1)
-    }
-}
-
-#[test]
-fn ser_00() {
-    use chrono::TimeZone;
-    let x = FixedOffset::east_opt(0)
-        .unwrap()
-        .with_ymd_and_hms(2023, 2, 3, 15, 12, 40)
-        .earliest()
-        .unwrap();
-    let x = Datetime(x);
-    let s = serde_json::to_string(&x).unwrap();
-
-    assert_eq!(s, r#""2023-02-03T15:12:40Z""#);
-}
-
-#[test]
-fn ser_01() {
-    use chrono::TimeZone;
-    let x = FixedOffset::east_opt(0)
-        .unwrap()
-        .with_ymd_and_hms(2023, 2, 3, 15, 12, 40)
-        .earliest()
-        .unwrap()
-        .checked_add_signed(chrono::Duration::milliseconds(876))
-        .unwrap();
-    let x = Datetime(x);
-    let s = serde_json::to_string(&x).unwrap();
-
-    assert_eq!(s, r#""2023-02-03T15:12:40.876Z""#);
-}
-
-#[test]
-fn ser_02() {
-    use chrono::TimeZone;
-    let x = FixedOffset::east_opt(0)
-        .unwrap()
-        .with_ymd_and_hms(2023, 2, 3, 15, 12, 40)
-        .earliest()
-        .unwrap()
-        .checked_add_signed(chrono::Duration::nanoseconds(543430000))
-        .unwrap();
-    let x = Datetime(x);
-    let s = serde_json::to_string(&x).unwrap();
-
-    assert_eq!(s, r#""2023-02-03T15:12:40.543430Z""#);
-}
-
-#[test]
-fn ser_03() {
-    use chrono::TimeZone;
-    let x = FixedOffset::east_opt(0)
-        .unwrap()
-        .with_ymd_and_hms(2023, 2, 3, 15, 12, 40)
-        .earliest()
-        .unwrap()
-        .checked_add_signed(chrono::Duration::nanoseconds(543432321))
-        .unwrap();
-    let x = Datetime(x);
-    let s = serde_json::to_string(&x).unwrap();
-
-    assert_eq!(s, r#""2023-02-03T15:12:40.543432321Z""#);
-}
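
The serializer deleted above picks the shortest of 0, 3, 6, or 9 subsecond digits that loses no precision. The same selection can be reproduced standalone with chrono's formatter; this sketch assumes chrono 0.4 and mirrors only the subsecond-width logic, not the offset handling:

```rust
use chrono::{DateTime, FixedOffset};

// Pick the shortest lossless subsecond width, as in the deleted Serialize impl.
fn subsec_fmt(dt: &DateTime<FixedOffset>) -> &'static str {
    let ns = dt.timestamp_subsec_nanos();
    if ns % 1_000 != 0 {
        ".%9f" // sub-microsecond precision present: 9 digits
    } else if ns % 1_000_000 != 0 {
        ".%6f" // sub-millisecond precision present: 6 digits
    } else if ns != 0 {
        ".%3f" // whole milliseconds: 3 digits
    } else {
        "" // whole seconds: omit the subsecond part entirely
    }
}

fn main() {
    let dt = DateTime::parse_from_rfc3339("2023-02-03T15:12:40.876Z").unwrap();
    let s = format!("{}{}", dt.format("%Y-%m-%dT%H:%M:%S"), dt.format(subsec_fmt(&dt)));
    assert_eq!(s, "2023-02-03T15:12:40.876");
}
```
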
diff --git a/crates/netpod/src/query/prebinned.rs b/crates/netpod/src/query/prebinned.rs
deleted file mode 100644
index 80a4f18..0000000
--- a/crates/netpod/src/query/prebinned.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use super::agg_kind_from_binning_scheme;
-use super::binning_scheme_append_to_url;
-use super::CacheUsage;
-use crate::AggKind;
-use crate::AppendToUrl;
-use crate::ByteSize;
-use crate::FromUrl;
-use crate::NetpodError;
-use crate::PreBinnedPatchCoordEnum;
-use crate::ScalarType;
-use crate::SfDbChannel;
-use crate::Shape;
-use std::collections::BTreeMap;
-use url::Url;
-
-#[derive(Clone, Debug)]
-pub struct PreBinnedQuery {
-    patch: PreBinnedPatchCoordEnum,
-    channel: SfDbChannel,
-    scalar_type: ScalarType,
-    shape: Shape,
-    agg_kind: Option<AggKind>,
-    cache_usage: Option<CacheUsage>,
-    buf_len_disk_io: Option<usize>,
-    disk_stats_every: Option<ByteSize>,
-}
-
-impl PreBinnedQuery {
-    pub fn new(
-        patch: PreBinnedPatchCoordEnum,
-        channel: SfDbChannel,
-        scalar_type: ScalarType,
-        shape: Shape,
-        agg_kind: Option<AggKind>,
-        cache_usage: Option<CacheUsage>,
-        buf_len_disk_io: Option<usize>,
-        disk_stats_every: Option<ByteSize>,
-    ) -> Self {
-        Self {
-            patch,
-            channel,
-            scalar_type,
-            shape,
-            agg_kind,
-            cache_usage,
-            buf_len_disk_io,
-            disk_stats_every,
-        }
-    }
-
-    pub fn from_url(url: &Url) -> Result<Self, NetpodError> {
-        let mut pairs = BTreeMap::new();
-        for (j, k) in url.query_pairs() {
-            pairs.insert(j.to_string(), k.to_string());
-        }
-        let pairs = pairs;
-        let scalar_type = pairs
-            .get("scalarType")
-            .ok_or_else(|| NetpodError::MissingScalarType)
-            .map(|x| ScalarType::from_url_str(&x))??;
-        let shape = pairs
-            .get("shape")
-            .ok_or_else(|| NetpodError::MissingShape)
-            .map(|x| Shape::from_url_str(&x))??;
-        let ret = Self {
-            patch: PreBinnedPatchCoordEnum::from_pairs(&pairs)?,
-            channel: SfDbChannel::from_pairs(&pairs)?,
-            scalar_type,
-            shape,
-            agg_kind: agg_kind_from_binning_scheme(&pairs)?,
-            cache_usage: CacheUsage::from_pairs(&pairs)?,
-            buf_len_disk_io: pairs
-                .get("bufLenDiskIo")
-                .map_or(Ok(None), |k| k.parse().map(|k| Some(k)))?,
-            disk_stats_every: pairs
-                .get("diskStatsEveryKb")
-                .map(|k| k.parse().ok())
-                .unwrap_or(None)
-                .map(ByteSize::from_kb),
-        };
-        Ok(ret)
-    }
-
-    pub fn patch(&self) -> &PreBinnedPatchCoordEnum {
-        &self.patch
-    }
-
-    pub fn channel(&self) -> &SfDbChannel {
-        &self.channel
-    }
-
-    pub fn scalar_type(&self) -> &ScalarType {
-        &self.scalar_type
-    }
-
-    pub fn shape(&self) -> &Shape {
-        &self.shape
-    }
-
-    pub fn agg_kind(&self) -> &Option<AggKind> {
-        &self.agg_kind
-    }
-
-    pub fn disk_stats_every(&self) -> ByteSize {
-        match &self.disk_stats_every {
-            Some(x) => x.clone(),
-            None => ByteSize(1024 * 1024 * 4),
-        }
-    }
-
-    pub fn cache_usage(&self) -> CacheUsage {
-        self.cache_usage.as_ref().map_or(CacheUsage::Use, |x| x.clone())
-    }
-
-    pub fn buf_len_disk_io(&self) -> usize {
-        self.buf_len_disk_io.unwrap_or(1024 * 8)
-    }
-}
-
-impl AppendToUrl for PreBinnedQuery {
-    fn append_to_url(&self, url: &mut Url) {
-        if false {
-            panic!("remove, not in use");
-        }
-        self.patch.append_to_url(url);
-        self.channel.append_to_url(url);
-        self.shape.append_to_url(url);
-        self.scalar_type.append_to_url(url);
-        if let Some(x) = &self.agg_kind {
-            binning_scheme_append_to_url(x, url);
-        }
-        let mut g = url.query_pairs_mut();
-        // TODO add also impl AppendToUrl for these if applicable:
-        if let Some(x) = &self.cache_usage {
-            g.append_pair("cacheUsage", &x.query_param_value());
-        }
-        if let Some(x) = self.buf_len_disk_io {
-            g.append_pair("bufLenDiskIo", &format!("{}", x));
-        }
-        if let Some(x) = &self.disk_stats_every {
-            g.append_pair("diskStatsEveryKb", &format!("{}", x.bytes() / 1024));
-        }
-    }
-}
diff --git a/crates/netpod/src/range.rs b/crates/netpod/src/range.rs
deleted file mode 100644
index 3ed6f24..0000000
--- a/crates/netpod/src/range.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub mod binrange;
-pub mod evrange;
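
The `bufLenDiskIo` parsing in `PreBinnedQuery::from_url` above uses a pattern worth calling out: an absent key yields `Ok(None)`, while a key that is present but malformed is a hard error rather than a silent default. A standalone sketch of that distinction:

```rust
use std::collections::BTreeMap;
use std::num::ParseIntError;

// Absent key -> Ok(None); present but malformed -> Err, as in PreBinnedQuery::from_url.
fn opt_parse(pairs: &BTreeMap<String, String>, key: &str) -> Result<Option<usize>, ParseIntError> {
    pairs.get(key).map_or(Ok(None), |k| k.parse().map(Some))
}

fn main() {
    let mut pairs = BTreeMap::new();
    assert_eq!(opt_parse(&pairs, "bufLenDiskIo"), Ok(None));
    pairs.insert("bufLenDiskIo".to_string(), "8192".to_string());
    assert_eq!(opt_parse(&pairs, "bufLenDiskIo"), Ok(Some(8192)));
    pairs.insert("bufLenDiskIo".to_string(), "oops".to_string());
    assert!(opt_parse(&pairs, "bufLenDiskIo").is_err());
}
```

Note that `diskStatsEveryKb` deliberately uses the other policy (`.parse().ok()`), so a malformed value there falls back to the default instead of failing the whole query.
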
diff --git a/crates/netpod/src/range/binrange.rs b/crates/netpod/src/range/binrange.rs
deleted file mode 100644
index ff7cd03..0000000
--- a/crates/netpod/src/range/binrange.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-use super::evrange::NanoRange;
-use super::evrange::SeriesRange;
-use crate::timeunits::SEC;
-use crate::BinnedRangeEnum;
-use crate::Dim0Kind;
-use crate::TsNano;
-use chrono::DateTime;
-use chrono::Utc;
-
-#[test]
-fn test_binned_range_covering_00() {
-    let range = SeriesRange::TimeRange(NanoRange::from_date_time(
-        DateTime::parse_from_rfc3339("1970-01-01T10:10:00Z").unwrap().into(),
-        DateTime::parse_from_rfc3339("1970-01-01T10:20:00Z").unwrap().into(),
-    ));
-    let r = BinnedRangeEnum::covering_range(range, 9).unwrap();
-    assert_eq!(r.bin_count(), 10);
-    if let Dim0Kind::Time = r.dim0kind() {
-    } else {
-        panic!()
-    }
-    let r2 = r.binned_range_time();
-    let a = r2.edges();
-    assert_eq!(a.len(), 1 + r.bin_count() as usize);
-    assert_eq!(a[0], TsNano((((10 * 60) + 10) * 60 + 0) * SEC));
-    assert_eq!(a[1], TsNano((((10 * 60) + 11) * 60 + 0) * SEC));
-    assert_eq!(a[10], TsNano((((10 * 60) + 20) * 60 + 0) * SEC));
-    let x = r.range_at(2).unwrap();
-    let y = SeriesRange::TimeRange(NanoRange {
-        beg: (((10 * 60) + 12) * 60 + 0) * SEC,
-        end: (((10 * 60) + 13) * 60 + 0) * SEC,
-    });
-    assert_eq!(x, y);
-}
-
-#[test]
-fn test_binned_range_covering_01() {
-    let range = SeriesRange::TimeRange(NanoRange::from_date_time(
-        DateTime::parse_from_rfc3339("1970-01-01T00:20:04Z").unwrap().into(),
-        DateTime::parse_from_rfc3339("1970-01-01T00:21:10Z").unwrap().into(),
-    ));
-    let r = BinnedRangeEnum::covering_range(range, 9).unwrap();
-    assert_eq!(r.bin_count(), 14);
-    if let Dim0Kind::Time = r.dim0kind() {
-    } else {
-        panic!()
-    }
-    let r2 = r.binned_range_time();
-    let a = r2.edges();
-    assert_eq!(a.len(), 1 + r.bin_count() as usize);
-    assert_eq!(a[0], TsNano((((0 * 60) + 20) * 60 + 0) * SEC));
-    assert_eq!(a[1], TsNano((((0 * 60) + 20) * 60 + 5) * SEC));
-    assert_eq!(a[14], TsNano((((0 * 60) + 21) * 60 + 10) * SEC));
-    let x = r.range_at(0).unwrap();
-    let y = SeriesRange::TimeRange(NanoRange {
-        beg: (((0 * 60) + 20) * 60 + 0) * SEC,
-        end: (((0 * 60) + 20) * 60 + 5) * SEC,
-    });
-    assert_eq!(x, y);
-}
-
-#[test]
-fn test_binned_range_covering_02() {
-    let range = SeriesRange::TimeRange(NanoRange::from_date_time(
-        DateTime::parse_from_rfc3339("1970-01-01T00:20:04Z").unwrap().into(),
-        DateTime::parse_from_rfc3339("1970-01-01T00:22:10Z").unwrap().into(),
-    ));
-    let r = BinnedRangeEnum::covering_range(range, 25).unwrap();
-    assert_eq!(r.bin_count(), 26);
-    if let Dim0Kind::Time = r.dim0kind() {
-    } else {
-        panic!()
-    }
-    let r2 = r.binned_range_time();
-    let a = r2.edges();
-    assert_eq!(a.len(), 1 + r.bin_count() as usize);
-    assert_eq!(a[0], TsNano((((0 * 60) + 20) * 60 + 0) * SEC));
-    assert_eq!(a[1], TsNano((((0 * 60) + 20) * 60 + 5) * SEC));
-    assert_eq!(a[14], TsNano((((0 * 60) + 21) * 60 + 10) * SEC));
-    let x = r.range_at(0).unwrap();
-    let y = SeriesRange::TimeRange(NanoRange {
-        beg: (((0 * 60) + 20) * 60 + 0) * SEC,
-        end: (((0 * 60) + 20) * 60 + 5) * SEC,
-    });
-    assert_eq!(x, y);
}
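
The nested edge arithmetic in these tests is easier to check with the units written out. A tiny standalone sketch of the same nanosecond bookkeeping for the first test's 10:10 to 10:20 range, assuming only that `SEC` is nanoseconds per second as in `timeunits::SEC`:

```rust
// Nanoseconds per second, matching the role of netpod::timeunits::SEC.
const SEC: u64 = 1_000_000_000;

// (((h * 60) + m) * 60 + s) * SEC, the expression shape used in the tests above.
fn hms_ns(h: u64, m: u64, s: u64) -> u64 {
    ((h * 60 + m) * 60 + s) * SEC
}

fn main() {
    // The 10:10..10:20 range, covered by ten 1-minute bins: eleven edges.
    let edges: Vec<u64> = (0..=10).map(|i| hms_ns(10, 10 + i, 0)).collect();
    assert_eq!(edges.len(), 11);
    assert_eq!(edges[0], hms_ns(10, 10, 0));
    assert_eq!(edges[10], hms_ns(10, 20, 0));
    // Bin 2 spans [10:12, 10:13), matching range_at(2) in test_binned_range_covering_00.
    assert_eq!((edges[2], edges[3]), (hms_ns(10, 12, 0), hms_ns(10, 13, 0)));
}
```
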
diff --git a/crates/netpod/src/range/evrange.rs b/crates/netpod/src/range/evrange.rs
deleted file mode 100644
index f53e9f0..0000000
--- a/crates/netpod/src/range/evrange.rs
+++ /dev/null
@@ -1,220 +0,0 @@
-use crate::query::PulseRangeQuery;
-use crate::query::TimeRangeQuery;
-use crate::timeunits::SEC;
-use crate::AppendToUrl;
-use crate::Dim0Kind;
-use crate::FromUrl;
-use crate::NetpodError;
-use crate::TsNano;
-use chrono::DateTime;
-use chrono::TimeZone;
-use chrono::Utc;
-use daqbuf_err as err;
-use err::Error;
-use serde::Deserialize;
-use serde::Serialize;
-use std::collections::BTreeMap;
-use std::fmt;
-use url::Url;
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum TimeRange {
-    Time { beg: DateTime<Utc>, end: DateTime<Utc> },
-    Pulse { beg: u64, end: u64 },
-    Nano { beg: u64, end: u64 },
-}
-
-#[derive(Clone, Serialize, Deserialize, PartialEq)]
-pub struct NanoRange {
-    pub beg: u64,
-    pub end: u64,
-}
-
-impl fmt::Debug for NanoRange {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        if true {
-            let beg = TsNano(self.beg);
-            let end = TsNano(self.end);
-            write!(fmt, "NanoRange {{ beg: {}, end: {} }}", beg.fmt(), end.fmt())
-        } else if false {
-            let beg = TsNano(self.beg);
-            let end = TsNano(self.end);
-            fmt.debug_struct("NanoRange")
-                .field("beg", &beg)
-                .field("end", &end)
-                .finish()
-        } else {
-            let beg = chrono::Utc
-                .timestamp_opt((self.beg / SEC) as i64, (self.beg % SEC) as u32)
-                .earliest();
-            let end = chrono::Utc
-                .timestamp_opt((self.end / SEC) as i64, (self.end % SEC) as u32)
-                .earliest();
-            if let (Some(a), Some(b)) = (beg, end) {
-                fmt.debug_struct("NanoRange").field("beg", &a).field("end", &b).finish()
-            } else {
-                fmt.debug_struct("NanoRange")
-                    .field("beg", &beg)
-                    .field("end", &end)
-                    .finish()
-            }
-        }
-    }
-}
-
-impl fmt::Display for NanoRange {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Debug::fmt(self, fmt)
-    }
-}
-
-impl NanoRange {
-    pub fn from_date_time(beg: DateTime<Utc>, end: DateTime<Utc>) -> Self {
-        Self {
-            beg: beg.timestamp_nanos_opt().unwrap_or(0) as u64,
-            end: end.timestamp_nanos_opt().unwrap_or(0) as u64,
-        }
-    }
-
-    pub fn from_ns_u64(beg: u64, end: u64) -> Self {
-        Self { beg, end }
-    }
-
-    pub fn delta(&self) -> u64 {
-        self.end - self.beg
-    }
-
-    pub fn beg(&self) -> u64 {
-        self.beg
-    }
-
-    pub fn end(&self) -> u64 {
-        self.end
-    }
-}
-
-impl From<(u64, u64)> for NanoRange {
-    fn from(value: (u64, u64)) -> Self {
-        Self {
-            beg: value.0,
-            end: value.1,
-        }
-    }
-}
-
-impl TryFrom<&SeriesRange> for NanoRange {
-    type Error = NetpodError;
-
-    fn try_from(val: &SeriesRange) -> Result<Self, Self::Error> {
-        match val {
-            SeriesRange::TimeRange(x) => Ok(x.clone()),
-            SeriesRange::PulseRange(_) => Err(NetpodError::NotTimerange),
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
-pub struct PulseRange {
-    pub beg: u64,
-    pub end: u64,
-}
-
-#[derive(Clone, Serialize, Deserialize, PartialEq)]
-pub enum SeriesRange {
-    TimeRange(NanoRange),
-    PulseRange(PulseRange),
-}
-
-impl SeriesRange {
-    pub fn dim0kind(&self) -> Dim0Kind {
-        match self {
-            SeriesRange::TimeRange(_) => Dim0Kind::Time,
-            SeriesRange::PulseRange(_) => Dim0Kind::Pulse,
-        }
-    }
-
-    pub fn is_time(&self) -> bool {
-        match self {
-            SeriesRange::TimeRange(_) => true,
-            SeriesRange::PulseRange(_) => false,
-        }
-    }
-
-    pub fn is_pulse(&self) -> bool {
-        match self {
-            SeriesRange::TimeRange(_) => false,
-            SeriesRange::PulseRange(_) => true,
-        }
-    }
-
-    pub fn beg_u64(&self) -> u64 {
-        match self {
-            SeriesRange::TimeRange(x) => x.beg,
-            SeriesRange::PulseRange(x) => x.beg,
-        }
-    }
-
-    pub fn end_u64(&self) -> u64 {
-        match self {
-            SeriesRange::TimeRange(x) => x.end,
-            SeriesRange::PulseRange(x) => x.end,
-        }
-    }
-
-    pub fn delta_u64(&self) -> u64 {
-        match self {
-            SeriesRange::TimeRange(x) => x.end - x.beg,
-            SeriesRange::PulseRange(x) => x.end - x.beg,
-        }
-    }
-}
-
-impl fmt::Debug for SeriesRange {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        match self {
-            SeriesRange::TimeRange(range) => write!(fmt, "SeriesRange::TimeRange {{ {} }}", range),
-            SeriesRange::PulseRange(_) => write!(fmt, "SeriesRange::PulseRange {{ .. }}"),
}}"), - } - } -} - -impl From for SeriesRange { - fn from(k: NanoRange) -> Self { - Self::TimeRange(k) - } -} - -impl From for SeriesRange { - fn from(k: PulseRange) -> Self { - Self::PulseRange(k) - } -} - -impl FromUrl for SeriesRange { - type Error = NetpodError; - - fn from_url(url: &url::Url) -> Result { - let pairs = crate::get_url_query_pairs(url); - Self::from_pairs(&pairs) - } - - fn from_pairs(pairs: &BTreeMap) -> Result { - let ret = if let Ok(x) = TimeRangeQuery::from_pairs(pairs) { - SeriesRange::TimeRange(x.into()) - } else if let Ok(x) = PulseRangeQuery::from_pairs(pairs) { - SeriesRange::PulseRange(x.into()) - } else { - return Err(NetpodError::MissingTimerange); - }; - Ok(ret) - } -} - -impl AppendToUrl for SeriesRange { - fn append_to_url(&self, url: &mut Url) { - match self { - SeriesRange::TimeRange(k) => TimeRangeQuery::from(k).append_to_url(url), - SeriesRange::PulseRange(k) => PulseRangeQuery::from(k).append_to_url(url), - } - } -} diff --git a/crates/netpod/src/status.rs b/crates/netpod/src/status.rs deleted file mode 100644 index 7443131..0000000 --- a/crates/netpod/src/status.rs +++ /dev/null @@ -1,4 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -pub struct SystemStats {} diff --git a/crates/netpod/src/stream_impl_tracer.rs b/crates/netpod/src/stream_impl_tracer.rs deleted file mode 100644 index e38ce99..0000000 --- a/crates/netpod/src/stream_impl_tracer.rs +++ /dev/null @@ -1,43 +0,0 @@ -use crate::log::*; - -pub struct StreamImplTracer { - name: String, - npoll_cnt: usize, - npoll_max: usize, - loop_cnt: usize, - loop_max: usize, -} - -impl StreamImplTracer { - pub fn new(name: String, npoll_max: usize, loop_max: usize) -> Self { - Self { - name, - npoll_cnt: 0, - npoll_max, - loop_cnt: 0, - loop_max, - } - } - - pub fn poll_enter(&mut self) -> bool { - self.npoll_cnt += 1; - if self.npoll_cnt >= self.npoll_max { - trace!("{} poll {} reached limit", self.name, self.npoll_cnt); - true - } else { - trace!("{} poll {}", self.name, self.npoll_cnt); - false - } - } - - pub fn loop_enter(&mut self) -> bool { - self.loop_cnt += 1; - if self.loop_cnt >= self.loop_max { - trace!("{} loop {} reached limit", self.name, self.loop_cnt); - true - } else { - trace!("{} loop {}", self.name, self.loop_cnt); - false - } - } -} diff --git a/crates/netpod/src/streamext.rs b/crates/netpod/src/streamext.rs deleted file mode 100644 index 213b3a1..0000000 --- a/crates/netpod/src/streamext.rs +++ /dev/null @@ -1,72 +0,0 @@ -use daqbuf_err as err; -use err::Error; -use futures_util::{Stream, StreamExt}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pub struct SCC -where - S: Stream, -{ - inp: S, - errored: bool, - completed: bool, -} - -impl SCC -where - S: Stream, -{ - pub fn new(inp: S) -> Self { - Self { - inp, - errored: false, - completed: false, - } - } -} - -impl Stream for SCC -where - S: Stream> + Unpin, -{ - type Item = ::Item; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - use Poll::*; - if self.completed { - panic!("SCC poll_next on completed"); - } else if self.errored { - self.completed = true; - Ready(None) - } else { - match self.inp.poll_next_unpin(cx) { - Ready(Some(Ok(k))) => Ready(Some(Ok(k))), - Ready(Some(Err(e))) => { - self.errored = true; - Ready(Some(Err(e))) - } - Ready(None) => { - self.completed = true; - Ready(None) - } - Pending => Pending, - } - } - } -} - -pub trait IntoSCC -where - S: Stream, -{ - fn into_scc(self) -> SCC; -} - -impl IntoSCC for S -where - S: Stream, -{ - fn 
diff --git a/crates/netpod/src/streamext.rs b/crates/netpod/src/streamext.rs
deleted file mode 100644
index 213b3a1..0000000
--- a/crates/netpod/src/streamext.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-use daqbuf_err as err;
-use err::Error;
-use futures_util::{Stream, StreamExt};
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-pub struct SCC<S>
-where
-    S: Stream,
-{
-    inp: S,
-    errored: bool,
-    completed: bool,
-}
-
-impl<S> SCC<S>
-where
-    S: Stream,
-{
-    pub fn new(inp: S) -> Self {
-        Self {
-            inp,
-            errored: false,
-            completed: false,
-        }
-    }
-}
-
-impl<S, T> Stream for SCC<S>
-where
-    S: Stream<Item = Result<T, Error>> + Unpin,
-{
-    type Item = <S as Stream>::Item;
-    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
-        use Poll::*;
-        if self.completed {
-            panic!("SCC poll_next on completed");
-        } else if self.errored {
-            self.completed = true;
-            Ready(None)
-        } else {
-            match self.inp.poll_next_unpin(cx) {
-                Ready(Some(Ok(k))) => Ready(Some(Ok(k))),
-                Ready(Some(Err(e))) => {
-                    self.errored = true;
-                    Ready(Some(Err(e)))
-                }
-                Ready(None) => {
-                    self.completed = true;
-                    Ready(None)
-                }
-                Pending => Pending,
-            }
-        }
-    }
-}
-
-pub trait IntoSCC<S>
-where
-    S: Stream,
-{
-    fn into_scc(self) -> SCC<S>;
-}
-
-impl<S> IntoSCC<S> for S
-where
-    S: Stream,
-{
-    fn into_scc(self) -> SCC<S> {
-        SCC::new(self)
-    }
-}
diff --git a/crates/netpod/src/ttl.rs b/crates/netpod/src/ttl.rs
deleted file mode 100644
index 33e2e3b..0000000
--- a/crates/netpod/src/ttl.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-use core::fmt;
-use daqbuf_err as err;
-use err::thiserror;
-use err::ThisError;
-use serde::Deserialize;
-use serde::Serialize;
-use std::str::FromStr;
-use std::time::Duration;
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub enum RetentionTime {
-    Short,
-    Medium,
-    Long,
-}
-
-impl RetentionTime {
-    pub fn debug_tag(&self) -> &'static str {
-        use RetentionTime::*;
-        match self {
-            Short => "ST",
-            Medium => "MT",
-            Long => "LT",
-        }
-    }
-
-    pub fn table_prefix(&self) -> &'static str {
-        use RetentionTime::*;
-        match self {
-            Short => "st_",
-            Medium => "mt_",
-            Long => "lt_",
-        }
-    }
-
-    pub fn ttl_events_d0(&self) -> Duration {
-        let day = 60 * 60 * 24;
-        let margin_max = Duration::from_secs(day * 2);
-        let ttl = self.ttl_ts_msp();
-        let margin = ttl / 10;
-        let margin = if margin >= margin_max { margin_max } else { margin };
-        ttl + margin
-    }
-
-    pub fn ttl_events_d1(&self) -> Duration {
-        // TTL now depends only on RetentionTime, not on data type or shape.
-        self.ttl_events_d0()
-    }
-
-    pub fn ttl_ts_msp(&self) -> Duration {
-        let day = 60 * 60 * 24;
-        match self {
-            RetentionTime::Short => Duration::from_secs(day * 7),
-            RetentionTime::Medium => Duration::from_secs(day * 31 * 13),
-            RetentionTime::Long => Duration::from_secs(day * 31 * 12 * 17),
-        }
-    }
-
-    pub fn ttl_binned(&self) -> Duration {
-        // Current choice is to keep the TTL the same as for events
-        self.ttl_events_d0()
-    }
-
-    pub fn ttl_channel_status(&self) -> Duration {
-        // Current choice is to keep the TTL the same as for events
-        self.ttl_events_d0()
-    }
-}
-
-impl fmt::Display for RetentionTime {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        let s = match self {
-            RetentionTime::Short => "short",
-            RetentionTime::Medium => "medium",
-            RetentionTime::Long => "long",
-        };
-        fmt.write_str(s)
-    }
-}
-
-#[derive(Debug, ThisError)]
-#[cstm(name = "TTL")]
-pub enum Error {
-    Parse,
-}
-
-// err::err_dbg_dis!(Error, "ttl::Error::");
-
-impl FromStr for RetentionTime {
-    type Err = Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let ret = match s {
-            "short" => Self::Short,
-            "medium" => Self::Medium,
-            "long" => Self::Long,
-            _ => return Err(Error::Parse),
-        };
-        Ok(ret)
-    }
-}
-
-// impl ToString for RetentionTime {
-//     fn to_string(&self) -> String {
-//         match self {
-//             RetentionTime::Short => "short".into(),
-//             RetentionTime::Medium => "medium".into(),
-//             RetentionTime::Long => "long".into(),
-//         }
-//     }
-// }
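
The TTL margin rule in `ttl_events_d0` above (one tenth of the base TTL, capped at two days) is compact enough to verify by hand. A standalone check of the same arithmetic, using only `std::time::Duration`:

```rust
use std::time::Duration;

const DAY: u64 = 60 * 60 * 24;

// Same margin rule as RetentionTime::ttl_events_d0: ttl + min(ttl / 10, 2 days).
fn ttl_with_margin(ttl: Duration) -> Duration {
    let margin_max = Duration::from_secs(DAY * 2);
    let margin = ttl / 10;
    ttl + margin.min(margin_max)
}

fn main() {
    // Short retention: 7 days base, margin 0.7 days (under the cap).
    let short = ttl_with_margin(Duration::from_secs(DAY * 7));
    assert_eq!(short, Duration::from_secs(DAY * 7 + DAY * 7 / 10));
    // Medium retention: 31 * 13 = 403 days base, margin capped at 2 days.
    let medium = ttl_with_margin(Duration::from_secs(DAY * 31 * 13));
    assert_eq!(medium, Duration::from_secs(DAY * (31 * 13 + 2)));
}
```
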
diff --git a/crates/nodenet/Cargo.toml b/crates/nodenet/Cargo.toml
index 16a1568..8ccc17c 100644
--- a/crates/nodenet/Cargo.toml
+++ b/crates/nodenet/Cargo.toml
@@ -18,7 +18,7 @@ futures-util = "0.3.14"
 tracing = "0.1.25"
 hex = "0.4.3"
 daqbuf-err = { path = "../../../daqbuf-err" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 query = { path = "../query" }
 disk = { path = "../disk" }
 #parse = { path = "../parse" }
diff --git a/crates/parse/Cargo.toml b/crates/parse/Cargo.toml
index 539550f..648a30e 100644
--- a/crates/parse/Cargo.toml
+++ b/crates/parse/Cargo.toml
@@ -14,4 +14,4 @@ byteorder = "1.4"
 hex = "0.4.3"
 nom = "7.1.3"
 daqbuf-err = { path = "../../../daqbuf-err" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
diff --git a/crates/query/Cargo.toml b/crates/query/Cargo.toml
index 6534b4e..513cbf9 100644
--- a/crates/query/Cargo.toml
+++ b/crates/query/Cargo.toml
@@ -13,7 +13,7 @@ url = "2.2"
 humantime = "2.1.0"
 humantime-serde = "1.1.1"
 thiserror = "0.0.1"
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 items_0 = { path = "../items_0" }
 items_2 = { path = "../items_2" }
diff --git a/crates/scyllaconn/Cargo.toml b/crates/scyllaconn/Cargo.toml
index 74b9cb1..d14105e 100644
--- a/crates/scyllaconn/Cargo.toml
+++ b/crates/scyllaconn/Cargo.toml
@@ -13,7 +13,7 @@ pin-project = "1"
 async-channel = "2.3.1"
 scylla = "0.13.0"
 daqbuf-err = { path = "../../../daqbuf-err" }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 query = { path = "../query" }
 items_0 = { path = "../items_0" }
 items_2 = { path = "../items_2" }
diff --git a/crates/streamio/Cargo.toml b/crates/streamio/Cargo.toml
index 7d0cc3d..4ef1314 100644
--- a/crates/streamio/Cargo.toml
+++ b/crates/streamio/Cargo.toml
@@ -23,7 +23,7 @@ rand_xoshiro = "0.6.0"
 thiserror = "0.0.1"
 chrono = { version = "0.4.19", features = ["serde"] }
 wasmer = { version = "4.1.0", default-features = false, features = ["sys", "cranelift"], optional = true }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 query = { path = "../query" }
 items_0 = { path = "../items_0" }
 items_2 = { path = "../items_2" }
diff --git a/crates/streams/Cargo.toml b/crates/streams/Cargo.toml
index 196c96d..4a0bbfd 100644
--- a/crates/streams/Cargo.toml
+++ b/crates/streams/Cargo.toml
@@ -21,7 +21,7 @@ rand_xoshiro = "0.6.0"
 thiserror = "0.0.1"
 chrono = { version = "0.4.19", features = ["serde"] }
 wasmer = { version = "4.1.0", default-features = false, features = ["sys", "cranelift"], optional = true }
-netpod = { path = "../netpod" }
+netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
 query = { path = "../query" }
 items_0 = { path = "../items_0" }
 items_2 = { path = "../items_2" }
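
Worth noting for reviewers: every updated dependency keeps the key `netpod` and adds `package = "daqbuf-netpod"`, so Cargo renames the external crate at resolution time and downstream Rust source is untouched by this patch. Imports keep compiling unchanged, as in this sketch (compiles inside the workspace; the asserted value of `SEC` is an assumption based on the nanosecond usage in the deleted range tests above):

```rust
// Call sites stay as-is: `netpod` now resolves to the external daqbuf-netpod
// package via the Cargo dependency rename, so the path below is unchanged.
use netpod::timeunits::SEC;

fn main() {
    // SEC is used as nanoseconds-per-second throughout the range code.
    assert_eq!(SEC, 1_000_000_000);
}
```
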