WIP on events_plain_json_00

This commit is contained in:
Dominik Werder
2022-11-22 11:53:25 +01:00
parent 7cdf5975b9
commit 06e21bc21f
47 changed files with 1133 additions and 687 deletions

View File

@@ -8,7 +8,7 @@ edition = "2021"
path = "src/commonio.rs"
[dependencies]
tokio = { version = "1.18.1", features = ["io-util", "net", "time", "fs"] }
tokio = { version = "1.21.1", features = ["io-util", "net", "time", "fs"] }
tracing = "0.1"
futures-core = "0.3.15"
futures-util = "0.3.15"
@@ -16,7 +16,6 @@ bytes = "1"
serde = { version = "1", features = ["derive"] }
serde_derive = "1"
serde_json = "1"
bincode = "1.3"
chrono = "0.4"
async-channel = "1"
parking_lot = "0.11"

View File

@@ -13,10 +13,8 @@ tracing-subscriber = "0.2.17"
futures-core = "0.3.14"
futures-util = "0.3.14"
bytes = "1.0.1"
#bincode = "1.3.3"
#async-channel = "1"
#dashmap = "3"
#tokio-postgres = "0.7"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
@@ -27,6 +25,5 @@ lazy_static = "1.4.0"
err = { path = "../err" }
taskrun = { path = "../taskrun" }
netpod = { path = "../netpod" }
#httpret = { path = "../httpret" }
disk = { path = "../disk" }
daqbufp2 = { path = "../daqbufp2" }

View File

@@ -13,13 +13,10 @@ tracing-subscriber = "0.2.17"
futures-core = "0.3.14"
futures-util = "0.3.14"
bytes = "1.0.1"
#async-channel = "1"
#dashmap = "3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
bincode = "1.3.3"
#ciborium = "0.1.0"
rmp-serde = "1.1.1"
chrono = "0.4"
url = "2.2.2"
lazy_static = "1.4.0"

View File

@@ -120,7 +120,7 @@ pub async fn get_binned(
error!("unexpected type id got {} exp {}", frame.tyid(), type_id_exp);
}
let n1 = frame.buf().len();
match bincode::deserialize::<ExpectedType>(frame.buf()) {
match rmp_serde::from_slice::<ExpectedType>(frame.buf()) {
Ok(item) => match item {
Ok(item) => {
match item {

View File

@@ -2,6 +2,8 @@ pub mod archapp;
pub mod binnedbinary;
pub mod binnedjson;
pub mod events;
#[cfg(test)]
mod eventsjson;
pub mod timeweightedjson;
use bytes::BytesMut;

View File

@@ -8,8 +8,9 @@ use http::StatusCode;
use hyper::Body;
use items::binsdim0::MinMaxAvgDim0Bins;
use items::{RangeCompletableItem, Sitemty, StatsItem, StreamItem, SubFrId, WithLen};
use netpod::log::*;
use netpod::query::{BinnedQuery, CacheUsage};
use netpod::{log::*, AppendToUrl};
use netpod::AppendToUrl;
use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange, PerfOpts, APP_OCTET};
use serde::de::DeserializeOwned;
use std::fmt;
@@ -200,7 +201,7 @@ where
if frame.tyid() != err::todoval::<u32>() {
error!("test receives unexpected tyid {:x}", frame.tyid());
}
match bincode::deserialize::<Sitemty<MinMaxAvgDim0Bins<NTY>>>(frame.buf()) {
match rmp_serde::from_slice::<Sitemty<MinMaxAvgDim0Bins<NTY>>>(frame.buf()) {
Ok(item) => match item {
Ok(item) => match item {
StreamItem::Log(item) => {

View File

@@ -172,7 +172,7 @@ where
error!("test receives unexpected tyid {:x}", frame.tyid());
None
} else {
match bincode::deserialize::<Sitemty<ScalarEvents<NTY>>>(frame.buf()) {
match rmp_serde::from_slice::<Sitemty<ScalarEvents<NTY>>>(frame.buf()) {
Ok(item) => match item {
Ok(item) => match item {
StreamItem::Log(item) => {

View File

@@ -0,0 +1,81 @@
use crate::err::ErrConv;
use crate::nodes::require_test_hosts_running;
use chrono::{DateTime, Utc};
use err::Error;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::query::PlainEventsQuery;
use netpod::APP_JSON;
use netpod::{AppendToUrl, Channel, Cluster, HostPort, NanoRange};
use serde_json::Value as JsonValue;
use url::Url;
/// Fetch a small time range of plain JSON events from the in-memory test
/// backend and verify the request completes without error.
#[test]
fn events_plain_json_00() -> Result<(), Error> {
    taskrun::run(async {
        let running = require_test_hosts_running()?;
        let channel = Channel {
            backend: "testbackend".into(),
            name: "inmem-d0-i32".into(),
            series: None,
        };
        let beg = "1970-01-01T00:20:04.000Z";
        let end = "1970-01-01T00:20:10.000Z";
        events_plain_json(channel, beg, end, &running.cluster, true, 4).await?;
        Ok(())
    })
}
// TODO improve by a more information-rich return type.
/// Issue a plain-events JSON query against the first node of `cluster` and
/// return the parsed JSON response body.
///
/// `beg_date` / `end_date` are ISO-8601 timestamps that are parsed into the
/// `NanoRange` of the query. The `_expect_*` parameters are currently unused
/// placeholders for future assertions on the response content.
async fn events_plain_json(
    channel: Channel,
    beg_date: &str,
    end_date: &str,
    cluster: &Cluster,
    _expect_range_complete: bool,
    _expect_event_count: u64,
) -> Result<JsonValue, Error> {
    let started = Utc::now();
    let node = &cluster.nodes[0];
    let beg: DateTime<Utc> = beg_date.parse()?;
    let end: DateTime<Utc> = end_date.parse()?;
    let range = NanoRange::from_date_time(beg, end);
    let query = PlainEventsQuery::new(channel, range, 1024 * 4, None, false);
    let hostport = HostPort::from_node(node);
    let mut url = Url::parse(&format!("http://{}:{}/api/4/events", hostport.host, hostport.port))?;
    query.append_to_url(&mut url);
    info!("get_plain_events get {}", url);
    let request = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_JSON)
        .body(Body::empty())
        .ec()?;
    let res = hyper::Client::new().request(request).await.ec()?;
    if res.status() != StatusCode::OK {
        error!("client response {:?}", res);
        return Err(Error::with_msg_no_trace(format!("bad result {res:?}")));
    }
    let body = hyper::body::to_bytes(res.into_body()).await.ec()?;
    let text = String::from_utf8_lossy(&body);
    let res: JsonValue = serde_json::from_str(&text)?;
    let pretty = serde_json::to_string_pretty(&res)?;
    eprintln!("{pretty}");
    // TODO assert more
    let ms = chrono::Utc::now().signed_duration_since(started).num_milliseconds() as u64;
    // TODO add timeout
    debug!("time {} ms", ms);
    Ok(res)
}

View File

@@ -8,11 +8,9 @@ edition = "2021"
path = "src/dq.rs"
[dependencies]
#serde = { version = "1.0", features = ["derive"] }
#serde_json = "1.0"
tokio = { version = "1.18.1", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
tokio = { version = "1.21.1", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
futures-util = "0.3.14"
clap = { version = "3.0.6", features = ["derive", "cargo"] }
clap = { version = "4.0", features = ["derive", "cargo"] }
chrono = "0.4.19"
bytes = "1.0.1"
err = { path = "../err" }

View File

@@ -1,14 +1,15 @@
use clap::Parser;
use clap::{ArgAction, Parser};
use err::Error;
use netpod::timeunits::*;
use std::{path::PathBuf, str::FromStr};
use std::path::PathBuf;
use std::str::FromStr;
#[derive(Debug, Parser)]
#[clap(name = "DAQ buffer tools", version)]
#[command(author, version)]
pub struct Opts {
#[clap(short, long, parse(from_occurrences))]
#[arg(short, long, action(ArgAction::Count))]
pub verbose: u32,
#[clap(subcommand)]
#[command(subcommand)]
pub subcmd: SubCmd,
}
@@ -21,19 +22,19 @@ pub enum SubCmd {
#[derive(Debug, Parser)]
pub struct ConvertArchiverApplianceChannel {
/// Prefix for keyspaces, e.g. specify `daq` to get scalar keyspace directory `daq_2`.
#[clap(long)]
#[arg(long)]
keyspace_prefix: String,
/// Name of the channel to convert.
#[clap(long)]
#[arg(long)]
channel_name: String,
/// Look for archiver appliance data at given path.
#[clap(long)]
#[arg(long)]
input_dir: PathBuf,
/// Generate Databuffer format at given path.
#[clap(long)]
#[arg(long)]
output_dir: PathBuf,
/// Size of the time-bins in the generated Databuffer format.\nUnit-suffixes: `h` (hours), `d` (days)
#[clap(default_value = "1d", long)]
#[arg(default_value = "1d", long)]
time_bin_size: TimeBinSize,
}

View File

@@ -1,4 +1,4 @@
use clap::Parser;
use clap::{ArgAction, Parser};
use err::Error;
use netpod::log::*;
use netpod::{ByteOrder, ByteSize, Channel, ChannelConfig, NanoRange, Shape};
@@ -8,11 +8,11 @@ use tokio::fs::File;
use tokio::io::AsyncReadExt;
#[derive(Debug, Parser)]
#[clap(name = "DAQ buffer tools", version)]
#[command(name = "DAQ buffer tools", author, version)]
pub struct Opts {
#[clap(short, long, parse(from_occurrences))]
#[arg(short, long, action(ArgAction::Count))]
pub verbose: u32,
#[clap(subcommand)]
#[command(subcommand)]
pub subcmd: SubCmd,
}
@@ -24,15 +24,15 @@ pub enum SubCmd {
#[derive(Debug, Parser)]
pub struct ReadDatabufferConfigfile {
#[clap(long)]
#[arg(long)]
configfile: PathBuf,
}
#[derive(Debug, Parser)]
pub struct ReadDatabufferDatafile {
#[clap(long)]
#[arg(long)]
configfile: PathBuf,
#[clap(long)]
#[arg(long)]
datafile: PathBuf,
}

View File

@@ -10,6 +10,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_cbor = "0.11"
erased-serde = "0.3"
rmp-serde = "1.1.1"
async-channel = "1.6"
chrono = { version = "0.4", features = ["serde"] }
url = "2.2"

View File

@@ -377,6 +377,18 @@ impl From<TryFromSliceError> for Error {
}
}
// Allow `?` on MessagePack serialization failures by converting
// `rmp_serde` encode errors into the crate-wide `Error` type.
impl From<rmp_serde::encode::Error> for Error {
    fn from(k: rmp_serde::encode::Error) -> Self {
        Self::with_msg(format!("{:?}", k))
    }
}
// Allow `?` on MessagePack deserialization failures by converting
// `rmp_serde` decode errors into the crate-wide `Error` type.
impl From<rmp_serde::decode::Error> for Error {
    fn from(k: rmp_serde::decode::Error) -> Self {
        Self::with_msg(format!("{:?}", k))
    }
}
#[derive(Debug, Serialize, Deserialize)]
pub struct PublicError {
reason: Option<Reason>,

View File

@@ -4,9 +4,10 @@ use crate::{response, response_err, BodyStream, ToPublicResponse};
use futures_util::{Stream, StreamExt, TryStreamExt};
use http::{Method, Request, Response, StatusCode};
use hyper::Body;
use items_2::channelevents::ChannelEvents;
use items_2::merger_cev::ChannelEventsMerger;
use items_2::{binned_collected, empty_events_dyn, empty_events_dyn_2, ChannelEvents};
use netpod::query::{BinnedQuery, ChannelStateEventsQuery, PlainEventsQuery};
use items_2::{binned_collected, empty_events_dyn, empty_events_dyn_2};
use netpod::query::{BinnedQuery, ChannelStateEventsQuery, PlainEventsQuery, RawEventsQuery};
use netpod::{log::*, HasBackend};
use netpod::{AggKind, BinnedRange, FromUrl, NodeConfigCached};
use netpod::{ACCEPT_ALL, APP_JSON, APP_OCTET};
@@ -53,6 +54,7 @@ async fn plain_events(req: Request<Body>, node_config: &NodeConfigCached) -> Res
.map_err(Error::from)
.map_err(|e| e.add_public_msg(format!("Can not parse query url")))?
};
// TODO format error.
if accept == APP_JSON || accept == ACCEPT_ALL {
Ok(plain_events_json(url, req, node_config).await?)
} else if accept == APP_OCTET {
@@ -117,7 +119,11 @@ async fn plain_events_json(
// ---
if query.backend() == "testbackend" {
err::todoval()
let query = RawEventsQuery::new(query.channel().clone(), query.range().clone(), AggKind::Plain);
let item = streams::plaineventsjson::plain_events_json(query, &node_config.node_config.cluster).await?;
let buf = serde_json::to_vec(&item)?;
let ret = response(StatusCode::OK).body(Body::from(buf))?;
Ok(ret)
} else {
let op = disk::channelexec::PlainEventsJson::new(
// TODO pass only the query, not channel, range again:
@@ -252,7 +258,7 @@ impl EventsHandlerScylla {
Ok(k) => match k {
ChannelEvents::Events(mut item) => {
if coll.is_none() {
coll = Some(item.new_collector());
coll = Some(items_2::streams::Collectable::new_collector(item.as_ref()));
}
let cl = coll.as_mut().unwrap();
cl.ingest(item.as_collectable_mut());

View File

@@ -668,10 +668,20 @@ mod instant_serde {
use super::*;
use serde::Serializer;
pub fn ser<S: Serializer>(x: &SystemTime, ser: S) -> Result<S::Ok, S::Error> {
use chrono::LocalResult;
let dur = x.duration_since(std::time::UNIX_EPOCH).unwrap();
let dt = chrono::TimeZone::timestamp(&chrono::Utc, dur.as_secs() as i64, dur.subsec_nanos());
let s = dt.format("%Y-%m-%dT%H:%M:%S%.3f").to_string();
ser.serialize_str(&s)
let res = chrono::TimeZone::timestamp_opt(&chrono::Utc, dur.as_secs() as i64, dur.subsec_nanos());
match res {
LocalResult::None => Err(serde::ser::Error::custom(format!("Bad local instant conversion"))),
LocalResult::Single(dt) => {
let s = dt.format("%Y-%m-%dT%H:%M:%S%.3f").to_string();
ser.serialize_str(&s)
}
LocalResult::Ambiguous(dt, _dt2) => {
let s = dt.format("%Y-%m-%dT%H:%M:%S%.3f").to_string();
ser.serialize_str(&s)
}
}
}
}

View File

@@ -12,9 +12,9 @@ tokio = { version = "1.21.2", features = ["rt-multi-thread", "io-util", "net", "
futures-util = "0.3.15"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_cbor = "0.11.1"
ciborium = "0.2"
rmp-serde = "1.1.1"
erased-serde = "0.3"
bincode = "1.3.3"
bytes = "1.2.1"
num-traits = "0.2.15"
chrono = { version = "0.4.22", features = ["serde"] }

View File

@@ -1,6 +1,5 @@
use crate::numops::NumOps;
use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult};
use crate::ts_offs_from_abs;
use crate::Appendable;
use crate::FilterFittingInside;
use crate::Fits;
@@ -12,6 +11,7 @@ use crate::ReadableFromFile;
use crate::Sitemty;
use crate::SubFrId;
use crate::TimeBinnableDyn;
use crate::{ts_offs_from_abs, FrameType};
use crate::{NewEmpty, RangeOverlapInfo, WithLen};
use crate::{TimeBinnableType, TimeBinnableTypeAggregator};
use crate::{TimeBinned, TimeBinnerDyn, TimeBins};
@@ -45,6 +45,15 @@ where
const FRAME_TYPE_ID: u32 = crate::MIN_MAX_AVG_DIM_0_BINS_FRAME_TYPE_ID + NTY::SUB;
}
impl<NTY> FrameType for MinMaxAvgDim0Bins<NTY>
where
NTY: SubFrId,
{
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}
impl<NTY> fmt::Debug for MinMaxAvgDim0Bins<NTY>
where
NTY: fmt::Debug,
@@ -202,7 +211,7 @@ where
}
fn from_buf(buf: &[u8]) -> Result<Self, Error> {
let dec = serde_cbor::from_slice(&buf)?;
let dec = rmp_serde::from_slice(&buf)?;
Ok(dec)
}
}

View File

@@ -1,5 +1,4 @@
use crate::numops::NumOps;
use crate::pulse_offs_from_abs;
use crate::streams::{Collectable, Collector, ToJsonBytes, ToJsonResult};
use crate::ts_offs_from_abs;
use crate::waveevents::WaveEvents;
@@ -13,6 +12,7 @@ use crate::TimeBinnableDyn;
use crate::TimeBinnableType;
use crate::TimeBinnableTypeAggregator;
use crate::TimeBins;
use crate::{pulse_offs_from_abs, FrameType};
use crate::{Fits, FitsInside, NewEmpty, ReadPbv, Sitemty, SubFrId, TimeBinned, WithLen};
use chrono::{TimeZone, Utc};
use err::Error;
@@ -42,6 +42,15 @@ where
const FRAME_TYPE_ID: u32 = crate::MIN_MAX_AVG_DIM_1_BINS_FRAME_TYPE_ID + NTY::SUB;
}
impl<NTY> FrameType for MinMaxAvgDim1Bins<NTY>
where
NTY: SubFrId,
{
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}
impl<NTY> fmt::Debug for MinMaxAvgDim1Bins<NTY>
where
NTY: fmt::Debug,
@@ -199,7 +208,7 @@ where
}
fn from_buf(buf: &[u8]) -> Result<Self, Error> {
let dec = serde_cbor::from_slice(&buf)?;
let dec = rmp_serde::from_slice(&buf)?;
Ok(dec)
}
}

View File

@@ -3,7 +3,9 @@ use netpod::{ScalarType, Shape};
use parse::channelconfig::CompressionMethod;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::{Appendable, ByteEstimate, Clearable, FrameTypeInnerStatic, PushableIndex, WithLen, WithTimestamps};
use crate::{
Appendable, ByteEstimate, Clearable, FrameType, FrameTypeInnerStatic, PushableIndex, WithLen, WithTimestamps,
};
#[derive(Debug, Serialize, Deserialize)]
pub struct EventFull {
@@ -99,6 +101,12 @@ impl FrameTypeInnerStatic for EventFull {
const FRAME_TYPE_ID: u32 = crate::EVENT_FULL_FRAME_TYPE_ID;
}
impl FrameType for EventFull {
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}
impl WithLen for EventFull {
fn len(&self) -> usize {
self.tss.len()

View File

@@ -4,6 +4,7 @@ use crate::{Appendable, Clearable, FrameTypeInnerDyn, PushableIndex, WithLen, Wi
use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape};
use serde::{Deserialize, Serialize};
// TODO remove
#[derive(Debug, Serialize, Deserialize)]
pub enum EventsItem {
Plain(PlainEvents),

View File

@@ -1,18 +1,32 @@
use crate::inmem::InMemoryFrame;
use crate::{FrameDecodable, FrameType, LogItem, StatsItem};
use crate::{
ERROR_FRAME_TYPE_ID, INMEM_FRAME_ENCID, INMEM_FRAME_HEAD, INMEM_FRAME_MAGIC, LOG_FRAME_TYPE_ID,
RANGE_COMPLETE_FRAME_TYPE_ID, STATS_FRAME_TYPE_ID, TERM_FRAME_TYPE_ID,
};
use crate::{ContainsError, FrameDecodable, FrameType, LogItem, StatsItem};
use crate::{ERROR_FRAME_TYPE_ID, INMEM_FRAME_ENCID, INMEM_FRAME_HEAD, INMEM_FRAME_MAGIC};
use crate::{LOG_FRAME_TYPE_ID, RANGE_COMPLETE_FRAME_TYPE_ID, STATS_FRAME_TYPE_ID, TERM_FRAME_TYPE_ID};
use bytes::{BufMut, BytesMut};
use err::Error;
#[allow(unused)]
use netpod::log::*;
use serde::Serialize;
// Module-local helper to convert rmp_serde errors into the crate `Error`,
// used so the `?` operator works without a `From` impl in the `err` crate.
trait EC {
    fn ec(self) -> err::Error;
}
// Map MessagePack encode failures to a no-trace error.
impl EC for rmp_serde::encode::Error {
    fn ec(self) -> err::Error {
        err::Error::with_msg_no_trace(format!("{self:?}"))
    }
}
// Map MessagePack decode failures to a no-trace error.
impl EC for rmp_serde::decode::Error {
    fn ec(self) -> err::Error {
        err::Error::with_msg_no_trace(format!("{self:?}"))
    }
}
pub fn make_frame<FT>(item: &FT) -> Result<BytesMut, Error>
where
FT: FrameType + Serialize,
FT: FrameType + ContainsError + Serialize,
{
if item.is_err() {
make_error_frame(item.err().unwrap())
@@ -21,21 +35,15 @@ where
}
}
pub fn make_frame_2<FT>(item: &FT, fty: u32) -> Result<BytesMut, Error>
pub fn make_frame_2<T>(item: &T, fty: u32) -> Result<BytesMut, Error>
where
FT: erased_serde::Serialize,
T: erased_serde::Serialize,
{
trace!("make_frame_2 fty {:x}", fty);
let mut out = vec![];
use bincode::Options;
let opts = bincode::DefaultOptions::new()
.with_little_endian()
.with_fixint_encoding()
.allow_trailing_bytes();
let mut ser = bincode::Serializer::new(&mut out, opts);
//let mut ser = serde_json::Serializer::new(std::io::stdout());
let mut out = Vec::new();
let mut ser = rmp_serde::Serializer::new(&mut out);
let mut ser2 = <dyn erased_serde::Serializer>::erase(&mut ser);
//match bincode::serialize(item) {
//let writer = ciborium::ser::into_writer(&item, &mut out).unwrap();
match item.erased_serialize(&mut ser2) {
Ok(_) => {
let enc = out;
@@ -69,7 +77,7 @@ where
// TODO remove duplication for these similar `make_*_frame` functions:
pub fn make_error_frame(error: &::err::Error) -> Result<BytesMut, Error> {
match bincode::serialize(error) {
match rmp_serde::to_vec(error) {
Ok(enc) => {
let mut h = crc32fast::Hasher::new();
h.update(&enc);
@@ -91,12 +99,12 @@ pub fn make_error_frame(error: &::err::Error) -> Result<BytesMut, Error> {
//trace!("frame_crc {}", frame_crc);
Ok(buf)
}
Err(e) => Err(e)?,
Err(e) => Err(e.ec())?,
}
}
pub fn make_log_frame(item: &LogItem) -> Result<BytesMut, Error> {
match bincode::serialize(item) {
match rmp_serde::to_vec(item) {
Ok(enc) => {
let mut h = crc32fast::Hasher::new();
h.update(&enc);
@@ -115,12 +123,12 @@ pub fn make_log_frame(item: &LogItem) -> Result<BytesMut, Error> {
buf.put_u32_le(frame_crc);
Ok(buf)
}
Err(e) => Err(e)?,
Err(e) => Err(e.ec())?,
}
}
pub fn make_stats_frame(item: &StatsItem) -> Result<BytesMut, Error> {
match bincode::serialize(item) {
match rmp_serde::to_vec(item) {
Ok(enc) => {
let mut h = crc32fast::Hasher::new();
h.update(&enc);
@@ -139,7 +147,7 @@ pub fn make_stats_frame(item: &StatsItem) -> Result<BytesMut, Error> {
buf.put_u32_le(frame_crc);
Ok(buf)
}
Err(e) => Err(e)?,
Err(e) => Err(e.ec())?,
}
}
@@ -199,47 +207,38 @@ where
)));
}
if frame.tyid() == ERROR_FRAME_TYPE_ID {
let k: ::err::Error = match bincode::deserialize(frame.buf()) {
let k: ::err::Error = match rmp_serde::from_slice(frame.buf()) {
Ok(item) => item,
Err(e) => {
error!(
"ERROR bincode::deserialize len {} ERROR_FRAME_TYPE_ID",
frame.buf().len()
);
error!("ERROR deserialize len {} ERROR_FRAME_TYPE_ID", frame.buf().len());
let n = frame.buf().len().min(128);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
Err(e.ec())?
}
};
Ok(T::from_error(k))
} else if frame.tyid() == LOG_FRAME_TYPE_ID {
let k: LogItem = match bincode::deserialize(frame.buf()) {
let k: LogItem = match rmp_serde::from_slice(frame.buf()) {
Ok(item) => item,
Err(e) => {
error!(
"ERROR bincode::deserialize len {} LOG_FRAME_TYPE_ID",
frame.buf().len()
);
error!("ERROR deserialize len {} LOG_FRAME_TYPE_ID", frame.buf().len());
let n = frame.buf().len().min(128);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
Err(e.ec())?
}
};
Ok(T::from_log(k))
} else if frame.tyid() == STATS_FRAME_TYPE_ID {
let k: StatsItem = match bincode::deserialize(frame.buf()) {
let k: StatsItem = match rmp_serde::from_slice(frame.buf()) {
Ok(item) => item,
Err(e) => {
error!(
"ERROR bincode::deserialize len {} STATS_FRAME_TYPE_ID",
frame.buf().len()
);
error!("ERROR deserialize len {} STATS_FRAME_TYPE_ID", frame.buf().len());
let n = frame.buf().len().min(128);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
Err(e.ec())?
}
};
Ok(T::from_stats(k))
@@ -256,18 +255,14 @@ where
frame
)))
} else {
match bincode::deserialize(frame.buf()) {
match rmp_serde::from_slice(frame.buf()) {
Ok(item) => Ok(item),
Err(e) => {
error!(
"ERROR bincode::deserialize len {} tyid {:x}",
frame.buf().len(),
frame.tyid()
);
error!("ERROR deserialize len {} tyid {:x}", frame.buf().len(), frame.tyid());
let n = frame.buf().len().min(64);
let s = String::from_utf8_lossy(&frame.buf()[..n]);
error!("frame.buf as string: {:?}", s);
Err(e)?
Err(e.ec())?
}
}
}

View File

@@ -37,8 +37,9 @@ use std::task::{Context, Poll};
use tokio::fs::File;
use tokio::io::{AsyncRead, ReadBuf};
pub const TERM_FRAME_TYPE_ID: u32 = 0x01;
pub const ERROR_FRAME_TYPE_ID: u32 = 0x02;
pub const TERM_FRAME_TYPE_ID: u32 = 0xaa01;
pub const ERROR_FRAME_TYPE_ID: u32 = 0xaa02;
pub const SITEMTY_NONSPEC_FRAME_TYPE_ID: u32 = 0xaa04;
pub const EVENT_QUERY_JSON_STRING_FRAME: u32 = 0x100;
pub const EVENTS_0D_FRAME_TYPE_ID: u32 = 0x500;
pub const MIN_MAX_AVG_DIM_0_BINS_FRAME_TYPE_ID: u32 = 0x700;
@@ -99,6 +100,25 @@ impl LogItem {
pub type Sitemty<T> = Result<StreamItem<RangeCompletableItem<T>>, Error>;
/// A `Sitemty` frame carries the inner data item's own frame type when it
/// holds data; all other non-error stream items (logs, stats, range-complete
/// markers) share the generic non-specific id, and errors use the dedicated
/// error frame id.
impl<T> FrameType for Sitemty<T>
where
    T: FrameType,
{
    fn frame_type_id(&self) -> u32 {
        match self {
            Ok(StreamItem::DataItem(RangeCompletableItem::Data(item))) => item.frame_type_id(),
            Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
            Ok(StreamItem::Log(_)) | Ok(StreamItem::Stats(_)) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
            Err(_) => ERROR_FRAME_TYPE_ID,
        }
    }
}
pub fn sitem_data<X>(x: X) -> Sitemty<X> {
Ok(StreamItem::DataItem(RangeCompletableItem::Data(x)))
}
@@ -257,8 +277,6 @@ where
// Meant to be implemented by Sitemty.
pub trait FrameType {
fn frame_type_id(&self) -> u32;
fn is_err(&self) -> bool;
fn err(&self) -> Option<&::err::Error>;
}
impl<T> FrameType for Box<T>
@@ -268,7 +286,29 @@ where
fn frame_type_id(&self) -> u32 {
self.as_ref().frame_type_id()
}
}
impl FrameTypeInnerDyn for Box<dyn TimeBinned> {
fn frame_type_id(&self) -> u32 {
FrameTypeInnerDyn::frame_type_id(self.as_time_binnable_dyn())
}
}
impl FrameTypeInnerDyn for Box<dyn EventsDyn> {
fn frame_type_id(&self) -> u32 {
FrameTypeInnerDyn::frame_type_id(self.as_time_binnable_dyn())
}
}
pub trait ContainsError {
fn is_err(&self) -> bool;
fn err(&self) -> Option<&::err::Error>;
}
impl<T> ContainsError for Box<T>
where
T: ContainsError,
{
fn is_err(&self) -> bool {
self.as_ref().is_err()
}
@@ -278,14 +318,7 @@ where
}
}
impl<T> FrameType for Sitemty<T>
where
T: FrameTypeInnerStatic,
{
fn frame_type_id(&self) -> u32 {
<T as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
impl<T> ContainsError for Sitemty<T> {
fn is_err(&self) -> bool {
match self {
Ok(_) => false,
@@ -301,18 +334,6 @@ where
}
}
impl FrameTypeInnerDyn for Box<dyn TimeBinned> {
fn frame_type_id(&self) -> u32 {
self.as_time_binnable_dyn().frame_type_id()
}
}
impl FrameTypeInnerDyn for Box<dyn EventsDyn> {
fn frame_type_id(&self) -> u32 {
self.as_time_binnable_dyn().frame_type_id()
}
}
pub trait Framable {
fn make_frame(&self) -> Result<BytesMut, Error>;
}
@@ -331,7 +352,7 @@ erased_serde::serialize_trait_object!(TimeBinned);
impl<T> Framable for Sitemty<T>
where
T: Sized + serde::Serialize + FrameTypeInnerDyn,
T: Sized + serde::Serialize + FrameType,
{
fn make_frame(&self) -> Result<BytesMut, Error> {
match self {
@@ -391,6 +412,12 @@ impl FrameTypeInnerStatic for EventQueryJsonStringFrame {
const FRAME_TYPE_ID: u32 = EVENT_QUERY_JSON_STRING_FRAME;
}
impl FrameType for EventQueryJsonStringFrame {
fn frame_type_id(&self) -> u32 {
EventQueryJsonStringFrame::FRAME_TYPE_ID
}
}
pub trait EventsNodeProcessor: Send + Unpin {
type Input;
type Output: Send + Unpin + DeserializeOwned + WithTimestamps + TimeBinnableType + ByteEstimate;
@@ -522,14 +549,32 @@ pub trait TimeBinnableType:
// TODO should not require Sync!
// TODO SitemtyFrameType is already supertrait of FramableInner.
pub trait TimeBinnableDyn:
std::fmt::Debug + FramableInner + FrameTypeInnerDyn + WithLen + RangeOverlapInfo + Any + Sync + Send + 'static
std::fmt::Debug
+ FramableInner
+ FrameType
+ FrameTypeInnerDyn
+ WithLen
+ RangeOverlapInfo
+ Any
+ Sync
+ Send
+ 'static
{
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Box<dyn TimeBinnerDyn>;
fn as_any(&self) -> &dyn Any;
}
pub trait TimeBinnableDynStub:
std::fmt::Debug + FramableInner + FrameTypeInnerDyn + WithLen + RangeOverlapInfo + Any + Sync + Send + 'static
std::fmt::Debug
+ FramableInner
+ FrameType
+ FrameTypeInnerDyn
+ WithLen
+ RangeOverlapInfo
+ Any
+ Sync
+ Send
+ 'static
{
}
@@ -572,6 +617,12 @@ pub trait TimeBinned: TimeBinnableDyn {
fn validate(&self) -> Result<(), String>;
}
impl FrameType for Box<dyn TimeBinned> {
fn frame_type_id(&self) -> u32 {
FrameType::frame_type_id(self.as_ref())
}
}
impl WithLen for Box<dyn TimeBinned> {
fn len(&self) -> usize {
self.as_time_binnable_dyn().len()

View File

@@ -3,8 +3,8 @@ use crate::numops::NumOps;
use crate::streams::{Collectable, Collector};
use crate::{
pulse_offs_from_abs, ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn,
FilterFittingInside, Fits, FitsInside, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv,
ReadableFromFile, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinnerDyn, WithLen,
FilterFittingInside, Fits, FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo,
ReadPbv, ReadableFromFile, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, TimeBinnerDyn, WithLen,
WithTimestamps,
};
use err::Error;
@@ -26,6 +26,14 @@ pub struct ScalarEvents<NTY> {
}
impl<NTY> ScalarEvents<NTY> {
pub fn empty() -> Self {
Self {
tss: vec![],
pulses: vec![],
values: vec![],
}
}
#[inline(always)]
pub fn push(&mut self, ts: u64, pulse: u64, value: NTY) {
self.tss.push(ts);
@@ -59,13 +67,12 @@ where
const FRAME_TYPE_ID: u32 = crate::EVENTS_0D_FRAME_TYPE_ID + NTY::SUB;
}
impl<NTY> ScalarEvents<NTY> {
pub fn empty() -> Self {
Self {
tss: vec![],
pulses: vec![],
values: vec![],
}
impl<NTY> FrameType for ScalarEvents<NTY>
where
NTY: NumOps,
{
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}

View File

@@ -1,8 +1,8 @@
use crate::streams::{Collectable, Collector};
use crate::{
ts_offs_from_abs, Appendable, ByteEstimate, Clearable, EventAppendable, FilterFittingInside, Fits, FitsInside,
FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, TimeBinnableType,
TimeBinnableTypeAggregator, WithLen, WithTimestamps,
FrameType, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile,
TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
@@ -21,6 +21,12 @@ impl FrameTypeInnerStatic for StatsEvents {
const FRAME_TYPE_ID: u32 = crate::STATS_EVENTS_FRAME_TYPE_ID;
}
impl FrameType for StatsEvents {
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}
impl StatsEvents {
pub fn empty() -> Self {
Self {

View File

@@ -4,8 +4,8 @@ use crate::xbinnedscalarevents::XBinnedScalarEvents;
use crate::xbinnedwaveevents::XBinnedWaveEvents;
use crate::{
Appendable, ByteEstimate, Clearable, EventAppendable, EventsDyn, EventsNodeProcessor, FilterFittingInside, Fits,
FitsInside, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SubFrId,
TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
FitsInside, FrameType, FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile,
SubFrId, TimeBinnableDyn, TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
@@ -30,6 +30,14 @@ impl<NTY> WaveEvents<NTY> {
}
impl<NTY> WaveEvents<NTY> {
pub fn empty() -> Self {
Self {
tss: Vec::new(),
pulses: Vec::new(),
vals: Vec::new(),
}
}
pub fn shape(&self) -> Result<Shape, Error> {
if let Some(k) = self.vals.first() {
let ret = Shape::Wave(k.len() as u32);
@@ -47,13 +55,12 @@ where
const FRAME_TYPE_ID: u32 = crate::WAVE_EVENTS_FRAME_TYPE_ID + NTY::SUB;
}
impl<NTY> WaveEvents<NTY> {
pub fn empty() -> Self {
Self {
tss: vec![],
pulses: vec![],
vals: vec![],
}
impl<NTY> FrameType for WaveEvents<NTY>
where
NTY: NumOps,
{
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}

View File

@@ -2,9 +2,9 @@ use crate::binsdim0::MinMaxAvgDim0Bins;
use crate::numops::NumOps;
use crate::streams::{Collectable, Collector};
use crate::{
ts_offs_from_abs, Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, FrameTypeInnerStatic,
NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SubFrId, TimeBinnableType,
TimeBinnableTypeAggregator, WithLen, WithTimestamps,
ts_offs_from_abs, Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, FrameType,
FrameTypeInnerStatic, NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SubFrId,
TimeBinnableType, TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
@@ -30,6 +30,15 @@ where
const FRAME_TYPE_ID: u32 = crate::X_BINNED_SCALAR_EVENTS_FRAME_TYPE_ID + NTY::SUB;
}
impl<NTY> FrameType for XBinnedScalarEvents<NTY>
where
NTY: SubFrId,
{
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}
impl<NTY> XBinnedScalarEvents<NTY> {
pub fn empty() -> Self {
Self {

View File

@@ -2,9 +2,9 @@ use crate::binsdim1::MinMaxAvgDim1Bins;
use crate::numops::NumOps;
use crate::streams::{Collectable, Collector};
use crate::{
Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, FrameTypeInnerStatic, NewEmpty,
PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SubFrId, TimeBinnableType, TimeBinnableTypeAggregator,
WithLen, WithTimestamps,
Appendable, ByteEstimate, Clearable, FilterFittingInside, Fits, FitsInside, FrameType, FrameTypeInnerStatic,
NewEmpty, PushableIndex, RangeOverlapInfo, ReadPbv, ReadableFromFile, SubFrId, TimeBinnableType,
TimeBinnableTypeAggregator, WithLen, WithTimestamps,
};
use err::Error;
use netpod::log::*;
@@ -30,6 +30,15 @@ where
const FRAME_TYPE_ID: u32 = crate::X_BINNED_WAVE_EVENTS_FRAME_TYPE_ID + NTY::SUB;
}
impl<NTY> FrameType for XBinnedWaveEvents<NTY>
where
NTY: SubFrId,
{
fn frame_type_id(&self) -> u32 {
<Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
}
}
impl<NTY> XBinnedWaveEvents<NTY> {
pub fn empty() -> Self {
Self {

View File

@@ -229,6 +229,14 @@ pub struct BinsDim0CollectedResult<NTY> {
finished_at: Option<IsoDateTime>,
}
impl<NTY: ScalarOps> crate::AsAnyRef for BinsDim0CollectedResult<NTY> {
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<NTY: ScalarOps> crate::collect::Collected for BinsDim0CollectedResult<NTY> {}
impl<NTY> BinsDim0CollectedResult<NTY> {
pub fn ts_anchor_sec(&self) -> u64 {
self.ts_anchor_sec

View File

@@ -0,0 +1,488 @@
use std::any::Any;
use std::fmt;
use crate::merger_cev::MergeableCev;
use crate::streams::Collectable;
use crate::streams::Collector;
use crate::{merger, Events};
use items::FrameType;
use items::FrameTypeInnerStatic;
use netpod::log::*;
use serde::{Deserialize, Serialize};
// TODO maybe rename to ChannelStatus?
/// Connection state of the data source for a channel.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ConnStatus {
    Connect,
    Disconnect,
}
/// A timestamped connection status change for a channel.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnStatusEvent {
    // Timestamp of the status change; unit not visible here — presumably
    // nanoseconds like other timestamps in this codebase, TODO confirm.
    pub ts: u64,
    pub status: ConnStatus,
}
impl ConnStatusEvent {
    /// Create a status event at timestamp `ts`.
    pub fn new(ts: u64, status: ConnStatus) -> Self {
        Self { ts, status }
    }
}
/// Events on a channel consist not only of e.g. timestamped values, but can be also
/// connection status changes.
#[derive(Debug)]
pub enum ChannelEvents {
    // Boxed trait object because the concrete event container type is only
    // known at runtime.
    Events(Box<dyn Events>),
    Status(ConnStatusEvent),
}
// Static frame type id for ChannelEvents; the id constant is defined in the
// `items` crate alongside the other frame type ids.
impl FrameTypeInnerStatic for ChannelEvents {
    const FRAME_TYPE_ID: u32 = items::ITEMS_2_CHANNEL_EVENTS_FRAME_TYPE_ID;
}
// Dynamic frame type id delegates to the static constant for all variants.
impl FrameType for ChannelEvents {
    fn frame_type_id(&self) -> u32 {
        // TODO SubFrId missing, but get rid of the frame type concept anyhow.
        <Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
    }
}
impl Clone for ChannelEvents {
fn clone(&self) -> Self {
match self {
Self::Events(arg0) => Self::Events(arg0.clone_dyn()),
Self::Status(arg0) => Self::Status(arg0.clone()),
}
}
}
mod serde_channel_events {
use super::{ChannelEvents, Events};
use crate::eventsdim0::EventsDim0;
use serde::de::{self, EnumAccess, VariantAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
impl Serialize for ChannelEvents {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let name = "ChannelEvents";
match self {
ChannelEvents::Events(obj) => {
use serde::ser::SerializeTupleVariant;
let mut ser = serializer.serialize_tuple_variant(name, 0, "Events", 3)?;
ser.serialize_field(obj.serde_id())?;
ser.serialize_field(&obj.nty_id())?;
ser.serialize_field(obj)?;
ser.end()
}
ChannelEvents::Status(val) => serializer.serialize_newtype_variant(name, 1, "Status", val),
}
}
}
struct EventsBoxVisitor;
impl<'de> Visitor<'de> for EventsBoxVisitor {
type Value = Box<dyn Events>;
fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "Events object")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: de::SeqAccess<'de>,
{
use items::SubFrId;
let e0: &str = seq.next_element()?.ok_or(de::Error::missing_field("ty .0"))?;
let e1: u32 = seq.next_element()?.ok_or(de::Error::missing_field("nty .1"))?;
if e0 == EventsDim0::<u8>::serde_id() {
match e1 {
i32::SUB => {
let obj: EventsDim0<i32> = seq.next_element()?.ok_or(de::Error::missing_field("obj .2"))?;
Ok(Box::new(obj))
}
f32::SUB => {
let obj: EventsDim0<f32> = seq.next_element()?.ok_or(de::Error::missing_field("obj .2"))?;
Ok(Box::new(obj))
}
_ => Err(de::Error::custom(&format!("unknown nty {e1}"))),
}
} else {
Err(de::Error::custom(&format!("unknown ty {e0}")))
}
}
}
pub struct ChannelEventsVisitor;
impl ChannelEventsVisitor {
fn name() -> &'static str {
"ChannelEvents"
}
fn allowed_variants() -> &'static [&'static str] {
&["Events", "Status", "RangeComplete"]
}
}
impl<'de> Visitor<'de> for ChannelEventsVisitor {
type Value = ChannelEvents;
fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "ChannelEvents")
}
fn visit_enum<A>(self, data: A) -> Result<Self::Value, A::Error>
where
A: EnumAccess<'de>,
{
let (id, var) = data.variant()?;
match id {
"Events" => {
let c = var.tuple_variant(3, EventsBoxVisitor)?;
Ok(Self::Value::Events(c))
}
_ => return Err(de::Error::unknown_variant(id, Self::allowed_variants())),
}
}
}
impl<'de> Deserialize<'de> for ChannelEvents {
fn deserialize<D>(de: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
de.deserialize_enum(
ChannelEventsVisitor::name(),
ChannelEventsVisitor::allowed_variants(),
ChannelEventsVisitor,
)
}
}
}
#[cfg(test)]
mod test_channel_events_serde {
    use super::ChannelEvents;
    use crate::eventsdim0::EventsDim0;
    use crate::Empty;

    // Round-trip a ChannelEvents::Events through the custom JSON serde impls.
    #[test]
    fn channel_events() {
        let mut evs = EventsDim0::empty();
        evs.push(8, 2, 3.0f32);
        evs.push(12, 3, 3.2f32);
        let item = ChannelEvents::Events(Box::new(evs));
        let s = serde_json::to_string_pretty(&item).unwrap();
        eprintln!("{s}");
        let w: ChannelEvents = serde_json::from_str(&s).unwrap();
        eprintln!("{w:?}");
        // BUGFIX: the round-trip result was previously only printed, never checked.
        assert_eq!(w, item);
    }
}
impl PartialEq for ChannelEvents {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::Events(l0), Self::Events(r0)) => l0 == r0,
(Self::Status(l0), Self::Status(r0)) => l0 == r0,
_ => core::mem::discriminant(self) == core::mem::discriminant(other),
}
}
}
impl MergeableCev for ChannelEvents {
    // Smallest contained timestamp; a status change is its own timestamp.
    fn ts_min(&self) -> Option<u64> {
        use ChannelEvents::*;
        match self {
            Events(k) => k.ts_min(),
            Status(k) => Some(k.ts),
        }
    }

    // NOTE(review): unimplemented; `err::todoval()` presumably panics if this
    // is ever called — confirm before relying on ts_max through this trait.
    fn ts_max(&self) -> Option<u64> {
        error!("TODO impl MergableEvents for ChannelEvents");
        err::todoval()
    }
}
impl crate::merger::Mergeable for ChannelEvents {
    /// Number of contained items; a status change counts as one event.
    fn len(&self) -> usize {
        match self {
            ChannelEvents::Events(k) => k.len(),
            ChannelEvents::Status(_) => 1,
        }
    }

    fn ts_min(&self) -> Option<u64> {
        match self {
            ChannelEvents::Events(k) => k.ts_min(),
            ChannelEvents::Status(k) => Some(k.ts),
        }
    }

    fn ts_max(&self) -> Option<u64> {
        match self {
            ChannelEvents::Events(k) => k.ts_max(),
            ChannelEvents::Status(k) => Some(k.ts),
        }
    }

    /// Items can only be merged into a target of the same variant.
    // TODO better to delegate this to the inner type?
    fn is_compatible_target(&self, tgt: &Self) -> bool {
        use ChannelEvents::*;
        // Idiomatic form of the previous if-let/else-bool ladder.
        matches!((self, tgt), (Events(_), Events(_)) | (Status(_), Status(_)))
    }

    fn move_into_fresh(&mut self, ts_end: u64) -> Self {
        match self {
            ChannelEvents::Events(k) => ChannelEvents::Events(k.move_into_fresh(ts_end)),
            // Status is a single event: a copy is returned, nothing is drained.
            ChannelEvents::Status(k) => ChannelEvents::Status(k.clone()),
        }
    }

    /// Move events up to `ts_end` into `tgt`. Mixed variants are not
    /// compatible; status-into-status reports `Full` — presumably so the
    /// merger emits the target first (confirm against merger semantics).
    fn move_into_existing(&mut self, tgt: &mut Self, ts_end: u64) -> Result<(), merger::MergeError> {
        match self {
            ChannelEvents::Events(k) => match tgt {
                ChannelEvents::Events(tgt) => k.move_into_existing(tgt, ts_end),
                ChannelEvents::Status(_) => Err(merger::MergeError::NotCompatible),
            },
            ChannelEvents::Status(_) => match tgt {
                ChannelEvents::Events(_) => Err(merger::MergeError::NotCompatible),
                ChannelEvents::Status(_) => Err(merger::MergeError::Full),
            },
        }
    }
}
// Legacy streams::Collectable; both arms are still unimplemented and panic.
impl Collectable for ChannelEvents {
    fn new_collector(&self) -> Box<dyn Collector> {
        match self {
            ChannelEvents::Events(_item) => todo!(),
            ChannelEvents::Status(_) => todo!(),
        }
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
/// Time-binner for `ChannelEvents`; wraps a lazily created inner binner.
pub struct ChannelEventsTimeBinner {
    // TODO `ConnStatus` contains all the changes that can happen to a connection, but
    // here we would rather require a simplified current state for binning purpose.
    // Bin edges handed to the inner binner on creation.
    edges: Vec<u64>,
    do_time_weight: bool,
    conn_state: ConnStatus,
    // Created on the first `Events` item, since only the concrete events
    // container knows its binner type.
    binner: Option<Box<dyn crate::TimeBinner>>,
}
// Manual Debug: only the connection state is reported.
impl fmt::Debug for ChannelEventsTimeBinner {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("ChannelEventsTimeBinner")
            .field("conn_state", &self.conn_state)
            .finish()
    }
}
impl crate::timebin::TimeBinner for ChannelEventsTimeBinner {
    type Input = ChannelEvents;
    type Output = Box<dyn crate::TimeBinned>;

    /// Feed one item into the binner.
    ///
    /// The inner binner is created lazily from the first `Events` item,
    /// because only the concrete events container knows which binner to
    /// build. Status items are not binned yet.
    fn ingest(&mut self, item: &mut Self::Input) {
        match item {
            ChannelEvents::Events(item) => {
                if self.binner.is_none() {
                    self.binner = Some(item.time_binner_new(self.edges.clone(), self.do_time_weight));
                }
                // After the guard above the binner always exists; the previous
                // unreachable error branch is gone.
                if let Some(binner) = self.binner.as_mut() {
                    binner.ingest(item.as_time_binnable());
                }
            }
            ChannelEvents::Status(item) => {
                warn!("TODO consider channel status in time binning {item:?}");
            }
        }
    }

    fn set_range_complete(&mut self) {
        if let Some(binner) = self.binner.as_mut() {
            binner.set_range_complete();
        }
    }

    fn bins_ready_count(&self) -> usize {
        match &self.binner {
            Some(binner) => binner.bins_ready_count(),
            None => 0,
        }
    }

    fn bins_ready(&mut self) -> Option<Self::Output> {
        self.binner.as_mut().and_then(|binner| binner.bins_ready())
    }

    fn push_in_progress(&mut self, push_empty: bool) {
        if let Some(binner) = self.binner.as_mut() {
            binner.push_in_progress(push_empty);
        }
    }

    fn cycle(&mut self) {
        if let Some(binner) = self.binner.as_mut() {
            binner.cycle();
        }
    }
}
impl crate::timebin::TimeBinnable for ChannelEvents {
    type TimeBinner = ChannelEventsTimeBinner;

    /// Create a binner seeded with this item's connection state. The inner
    /// per-type binner stays `None` until the first `Events` item is ingested.
    fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Self::TimeBinner {
        let conn_state = match self {
            ChannelEvents::Events(_) => ConnStatus::Connect,
            ChannelEvents::Status(ev) => ev.status.clone(),
        };
        ChannelEventsTimeBinner {
            edges,
            do_time_weight,
            conn_state,
            binner: None,
        }
    }
}
// Placeholder output type; currently unused because the collector's `result`
// returns the inner collector's output directly — see note in
// `ChannelEventsCollector::result`.
#[derive(Debug, Serialize, Deserialize)]
pub struct ChannelEventsCollectorOutput {}

impl crate::AsAnyRef for ChannelEventsCollectorOutput {
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}

impl crate::ToJsonResult for ChannelEventsCollectorOutput {
    // TODO unimplemented; panics if called.
    fn to_json_result(&self) -> Result<Box<dyn crate::streams::ToJsonBytes>, err::Error> {
        todo!()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}

impl crate::collect::Collected for ChannelEventsCollectorOutput {}
/// Collector for `ChannelEvents` which defers to a type-erased inner collector.
#[derive(Debug)]
pub struct ChannelEventsCollector {
    // Created lazily from the first ingested `Events` item.
    coll: Option<Box<dyn crate::collect::CollectorDyn>>,
    // Flags forwarded to the inner collector when the result is taken.
    range_complete: bool,
    timed_out: bool,
}

impl ChannelEventsCollector {
    pub fn new() -> Self {
        Self {
            coll: None,
            range_complete: false,
            timed_out: false,
        }
    }
}
impl crate::collect::Collector for ChannelEventsCollector {
    type Input = ChannelEvents;
    type Output = Box<dyn crate::collect::Collected>;

    fn len(&self) -> usize {
        match &self.coll {
            Some(coll) => coll.len(),
            None => 0,
        }
    }

    /// Feed one item. The inner collector is created lazily from the first
    /// `Events` item since only the concrete container knows its collector
    /// type. Status events are not collected yet.
    fn ingest(&mut self, item: &mut Self::Input) {
        match item {
            ChannelEvents::Events(item) => {
                if self.coll.is_none() {
                    let coll = item.as_ref().as_collectable_with_default_ref().new_collector();
                    self.coll = Some(coll);
                }
                // Guard above guarantees Some; if-let replaces the unwrap.
                if let Some(coll) = self.coll.as_mut() {
                    coll.ingest(item.as_collectable_with_default_mut());
                }
            }
            ChannelEvents::Status(_) => {
                // TODO decide on output format to collect also the connection status events
            }
        }
    }

    fn set_range_complete(&mut self) {
        self.range_complete = true;
    }

    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }

    /// Forward the recorded flags to the inner collector and return its result.
    ///
    /// Panics (`todo!`) when nothing was ever ingested, because no inner
    /// collector exists to produce an output.
    fn result(&mut self) -> Result<Self::Output, crate::Error> {
        match self.coll.as_mut() {
            Some(coll) => {
                if self.range_complete {
                    coll.set_range_complete();
                }
                if self.timed_out {
                    coll.set_timed_out();
                }
                let res = coll.result()?;
                Ok(res)
            }
            None => {
                error!("nothing collected [caa8d2565]");
                todo!()
            }
        }
    }
}
impl crate::collect::Collectable for ChannelEvents {
    type Collector = ChannelEventsCollector;

    // The collector is stateless at creation; it specializes on first ingest.
    fn new_collector(&self) -> Self::Collector {
        ChannelEventsCollector::new()
    }
}

View File

@@ -1,9 +1,12 @@
use crate::AsAnyRef;
use crate::Error;
use std::any::Any;
use std::fmt;
pub trait Collector: fmt::Debug {
pub trait Collector: fmt::Debug + Send {
// TODO should require here Collectable?
type Input;
type Output;
type Output: Collected;
fn len(&self) -> usize;
@@ -21,3 +24,45 @@ pub trait Collectable: fmt::Debug {
fn new_collector(&self) -> Self::Collector;
}
pub trait Collected: fmt::Debug + crate::streams::ToJsonResult + AsAnyRef + Send {}
erased_serde::serialize_trait_object!(Collected);
impl AsAnyRef for Box<dyn Collected> {
    // Delegate to the inner trait object so downcasting reaches the concrete type.
    fn as_any_ref(&self) -> &dyn Any {
        self.as_ref().as_any_ref()
    }
}

impl crate::streams::ToJsonResult for Box<dyn Collected> {
    fn to_json_result(&self) -> Result<Box<dyn crate::streams::ToJsonBytes>, err::Error> {
        self.as_ref().to_json_result()
    }

    // NOTE(review): unlike as_any_ref above, this returns the Box itself as
    // Any (not the inner object), so a downcast here targets
    // `Box<dyn Collected>` — confirm that asymmetry is intended.
    fn as_any(&self) -> &dyn Any {
        self
    }
}

// A boxed Collected is itself Collected, so trait objects compose.
impl Collected for Box<dyn Collected> {}
#[derive(Debug)]
pub struct CollectorDynDefault {}

/// Object-safe collector interface: mirrors `Collector` without associated
/// types so it can be used as a trait object.
pub trait CollectorDyn: fmt::Debug + Send {
    fn len(&self) -> usize;

    fn ingest(&mut self, item: &mut dyn CollectableWithDefault);

    fn set_range_complete(&mut self);

    fn set_timed_out(&mut self);

    fn result(&mut self) -> Result<Box<dyn Collected>, Error>;
}

/// Implemented by containers that can create a type-erased default collector
/// for themselves.
pub trait CollectableWithDefault {
    fn new_collector(&self) -> Box<dyn CollectorDyn>;

    fn as_any_mut(&mut self) -> &mut dyn Any;
}

View File

@@ -164,6 +164,20 @@ pub struct EventsDim0CollectorOutput<NTY> {
timed_out: bool,
}
impl<NTY: ScalarOps> EventsDim0CollectorOutput<NTY> {
pub fn len(&self) -> usize {
self.values.len()
}
}
impl<NTY: ScalarOps> crate::AsAnyRef for EventsDim0CollectorOutput<NTY> {
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<NTY: ScalarOps> crate::collect::Collected for EventsDim0CollectorOutput<NTY> {}
impl<NTY: ScalarOps> ToJsonResult for EventsDim0CollectorOutput<NTY> {
fn to_json_result(&self) -> Result<Box<dyn crate::streams::ToJsonBytes>, Error> {
let k = serde_json::to_value(self)?;
@@ -222,7 +236,6 @@ impl<NTY: ScalarOps> CollectableType for EventsDim0<NTY> {
impl<NTY: ScalarOps> crate::collect::Collector for EventsDim0Collector<NTY> {
type Input = EventsDim0<NTY>;
// TODO the output probably needs to be different to accommodate also range-complete, continue-at, etc
type Output = EventsDim0CollectorOutput<NTY>;
fn len(&self) -> usize {
@@ -576,6 +589,14 @@ impl<NTY: ScalarOps> Events for EventsDim0<NTY> {
self
}
fn as_collectable_with_default_ref(&self) -> &dyn crate::collect::CollectableWithDefault {
self
}
fn as_collectable_with_default_mut(&mut self) -> &mut dyn crate::collect::CollectableWithDefault {
self
}
fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
// TODO improve the search
let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
@@ -598,7 +619,7 @@ impl<NTY: ScalarOps> Events for EventsDim0<NTY> {
fn move_into_existing(&mut self, tgt: &mut Box<dyn Events>, ts_end: u64) -> Result<(), ()> {
// TODO as_any and as_any_mut are declared on unrealted traits. Simplify.
if let Some(tgt) = tgt.as_any_mut().downcast_mut::<Self>() {
if let Some(tgt) = crate::streams::Collectable::as_any_mut(tgt.as_mut()).downcast_mut::<Self>() {
// TODO improve the search
let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
// TODO make it harder to forget new members when the struct may get modified in the future
@@ -837,10 +858,83 @@ impl<NTY: ScalarOps> TimeBinner for EventsDim0TimeBinner<NTY> {
}
}
impl<NTY: ScalarOps> crate::collect::Collectable for EventsDim0<NTY> {
type Collector;
#[derive(Debug)]
pub struct EventsDim0CollectorDyn {}
fn new_collector(&self) -> Self::Collector {
impl EventsDim0CollectorDyn {
pub fn new() -> Self {
Self {}
}
}
// All methods unimplemented; the typed EventsDim0Collector<NTY> below is the
// one actually used.
impl crate::collect::CollectorDyn for EventsDim0CollectorDyn {
    fn len(&self) -> usize {
        todo!()
    }

    fn ingest(&mut self, _item: &mut dyn crate::collect::CollectableWithDefault) {
        // TODO remove this struct?
        todo!()
    }

    fn set_range_complete(&mut self) {
        todo!()
    }

    fn set_timed_out(&mut self) {
        todo!()
    }

    fn result(&mut self) -> Result<Box<dyn crate::collect::Collected>, crate::Error> {
        todo!()
    }
}
// Bridge from the object-safe CollectorDyn to the typed CollectorType impl.
impl<NTY: ScalarOps> crate::collect::CollectorDyn for EventsDim0Collector<NTY> {
    fn len(&self) -> usize {
        WithLen::len(self)
    }

    // Downcast the type-erased item back to the concrete container and
    // delegate to the typed collector.
    fn ingest(&mut self, item: &mut dyn crate::collect::CollectableWithDefault) {
        let x = item.as_any_mut();
        if let Some(item) = x.downcast_mut::<EventsDim0<NTY>>() {
            CollectorType::ingest(self, item)
        } else {
            // TODO need possibility to return error
            // NOTE(review): a type mismatch is silently dropped here.
            ()
        }
    }

    fn set_range_complete(&mut self) {
        CollectorType::set_range_complete(self);
    }

    fn set_timed_out(&mut self) {
        CollectorType::set_timed_out(self);
    }

    // Box the typed output as a type-erased Collected.
    fn result(&mut self) -> Result<Box<dyn crate::collect::Collected>, crate::Error> {
        CollectorType::result(self)
            .map(|x| Box::new(x) as _)
            .map_err(|e| e.into())
    }
}
impl<NTY: ScalarOps> crate::collect::CollectableWithDefault for EventsDim0<NTY> {
    // Type-erased collector factory; pairs with CollectorDyn above.
    fn new_collector(&self) -> Box<dyn crate::collect::CollectorDyn> {
        let coll = EventsDim0Collector::<NTY>::new();
        Box::new(coll)
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
// Typed (non-erased) collector association for EventsDim0.
impl<NTY: ScalarOps> crate::collect::Collectable for EventsDim0<NTY> {
    type Collector = EventsDim0Collector<NTY>;

    fn new_collector(&self) -> Self::Collector {
        EventsDim0Collector::new()
    }
}

View File

@@ -1,4 +1,5 @@
pub mod binsdim0;
pub mod channelevents;
pub mod collect;
pub mod eventsdim0;
pub mod merger;
@@ -9,17 +10,19 @@ pub mod test;
pub mod testgen;
pub mod timebin;
use crate as items_2;
use crate::streams::Collectable;
use crate::streams::Collector;
use crate::streams::ToJsonResult;
use channelevents::ChannelEvents;
use chrono::{DateTime, TimeZone, Utc};
use futures_util::FutureExt;
use futures_util::Stream;
use futures_util::StreamExt;
use items::FrameTypeInnerStatic;
use items::RangeCompletableItem;
use items::Sitemty;
use items::StreamItem;
use items::SubFrId;
use merger_cev::MergeableCev;
use netpod::log::*;
use netpod::timeunits::*;
use netpod::{AggKind, NanoRange, ScalarType, Shape};
@@ -30,8 +33,6 @@ use std::fmt;
use std::pin::Pin;
use std::time::Duration;
use std::time::Instant;
use streams::Collectable;
use streams::ToJsonResult;
pub fn bool_is_false(x: &bool) -> bool {
*x == false
@@ -262,6 +263,20 @@ pub trait AppendEmptyBin {
fn append_empty_bin(&mut self, ts1: u64, ts2: u64);
}
pub trait AsAnyRef {
fn as_any_ref(&self) -> &dyn Any;
}
pub trait AsAnyMut {
fn as_any_mut(&mut self) -> &mut dyn Any;
}
/*impl AsAnyRef for Box<dyn AsAnyRef> {
fn as_any_ref(&self) -> &dyn Any {
self.as_ref().as_any_ref()
}
}*/
#[derive(Clone, Debug, PartialEq, Deserialize)]
pub struct IsoDateTime(DateTime<Utc>);
@@ -313,11 +328,21 @@ pub trait TimeBinnable: fmt::Debug + WithLen + RangeOverlapInfo + Any + Send {
// TODO can I remove the Any bound?
/// Container of some form of events, for use as trait object.
pub trait Events: fmt::Debug + Any + Collectable + TimeBinnable + Send + erased_serde::Serialize {
pub trait Events:
fmt::Debug
+ Any
+ Collectable
+ items_2::collect::CollectableWithDefault
+ TimeBinnable
+ Send
+ erased_serde::Serialize
{
fn as_time_binnable(&self) -> &dyn TimeBinnable;
fn verify(&self) -> bool;
fn output_info(&self);
fn as_collectable_mut(&mut self) -> &mut dyn Collectable;
fn as_collectable_with_default_ref(&self) -> &dyn crate::collect::CollectableWithDefault;
fn as_collectable_with_default_mut(&mut self) -> &mut dyn crate::collect::CollectableWithDefault;
fn ts_min(&self) -> Option<u64>;
fn ts_max(&self) -> Option<u64>;
fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events>;
@@ -366,74 +391,6 @@ impl PartialEq for Box<dyn Events> {
}
}
// TODO remove
struct EventsCollector2 {}
impl WithLen for EventsCollector2 {
fn len(&self) -> usize {
todo!()
}
}
// TODO remove
impl Collector for EventsCollector2 {
fn ingest(&mut self, _src: &mut dyn Collectable) {
todo!()
}
fn set_range_complete(&mut self) {
todo!()
}
fn set_timed_out(&mut self) {
todo!()
}
fn result(&mut self) -> Result<Box<dyn ToJsonResult>, err::Error> {
todo!()
}
}
// TODO remove
impl Collectable for Box<dyn Events> {
fn new_collector(&self) -> Box<dyn Collector> {
Box::new(EventsCollector2 {})
}
fn as_any_mut(&mut self) -> &mut dyn Any {
todo!()
}
}
/*impl crate::streams::CollectorType for EventsCollector {
type Input = dyn Events;
type Output = Box<dyn Events>;
fn ingest(&mut self, src: &mut Self::Input) {
todo!()
}
fn set_range_complete(&mut self) {
todo!()
}
fn set_timed_out(&mut self) {
todo!()
}
fn result(&mut self) -> Result<Self::Output, err::Error> {
todo!()
}
}*/
/*impl crate::streams::CollectableType for dyn Events {
type Collector = EventsCollector;
fn new_collector() -> Self::Collector {
todo!()
}
}*/
/// Data in time-binned form.
pub trait TimeBinned: Any + TimeBinnable {
fn as_time_binnable_dyn(&self) -> &dyn TimeBinnable;
@@ -593,409 +550,50 @@ pub fn empty_binned_dyn(scalar_type: &ScalarType, shape: &Shape, agg_kind: &AggK
}
}
// TODO maybe rename to ChannelStatus?
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ConnStatus {
Connect,
Disconnect,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnStatusEvent {
pub ts: u64,
pub status: ConnStatus,
}
impl ConnStatusEvent {
pub fn new(ts: u64, status: ConnStatus) -> Self {
Self { ts, status }
}
}
/// Events on a channel consist not only of e.g. timestamped values, but can be also
/// connection status changes.
#[derive(Debug)]
pub enum ChannelEvents {
Events(Box<dyn Events>),
Status(ConnStatusEvent),
}
impl FrameTypeInnerStatic for ChannelEvents {
const FRAME_TYPE_ID: u32 = items::ITEMS_2_CHANNEL_EVENTS_FRAME_TYPE_ID;
}
impl Clone for ChannelEvents {
fn clone(&self) -> Self {
match self {
Self::Events(arg0) => Self::Events(arg0.clone_dyn()),
Self::Status(arg0) => Self::Status(arg0.clone()),
}
}
}
mod serde_channel_events {
use super::{ChannelEvents, Events};
use crate::eventsdim0::EventsDim0;
use serde::de::{self, EnumAccess, VariantAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
impl Serialize for ChannelEvents {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let name = "ChannelEvents";
match self {
ChannelEvents::Events(obj) => {
use serde::ser::SerializeTupleVariant;
let mut ser = serializer.serialize_tuple_variant(name, 0, "Events", 3)?;
ser.serialize_field(obj.serde_id())?;
ser.serialize_field(&obj.nty_id())?;
ser.serialize_field(obj)?;
ser.end()
}
ChannelEvents::Status(val) => serializer.serialize_newtype_variant(name, 1, "Status", val),
}
}
}
struct EventsBoxVisitor;
impl<'de> Visitor<'de> for EventsBoxVisitor {
type Value = Box<dyn Events>;
fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "Events object")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: de::SeqAccess<'de>,
{
use items::SubFrId;
let e0: &str = seq.next_element()?.ok_or(de::Error::missing_field("ty .0"))?;
let e1: u32 = seq.next_element()?.ok_or(de::Error::missing_field("nty .1"))?;
if e0 == EventsDim0::<u8>::serde_id() {
match e1 {
f32::SUB => {
let obj: EventsDim0<f32> = seq.next_element()?.ok_or(de::Error::missing_field("obj .2"))?;
Ok(Box::new(obj))
}
_ => Err(de::Error::custom(&format!("unknown nty {e1}"))),
}
} else {
Err(de::Error::custom(&format!("unknown ty {e0}")))
}
}
}
pub struct ChannelEventsVisitor;
impl ChannelEventsVisitor {
fn name() -> &'static str {
"ChannelEvents"
}
fn allowed_variants() -> &'static [&'static str] {
&["Events", "Status", "RangeComplete"]
}
}
impl<'de> Visitor<'de> for ChannelEventsVisitor {
type Value = ChannelEvents;
fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "ChannelEvents")
}
fn visit_enum<A>(self, data: A) -> Result<Self::Value, A::Error>
where
A: EnumAccess<'de>,
{
let (id, var) = data.variant()?;
match id {
"Events" => {
let c = var.tuple_variant(3, EventsBoxVisitor)?;
Ok(Self::Value::Events(c))
}
_ => return Err(de::Error::unknown_variant(id, Self::allowed_variants())),
}
}
}
impl<'de> Deserialize<'de> for ChannelEvents {
fn deserialize<D>(de: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
de.deserialize_enum(
ChannelEventsVisitor::name(),
ChannelEventsVisitor::allowed_variants(),
ChannelEventsVisitor,
)
}
}
}
#[cfg(test)]
mod test_channel_events_serde {
use super::ChannelEvents;
use crate::eventsdim0::EventsDim0;
use crate::Empty;
#[test]
fn channel_events() {
let mut evs = EventsDim0::empty();
evs.push(8, 2, 3.0f32);
evs.push(12, 3, 3.2f32);
let item = ChannelEvents::Events(Box::new(evs));
let s = serde_json::to_string_pretty(&item).unwrap();
eprintln!("{s}");
let w: ChannelEvents = serde_json::from_str(&s).unwrap();
eprintln!("{w:?}");
}
}
impl PartialEq for ChannelEvents {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::Events(l0), Self::Events(r0)) => l0 == r0,
(Self::Status(l0), Self::Status(r0)) => l0 == r0,
_ => core::mem::discriminant(self) == core::mem::discriminant(other),
}
}
}
impl MergeableCev for ChannelEvents {
fn ts_min(&self) -> Option<u64> {
use ChannelEvents::*;
match self {
Events(k) => k.ts_min(),
Status(k) => Some(k.ts),
}
}
fn ts_max(&self) -> Option<u64> {
error!("TODO impl MergableEvents for ChannelEvents");
err::todoval()
}
}
impl crate::merger::Mergeable for ChannelEvents {
fn len(&self) -> usize {
match self {
ChannelEvents::Events(k) => k.len(),
ChannelEvents::Status(_) => 1,
}
}
fn ts_min(&self) -> Option<u64> {
match self {
ChannelEvents::Events(k) => k.ts_min(),
ChannelEvents::Status(k) => Some(k.ts),
}
}
fn ts_max(&self) -> Option<u64> {
match self {
ChannelEvents::Events(k) => k.ts_max(),
ChannelEvents::Status(k) => Some(k.ts),
}
}
fn is_compatible_target(&self, tgt: &Self) -> bool {
use ChannelEvents::*;
match self {
Events(_) => {
// TODO better to delegate this to inner type?
if let Events(_) = tgt {
true
} else {
false
}
}
Status(_) => {
// TODO better to delegate this to inner type?
if let Status(_) = tgt {
true
} else {
false
}
}
}
}
fn move_into_fresh(&mut self, ts_end: u64) -> Self {
match self {
ChannelEvents::Events(k) => ChannelEvents::Events(k.move_into_fresh(ts_end)),
ChannelEvents::Status(k) => ChannelEvents::Status(k.clone()),
}
}
fn move_into_existing(&mut self, tgt: &mut Self, ts_end: u64) -> Result<(), merger::MergeError> {
match self {
ChannelEvents::Events(k) => match tgt {
ChannelEvents::Events(tgt) => k.move_into_existing(tgt, ts_end),
ChannelEvents::Status(_) => Err(merger::MergeError::NotCompatible),
},
ChannelEvents::Status(_) => match tgt {
ChannelEvents::Events(_) => Err(merger::MergeError::NotCompatible),
ChannelEvents::Status(_) => Err(merger::MergeError::Full),
},
}
}
}
impl Collectable for ChannelEvents {
fn new_collector(&self) -> Box<dyn Collector> {
match self {
ChannelEvents::Events(_item) => todo!(),
ChannelEvents::Status(_) => todo!(),
}
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
}
pub struct ChannelEventsTimeBinner {
// TODO `ConnStatus` contains all the changes that can happen to a connection, but
// here we would rather require a simplified current state for binning purpose.
edges: Vec<u64>,
do_time_weight: bool,
conn_state: ConnStatus,
binner: Option<Box<dyn crate::TimeBinner>>,
}
impl fmt::Debug for ChannelEventsTimeBinner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ChannelEventsTimeBinner")
.field("conn_state", &self.conn_state)
.finish()
}
}
impl crate::timebin::TimeBinner for ChannelEventsTimeBinner {
type Input = ChannelEvents;
type Output = Box<dyn crate::TimeBinned>;
fn ingest(&mut self, item: &mut Self::Input) {
match item {
ChannelEvents::Events(item) => {
if self.binner.is_none() {
let binner = item.time_binner_new(self.edges.clone(), self.do_time_weight);
self.binner = Some(binner);
}
match self.binner.as_mut() {
Some(binner) => binner.ingest(item.as_time_binnable()),
None => {
error!("ingest without active binner item {item:?}");
()
}
}
}
ChannelEvents::Status(item) => {
warn!("TODO consider channel status in time binning {item:?}");
}
}
}
fn set_range_complete(&mut self) {
match self.binner.as_mut() {
Some(binner) => binner.set_range_complete(),
None => (),
}
}
fn bins_ready_count(&self) -> usize {
match &self.binner {
Some(binner) => binner.bins_ready_count(),
None => 0,
}
}
fn bins_ready(&mut self) -> Option<Self::Output> {
match self.binner.as_mut() {
Some(binner) => binner.bins_ready(),
None => None,
}
}
fn push_in_progress(&mut self, push_empty: bool) {
match self.binner.as_mut() {
Some(binner) => binner.push_in_progress(push_empty),
None => (),
}
}
fn cycle(&mut self) {
match self.binner.as_mut() {
Some(binner) => binner.cycle(),
None => (),
}
}
}
impl crate::timebin::TimeBinnable for ChannelEvents {
type TimeBinner = ChannelEventsTimeBinner;
fn time_binner_new(&self, edges: Vec<u64>, do_time_weight: bool) -> Self::TimeBinner {
let (binner, status) = match self {
ChannelEvents::Events(_events) => (None, ConnStatus::Connect),
ChannelEvents::Status(status) => (None, status.status.clone()),
};
ChannelEventsTimeBinner {
edges,
do_time_weight,
conn_state: status,
binner,
}
}
}
#[derive(Debug)]
pub struct EventsCollector {
coll: Option<Box<()>>,
coll: Box<dyn items_2::collect::CollectorDyn>,
}
impl EventsCollector {
pub fn new() -> Self {
Self { coll: Box::new(()) }
pub fn new(coll: Box<dyn items_2::collect::CollectorDyn>) -> Self {
Self { coll }
}
}
impl crate::collect::Collector for EventsCollector {
impl items_2::collect::Collector for EventsCollector {
type Input = Box<dyn Events>;
type Output = Box<dyn Events>;
// TODO this Output trait does not differentiate between e.g. collected events, collected bins, different aggs, etc...
type Output = Box<dyn items_2::collect::Collected>;
fn len(&self) -> usize {
todo!()
self.coll.len()
}
fn ingest(&mut self, item: &mut Self::Input) {
todo!()
self.coll.ingest(item.as_collectable_with_default_mut());
}
fn set_range_complete(&mut self) {
todo!()
self.coll.set_range_complete()
}
fn set_timed_out(&mut self) {
todo!()
self.coll.set_timed_out()
}
fn result(&mut self) -> Result<Self::Output, Error> {
todo!()
self.coll.result()
}
}
impl crate::collect::Collectable for Box<dyn Events> {
impl items_2::collect::Collectable for Box<dyn Events> {
type Collector = EventsCollector;
fn new_collector(&self) -> Self::Collector {
Collectable::new_collector(self)
let coll = items_2::collect::CollectableWithDefault::new_collector(self.as_ref());
EventsCollector::new(coll)
}
}

View File

@@ -6,7 +6,7 @@ use std::fmt;
pub trait CollectorType: Send + Unpin + WithLen {
type Input: Collectable;
type Output: ToJsonResult + Serialize;
type Output: crate::collect::Collected + ToJsonResult + Serialize;
fn ingest(&mut self, src: &mut Self::Input);
fn set_range_complete(&mut self);

View File

@@ -1,10 +1,11 @@
use crate::binsdim0::BinsDim0CollectedResult;
use crate::channelevents::{ConnStatus, ConnStatusEvent};
use crate::eventsdim0::EventsDim0;
use crate::merger::{Mergeable, Merger};
use crate::merger_cev::ChannelEventsMerger;
use crate::testgen::make_some_boxed_d0_f32;
use crate::Error;
use crate::{binned_collected, runfut, ChannelEvents, Empty, Events, IsoDateTime};
use crate::{ConnStatus, ConnStatusEvent, Error};
use chrono::{TimeZone, Utc};
use futures_util::{stream, StreamExt};
use items::{sitem_data, RangeCompletableItem, Sitemty, StreamItem};

View File

@@ -658,7 +658,7 @@ impl Nanos {
impl fmt::Debug for Nanos {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let ts = chrono::Utc.timestamp((self.ns / SEC) as i64, (self.ns % SEC) as u32);
let ts = chrono::Utc.timestamp_opt((self.ns / SEC) as i64, (self.ns % SEC) as u32);
f.debug_struct("Nanos").field("ns", &ts).finish()
}
}
@@ -671,8 +671,8 @@ pub struct NanoRange {
impl fmt::Debug for NanoRange {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let beg = chrono::Utc.timestamp((self.beg / SEC) as i64, (self.beg % SEC) as u32);
let end = chrono::Utc.timestamp((self.end / SEC) as i64, (self.end % SEC) as u32);
let beg = chrono::Utc.timestamp_opt((self.beg / SEC) as i64, (self.beg % SEC) as u32);
let end = chrono::Utc.timestamp_opt((self.end / SEC) as i64, (self.end % SEC) as u32);
f.debug_struct("NanoRange")
.field("beg", &beg)
.field("end", &end)

View File

@@ -97,7 +97,7 @@ impl RawEventsQuery {
}
}
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Serialize)]
pub struct PlainEventsQuery {
channel: Channel,
range: NanoRange,

View File

@@ -3,6 +3,7 @@ use futures_core::Stream;
use futures_util::StreamExt;
use items::frame::{decode_frame, make_term_frame};
use items::{EventQueryJsonStringFrame, Framable, RangeCompletableItem, Sitemty, StreamItem};
use items_2::channelevents::ChannelEvents;
use netpod::histo::HistoLog2;
use netpod::log::*;
use netpod::query::{PlainEventsQuery, RawEventsQuery};
@@ -108,19 +109,31 @@ async fn events_conn_handler_inner_try(
let mut p1: Pin<Box<dyn Stream<Item = Box<dyn Framable + Send>> + Send>> =
if evq.channel().backend() == "testbackend" {
use items_2::ChannelEvents;
warn!("TEST BACKEND DATA");
use items_2::Empty;
use netpod::timeunits::MS;
let node_count = node_config.node_config.cluster.nodes.len();
let node_ix = node_config.ix;
if evq.channel().name() == "inmem-d0-i32" {
let mut item = items_2::eventsdim0::EventsDim0::<i32>::empty();
let td = MS * 10;
for i in 0..20 {
let ts = MS * 17 + td * node_ix as u64 + td * node_count as u64 * i;
let pulse = 1 + node_ix as u64 + node_count as u64 * i;
item.push(ts, pulse, pulse as _);
}
let item = ChannelEvents::Events(Box::new(item) as _);
let item = Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)));
let item = Box::new(item) as _;
let stream = futures_util::stream::iter([item]);
Box::pin(stream)
} else if evq.channel().name() == "inmem-d0-f32" {
let mut item = items_2::eventsdim0::EventsDim0::<f32>::empty();
let td = MS * 10;
let mut ts = MS * 17 + MS * td * node_ix as u64;
let mut pulse = 1 + node_ix as u64;
for _ in 0..20 {
for i in 0..20 {
let ts = MS * 17 + td * node_ix as u64 + td * node_count as u64 * i;
let pulse = 1 + node_ix as u64 + node_count as u64 * i;
item.push(ts, pulse, ts as _);
ts += 3 * td;
pulse += 3;
}
let item = ChannelEvents::Events(Box::new(item) as _);
let item = Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)));
@@ -164,12 +177,12 @@ async fn events_conn_handler_inner_try(
let s = stream.map(|item| {
let item = match item {
Ok(item) => match item {
items_2::ChannelEvents::Events(_item) => {
ChannelEvents::Events(_item) => {
// TODO
let item = items::scalarevents::ScalarEvents::<f64>::empty();
Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))
}
items_2::ChannelEvents::Status(_item) => todo!(),
ChannelEvents::Status(_item) => todo!(),
},
Err(e) => Err(e),
};

View File

@@ -1,7 +1,7 @@
use super::*;
use items::frame::make_frame;
use items::Sitemty;
use items_2::ChannelEvents;
use items_2::channelevents::ChannelEvents;
use netpod::timeunits::SEC;
use netpod::{Channel, Cluster, Database, FileIoBufferSize, NanoRange, Node, NodeConfig, SfDatabuffer};
use tokio::net::TcpListener;

View File

@@ -3,7 +3,8 @@ use crate::events::EventsStreamScylla;
use err::Error;
use futures_util::{Future, Stream, StreamExt};
use items_2::binsdim0::BinsDim0;
use items_2::{empty_binned_dyn, empty_events_dyn, ChannelEvents, TimeBinned};
use items_2::channelevents::ChannelEvents;
use items_2::{empty_binned_dyn, empty_events_dyn, TimeBinned};
use netpod::log::*;
use netpod::query::{CacheUsage, PlainEventsQuery, RawEventsQuery};
use netpod::timeunits::*;

View File

@@ -1,8 +1,9 @@
use crate::errconv::ErrConv;
use err::Error;
use futures_util::{Future, FutureExt, Stream, StreamExt};
use items_2::channelevents::{ChannelEvents, ConnStatus, ConnStatusEvent};
use items_2::eventsdim0::EventsDim0;
use items_2::{ChannelEvents, ConnStatus, ConnStatusEvent, Empty, Events, WithLen};
use items_2::{Empty, Events, WithLen};
use netpod::log::*;
use netpod::query::{ChannelStateEventsQuery, PlainEventsQuery};
use netpod::timeunits::*;

View File

@@ -12,7 +12,6 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_cbor = "0.11.1"
erased-serde = "0.3.23"
bincode = "1.3.3"
bytes = "1.0.1"
arrayref = "0.3.6"
crc32fast = "1.3.2"

View File

@@ -1,6 +1,3 @@
// Sets up the raw tcp connections: disk::merge::mergedfromremotes::MergedFromRemotes
// and then sets up a disk::merge::MergedStream
pub mod mergedstream;
use crate::frames::eventsfromframes::EventsFromFrames;
@@ -12,23 +9,24 @@ use items::frame::make_term_frame;
use items::sitem_data;
use items::EventQueryJsonStringFrame;
use items::Sitemty;
use items_2::ChannelEvents;
use netpod::log::*;
use netpod::Cluster;
use std::pin::Pin;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
pub type ChannelEventsBoxedStream = Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>;
pub type BoxedStream<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
pub async fn open_tcp_streams(
query: &dyn erased_serde::Serialize,
cluster: &Cluster,
) -> Result<Vec<ChannelEventsBoxedStream>, Error> {
pub async fn open_tcp_streams<Q, T>(query: Q, cluster: &Cluster) -> Result<Vec<BoxedStream<T>>, Error>
where
Q: serde::Serialize,
// Group bounds in new trait
T: items::FrameTypeInnerStatic + serde::de::DeserializeOwned + Send + Unpin + 'static,
{
// TODO when unit tests established, change to async connect:
let mut streams = Vec::new();
for node in &cluster.nodes {
debug!("x_processed_stream_from_node to: {}:{}", node.host, node.port_raw);
debug!("open_tcp_streams to: {}:{}", node.host, node.port_raw);
let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
let qjs = serde_json::to_string(&query)?;
let (netin, mut netout) = net.into_split();
@@ -42,8 +40,7 @@ pub async fn open_tcp_streams(
netout.forget();
// TODO for images, we need larger buffer capacity
let frames = InMemoryFrameAsyncReadStream::new(netin, 1024 * 128);
type ITEM = ChannelEvents;
let stream = EventsFromFrames::<_, ITEM>::new(frames);
let stream = EventsFromFrames::<_, T>::new(frames);
streams.push(Box::pin(stream) as _);
}
Ok(streams)

View File

@@ -296,7 +296,8 @@ where
#[cfg(test)]
mod test {
use items_2::{ChannelEvents, Empty};
use items_2::channelevents::ChannelEvents;
use items_2::Empty;
#[test]
fn merge_channel_events() {

View File

@@ -1,16 +1,16 @@
use crate::merge::open_tcp_streams;
use bytes::Bytes;
use err::Error;
use futures_util::{future, stream, FutureExt, Stream, StreamExt};
use items::streams::collect_plain_events_json;
use items::{sitem_data, RangeCompletableItem, Sitemty, StreamItem};
use items_2::ChannelEvents;
use futures_util::{Stream, StreamExt};
use items::Sitemty;
#[allow(unused)]
use netpod::log::*;
use netpod::Cluster;
use serde::Serialize;
use serde_json::Value as JsonValue;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::time::{Duration, Instant};
pub struct BytesStream(Pin<Box<dyn Stream<Item = Sitemty<Bytes>> + Send>>);
@@ -22,52 +22,27 @@ impl Stream for BytesStream {
}
}
pub async fn plain_events_json<SER>(query: SER, cluster: &Cluster) -> Result<BytesStream, Error>
// TODO remove?
pub async fn plain_events_json<SER>(query: SER, cluster: &Cluster) -> Result<JsonValue, Error>
where
SER: Serialize,
{
let inps = open_tcp_streams(&query, cluster).await?;
let mut merged = items_2::merger_cev::ChannelEventsMerger::new(inps);
let timeout = Duration::from_millis(2000);
// TODO should be able to ask for data-events only, instead of mixed data and status events.
let inps = open_tcp_streams::<_, items_2::channelevents::ChannelEvents>(&query, cluster).await?;
// TODO propagate also the max-buf-len for the first stage event reader:
#[cfg(NOTHING)]
let stream = {
let mut it = inps.into_iter();
let inp0 = it.next().unwrap();
let inp1 = it.next().unwrap();
let inp2 = it.next().unwrap();
let stream = inp0.chain(inp1).chain(inp2);
stream
};
let stream = { items_2::merger::Merger::new(inps, 1) };
let deadline = Instant::now() + Duration::from_millis(2000);
let events_max = 100;
let do_log = false;
let mut coll = None;
while let Some(item) = merged.next().await {
let item = item?;
match item {
StreamItem::DataItem(item) => match item {
RangeCompletableItem::RangeComplete => todo!(),
RangeCompletableItem::Data(item) => match item {
ChannelEvents::Events(mut item) => {
if coll.is_none() {
coll = Some(item.new_collector());
}
let coll = coll
.as_mut()
.ok_or_else(|| Error::with_msg_no_trace(format!("no collector")))?;
coll.ingest(&mut item);
}
ChannelEvents::Status(_) => todo!(),
},
},
StreamItem::Log(item) => {
info!("log {item:?}");
}
StreamItem::Stats(item) => {
info!("stats {item:?}");
}
}
}
// TODO compare with
// streams::collect::collect_plain_events_json
// and remove duplicate functionality.
let mut coll = coll.ok_or_else(|| Error::with_msg_no_trace(format!("no collector created")))?;
let res = coll.result()?;
// TODO factor the conversion of the result out to a higher level.
// The output of this function should again be collectable, maybe even binnable and otherwise processable.
let js = serde_json::to_vec(&res)?;
let item = sitem_data(Bytes::from(js));
let stream = stream::once(future::ready(item));
let stream = BytesStream(Box::pin(stream));
Ok(stream)
let collected = crate::collect::collect(stream, deadline, events_max).await?;
let jsval = serde_json::to_value(&collected)?;
Ok(jsval)
}

View File

@@ -6,8 +6,9 @@ mod timebin;
use err::Error;
use futures_util::{stream, Stream};
use items::{sitem_data, Sitemty};
use items_2::channelevents::ChannelEvents;
use items_2::eventsdim0::EventsDim0;
use items_2::{ChannelEvents, Empty};
use items_2::Empty;
use netpod::timeunits::SEC;
use std::pin::Pin;
@@ -46,7 +47,7 @@ fn merge_mergeable_00() -> Result<(), Error> {
let fut = async {
let inp0 = inmem_test_events_d0_i32_00();
let inp1 = inmem_test_events_d0_i32_01();
let mut merger = items_2::merger::Merger::new(vec![inp0, inp1], 4);
let _merger = items_2::merger::Merger::new(vec![inp0, inp1], 4);
Ok(())
};
runfut(fut)

View File

@@ -2,6 +2,7 @@ use crate::test::runfut;
use err::Error;
use futures_util::stream;
use items::sitem_data;
use items_2::eventsdim0::EventsDim0CollectorOutput;
use items_2::testgen::make_some_boxed_d0_f32;
use netpod::timeunits::SEC;
use std::time::{Duration, Instant};
@@ -15,7 +16,14 @@ fn collect_channel_events() -> Result<(), Error> {
let deadline = Instant::now() + Duration::from_millis(4000);
let events_max = 10000;
let res = crate::collect::collect(stream, deadline, events_max).await?;
eprintln!("collected result: {res:?}");
//eprintln!("collected result: {res:?}");
if let Some(res) = res.as_any_ref().downcast_ref::<EventsDim0CollectorOutput<f32>>() {
eprintln!("Great, a match");
eprintln!("{res:?}");
assert_eq!(res.len(), 40);
} else {
return Err(Error::with_msg(format!("bad type of collected result")));
}
Ok(())
};
runfut(fut)

View File

@@ -3,8 +3,8 @@ use err::Error;
use futures_util::{stream, StreamExt};
use items::{sitem_data, RangeCompletableItem, StreamItem};
use items_2::binsdim0::BinsDim0;
use items_2::channelevents::{ChannelEvents, ConnStatus, ConnStatusEvent};
use items_2::testgen::make_some_boxed_d0_f32;
use items_2::{ChannelEvents, ConnStatus, ConnStatusEvent};
use netpod::timeunits::{MS, SEC};
use std::collections::VecDeque;
use std::time::{Duration, Instant};