Update deps, error type

Dominik Werder
2025-06-03 12:28:46 +02:00
parent 31389fa7e3
commit 5a64613727
27 changed files with 268 additions and 286 deletions

View File

@@ -7,6 +7,6 @@ edition = "2024"
[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
autoerr = "0.0.3"
autoerr = "0.0"
taskrun = { path = "../taskrun" }
redis = { version = "0.31.0", features = [] }

View File

@@ -1,6 +1,6 @@
[package]
name = "daqretrieve"
version = "0.5.5-aa.13"
version = "0.5.5-aa.14"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2024"
@@ -13,6 +13,7 @@ serde_json = "1.0"
serde_yaml = "0.9.34"
chrono = "0.4.39"
url = "2.5.4"
autoerr = "0.0.5"
clap = { version = "4.5.28", features = ["derive", "cargo"] }
daqbuf-err = { path = "../../../daqbuf-err" }
taskrun = { path = "../taskrun" }

View File

@@ -1,32 +1,31 @@
-use daqbuf_err::thiserror;
-use daqbuf_err::ThisError;
-use futures_util::future;
use futures_util::StreamExt;
-use http::header;
+use futures_util::future;
use http::Method;
+use http::header;
+use httpclient::IncomingStream;
use httpclient::body_empty;
use httpclient::connect_client;
use httpclient::http;
use httpclient::http::StatusCode;
use httpclient::hyper::Request;
-use httpclient::IncomingStream;
-use netpod::log::*;
+use netpod::APP_CBOR_FRAMED;
use netpod::ScalarType;
use netpod::Shape;
-use netpod::APP_CBOR_FRAMED;
+use netpod::log::*;
use streams::cbor_stream::FramedBytesToChannelEventsStream;
use url::Url;
-#[derive(Debug, ThisError)]
-#[cstm(name = "DataFetch")]
-pub enum Error {
-    Url(#[from] url::ParseError),
-    NoHostname,
-    HttpBody(#[from] http::Error),
-    HttpClient(#[from] httpclient::Error),
-    Hyper(#[from] httpclient::hyper::Error),
-    RequestFailed(String),
-}
+autoerr::create_error_v1!(
+    name(Error, "DataFetch"),
+    enum variants {
+        Url(#[from] url::ParseError),
+        NoHostname,
+        HttpBody(#[from] http::Error),
+        HttpClient(#[from] httpclient::Error),
+        Hyper(#[from] httpclient::hyper::Error),
+        RequestFailed(String),
+    },
+);
pub async fn fetch_cbor(url: &str, scalar_type: ScalarType, shape: Shape) -> Result<(), Error> {
let url: Url = url.parse()?;

View File

@@ -136,7 +136,7 @@ pub async fn get_binned(
"get_cached_0 DONE total download {} MB throughput {:5} kB/s bin_count {}",
ntot / 1024 / 1024,
throughput,
-bin_count,
+bin_count
);
Ok(())
}

View File

@@ -20,7 +20,7 @@ pin-project = "1"
async-channel = "1.9.0"
chrono = "0.4.39"
regex = "1.11.1"
autoerr = "0.0.3"
autoerr = "0.0.5"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }

View File

@@ -1,31 +1,29 @@
+use autoerr::dbgdisplay::Dbg;
use chrono::DateTime;
use chrono::Utc;
use daqbuf_err as err;
-use err::thiserror;
-use err::ThisError;
-use netpod::log::*;
-use netpod::range::evrange::NanoRange;
use netpod::ChConf;
use netpod::ScalarType;
use netpod::SeriesKind;
use netpod::SfDbChannel;
use netpod::Shape;
use netpod::TsMs;
+use netpod::log::*;
+use netpod::range::evrange::NanoRange;
use std::time::Duration;
use tokio_postgres::Client;
-#[derive(Debug, ThisError)]
-#[cstm(name = "DbChannelConfig")]
-pub enum Error {
-    Pg(#[from] tokio_postgres::Error),
-    #[error("NotFound({0}, {1})")]
-    NotFound(SfDbChannel, NanoRange),
-    SeriesNotFound(String, u64),
-    BadScalarType(i32),
-    BadShape(Vec<i32>),
-    BadKind(i16),
-    NoInput,
-}
+autoerr::create_error_v1!(
+    name(Error, "DbChannelConfig"),
+    enum variants {
+        Pg(#[from] tokio_postgres::Error),
+        NotFound(SfDbChannel, NanoRange),
+        SeriesNotFound(String, u64),
+        BadScalarType(i32),
+        BadShape(Dbg<Vec<i32>>),
+        BadKind(i16),
+        NoInput,
+    },
+);
/// It is an unsolved question as to how we want to uniquely address channels.
/// Currently, the usual (backend, channelname) works in 99% of the cases, but the edge-cases
@@ -67,7 +65,7 @@ pub(super) async fn chconf_best_matching_for_name_and_range(
let series = series as u64;
let _scalar_type =
ScalarType::from_scylla_i32(scalar_type).map_err(|_| Error::BadScalarType(scalar_type))?;
-let _shape = Shape::from_scylla_shape_dims(&shape_dims).map_err(|_| Error::BadShape(shape_dims))?;
+let _shape = Shape::from_scylla_shape_dims(&shape_dims).map_err(|_| Error::BadShape(Dbg(shape_dims)))?;
let tsms = tsc.signed_duration_since(DateTime::UNIX_EPOCH).num_milliseconds() as u64;
let ts = TsMs::from_ms_u64(tsms);
rows.push((ts, series));
@@ -88,7 +86,7 @@ pub(super) async fn chconf_best_matching_for_name_and_range(
let series = series as u64;
let kind = channel.kind();
let scalar_type = ScalarType::from_scylla_i32(scalar_type).map_err(|_| Error::BadScalarType(scalar_type))?;
-let shape = Shape::from_scylla_shape_dims(&shape_dims).map_err(|_| Error::BadShape(shape_dims))?;
+let shape = Shape::from_scylla_shape_dims(&shape_dims).map_err(|_| Error::BadShape(Dbg(shape_dims)))?;
let ret = ChConf::new(channel.backend(), series, kind, scalar_type, shape, channel.name());
Ok(ret)
}
@@ -217,7 +215,7 @@ pub(super) async fn chconf_for_series(backend: &str, series: u64, pg: &Client) -
ScalarType::from_dtype_index(scalar_type as _).map_err(|_| Error::BadScalarType(scalar_type))?;
// TODO can I get a slice from psql driver?
let shape = row.get::<_, Vec<i32>>(2);
-let shape = Shape::from_scylla_shape_dims(&shape).map_err(|_| Error::BadShape(shape))?;
+let shape = Shape::from_scylla_shape_dims(&shape).map_err(|_| Error::BadShape(Dbg(shape)))?;
let kind: i16 = row.get(3);
let kind = SeriesKind::from_db_i16(kind).map_err(|_| Error::BadKind(kind))?;
let ret = ChConf::new(backend, series, kind, scalar_type, shape, name);

View File

@@ -5,24 +5,22 @@ pub mod search;
pub mod worker;
pub mod pg {
-pub use tokio_postgres::types::Type;
pub use tokio_postgres::Client;
pub use tokio_postgres::Error;
pub use tokio_postgres::NoTls;
pub use tokio_postgres::Statement;
+pub use tokio_postgres::types::Type;
}
use daqbuf_err as err;
-use err::anyhow;
-use err::thiserror;
use err::Error;
use err::Res2;
-use err::ThisError;
-use netpod::log::*;
+use err::anyhow;
use netpod::Database;
use netpod::NodeConfigCached;
use netpod::SfDbChannel;
use netpod::TableSizes;
+use netpod::log::*;
use pg::Client as PgClient;
use pg::NoTls;
use serde::Serialize;
@@ -195,12 +193,13 @@ pub async fn find_series_sf_databuffer(channel: &SfDbChannel, pgclient: Arc<PgCl
Ok(series)
}
-#[derive(Debug, ThisError, Serialize)]
-#[cstm(name = "FindChannel")]
-pub enum FindChannelError {
-    UnknownBackend,
-    BadSeriesId,
-    NoFound,
-    MultipleFound,
-    Database(String),
-}
+autoerr::create_error_v1!(
+    name(FindChannelError, "FindChannel"),
+    enum variants {
+        UnknownBackend,
+        BadSeriesId,
+        NoFound,
+        MultipleFound,
+        Database(String),
+    },
+);

View File

@@ -29,6 +29,7 @@ hex = "0.4.3"
num-traits = "0.2.19"
num-derive = "0.4.2"
url = "2.5.4"
autoerr = "0.0.5"
tiny-keccak = { version = "2.0", features = ["sha3"] }
daqbuf-err = { path = "../../../daqbuf-err" }
taskrun = { path = "../taskrun" }

View File

@@ -21,13 +21,14 @@ use std::time::SystemTime;
use streams::tcprawclient::TEST_BACKEND;
use taskrun::tokio;
-#[derive(Debug, ThisError)]
-#[cstm(name = "ChannelConfig")]
-pub enum ConfigError {
-    ParseError(ConfigParseError),
-    NotFound,
-    Error,
-}
+autoerr::create_error_v1!(
+    name(ConfigError, "ChannelConfig"),
+    enum variants {
+        ParseError(ConfigParseError),
+        NotFound,
+        Error,
+    },
+);
impl From<ConfigParseError> for ConfigError {
fn from(value: ConfigParseError) -> Self {

View File

@@ -91,7 +91,10 @@ async fn position_file(
let gg = match gg {
Ok(x) => x,
Err(e) => {
error!("can not position file for range {range:?} expand_right {expand_right:?} buflen {buflen}", buflen = buf.len());
error!(
"can not position file for range {range:?} expand_right {expand_right:?} buflen {}",
buf.len()
);
return Err(e);
}
};

View File

@@ -1,9 +1,7 @@
use bytes::Buf;
use bytes::BytesMut;
use daqbuf_err as err;
-use err::thiserror;
use err::Error;
-use err::ThisError;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::streamitem::LogItem;
@@ -23,8 +21,6 @@ use netpod::ScalarType;
use netpod::SfChFetchInfo;
use netpod::Shape;
use parse::channelconfig::CompressionMethod;
-use serde::Deserialize;
-use serde::Serialize;
use std::collections::VecDeque;
use std::io::Cursor;
use std::path::PathBuf;
@@ -38,26 +34,26 @@ use streams::needminbuffer::NeedMinBuffer;
#[allow(unused)]
macro_rules! trace_parse_buf { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
-#[derive(Debug, ThisError, Serialize, Deserialize)]
-#[cstm(name = "DatabufferDataParse")]
-pub enum DataParseError {
-    DataFrameLengthMismatch,
-    FileHeaderTooShort,
-    BadVersionTag,
-    HeaderTooLarge,
-    Utf8Error,
-    EventTooShort,
-    #[error("EventTooLong({0}, {1})")]
-    EventTooLong(Shape, u32),
-    TooManyBeforeRange,
-    EventWithOptional,
-    BadTypeIndex,
-    WaveShapeWithoutEventArray,
-    ShapedWithoutDims,
-    TooManyDims,
-    UnknownCompression,
-    BadCompresionBlockSize,
-}
+autoerr::create_error_v1!(
+    name(DataParseError, "DatabufferDataParse"),
+    enum variants {
+        DataFrameLengthMismatch,
+        FileHeaderTooShort,
+        BadVersionTag,
+        HeaderTooLarge,
+        Utf8Error,
+        EventTooShort,
+        EventTooLong(Shape, u32),
+        TooManyBeforeRange,
+        EventWithOptional,
+        BadTypeIndex,
+        WaveShapeWithoutEventArray,
+        ShapedWithoutDims,
+        TooManyDims,
+        UnknownCompression,
+        BadCompresionBlockSize,
+    },
+);
pub struct EventChunker {
inp: NeedMinBuffer,

View File

@@ -19,7 +19,7 @@ hyper = { version = "1.6.0", features = ["http1", "http2", "client", "server"] }
hyper-util = { version = "0.1.10", features = ["full"] }
bytes = "1.10.0"
async-channel = "1.9.0"
autoerr = "0.0.3"
autoerr = "0.0"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }

View File

@@ -35,7 +35,7 @@ rand = "0.9.0"
ciborium = "0.2.2"
flate2 = "1"
brotli = "8.0.1"
autoerr = "0.0.3"
autoerr = "0.0"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../../../daqbuf-query", package = "daqbuf-query" }

View File

@@ -623,7 +623,7 @@ impl DataApiPython3DataStream {
b.be[i1],
b.scalar_types[i1],
b.shapes[i1],
-b.comps[i1],
+b.comps[i1]
);
}
// TODO emit warning when we use a different setting compared to channel config.

View File

@@ -3,9 +3,7 @@ use async_channel::Receiver;
use async_channel::Sender;
use bytes::Bytes;
use daqbuf_err as err;
-use err::thiserror;
use err::PublicError;
-use err::ThisError;
use err::ToPublicError;
use futures_util::Stream;
use futures_util::StreamExt;
@@ -30,18 +28,17 @@ use std::task::Context;
use std::task::Poll;
use taskrun::tokio;
-#[derive(Debug, ThisError)]
-#[cstm(name = "ChannelFindActive")]
-pub enum FindActiveError {
-    HttpBadAccept,
-    HttpBadUrl,
-    #[error("Error({0})")]
-    Error(Box<dyn ToPublicError>),
-    #[error("UrlError({0})")]
-    UrlError(#[from] url::ParseError),
-    InternalError,
-    IO(#[from] std::io::Error),
-}
+autoerr::create_error_v1!(
+    name(FindActiveError, "ChannelFindActive"),
+    enum variants {
+        HttpBadAccept,
+        HttpBadUrl,
+        Error(Box<dyn ToPublicError>),
+        UrlError(#[from] url::ParseError),
+        InternalError,
+        IO(#[from] std::io::Error),
+    },
+);
impl ToPublicError for FindActiveError {
fn to_public_error(&self) -> PublicError {

View File

@@ -2,9 +2,7 @@ use crate::response;
use crate::ReqCtx;
use crate::ServiceSharedResources;
use daqbuf_err as err;
-use err::thiserror;
use err::PublicError;
-use err::ThisError;
use err::ToPublicError;
use http::Method;
use http::StatusCode;
@@ -19,13 +17,14 @@ use netpod::NodeConfigCached;
use std::sync::Arc;
use streams::instrument::InstrumentStream;
-#[derive(Debug, ThisError)]
-#[cstm(name = "EventData")]
-pub enum EventDataError {
-    QueryParse,
-    Error(Box<dyn ToPublicError>),
-    InternalError,
-}
+autoerr::create_error_v1!(
+    name(EventDataError, "EventData"),
+    enum variants {
+        QueryParse,
+        Error(Box<dyn ToPublicError>),
+        InternalError,
+    },
+);
impl ToPublicError for EventDataError {
fn to_public_error(&self) -> PublicError {

View File

@@ -4,10 +4,7 @@ use crate::requests::accepts_json_framed;
use crate::requests::accepts_json_or_all;
use crate::response;
use crate::ServiceSharedResources;
-use daqbuf_err as err;
use dbconn::worker::PgQueue;
-use err::thiserror;
-use err::ThisError;
use http::header::CONTENT_TYPE;
use http::Method;
use http::StatusCode;
@@ -44,16 +41,17 @@ use streams::streamtimeout::StreamTimeout2;
use tracing::Instrument;
use tracing::Span;
-#[derive(Debug, ThisError)]
-#[cstm(name = "Api4Events")]
-pub enum Error {
-    ChannelNotFound,
-    HttpLib(#[from] http::Error),
-    ChannelConfig(crate::channelconfig::Error),
-    Retrieval(#[from] crate::RetrievalError),
-    EventsCbor(#[from] streams::plaineventscbor::Error),
-    EventsJson(#[from] streams::plaineventsjson::Error),
-}
+autoerr::create_error_v1!(
+    name(Error, "Api4Events"),
+    enum variants {
+        ChannelNotFound,
+        HttpLib(#[from] http::Error),
+        ChannelConfig(crate::channelconfig::Error),
+        Retrieval(#[from] crate::RetrievalError),
+        EventsCbor(#[from] streams::plaineventscbor::Error),
+        EventsJson(#[from] streams::plaineventsjson::Error),
+    },
+);
impl Error {
pub fn user_message(&self) -> String {

View File

@@ -17,7 +17,7 @@ byteorder = "1.5.0"
futures-util = "0.3.31"
tracing = "0.1.41"
hex = "0.4.3"
autoerr = "0.0.3"
autoerr = "0.0"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../../../daqbuf-query", package = "daqbuf-query" }

View File

@@ -1,7 +1,4 @@
-use daqbuf_err as err;
use dbconn::worker::PgQueue;
-use err::thiserror;
-use err::ThisError;
use httpclient::url::Url;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
@@ -22,24 +19,25 @@ use netpod::Shape;
use netpod::APP_JSON;
use serde::Serialize;
-#[derive(Debug, ThisError)]
-#[cstm(name = "ChannelConfigNode")]
-pub enum Error {
-    NotFoundChannel(SfDbChannel),
-    ChannelConfig(dbconn::channelconfig::Error),
-    DbWorker(#[from] dbconn::worker::Error),
-    DiskConfig(#[from] disk::channelconfig::ConfigError),
-    BackendConfigError,
-    BadTestSetup,
-    HttpReqError,
-    HttpClient(#[from] httpclient::Error),
-    ConfigParse(#[from] disk::parse::channelconfig::ConfigParseError),
-    JsonParse(#[from] serde_json::Error),
-    SearchWithGivenSeries,
-    AsyncSend,
-    AsyncRecv,
-    Todo,
-}
+autoerr::create_error_v1!(
+    name(Error, "ChannelConfigNode"),
+    enum variants {
+        NotFoundChannel(SfDbChannel),
+        ChannelConfig(dbconn::channelconfig::Error),
+        DbWorker(#[from] dbconn::worker::Error),
+        DiskConfig(#[from] disk::channelconfig::ConfigError),
+        BackendConfigError,
+        BadTestSetup,
+        HttpReqError,
+        HttpClient(#[from] httpclient::Error),
+        ConfigParse(#[from] disk::parse::channelconfig::ConfigParseError),
+        JsonParse(#[from] serde_json::Error),
+        SearchWithGivenSeries,
+        AsyncSend,
+        AsyncRecv,
+        Todo,
+    },
+);
impl From<async_channel::RecvError> for Error {
fn from(_value: async_channel::RecvError) -> Self {

View File

@@ -1,8 +1,5 @@
use crate::channelconfig::http_get_channel_config;
-use daqbuf_err as err;
use dbconn::worker::PgQueue;
-use err::thiserror;
-use err::ThisError;
use netpod::log::*;
use netpod::range::evrange::SeriesRange;
use netpod::ChConf;
@@ -18,19 +15,20 @@ use std::collections::BTreeMap;
use std::time::Duration;
use taskrun::tokio;
-#[derive(Debug, ThisError)]
-#[cstm(name = "ConfigQuorum")]
-pub enum Error {
-    NotFound(SfDbChannel),
-    MissingTimeRange,
-    Timeout,
-    ChannelConfig(crate::channelconfig::Error),
-    ExpectSfDatabufferBackend,
-    UnsupportedBackend,
-    BadTimeRange,
-    DbWorker(#[from] dbconn::worker::Error),
-    FindChannel(#[from] dbconn::FindChannelError),
-}
+autoerr::create_error_v1!(
+    name(Error, "ConfigQuorum"),
+    enum variants {
+        NotFound(SfDbChannel),
+        MissingTimeRange,
+        Timeout,
+        ChannelConfig(crate::channelconfig::Error),
+        ExpectSfDatabufferBackend,
+        UnsupportedBackend,
+        BadTimeRange,
+        DbWorker(#[from] dbconn::worker::Error),
+        FindChannel(#[from] dbconn::FindChannelError),
+    },
+);
impl From<crate::channelconfig::Error> for Error {
fn from(value: crate::channelconfig::Error) -> Self {

View File

@@ -1,8 +1,6 @@
use crate::scylla::scylla_channel_event_stream;
use bytes::Bytes;
use daqbuf_err as err;
-use err::thiserror;
-use err::ThisError;
use futures_util::Stream;
use futures_util::StreamExt;
use futures_util::TryStreamExt;
@@ -41,23 +39,24 @@ use tracing::Instrument;
#[cfg(test)]
mod test;
-#[derive(Debug, ThisError)]
-#[cstm(name = "NodenetConn")]
-pub enum Error {
-    BadQuery,
-    Scylla(#[from] crate::scylla::Error),
-    Error(#[from] err::Error),
-    Io(#[from] std::io::Error),
-    Items(#[from] items_2::Error),
-    NotAvailable,
-    DebugTest,
-    Generator(#[from] streams::generators::Error),
-    Framable(#[from] items_2::framable::Error),
-    Frame(#[from] items_2::frame::Error),
-    InMem(#[from] streams::frames::inmem::Error),
-    FramedStream(#[from] streams::frames::Error),
-    Netpod(#[from] netpod::Error),
-}
+autoerr::create_error_v1!(
+    name(Error, "NodenetConn"),
+    enum variants {
+        BadQuery,
+        Scylla(#[from] crate::scylla::Error),
+        Error(#[from] err::Error),
+        Io(#[from] std::io::Error),
+        Items(#[from] items_2::Error),
+        NotAvailable,
+        DebugTest,
+        Generator(#[from] streams::generators::Error),
+        Framable(#[from] items_2::framable::Error),
+        Frame(#[from] items_2::frame::Error),
+        InMem(#[from] streams::frames::inmem::Error),
+        FramedStream(#[from] streams::frames::Error),
+        Netpod(#[from] netpod::Error),
+    },
+);
pub async fn events_service(ncc: NodeConfigCached) -> Result<(), Error> {
let scyqueue = err::todoval();

View File

@@ -13,7 +13,7 @@ serde = { version = "1", features = ["derive"] }
serde_json = "1"
time = { version = "0.3.41", features = ["parsing", "formatting", "macros"] }
hashbrown = "0.15.3"
autoerr = "0.0.3"
autoerr = "0.0"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../../../daqbuf-query", package = "daqbuf-query" }

View File

@@ -5,23 +5,21 @@ use crate::events2::onebeforeandbulk;
use crate::range::ScyllaSeriesRange;
use crate::worker::ScyllaQueue;
use daqbuf_err as err;
-use err::thiserror;
-use err::ThisError;
use futures_util::Future;
use futures_util::FutureExt;
use futures_util::Stream;
use futures_util::StreamExt;
+use items_0::WithLen;
use items_0::merge::DrainIntoNewResult;
use items_0::merge::MergeableTy;
-use items_0::WithLen;
use items_2::channelevents::ChannelEvents;
+use netpod::ChConf;
+use netpod::TsNano;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::range::evrange::SeriesRange;
use netpod::stream_impl_tracer::StreamImplTracer;
use netpod::ttl::RetentionTime;
-use netpod::ChConf;
-use netpod::TsNano;
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::Context;
@@ -51,17 +49,18 @@ macro_rules! tracer_loop_enter {
};
}
-#[derive(Debug, ThisError)]
-#[cstm(name = "EventsMergeRt")]
-pub enum Error {
-    Input(#[from] crate::events2::onebeforeandbulk::Error),
-    Events(#[from] crate::events2::events::Error),
-    Logic,
-    OrderMin,
-    OrderMax,
-    LimitPoll,
-    LimitLoop,
-}
+autoerr::create_error_v1!(
+    name(Error, "EventsMergeRt"),
+    enum variants {
+        Input(#[from] crate::events2::onebeforeandbulk::Error),
+        Events(#[from] crate::events2::events::Error),
+        Logic,
+        OrderMin,
+        OrderMax,
+        LimitPoll,
+        LimitLoop,
+    },
+);
#[allow(unused)]
enum Resolvable<F>
@@ -339,7 +338,7 @@ impl MergeRtsChained {
fn push_out_one_before(&mut self) {
if let Some(buf) = self.buf_before.take() {
trace_fetch!("push_out_one_before len {len:?}", len = buf.len());
trace_fetch!("push_out_one_before len {:?}", buf.len());
if buf.len() != 0 {
self.out.push_back(buf);
}

View File

@@ -18,9 +18,8 @@ bytes = "1.10"
arrayref = "0.3.9"
crc32fast = "1.4.2"
byteorder = "1.5.0"
async-channel = "1.9.0"
rand_xoshiro = "0.7.0"
autoerr = "0.0.3"
autoerr = "0.0"
chrono = { version = "0.4.39", features = ["serde"] }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../../../daqbuf-query", package = "daqbuf-query" }

View File

@@ -42,7 +42,7 @@ pub async fn x_processed_event_blobs_stream_from_node_tcp(
node: Node,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>, Error> {
let addr = format!("{}:{}", node.host, node.port_raw);
debug!("x_processed_event_blobs_stream_from_node to: {addr}",);
debug!("x_processed_event_blobs_stream_from_node to: {addr}");
let frame1 = make_node_command_frame(subq.clone())?;
let net = TcpStream::connect(addr.clone()).await?;
let (netin, mut netout) = net.into_split();