Remove Arc from config structs to make them Serialize
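For context, a minimal sketch of why the `Arc` fields had to go (illustrative only, not code from this repository): `#[derive(Serialize)]` requires every field to implement `Serialize`, and serde does not implement it for `Arc<T>` unless serde's opt-in `rc` feature is enabled, so holding the config structs by value (or passing `&NodeConfig` / `&Node`) is the simpler route:

use serde::Serialize;

#[derive(Clone, Serialize)]
struct Node {
    host: String,
    port: u16,
}

// Before: this fails to compile, because serde does not implement
// Serialize for std::sync::Arc<T> unless serde's opt-in `rc` feature
// is enabled.
//
// #[derive(Serialize)]
// struct NodeConfig {
//     node: std::sync::Arc<Node>,
// }

// After: plain owned fields derive cleanly; code that shared the Arc
// clones the plain-data struct instead.
#[derive(Clone, Serialize)]
struct NodeConfig {
    node: Node,
}

fn main() {
    let cfg = NodeConfig {
        node: Node {
            host: "localhost".into(),
            port: 8800,
        },
    };
    println!("{}", serde_json::to_string(&cfg).unwrap());
}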
@@ -442,7 +442,7 @@ where
 pub fn make_test_node(id: u32) -> Node {
     Node {
-        id,
+        id: format!("{:02}", id),
         host: "localhost".into(),
         listen: "0.0.0.0".into(),
         port: 8800 + id as u16,

@@ -7,7 +7,6 @@ use futures_util::StreamExt;
 use netpod::timeunits::*;
 use netpod::{BinSpecDimT, Channel, ChannelConfig, NanoRange, ScalarType, Shape};
 use std::future::ready;
-use std::sync::Arc;
 #[allow(unused_imports)]
 use tracing::{debug, error, info, trace, warn};

@@ -22,7 +21,6 @@ fn agg_x_dim_0() {

 async fn agg_x_dim_0_inner() {
     let node = make_test_node(0);
-    let node = Arc::new(node);
     let query = netpod::AggQuerySingleChannel {
         channel_config: ChannelConfig {
             channel: Channel {
@@ -93,7 +91,6 @@ async fn agg_x_dim_1_inner() {
     // /data/sf-databuffer/daq_swissfel/daq_swissfel_3/byTime/S10BC01-DBAM070\:BAM_CH1_NORM/*
     // S10BC01-DBAM070:BAM_CH1_NORM
     let node = make_test_node(0);
-    let node = Arc::new(node);
     let query = netpod::AggQuerySingleChannel {
         channel_config: ChannelConfig {
             channel: Channel {
@@ -179,7 +176,6 @@ async fn merge_0_inner() {
         .into_iter()
         .map(|k| make_test_node(k))
         .map(|node| {
-            let node = Arc::new(node);
             super::eventblobs::EventBlobsComplete::new(
                 range.clone(),
                 query.channel_config.clone(),

@@ -9,7 +9,6 @@ use netpod::{AggKind, Channel, NodeConfig, PreBinnedPatchIterator};
 use netpod::{NanoRange, RetStreamExt};
 use std::future::ready;
 use std::pin::Pin;
-use std::sync::Arc;
 use std::task::{Context, Poll};

 pub struct BinnedStream {
@@ -22,7 +21,7 @@ impl BinnedStream {
         channel: Channel,
         range: NanoRange,
         agg_kind: AggKind,
-        node_config: Arc<NodeConfig>,
+        node_config: &NodeConfig,
     ) -> Self {
         let patches: Vec<_> = patch_it.collect();
         warn!("BinnedStream will open a PreBinnedValueStream");
@@ -30,8 +29,9 @@ impl BinnedStream {
             info!("BinnedStream -> patch {:?}", p);
         }
         let inp = futures_util::stream::iter(patches.into_iter())
-            .map(move |coord| {
-                PreBinnedValueFetchedStream::new(coord, channel.clone(), agg_kind.clone(), node_config.clone())
+            .map({
+                let node_config = node_config.clone();
+                move |coord| PreBinnedValueFetchedStream::new(coord, channel.clone(), agg_kind.clone(), &node_config)
             })
             .flatten()
             .only_first_error()

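The reshaped closure above is the standard pattern once the shared pointer is gone: clone the owned config once, outside the `move` closure, and let the closure own its copy, instead of cloning an `Arc` on every call. A minimal self-contained sketch of the idiom (names illustrative, not from this repository):

#[derive(Clone, Debug)]
struct NodeConfig {
    name: String,
}

fn main() {
    let node_config = NodeConfig { name: "node-00".into() };
    let items = vec![1u32, 2, 3];
    // Clone once into the closure's captured environment; the closure
    // then owns its own NodeConfig and can be 'static and Send.
    let mapped: Vec<String> = items
        .into_iter()
        .map({
            let node_config = node_config.clone();
            move |k| format!("{} handled by {}", k, node_config.name)
        })
        .collect();
    println!("{:?}", mapped);
    // The original is still usable here because the closure took a clone.
    println!("{:?}", node_config);
}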
@@ -12,13 +12,12 @@ use futures_core::Stream;
 use futures_util::{pin_mut, StreamExt};
 use hyper::Response;
 use netpod::{
-    AggKind, Channel, Cluster, NanoRange, NodeConfig, PreBinnedPatchCoord, PreBinnedPatchIterator, PreBinnedPatchRange,
-    ToNanos,
+    AggKind, Channel, Cluster, NanoRange, Node, NodeConfig, PreBinnedPatchCoord, PreBinnedPatchIterator,
+    PreBinnedPatchRange, ToNanos,
 };
 use serde::{Deserialize, Serialize};
 use std::future::Future;
 use std::pin::Pin;
-use std::sync::Arc;
 use std::task::{Context, Poll};
 use tiny_keccak::Hasher;
 use tokio::io::{AsyncRead, ReadBuf};
@@ -66,11 +65,12 @@ impl Query {
 }

 pub async fn binned_bytes_for_http(
-    node_config: Arc<NodeConfig>,
+    node_config: &NodeConfig,
+    node: &Node,
     query: &Query,
 ) -> Result<BinnedBytesForHttpStream, Error> {
     let range = &query.range;
-    let channel_config = read_local_config(&query.channel, node_config.clone()).await?;
+    let channel_config = read_local_config(&query.channel, node).await?;
     let entry = extract_matching_config_entry(range, &channel_config);
     info!("found config entry {:?}", entry);
     let grid = PreBinnedPatchRange::covering_range(query.range.clone(), query.count);
@@ -83,7 +83,7 @@ pub async fn binned_bytes_for_http(
         query.channel.clone(),
         query.range.clone(),
         query.agg_kind.clone(),
-        node_config.clone(),
+        node_config,
     );
     let ret = BinnedBytesForHttpStream::new(s1);
     Ok(ret)
@@ -170,10 +170,11 @@ impl PreBinnedQuery {
 // A user must first make sure that the grid spec is valid, and that this node is responsible for it.
 // Otherwise it is an error.
 pub fn pre_binned_bytes_for_http(
-    node_config: Arc<NodeConfig>,
+    node_config: &NodeConfig,
+    node: &Node,
     query: &PreBinnedQuery,
 ) -> Result<PreBinnedValueByteStream, Error> {
-    info!("pre_binned_bytes_for_http {:?} {:?}", query, node_config.node);
+    info!("pre_binned_bytes_for_http {:?} {:?}", query, node);
     let ret = PreBinnedValueByteStream::new(
         query.patch.clone(),
         query.channel.clone(),

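The comment on `pre_binned_bytes_for_http` above puts the burden on the caller: route each patch to the node that owns it before calling, or it is an error. A small sketch of what such a deterministic ownership check can look like; `node_ix_for_key` and its hash are invented stand-ins for the repository's `node_ix_for_patch`:

#[derive(Clone)]
struct Node {
    id: String,
}

#[derive(Clone)]
struct Cluster {
    nodes: Vec<Node>,
}

// Hypothetical stand-in for node_ix_for_patch: map a patch key onto a
// node index deterministically, so every node in the cluster agrees on
// which node owns which patch.
fn node_ix_for_key(key: &str, cluster: &Cluster) -> usize {
    let h = key
        .bytes()
        .fold(0u64, |a, b| a.wrapping_mul(31).wrapping_add(b as u64));
    (h % cluster.nodes.len() as u64) as usize
}

fn main() {
    let cluster = Cluster {
        nodes: (0..3).map(|i| Node { id: format!("{:02}", i) }).collect(),
    };
    let my_ix: usize = 1;
    let key = "S10BC01-DBAM070:BAM_CH1_NORM/patch-000042";
    // The caller is expected to have routed the request to the owning
    // node already; a mismatch here indicates a routing bug.
    if node_ix_for_key(key, &cluster) != my_ix {
        eprintln!("node {} does not own {}", cluster.nodes[my_ix].id, key);
    }
}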
@@ -254,7 +255,7 @@ pub struct MergedFromRemotes {
 }

 impl MergedFromRemotes {
-    pub fn new(evq: Arc<EventsQuery>, cluster: Arc<Cluster>) -> Self {
+    pub fn new(evq: EventsQuery, cluster: Cluster) -> Self {
         let mut tcp_establish_futs = vec![];
         for node in &cluster.nodes {
             let f = super::raw::x_processed_stream_from_node(evq.clone(), node.clone());

disk/src/cache/pbv.rs

@@ -15,7 +15,6 @@ use netpod::{
 };
 use std::future::{ready, Future};
 use std::pin::Pin;
-use std::sync::Arc;
 use std::task::{Context, Poll};

 pub struct PreBinnedValueByteStream {
@@ -25,7 +24,7 @@ pub struct PreBinnedValueByteStream {
 }

 impl PreBinnedValueByteStream {
-    pub fn new(patch: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, node_config: Arc<NodeConfig>) -> Self {
+    pub fn new(patch: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, node_config: &NodeConfig) -> Self {
         Self {
             inp: PreBinnedValueStream::new(patch, channel, agg_kind, node_config),
             errored: false,
@@ -64,7 +63,7 @@ pub struct PreBinnedValueStream {
     patch_coord: PreBinnedPatchCoord,
     channel: Channel,
     agg_kind: AggKind,
-    node_config: Arc<NodeConfig>,
+    node_config: NodeConfig,
     open_check_local_file: Option<Pin<Box<dyn Future<Output = Result<tokio::fs::File, std::io::Error>> + Send>>>,
     fut2: Option<Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarBinBatch, Error>> + Send>>>,
 }
@@ -74,15 +73,15 @@ impl PreBinnedValueStream {
         patch_coord: PreBinnedPatchCoord,
         channel: Channel,
         agg_kind: AggKind,
-        node_config: Arc<NodeConfig>,
+        node_config: &NodeConfig,
     ) -> Self {
-        let node_ix = node_ix_for_patch(&patch_coord, &channel, &node_config.cluster);
-        assert!(node_ix == node_config.node.id);
+        // TODO check that we are the correct node.
+        let _node_ix = node_ix_for_patch(&patch_coord, &channel, &node_config.cluster);
         Self {
             patch_coord,
             channel,
             agg_kind,
-            node_config,
+            node_config: node_config.clone(),
             open_check_local_file: None,
             fut2: None,
         }
@@ -107,7 +106,7 @@ impl PreBinnedValueStream {
             range
         );
         assert!(g / h > 1);
-        assert!(g / h < 20);
+        assert!(g / h < 200);
         assert!(g % h == 0);
         let bin_size = range.grid_spec.bin_t_len();
         let channel = self.channel.clone();
@@ -116,7 +115,7 @@ impl PreBinnedValueStream {
         let patch_it = PreBinnedPatchIterator::from_range(range);
         let s = futures_util::stream::iter(patch_it)
             .map(move |coord| {
-                PreBinnedValueFetchedStream::new(coord, channel.clone(), agg_kind.clone(), node_config.clone())
+                PreBinnedValueFetchedStream::new(coord, channel.clone(), agg_kind.clone(), &node_config)
             })
             .flatten()
             .map(move |k| {
@@ -147,7 +146,6 @@ impl PreBinnedValueStream {
             ts2: self.patch_coord.patch_end(),
             count,
         };
-        let evq = Arc::new(evq);
         let s1 = MergedFromRemotes::new(evq, self.node_config.cluster.clone());
         let s2 = s1
             .map(|k| {

disk/src/cache/pbvfs.rs

@@ -9,7 +9,6 @@ use futures_util::{pin_mut, FutureExt};
 use netpod::log::*;
 use netpod::{AggKind, Channel, NodeConfig, PreBinnedPatchCoord};
 use std::pin::Pin;
-use std::sync::Arc;
 use std::task::{Context, Poll};

 pub struct PreBinnedValueFetchedStream {
@@ -23,7 +22,7 @@ impl PreBinnedValueFetchedStream {
         patch_coord: PreBinnedPatchCoord,
         channel: Channel,
         agg_kind: AggKind,
-        node_config: Arc<NodeConfig>,
+        node_config: &NodeConfig,
     ) -> Self {
         let nodeix = node_ix_for_patch(&patch_coord, &channel, &node_config.cluster);
         let node = &node_config.cluster.nodes[nodeix as usize];

@@ -1,5 +1,5 @@
 use err::Error;
-use netpod::{Channel, NanoRange, NodeConfig};
+use netpod::{Channel, NanoRange, Node};
 use nom::number::complete::{be_i16, be_i32, be_i64, be_i8, be_u8};
 use nom::Needed;
 #[allow(unused_imports)]
@@ -11,7 +11,6 @@ use nom::{
 use num_derive::{FromPrimitive, ToPrimitive};
 use num_traits::ToPrimitive;
 use serde::{Deserialize, Serialize};
-use std::sync::Arc;
 #[allow(unused_imports)]
 use tracing::{debug, error, info, trace, warn};

@@ -264,9 +263,8 @@ pub fn parse_config(inp: &[u8]) -> NRes<Config> {
     Ok((inp, ret))
 }

-pub async fn read_local_config(channel: &Channel, node_config: Arc<NodeConfig>) -> Result<Config, Error> {
-    let path = node_config
-        .node
+pub async fn read_local_config(channel: &Channel, node: &Node) -> Result<Config, Error> {
+    let path = node
         .data_base_path
         .join("config")
         .join(&channel.name)

@@ -5,14 +5,13 @@ use futures_util::StreamExt;
 use netpod::log::*;
 use netpod::{ChannelConfig, NanoRange, Nanos, Node};
 use std::mem::size_of;
-use std::sync::Arc;
 use tokio::fs::{File, OpenOptions};
 use tokio::io::{AsyncReadExt, AsyncSeekExt, ErrorKind, SeekFrom};

 pub fn open_files(
     range: &NanoRange,
     channel_config: &ChannelConfig,
-    node: Arc<Node>,
+    node: Node,
 ) -> async_channel::Receiver<Result<File, Error>> {
     let (chtx, chrx) = async_channel::bounded(2);
     let range = range.clone();
@@ -35,7 +34,7 @@ async fn open_files_inner(
     chtx: &async_channel::Sender<Result<File, Error>>,
     range: &NanoRange,
     channel_config: &ChannelConfig,
-    node: Arc<Node>,
+    node: Node,
 ) -> Result<(), Error> {
     let channel_config = channel_config.clone();
     // TODO reduce usage of `query` and see what we actually need.

@@ -6,7 +6,6 @@ use futures_core::Stream;
 use futures_util::StreamExt;
 use netpod::{ChannelConfig, NanoRange, Node};
 use std::pin::Pin;
-use std::sync::Arc;
 use std::task::{Context, Poll};
 use tokio::fs::File;

@@ -19,9 +18,9 @@ pub struct EventBlobsComplete {
 }

 impl EventBlobsComplete {
-    pub fn new(range: NanoRange, channel_config: ChannelConfig, node: Arc<Node>, buffer_size: usize) -> Self {
+    pub fn new(range: NanoRange, channel_config: ChannelConfig, node: Node, buffer_size: usize) -> Self {
         Self {
-            file_chan: open_files(&range, &channel_config, node.clone()),
+            file_chan: open_files(&range, &channel_config, node),
             evs: None,
             buffer_size,
             channel_config,
@@ -67,12 +66,12 @@ impl Stream for EventBlobsComplete {

 pub fn event_blobs_complete(
     query: &netpod::AggQuerySingleChannel,
-    node: Arc<Node>,
+    node: Node,
 ) -> impl Stream<Item = Result<EventFull, Error>> + Send {
     let query = query.clone();
     let node = node.clone();
     async_stream::stream! {
-        let filerx = open_files(err::todoval(), err::todoval(), node.clone());
+        let filerx = open_files(err::todoval(), err::todoval(), node);
         while let Ok(fileres) = filerx.recv().await {
             match fileres {
                 Ok(file) => {

@@ -38,7 +38,7 @@ pub async fn gen_test_data() -> Result<(), Error> {
     }
     for i1 in 0..3 {
         let node = Node {
-            id: i1,
+            id: format!("{:02}", i1),
             host: "localhost".into(),
             listen: "0.0.0.0".into(),
             port: 7780 + i1 as u16,

@@ -270,14 +270,14 @@ pub fn raw_concat_channel_read_stream_try_open_in_background(
 pub fn raw_concat_channel_read_stream_file_pipe(
     range: &NanoRange,
     channel_config: &ChannelConfig,
-    node: Arc<Node>,
+    node: Node,
     buffer_size: usize,
 ) -> impl Stream<Item = Result<BytesMut, Error>> + Send {
     let range = range.clone();
     let channel_config = channel_config.clone();
     let node = node.clone();
     async_stream::stream! {
-        let chrx = open_files(&range, &channel_config, node.clone());
+        let chrx = open_files(&range, &channel_config, node);
         while let Ok(file) = chrx.recv().await {
             let mut file = match file {
                 Ok(k) => k,
@@ -319,14 +319,11 @@ pub fn file_content_stream(
     }
 }

-pub fn parsed1(
-    query: &netpod::AggQuerySingleChannel,
-    node: Arc<Node>,
-) -> impl Stream<Item = Result<Bytes, Error>> + Send {
+pub fn parsed1(query: &netpod::AggQuerySingleChannel, node: &Node) -> impl Stream<Item = Result<Bytes, Error>> + Send {
     let query = query.clone();
     let node = node.clone();
     async_stream::stream! {
-        let filerx = open_files(err::todoval(), err::todoval(), node.clone());
+        let filerx = open_files(err::todoval(), err::todoval(), node);
         while let Ok(fileres) = filerx.recv().await {
             match fileres {
                 Ok(file) => {

@@ -14,7 +14,6 @@ use futures_core::Stream;
 use netpod::{AggKind, Channel, NanoRange, Node};
 use serde::{Deserialize, Serialize};
 use std::pin::Pin;
-use std::sync::Arc;
 use tokio::io::AsyncWriteExt;
 use tokio::net::TcpStream;
 #[allow(unused_imports)]
@@ -37,11 +36,11 @@ pub struct EventsQuery {
 pub struct EventQueryJsonStringFrame(String);

 pub async fn x_processed_stream_from_node(
-    query: Arc<EventsQuery>,
-    node: Arc<Node>,
+    query: EventsQuery,
+    node: Node,
 ) -> Result<Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarEventBatch, Error>> + Send>>, Error> {
     let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
-    let qjs = serde_json::to_string(query.as_ref())?;
+    let qjs = serde_json::to_string(&query)?;
     let (netin, mut netout) = net.into_split();
     let buf = make_frame(&EventQueryJsonStringFrame(qjs))?;
     netout.write_all(&buf).await?;

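This hunk is where the commit title pays off: with `EventsQuery` passed by value, the request serializes via `serde_json::to_string(&query)` with no `as_ref()` indirection through an `Arc`. A hedged sketch of the round trip; the fields of `EventsQuery` here are invented for illustration:

use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Serialize, Deserialize)]
struct EventsQuery {
    channel: String,
    ts1: u64,
    ts2: u64,
}

fn main() -> Result<(), serde_json::Error> {
    let query = EventsQuery {
        channel: "S10BC01-DBAM070:BAM_CH1_NORM".into(),
        ts1: 0,
        ts2: 1_000_000_000,
    };
    // Serialize by reference; no Arc, no as_ref().
    let qjs = serde_json::to_string(&query)?;
    // The receiving node decodes the frame back into the same type.
    let back: EventsQuery = serde_json::from_str(&qjs)?;
    assert_eq!(back.ts2, query.ts2);
    Ok(())
}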
@@ -11,31 +11,36 @@ use futures_util::StreamExt;
 #[allow(unused_imports)]
 use netpod::log::*;
 use netpod::timeunits::SEC;
-use netpod::{NodeConfig, ScalarType, Shape};
+use netpod::{Node, NodeConfig, ScalarType, Shape};
 use std::net::SocketAddr;
-use std::sync::Arc;
 use tokio::io::AsyncWriteExt;
 use tokio::net::tcp::OwnedWriteHalf;
 use tokio::net::TcpStream;
 use tracing::Instrument;

-pub async fn raw_service(node_config: Arc<NodeConfig>) -> Result<(), Error> {
-    let addr = format!("{}:{}", node_config.node.listen, node_config.node.port_raw);
+pub async fn raw_service(node_config: NodeConfig, node: Node) -> Result<(), Error> {
+    let addr = format!("{}:{}", node.listen, node.port_raw);
     let lis = tokio::net::TcpListener::bind(addr).await?;
     loop {
         match lis.accept().await {
             Ok((stream, addr)) => {
-                taskrun::spawn(raw_conn_handler(stream, addr, node_config.clone()));
+                let node = node.clone();
+                taskrun::spawn(raw_conn_handler(stream, addr, node_config.clone(), node));
             }
             Err(e) => Err(e)?,
         }
     }
 }

-async fn raw_conn_handler(stream: TcpStream, addr: SocketAddr, node_config: Arc<NodeConfig>) -> Result<(), Error> {
+async fn raw_conn_handler(
+    stream: TcpStream,
+    addr: SocketAddr,
+    node_config: NodeConfig,
+    node: Node,
+) -> Result<(), Error> {
     //use tracing_futures::Instrument;
     let span1 = span!(Level::INFO, "raw::raw_conn_handler");
-    let r = raw_conn_handler_inner(stream, addr, node_config)
+    let r = raw_conn_handler_inner(stream, addr, &node_config, &node)
         .instrument(span1)
         .await;
     match r {
@@ -52,9 +57,10 @@ pub type RawConnOut = Result<MinMaxAvgScalarEventBatch, Error>;
 async fn raw_conn_handler_inner(
     stream: TcpStream,
     addr: SocketAddr,
-    node_config: Arc<NodeConfig>,
+    node_config: &NodeConfig,
+    node: &Node,
 ) -> Result<(), Error> {
-    match raw_conn_handler_inner_try(stream, addr, node_config).await {
+    match raw_conn_handler_inner_try(stream, addr, node_config, node).await {
         Ok(_) => (),
         Err(mut ce) => {
             /*error!(
@@ -88,7 +94,8 @@ impl<E: Into<Error>> From<(E, OwnedWriteHalf)> for ConnErr {
 async fn raw_conn_handler_inner_try(
     stream: TcpStream,
     addr: SocketAddr,
-    node_config: Arc<NodeConfig>,
+    node_config: &NodeConfig,
+    node: &Node,
 ) -> Result<(), ConnErr> {
     info!("raw_conn_handler SPAWNED for {:?}", addr);
     let (netin, mut netout) = stream.into_split();
@@ -126,13 +133,13 @@ async fn raw_conn_handler_inner_try(
             return Err((Error::with_msg("can not parse request json"), netout))?;
         }
     };
-    match dbconn::channel_exists(&evq.channel, node_config.clone()).await {
+    match dbconn::channel_exists(&evq.channel, &node_config).await {
         Ok(_) => (),
         Err(e) => return Err((e, netout))?,
     }
     debug!("REQUEST {:?}", evq);
     let range = &evq.range;
-    let channel_config = match read_local_config(&evq.channel, node_config.clone()).await {
+    let channel_config = match read_local_config(&evq.channel, node).await {
         Ok(k) => k,
         Err(e) => return Err((e, netout))?,
     };

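Note the accept loop above now clones the plain `Node` for every connection instead of bumping an `Arc` refcount. A minimal sketch of that pattern using `tokio::spawn` directly (the repository uses its own `taskrun::spawn` wrapper; the field values here are illustrative):

#[derive(Clone, Debug)]
struct Node {
    listen: String,
    port_raw: u16,
}

async fn handle_conn(stream: tokio::net::TcpStream, node: Node) {
    // Per-connection work would use the owned Node here.
    let _ = (stream, node);
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let node = Node {
        listen: "0.0.0.0".into(),
        port_raw: 7780,
    };
    let lis = tokio::net::TcpListener::bind(format!("{}:{}", node.listen, node.port_raw)).await?;
    loop {
        let (stream, _addr) = lis.accept().await?;
        // Each spawned task gets its own clone; no shared-pointer bookkeeping.
        let node = node.clone();
        tokio::spawn(handle_conn(stream, node));
    }
}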
@@ -173,21 +180,16 @@ async fn raw_conn_handler_inner_try(
-        buffer_size: 1024 * 4,
     };
-    let mut s1 = EventBlobsComplete::new(
-        range.clone(),
-        query.channel_config.clone(),
-        node_config.node.clone(),
-        buffer_size,
-    )
-    .into_dim_1_f32_stream()
-    .into_binned_x_bins_1();
+    let buffer_size = 1024 * 4;
+    let mut s1 = EventBlobsComplete::new(range.clone(), query.channel_config.clone(), node.clone(), buffer_size)
+        .into_dim_1_f32_stream()
+        .into_binned_x_bins_1();
     let mut e = 0;
     while let Some(item) = s1.next().await {
         if let Ok(k) = &item {
             e += 1;
             trace!(
                 "emit items sp {:2} e {:3} len {:3} {:10?} {:10?}",
-                node_config.node.split,
+                node.split,
                 e,
                 k.tss.len(),
                 k.tss.first().map(|k| k / SEC),