Refactor: make frame buffer capacity configurable via PerfOpts
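
InMemoryFrameAsyncReadStream previously hard-coded its buffer capacity (let bufcap = 512;); this change removes that and threads a new netpod::PerfOpts struct from each call site down to the frame reader. Along the way, PreBinnedValueStream stops filter_map-ing errors away with a log line and instead forwards each Err as a one-element stream into the flattened output, MinMaxAvgScalarEventBatchStreamFromFrames::poll_next moves its completed/errored checks into the loop as a single break-if expression, and the get_binned test range and bin count are adjusted.

A minimal sketch of the new call path, assembled from the hunks below (evq, netin, and cluster stand in for values the surrounding code already has in scope):

    use netpod::PerfOpts;

    // Callers now pick the buffer capacity explicitly instead of
    // relying on the previous hard-coded value inside the reader.
    let perf_opts = PerfOpts { inmem_bufcap: 512 };

    // The capacity is handed to the low-level frame reader ...
    let frames = InMemoryFrameAsyncReadStream::new(netin, perf_opts.inmem_bufcap);

    // ... and the whole struct is threaded through the higher-level
    // constructors so they can forward it to their own network reads.
    let merged = MergedFromRemotes::new(evq, perf_opts, cluster);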
@@ -15,8 +15,8 @@ use futures_core::Stream;
 use futures_util::{pin_mut, StreamExt};
 use hyper::Response;
 use netpod::{
-    AggKind, BinnedRange, Channel, Cluster, NanoRange, NodeConfigCached, PreBinnedPatchCoord, PreBinnedPatchIterator,
-    PreBinnedPatchRange, ToNanos,
+    AggKind, BinnedRange, Channel, Cluster, NanoRange, NodeConfigCached, PerfOpts, PreBinnedPatchCoord,
+    PreBinnedPatchIterator, PreBinnedPatchRange, ToNanos,
 };
 use serde::{Deserialize, Serialize};
 use std::collections::BTreeMap;
@@ -174,6 +174,7 @@ pub async fn binned_bytes_for_http(
     let range = BinnedRange::covering_range(range.clone(), query.bin_count).ok_or(Error::with_msg(format!(
         "binned_bytes_for_http BinnedRange::covering_range returned None"
     )))?;
+    let perf_opts = PerfOpts { inmem_bufcap: 512 };
     match PreBinnedPatchRange::covering_range(query.range.clone(), query.bin_count) {
         Some(pre_range) => {
             info!("binned_bytes_for_http found pre_range: {:?}", pre_range);
@@ -206,7 +207,7 @@ pub async fn binned_bytes_for_http(
                 agg_kind: query.agg_kind.clone(),
             };
             // TODO do I need to set up more transformations or binning to deliver the requested data?
-            let s1 = MergedFromRemotes::new(evq, node_config.node_config.cluster.clone());
+            let s1 = MergedFromRemotes::new(evq, perf_opts, node_config.node_config.cluster.clone());
             let s1 = s1.into_binned_t(range);
             /*let s1 = s1.map(|k| {
                 use super::agg::scalarbinbatch::MinMaxAvgScalarBinBatchStreamItem::*;
@@ -372,10 +373,10 @@ pub struct MergedFromRemotes {
 }
 
 impl MergedFromRemotes {
-    pub fn new(evq: EventsQuery, cluster: Cluster) -> Self {
+    pub fn new(evq: EventsQuery, perf_opts: PerfOpts, cluster: Cluster) -> Self {
         let mut tcp_establish_futs = vec![];
         for node in &cluster.nodes {
-            let f = super::raw::x_processed_stream_from_node(evq.clone(), node.clone());
+            let f = super::raw::x_processed_stream_from_node(evq.clone(), perf_opts.clone(), node.clone());
             let f: T002 = Box::pin(f);
             tcp_establish_futs.push(f);
         }
disk/src/cache/pbv.rs
@@ -11,8 +11,8 @@ use futures_core::Stream;
 use futures_util::{FutureExt, StreamExt};
 use netpod::log::*;
 use netpod::streamext::SCC;
-use netpod::{BinnedRange, NodeConfigCached, PreBinnedPatchIterator, PreBinnedPatchRange};
-use std::future::{ready, Future};
+use netpod::{BinnedRange, NodeConfigCached, PerfOpts, PreBinnedPatchIterator, PreBinnedPatchRange};
+use std::future::Future;
 use std::path::PathBuf;
 use std::pin::Pin;
 use std::task::{Context, Poll};
@@ -105,7 +105,8 @@ impl PreBinnedValueStream {
                 // TODO do I need to set up more transformations or binning to deliver the requested data?
                 let count = self.query.patch.patch_t_len() / self.query.patch.bin_t_len();
                 let range = BinnedRange::covering_range(evq.range.clone(), count).unwrap();
-                let s1 = MergedFromRemotes::new(evq, self.node_config.node_config.cluster.clone());
+                let perf_opts = PerfOpts { inmem_bufcap: 512 };
+                let s1 = MergedFromRemotes::new(evq, perf_opts, self.node_config.node_config.cluster.clone());
                 let s1 = s1.into_binned_t(range);
                 let s1 = s1.map(|k| {
                     use MinMaxAvgScalarBinBatchStreamItem::*;
@@ -161,13 +162,12 @@ impl PreBinnedValueStream {
                     PreBinnedValueFetchedStream::new(&query, &node_config)
                 }
             })
-            .filter_map(|k| match k {
-                Ok(k) => ready(Some(k)),
-                Err(e) => {
-                    // TODO Reconsider error handling here:
-                    error!("{:?}", e);
-                    ready(None)
-                }
+            .map(|k| {
+                let s: Pin<Box<dyn Stream<Item = _> + Send>> = match k {
+                    Ok(k) => Box::pin(k),
+                    Err(e) => Box::pin(futures_util::stream::iter(vec![Err(e)])),
+                };
+                s
             })
             .flatten();
         self.fut2 = Some(Box::pin(s));
disk/src/cache/pbvfs.rs
@@ -9,7 +9,7 @@ use futures_util::{pin_mut, FutureExt};
 use http::StatusCode;
 #[allow(unused_imports)]
 use netpod::log::*;
-use netpod::{EventDataReadStats, NodeConfigCached};
+use netpod::{EventDataReadStats, NodeConfigCached, PerfOpts};
 use serde::{Deserialize, Serialize};
 use std::pin::Pin;
 use std::task::{Context, Poll};
@@ -104,8 +104,9 @@ impl Stream for PreBinnedValueFetchedStream {
                     Ready(res) => match res {
                         Ok(res) => {
                             if res.status() == StatusCode::OK {
+                                let perf_opts = PerfOpts { inmem_bufcap: 512 };
                                 let s1 = HttpBodyAsAsyncRead::new(res);
-                                let s2 = InMemoryFrameAsyncReadStream::new(s1);
+                                let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
                                 self.res = Some(s2);
                                 continue 'outer;
                             } else {
@@ -31,13 +31,11 @@ impl<T> InMemoryFrameAsyncReadStream<T>
 where
     T: AsyncRead + Unpin,
 {
-    pub fn new(inp: T) -> Self {
-        // TODO make capacity adjustable.
-        let bufcap = 512;
+    pub fn new(inp: T, bufcap: usize) -> Self {
         let mut t = Self {
             inp,
             buf: BytesMut::new(),
-            bufcap: bufcap,
+            bufcap,
             wp: 0,
             tryparse: false,
             errored: false,
@@ -11,7 +11,7 @@ use crate::frame::makeframe::{make_frame, make_term_frame};
 use crate::raw::bffr::MinMaxAvgScalarEventBatchStreamFromFrames;
 use err::Error;
 use futures_core::Stream;
-use netpod::{AggKind, Channel, NanoRange, Node};
+use netpod::{AggKind, Channel, NanoRange, Node, PerfOpts};
 use serde::{Deserialize, Serialize};
 use std::pin::Pin;
 use tokio::io::AsyncWriteExt;
@@ -37,6 +37,7 @@ pub struct EventQueryJsonStringFrame(String);
 
 pub async fn x_processed_stream_from_node(
     query: EventsQuery,
+    perf_opts: PerfOpts,
     node: Node,
 ) -> Result<Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarEventBatchStreamItem, Error>> + Send>>, Error> {
     let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
@@ -48,7 +49,7 @@ pub async fn x_processed_stream_from_node(
     netout.write_all(&buf).await?;
     netout.flush().await?;
     netout.forget();
-    let frames = InMemoryFrameAsyncReadStream::new(netin);
+    let frames = InMemoryFrameAsyncReadStream::new(netin, perf_opts.inmem_bufcap);
     let items = MinMaxAvgScalarEventBatchStreamFromFrames::new(frames);
     Ok(Box::pin(items))
 }
@@ -40,28 +40,22 @@ where
 
     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
         use Poll::*;
-        if self.completed {
-            panic!("MinMaxAvgScalarEventBatchStreamFromFrames poll_next on completed");
-        }
-        if self.errored {
-            self.completed = true;
-            return Ready(None);
-        }
         loop {
-            break match self.inp.poll_next_unpin(cx) {
-                Ready(Some(Ok(frame))) => {
-                    type ExpectedType = RawConnOut;
-                    trace!(
-                        "MinMaxAvgScalarEventBatchStreamFromFrames got full frame buf {}",
-                        frame.buf().len()
-                    );
-                    match decode_frame::<ExpectedType>(&frame) {
-                        Ok(item) => {
-                            match item {
+            break if self.completed {
+                panic!("MinMaxAvgScalarEventBatchStreamFromFrames poll_next on completed");
+            } else if self.errored {
+                self.completed = true;
+                Ready(None)
+            } else {
+                match self.inp.poll_next_unpin(cx) {
+                    Ready(Some(Ok(frame))) => {
+                        type ExpectedType = RawConnOut;
+                        match decode_frame::<ExpectedType>(&frame) {
+                            Ok(item) => match item {
                                 Ok(item) => {
                                     match &item {
                                         MinMaxAvgScalarEventBatchStreamItem::EventDataReadStats(stats) => {
-                                            info!("✒✒✒✒✒✒✒✒✒✒✒✒✒✒✒✒ MinMaxAvgScalarEventBatchStreamFromFrames stats {:?}", stats);
+                                            info!("✒✒ ✒✒ ✒✒ ✒✒ ✒✒ ✒✒ stats {:?}", stats);
                                         }
                                         _ => {
                                             info!("✒ ✒ ✒ ✒ other kind")
@@ -73,27 +67,27 @@ where
                                         self.errored = true;
                                         Ready(Some(Err(e)))
                                     }
+                                },
+                                Err(e) => {
+                                    error!(
+                                        "MinMaxAvgScalarEventBatchStreamFromFrames ~~~~~~~~ ERROR on frame payload {}",
+                                        frame.buf().len(),
+                                    );
+                                    self.errored = true;
+                                    Ready(Some(Err(e)))
                                 }
                             }
-                            Err(e) => {
-                                error!(
-                                    "MinMaxAvgScalarEventBatchStreamFromFrames ~~~~~~~~ ERROR on frame payload {}",
-                                    frame.buf().len(),
-                                );
-                                self.errored = true;
-                                Ready(Some(Err(e)))
-                            }
                         }
+                        Ready(Some(Err(e))) => {
+                            self.errored = true;
+                            Ready(Some(Err(e)))
+                        }
+                        Ready(None) => {
+                            self.completed = true;
+                            Ready(None)
+                        }
+                        Pending => Pending,
                     }
-                Ready(Some(Err(e))) => {
-                    self.errored = true;
-                    Ready(Some(Err(e)))
-                }
-                Ready(None) => {
-                    self.completed = true;
-                    Ready(None)
-                }
-                Pending => Pending,
             };
         }
     }
@@ -9,7 +9,7 @@ use crate::raw::{EventQueryJsonStringFrame, EventsQuery};
 use err::Error;
 use futures_util::StreamExt;
 use netpod::log::*;
-use netpod::{NodeConfigCached, Shape};
+use netpod::{NodeConfigCached, PerfOpts, Shape};
 use std::net::SocketAddr;
 use tokio::io::AsyncWriteExt;
 use tokio::net::tcp::OwnedWriteHalf;
@@ -85,7 +85,8 @@ async fn raw_conn_handler_inner_try(
 ) -> Result<(), ConnErr> {
     let _ = addr;
     let (netin, mut netout) = stream.into_split();
-    let mut h = InMemoryFrameAsyncReadStream::new(netin);
+    let perf_opts = PerfOpts { inmem_bufcap: 512 };
+    let mut h = InMemoryFrameAsyncReadStream::new(netin, perf_opts.inmem_bufcap);
     let mut frames = vec![];
     while let Some(k) = h
         .next()
@@ -667,3 +667,8 @@ impl EventDataReadStats
         k.parsed_bytes = 0;
     }
 }
+
+#[derive(Clone, Debug)]
+pub struct PerfOpts {
+    pub inmem_bufcap: usize,
+}
@@ -5,6 +5,7 @@ use futures_util::TryStreamExt;
 use http::StatusCode;
 use hyper::Body;
 use netpod::log::*;
+use netpod::PerfOpts;
 
 pub async fn get_binned(
     host: String,
@@ -39,8 +40,9 @@ pub async fn get_binned(
         error!("Server error {:?}", res);
         return Err(Error::with_msg(format!("Server error {:?}", res)));
     }
+    let perf_opts = PerfOpts { inmem_bufcap: 512 };
     let s1 = disk::cache::HttpBodyAsAsyncRead::new(res);
-    let s2 = InMemoryFrameAsyncReadStream::new(s1);
+    let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
     use futures_util::StreamExt;
     use std::future::ready;
     let mut bin_count = 0;
@@ -10,7 +10,7 @@ use futures_util::TryStreamExt;
 use http::StatusCode;
 use hyper::Body;
 use netpod::log::*;
-use netpod::{Cluster, Database, Node};
+use netpod::{Cluster, Database, Node, PerfOpts};
 use std::future::ready;
 use tokio::io::AsyncRead;
 
@@ -50,8 +50,8 @@ async fn get_binned_0_inner() -> Result<(), Error> {
     get_binned_channel(
         "wave-f64-be-n21",
         "1970-01-01T00:20:10.000Z",
-        "1970-01-01T00:20:15.000Z",
-        4,
+        "1970-01-01T00:20:20.000Z",
+        1,
         &cluster,
     )
     .await?;
@@ -93,6 +93,8 @@ where
     let end_date: DateTime<Utc> = end_date.as_ref().parse()?;
     let channel_backend = "testbackend";
     let date_fmt = "%Y-%m-%dT%H:%M:%S.%3fZ";
+    let perf_opts = PerfOpts { inmem_bufcap: 512 };
+    // TODO have a function to form the uri, including perf opts:
     let uri = format!(
         "http://{}:{}/api/1/binned?cache_usage=ignore&channel_backend={}&channel_name={}&bin_count={}&beg_date={}&end_date={}",
         node0.host,
@@ -114,7 +116,7 @@ where
         error!("client response {:?}", res);
     }
     let s1 = disk::cache::HttpBodyAsAsyncRead::new(res);
-    let s2 = InMemoryFrameAsyncReadStream::new(s1);
+    let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
     let res = consume_binned_response(s2).await?;
     let t2 = chrono::Utc::now();
     let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;