Iterate through pb files and parse the header
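The pb-file iteration and header parsing named in the title are not visible in the files below (this diff adds the daqbufp2 crate and its tests). Purely as an illustration of the technique the title names, a sketch with an invented header layout (big-endian u16 name length followed by the name bytes) and invented names parse_header / list_pb_files:

use std::fs;
use std::io::{self, Read};
use std::path::Path;

// Hypothetical sketch only: the real .pb header layout is defined elsewhere
// in the daqbuffer tree, not in this commit.
fn parse_header(path: &Path) -> io::Result<String> {
    let mut f = fs::File::open(path)?;
    let mut len_buf = [0u8; 2];
    f.read_exact(&mut len_buf)?;
    let len = u16::from_be_bytes(len_buf) as usize;
    let mut name = vec![0u8; len];
    f.read_exact(&mut name)?;
    Ok(String::from_utf8_lossy(&name).into_owned())
}

// Walk a directory and parse the header of every *.pb file found.
fn list_pb_files(dir: &Path) -> io::Result<()> {
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        if path.extension().map_or(false, |e| e == "pb") {
            println!("{} -> {}", path.display(), parse_header(&path)?);
        }
    }
    Ok(())
}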
daqbufp2/Cargo.toml (new file, 29 lines)
@@ -0,0 +1,29 @@
[package]
name = "daqbufp2"
version = "0.0.1-a.dev.12"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2018"

[dependencies]
tokio = { version = "1.4.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
hyper = "0.14"
http = "0.2"
tracing = "0.1.25"
tracing-subscriber = "0.2.17"
futures-core = "0.3.14"
futures-util = "0.3.14"
bytes = "1.0.1"
bincode = "1.3.3"
#async-channel = "1"
#dashmap = "3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
chrono = "0.4"
url = "2.2.2"
lazy_static = "1.4.0"
err = { path = "../err" }
taskrun = { path = "../taskrun" }
netpod = { path = "../netpod" }
httpret = { path = "../httpret" }
disk = { path = "../disk" }
daqbufp2/src/client.rs (new file, 161 lines)
@@ -0,0 +1,161 @@
use chrono::{DateTime, Utc};
use disk::agg::scalarbinbatch::MinMaxAvgScalarBinBatch;
use disk::agg::streams::StreamItem;
use disk::binned::query::{BinnedQuery, CacheUsage};
use disk::binned::RangeCompletableItem;
use disk::frame::inmem::InMemoryFrameAsyncReadStream;
use disk::frame::makeframe::FrameType;
use disk::streamlog::Streamlog;
use err::Error;
use futures_util::TryStreamExt;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, AppendToUrl, ByteSize, Channel, HostPort, NanoRange, PerfOpts, APP_OCTET};
use url::Url;

pub async fn status(host: String, port: u16) -> Result<(), Error> {
    let t1 = Utc::now();
    let uri = format!("http://{}:{}/api/4/node_status", host, port);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(uri)
        .body(Body::empty())?;
    let client = hyper::Client::new();
    let res = client.request(req).await?;
    if res.status() != StatusCode::OK {
        error!("Server error {:?}", res);
        return Err(Error::with_msg(format!("Server error {:?}", res)));
    }
    let body = hyper::body::to_bytes(res.into_body()).await?;
    let res = String::from_utf8(body.to_vec())?;
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    info!("node_status DONE duration: {} ms", ms);
    println!("{}", res);
    Ok(())
}

pub async fn get_binned(
    host: String,
    port: u16,
    channel_backend: String,
    channel_name: String,
    beg_date: DateTime<Utc>,
    end_date: DateTime<Utc>,
    bin_count: u32,
    cache_usage: CacheUsage,
    disk_stats_every_kb: u32,
) -> Result<(), Error> {
    info!("------- get_binned client");
    info!("channel {}", channel_name);
    info!("beg {}", beg_date);
    info!("end {}", end_date);
    info!("-------");
    let t1 = Utc::now();
    let channel = Channel {
        backend: channel_backend.clone(),
        name: channel_name.into(),
    };
    let agg_kind = AggKind::DimXBins1;
    let range = NanoRange::from_date_time(beg_date, end_date);
    let mut query = BinnedQuery::new(channel, range, bin_count, agg_kind);
    query.set_cache_usage(cache_usage);
    query.set_disk_stats_every(ByteSize(1024 * disk_stats_every_kb));
    let hp = HostPort { host, port };
    let mut url = Url::parse(&format!("http://{}:{}/api/4/binned", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    let url = url;
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_OCTET)
        .body(Body::empty())?;
    let client = hyper::Client::new();
    let res = client.request(req).await?;
    if res.status() != StatusCode::OK {
        error!("Server error {:?}", res);
        let (head, body) = res.into_parts();
        let buf = hyper::body::to_bytes(body).await?;
        let s = String::from_utf8_lossy(&buf);
        return Err(Error::with_msg(format!(
            concat!(
                "Server error {:?}\n",
                "---------------------- message from http body:\n",
                "{}\n",
                "---------------------- end of http body",
            ),
            head, s
        )));
    }
    let perf_opts = PerfOpts { inmem_bufcap: 512 };
    let s1 = disk::cache::HttpBodyAsAsyncRead::new(res);
    let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
    use futures_util::StreamExt;
    use std::future::ready;
    let s3 = s2
        .map_err(|e| error!("get_binned {:?}", e))
        .filter_map(|item| {
            let g = match item {
                Ok(item) => match item {
                    StreamItem::Log(item) => {
                        Streamlog::emit(&item);
                        None
                    }
                    StreamItem::Stats(item) => {
                        info!("Stats: {:?}", item);
                        None
                    }
                    StreamItem::DataItem(frame) => {
                        type ExpectedType = Result<StreamItem<RangeCompletableItem<MinMaxAvgScalarBinBatch>>, Error>;
                        let type_id_exp = <ExpectedType as FrameType>::FRAME_TYPE_ID;
                        if frame.tyid() != type_id_exp {
                            error!("unexpected type id got {} exp {}", frame.tyid(), type_id_exp);
                        }
                        let n1 = frame.buf().len();
                        match bincode::deserialize::<ExpectedType>(frame.buf()) {
                            Ok(item) => match item {
                                Ok(item) => {
                                    match item {
                                        StreamItem::Log(item) => {
                                            Streamlog::emit(&item);
                                        }
                                        StreamItem::Stats(item) => {
                                            info!("Stats: {:?}", item);
                                        }
                                        StreamItem::DataItem(item) => {
                                            info!("DataItem: {:?}", item);
                                        }
                                    }
                                    Some(Ok(()))
                                }
                                Err(e) => {
                                    error!("len {} error frame {:?}", n1, e);
                                    Some(Err(e))
                                }
                            },
                            Err(e) => {
                                error!("len {} bincode error {:?}", n1, e);
                                Some(Err(e.into()))
                            }
                        }
                    }
                },
                Err(e) => Some(Err(Error::with_msg(format!("{:?}", e)))),
            };
            ready(g)
        })
        .for_each(|_| ready(()));
    s3.await;
    let t2 = chrono::Utc::now();
    // Byte accounting is not yet wired up to the stream, so ntot stays 0.
    let ntot = 0;
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    // Guard against a zero-millisecond duration to avoid division by zero.
    let throughput = ntot / 1024 * 1000 / ms.max(1);
    info!(
        "get_cached_0 DONE total download {} MB throughput {:5} kB/s bin_count {}",
        ntot / 1024 / 1024,
        throughput,
        bin_count,
    );
    Ok(())
}
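A minimal sketch of driving these client calls, in the style of the tests below (taskrun::run executes a future to completion; the host, port, backend, and channel values are placeholders that match the test cluster in nodes.rs, not a definitive invocation):

use disk::binned::query::CacheUsage;

// Sketch only, not part of this commit.
fn main() {
    taskrun::run(async {
        daqbufp2::client::status("localhost".into(), 8360).await?;
        daqbufp2::client::get_binned(
            "localhost".into(),
            8360,
            "testbackend".into(),
            "scalar-i32-be".into(),
            "1970-01-01T00:20:10.000Z".parse()?,
            "1970-01-01T00:20:50.000Z".parse()?,
            3,                  // bin_count
            CacheUsage::Ignore, // bypass the bin cache
            1024,               // disk_stats_every_kb
        )
        .await
    })
    .unwrap();
}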
daqbufp2/src/lib.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
use tokio::task::JoinHandle;

use err::Error;
use netpod::{Cluster, NodeConfig, NodeConfigCached, ProxyConfig};

pub mod client;
pub mod nodes;
#[cfg(test)]
pub mod test;

pub fn spawn_test_hosts(cluster: Cluster) -> Vec<JoinHandle<Result<(), Error>>> {
    let mut ret = vec![];
    for node in &cluster.nodes {
        let node_config = NodeConfig {
            cluster: cluster.clone(),
            name: format!("{}:{}", node.host, node.port),
        };
        let node_config: Result<NodeConfigCached, Error> = node_config.into();
        let node_config = node_config.unwrap();
        let h = tokio::spawn(httpret::host(node_config));
        ret.push(h);
    }
    ret
}

pub async fn run_node(node_config: NodeConfigCached) -> Result<(), Error> {
    httpret::host(node_config).await?;
    Ok(())
}

pub async fn run_proxy(proxy_config: ProxyConfig) -> Result<(), Error> {
    httpret::proxy::proxy(proxy_config).await?;
    Ok(())
}
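spawn_test_hosts unwraps the NodeConfig to NodeConfigCached conversion for each node; the same pattern can be written fallibly. A sketch (node_config_for is an invented name, not part of this commit):

use err::Error;
use netpod::{Cluster, NodeConfig, NodeConfigCached};

// Derive the cached per-node config for one node of a cluster, propagating
// the conversion error instead of unwrapping as spawn_test_hosts does.
fn node_config_for(cluster: &Cluster, idx: usize) -> Result<NodeConfigCached, Error> {
    let node = &cluster.nodes[idx];
    let node_config = NodeConfig {
        cluster: cluster.clone(),
        name: format!("{}:{}", node.host, node.port),
    };
    node_config.into()
}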
daqbufp2/src/nodes.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
use crate::spawn_test_hosts;
use err::Error;
use netpod::{Cluster, Database, Node};
use std::sync::{Arc, Mutex};
use tokio::task::JoinHandle;

pub struct RunningHosts {
    pub cluster: Cluster,
    _jhs: Vec<JoinHandle<Result<(), Error>>>,
}

lazy_static::lazy_static! {
    static ref HOSTS_RUNNING: Mutex<Option<Arc<RunningHosts>>> = Mutex::new(None);
}

pub fn require_test_hosts_running() -> Result<Arc<RunningHosts>, Error> {
    let mut g = HOSTS_RUNNING.lock().unwrap();
    match g.as_ref() {
        None => {
            let cluster = test_cluster();
            let jhs = spawn_test_hosts(cluster.clone());
            let ret = RunningHosts {
                cluster: cluster.clone(),
                _jhs: jhs,
            };
            let a = Arc::new(ret);
            *g = Some(a.clone());
            Ok(a)
        }
        Some(gg) => Ok(gg.clone()),
    }
}

fn test_cluster() -> Cluster {
    let nodes = (0..3)
        .map(|id| Node {
            host: "localhost".into(),
            listen: "0.0.0.0".into(),
            port: 8360 + id as u16,
            port_raw: 8360 + id as u16 + 100,
            data_base_path: format!("../tmpdata/node{:02}", id).into(),
            ksprefix: "ks".into(),
            split: id,
            backend: "testbackend".into(),
            bin_grain_kind: 0,
            archiver_appliance: None,
        })
        .collect();
    Cluster {
        nodes,
        database: Database {
            name: "daqbuffer".into(),
            host: "localhost".into(),
            user: "daqbuffer".into(),
            pass: "daqbuffer".into(),
        },
    }
}
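The lazy_static Mutex<Option<Arc<..>>> above memoizes the spawned hosts so that every test shares one cluster. On toolchains with std::sync::OnceLock (Rust 1.70+; the crate pins edition 2018, so lazy_static is the contemporary choice), the same memoization could be written without the explicit lock. A sketch reusing RunningHosts, test_cluster, and spawn_test_hosts from above:

use std::sync::{Arc, OnceLock};

// Same once-only initialization as require_test_hosts_running, with OnceLock
// standing in for lazy_static + Mutex<Option<..>>.
static HOSTS: OnceLock<Arc<RunningHosts>> = OnceLock::new();

pub fn require_test_hosts_running_oncelock() -> Arc<RunningHosts> {
    HOSTS
        .get_or_init(|| {
            let cluster = test_cluster();
            let jhs = spawn_test_hosts(cluster.clone());
            Arc::new(RunningHosts { cluster, _jhs: jhs })
        })
        .clone()
}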
daqbufp2/src/test.rs (new file, 24 lines)
@@ -0,0 +1,24 @@
use bytes::BytesMut;

pub mod binnedbinary;
pub mod binnedjson;
pub mod events;

#[test]
fn bufs() {
    use bytes::{Buf, BufMut};
    let mut buf = BytesMut::with_capacity(1024);
    assert!(buf.as_mut().len() == 0);
    buf.put_u32_le(123);
    assert!(buf.as_mut().len() == 4);
    let mut b2 = buf.split_to(4);
    assert!(b2.capacity() == 4);
    b2.advance(2);
    assert!(b2.capacity() == 2);
    b2.advance(2);
    assert!(b2.capacity() == 0);
    assert!(buf.capacity() == 1020);
    assert!(buf.remaining() == 0);
    assert!(buf.remaining_mut() >= 1020);
    assert!(buf.capacity() == 1020);
}
daqbufp2/src/test/binnedbinary.rs (new file, 256 lines)
@@ -0,0 +1,256 @@
use crate::nodes::require_test_hosts_running;
use chrono::{DateTime, Utc};
use disk::agg::streams::{StatsItem, StreamItem};
use disk::binned::query::{BinnedQuery, CacheUsage};
use disk::binned::{MinMaxAvgBins, RangeCompletableItem, WithLen};
use disk::frame::inmem::InMemoryFrameAsyncReadStream;
use disk::frame::makeframe::{FrameType, SubFrId};
use disk::streamlog::Streamlog;
use disk::Sitemty;
use err::Error;
use futures_util::{StreamExt, TryStreamExt};
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, AppendToUrl, Channel, Cluster, HostPort, NanoRange, PerfOpts, APP_OCTET};
use serde::de::DeserializeOwned;
use std::fmt;
use std::future::ready;
use tokio::io::AsyncRead;
use url::Url;

#[test]
fn get_binned_binary() {
    taskrun::run(get_binned_binary_inner()).unwrap();
}

async fn get_binned_binary_inner() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    if true {
        get_binned_channel::<i32>(
            "scalar-i32-be",
            "1970-01-01T00:20:10.000Z",
            "1970-01-01T00:20:50.000Z",
            3,
            cluster,
            true,
            4,
        )
        .await?;
    }
    if true {
        get_binned_channel::<f64>(
            "wave-f64-be-n21",
            "1970-01-01T00:20:10.000Z",
            "1970-01-01T00:20:30.000Z",
            2,
            cluster,
            true,
            2,
        )
        .await?;
    }
    if true {
        get_binned_channel::<u16>(
            "wave-u16-le-n77",
            "1970-01-01T01:11:00.000Z",
            "1970-01-01T01:35:00.000Z",
            7,
            cluster,
            true,
            24,
        )
        .await?;
    }
    if true {
        get_binned_channel::<u16>(
            "wave-u16-le-n77",
            "1970-01-01T01:42:00.000Z",
            "1970-01-01T03:55:00.000Z",
            2,
            cluster,
            true,
            3,
        )
        .await?;
    }
    Ok(())
}

async fn get_binned_channel<NTY>(
    channel_name: &str,
    beg_date: &str,
    end_date: &str,
    bin_count: u32,
    cluster: &Cluster,
    expect_range_complete: bool,
    expect_bin_count: u64,
) -> Result<BinnedResponse, Error>
where
    NTY: fmt::Debug + SubFrId + DeserializeOwned,
{
    let t1 = Utc::now();
    let agg_kind = AggKind::DimXBins1;
    let node0 = &cluster.nodes[0];
    let beg_date: DateTime<Utc> = beg_date.parse()?;
    let end_date: DateTime<Utc> = end_date.parse()?;
    let channel_backend = "testbackend";
    let perf_opts = PerfOpts { inmem_bufcap: 512 };
    let channel = Channel {
        backend: channel_backend.into(),
        name: channel_name.into(),
    };
    let range = NanoRange::from_date_time(beg_date, end_date);
    let mut query = BinnedQuery::new(channel, range, bin_count, agg_kind);
    query.set_cache_usage(CacheUsage::Ignore);
    query.set_disk_io_buffer_size(1024 * 16);
    let hp = HostPort::from_node(node0);
    let mut url = Url::parse(&format!("http://{}:{}/api/4/binned", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    let url = url;
    info!("get_binned_channel get {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_OCTET)
        .body(Body::empty())?;
    let client = hyper::Client::new();
    let res = client.request(req).await?;
    if res.status() != StatusCode::OK {
        error!("client response {:?}", res);
    }
    let s1 = disk::cache::HttpBodyAsAsyncRead::new(res);
    let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
    let res = consume_binned_response::<NTY, _>(s2).await?;
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    info!("get_cached_0 DONE bin_count {} time {} ms", res.bin_count, ms);
    if !res.is_valid() {
        Err(Error::with_msg(format!("invalid response: {:?}", res)))
    } else if res.range_complete_count == 0 && expect_range_complete {
        Err(Error::with_msg(format!("expect range complete: {:?}", res)))
    } else if res.bin_count != expect_bin_count {
        Err(Error::with_msg(format!("bin count mismatch: {:?}", res)))
    } else {
        Ok(res)
    }
}

#[derive(Debug)]
pub struct BinnedResponse {
    bin_count: u64,
    err_item_count: u64,
    data_item_count: u64,
    bytes_read: u64,
    range_complete_count: u64,
    log_item_count: u64,
    stats_item_count: u64,
}

impl BinnedResponse {
    pub fn new() -> Self {
        Self {
            bin_count: 0,
            err_item_count: 0,
            data_item_count: 0,
            bytes_read: 0,
            range_complete_count: 0,
            log_item_count: 0,
            stats_item_count: 0,
        }
    }

    pub fn is_valid(&self) -> bool {
        self.range_complete_count <= 1
    }
}

async fn consume_binned_response<NTY, T>(inp: InMemoryFrameAsyncReadStream<T>) -> Result<BinnedResponse, Error>
where
    NTY: fmt::Debug + SubFrId + DeserializeOwned,
    T: AsyncRead + Unpin,
{
    let s1 = inp
        .map_err(|e| error!("TEST GOT ERROR {:?}", e))
        .filter_map(|item| {
            let g = match item {
                Ok(item) => match item {
                    StreamItem::Log(item) => {
                        Streamlog::emit(&item);
                        None
                    }
                    StreamItem::Stats(item) => {
                        info!("Stats: {:?}", item);
                        None
                    }
                    StreamItem::DataItem(frame) => {
                        if frame.tyid() != <Sitemty<MinMaxAvgBins<NTY>> as FrameType>::FRAME_TYPE_ID {
                            error!("test receives unexpected tyid {:x}", frame.tyid());
                        }
                        match bincode::deserialize::<Sitemty<MinMaxAvgBins<NTY>>>(frame.buf()) {
                            Ok(item) => match item {
                                Ok(item) => match item {
                                    StreamItem::Log(item) => {
                                        Streamlog::emit(&item);
                                        Some(Ok(StreamItem::Log(item)))
                                    }
                                    item => {
                                        info!("TEST GOT ITEM {:?}", item);
                                        Some(Ok(item))
                                    }
                                },
                                Err(e) => {
                                    error!("TEST GOT ERROR FRAME: {:?}", e);
                                    Some(Err(e))
                                }
                            },
                            Err(e) => {
                                error!("bincode error: {:?}", e);
                                Some(Err(e.into()))
                            }
                        }
                    }
                },
                Err(e) => Some(Err(Error::with_msg(format!("WEIRD EMPTY ERROR {:?}", e)))),
            };
            ready(g)
        })
        .fold(BinnedResponse::new(), |mut a, k| {
            let g = match k {
                Ok(StreamItem::Log(_item)) => {
                    a.log_item_count += 1;
                    a
                }
                Ok(StreamItem::Stats(item)) => match item {
                    StatsItem::EventDataReadStats(item) => {
                        a.bytes_read += item.parsed_bytes;
                        a
                    }
                },
                Ok(StreamItem::DataItem(item)) => match item {
                    RangeCompletableItem::RangeComplete => {
                        a.range_complete_count += 1;
                        a
                    }
                    RangeCompletableItem::Data(item) => {
                        a.data_item_count += 1;
                        a.bin_count += WithLen::len(&item) as u64;
                        a
                    }
                },
                Err(_e) => {
                    a.err_item_count += 1;
                    a
                }
            };
            ready(g)
        });
    let ret = s1.await;
    info!("BinnedResponse: {:?}", ret);
    Ok(ret)
}
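consume_binned_response first checks the declared frame type id and then bincode-decodes the expected Sitemty; events.rs below repeats the same ladder. A generic helper in this shape could factor out the decode step. A sketch (decode_frame is an invented name; the u32 type of the id is an assumption, and the error conversion mirrors the explicit .into() calls above):

use disk::frame::makeframe::FrameType;
use disk::Sitemty;
use err::Error;
use serde::de::DeserializeOwned;

// Validate the frame type id carried on the wire, then decode the payload
// as the expected item type.
fn decode_frame<T>(tyid: u32, buf: &[u8]) -> Result<Sitemty<T>, Error>
where
    Sitemty<T>: FrameType + DeserializeOwned,
{
    let exp = <Sitemty<T> as FrameType>::FRAME_TYPE_ID;
    if tyid != exp {
        return Err(Error::with_msg(format!("unexpected tyid {:x}, expected {:x}", tyid, exp)));
    }
    bincode::deserialize::<Sitemty<T>>(buf).map_err(|e| e.into())
}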
daqbufp2/src/test/binnedjson.rs (new file, 151 lines)
@@ -0,0 +1,151 @@
use crate::nodes::require_test_hosts_running;
use chrono::{DateTime, Utc};
use disk::binned::query::{BinnedQuery, CacheUsage};
use err::Error;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, AppendToUrl, Channel, Cluster, NanoRange, APP_JSON};
use std::time::Duration;
use url::Url;

#[test]
fn get_binned_json_0() {
    taskrun::run(get_binned_json_0_inner()).unwrap();
}

async fn get_binned_json_0_inner() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    get_binned_json_common(
        "scalar-i32-be",
        "1970-01-01T00:20:10.000Z",
        "1970-01-01T01:20:30.000Z",
        10,
        AggKind::DimXBins1,
        cluster,
        13,
        true,
    )
    .await
}

#[test]
fn get_binned_json_1() {
    taskrun::run(get_binned_json_1_inner()).unwrap();
}

async fn get_binned_json_1_inner() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    get_binned_json_common(
        "wave-f64-be-n21",
        "1970-01-01T00:20:10.000Z",
        "1970-01-01T01:20:45.000Z",
        10,
        AggKind::DimXBins1,
        cluster,
        13,
        true,
    )
    .await
}

#[test]
fn get_binned_json_2() {
    taskrun::run(get_binned_json_2_inner()).unwrap();
}

async fn get_binned_json_2_inner() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    get_binned_json_common(
        "wave-f64-be-n21",
        "1970-01-01T00:20:10.000Z",
        "1970-01-01T00:20:20.000Z",
        2,
        AggKind::DimXBinsN(3),
        cluster,
        2,
        true,
    )
    .await
}

async fn get_binned_json_common(
    channel_name: &str,
    beg_date: &str,
    end_date: &str,
    bin_count: u32,
    agg_kind: AggKind,
    cluster: &Cluster,
    expect_bin_count: u32,
    expect_finalised_range: bool,
) -> Result<(), Error> {
    let t1 = Utc::now();
    let node0 = &cluster.nodes[0];
    let beg_date: DateTime<Utc> = beg_date.parse()?;
    let end_date: DateTime<Utc> = end_date.parse()?;
    let channel_backend = "testbackend";
    let channel = Channel {
        backend: channel_backend.into(),
        name: channel_name.into(),
    };
    let range = NanoRange::from_date_time(beg_date, end_date);
    let mut query = BinnedQuery::new(channel, range, bin_count, agg_kind);
    query.set_timeout(Duration::from_millis(15000));
    query.set_cache_usage(CacheUsage::Ignore);
    let mut url = Url::parse(&format!("http://{}:{}/api/4/binned", node0.host, node0.port))?;
    query.append_to_url(&mut url);
    let url = url;
    info!("get_binned_json_common get {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_JSON)
        .body(Body::empty())?;
    let client = hyper::Client::new();
    let res = client.request(req).await?;
    if res.status() != StatusCode::OK {
        error!("get_binned_json_common client response {:?}", res);
    }
    let res = hyper::body::to_bytes(res.into_body()).await?;
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    info!("get_binned_json_common DONE time {} ms", ms);
    let res = String::from_utf8_lossy(&res).to_string();
    //info!("get_binned_json_common res: {}", res);
    let res: serde_json::Value = serde_json::from_str(res.as_str())?;
    info!(
        "result from endpoint: --------------\n{}\n--------------",
        serde_json::to_string_pretty(&res)?
    );
    // TODO enable in future:
    if false {
        if expect_finalised_range {
            if !res
                .get("finalisedRange")
                .ok_or(Error::with_msg("missing finalisedRange"))?
                .as_bool()
                .ok_or(Error::with_msg("key finalisedRange not bool"))?
            {
                return Err(Error::with_msg("expected finalisedRange"));
            }
        } else if res.get("finalisedRange").is_some() {
            return Err(Error::with_msg("expect absent finalisedRange"));
        }
    }
    if res.get("counts").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
    }
    if res.get("mins").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
    }
    if res.get("maxs").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
    }
    if res.get("avgs").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
    }
    Ok(())
}
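The four array-length checks at the end of get_binned_json_common differ only in the key, and they unwrap on missing keys. A small helper (expect_array_len is an invented name, not part of this commit) keeps the expectation explicit and turns missing or non-array keys into errors instead of panics:

use err::Error;
use serde_json::Value;

// Assert that res[key] is an array of exactly `want` elements.
fn expect_array_len(res: &Value, key: &str, want: usize) -> Result<(), Error> {
    let arr = res
        .get(key)
        .and_then(Value::as_array)
        .ok_or_else(|| Error::with_msg(format!("missing array {}", key)))?;
    if arr.len() != want {
        return Err(Error::with_msg(format!(
            "{}: expected {} bins, got {}",
            key,
            want,
            arr.len()
        )));
    }
    Ok(())
}

// for key in ["counts", "mins", "maxs", "avgs"] {
//     expect_array_len(&res, key, expect_bin_count as usize)?;
// }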
daqbufp2/src/test/events.rs (new file, 298 lines)
@@ -0,0 +1,298 @@
use crate::nodes::require_test_hosts_running;
use chrono::{DateTime, Utc};
use disk::agg::streams::{StatsItem, StreamItem};
use disk::binned::{NumOps, RangeCompletableItem, WithLen};
use disk::decode::EventValues;
use disk::events::{PlainEventsBinaryQuery, PlainEventsJsonQuery};
use disk::frame::inmem::InMemoryFrameAsyncReadStream;
use disk::frame::makeframe::FrameType;
use disk::streamlog::Streamlog;
use disk::Sitemty;
use err::Error;
use futures_util::{StreamExt, TryStreamExt};
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AppendToUrl, Channel, Cluster, HostPort, NanoRange, PerfOpts, APP_JSON, APP_OCTET};
use serde_json::Value as JsonValue;
use std::fmt::Debug;
use std::future::ready;
use tokio::io::AsyncRead;
use url::Url;

#[test]
fn get_plain_events_binary_0() {
    taskrun::run(get_plain_events_binary_0_inner()).unwrap();
}

async fn get_plain_events_binary_0_inner() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    if true {
        get_plain_events_binary::<i32>(
            "scalar-i32-be",
            "1970-01-01T00:20:10.000Z",
            "1970-01-01T00:20:50.000Z",
            cluster,
            true,
            4,
        )
        .await?;
    }
    Ok(())
}

async fn get_plain_events_binary<NTY>(
    channel_name: &str,
    beg_date: &str,
    end_date: &str,
    cluster: &Cluster,
    _expect_range_complete: bool,
    _expect_event_count: u64,
) -> Result<EventsResponse, Error>
where
    NTY: NumOps,
{
    let t1 = Utc::now();
    let node0 = &cluster.nodes[0];
    let beg_date: DateTime<Utc> = beg_date.parse()?;
    let end_date: DateTime<Utc> = end_date.parse()?;
    let channel_backend = "testbackend";
    let perf_opts = PerfOpts { inmem_bufcap: 512 };
    let channel = Channel {
        backend: channel_backend.into(),
        name: channel_name.into(),
    };
    let range = NanoRange::from_date_time(beg_date, end_date);
    let query = PlainEventsBinaryQuery::new(channel, range, 1024 * 4);
    let hp = HostPort::from_node(node0);
    let mut url = Url::parse(&format!("http://{}:{}", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    let url = url;
    info!("get_plain_events get {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_OCTET)
        .body(Body::empty())?;
    let client = hyper::Client::new();
    let res = client.request(req).await?;
    if res.status() != StatusCode::OK {
        error!("client response {:?}", res);
    }
    let s1 = disk::cache::HttpBodyAsAsyncRead::new(res);
    let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
    let res = consume_plain_events_binary::<NTY, _>(s2).await?;
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    info!("time {} ms", ms);
    // Validity is computed but not yet enforced for this endpoint; the
    // expect_* parameters above are likewise still unused.
    let _valid = res.is_valid();
    Ok(res)
}

#[derive(Debug)]
pub struct EventsResponse {
    event_count: u64,
    err_item_count: u64,
    data_item_count: u64,
    bytes_read: u64,
    range_complete_count: u64,
    log_item_count: u64,
    stats_item_count: u64,
}

impl EventsResponse {
    pub fn new() -> Self {
        Self {
            event_count: 0,
            err_item_count: 0,
            data_item_count: 0,
            bytes_read: 0,
            range_complete_count: 0,
            log_item_count: 0,
            stats_item_count: 0,
        }
    }

    pub fn is_valid(&self) -> bool {
        self.range_complete_count <= 1
    }
}

async fn consume_plain_events_binary<NTY, T>(inp: InMemoryFrameAsyncReadStream<T>) -> Result<EventsResponse, Error>
where
    NTY: NumOps,
    T: AsyncRead + Unpin,
{
    let s1 = inp
        .map_err(|e| error!("TEST GOT ERROR {:?}", e))
        .filter_map(|item| {
            let g = match item {
                Ok(item) => match item {
                    StreamItem::Log(item) => {
                        Streamlog::emit(&item);
                        None
                    }
                    StreamItem::Stats(item) => {
                        info!("Stats: {:?}", item);
                        None
                    }
                    StreamItem::DataItem(frame) => {
                        if frame.tyid() != <Sitemty<EventValues<NTY>> as FrameType>::FRAME_TYPE_ID {
                            error!("test receives unexpected tyid {:x}", frame.tyid());
                            None
                        } else {
                            match bincode::deserialize::<Sitemty<EventValues<NTY>>>(frame.buf()) {
                                Ok(item) => match item {
                                    Ok(item) => match item {
                                        StreamItem::Log(item) => {
                                            Streamlog::emit(&item);
                                            Some(Ok(StreamItem::Log(item)))
                                        }
                                        item => {
                                            info!("TEST GOT ITEM {:?}", item);
                                            Some(Ok(item))
                                        }
                                    },
                                    Err(e) => {
                                        error!("TEST GOT ERROR FRAME: {:?}", e);
                                        Some(Err(e))
                                    }
                                },
                                Err(e) => {
                                    error!("bincode error: {:?}", e);
                                    Some(Err(e.into()))
                                }
                            }
                        }
                    }
                },
                Err(e) => Some(Err(Error::with_msg(format!("WEIRD EMPTY ERROR {:?}", e)))),
            };
            ready(g)
        })
        .fold(EventsResponse::new(), |mut a, k| {
            let g = match k {
                Ok(StreamItem::Log(_item)) => {
                    a.log_item_count += 1;
                    a
                }
                Ok(StreamItem::Stats(item)) => match item {
                    StatsItem::EventDataReadStats(item) => {
                        a.bytes_read += item.parsed_bytes;
                        a
                    }
                },
                Ok(StreamItem::DataItem(item)) => match item {
                    RangeCompletableItem::RangeComplete => {
                        a.range_complete_count += 1;
                        a
                    }
                    RangeCompletableItem::Data(item) => {
                        a.data_item_count += 1;
                        a.event_count += WithLen::len(&item) as u64;
                        a
                    }
                },
                Err(_e) => {
                    a.err_item_count += 1;
                    a
                }
            };
            ready(g)
        });
    let ret = s1.await;
    info!("result: {:?}", ret);
    Ok(ret)
}

#[test]
fn get_plain_events_json_0() {
    taskrun::run(get_plain_events_json_0_inner()).unwrap();
}

async fn get_plain_events_json_0_inner() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    get_plain_events_json(
        "scalar-i32-be",
        "1970-01-01T00:20:10.000Z",
        "1970-01-01T00:20:12.000Z",
        cluster,
        true,
        4,
    )
    .await?;
    Ok(())
}

#[test]
fn get_plain_events_json_1() {
    taskrun::run(get_plain_events_json_1_inner()).unwrap();
}

async fn get_plain_events_json_1_inner() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    get_plain_events_json(
        "wave-f64-be-n21",
        "1970-01-01T00:20:10.000Z",
        "1970-01-01T00:20:12.000Z",
        cluster,
        true,
        4,
    )
    .await?;
    Ok(())
}

async fn get_plain_events_json(
    channel_name: &str,
    beg_date: &str,
    end_date: &str,
    cluster: &Cluster,
    _expect_range_complete: bool,
    _expect_event_count: u64,
) -> Result<(), Error> {
    let t1 = Utc::now();
    let node0 = &cluster.nodes[0];
    let beg_date: DateTime<Utc> = beg_date.parse()?;
    let end_date: DateTime<Utc> = end_date.parse()?;
    let channel_backend = "testbackend";
    let channel = Channel {
        backend: channel_backend.into(),
        name: channel_name.into(),
    };
    let range = NanoRange::from_date_time(beg_date, end_date);
    let query = PlainEventsJsonQuery::new(channel, range, 1024 * 4, false);
    let hp = HostPort::from_node(node0);
    let mut url = Url::parse(&format!("http://{}:{}/api/4/events", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    let url = url;
    info!("get_plain_events get {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_JSON)
        .body(Body::empty())?;
    let client = hyper::Client::new();
    let res = client.request(req).await?;
    if res.status() != StatusCode::OK {
        error!("client response {:?}", res);
    }
    let buf = hyper::body::to_bytes(res.into_body()).await?;
    let s = String::from_utf8_lossy(&buf);
    let res: JsonValue = serde_json::from_str(&s)?;
    info!("GOT: {}", serde_json::to_string_pretty(&res)?);
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    info!("time {} ms", ms);
    Ok(())
}