Clean up left todo items
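Summary: replace the remaining err::todoval() placeholders with concrete values (e.g. Shape::Wave(42)), move the shared test-host bootstrap out of test.rs into the new daqbuffer/src/nodes.rs module, drop the manual Node/Cluster/NodeConfig setup from simple_fetch in favor of nodes::require_test_hosts_running(), and fold get_binned_json_0_inner2 into a parameterized get_binned_json_common that now backs both get_binned_json_0 and the new get_binned_json_1.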
@@ -93,21 +93,10 @@ async fn go() -> Result<(), Error> {
 #[test]
 fn simple_fetch() {
     use netpod::Nanos;
-    use netpod::{
-        timeunits::*, ByteOrder, Channel, ChannelConfig, Cluster, Database, Node, NodeConfig, ScalarType, Shape,
-    };
+    use netpod::{timeunits::*, ByteOrder, Channel, ChannelConfig, ScalarType, Shape};
     taskrun::run(async {
+        let _rh = daqbuffer::nodes::require_test_hosts_running()?;
         let t1 = chrono::Utc::now();
-        let node = Node {
-            host: "localhost".into(),
-            listen: "0.0.0.0".into(),
-            port: 8360,
-            port_raw: 8360 + 100,
-            data_base_path: err::todoval(),
-            ksprefix: "daq_swissfel".into(),
-            split: 0,
-            backend: "testbackend".into(),
-        };
         let query = netpod::AggQuerySingleChannel {
             channel_config: ChannelConfig {
                 channel: Channel {
@@ -118,7 +107,7 @@ fn simple_fetch() {
                 time_bin_size: Nanos { ns: DAY },
                 array: true,
                 scalar_type: ScalarType::F64,
-                shape: Shape::Wave(err::todoval()),
+                shape: Shape::Wave(42),
                 byte_order: ByteOrder::big_endian(),
                 compression: true,
             },
@@ -126,23 +115,7 @@ fn simple_fetch() {
             tb_file_count: 1,
             buffer_size: 1024 * 8,
         };
-        let cluster = Cluster {
-            nodes: vec![node],
-            database: Database {
-                name: "daqbuffer".into(),
-                host: "localhost".into(),
-                user: "daqbuffer".into(),
-                pass: "daqbuffer".into(),
-            },
-        };
-        let node_config = NodeConfig {
-            name: format!("{}:{}", cluster.nodes[0].host, cluster.nodes[0].port),
-            cluster,
-        };
-        let node_config: Result<NodeConfigCached, Error> = node_config.into();
-        let node_config = node_config?;
         let query_string = serde_json::to_string(&query).unwrap();
-        let host = tokio::spawn(httpret::host(node_config.clone()));
         let req = hyper::Request::builder()
             .method(http::Method::POST)
             .uri("http://localhost:8360/api/4/parsed_raw")
@@ -176,8 +149,6 @@ fn simple_fetch() {
             ntot / 1024 / 1024,
             throughput
         );
-        drop(host);
-        //Err::<(), _>(format!("test error").into())
         Ok(())
     })
     .unwrap();

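With the manual Node/Cluster/NodeConfig setup and the tokio::spawn(httpret::host(..)) call removed, simple_fetch now only builds the AggQuerySingleChannel and POSTs it as JSON to /api/4/parsed_raw on port 8360; require_test_hosts_running() is what guarantees a node is listening there.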
@@ -6,6 +6,7 @@ use tracing::{debug, error, info, trace, warn};

 pub mod cli;
 pub mod client;
+pub mod nodes;
 #[cfg(test)]
 pub mod test;

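Note that pub mod nodes; is declared outside the #[cfg(test)] gate that guards pub mod test;. That appears deliberate: simple_fetch above reaches the helper through the external daqbuffer::nodes:: path, which only works if the module is part of the normal build.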
daqbuffer/src/nodes.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
+use crate::spawn_test_hosts;
+use err::Error;
+use netpod::{Cluster, Database, Node};
+use std::sync::{Arc, Mutex};
+use tokio::task::JoinHandle;
+
+pub struct RunningHosts {
+    pub cluster: Cluster,
+    _jhs: Vec<JoinHandle<Result<(), Error>>>,
+}
+
+lazy_static::lazy_static! {
+    static ref HOSTS_RUNNING: Mutex<Option<Arc<RunningHosts>>> = Mutex::new(None);
+}
+
+pub fn require_test_hosts_running() -> Result<Arc<RunningHosts>, Error> {
+    let mut g = HOSTS_RUNNING.lock().unwrap();
+    match g.as_ref() {
+        None => {
+            let cluster = test_cluster();
+            let jhs = spawn_test_hosts(cluster.clone());
+            let ret = RunningHosts {
+                cluster: cluster.clone(),
+                _jhs: jhs,
+            };
+            let a = Arc::new(ret);
+            *g = Some(a.clone());
+            Ok(a)
+        }
+        Some(gg) => Ok(gg.clone()),
+    }
+}
+
+fn test_cluster() -> Cluster {
+    let nodes = (0..3)
+        .into_iter()
+        .map(|id| Node {
+            host: "localhost".into(),
+            listen: "0.0.0.0".into(),
+            port: 8360 + id as u16,
+            port_raw: 8360 + id as u16 + 100,
+            data_base_path: format!("../tmpdata/node{:02}", id).into(),
+            ksprefix: "ks".into(),
+            split: id,
+            backend: "testbackend".into(),
+        })
+        .collect();
+    Cluster {
+        nodes: nodes,
+        database: Database {
+            name: "daqbuffer".into(),
+            host: "localhost".into(),
+            user: "daqbuffer".into(),
+            pass: "daqbuffer".into(),
+        },
+    }
+}
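The Mutex<Option<Arc<RunningHosts>>> behind lazy_static! memoizes the cluster per process: the first test to call require_test_hosts_running() pays the spawn cost, later callers get a clone of the same Arc, and the _jhs field keeps the host JoinHandles alive for as long as any caller holds the handle. A minimal sketch of a consuming test (hypothetical, not part of this commit; it mirrors the taskrun::run pattern used by simple_fetch above):

    // Hypothetical test, for illustration only.
    #[test]
    fn shares_test_hosts() {
        taskrun::run(async {
            // First caller spawns the three-node cluster; later calls reuse it.
            let rh = daqbuffer::nodes::require_test_hosts_running()?;
            assert_eq!(rh.cluster.nodes.len(), 3);
            Ok(())
        })
        .unwrap();
    }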
@@ -1,4 +1,4 @@
-use crate::spawn_test_hosts;
+use crate::nodes::require_test_hosts_running;
 use bytes::BytesMut;
 use chrono::{DateTime, Utc};
 use disk::agg::streams::{StatsItem, StreamItem};
@@ -14,68 +14,14 @@ use futures_util::TryStreamExt;
 use http::StatusCode;
 use hyper::Body;
 use netpod::log::*;
-use netpod::{AggKind, Channel, Cluster, Database, HostPort, NanoRange, Node, PerfOpts};
+use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange, PerfOpts};
 use serde::de::DeserializeOwned;
 use std::fmt::Debug;
 use std::future::ready;
-use std::sync::{Arc, Mutex};
 use tokio::io::AsyncRead;
-use tokio::task::JoinHandle;

 pub mod json;

-struct RunningHosts {
-    cluster: Cluster,
-    _jhs: Vec<JoinHandle<Result<(), Error>>>,
-}
-
-lazy_static::lazy_static! {
-    static ref HOSTS_RUNNING: Mutex<Option<Arc<RunningHosts>>> = Mutex::new(None);
-}
-
-fn require_test_hosts_running() -> Result<Arc<RunningHosts>, Error> {
-    let mut g = HOSTS_RUNNING.lock().unwrap();
-    match g.as_ref() {
-        None => {
-            let cluster = test_cluster();
-            let jhs = spawn_test_hosts(cluster.clone());
-            let ret = RunningHosts {
-                cluster: cluster.clone(),
-                _jhs: jhs,
-            };
-            let a = Arc::new(ret);
-            *g = Some(a.clone());
-            Ok(a)
-        }
-        Some(gg) => Ok(gg.clone()),
-    }
-}
-
-fn test_cluster() -> Cluster {
-    let nodes = (0..3)
-        .into_iter()
-        .map(|id| Node {
-            host: "localhost".into(),
-            listen: "0.0.0.0".into(),
-            port: 8360 + id as u16,
-            port_raw: 8360 + id as u16 + 100,
-            data_base_path: format!("../tmpdata/node{:02}", id).into(),
-            ksprefix: "ks".into(),
-            split: id,
-            backend: "testbackend".into(),
-        })
-        .collect();
-    Cluster {
-        nodes: nodes,
-        database: Database {
-            name: "daqbuffer".into(),
-            host: "localhost".into(),
-            user: "daqbuffer".into(),
-            pass: "daqbuffer".into(),
-        },
-    }
-}
-
 #[test]
 fn get_binned_binary() {
     taskrun::run(get_binned_binary_inner()).unwrap();
@@ -1,4 +1,4 @@
-use crate::test::require_test_hosts_running;
+use crate::nodes::require_test_hosts_running;
 use chrono::{DateTime, Utc};
 use disk::binned::query::BinnedQuery;
 use err::Error;
@@ -6,6 +6,7 @@ use http::StatusCode;
 use hyper::Body;
 use netpod::log::*;
 use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange};
+use std::time::Duration;

 #[test]
 fn get_binned_json_0() {
@@ -15,22 +16,46 @@ fn get_binned_json_0() {
 async fn get_binned_json_0_inner() -> Result<(), Error> {
     let rh = require_test_hosts_running()?;
     let cluster = &rh.cluster;
-    get_binned_json_0_inner2(
-        "wave-f64-be-n21",
+    get_binned_json_common(
+        "scalar-i32-be",
         "1970-01-01T00:20:10.000Z",
         "1970-01-01T01:20:30.000Z",
         10,
         cluster,
+        13,
+        true,
     )
     .await
 }

-async fn get_binned_json_0_inner2(
+#[test]
+fn get_binned_json_1() {
+    taskrun::run(get_binned_json_1_inner()).unwrap();
+}
+
+async fn get_binned_json_1_inner() -> Result<(), Error> {
+    let rh = require_test_hosts_running()?;
+    let cluster = &rh.cluster;
+    get_binned_json_common(
+        "wave-f64-be-n21",
+        "1970-01-01T00:20:10.000Z",
+        "1970-01-01T01:20:45.000Z",
+        10,
+        cluster,
+        13,
+        true,
+    )
+    .await
+}
+
+async fn get_binned_json_common(
     channel_name: &str,
     beg_date: &str,
     end_date: &str,
     bin_count: u32,
     cluster: &Cluster,
+    expect_bin_count: u32,
+    expect_finalised_range: bool,
 ) -> Result<(), Error> {
     let t1 = Utc::now();
     let agg_kind = AggKind::DimXBins1;
@@ -43,7 +68,8 @@ async fn get_binned_json_0_inner2(
         name: channel_name.into(),
     };
     let range = NanoRange::from_date_time(beg_date, end_date);
-    let query = BinnedQuery::new(channel, range, bin_count, agg_kind);
+    let mut query = BinnedQuery::new(channel, range, bin_count, agg_kind);
+    query.set_timeout(Duration::from_millis(15000));
     let url = query.url(&HostPort::from_node(node0));
     info!("get_binned_json_0 get {}", url);
     let req = hyper::Request::builder()
@@ -57,14 +83,38 @@ async fn get_binned_json_0_inner2(
         error!("client response {:?}", res);
     }
     let res = hyper::body::to_bytes(res.into_body()).await?;
-    let t2 = chrono::Utc::now();
-    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
-    info!("get_binned_json_0 DONE time {} ms", ms);
     let res = String::from_utf8(res.to_vec())?;
     let res: serde_json::Value = serde_json::from_str(res.as_str())?;
+    info!(
+        "result from endpoint: --------------\n{}\n--------------",
+        serde_json::to_string_pretty(&res)?
+    );
+    let t2 = chrono::Utc::now();
+    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
+    info!("get_binned_json_0 DONE time {} ms", ms);
+    if expect_finalised_range {
+        if !res
+            .get("finalisedRange")
+            .ok_or(Error::with_msg("missing finalisedRange"))?
+            .as_bool()
+            .ok_or(Error::with_msg("key finalisedRange not bool"))?
+        {
+            return Err(Error::with_msg("expected finalisedRange"));
+        }
+    } else if res.get("finalisedRange").is_some() {
+        return Err(Error::with_msg("expect absent finalisedRange"));
+    }
+    if res.get("counts").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
+        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
+    }
+    if res.get("mins").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
+        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
+    }
+    if res.get("maxs").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
+        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
+    }
+    if res.get("avgs").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
+        return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
+    }
     Ok(())
 }
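For reference, a sketch of the response document these assertions imply. The key names come from the checks above; the values are invented for illustration (13 bins, matching expect_bin_count):

    {
      "finalisedRange": true,
      "counts": [3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600],
      "mins": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
      "maxs": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
      "avgs": [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    }

finalisedRange must be present and true when expect_finalised_range is set, and absent otherwise.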