Move workspace crates into subfolder

This commit is contained in:
Dominik Werder
2023-07-10 14:45:25 +02:00
parent 8938e55f86
commit 30c7fcb1e5
212 changed files with 246 additions and 41 deletions

View File

@@ -0,0 +1,137 @@
use crate::err::ErrConv;
use chrono::DateTime;
use chrono::Utc;
use disk::streamlog::Streamlog;
use err::Error;
use futures_util::TryStreamExt;
use http::StatusCode;
use httpclient::HttpBodyAsAsyncRead;
use hyper::Body;
use items_0::streamitem::StreamItem;
use netpod::log::*;
use netpod::query::CacheUsage;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
use netpod::ByteSize;
use netpod::HostPort;
use netpod::SfDbChannel;
use netpod::APP_OCTET;
use query::api4::binned::BinnedQuery;
use streams::frames::inmem::InMemoryFrameAsyncReadStream;
use url::Url;
/// Fetch `/api/4/node_status` from the given host, print the raw response
/// body to stdout and log the request duration.
pub async fn status(host: String, port: u16) -> Result<(), Error> {
    let t_start = Utc::now();
    let uri = format!("http://{}:{}/api/4/node_status", host, port);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(uri)
        .body(Body::empty())
        .ec()?;
    let res = hyper::Client::new().request(req).await.ec()?;
    if StatusCode::OK != res.status() {
        error!("Server error {:?}", res);
        return Err(Error::with_msg(format!("Server error {:?}", res)));
    }
    let body = hyper::body::to_bytes(res.into_body()).await.ec()?;
    let text = String::from_utf8(body.to_vec())?;
    let dt_ms = Utc::now().signed_duration_since(t_start).num_milliseconds() as u64;
    info!("node_status DONE duration: {} ms", dt_ms);
    println!("{}", text);
    Ok(())
}
/// Fetch binned data for a channel via the HTTP `/api/4/binned` endpoint and
/// consume the framed binary response stream, emitting server-side log and
/// stats items as they arrive.
///
/// NOTE(review): data frames are not decoded yet (`err::todo()` in the data
/// arm), so the reported download size is always zero.
pub async fn get_binned(
    host: String,
    port: u16,
    channel_backend: String,
    channel_name: String,
    beg_date: DateTime<Utc>,
    end_date: DateTime<Utc>,
    bin_count: u32,
    cache_usage: CacheUsage,
    disk_stats_every_kb: u32,
) -> Result<(), Error> {
    info!("------- get_binned client");
    info!("channel {}", channel_name);
    info!("beg {}", beg_date);
    info!("end {}", end_date);
    info!("-------");
    let t1 = Utc::now();
    let channel = SfDbChannel::from_name(channel_backend, channel_name);
    let range = NanoRange::from_date_time(beg_date, end_date).into();
    // TODO this was before fixed using AggKind::DimXBins1
    let mut query = BinnedQuery::new(channel, range, bin_count).for_time_weighted_scalar();
    query.set_cache_usage(cache_usage);
    query.set_disk_stats_every(ByteSize(1024 * disk_stats_every_kb));
    let hp = HostPort { host, port };
    let mut url = Url::parse(&format!("http://{}:{}/api/4/binned", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    let url = url;
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_OCTET)
        .body(Body::empty())
        .ec()?;
    let client = hyper::Client::new();
    let res = client.request(req).await.ec()?;
    if res.status() != StatusCode::OK {
        error!("Server error {:?}", res);
        // Include the body text in the error to surface the server's message.
        let (head, body) = res.into_parts();
        let buf = hyper::body::to_bytes(body).await.ec()?;
        let s = String::from_utf8_lossy(&buf);
        return Err(Error::with_msg(format!(
            concat!(
                "Server error {:?}\n",
                "---------------------- message from http body:\n",
                "{}\n",
                "---------------------- end of http body",
            ),
            head, s
        )));
    }
    let s1 = HttpBodyAsAsyncRead::new(res);
    let s2 = InMemoryFrameAsyncReadStream::new(s1, ByteSize::from_kb(8));
    use futures_util::StreamExt;
    use std::future::ready;
    let s3 = s2
        .map_err(|e| error!("get_binned {:?}", e))
        .filter_map(|item| {
            let g = match item {
                Ok(item) => match item {
                    StreamItem::Log(item) => {
                        Streamlog::emit(&item);
                        None
                    }
                    StreamItem::Stats(item) => {
                        info!("Stats: {:?}", item);
                        None
                    }
                    StreamItem::DataItem(_frame) => {
                        // TODO
                        // The expected type nowadays depends on the channel and agg-kind.
                        err::todo();
                        Some(Ok(()))
                    }
                },
                Err(e) => Some(Err(Error::with_msg(format!("{:?}", e)))),
            };
            ready(g)
        })
        .for_each(|_| ready(()));
    s3.await;
    let t2 = chrono::Utc::now();
    // TODO byte counting of downloaded frames is not implemented yet.
    let ntot = 0;
    // Clamp to at least 1 ms: a sub-millisecond response would otherwise
    // cause an integer division by zero (panic) in the throughput math.
    let ms = (t2.signed_duration_since(t1).num_milliseconds() as u64).max(1);
    let throughput = ntot / 1024 * 1000 / ms;
    info!(
        "get_cached_0 DONE total download {} MB throughput {:5} kB/s bin_count {}",
        ntot / 1024 / 1024,
        throughput,
        bin_count,
    );
    Ok(())
}

View File

@@ -0,0 +1,38 @@
pub mod client;
pub mod err;
pub mod nodes;
#[cfg(test)]
pub mod test;
use ::err::Error;
use futures_util::TryFutureExt;
use netpod::{Cluster, NodeConfig, NodeConfigCached, ProxyConfig};
use tokio::task::JoinHandle;
/// Spawn one HTTP service task per node of the given cluster and return the
/// join handles (one per node, in cluster order).
///
/// Panics if a node config fails validation: this is test support code, so a
/// broken config should abort the test run immediately with a clear message.
pub fn spawn_test_hosts(cluster: Cluster) -> Vec<JoinHandle<Result<(), Error>>> {
    let mut ret = Vec::with_capacity(cluster.nodes.len());
    for node in &cluster.nodes {
        let node_config = NodeConfig {
            cluster: cluster.clone(),
            name: format!("{}:{}", node.host, node.port),
        };
        let node_config: Result<NodeConfigCached, Error> = node_config.into();
        // `expect` instead of bare `unwrap` so a bad test config names itself.
        let node_config = node_config.expect("invalid test node config");
        let h = tokio::spawn(httpret::host(node_config).map_err(Error::from));
        ret.push(h);
    }
    // TODO spawn also two proxy nodes
    ret
}
pub async fn run_node(node_config: NodeConfigCached) -> Result<(), Error> {
httpret::host(node_config).await?;
Ok(())
}
pub async fn run_proxy(proxy_config: ProxyConfig) -> Result<(), Error> {
httpret::proxy::proxy(proxy_config).await?;
Ok(())
}

View File

@@ -0,0 +1,18 @@
pub trait ErrConv<T> {
fn ec(self) -> Result<T, ::err::Error>;
}
pub trait Convable: ToString {}
impl<T, E: Convable> ErrConv<T> for Result<T, E> {
fn ec(self) -> Result<T, ::err::Error> {
match self {
Ok(x) => Ok(x),
Err(e) => Err(::err::Error::from_string(e.to_string())),
}
}
}
impl Convable for http::Error {}
impl Convable for hyper::Error {}
impl Convable for tokio::task::JoinError {}

View File

@@ -0,0 +1,115 @@
use crate::spawn_test_hosts;
use err::Error;
use netpod::log::*;
use netpod::Cluster;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use tokio::task::JoinHandle;
/// Handle to a spawned default test cluster; holding it keeps the host task
/// join handles alive for as long as a test needs the cluster.
pub struct RunningHosts {
    pub cluster: Cluster,
    // Join handles of the spawned host tasks; kept only for ownership.
    _jhs: Vec<JoinHandle<Result<(), Error>>>,
}

impl Drop for RunningHosts {
    fn drop(&mut self) {
        // Loud marker so the teardown point is visible in test logs.
        netpod::log::info!("\n\n+++++++++++++++++++ impl Drop for RunningHost\n\n");
    }
}

/// Handle to a spawned SLS-flavored test host; see `RunningHosts`.
pub struct RunningSlsHost {
    pub cluster: Cluster,
    // Join handles of the spawned host tasks; kept only for ownership.
    _jhs: Vec<JoinHandle<Result<(), Error>>>,
}

impl Drop for RunningSlsHost {
    fn drop(&mut self) {
        netpod::log::info!("\n\n+++++++++++++++++++ impl Drop for RunningSlsHost\n\n");
    }
}

/// Handle to a spawned archiver-appliance test host; see `RunningHosts`.
pub struct RunningArchappHost {
    pub cluster: Cluster,
    // Join handles of the spawned host tasks; kept only for ownership.
    _jhs: Vec<JoinHandle<Result<(), Error>>>,
}

impl Drop for RunningArchappHost {
    fn drop(&mut self) {
        netpod::log::info!("\n\n+++++++++++++++++++ impl Drop for RunningArchappHost\n\n");
    }
}
lazy_static::lazy_static! {
    // Process-wide singletons: tests that need a cluster share one spawned
    // instance per flavor (see the require_* functions below) instead of
    // each spawning their own.
    static ref HOSTS_RUNNING: Mutex<Option<Arc<RunningHosts>>> = Mutex::new(None);
    static ref SLS_HOST_RUNNING: Mutex<Option<Arc<RunningSlsHost>>> = Mutex::new(None);
    static ref ARCHAPP_HOST_RUNNING: Mutex<Option<Arc<RunningArchappHost>>> = Mutex::new(None);
}
/// Return the shared default test cluster, spawning it on first use.
///
/// The global mutex serializes first-time startup so at most one cluster
/// instance ever exists per process.
pub fn require_test_hosts_running() -> Result<Arc<RunningHosts>, Error> {
    let mut guard = HOSTS_RUNNING.lock().unwrap();
    if let Some(existing) = guard.as_ref() {
        debug!("\n\n+++++++++++++++++++ REUSE RunningHost\n\n");
        return Ok(existing.clone());
    }
    info!("\n\n+++++++++++++++++++ MAKE NEW RunningHosts\n\n");
    let cluster = netpod::test_cluster();
    let jhs = spawn_test_hosts(cluster.clone());
    let hosts = Arc::new(RunningHosts { cluster, _jhs: jhs });
    *guard = Some(hosts.clone());
    // TODO check in different way that test hosts are up, sockets connected, ready for testing
    thread::sleep(Duration::from_millis(400));
    Ok(hosts)
}
/// Return the shared SLS test host, spawning it on first use.
pub fn require_sls_test_host_running() -> Result<Arc<RunningSlsHost>, Error> {
    let mut guard = SLS_HOST_RUNNING.lock().unwrap();
    if let Some(existing) = guard.as_ref() {
        netpod::log::debug!("\n\n+++++++++++++++++++ REUSE RunningSlsHost\n\n");
        return Ok(existing.clone());
    }
    netpod::log::info!("\n\n+++++++++++++++++++ MAKE NEW RunningSlsHost\n\n");
    let cluster = netpod::sls_test_cluster();
    let jhs = spawn_test_hosts(cluster.clone());
    let host = Arc::new(RunningSlsHost { cluster, _jhs: jhs });
    *guard = Some(host.clone());
    Ok(host)
}
/// Return the shared archiver-appliance test host, spawning it on first use.
pub fn require_archapp_test_host_running() -> Result<Arc<RunningArchappHost>, Error> {
    let mut guard = ARCHAPP_HOST_RUNNING.lock().unwrap();
    if let Some(existing) = guard.as_ref() {
        netpod::log::debug!("\n\n+++++++++++++++++++ REUSE RunningArchappHost\n\n");
        return Ok(existing.clone());
    }
    netpod::log::info!("\n\n+++++++++++++++++++ MAKE NEW RunningArchappHost\n\n");
    let cluster = netpod::archapp_test_cluster();
    let jhs = spawn_test_hosts(cluster.clone());
    let host = Arc::new(RunningArchappHost { cluster, _jhs: jhs });
    *guard = Some(host.clone());
    Ok(host)
}

View File

@@ -0,0 +1,42 @@
#[cfg(test)]
mod api1;
#[cfg(test)]
mod api4;
pub mod archapp;
pub mod binnedjson;
#[cfg(test)]
mod timeweightedjson;
use bytes::BytesMut;
use err::Error;
use std::future::Future;
/// Run the given future to completion on the shared test runtime.
///
/// Entering the runtime before `block_on` makes the tokio context available
/// to any code that touches it while `f` is still being constructed.
fn run_test<F>(f: F) -> Result<(), Error>
where
    F: Future<Output = Result<(), Error>> + Send,
{
    let runtime = taskrun::get_runtime();
    let _g = runtime.enter();
    runtime.block_on(f)
}
/// Sanity-check `BytesMut` semantics: how `split_to` and `advance` affect
/// the lengths and capacities of the two halves of a buffer.
#[test]
fn bufs() {
    use bytes::{Buf, BufMut};
    let mut buf = BytesMut::with_capacity(1024);
    assert_eq!(buf.as_mut().len(), 0);
    buf.put_u32_le(123);
    assert_eq!(buf.as_mut().len(), 4);
    let mut head = buf.split_to(4);
    assert_eq!(head.capacity(), 4);
    head.advance(2);
    assert_eq!(head.capacity(), 2);
    head.advance(2);
    assert_eq!(head.capacity(), 0);
    assert_eq!(buf.capacity(), 1020);
    assert_eq!(buf.remaining(), 0);
    assert!(buf.remaining_mut() >= 1020);
    assert_eq!(buf.capacity(), 1020);
}

View File

@@ -0,0 +1,122 @@
mod data_api_python;
use crate::nodes::require_test_hosts_running;
use err::Error;
use futures_util::Future;
use httpclient::http_post;
use netpod::log::*;
use netpod::query::api1::Api1Query;
use netpod::query::api1::Api1Range;
use netpod::query::api1::ChannelTuple;
use netpod::APP_OCTET;
use parse::api1_parse;
use parse::api1_parse::Api1Frame;
use parse::api1_parse::Api1ScalarType;
use std::fmt;
use url::Url;
const TEST_BACKEND: &str = "testbackend-00";
/// Drive a test future to completion on the shared task runner, returning
/// its result.
fn testrun<T, F>(fut: F) -> Result<T, Error>
where
    F: Future<Output = Result<T, Error>>,
{
    taskrun::run(fut)
}
/// Return `true` if the iterator's items are strictly increasing
/// (each item greater than its predecessor). Empty and single-element
/// sequences are trivially monotonic.
///
/// The unused `fmt::Debug` bound on the item type has been removed.
fn is_monotonic_strict<I>(it: I) -> bool
where
    I: Iterator,
    <I as Iterator>::Item: PartialOrd,
{
    let mut last: Option<I::Item> = None;
    for x in it {
        if let Some(prev) = last.as_ref() {
            if x <= *prev {
                return false;
            }
        }
        last = Some(x);
    }
    true
}
// NOTE(review): "monitonic" is a typo for "monotonic" in the test name;
// kept as-is so anything filtering tests by name keeps working.
#[test]
fn test_is_monitonic_strict() {
    assert_eq!(is_monotonic_strict([1, 2, 3].iter()), true);
    assert_eq!(is_monotonic_strict([1, 2, 2].iter()), false);
}
// End-to-end check of the api/1 query endpoint: POSTs a JSON query and
// parses the binary frame stream of the response, then validates the
// header frame, strict monotonicity of timestamps/pulses/values, and the
// value range. Currently disabled via the early return below.
#[test]
fn events_f64_plain() -> Result<(), Error> {
    // TODO re-enable with in-memory generated config and event data.
    if true {
        return Ok(());
    }
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let node = &cluster.nodes[0];
        let url: Url = format!("http://{}:{}/api/1/query", node.host, node.port).parse()?;
        let accept = APP_OCTET;
        let range = Api1Range::new("1970-01-01T00:00:00Z".try_into()?, "1970-01-01T00:01:00Z".try_into()?)?;
        // TODO the channel list needs to get pre-processed to check for backend prefix!
        let ch = ChannelTuple::new(TEST_BACKEND.into(), "test-gen-i32-dim0-v01".into());
        let qu = Api1Query::new(range, vec![ch]);
        let body = serde_json::to_string(&qu)?;
        let buf = http_post(url, accept, body.into()).await?;
        eprintln!("body received: {}", buf.len());
        match api1_parse::api1_frames::<parse::nom::error::VerboseError<_>>(&buf) {
            Ok((_, frames)) => {
                debug!("FRAMES LEN: {}", frames.len());
                // Expected frame count for this one-minute range.
                assert_eq!(frames.len(), 121);
                // The first frame must be a header declaring scalar i32 data.
                if let Api1Frame::Header(header) = &frames[0] {
                    if let Api1ScalarType::I32 = header.header().ty() {
                    } else {
                        panic!("bad scalar type")
                    }
                } else {
                    panic!("bad header")
                }
                // Collect timestamps from the data frames.
                let tss: Vec<_> = frames
                    .iter()
                    .filter_map(|f| match f {
                        api1_parse::Api1Frame::Data(d) => Some(d.ts()),
                        _ => None,
                    })
                    .collect();
                // Collect pulse ids from the data frames.
                let pulses: Vec<_> = frames
                    .iter()
                    .filter_map(|f| match f {
                        api1_parse::Api1Frame::Data(d) => Some(d.pulse()),
                        _ => None,
                    })
                    .collect();
                // Decode each data payload as a single big-endian i32.
                let values: Vec<_> = frames
                    .iter()
                    .filter_map(|f| match f {
                        api1_parse::Api1Frame::Data(d) => {
                            let val = i32::from_be_bytes(d.data().try_into().unwrap());
                            Some(val)
                        }
                        _ => None,
                    })
                    .collect();
                assert_eq!(is_monotonic_strict(tss.iter()), true);
                assert_eq!(is_monotonic_strict(pulses.iter()), true);
                assert_eq!(is_monotonic_strict(values.iter()), true);
                for &val in &values {
                    assert!(val >= 0);
                    assert!(val < 120);
                }
            }
            Err(e) => {
                error!("can not parse result: {e}");
                panic!()
            }
        };
        Ok(())
    };
    testrun(fut)?;
    Ok(())
}

View File

@@ -0,0 +1,110 @@
use crate::err::ErrConv;
use crate::nodes::require_test_hosts_running;
use chrono::Utc;
use err::Error;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::timeunits::MS;
use netpod::Cluster;
use netpod::HostPort;
use netpod::SfDbChannel;
use netpod::APP_JSON;
use netpod::DATETIME_FMT_3MS;
use parse::api1_parse::api1_frames;
use parse::api1_parse::Api1Frame;
use parse::api1_parse::Api1ScalarType;
use url::Url;
const TEST_BACKEND: &str = "testbackend-00";
// Fetches all data, not streaming, meant for basic test cases that fit in memory.
// POSTs an api/1-style JSON query to node 0 of the cluster and returns the
// raw response body bytes.
async fn fetch_data_api_python_blob(
    channels: Vec<SfDbChannel>,
    beg_date: &str,
    end_date: &str,
    cluster: &Cluster,
) -> Result<Vec<u8>, Error> {
    let t1 = Utc::now();
    let node0 = &cluster.nodes[0];
    let beg_date = beg_date.parse()?;
    let end_date = end_date.parse()?;
    // Parsed range is currently unused; dates are re-formatted for the query below.
    let _range = NanoRange::from_date_time(beg_date, end_date);
    let start_date = beg_date.format(DATETIME_FMT_3MS).to_string();
    let end_date = end_date.format(DATETIME_FMT_3MS).to_string();
    // NOTE(review): "create_errors" looks like a server-side fault-injection
    // flag baked into every request from this helper — confirm it is intended.
    let query = serde_json::json!({
        "range": {
            "type": "date",
            "startDate": start_date,
            "endDate": end_date,
        },
        "channels": channels.iter().map(|x| x.name()).collect::<Vec<_>>(),
        "create_errors": "nodenet_parse_query",
    });
    let query_str = serde_json::to_string_pretty(&query)?;
    let hp = HostPort::from_node(node0);
    let url = Url::parse(&format!("http://{}:{}/api/1/query", hp.host, hp.port))?;
    info!("http get {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::POST)
        .uri(url.to_string())
        .header(http::header::CONTENT_TYPE, APP_JSON)
        //.header(http::header::ACCEPT, APP_JSON)
        .body(Body::from(query_str))
        .ec()?;
    let client = hyper::Client::new();
    let res = client.request(req).await.ec()?;
    if res.status() != StatusCode::OK {
        error!("client response {:?}", res);
        return Err(Error::with_msg_no_trace(format!("bad result {res:?}")));
    }
    let buf = hyper::body::to_bytes(res.into_body()).await.ec()?;
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    // TODO add timeout
    info!("time {} ms body len {}", ms, buf.len());
    Ok(buf.into())
}
// Fetch a scalar i32 channel over api/1 and verify frame structure:
// header type, per-frame timestamps, and total frame count.
#[test]
fn api3_hdf_dim0_00() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = fetch_data_api_python_blob(
            vec![SfDbChannel::from_name(TEST_BACKEND, "test-gen-i32-dim0-v00")],
            "1970-01-01T00:20:04.000Z",
            "1970-01-01T00:21:10.000Z",
            cluster,
        )
        .await?;
        use nom::error::VerboseError;
        use parse::nom;
        // The two `if false` blocks below only demonstrate alternative error
        // type choices for the parser; they are intentionally never run.
        if false {
            // Uses the default error type, but not unwrapped:
            let _x: nom::IResult<_, _> = api1_frames(&jsv);
        }
        if false {
            // Default error and unwrapped:
            let (_, _) = api1_frames::<nom::error::Error<_>>(&jsv).unwrap();
        }
        let (_, frames) = api1_frames::<VerboseError<_>>(&jsv).unwrap();
        // First frame must be a header declaring scalar i32 data.
        if let Api1Frame::Header(header) = frames.get(0).expect("frame") {
            assert_eq!(header.header().ty(), Api1ScalarType::I32);
        } else {
            panic!("expect header frame");
        }
        // Remaining frames are data events spaced one second apart,
        // starting at 00:20:04.
        for (i, frame) in frames[1..].iter().enumerate() {
            if let Api1Frame::Data(data) = frame {
                assert_eq!(data.ts() / MS, 1000 * (60 * 20 + 4 + i as u64));
                eprintln!("ts {}", data.ts() / MS);
            } else {
                panic!("expect data frame");
            }
        }
        assert_eq!(frames.len(), 67);
        Ok(())
    };
    taskrun::run(fut)
}

View File

@@ -0,0 +1,4 @@
pub mod binnedjson;
pub mod common;
pub mod eventsjson;
pub mod pulseiddiff;

View File

@@ -0,0 +1,381 @@
use crate::err::ErrConv;
use crate::nodes::require_test_hosts_running;
use chrono::Utc;
use err::Error;
use http::StatusCode;
use hyper::Body;
use items_0::test::f32_iter_cmp_near;
use items_0::test::f64_iter_cmp_near;
use items_0::WithLen;
use items_2::binsdim0::BinsDim0CollectedResult;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
use netpod::Cluster;
use netpod::HostPort;
use netpod::SfDbChannel;
use netpod::APP_JSON;
use query::api4::binned::BinnedQuery;
use serde_json::Value as JsonValue;
use url::Url;
const TEST_BACKEND: &str = "testbackend-00";
/// Build a time-weighted binned query for the given test channel,
/// ISO-8601 date range and minimum bin count.
fn make_query<S: Into<String>>(
    name: S,
    beg_date: &str,
    end_date: &str,
    bin_count_min: u32,
) -> Result<BinnedQuery, Error> {
    let channel = SfDbChannel::from_name(TEST_BACKEND, name);
    let range = NanoRange::from_date_time(beg_date.parse()?, end_date.parse()?).into();
    Ok(BinnedQuery::new(channel, range, bin_count_min).for_time_weighted_scalar())
}
// Binned query over a short range (8 bins of 5 s): verifies bin edges,
// counts and per-bin min/max/avg of the i32 test channel.
// Fixes: log-message typo "Receveided"; redundant `.into_iter()` on ranges;
// `.map(|x| *x)` replaced by the idiomatic `.copied()`.
#[test]
fn binned_d0_json_00() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = get_binned_json(
            SfDbChannel::from_name(TEST_BACKEND, "test-gen-i32-dim0-v01"),
            "1970-01-01T00:20:04.000Z",
            "1970-01-01T00:20:37.000Z",
            6,
            cluster,
        )
        .await?;
        debug!("Received a response json value: {jsv:?}");
        let res: BinsDim0CollectedResult<i32> = serde_json::from_value(jsv)?;
        assert_eq!(res.range_final(), true);
        assert_eq!(res.len(), 8);
        assert_eq!(res.ts_anchor_sec(), 1200);
        {
            let a1: Vec<_> = res.ts1_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..8).map(|x| 5000 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.ts2_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..8).map(|x| 5000 + 5000 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.counts().iter().copied().collect();
            let a2: Vec<_> = (0..8).map(|_| 10).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.mins().iter().copied().collect();
            let a2: Vec<_> = (0..8).map(|x| 2400 + 10 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.maxs().iter().copied().collect();
            let a2: Vec<_> = (0..8).map(|x| 2409 + 10 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.avgs().iter().copied().collect();
            let a2: Vec<_> = (0..8).map(|x| 2404.5 + 10. * x as f32).collect();
            assert_eq!(f32_iter_cmp_near(a1, a2, 0.01, 0.01), true);
        }
        Ok(())
    };
    taskrun::run(fut)
}
// Binned query over ~20 minutes (11 bins of 120 s): same checks as
// binned_d0_json_00 at a coarser bin width.
// Fixes: log-message typo "Receveided"; redundant `.into_iter()`; `.copied()`.
#[test]
fn binned_d0_json_01a() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = get_binned_json(
            SfDbChannel::from_name(TEST_BACKEND, "test-gen-i32-dim0-v01"),
            "1970-01-01T00:20:10.000Z",
            "1970-01-01T00:40:30.000Z",
            10,
            cluster,
        )
        .await?;
        debug!("Received a response json value: {jsv:?}");
        let res: BinsDim0CollectedResult<i32> = serde_json::from_value(jsv)?;
        assert_eq!(res.range_final(), true);
        assert_eq!(res.len(), 11);
        assert_eq!(res.ts_anchor_sec(), 1200);
        let nb = res.len();
        {
            let a1: Vec<_> = res.ts1_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 120 * 1000 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.ts2_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 120 * 1000 * (1 + x)).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.counts().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|_| 240).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.mins().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 2400 + 240 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.maxs().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 2639 + 240 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.avgs().iter().copied().collect();
            let a2: Vec<_> = (0..nb).map(|x| 2520. + 240. * x as f32).collect();
            assert_eq!(f32_iter_cmp_near(a1, a2, 0.001, 0.001), true);
        }
        Ok(())
    };
    taskrun::run(fut)
}
// Binned query over one hour (13 bins of 300 s).
// Fixes: log-message typo "Receveided"; redundant `.into_iter()`; `.copied()`.
#[test]
fn binned_d0_json_01b() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = get_binned_json(
            SfDbChannel::from_name(TEST_BACKEND, "test-gen-i32-dim0-v01"),
            "1970-01-01T00:20:10.000Z",
            "1970-01-01T01:20:30.000Z",
            10,
            cluster,
        )
        .await?;
        debug!("Received a response json value: {jsv:?}");
        let res: BinsDim0CollectedResult<i32> = serde_json::from_value(jsv)?;
        assert_eq!(res.range_final(), true);
        assert_eq!(res.len(), 13);
        assert_eq!(res.ts_anchor_sec(), 1200);
        let nb = res.len();
        {
            let a1: Vec<_> = res.ts1_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 300 * 1000 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.ts2_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 300 * 1000 * (1 + x)).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.counts().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|_| 600).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.mins().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 2400 + 600 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.maxs().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 2999 + 600 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.avgs().iter().copied().collect();
            let a2: Vec<_> = (0..nb).map(|x| 2700. + 600. * x as f32).collect();
            assert_eq!(f32_iter_cmp_near(a1, a2, 0.001, 0.001), true);
        }
        Ok(())
    };
    taskrun::run(fut)
}
// Binned query on the f64 waveform channel over 10 seconds (1 s bins);
// min/max/avg compared with tolerance helpers.
// Fixes: log-message typo "Receveided"; redundant `.into_iter()`; `.copied()`.
#[test]
fn binned_d0_json_02() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = get_binned_json(
            SfDbChannel::from_name(TEST_BACKEND, "test-gen-f64-dim1-v00"),
            "1970-01-01T00:20:00Z",
            "1970-01-01T00:20:10Z",
            //"1970-01-01T01:20:45.000Z",
            10,
            cluster,
        )
        .await?;
        debug!("Received a response json value: {jsv:?}");
        let res: BinsDim0CollectedResult<f64> = serde_json::from_value(jsv)?;
        assert_eq!(res.range_final(), true);
        assert_eq!(res.len(), 10);
        assert_eq!(res.ts_anchor_sec(), 1200);
        let nb = res.len();
        {
            let a1: Vec<_> = res.ts1_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 1 * 1000 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.ts2_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 1 * 1000 * (1 + x)).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.counts().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|_| 10).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.mins().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|_| 0.1).collect();
            assert_eq!(f64_iter_cmp_near(a1, a2, 0.05, 0.05), true);
        }
        {
            let a1: Vec<_> = res.maxs().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|_| 6.3).collect();
            assert_eq!(f64_iter_cmp_near(a1, a2, 0.05, 0.05), true);
        }
        {
            let a1: Vec<_> = res.avgs().iter().copied().collect();
            let a2 = vec![46.2, 40.4, 48.6, 40.6, 45.8, 45.1, 41.1, 48.5, 40.1, 46.8];
            assert_eq!(f32_iter_cmp_near(a1, a2, 0.05, 0.05), true);
        }
        Ok(())
    };
    taskrun::run(fut)
}
// Binned query on the f64 waveform channel over one hour with only 2
// requested bins; only bin counts are asserted.
// Fixes: log-message typo "Receveided"; redundant `.into_iter()`; `.copied()`.
#[test]
fn binned_d0_json_03() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = get_binned_json(
            SfDbChannel::from_name(TEST_BACKEND, "test-gen-f64-dim1-v00"),
            "1970-01-01T00:20:10.000Z",
            "1970-01-01T01:20:20.000Z",
            2,
            cluster,
        )
        .await?;
        debug!("Received a response json value: {jsv:?}");
        let res: BinsDim0CollectedResult<f64> = serde_json::from_value(jsv)?;
        assert_eq!(res.range_final(), true);
        assert_eq!(res.len(), 4);
        assert_eq!(res.ts_anchor_sec(), 1200);
        let nb = res.len();
        {
            let a1: Vec<_> = res.counts().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|_| 12000).collect();
            assert_eq!(a1, a2);
        }
        Ok(())
    };
    taskrun::run(fut)
}
// Binned query over four hours (25 bins of 600 s).
// Fixes: log-message typo "Receveided"; redundant `.into_iter()`; `.copied()`.
#[test]
fn binned_d0_json_04() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = get_binned_json(
            SfDbChannel::from_name(TEST_BACKEND, "test-gen-i32-dim0-v01"),
            "1970-01-01T00:20:10.000Z",
            "1970-01-01T04:20:30.000Z",
            20,
            cluster,
        )
        .await?;
        debug!("Received a response json value: {jsv:?}");
        let res: BinsDim0CollectedResult<i32> = serde_json::from_value(jsv)?;
        assert_eq!(res.range_final(), true);
        assert_eq!(res.len(), 25);
        assert_eq!(res.ts_anchor_sec(), 1200);
        let nb = res.len();
        {
            let a1: Vec<_> = res.ts1_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 600 * 1000 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.ts2_off_ms().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 600 * 1000 * (1 + x)).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.counts().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|_| 1200).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.mins().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 2400 + 1200 * x).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.maxs().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 2399 + 1200 * (1 + x)).collect();
            assert_eq!(a1, a2);
        }
        {
            let a1: Vec<_> = res.avgs().iter().copied().collect();
            let a2: Vec<_> = (0..nb as _).map(|x| 3000. + 1200. * x as f32).collect();
            assert_eq!(f32_iter_cmp_near(a1, a2, 0.001, 0.001), true);
        }
        Ok(())
    };
    taskrun::run(fut)
}
async fn get_binned_json(
channel: SfDbChannel,
beg_date: &str,
end_date: &str,
bin_count: u32,
cluster: &Cluster,
) -> Result<JsonValue, Error> {
let t1 = Utc::now();
let node0 = &cluster.nodes[0];
let beg_date = beg_date.parse()?;
let end_date = end_date.parse()?;
let range = NanoRange::from_date_time(beg_date, end_date).into();
let mut query = BinnedQuery::new(channel, range, bin_count).for_time_weighted_scalar();
query.merger_out_len_max = Some(240);
let hp = HostPort::from_node(node0);
let mut url = Url::parse(&format!("http://{}:{}/api/4/binned", hp.host, hp.port))?;
query.append_to_url(&mut url);
let url = url;
debug!("http get {}", url);
let req = hyper::Request::builder()
.method(http::Method::GET)
.uri(url.to_string())
.header(http::header::ACCEPT, APP_JSON)
.body(Body::empty())
.ec()?;
let client = hyper::Client::new();
let res = client.request(req).await.ec()?;
if res.status() != StatusCode::OK {
error!("error response {:?}", res);
let buf = hyper::body::to_bytes(res.into_body()).await.ec()?;
let s = String::from_utf8_lossy(&buf);
error!("body of error response: {s}");
return Err(Error::with_msg_no_trace(format!("error response")));
}
let buf = hyper::body::to_bytes(res.into_body()).await.ec()?;
let s = String::from_utf8_lossy(&buf);
let res: JsonValue = serde_json::from_str(&s)?;
let pretty = serde_json::to_string_pretty(&res)?;
debug!("get_binned_json pretty {pretty}");
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
// TODO add timeout
debug!("time {} ms", ms);
Ok(res)
}

View File

@@ -0,0 +1,80 @@
use crate::err::ErrConv;
use chrono::Utc;
use err::Error;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::AppendToUrl;
use netpod::Cluster;
use netpod::HostPort;
use netpod::APP_JSON;
use query::api4::binned::BinnedQuery;
use query::api4::events::PlainEventsQuery;
use serde_json::Value as JsonValue;
use url::Url;
// TODO improve by a more information-rich return type.
/// Issue a plain-events query against node 0 of the cluster and return
/// the response body parsed as JSON.
///
/// Fix: the pretty-print debug line previously said "fetch_binned_json"
/// (copy-paste from the sibling function), which mislabeled log output.
pub async fn fetch_events_json(query: PlainEventsQuery, cluster: &Cluster) -> Result<JsonValue, Error> {
    let t1 = Utc::now();
    let node0 = &cluster.nodes[0];
    let hp = HostPort::from_node(node0);
    let mut url = Url::parse(&format!("http://{}:{}/api/4/events", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    let url = url;
    debug!("fetch_events_json url {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_JSON)
        .body(Body::empty())
        .ec()?;
    let client = hyper::Client::new();
    let res = client.request(req).await.ec()?;
    if res.status() != StatusCode::OK {
        error!("client response {:?}", res);
        return Err(Error::with_msg_no_trace(format!("bad result {res:?}")));
    }
    let buf = hyper::body::to_bytes(res.into_body()).await.ec()?;
    let s = String::from_utf8_lossy(&buf);
    let res: JsonValue = serde_json::from_str(&s)?;
    let pretty = serde_json::to_string_pretty(&res)?;
    debug!("fetch_events_json pretty: {pretty}");
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    // TODO add timeout
    debug!("time {} ms", ms);
    Ok(res)
}
// TODO improve by a more information-rich return type.
/// Issue a binned query against node 0 of the cluster and return the
/// response body parsed as JSON.
pub async fn fetch_binned_json(query: BinnedQuery, cluster: &Cluster) -> Result<JsonValue, Error> {
    let t1 = Utc::now();
    let hp = HostPort::from_node(&cluster.nodes[0]);
    let mut url = Url::parse(&format!("http://{}:{}/api/4/binned", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    debug!("fetch_binned_json url {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_JSON)
        .body(Body::empty())
        .ec()?;
    let res = hyper::Client::new().request(req).await.ec()?;
    if StatusCode::OK != res.status() {
        error!("client response {:?}", res);
        return Err(Error::with_msg_no_trace(format!("bad result {res:?}")));
    }
    let body = hyper::body::to_bytes(res.into_body()).await.ec()?;
    let text = String::from_utf8_lossy(&body);
    let jsv: JsonValue = serde_json::from_str(&text)?;
    let pretty = serde_json::to_string_pretty(&jsv)?;
    debug!("fetch_binned_json pretty: {pretty}");
    let ms = chrono::Utc::now().signed_duration_since(t1).num_milliseconds() as u64;
    // TODO add timeout
    debug!("time {} ms", ms);
    Ok(jsv)
}

View File

@@ -0,0 +1,112 @@
use crate::err::ErrConv;
use crate::nodes::require_test_hosts_running;
use crate::test::api4::common::fetch_events_json;
use chrono::Utc;
use err::Error;
use http::StatusCode;
use hyper::Body;
use items_0::WithLen;
use items_2::eventsdim0::EventsDim0CollectorOutput;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
use netpod::Cluster;
use netpod::HostPort;
use netpod::SfDbChannel;
use netpod::APP_JSON;
use query::api4::events::PlainEventsQuery;
use serde_json::Value as JsonValue;
use url::Url;
const TEST_BACKEND: &str = "testbackend-00";
/// Build a time-weighted plain-events query for the given test channel
/// and ISO-8601 date range.
fn make_query<S: Into<String>>(name: S, beg_date: &str, end_date: &str) -> Result<PlainEventsQuery, Error> {
    let channel = SfDbChannel::from_name(TEST_BACKEND, name);
    let range = NanoRange::from_date_time(beg_date.parse()?, end_date.parse()?);
    Ok(PlainEventsQuery::new(channel, range).for_time_weighted_scalar())
}
// Fetch plain events as JSON and check event count and timestamp anchor.
#[test]
fn events_plain_json_00() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let query = make_query(
            "test-gen-i32-dim0-v01",
            "1970-01-01T00:20:04.000Z",
            "1970-01-01T00:21:10.000Z",
        )?;
        let jsv = fetch_events_json(query, cluster).await?;
        let res: EventsDim0CollectorOutput<i32> = serde_json::from_value(jsv)?;
        // Time-weighted uses one event before requested range:
        assert_eq!(res.len(), 133);
        assert_eq!(res.ts_anchor_sec(), 1203);
        Ok(())
    };
    taskrun::run(fut)
}
// A query range beyond the generated data: the response must not be marked
// range-final nor timed out.
#[test]
fn events_plain_json_02_range_incomplete() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let jsv = events_plain_json(
            SfDbChannel::from_name(TEST_BACKEND, "test-gen-i32-dim0-v01"),
            "1970-01-03T23:59:55.000Z",
            "1970-01-04T00:00:01.000Z",
            cluster,
        )
        .await?;
        // Propagate deserialization errors with `?` (consistent with the
        // sibling tests) instead of panicking via `unwrap`.
        let res: EventsDim0CollectorOutput<i32> = serde_json::from_value(jsv)?;
        assert_eq!(res.range_final(), false);
        assert_eq!(res.timed_out(), false);
        Ok(())
    };
    taskrun::run(fut)
}
// TODO improve by a more information-rich return type.
// Build a time-weighted events query from string dates, GET it from node 0
// and return the body parsed as JSON.
// NOTE(review): this largely duplicates `crate::test::api4::common::fetch_events_json`
// plus query construction; consider delegating to it.
async fn events_plain_json(
    channel: SfDbChannel,
    beg_date: &str,
    end_date: &str,
    cluster: &Cluster,
) -> Result<JsonValue, Error> {
    let t1 = Utc::now();
    let node0 = &cluster.nodes[0];
    let beg_date = beg_date.parse()?;
    let end_date = end_date.parse()?;
    let range = NanoRange::from_date_time(beg_date, end_date);
    let query = PlainEventsQuery::new(channel, range).for_time_weighted_scalar();
    let hp = HostPort::from_node(node0);
    let mut url = Url::parse(&format!("http://{}:{}/api/4/events", hp.host, hp.port))?;
    query.append_to_url(&mut url);
    let url = url;
    info!("http get {}", url);
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(url.to_string())
        .header(http::header::ACCEPT, APP_JSON)
        .body(Body::empty())
        .ec()?;
    let client = hyper::Client::new();
    let res = client.request(req).await.ec()?;
    if res.status() != StatusCode::OK {
        error!("client response {:?}", res);
        return Err(Error::with_msg_no_trace(format!("bad result {res:?}")));
    }
    let buf = hyper::body::to_bytes(res.into_body()).await.ec()?;
    let s = String::from_utf8_lossy(&buf);
    let res: JsonValue = serde_json::from_str(&s)?;
    let pretty = serde_json::to_string_pretty(&res)?;
    info!("{pretty}");
    let t2 = chrono::Utc::now();
    let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
    // TODO add timeout
    info!("time {} ms", ms);
    Ok(res)
}

View File

@@ -0,0 +1,41 @@
use crate::nodes::require_test_hosts_running;
use crate::test::api4::common::fetch_events_json;
use err::Error;
use items_0::test::f32_iter_cmp_near;
use items_0::WithLen;
use items_2::eventsdim0::EventsDim0CollectorOutput;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::SfDbChannel;
use query::api4::events::PlainEventsQuery;
const BACKEND: &str = "testbackend-00";
/// Construct a pulse-id-diff events query for channel `name` on the test
/// backend over the given ISO-8601 date range.
pub fn make_query<S: Into<String>>(name: S, beg_date: &str, end_date: &str) -> Result<PlainEventsQuery, Error> {
    let channel = SfDbChannel::from_name(BACKEND, name);
    let range = NanoRange::from_date_time(beg_date.parse()?, end_date.parse()?);
    Ok(PlainEventsQuery::new(channel, range).for_pulse_id_diff())
}
/// Fetch pulse-id-diff events (see `make_query` above) for a scalar i32 test
/// channel and check the anchor and event count. Needs the local test
/// cluster to be running.
#[test]
fn events_plain_json_00() -> Result<(), Error> {
    let fut = async {
        let rh = require_test_hosts_running()?;
        let cluster = &rh.cluster;
        let query = make_query(
            "test-gen-i32-dim0-v01",
            "1970-01-01T00:20:04.000Z",
            "1970-01-01T00:21:10.000Z",
        )?;
        let jsv = fetch_events_json(query, cluster).await?;
        // Pulse-id diffs are deserialized as i64 even though the channel data is i32.
        let res: EventsDim0CollectorOutput<i64> = serde_json::from_value(jsv)?;
        // inmem was meant just for functional test, ignores the requested time range
        assert_eq!(res.ts_anchor_sec(), 1204);
        assert_eq!(res.len(), 132);
        Ok(())
    };
    taskrun::run(fut)
}

View File

@@ -0,0 +1,103 @@
#![allow(unused)]
use crate::nodes::require_archapp_test_host_running;
use err::Error;
use netpod::f64_close;
use netpod::log::*;
/// Disabled archiver-appliance events test. Returns early; the real body is
/// additionally compile-disabled. Kept for its expected-data tables until the
/// test data is regenerated (see TODO below).
#[test]
fn get_events_1() -> Result<(), Error> {
    if true {
        // Test is switched off; nothing below this point runs.
        return Ok(());
    }
    // TODO re-use test data in dedicated archapp converter.
    let fut = async { return Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async {
        let rh = require_archapp_test_host_running()?;
        let cluster = &rh.cluster;
        let res = get_plain_events_json(
            // TODO this just added test backend name, no series id.
            ch_gen("SARUN16-MQUA080:X"),
            "2021-01-04T00:00:00Z",
            "2021-01-30T00:00:00Z",
            cluster,
            true,
            4,
        )
        .await?;
        let res: ScalarEventsResponse = serde_json::from_value(res)?;
        info!("RESULT: {res:?}");
        // Expected timestamps split into anchor (s), ms-offset and ns-remainder parts.
        let ts_anchor = 1609763681;
        let ts_ms = vec![
            617, 2569805, 2936041, 3010344, 3049906, 3708678, 5909539, 6477893, 6610677, 6758112, 71757772, 786724766,
            1308470149, 1890757180, 1915078958, 1915194844, 1915194947, 1915362469, 1915362571, 1915417056, 1915465737,
            1915520190, 1915520293, 1915571058, 1915805484, 1915805589, 1915965029, 1915965133, 1916031220, 1916031324,
            1916082787, 1916082889, 1916157130, 1916157233, 1916345254, 1916345356, 1916488147, 1916513221, 1916620067,
            1916620173, 1916672379, 1916693598, 1916723207, 1916723309, 1916745319, 1916745420, 1916775502, 1916775609,
        ];
        let ts_ns = vec![
            584454, 368902, 427972, 160693, 58866, 902958, 192718, 479215, 450894, 681257, 19499, 84254, 273548,
            721894, 78541, 169037, 501222, 573798, 341840, 736887, 939637, 906430, 566278, 630241, 189349, 565614,
            447258, 899381, 129461, 21285, 901927, 791954, 915058, 435737, 379707, 850017, 251317, 283772, 869783,
            687797, 556662, 527206, 790635, 502581, 307019, 218006, 121460, 750763,
        ];
        let values = vec![
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.25704250552462327,
            -0.22139999999990323,
            -0.22139999999990323,
            -0.20710000000008225,
            -0.20710000000008225,
            -0.20714250552464364,
            -0.20704250552444137,
            -0.1999999999998181,
            -0.1999999999998181,
            -0.2001425055245818,
            -0.1999999999998181,
            -0.1999999999998181,
            -0.1950000000001637,
            -0.1950000000001637,
            -0.20499999999992724,
            -0.20499999999992724,
            -0.2100000000000364,
            -0.2100000000000364,
            -0.2199999999997999,
            -0.2199999999997999,
            -0.2300000000000182,
            -0.2300000000000182,
            -0.22994250552437737,
            -0.22994250552437737,
            -0.2300000000000182,
            -0.2300000000000182,
            -0.2300425055245796,
            -0.22994250552437737,
            -0.2157000000001972,
            -0.2157000000001972,
            -0.2015000000001237,
            -0.2015000000001237,
            -0.2015000000001237,
            -0.2015000000001237,
        ];
        assert_eq!(res.ts_anchor, ts_anchor);
        assert_eq!(&res.ts_ms, &ts_ms);
        assert_eq!(&res.ts_ns, &ts_ns);
        // Floats compared with tolerance rather than exact equality.
        for (_i, (&a, &b)) in res.values.iter().zip(values.iter()).enumerate() {
            assert!(f64_close(a, b));
        }
        Ok(())
    };
    taskrun::run(fut)
}

View File

@@ -0,0 +1,87 @@
mod channelarchiver;
use err::Error;
/// Disabled SLS-archive binned test; kept for its expected-JSON fixture.
#[test]
fn get_sls_archive_1() -> Result<(), Error> {
    if true {
        // Switched off until the test data is regenerated.
        return Ok(());
    }
    // TODO re-use test data in dedicated convert application.
    let fut = async { Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async move {
        let rh = require_sls_test_host_running()?;
        let cluster = &rh.cluster;
        let channel = Channel {
            backend: "sls-archive".into(),
            name: "ABOMA-CH-6G:U-DCLINK".into(),
            series: None,
        };
        let begstr = "2021-11-10T01:00:00Z";
        let endstr = "2021-11-10T01:01:00Z";
        let (res, jsstr) =
            get_binned_json_common_res(channel, begstr, endstr, 10, AggKind::TimeWeightedScalar, cluster).await?;
        let exp = r##"{"avgs":[24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875],"counts":[0,0,0,0,0,0,0,0,0,0,0,0],"rangeFinal":true,"maxs":[24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875],"mins":[24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875,24.37225341796875],"tsAnchor":1636506000,"tsMs":[0,5000,10000,15000,20000,25000,30000,35000,40000,45000,50000,55000,60000],"tsNs":[0,0,0,0,0,0,0,0,0,0,0,0,0]}"##;
        // NOTE(review): deserializing a JSON object into String would fail at
        // runtime; sibling tests use BinnedResponse here — confirm if re-enabled.
        let exp: String = serde_json::from_str(exp).unwrap();
        check_close(&res, &exp, &jsstr)?;
        Ok(())
    };
    taskrun::run(fut)
}
/// Disabled SLS-archive binned test for ARIDI-PCT:CURRENT over a two-day
/// window; kept for its expected-JSON fixture.
#[test]
fn get_sls_archive_3() -> Result<(), Error> {
    if true {
        // Switched off until the test data is regenerated.
        return Ok(());
    }
    // TODO re-use test data in dedicated convert application.
    let fut = async { Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async move {
        let rh = require_sls_test_host_running()?;
        let cluster = &rh.cluster;
        let channel = Channel {
            backend: "sls-archive".into(),
            name: "ARIDI-PCT:CURRENT".into(),
            series: None,
        };
        let begstr = "2021-11-09T00:00:00Z";
        let endstr = "2021-11-11T00:10:00Z";
        let (res, jsstr) =
            get_binned_json_common_res(channel, begstr, endstr, 10, AggKind::TimeWeightedScalar, cluster).await?;
        let exp = r##"{"avgs":[401.1354675292969,401.1296081542969,401.1314392089844,401.134765625,401.1371154785156,376.5816345214844,401.13775634765625,209.2684783935547,-0.06278431415557861,-0.06278431415557861,-0.06278431415557861,-0.047479934990406036,0.0],"counts":[2772,2731,2811,2689,2803,2203,2355,1232,0,0,0,2,0],"maxs":[402.1717718261533,402.18702154022117,402.1908339687381,402.198458825772,402.17939668318724,402.194646397255,402.1908339687381,402.1908339687381,-0.06278431346925281,-0.06278431346925281,-0.06278431346925281,0.0,0.0],"mins":[400.0291869996188,400.02537457110185,400.0291869996188,400.0329994281358,400.0291869996188,0.0,400.0444367136866,-0.06278431346925281,-0.06278431346925281,-0.06278431346925281,-0.06278431346925281,-0.06278431346925281,0.0],"tsAnchor":1636416000,"tsMs":[0,14400000,28800000,43200000,57600000,72000000,86400000,100800000,115200000,129600000,144000000,158400000,172800000,187200000],"tsNs":[0,0,0,0,0,0,0,0,0,0,0,0,0,0]}"##;
        let exp: BinnedResponse = serde_json::from_str(exp).unwrap();
        // Compare with tolerance; jsstr is included for diagnostics on failure.
        check_close(&res, &exp, &jsstr)?;
        Ok(())
    };
    taskrun::run(fut)
}
/// Disabled SLS-archive binned test for a waveform channel; kept for its
/// expected-JSON fixture.
#[test]
fn get_sls_archive_wave_2() -> Result<(), Error> {
    if true {
        // Switched off until the test data is regenerated.
        return Ok(());
    }
    // TODO re-use test data in dedicated convert application.
    let fut = async { Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async move {
        let rh = require_sls_test_host_running()?;
        let cluster = &rh.cluster;
        let channel = Channel {
            backend: "sls-archive".into(),
            name: "ARIDI-MBF-X:CBM-IN".into(),
            series: None,
        };
        let begstr = "2021-11-09T10:00:00Z";
        let endstr = "2021-11-10T06:00:00Z";
        let (res, jsstr) =
            get_binned_json_common_res(channel, begstr, endstr, 10, AggKind::TimeWeightedScalar, cluster).await?;
        let exp = r##"{"avgs":[2.0403556177939208e-8,1.9732556921780997e-8,1.9948116047885378e-8,2.024017220492169e-8,2.1306243880303555e-8,1.998394871804976e-8,1.776692748478581e-8,2.002254362309941e-8,2.0643645015638867e-8,2.0238848819076338e-8],"counts":[209,214,210,219,209,192,171,307,285,232],"maxs":[0.001784245832823217,0.0016909628175199032,0.0017036109929904342,0.0016926786629483104,0.0017604742897674441,0.0018568832892924547,0.001740367733873427,0.0017931810580193996,0.0017676990246400237,0.002342566382139921],"mins":[0.000040829672798281536,0.00004028259718324989,0.000037641591916326433,0.000039788486901670694,0.00004028418697998859,0.00003767738598980941,0.0,0.00004095739495824091,0.00004668773908633739,0.00003859612115775235],"tsAnchor":1636452000,"tsMs":[0,7200000,14400000,21600000,28800000,36000000,43200000,50400000,57600000,64800000,72000000],"tsNs":[0,0,0,0,0,0,0,0,0,0,0]}"##;
        let exp: BinnedResponse = serde_json::from_str(exp).unwrap();
        // Compare with tolerance; jsstr is included for diagnostics on failure.
        check_close(&res, &exp, &jsstr)?;
        Ok(())
    };
    taskrun::run(fut)
}

View File

@@ -0,0 +1,155 @@
use super::*;
/// Disabled SLS-archive scalar events test; kept for its expected timestamp
/// tables until the test data is regenerated.
#[test]
fn get_scalar_2_events() -> Result<(), Error> {
    if true {
        // Switched off until the test data is regenerated.
        return Ok(());
    }
    // TODO re-use test data in dedicated convert application.
    let fut = async { Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async move {
        let rh = require_sls_test_host_running()?;
        let cluster = &rh.cluster;
        let channel = Channel {
            backend: "sls-archive".into(),
            name: "ARIDI-PCT:CURRENT".into(),
            series: None,
        };
        let begstr = "2021-11-10T00:00:00Z";
        let endstr = "2021-11-10T00:10:00Z";
        let jsstr = get_events_json_common_res(channel, begstr, endstr, cluster).await?;
        let res: ScalarEventsResponse = serde_json::from_str(&jsstr)?;
        // Expected event timestamps: ms-offset from anchor plus ns-remainder.
        let ts_ms: Vec<u64> = vec![
            148, 9751, 19670, 24151, 24471, 24792, 25110, 25430, 25751, 26071, 26391, 26713, 27032, 27356, 27672,
            27991, 28311, 43040, 52966, 62570, 72177, 82105, 91706, 101632, 111235, 121160, 130759, 140677, 150606,
            160209, 170134, 179738, 189980, 200224, 209831, 219751, 225514, 225834, 226154, 226475, 226794, 227116,
            227433, 227755, 228074, 228395, 228714, 229035, 229354, 229674, 245674, 255597, 265510, 275110, 284707,
            294302, 304224, 314138, 324054, 333333, 343248, 352849, 362762, 372363, 382283, 391891, 401796, 411395,
            421634, 431230, 433790, 434110, 434428, 434752, 435068, 435391, 435709, 436028, 436351, 436668, 436990,
            437308, 437628, 437953, 453304, 463222, 472824, 482417, 492019, 501934, 511851, 521447, 531364, 540959,
            550558, 560474, 570071, 579668, 589582,
        ];
        let ts_ns: Vec<u64> = vec![
            943241, 130276, 226885, 258374, 9524, 153770, 179580, 985805, 757887, 751800, 877591, 159972, 764944,
            429832, 426517, 490975, 828473, 101407, 528288, 331264, 131573, 178810, 415039, 544017, 621317, 25989,
            229791, 897343, 130766, 19213, 766900, 92172, 352772, 779613, 521675, 192592, 77354, 998756, 10378, 278841,
            811319, 520706, 673746, 687239, 676867, 251158, 253234, 304222, 241316, 387683, 600611, 524062, 235502,
            793455, 38335, 688777, 318149, 62614, 893092, 188883, 897420, 545225, 949778, 609390, 339743, 35897,
            218211, 159017, 133408, 824998, 269300, 196288, 665918, 597766, 741594, 855975, 727405, 902579, 172017,
            546991, 578579, 735680, 825184, 663507, 543606, 926800, 487587, 970423, 42198, 491516, 409085, 408228,
            480644, 404173, 856513, 364301, 945081, 81850, 868410,
        ];
        assert_eq!(res.ts_anchor, 1636502401);
        assert_eq!(&res.ts_ms, &ts_ms);
        assert_eq!(&res.ts_ns, &ts_ns);
        assert_eq!(res.finalised_range, true);
        Ok(())
    };
    taskrun::run(fut)
}
/// Disabled SLS-archive binned counterpart of `get_scalar_2_events`; kept for
/// its expected-JSON fixture.
#[test]
fn get_scalar_2_binned() -> Result<(), Error> {
    if true {
        // Switched off until the test data is regenerated.
        return Ok(());
    }
    // TODO re-use test data in dedicated convert application.
    let fut = async { return Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async move {
        let rh = require_sls_test_host_running()?;
        let cluster = &rh.cluster;
        let channel = Channel {
            backend: "sls-archive".into(),
            name: "ARIDI-PCT:CURRENT".into(),
            series: None,
        };
        let begstr = "2021-11-10T00:00:00Z";
        let endstr = "2021-11-10T00:10:00Z";
        let (res, jsstr) =
            get_binned_json_common_res(channel, begstr, endstr, 10, AggKind::TimeWeightedScalar, cluster).await?;
        let exp = r##"{"avgs":[401.1745910644531,401.5135498046875,400.8823547363281,400.66156005859375,401.8301086425781,401.19305419921875,400.5584411621094,401.4371337890625,401.4137268066406,400.77880859375],"counts":[19,6,6,19,6,6,6,19,6,6],"rangeFinal":true,"maxs":[402.04977411361034,401.8439029736943,401.22628955394583,402.1298351124666,402.1298351124666,401.5084092642013,400.8869834159359,402.05358654212733,401.74477983225313,401.1271664125047],"mins":[400.08256099885625,401.22628955394583,400.60867613419754,400.0939982844072,401.5084092642013,400.8869834159359,400.2693699961876,400.05968642775446,401.1271664125047,400.50574056423943],"tsAnchor":1636502400,"tsMs":[0,60000,120000,180000,240000,300000,360000,420000,480000,540000,600000],"tsNs":[0,0,0,0,0,0,0,0,0,0,0]}"##;
        let exp: BinnedResponse = serde_json::from_str(exp).unwrap();
        // Compare with tolerance; jsstr is included for diagnostics on failure.
        check_close(&res, &exp, &jsstr)?;
        Ok(())
    };
    taskrun::run(fut)
}
/// Disabled SLS-archive waveform events test; kept for its expected timestamp
/// tables and spot-checked sample values.
#[test]
fn get_wave_1_events() -> Result<(), Error> {
    if true {
        // Switched off until the test data is regenerated.
        return Ok(());
    }
    // TODO re-use test data in dedicated convert application.
    let fut = async { return Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async move {
        let rh = require_sls_test_host_running()?;
        let cluster = &rh.cluster;
        let channel = Channel {
            backend: "sls-archive".into(),
            name: "ARIDI-MBF-X:CBM-IN".into(),
            series: None,
        };
        let begstr = "2021-11-09T00:00:00Z";
        let endstr = "2021-11-09T00:10:00Z";
        let jsstr = get_events_json_common_res(channel, begstr, endstr, cluster).await?;
        let res: WaveEventsResponse = serde_json::from_str(&jsstr)?;
        // TODO compare with resources/expected/f6882ac49c.json
        let ts_ms: Vec<u64> = vec![
            389, 4389, 30390, 60391, 64391, 96401, 104398, 148393, 184394, 212395, 212395, 244396, 268396, 268396,
            308397, 366399, 408400, 446401, 482402, 484402, 508402, 544403, 570404, 570404,
        ];
        let ts_ns: Vec<u64> = vec![
            815849, 897529, 550829, 342809, 409129, 326629, 71679, 491294, 503054, 182074, 182074, 141729, 581034,
            581034, 676829, 174124, 274914, 184119, 98504, 148344, 777404, 686129, 390264, 390264,
        ];
        assert_eq!(res.ts_anchor, 1636416014);
        assert_eq!(&res.ts_ms, &ts_ms);
        assert_eq!(&res.ts_ns, &ts_ns);
        assert_eq!(res.finalised_range, true);
        // 24 events, each a 480-sample waveform; spot-check a few samples.
        assert_eq!(res.values.len(), 24);
        assert_eq!(res.values[0].len(), 480);
        assert_eq!(res.values[1].len(), 480);
        assert!(f64_close(res.values[0][0], 0.00011179182183695957));
        assert!(f64_close(res.values[1][2], 0.00014343370276037604));
        assert!(f64_close(res.values[2][4], 0.00011945325240958482));
        //let exp = r##"{}"##;
        //let exp: WaveEventsResponse = serde_json::from_str(exp).unwrap();
        //check_close_events(&res, &exp, &jsstr)?;
        Ok(())
    };
    taskrun::run(fut)
}
/// Disabled SLS-archive waveform binned test; currently only checks the
/// timestamp anchor and logs the JSON for inspection.
#[test]
fn get_wave_1_binned() -> Result<(), Error> {
    if true {
        // Switched off until the test data is regenerated.
        return Ok(());
    }
    // TODO re-use test data in dedicated convert application.
    let fut = async { return Err::<(), _>(Error::with_msg_no_trace("TODO")) };
    // NOTE(review): cfg(DISABLED) is never set, so this body is not compiled.
    #[cfg(DISABLED)]
    let fut = async move {
        let rh = require_sls_test_host_running()?;
        let cluster = &rh.cluster;
        let channel = Channel {
            backend: "sls-archive".into(),
            name: "ARIDI-MBF-X:CBM-IN".into(),
            series: None,
        };
        let begstr = "2021-11-09T00:00:00Z";
        let endstr = "2021-11-11T00:10:00Z";
        let (res, jsstr) =
            get_binned_json_common_res(channel, begstr, endstr, 10, AggKind::TimeWeightedScalar, cluster).await?;
        assert_eq!(res.ts_anchor, 1636416000);
        info!("{}", jsstr);
        //let exp = r##"{}"##;
        //let exp: BinnedResponse = serde_json::from_str(exp).unwrap();
        //check_close(&res, &exp, &jsstr)?;
        Ok(())
    };
    taskrun::run(fut)
}

View File

@@ -0,0 +1,112 @@
use crate::err::ErrConv;
use chrono::DateTime;
use chrono::Utc;
use err::Error;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::query::CacheUsage;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
use netpod::Cluster;
use netpod::SfDbChannel;
use netpod::APP_JSON;
use query::api4::binned::BinnedQuery;
use std::time::Duration;
use url::Url;
const TEST_BACKEND: &str = "testbackend-00";
/// Subset of the binned JSON response that `get_json_common` extracts for
/// comparison in tests.
struct DataResult {
    // Per-bin average values, taken from the response's "avgs" array.
    avgs: Vec<f64>,
}
// TODO compare if I want to recycle some of this:
#[allow(unused)]
async fn get_json_common(
channel_name: &str,
beg_date: &str,
end_date: &str,
bin_count: u32,
// TODO refactor for Transform
//agg_kind: AggKind,
cluster: &Cluster,
expect_bin_count: u32,
expect_finalised_range: bool,
) -> Result<DataResult, Error> {
let t1 = Utc::now();
let node0 = &cluster.nodes[0];
let beg_date: DateTime<Utc> = beg_date.parse()?;
let end_date: DateTime<Utc> = end_date.parse()?;
let channel_backend = TEST_BACKEND;
let channel = SfDbChannel::from_name(channel_backend, channel_name);
let range = NanoRange::from_date_time(beg_date, end_date).into();
let mut query = BinnedQuery::new(channel, range, bin_count).for_time_weighted_scalar();
query.set_timeout(Duration::from_millis(40000));
query.set_cache_usage(CacheUsage::Ignore);
let mut url = Url::parse(&format!("http://{}:{}/api/4/binned", node0.host, node0.port))?;
query.append_to_url(&mut url);
let url = url;
debug!("get_json_common get {}", url);
let req = hyper::Request::builder()
.method(http::Method::GET)
.uri(url.to_string())
.header(http::header::ACCEPT, APP_JSON)
.body(Body::empty())
.ec()?;
let client = hyper::Client::new();
let res = client.request(req).await.ec()?;
if res.status() != StatusCode::OK {
error!("get_json_common client response {:?}", res);
}
let res = hyper::body::to_bytes(res.into_body()).await.ec()?;
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
// TODO add timeout
debug!("get_json_common DONE time {} ms", ms);
let res = String::from_utf8_lossy(&res).to_string();
let res: serde_json::Value = serde_json::from_str(res.as_str())?;
// TODO assert these:
debug!(
"result from endpoint: --------------\n{}\n--------------",
serde_json::to_string_pretty(&res)?
);
// TODO enable in future:
if false {
if expect_finalised_range {
if !res
.get("rangeFinal")
.ok_or(Error::with_msg("missing rangeFinal"))?
.as_bool()
.ok_or(Error::with_msg("key rangeFinal not bool"))?
{
return Err(Error::with_msg("expected rangeFinal"));
}
} else if res.get("rangeFinal").is_some() {
return Err(Error::with_msg("expect absent rangeFinal"));
}
}
let counts = res.get("counts").unwrap().as_array().unwrap();
let mins = res.get("mins").unwrap().as_array().unwrap();
let maxs = res.get("maxs").unwrap().as_array().unwrap();
let avgs = res.get("avgs").unwrap().as_array().unwrap();
if counts.len() != expect_bin_count as usize {
return Err(Error::with_msg(format!(
"expect_bin_count {} got {}",
expect_bin_count,
counts.len()
)));
}
if mins.len() != expect_bin_count as usize {
return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
}
if maxs.len() != expect_bin_count as usize {
return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
}
let avgs: Vec<_> = avgs.into_iter().map(|k| k.as_f64().unwrap()).collect();
if avgs.len() != expect_bin_count as usize {
return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
}
let ret = DataResult { avgs };
Ok(ret)
}