Rename project

Dominik Werder
2021-05-31 14:06:19 +02:00
parent fc81763737
commit d3fa58563b
9 changed files with 14 additions and 13 deletions

daqbuffer/Cargo.toml Normal file

@@ -0,0 +1,30 @@
[package]
name = "daqbuffer"
version = "4.0.0-a.dev.12"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2018"

[dependencies]
tokio = { version = "1.4.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
hyper = "0.14"
http = "0.2"
tracing = "0.1.25"
tracing-subscriber = "0.2.17"
futures-core = "0.3.14"
futures-util = "0.3.14"
bytes = "1.0.1"
bincode = "1.3.3"
#async-channel = "1"
#dashmap = "3"
tokio-postgres = "0.7"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
chrono = "0.4"
clap = "3.0.0-beta.2"
lazy_static = "1.4.0"
err = { path = "../err" }
taskrun = { path = "../taskrun" }
netpod = { path = "../netpod" }
httpret = { path = "../httpret" }
disk = { path = "../disk" }
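# Note (sketch of the implied layout): the `path` dependencies above assume that
# err, taskrun, netpod, httpret and disk live as sibling crates next to this
# daqbuffer directory, i.e. all members of one workspace checkout.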


@@ -0,0 +1,182 @@
use chrono::{DateTime, Duration, Utc};
use disk::cache::CacheUsage;
use err::Error;
use netpod::log::*;
use netpod::{NodeConfig, NodeConfigCached};
use tokio::io::AsyncReadExt;
use tokio::fs::File;
pub fn main() {
match taskrun::run(go()) {
Ok(k) => {
info!("{:?}", k);
}
Err(k) => {
error!("{:?}", k);
}
}
}
/// Parse a relative time offset such as "10s", "5m" or "1.5h" into an absolute
/// timestamp: no prefix counts backwards from now, a leading "p" (plus) forwards.
fn parse_ts_rel(s: &str) -> Result<DateTime<Utc>, Error> {
let (sign, rem) = if s.starts_with("p") { (1, &s[1..]) } else { (-1, s) };
let (fac, rem) = if rem.ends_with("h") {
(1000 * 60 * 60, &rem[..rem.len() - 1])
} else if rem.ends_with("m") {
(1000 * 60, &rem[..rem.len() - 1])
} else if rem.ends_with("s") {
(1000, &rem[..rem.len() - 1])
} else {
return Err(Error::with_msg(format!("can not understand relative time: {}", s)))?;
};
if rem.contains(".") {
let num: f32 = rem.parse()?;
let dur = Duration::milliseconds((num * fac as f32 * sign as f32) as i64);
Ok(Utc::now() + dur)
} else {
let num: i64 = rem.parse()?;
let dur = Duration::milliseconds(num * fac * sign);
Ok(Utc::now() + dur)
}
}
/// Parse either an absolute timestamp (recognized by a contained '-') or a
/// relative offset via `parse_ts_rel`.
fn parse_ts(s: &str) -> Result<DateTime<Utc>, Error> {
let ret = if s.contains("-") { s.parse()? } else { parse_ts_rel(s)? };
Ok(ret)
}
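// Sketch of the accepted syntax (illustrative test added for clarity; it assumes
// only the two parsers above and chrono's Utc clock):
#[test]
fn parse_ts_examples() -> Result<(), Error> {
let now = Utc::now();
// No prefix counts backwards: "90m" resolves to now minus 90 minutes.
assert!(parse_ts("90m")? < now);
// A leading "p" (plus) points forwards: now plus 1.5 hours.
assert!(parse_ts("p1.5h")? > now);
// Anything containing '-' is parsed as an absolute timestamp.
assert!(parse_ts("1970-01-01T00:20:10.000Z").is_ok());
Ok(())
}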
async fn go() -> Result<(), Error> {
use clap::Clap;
use daqbuffer::cli::{ClientType, Opts, SubCmd};
let opts = Opts::parse();
match opts.subcmd {
SubCmd::Retrieval(subcmd) => {
trace!("test trace");
error!("test error");
info!("daqbuffer {}", clap::crate_version!());
let mut config_file = File::open(subcmd.config).await?;
let mut buf = vec![];
config_file.read_to_end(&mut buf).await?;
let node_config: NodeConfig = serde_json::from_slice(&buf)?;
let node_config: Result<NodeConfigCached, Error> = node_config.into();
let node_config = node_config?;
daqbuffer::run_node(node_config.clone()).await?;
}
SubCmd::Client(client) => match client.client_type {
ClientType::Status(opts) => {
daqbuffer::client::status(opts.host, opts.port).await?;
}
ClientType::Binned(opts) => {
let beg = parse_ts(&opts.beg)?;
let end = parse_ts(&opts.end)?;
let cache_usage = CacheUsage::from_string(&opts.cache)?;
daqbuffer::client::get_binned(
opts.host,
opts.port,
opts.backend,
opts.channel,
beg,
end,
opts.bins,
cache_usage,
opts.disk_stats_every_kb,
)
.await?;
}
},
SubCmd::GenerateTestData => {
disk::gen::gen_test_data().await?;
}
}
Ok(())
}
#[test]
fn simple_fetch() {
use netpod::Nanos;
use netpod::{timeunits::*, Channel, ChannelConfig, Cluster, Database, Node, NodeConfig, ScalarType, Shape};
taskrun::run(async {
let t1 = chrono::Utc::now();
let node = Node {
host: "localhost".into(),
listen: "0.0.0.0".into(),
port: 8360,
port_raw: 8360 + 100,
data_base_path: err::todoval(),
ksprefix: "daq_swissfel".into(),
split: 0,
backend: "testbackend".into(),
};
let query = netpod::AggQuerySingleChannel {
channel_config: ChannelConfig {
channel: Channel {
backend: "sf-databuffer".into(),
name: "S10BC01-DBAM070:BAM_CH1_NORM".into(),
},
keyspace: 3,
time_bin_size: Nanos { ns: DAY },
array: true,
scalar_type: ScalarType::F64,
shape: Shape::Wave(err::todoval()),
big_endian: true,
compression: true,
},
timebin: 18720,
tb_file_count: 1,
buffer_size: 1024 * 8,
};
let cluster = Cluster {
nodes: vec![node],
database: Database {
name: "daqbuffer".into(),
host: "localhost".into(),
user: "daqbuffer".into(),
pass: "daqbuffer".into(),
},
};
let node_config = NodeConfig {
name: format!("{}:{}", cluster.nodes[0].host, cluster.nodes[0].port),
cluster,
};
let node_config: Result<NodeConfigCached, Error> = node_config.into();
let node_config = node_config?;
let query_string = serde_json::to_string(&query).unwrap();
let host = tokio::spawn(httpret::host(node_config.clone()));
let req = hyper::Request::builder()
.method(http::Method::POST)
.uri("http://localhost:8360/api/4/parsed_raw")
.body(query_string.into())?;
let client = hyper::Client::new();
let res = client.request(req).await?;
info!("client response {:?}", res);
let mut res_body = res.into_body();
use hyper::body::HttpBody;
let mut ntot = 0u64;
loop {
match res_body.data().await {
Some(Ok(k)) => {
//info!("packet.. len {}", k.len());
ntot += k.len() as u64;
}
Some(Err(e)) => {
error!("{:?}", e);
}
None => {
info!("response stream exhausted");
break;
}
}
}
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
let throughput = ntot / 1024 * 1000 / ms;
info!(
"total download {} MB throughput {:5} kB/s",
ntot / 1024 / 1024,
throughput
);
// Dropping the JoinHandle detaches the server task instead of awaiting it.
drop(host);
//Err::<(), _>(format!("test error").into())
Ok(())
})
.unwrap();
}

daqbuffer/src/cli.rs Normal file

@@ -0,0 +1,65 @@
use clap::{crate_version, Clap};
#[derive(Debug, Clap)]
#[clap(name="daqbuffer", author="Dominik Werder <dominik.werder@gmail.com>", version=crate_version!())]
pub struct Opts {
#[clap(short, long, parse(from_occurrences))]
pub verbose: i32,
#[clap(subcommand)]
pub subcmd: SubCmd,
}
#[derive(Debug, Clap)]
pub enum SubCmd {
Retrieval(Retrieval),
Client(Client),
GenerateTestData,
}
#[derive(Debug, Clap)]
pub struct Retrieval {
#[clap(long)]
pub config: String,
}
#[derive(Debug, Clap)]
pub struct Client {
#[clap(subcommand)]
pub client_type: ClientType,
}
#[derive(Debug, Clap)]
pub enum ClientType {
Binned(BinnedClient),
Status(StatusClient),
}
#[derive(Debug, Clap)]
pub struct StatusClient {
#[clap(long)]
pub host: String,
#[clap(long)]
pub port: u16,
}
#[derive(Debug, Clap)]
pub struct BinnedClient {
#[clap(long)]
pub host: String,
#[clap(long)]
pub port: u16,
#[clap(long)]
pub backend: String,
#[clap(long)]
pub channel: String,
#[clap(long)]
pub beg: String,
#[clap(long)]
pub end: String,
#[clap(long)]
pub bins: u32,
#[clap(long, default_value = "use")]
pub cache: String,
#[clap(long, default_value = "1048576")]
pub disk_stats_every_kb: u32,
}
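// Example invocations (sketch; subcommand and flag names are what clap derives
// from the structs above, and the binary name is assumed to be `daqbuffer`):
//
//   daqbuffer retrieval --config node0.json
//   daqbuffer client status --host localhost --port 8360
//   daqbuffer client binned --host localhost --port 8360 \
//       --backend testbackend --channel wave-f64-be-n21 \
//       --beg 2h --end 1h --bins 10
//   daqbuffer generate-test-data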

daqbuffer/src/client.rs Normal file

@@ -0,0 +1,158 @@
use chrono::{DateTime, Utc};
use disk::agg::scalarbinbatch::MinMaxAvgScalarBinBatch;
use disk::agg::streams::StreamItem;
use disk::binned::RangeCompletableItem;
use disk::cache::{BinnedQuery, CacheUsage};
use disk::frame::inmem::InMemoryFrameAsyncReadStream;
use disk::frame::makeframe::FrameType;
use disk::streamlog::Streamlog;
use err::Error;
use futures_util::TryStreamExt;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, ByteSize, Channel, HostPort, NanoRange, PerfOpts};
pub async fn status(host: String, port: u16) -> Result<(), Error> {
let t1 = Utc::now();
let uri = format!("http://{}:{}/api/4/node_status", host, port);
let req = hyper::Request::builder()
.method(http::Method::GET)
.uri(uri)
.body(Body::empty())?;
let client = hyper::Client::new();
let res = client.request(req).await?;
if res.status() != StatusCode::OK {
error!("Server error {:?}", res);
return Err(Error::with_msg(format!("Server error {:?}", res)));
}
let body = hyper::body::to_bytes(res.into_body()).await?;
let res = String::from_utf8(body.to_vec())?;
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
info!("node_status DONE duration: {} ms", ms);
println!("{}", res);
Ok(())
}
pub async fn get_binned(
host: String,
port: u16,
channel_backend: String,
channel_name: String,
beg_date: DateTime<Utc>,
end_date: DateTime<Utc>,
bin_count: u32,
cache_usage: CacheUsage,
disk_stats_every_kb: u32,
) -> Result<(), Error> {
info!("------- get_binned client");
info!("channel {}", channel_name);
info!("beg {}", beg_date);
info!("end {}", end_date);
info!("-------");
let t1 = Utc::now();
let channel = Channel {
backend: channel_backend.clone(),
name: channel_name.into(),
};
let agg_kind = AggKind::DimXBins1;
let range = NanoRange::from_date_time(beg_date, end_date);
let mut query = BinnedQuery::new(channel, range, bin_count, agg_kind);
query.set_cache_usage(cache_usage);
query.set_disk_stats_every(ByteSize(1024 * disk_stats_every_kb));
let hp = HostPort { host, port };
let url = query.url(&hp);
let req = hyper::Request::builder()
.method(http::Method::GET)
.uri(url)
.header("accept", "application/octet-stream")
.body(Body::empty())?;
let client = hyper::Client::new();
let res = client.request(req).await?;
if res.status() != StatusCode::OK {
error!("Server error {:?}", res);
let (head, body) = res.into_parts();
let buf = hyper::body::to_bytes(body).await?;
let s = String::from_utf8_lossy(&buf);
return Err(Error::with_msg(format!(
concat!(
"Server error {:?}\n",
"---------------------- message from http body:\n",
"{}\n",
"---------------------- end of http body",
),
head, s
)));
}
let perf_opts = PerfOpts { inmem_bufcap: 512 };
let s1 = disk::cache::HttpBodyAsAsyncRead::new(res);
let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
use futures_util::StreamExt;
use std::future::ready;
let s3 = s2
.map_err(|e| error!("get_binned {:?}", e))
.filter_map(|item| {
let g = match item {
Ok(item) => match item {
StreamItem::Log(item) => {
Streamlog::emit(&item);
None
}
StreamItem::Stats(item) => {
info!("Stats: {:?}", item);
None
}
StreamItem::DataItem(frame) => {
type ExpectedType = Result<StreamItem<RangeCompletableItem<MinMaxAvgScalarBinBatch>>, Error>;
let type_id_exp = <ExpectedType as FrameType>::FRAME_TYPE_ID;
if frame.tyid() != type_id_exp {
error!("unexpected type id got {} exp {}", frame.tyid(), type_id_exp);
}
let n1 = frame.buf().len();
match bincode::deserialize::<ExpectedType>(frame.buf()) {
Ok(item) => match item {
Ok(item) => {
match item {
StreamItem::Log(item) => {
Streamlog::emit(&item);
}
StreamItem::Stats(item) => {
info!("Stats: {:?}", item);
}
StreamItem::DataItem(item) => {
info!("DataItem: {:?}", item);
}
}
Some(Ok(()))
}
Err(e) => {
error!("len {} error frame {:?}", n1, e);
Some(Err(e))
}
},
Err(e) => {
error!("len {} bincode error {:?}", n1, e);
Some(Err(e.into()))
}
}
}
},
Err(e) => Some(Err(Error::with_msg(format!("{:?}", e)))),
};
ready(g)
})
.for_each(|_| ready(()));
s3.await;
let t2 = chrono::Utc::now();
// NOTE: ntot is never accumulated while draining the stream above, so the
// totals reported below are currently always zero.
let ntot = 0;
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
let throughput = ntot / 1024 * 1000 / ms;
info!(
"get_cached_0 DONE total download {} MB throughput {:5} kB/s bin_count {}",
ntot / 1024 / 1024,
throughput,
bin_count,
);
Ok(())
}

daqbuffer/src/lib.rs Normal file

@@ -0,0 +1,30 @@
use err::Error;
use netpod::{Cluster, NodeConfig, NodeConfigCached};
use tokio::task::JoinHandle;
#[allow(unused_imports)]
use tracing::{debug, error, info, trace, warn};
pub mod cli;
pub mod client;
#[cfg(test)]
pub mod test;
/// Spawn one HTTP host task per node of the given cluster.
pub fn spawn_test_hosts(cluster: Cluster) -> Vec<JoinHandle<Result<(), Error>>> {
let mut ret = vec![];
for node in &cluster.nodes {
let node_config = NodeConfig {
cluster: cluster.clone(),
name: format!("{}:{}", node.host, node.port),
};
let node_config: Result<NodeConfigCached, Error> = node_config.into();
let node_config = node_config.unwrap();
let h = tokio::spawn(httpret::host(node_config));
ret.push(h);
}
ret
}
pub async fn run_node(node_config: NodeConfigCached) -> Result<(), Error> {
httpret::host(node_config).await?;
Ok(())
}
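// Usage sketch (hypothetical caller, mirroring what src/test.rs does): spawn
// all hosts of a Cluster and keep the JoinHandles alive while requests run.
//
//   let handles = daqbuffer::spawn_test_hosts(cluster.clone());
//   // ... issue requests against the nodes ...
//   drop(handles); // detaches the host tasks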

daqbuffer/src/test.rs Normal file

@@ -0,0 +1,310 @@
use crate::spawn_test_hosts;
use bytes::BytesMut;
use chrono::{DateTime, Utc};
use disk::agg::scalarbinbatch::MinMaxAvgScalarBinBatch;
use disk::agg::streams::{Bins, StatsItem, StreamItem};
use disk::binned::RangeCompletableItem;
use disk::cache::BinnedQuery;
use disk::frame::inmem::InMemoryFrameAsyncReadStream;
use disk::streamlog::Streamlog;
use err::Error;
use futures_util::StreamExt;
use futures_util::TryStreamExt;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, Channel, Cluster, Database, HostPort, NanoRange, Node, PerfOpts};
use std::future::ready;
use std::sync::{Arc, Mutex};
use tokio::io::AsyncRead;
use tokio::task::JoinHandle;
pub mod json;
struct RunningHosts {
cluster: Cluster,
_jhs: Vec<JoinHandle<Result<(), Error>>>,
}
lazy_static::lazy_static! {
static ref HOSTS_RUNNING: Mutex<Option<Arc<RunningHosts>>> = Mutex::new(None);
}
/// Start the test cluster on first use and share it across all tests in the process.
fn require_test_hosts_running() -> Result<Arc<RunningHosts>, Error> {
let mut g = HOSTS_RUNNING.lock().unwrap();
match g.as_ref() {
None => {
let cluster = test_cluster();
let jhs = spawn_test_hosts(cluster.clone());
let ret = RunningHosts {
cluster: cluster.clone(),
_jhs: jhs,
};
let a = Arc::new(ret);
*g = Some(a.clone());
Ok(a)
}
Some(gg) => Ok(gg.clone()),
}
}
fn test_cluster() -> Cluster {
let nodes = (0..3)
.map(|id| Node {
host: "localhost".into(),
listen: "0.0.0.0".into(),
port: 8360 + id as u16,
port_raw: 8360 + id as u16 + 100,
data_base_path: format!("../tmpdata/node{:02}", id).into(),
ksprefix: "ks".into(),
split: id,
backend: "testbackend".into(),
})
.collect();
Cluster {
nodes,
database: Database {
name: "daqbuffer".into(),
host: "localhost".into(),
user: "daqbuffer".into(),
pass: "daqbuffer".into(),
},
}
}
#[test]
fn get_binned_binary() {
taskrun::run(get_binned_binary_inner()).unwrap();
}
async fn get_binned_binary_inner() -> Result<(), Error> {
let rh = require_test_hosts_running()?;
let cluster = &rh.cluster;
if true {
get_binned_channel(
"wave-f64-be-n21",
"1970-01-01T00:20:10.000Z",
"1970-01-01T00:20:30.000Z",
2,
cluster,
true,
2,
)
.await?;
}
if true {
get_binned_channel(
"wave-u16-le-n77",
"1970-01-01T01:11:00.000Z",
"1970-01-01T01:35:00.000Z",
7,
cluster,
true,
24,
)
.await?;
}
if true {
get_binned_channel(
"wave-u16-le-n77",
"1970-01-01T01:42:00.000Z",
"1970-01-01T03:55:00.000Z",
2,
cluster,
true,
3,
)
.await?;
}
Ok(())
}
async fn get_binned_channel<S>(
channel_name: &str,
beg_date: S,
end_date: S,
bin_count: u32,
cluster: &Cluster,
expect_range_complete: bool,
expect_bin_count: u64,
) -> Result<BinnedResponse, Error>
where
S: AsRef<str>,
{
let t1 = Utc::now();
let agg_kind = AggKind::DimXBins1;
let node0 = &cluster.nodes[0];
let beg_date: DateTime<Utc> = beg_date.as_ref().parse()?;
let end_date: DateTime<Utc> = end_date.as_ref().parse()?;
let channel_backend = "testbackend";
let perf_opts = PerfOpts { inmem_bufcap: 512 };
let channel = Channel {
backend: channel_backend.into(),
name: channel_name.into(),
};
let range = NanoRange::from_date_time(beg_date, end_date);
let query = BinnedQuery::new(channel, range, bin_count, agg_kind);
let hp = HostPort::from_node(node0);
let url = query.url(&hp);
info!("get_binned_channel get {}", url);
let req = hyper::Request::builder()
.method(http::Method::GET)
.uri(url)
.header("accept", "application/octet-stream")
.body(Body::empty())?;
let client = hyper::Client::new();
let res = client.request(req).await?;
if res.status() != StatusCode::OK {
error!("client response {:?}", res);
}
let s1 = disk::cache::HttpBodyAsAsyncRead::new(res);
let s2 = InMemoryFrameAsyncReadStream::new(s1, perf_opts.inmem_bufcap);
let res = consume_binned_response(s2).await?;
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
info!("get_cached_0 DONE bin_count {} time {} ms", res.bin_count, ms);
if !res.is_valid() {
Err(Error::with_msg(format!("invalid response: {:?}", res)))
} else if res.range_complete_count == 0 && expect_range_complete {
Err(Error::with_msg(format!("expect range complete: {:?}", res)))
} else if res.bin_count != expect_bin_count {
Err(Error::with_msg(format!("bin count mismatch: {:?}", res)))
} else {
Ok(res)
}
}
#[derive(Debug)]
pub struct BinnedResponse {
bin_count: u64,
err_item_count: u64,
data_item_count: u64,
bytes_read: u64,
range_complete_count: u64,
log_item_count: u64,
stats_item_count: u64,
}
impl BinnedResponse {
pub fn new() -> Self {
Self {
bin_count: 0,
err_item_count: 0,
data_item_count: 0,
bytes_read: 0,
range_complete_count: 0,
log_item_count: 0,
stats_item_count: 0,
}
}
pub fn is_valid(&self) -> bool {
// At most one range-complete marker may appear in a valid response.
self.range_complete_count <= 1
}
}
/// Drain a stream of binned-response frames, tallying bins, data items,
/// log/stats items, and errors into a BinnedResponse.
async fn consume_binned_response<T>(inp: InMemoryFrameAsyncReadStream<T>) -> Result<BinnedResponse, Error>
where
T: AsyncRead + Unpin,
{
let s1 = inp
.map_err(|e| error!("TEST GOT ERROR {:?}", e))
.filter_map(|item| {
let g = match item {
Ok(item) => match item {
StreamItem::Log(item) => {
Streamlog::emit(&item);
None
}
StreamItem::Stats(item) => {
info!("Stats: {:?}", item);
None
}
StreamItem::DataItem(frame) => {
type ExpectedType = Result<StreamItem<RangeCompletableItem<MinMaxAvgScalarBinBatch>>, Error>;
match bincode::deserialize::<ExpectedType>(frame.buf()) {
Ok(item) => match item {
Ok(item) => match item {
StreamItem::Log(item) => {
Streamlog::emit(&item);
Some(Ok(StreamItem::Log(item)))
}
item => {
info!("TEST GOT ITEM {:?}", item);
Some(Ok(item))
}
},
Err(e) => {
error!("TEST GOT ERROR FRAME: {:?}", e);
Some(Err(e))
}
},
Err(e) => {
error!("bincode error: {:?}", e);
Some(Err(e.into()))
}
}
}
},
Err(e) => Some(Err(Error::with_msg(format!("WEIRD EMPTY ERROR {:?}", e)))),
};
ready(g)
})
.fold(BinnedResponse::new(), |mut a, k| {
let g = match k {
Ok(StreamItem::Log(_item)) => {
a.log_item_count += 1;
a
}
Ok(StreamItem::Stats(item)) => match item {
StatsItem::EventDataReadStats(item) => {
a.bytes_read += item.parsed_bytes;
a
}
},
Ok(StreamItem::DataItem(item)) => match item {
RangeCompletableItem::RangeComplete => {
a.range_complete_count += 1;
a
}
RangeCompletableItem::Data(item) => {
a.data_item_count += 1;
a.bin_count += item.bin_count() as u64;
a
}
},
Err(_e) => {
a.err_item_count += 1;
a
}
};
ready(g)
});
let ret = s1.await;
info!("BinnedResponse: {:?}", ret);
Ok(ret)
}
#[test]
fn bufs() {
use bytes::{Buf, BufMut};
let mut buf = BytesMut::with_capacity(1024);
assert!(buf.as_mut().len() == 0);
buf.put_u32_le(123);
assert!(buf.as_mut().len() == 4);
let mut b2 = buf.split_to(4);
assert!(b2.capacity() == 4);
b2.advance(2);
assert!(b2.capacity() == 2);
b2.advance(2);
assert!(b2.capacity() == 0);
assert!(buf.capacity() == 1020);
assert!(buf.remaining() == 0);
assert!(buf.remaining_mut() >= 1020);
assert!(buf.capacity() == 1020);
}


@@ -0,0 +1,66 @@
use crate::test::require_test_hosts_running;
use chrono::{DateTime, Utc};
use disk::cache::BinnedQuery;
use err::Error;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange};
#[test]
fn get_binned_json_0() {
taskrun::run(get_binned_json_0_inner()).unwrap();
}
async fn get_binned_json_0_inner() -> Result<(), Error> {
let rh = require_test_hosts_running()?;
let cluster = &rh.cluster;
get_binned_json_0_inner2(
"wave-f64-be-n21",
"1970-01-01T00:20:10.000Z",
"1970-01-01T01:20:30.000Z",
10,
cluster,
)
.await
}
async fn get_binned_json_0_inner2(
channel_name: &str,
beg_date: &str,
end_date: &str,
bin_count: u32,
cluster: &Cluster,
) -> Result<(), Error> {
let t1 = Utc::now();
let agg_kind = AggKind::DimXBins1;
let node0 = &cluster.nodes[0];
let beg_date: DateTime<Utc> = beg_date.parse()?;
let end_date: DateTime<Utc> = end_date.parse()?;
let channel_backend = "testbackend";
let channel = Channel {
backend: channel_backend.into(),
name: channel_name.into(),
};
let range = NanoRange::from_date_time(beg_date, end_date);
let query = BinnedQuery::new(channel, range, bin_count, agg_kind);
let url = query.url(&HostPort::from_node(node0));
info!("get_binned_json_0 get {}", url);
let req = hyper::Request::builder()
.method(http::Method::GET)
.uri(url)
.header("Accept", "application/json")
.body(Body::empty())?;
let client = hyper::Client::new();
let res = client.request(req).await?;
if res.status() != StatusCode::OK {
error!("client response {:?}", res);
}
let res = hyper::body::to_bytes(res.into_body()).await?;
let res = String::from_utf8(res.to_vec())?;
info!("result from endpoint: [[[\n{}\n]]]", res);
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
info!("get_binned_json_0 DONE time {} ms", ms);
Ok(())
}