Move workspace crates into subfolder
This commit is contained in:
29
crates/daqbuffer/Cargo.toml
Normal file
29
crates/daqbuffer/Cargo.toml
Normal file
@@ -0,0 +1,29 @@
|
||||
[package]
name = "daqbuffer"
version = "0.3.6"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2021"

[dependencies]
tokio = { version = "1.18.1", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
hyper = "0.14"
http = "0.2"
tracing = "0.1.25"
tracing-subscriber = "0.2.17"
futures-core = "0.3.14"
futures-util = "0.3.14"
bytes = "1.0.1"
#dashmap = "3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
serde_yaml = "0.9.16"
chrono = "0.4"
url = "2.2.2"
clap = { version = "4.0.22", features = ["derive", "cargo"] }
lazy_static = "1.4.0"
err = { path = "../err" }
taskrun = { path = "../taskrun" }
netpod = { path = "../netpod" }
disk = { path = "../disk" }
daqbufp2 = { path = "../daqbufp2" }
190
crates/daqbuffer/src/bin/daqbuffer.rs
Normal file
190
crates/daqbuffer/src/bin/daqbuffer.rs
Normal file
@@ -0,0 +1,190 @@
|
||||
use chrono::DateTime;
|
||||
use chrono::Duration;
|
||||
use chrono::Utc;
|
||||
use clap::Parser;
|
||||
use daqbuffer::cli::ClientType;
|
||||
use daqbuffer::cli::Opts;
|
||||
use daqbuffer::cli::SubCmd;
|
||||
use disk::AggQuerySingleChannel;
|
||||
use disk::SfDbChConf;
|
||||
use err::Error;
|
||||
use netpod::log::*;
|
||||
use netpod::query::CacheUsage;
|
||||
use netpod::DtNano;
|
||||
use netpod::NodeConfig;
|
||||
use netpod::NodeConfigCached;
|
||||
use netpod::ProxyConfig;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
pub fn main() {
|
||||
match taskrun::run(go()) {
|
||||
Ok(()) => {}
|
||||
Err(k) => {
|
||||
error!("{:?}", k);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_ts_rel(s: &str) -> Result<DateTime<Utc>, Error> {
|
||||
let (sign, rem) = if s.starts_with("p") { (1, &s[1..]) } else { (-1, s) };
|
||||
let (fac, rem) = if rem.ends_with("h") {
|
||||
(1000 * 60 * 60, &rem[..rem.len() - 1])
|
||||
} else if rem.ends_with("m") {
|
||||
(1000 * 60, &rem[..rem.len() - 1])
|
||||
} else if rem.ends_with("s") {
|
||||
(1000, &rem[..rem.len() - 1])
|
||||
} else {
|
||||
return Err(Error::with_msg(format!("can not understand relative time: {}", s)))?;
|
||||
};
|
||||
if rem.contains(".") {
|
||||
let num: f32 = rem.parse()?;
|
||||
let dur = Duration::milliseconds((num * fac as f32 * sign as f32) as i64);
|
||||
Ok(Utc::now() + dur)
|
||||
} else {
|
||||
let num: i64 = rem.parse()?;
|
||||
let dur = Duration::milliseconds(num * fac * sign);
|
||||
Ok(Utc::now() + dur)
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_ts(s: &str) -> Result<DateTime<Utc>, Error> {
|
||||
let ret = if s.contains("-") { s.parse()? } else { parse_ts_rel(s)? };
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
async fn go() -> Result<(), Error> {
|
||||
let opts = Opts::parse();
|
||||
match opts.subcmd {
|
||||
SubCmd::Retrieval(subcmd) => {
|
||||
info!("daqbuffer {}", clap::crate_version!());
|
||||
let mut config_file = File::open(&subcmd.config).await?;
|
||||
let mut buf = Vec::new();
|
||||
config_file.read_to_end(&mut buf).await?;
|
||||
if let Ok(cfg) = serde_json::from_slice::<NodeConfig>(b"nothing") {
|
||||
info!("Parsed json config from {}", subcmd.config);
|
||||
let cfg: Result<NodeConfigCached, Error> = cfg.into();
|
||||
let cfg = cfg?;
|
||||
daqbufp2::run_node(cfg).await?;
|
||||
} else if let Ok(cfg) = serde_yaml::from_slice::<NodeConfig>(&buf) {
|
||||
info!("Parsed yaml config from {}", subcmd.config);
|
||||
let cfg: Result<NodeConfigCached, Error> = cfg.into();
|
||||
let cfg = cfg?;
|
||||
daqbufp2::run_node(cfg).await?;
|
||||
} else {
|
||||
return Err(Error::with_msg_no_trace(format!(
|
||||
"can not parse config at {}",
|
||||
subcmd.config
|
||||
)));
|
||||
}
|
||||
}
|
||||
SubCmd::Proxy(subcmd) => {
|
||||
info!("daqbuffer proxy {}", clap::crate_version!());
|
||||
let mut config_file = File::open(&subcmd.config).await?;
|
||||
let mut buf = Vec::new();
|
||||
config_file.read_to_end(&mut buf).await?;
|
||||
let proxy_config: ProxyConfig =
|
||||
serde_yaml::from_slice(&buf).map_err(|e| Error::with_msg_no_trace(e.to_string()))?;
|
||||
info!("Parsed yaml config from {}", subcmd.config);
|
||||
daqbufp2::run_proxy(proxy_config.clone()).await?;
|
||||
}
|
||||
SubCmd::Client(client) => match client.client_type {
|
||||
ClientType::Status(opts) => {
|
||||
daqbufp2::client::status(opts.host, opts.port).await?;
|
||||
}
|
||||
ClientType::Binned(opts) => {
|
||||
let beg = parse_ts(&opts.beg)?;
|
||||
let end = parse_ts(&opts.end)?;
|
||||
let cache_usage = CacheUsage::from_string(&opts.cache)?;
|
||||
daqbufp2::client::get_binned(
|
||||
opts.host,
|
||||
opts.port,
|
||||
opts.backend,
|
||||
opts.channel,
|
||||
beg,
|
||||
end,
|
||||
opts.bins,
|
||||
cache_usage,
|
||||
opts.disk_stats_every_kb,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
},
|
||||
SubCmd::GenerateTestData => {
|
||||
disk::gen::gen_test_data().await?;
|
||||
}
|
||||
SubCmd::Test => (),
|
||||
SubCmd::Version => {
|
||||
println!("{}", clap::crate_version!());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// TODO test data needs to be generated
|
||||
//#[test]
|
||||
#[allow(unused)]
|
||||
fn simple_fetch() {
|
||||
use daqbuffer::err::ErrConv;
|
||||
use netpod::timeunits::*;
|
||||
use netpod::ByteOrder;
|
||||
use netpod::ScalarType;
|
||||
use netpod::SfDbChannel;
|
||||
use netpod::Shape;
|
||||
taskrun::run(async {
|
||||
let _rh = daqbufp2::nodes::require_test_hosts_running()?;
|
||||
let t1 = chrono::Utc::now();
|
||||
let query = AggQuerySingleChannel {
|
||||
channel_config: SfDbChConf {
|
||||
channel: SfDbChannel::from_name("sf-databuffer", "S10BC01-DBAM070:BAM_CH1_NORM"),
|
||||
keyspace: 3,
|
||||
time_bin_size: DtNano::from_ns(DAY),
|
||||
array: true,
|
||||
scalar_type: ScalarType::F64,
|
||||
shape: Shape::Wave(42),
|
||||
byte_order: ByteOrder::Big,
|
||||
compression: true,
|
||||
},
|
||||
timebin: 18720,
|
||||
tb_file_count: 1,
|
||||
buffer_size: 1024 * 8,
|
||||
};
|
||||
let query_string = serde_json::to_string(&query).unwrap();
|
||||
let req = hyper::Request::builder()
|
||||
.method(http::Method::POST)
|
||||
.uri("http://localhost:8360/api/4/parsed_raw")
|
||||
.body(query_string.into())
|
||||
.ec()?;
|
||||
let client = hyper::Client::new();
|
||||
let res = client.request(req).await.ec()?;
|
||||
info!("client response {:?}", res);
|
||||
let mut res_body = res.into_body();
|
||||
use hyper::body::HttpBody;
|
||||
let mut ntot = 0 as u64;
|
||||
loop {
|
||||
match res_body.data().await {
|
||||
Some(Ok(k)) => {
|
||||
//info!("packet.. len {}", k.len());
|
||||
ntot += k.len() as u64;
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
error!("{:?}", e);
|
||||
}
|
||||
None => {
|
||||
info!("response stream exhausted");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
let t2 = chrono::Utc::now();
|
||||
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
|
||||
let throughput = ntot / 1024 * 1000 / ms;
|
||||
info!(
|
||||
"total download {} MB throughput {:5} kB/s",
|
||||
ntot / 1024 / 1024,
|
||||
throughput
|
||||
);
|
||||
Ok::<_, Error>(())
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
75
crates/daqbuffer/src/cli.rs
Normal file
75
crates/daqbuffer/src/cli.rs
Normal file
@@ -0,0 +1,75 @@
|
||||
use clap::ArgAction;
|
||||
use clap::Parser;
|
||||
|
||||
// Top-level command line options for the daqbuffer binary.
// NOTE: plain `//` comments are used on purpose — `///` doc comments on
// clap-derive items would become CLI help/about text and change behavior.
#[derive(Debug, Parser)]
#[command(author, version)]
pub struct Opts {
    // Verbosity counter; each occurrence of --verbose increments it.
    #[arg(long, action(ArgAction::Count))]
    pub verbose: u8,
    // The subcommand to execute (see SubCmd).
    #[command(subcommand)]
    pub subcmd: SubCmd,
}
|
||||
|
||||
// Subcommands of the daqbuffer binary. Variants with payloads carry their
// own argument structs; the rest take no arguments.
#[derive(Debug, Parser)]
pub enum SubCmd {
    // Run a data retrieval node.
    Retrieval(Retrieval),
    // Run a proxy in front of retrieval nodes.
    Proxy(Proxy),
    // Issue client requests against a running node.
    Client(Client),
    // Generate on-disk test data.
    GenerateTestData,
    // No-op test command.
    Test,
    // Print the crate version.
    Version,
}
|
||||
|
||||
// Arguments for the `retrieval` subcommand.
#[derive(Debug, Parser)]
pub struct Retrieval {
    // Path to the node config file (json or yaml).
    #[arg(long)]
    pub config: String,
}
|
||||
|
||||
// Arguments for the `proxy` subcommand.
#[derive(Debug, Parser)]
pub struct Proxy {
    // Path to the proxy config file (yaml).
    #[arg(long)]
    pub config: String,
}
|
||||
|
||||
// Arguments for the `client` subcommand: dispatches to a client type.
#[derive(Debug, Parser)]
pub struct Client {
    // Which client request to issue (see ClientType).
    #[command(subcommand)]
    pub client_type: ClientType,
}
|
||||
|
||||
// Kinds of client requests supported by the `client` subcommand.
#[derive(Debug, Parser)]
pub enum ClientType {
    // Fetch binned data from a node.
    Binned(BinnedClient),
    // Query node status.
    Status(StatusClient),
}
|
||||
|
||||
// Arguments for the status client: where to reach the node.
#[derive(Debug, Parser)]
pub struct StatusClient {
    // Hostname of the node to query.
    #[arg(long)]
    pub host: String,
    // Port of the node's HTTP API.
    #[arg(long)]
    pub port: u16,
}
|
||||
|
||||
// Arguments for the binned-data client.
#[derive(Debug, Parser)]
pub struct BinnedClient {
    // Hostname of the node to query.
    #[arg(long)]
    pub host: String,
    // Port of the node's HTTP API.
    #[arg(long)]
    pub port: u16,
    // Backend (facility) name the channel belongs to.
    #[arg(long)]
    pub backend: String,
    // Channel name to fetch.
    #[arg(long)]
    pub channel: String,
    // Begin timestamp; absolute or relative (parsed by parse_ts in the binary).
    #[arg(long)]
    pub beg: String,
    // End timestamp; absolute or relative.
    #[arg(long)]
    pub end: String,
    // Number of bins requested.
    #[arg(long)]
    pub bins: u32,
    // Cache usage mode; default "use" (interpreted by CacheUsage::from_string).
    #[arg(long, default_value = "use")]
    pub cache: String,
    // Emit disk statistics every N kB; default 1048576.
    #[arg(long, default_value = "1048576")]
    pub disk_stats_every_kb: u32,
}
|
||||
18
crates/daqbuffer/src/err.rs
Normal file
18
crates/daqbuffer/src/err.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
/// Convert results carrying foreign error types into this workspace's
/// `err::Error`, keeping the success value untouched.
pub trait ErrConv<T> {
    /// Map the error variant into `err::Error`.
    fn ec(self) -> Result<T, ::err::Error>;
}
|
||||
|
||||
/// Marker for error types that may be converted to `err::Error` via their
/// string representation (see the blanket `ErrConv` impl below in this file).
pub trait Convable: ToString {}
|
||||
|
||||
impl<T, E: Convable> ErrConv<T> for Result<T, E> {
|
||||
fn ec(self) -> Result<T, err::Error> {
|
||||
match self {
|
||||
Ok(x) => Ok(x),
|
||||
Err(e) => Err(::err::Error::from_string(e.to_string())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Third-party error types that may be converted via `ec`.
impl Convable for http::Error {}
impl Convable for hyper::Error {}
impl Convable for serde_yaml::Error {}
|
||||
2
crates/daqbuffer/src/lib.rs
Normal file
2
crates/daqbuffer/src/lib.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
//! Command line interface and error-conversion helpers for the daqbuffer
//! binary.

pub mod cli;
pub mod err;
|
||||
Reference in New Issue
Block a user