Relative CLI datetime, cache clear API, work on cache read
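The relative datetime syntax accepted by the CLI is handled by the new `parse_ts_rel` in the retrieval client changes below: a number with an `s`, `m` or `h` suffix counts backwards from now, and a leading `p` points into the future. A minimal standalone sketch of that convention (illustrative only; `parse_rel_sketch` and the `main` harness are not part of this commit):

    use chrono::{DateTime, Duration, Utc};

    // "6h" = six hours ago, "p30m" = 30 minutes from now.
    fn parse_rel_sketch(s: &str) -> Option<DateTime<Utc>> {
        let (sign, rem) = match s.strip_prefix('p') {
            Some(r) => (1.0, r),
            None => (-1.0, s),
        };
        // The suffix picks the unit; fac is milliseconds per unit.
        let (fac, rem) = if let Some(r) = rem.strip_suffix('h') {
            (3_600_000.0, r)
        } else if let Some(r) = rem.strip_suffix('m') {
            (60_000.0, r)
        } else if let Some(r) = rem.strip_suffix('s') {
            (1_000.0, r)
        } else {
            return None;
        };
        let num: f64 = rem.parse().ok()?;
        Some(Utc::now() + Duration::milliseconds((num * fac * sign) as i64))
    }

    fn main() {
        // e.g. a request covering roughly the last six hours: --beg 6h --end 0s
        println!("{:?}", parse_rel_sketch("6h"));
        println!("{:?}", parse_rel_sketch("p30m"));
    }
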
@@ -356,7 +356,7 @@ where
{
buf: Vec<u8>,
file: Option<File>,
_mark: std::marker::PhantomData<T>,
_marker: std::marker::PhantomData<T>,
}

impl<T> ReadPbv<T>
@@ -367,7 +367,7 @@ where
Self {
buf: vec![],
file: Some(file),
_mark: std::marker::PhantomData::default(),
_marker: std::marker::PhantomData::default(),
}
}
}
@@ -380,26 +380,29 @@ where

fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
use Poll::*;
let mut buf = vec![];
let mut dst = ReadBuf::new(&mut buf);
let fp = self.file.as_mut().unwrap();
let f = Pin::new(fp);
match File::poll_read(f, cx, &mut dst) {
Ready(res) => match res {
Ok(_) => {
if dst.filled().len() > 0 {
self.buf.extend_from_slice(&mut buf);
Pending
} else {
match T::from_buf(&mut self.buf) {
Ok(item) => Ready(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))),
Err(e) => Ready(Err(e)),
'outer: loop {
// TODO make buffer size a parameter:
let mut buf = vec![0; 4096];
let mut dst = ReadBuf::new(&mut buf);
let fp = self.file.as_mut().unwrap();
let f = Pin::new(fp);
break match File::poll_read(f, cx, &mut dst) {
Ready(res) => match res {
Ok(_) => {
if dst.filled().len() > 0 {
self.buf.extend_from_slice(&mut buf);
continue 'outer;
} else {
match T::from_buf(&mut self.buf) {
Ok(item) => Ready(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))),
Err(e) => Ready(Err(e)),
}
}
}
}
Err(e) => Ready(Err(e.into())),
},
Pending => Pending,
Err(e) => Ready(Err(e.into())),
},
Pending => Pending,
};
}
}
}
@@ -415,6 +418,9 @@ impl ReadableFromFile for MinMaxAvgScalarBinBatch {
Ok(ReadPbv::new(file))
}
fn from_buf(buf: &[u8]) -> Result<Self, Error> {
let mut h = crc32fast::Hasher::new();
h.update(&buf);
info!("try to deserialize from buf len {} crc {}", buf.len(), h.finalize());
let dec: MinMaxAvgScalarBinBatch = serde_cbor::from_slice(&buf)?;
Ok(dec)
}
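
The ReadPbv::poll rework above drops the early `Pending` return after a partial read in favor of an explicit loop: keep polling the file until it reports EOF, then deserialize the accumulated buffer. A compact sketch of that shape (hypothetical `ReadToEnd` type, not the actual ReadPbv; deserialization is left to the caller):

    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};
    use tokio::fs::File;
    use tokio::io::{AsyncRead, ReadBuf};

    struct ReadToEnd {
        buf: Vec<u8>,
        file: File,
    }

    impl Future for ReadToEnd {
        type Output = std::io::Result<Vec<u8>>;

        fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
            loop {
                // TODO make buffer size a parameter (same note as above).
                let mut chunk = vec![0; 4096];
                let mut dst = ReadBuf::new(&mut chunk);
                let me = &mut *self;
                match Pin::new(&mut me.file).poll_read(cx, &mut dst) {
                    Poll::Ready(Ok(())) => {
                        if dst.filled().is_empty() {
                            // EOF: everything is buffered, hand it to the parser.
                            return Poll::Ready(Ok(std::mem::take(&mut me.buf)));
                        }
                        me.buf.extend_from_slice(dst.filled());
                        // Loop again instead of returning Pending after a successful read.
                    }
                    Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
                    Poll::Pending => return Poll::Pending,
                }
            }
        }
    }
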
@@ -9,22 +9,22 @@ use chrono::{DateTime, Utc};
use err::Error;
use futures_core::Stream;
use futures_util::{pin_mut, StreamExt};
use hyper::Response;
use hyper::{Body, Response};
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::{
AggKind, ByteSize, Channel, Cluster, NanoRange, NodeConfigCached, PerfOpts, PreBinnedPatchCoord, ToNanos,
};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::collections::{BTreeMap, VecDeque};
use std::fmt::{Display, Formatter};
use std::future::Future;
use std::io;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::{Context, Poll};
use tiny_keccak::Hasher;
use tokio::io::{AsyncRead, ReadBuf};
#[allow(unused_imports)]
use tracing::{debug, error, info, trace, warn};

pub mod pbv;
pub mod pbvfs;
@@ -275,19 +275,19 @@ where
node_config.ix, patch_node_ix
)))
} else {
let ret = super::cache::pbv::pre_binned_value_byte_stream_new(query, node_config, stream_kind);
let ret = crate::cache::pbv::pre_binned_value_byte_stream_new(query, node_config, stream_kind);
Ok(ret)
}
}

pub struct HttpBodyAsAsyncRead {
inp: Response<hyper::Body>,
inp: Response<Body>,
left: Bytes,
rp: usize,
}

impl HttpBodyAsAsyncRead {
pub fn new(inp: hyper::Response<hyper::Body>) -> Self {
pub fn new(inp: Response<Body>) -> Self {
Self {
inp,
left: Bytes::new(),
@@ -297,7 +297,7 @@ impl HttpBodyAsAsyncRead {
}

impl AsyncRead for HttpBodyAsAsyncRead {
fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut ReadBuf) -> Poll<std::io::Result<()>> {
fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut ReadBuf) -> Poll<io::Result<()>> {
use hyper::body::HttpBody;
use Poll::*;
if self.left.len() != 0 {
@@ -329,8 +329,8 @@ impl AsyncRead for HttpBodyAsAsyncRead {
Ready(Ok(()))
}
}
Ready(Some(Err(e))) => Ready(Err(std::io::Error::new(
std::io::ErrorKind::Other,
Ready(Some(Err(e))) => Ready(Err(io::Error::new(
io::ErrorKind::Other,
Error::with_msg(format!("Received by HttpBodyAsAsyncRead: {:?}", e)),
))),
Ready(None) => Ready(Ok(())),
@@ -551,13 +551,22 @@ where
};
let path = cfd.path(&node_config);
let enc = serde_cbor::to_vec(&values)?;
info!("Writing cache file size {}\n{:?}\npath: {:?}", enc.len(), cfd, path);
let mut h = crc32fast::Hasher::new();
h.update(&enc);
info!(
"Writing cache file len {} crc {}\n{:?}\npath: {:?}",
enc.len(),
h.finalize(),
cfd,
path
);
tokio::fs::create_dir_all(path.parent().unwrap()).await?;
let res = tokio::task::spawn_blocking({
let path = path.clone();
move || {
use fs2::FileExt;
use std::io::Write;
use io::Write;
// TODO write to random tmp file first and then move into place.
let mut f = std::fs::OpenOptions::new()
.create(true)
.truncate(true)
@@ -573,3 +582,83 @@ where
let ret = WrittenPbCache { bytes: res as u64 };
Ok(ret)
}

#[derive(Serialize)]
pub struct ClearCacheAllResult {
pub log: Vec<String>,
}

pub async fn clear_cache_all(node_config: &NodeConfigCached, dry: bool) -> Result<ClearCacheAllResult, Error> {
let mut log = vec![];
log.push(format!("begin at {:?}", chrono::Utc::now()));
if dry {
log.push(format!("dry run"));
}
let mut dirs = VecDeque::new();
let mut stack = VecDeque::new();
stack.push_front(node_config.node.data_base_path.join("cache"));
loop {
match stack.pop_front() {
Some(path) => {
let mut rd = tokio::fs::read_dir(path).await?;
while let Some(entry) = rd.next_entry().await? {
let path = entry.path();
match path.to_str() {
Some(_pathstr) => {
let meta = path.symlink_metadata()?;
//log.push(format!("len {:7} pathstr {}", meta.len(), pathstr,));
let filename_str = path.file_name().unwrap().to_str().unwrap();
if filename_str.ends_with("..") || filename_str.ends_with(".") {
log.push(format!("ERROR encountered . or .."));
} else {
if meta.is_dir() {
stack.push_front(path.clone());
dirs.push_front((meta.len(), path));
} else if meta.is_file() {
log.push(format!("remove file len {:7} {}", meta.len(), path.to_string_lossy()));
if !dry {
match tokio::fs::remove_file(&path).await {
Ok(_) => {}
Err(e) => {
log.push(format!(
"can not remove file {} {:?}",
path.to_string_lossy(),
e
));
}
}
}
} else {
log.push(format!("not file, note dir"));
}
}
}
None => {
log.push(format!("Invalid utf-8 path encountered"));
}
}
}
}
None => break,
}
}
log.push(format!(
"start to remove {} dirs at {:?}",
dirs.len(),
chrono::Utc::now()
));
for (len, path) in dirs {
log.push(format!("remove dir len {} {}", len, path.to_string_lossy()));
if !dry {
match tokio::fs::remove_dir(&path).await {
Ok(_) => {}
Err(e) => {
log.push(format!("can not remove dir {} {:?}", path.to_string_lossy(), e));
}
}
}
}
log.push(format!("done at {:?}", chrono::Utc::now()));
let ret = ClearCacheAllResult { log };
Ok(ret)
}

@@ -7,7 +7,7 @@ use nom::error::ErrorKind;
use serde::{Deserialize, Serialize, Serializer};
use std::fmt::Debug;
use std::net::AddrParseError;
use std::num::ParseIntError;
use std::num::{ParseFloatError, ParseIntError};
use std::string::FromUtf8Error;
use tokio::task::JoinError;

@@ -161,6 +161,12 @@ impl From<ParseIntError> for Error {
}
}

impl From<ParseFloatError> for Error {
fn from(k: ParseFloatError) -> Self {
Self::with_msg(k.to_string())
}
}

impl From<FromUtf8Error> for Error {
fn from(k: FromUtf8Error) -> Self {
Self::with_msg(k.to_string())

@@ -2,6 +2,7 @@ use crate::response;
use err::Error;
use http::{Method, StatusCode};
use hyper::{Body, Client, Request, Response};
use netpod::{Node, NodeConfigCached};
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;

@@ -43,7 +44,7 @@ async fn process_answer(res: Response<Body>) -> Result<JsonValue, Error> {
}
}

pub async fn gather_json(req: Request<Body>, pathpre: &str) -> Result<Response<Body>, Error> {
pub async fn gather_json_from_hosts(req: Request<Body>, pathpre: &str) -> Result<Response<Body>, Error> {
let (part_head, part_body) = req.into_parts();
let bodyslice = hyper::body::to_bytes(part_body).await?;
let gather_from: GatherFrom = serde_json::from_slice(&bodyslice)?;
@@ -99,3 +100,65 @@ pub async fn gather_json(req: Request<Body>, pathpre: &str) -> Result<Response<B
.body(serde_json::to_string(&Jres { hosts: a })?.into())?;
Ok(res)
}

pub async fn gather_get_json(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
let (head, body) = req.into_parts();
let _bodyslice = hyper::body::to_bytes(body).await?;
let pathpre = "/api/4/gather/";
let pathsuf = &head.uri.path()[pathpre.len()..];
let spawned: Vec<_> = node_config
.node_config
.cluster
.nodes
.iter()
.map(|node| {
let uri = format!("http://{}:{}/api/4/{}", node.host, node.port, pathsuf);
let req = Request::builder().method(Method::GET).uri(uri);
let req = req.header("x-node-from-name", format!("{}", node_config.node_config.name));
let req = req.header(http::header::ACCEPT, "application/json");
let req = req.body(Body::empty());
use futures_util::select;
use futures_util::FutureExt;
use std::time::Duration;
use tokio::time::sleep;
let task = tokio::spawn(async move {
select! {
_ = sleep(Duration::from_millis(1500)).fuse() => {
Err(Error::with_msg("timeout"))
}
res = Client::new().request(req?).fuse() => Ok(process_answer(res?).await?)
}
});
(node.clone(), task)
})
.collect();
#[derive(Serialize)]
struct Hres {
node: Node,
res: JsonValue,
}
#[derive(Serialize)]
struct Jres {
hosts: Vec<Hres>,
}
let mut a = vec![];
for (node, jh) in spawned {
let res = match jh.await {
Ok(k) => match k {
Ok(k) => k,
Err(e) => JsonValue::String(format!("ERROR({:?})", e)),
},
Err(e) => JsonValue::String(format!("ERROR({:?})", e)),
};
let v = Hres {
node: node.clone(),
res,
};
a.push(v);
}
let a = a;
let res = response(StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/json")
.body(serde_json::to_string(&Jres { hosts: a })?.into())?;
Ok(res)
}

@@ -1,3 +1,4 @@
use crate::gather::gather_get_json;
use bytes::Bytes;
use disk::binned::BinnedStreamKindScalar;
use disk::cache::{BinnedQuery, PreBinnedQuery};
@@ -88,17 +89,19 @@ macro_rules! static_http {
($path:expr, $tgt:expr, $tgtex:expr, $ctype:expr) => {
if $path == concat!("/api/4/documentation/", $tgt) {
let c = include_bytes!(concat!("../static/documentation/", $tgtex));
return Ok(response(StatusCode::OK)
let ret = response(StatusCode::OK)
.header("content-type", $ctype)
.body(Body::from(&c[..]))?);
.body(Body::from(&c[..]))?;
return Ok(ret);
}
};
($path:expr, $tgt:expr, $ctype:expr) => {
if $path == concat!("/api/4/documentation/", $tgt) {
let c = include_bytes!(concat!("../static/documentation/", $tgt));
return Ok(response(StatusCode::OK)
let ret = response(StatusCode::OK)
.header("content-type", $ctype)
.body(Body::from(&c[..]))?);
.body(Body::from(&c[..]))?;
return Ok(ret);
}
};
}
@@ -136,6 +139,18 @@ async fn data_api_proxy_try(req: Request<Body>, node_config: &NodeConfigCached)
} else {
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
}
} else if path.starts_with("/api/4/gather/") {
if req.method() == Method::GET {
Ok(gather_get_json(req, &node_config).await?)
} else {
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
}
} else if path == "/api/4/clear_cache" {
if req.method() == Method::GET {
Ok(clear_cache_all(req, &node_config).await?)
} else {
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
}
} else if path.starts_with("/api/4/documentation/") {
if req.method() == Method::GET {
static_http!(path, "", "index.html", "text/html");
@@ -342,3 +357,16 @@ pub async fn random_channel(req: Request<Body>, node_config: &NodeConfigCached)
let ret = response(StatusCode::OK).body(Body::from(ret))?;
Ok(ret)
}

pub async fn clear_cache_all(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
let (head, _body) = req.into_parts();
let dry = match head.uri.query() {
Some(q) => q.contains("dry"),
None => false,
};
let res = disk::cache::clear_cache_all(node_config, dry).await?;
let ret = response(StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/json")
.body(Body::from(serde_json::to_string(&res)?))?;
Ok(ret)
}
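
For reference, the new handler above answers GET requests on `/api/4/clear_cache`, and any query string containing `dry` turns the call into a dry run. A hedged sketch of a standalone client (assumes a tokio runtime and the hyper client API already used in this crate; host and port mirror the `api_base` default from the status page script further below):

    use hyper::{Client, Uri};

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        // "dry" anywhere in the query string requests a dry run.
        let uri: Uri = "http://localhost:8059/api/4/clear_cache?dry".parse()?;
        let res = Client::new().get(uri).await?;
        println!("status: {}", res.status());
        // The body is the JSON-serialized ClearCacheAllResult log.
        let body = hyper::body::to_bytes(res.into_body()).await?;
        println!("{}", String::from_utf8_lossy(&body));
        Ok(())
    }
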
@@ -10,7 +10,7 @@
</head>

<body>
<h1>Retrieval 4.0 Documentation</h1>
<h1>Retrieval Documentation</h1>

<h2>HTTP API documentation</h2>

@@ -27,172 +27,73 @@
<p><strong>URL:</strong> https://data-api.psi.ch/api/4/binned</p>
<p><strong>Query parameters:</strong></p>
<ul>
<li>channel_backend</li>
<li>channel_name</li>
<li>beg_date</li>
<li>end_date</li>
<li>bin_count</li>
<li>channel_backend (e.g. "sf-databuffer")</li>
<li>channel_name (e.g. "SLAAR-LSCP4-LAS6891:CH7:1")</li>
<li>beg_date (e.g. "2021-05-26T07:10:00.000Z")</li>
<li>end_date (e.g. "2021-05-26T07:16:00.000Z")</li>
<li>bin_count (e.g. "6")</li>
</ul>
<p><strong>Request header:</strong> "Accept" must be "application/json"</p>
<p><strong>Example:</strong></p>
<pre>http://sf-daqbuf-21:8380/api/4/binned?channel_backend=sf-databuffer&channel_name=SLAAR-LSCP4-LAS6891:CH7:1&beg_date=2021-05-21T00:00:00.000Z&end_date=2021-05-21T02:00:00.000Z&bin_count=20</pre>
<p><strong>Result body example:</strong></p>
<pre>
[
{
"backend": "sf-databuffer",
"channels": [
"SARES20-LSCP9:CH0:2",
"SARES20-LSCP9:CH0:1"
]
},
{
"backend": "hipa-archive",
"channels": [],
"error": {
"code": "Error" // can be: "Error" | "Timeout" (more to be added in the future)
}
}
]
</pre>

<h4>CURL example:</h4>
<pre>
curl -H 'Accept: application/json' 'http://sf-daqbuf-21:8380/api/4/binned?channel_backend=sf-databuffer&channel_name=SLAAR-LSCP4-LAS6891:CH7:1&beg_date=2021-05-21T00:00:00.000Z&end_date=2021-05-21T02:00:00.000Z&bin_count=20'
curl -H 'Accept: application/json' 'http://sf-daqbuf-21:8380/api/4/binned?channel_backend=sf-databuffer
&channel_name=SLAAR-LSCP4-LAS6891:CH7:1&beg_date=2021-05-25T00:00:00.000Z&end_date=2021-05-26T00:00:00.000Z&bin_count=3'
</pre>
<p>Answer:</p>

<h4>Partial result</h4>
<p>Note the keys <strong>continue_at</strong> and <strong>missing_bins</strong>.</p>
<pre>
{
"counts": [
458,
459,
458,
459,
459,
458,
459,
458,
459,
459,
458,
459,
458,
459,
458,
459,
459,
458,
459,
458,
459,
458,
459,
459
],
"ts_bin_edges": [
"2021-05-21T00:00:00.000Z",
"2021-05-21T00:05:00.000Z",
"2021-05-21T00:10:00.000Z",
"2021-05-21T00:15:00.000Z",
"2021-05-21T00:20:00.000Z",
"2021-05-21T00:25:00.000Z",
"2021-05-21T00:30:00.000Z",
"2021-05-21T00:35:00.000Z",
"2021-05-21T00:40:00.000Z",
"2021-05-21T00:45:00.000Z",
"2021-05-21T00:50:00.000Z",
"2021-05-21T00:55:00.000Z",
"2021-05-21T01:00:00.000Z",
"2021-05-21T01:05:00.000Z",
"2021-05-21T01:10:00.000Z",
"2021-05-21T01:15:00.000Z",
"2021-05-21T01:20:00.000Z",
"2021-05-21T01:25:00.000Z",
"2021-05-21T01:30:00.000Z",
"2021-05-21T01:35:00.000Z",
"2021-05-21T01:40:00.000Z",
"2021-05-21T01:45:00.000Z",
"2021-05-21T01:50:00.000Z",
"2021-05-21T01:55:00.000Z",
"2021-05-21T02:00:00.000Z"
]
"continue_at": "2021-05-25T16:00:00.000Z",
"missing_bins": 2,
"avgs": [
340.87640380859375,
340.7442321777344,
340.58685302734375,
341.04608154296875
],
"counts": [
143076,
143077,
143076,
143076
],
"maxs": [
452,
452,
459,
458
],
"mins": [
231,
240,
239,
239
],
"ts_bin_edges": [
"2021-05-25T00:00:00.000Z",
"2021-05-25T04:00:00.000Z",
"2021-05-25T08:00:00.000Z",
"2021-05-25T12:00:00.000Z",
"2021-05-25T16:00:00.000Z"
]
}
</pre>

<h4>Complete result</h4>
<p>A complete result will not have a <strong>continue_at</strong> key.</p>

<h4>Finalised range</h4>
<p>If the server can determine that no more data will be added to the requested time range
then it will add the flag <strong>finalised_range</strong> to the response.</p>

<a name="channel-search-configs"></a>
<h2>Channel Search, with return of configuration information</h2>
<p><strong>Method:</strong> POST</p>
<p><strong>URL:</strong> https://data-api.psi.ch/api/1/channels/config</p>
<p><strong>Request body:</strong> JSON with search parameters</p>
<p><strong>Request body outline:</strong></p>
<pre>
{
"regex": "[Optional: Regular expression to search in channel name]",
"sourceRegex": "[Optional: Search in sourcename of the channel]",
"descriptionRegex": "[Optional: Search in the channel's description]",
"backends": ["gls-archive", "hipa-archive", "sf-databuffer"]
}
</pre>
<p><strong>Result body example:</strong></p>
<p>Assuming that "hipa-archive" would be unavailable:</p>
<pre>
[
{
"backend": "sf-databuffer",
"channels": [
{
"backend": "sf-databuffer",
"description": "",
"name": "SARES20-LSCP9:CH0:2",
"shape": [
512
],
"source": "tcp://SARES20-CVME-01:9999",
"type": "Float32",
"unit": ""
},
{
"backend": "sf-databuffer",
"description": "",
"name": "SARES20-LSCP9:CH0:1",
"shape": [
512
],
"source": "tcp://SARES20-CVME-01:9999",
"type": "Int16",
"unit": ""
}
]
},
{
"backend": "hipa-archive",
"channels": [],
"error": {
"code": "Error" // can be: "Error" | "Timeout" (more to be added in the future)
}
}
]
</pre>
<p>Notes:</p>
<p>The search constraints are AND'ed together.</p>
<p>If some backend responds with an error, that error is indicated by the error key in the affected backend (see example above).</p>

<h4>CURL example:</h4>
<pre>
QUERY='{ "regex": "LSCP9:CH0", "backends": ["sf-databuffer"] }'
curl -H 'Content-Type: application/json' -H 'Accept: application/json' -d "$QUERY" https://data-api.psi.ch/api/1/channels/config
</pre>

<h2>Feedback and comments</h2>
<p>Feedback is very much appreciated:</p>
<h2>Feedback and comments very much appreciated!</h2>
<p>dominik.werder@psi.ch</p>
<p>or please assign me a JIRA ticket.</p>

<div id="footer"></div>

</body>
</html>

@@ -24,29 +24,6 @@ function load_status_main(ev) {
.then(kk => {
const js = kk[0];
const ts2 = kk[1];
if (false) {
const response = document.getElementById("response");
// Different ways to do the same thing:
//response.querySelectorAll("*").forEach(n => n.remove());
//response.innerHTML = "";
response.textContent = "";
while (response.firstChild) {
response.removeChild(response.lastChild);
response.lastChild.remove();
}
response.replaceChildren();
//response.replaceChild();
//JSON.stringify(js, null, 2);
//for (let machine of js) {
// console.log(typeof(machine));
//}
const dat2 = js.hosts;
sort_default(dat2);
response.appendChild(render_retrieval_metrics_as_table(dat2));
response.appendChild(render_host_memory_as_table(dat2));
//response.appendChild(render_host_memStd_as_table(dat2));
response.appendChild(render_host_bufferPools_as_table(dat2));
}
{
let b = document.getElementById("load_status");
b.innerHTML = "Loaded (" + (ts2 - ts1) + " ms)";
@@ -60,7 +37,78 @@ function load_status_main(ev) {
});
}

var g_config = {
function clear_element() {
// different methods:
response.querySelectorAll("*").forEach(n => n.remove());
response.innerHTML = "";
response.replaceChildren();
response.replaceChild();
}

function sort_default(hosts) {
hosts.sort((a, b) => {
if (a.inst < b.inst) return -1;
if (a.inst > b.inst) return +1;
if (a.host < b.host) return -1;
if (a.host > b.host) return +1;
});
}

function show_json_response(js) {
const response = document.getElementById("response");
response.textContent = "";
while (response.firstChild) {
response.removeChild(response.lastChild);
//response.lastChild.remove();
}
//JSON.stringify(js, null, 2);
//for (let machine of js) {
// console.log(typeof(machine));
//}
//const dat2 = js.hosts;
//sort_default(dat2);
response.textContent = JSON.stringify(js, null, 2);
}

function clear_cache_all(ev) {
const ts1 = Date.now();
const dom_ev = ev;
const b = ev.target;
b.classList.remove("loaded");
b.classList.add("loading");
b.value = b.dataset.btnLabel + " (loading)";
const body = {
hosts: "",
};
const fetch_init = {
method: "get",
/*headers: {
retrieval_instance: document.getElementById("retrieval_instance").value,
},*/
/*body: JSON.stringify(body),*/
};
fetch(g_config.api_base + "gather/clear_cache", fetch_init)
.then(x => Promise.all([x.json(), Date.now()]))
.then(g_config.ui_delay_test)
.then(g_config.ui_delay_blink)
.then(kk => {
const js = kk[0];
show_json_response(js);
const ts2 = kk[1];
{
let b = document.getElementById("load_status");
b.innerHTML = "Loaded (" + (ts2 - ts1) + " ms)";
}
{
let b = dom_ev.target;
b.classList.remove("loading");
b.classList.add("loaded");
b.setAttribute("value", b.dataset.btnLabel);
}
});
}

const g_config = {
api_base: "http://localhost:8059/api/4/",
ui_delay_test: x => x,
ui_delay_blink: x => new Promise(resolve => setTimeout(() => resolve(x), 50)),
@@ -72,6 +120,8 @@ function config_for_test() {
}

function init() {
// keydown event..
document.getElementById("btn_clear_cache").addEventListener("click", clear_cache_all)
}

window.addEventListener("load", ev => {
@@ -79,7 +129,7 @@ window.addEventListener("load", ev => {
config_for_test();
}
init();
const init_load_ele = document.getElementById("btn_load");
const init_load_ele = document.getElementById("none");
if (init_load_ele != null) {
init_load_ele.click();
}

@@ -1,5 +1,5 @@
<!doctype html>
<html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<title>Main Status</title>
@@ -8,11 +8,15 @@
<script src="script.js" type="text/javascript"></script>
</head>
<body>

<h1>Retrieval - Main Status</h1>

<p>attach the event handlers via the on window load main event handler.</p>

<p class="buttonrow">
<input type="button" id="btn_load" data-btn-label="Reload Overview" value="Reload" onclick="load_status_main(event)"/>
<input id="retrieval_instance" type="text" value="main">
<input type="button" id="btn_load" data-btn-label="Reload" value="Reload"/>
<input type="button" id="btn_clear_cache" data-btn-label="Clear Cache" value="Clear Cache"/>
<!--<input id="retrieval_instance" type="text" value="main">-->
</p>

<p id="load_status"></p>

@@ -23,7 +23,7 @@ p {

body {
font-family: monospace;
font-size: 80%;
font-size: 100%;
line-height: 1.4;
color: #000;
}
@@ -72,12 +72,12 @@ code#output {
}

p#load_status {
margin-top: 10em;
margin-top: 0em;
}

p.buttonrow {
position: fixed;
margin-top: 20px;
--position: fixed;
--margin-top: 20px;
}

div#footer {

@@ -1,9 +1,9 @@
use chrono::{DateTime, Duration, Utc};
use disk::cache::CacheUsage;
use err::Error;
use netpod::log::*;
use netpod::{NodeConfig, NodeConfigCached};
use tokio::io::AsyncReadExt;
#[allow(unused_imports)]
use tracing::{debug, error, info, trace, warn};

pub fn main() {
match taskrun::run(go()) {
@@ -16,6 +16,28 @@ pub fn main() {
}
}

fn parse_ts_rel(s: &str) -> Result<DateTime<Utc>, Error> {
let (sign, rem) = if s.starts_with("p") { (1, &s[1..]) } else { (-1, s) };
let (fac, rem) = if rem.ends_with("h") {
(1000 * 60 * 60, &rem[..rem.len() - 1])
} else if rem.ends_with("m") {
(1000 * 60, &rem[..rem.len() - 1])
} else if rem.ends_with("s") {
(1000, &rem[..rem.len() - 1])
} else {
return Err(Error::with_msg(format!("can not understand relative time: {}", s)))?;
};
if rem.contains(".") {
let num: f32 = rem.parse()?;
let dur = Duration::milliseconds((num * fac as f32 * sign as f32) as i64);
Ok(Utc::now() + dur)
} else {
let num: i64 = rem.parse()?;
let dur = Duration::milliseconds(num * fac * sign);
Ok(Utc::now() + dur)
}
}

async fn go() -> Result<(), Error> {
use clap::Clap;
use retrieval::cli::{ClientType, Opts, SubCmd};
@@ -38,14 +60,24 @@ async fn go() -> Result<(), Error> {
retrieval::client::status(opts.host, opts.port).await?;
}
ClientType::Binned(opts) => {
let beg = opts.beg.parse()?;
let end = opts.end.parse()?;
let cache_usage = if opts.ignore_cache {
CacheUsage::Ignore
} else if opts.recreate_cache {
CacheUsage::Recreate
let beg = if opts.beg.contains("-") {
opts.beg.parse()?
} else {
parse_ts_rel(&opts.beg)?
};
let end = if opts.end.contains("-") {
opts.end.parse()?
} else {
parse_ts_rel(&opts.end)?
};
let cache_usage = if opts.cache == "ignore" {
CacheUsage::Ignore
} else if opts.cache == "recreate" {
CacheUsage::Recreate
} else if opts.cache == "use" {
CacheUsage::Use
} else {
return Err(Error::with_msg(format!("can not interpret --cache {}", opts.cache)));
};
retrieval::client::get_binned(
opts.host,

@@ -58,10 +58,8 @@ pub struct BinnedClient {
pub end: String,
#[clap(long)]
pub bins: u32,
#[clap(long)]
pub ignore_cache: bool,
#[clap(long)]
pub recreate_cache: bool,
#[clap(long, default_value = "use")]
pub cache: String,
#[clap(long, default_value = "1048576")]
pub disk_stats_every_kb: u32,
}