Add I/O query parameters and a file-download test

This commit is contained in:
Dominik Werder
2022-03-04 19:37:54 +01:00
parent d67608fabc
commit e9b87bf9fa
20 changed files with 579 additions and 183 deletions

View File

@@ -11,7 +11,7 @@ use items::{RangeCompletableItem, Sitemty, StreamItem};
use itertools::Itertools;
use netpod::query::RawEventsQuery;
use netpod::timeunits::SEC;
use netpod::{log::*, ACCEPT_ALL};
use netpod::{log::*, DiskIoTune, ACCEPT_ALL};
use netpod::{ByteSize, Channel, FileIoBufferSize, NanoRange, NodeConfigCached, PerfOpts, Shape, APP_OCTET};
use netpod::{ChannelSearchQuery, ChannelSearchResult, ProxyConfig, APP_JSON};
use parse::channelconfig::{
@@ -477,12 +477,27 @@ pub struct Api1Query {
channels: Vec<String>,
range: Api1Range,
// All following parameters are private and not to be used
#[serde(rename = "fileIoBufferSize", default)]
#[serde(default)]
file_io_buffer_size: Option<FileIoBufferSize>,
#[serde(default)]
decompress: bool,
#[serde(rename = "eventsMax", default = "u64_max", skip_serializing_if = "is_u64_max")]
#[serde(default = "u64_max", skip_serializing_if = "is_u64_max")]
events_max: u64,
#[serde(default)]
io_queue_len: u64,
}
impl Api1Query {
    /// Derive the effective disk I/O tuning for this query.
    ///
    /// Starts from `DiskIoTune::default()` and applies the per-request
    /// overrides: an explicit file I/O buffer size (if present) replaces
    /// the default read buffer length, and a non-zero `io_queue_len`
    /// replaces the default read queue depth (zero means "use default").
    pub fn disk_io_tune(&self) -> DiskIoTune {
        let mut tune = DiskIoTune::default();
        if let Some(size) = &self.file_io_buffer_size {
            tune.read_buffer_len = size.0;
        }
        match self.io_queue_len {
            0 => {}
            n => tune.read_queue_len = n as usize,
        }
        tune
    }
}
fn u64_max() -> u64 {
@@ -511,7 +526,7 @@ pub struct DataApiPython3DataStream {
chan_ix: usize,
chan_stream: Option<Pin<Box<dyn Stream<Item = Result<BytesMut, Error>> + Send>>>,
config_fut: Option<Pin<Box<dyn Future<Output = Result<Config, Error>> + Send>>>,
file_io_buffer_size: FileIoBufferSize,
disk_io_tune: DiskIoTune,
do_decompress: bool,
#[allow(unused)]
event_count: u64,
@@ -526,7 +541,7 @@ impl DataApiPython3DataStream {
pub fn new(
range: NanoRange,
channels: Vec<Channel>,
file_io_buffer_size: FileIoBufferSize,
disk_io_tune: DiskIoTune,
do_decompress: bool,
events_max: u64,
status_id: String,
@@ -539,7 +554,7 @@ impl DataApiPython3DataStream {
chan_ix: 0,
chan_stream: None,
config_fut: None,
file_io_buffer_size,
disk_io_tune,
do_decompress,
event_count: 0,
events_max,
@@ -712,7 +727,7 @@ impl Stream for DataApiPython3DataStream {
channel,
range: self.range.clone(),
agg_kind: netpod::AggKind::EventBlobs,
disk_io_buffer_size: self.file_io_buffer_size.0,
disk_io_tune: self.disk_io_tune.clone(),
do_decompress: self.do_decompress,
};
let perf_opts = PerfOpts { inmem_bufcap: 1024 * 4 };
@@ -728,7 +743,7 @@ impl Stream for DataApiPython3DataStream {
evq.agg_kind.need_expand(),
evq.do_decompress,
event_chunker_conf,
self.file_io_buffer_size.clone(),
self.disk_io_tune.clone(),
&self.node_config,
)?;
Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
@@ -792,6 +807,13 @@ impl Stream for DataApiPython3DataStream {
} else {
if self.chan_ix >= self.channels.len() {
self.data_done = true;
{
let n = Instant::now();
let mut sb = crate::status_board().unwrap();
sb.mark_alive(&self.status_id);
self.ping_last = n;
sb.mark_ok(&self.status_id);
}
continue;
} else {
let channel = self.channels[self.chan_ix].clone();
@@ -850,6 +872,10 @@ impl Api1EventsBinaryHandler {
.map_err(|e| Error::with_msg_no_trace(format!("{e:?}")))?
.to_owned();
let body_data = hyper::body::to_bytes(body).await?;
info!(
"Api1EventsBinaryHandler query json: {}",
String::from_utf8_lossy(&body_data)
);
let qu: Api1Query = if let Ok(qu) = serde_json::from_slice(&body_data) {
qu
} else {
@@ -888,17 +914,12 @@ impl Api1EventsBinaryHandler {
name: x.clone(),
})
.collect();
let file_io_buffer_size = if let Some(k) = qu.file_io_buffer_size {
k
} else {
node_config.node_config.cluster.file_io_buffer_size.clone()
};
// TODO use a better stream protocol with built-in error delivery.
let status_id = super::status_board()?.new_status_id();
let s = DataApiPython3DataStream::new(
range.clone(),
chans,
file_io_buffer_size,
qu.disk_io_tune().clone(),
qu.decompress,
qu.events_max,
status_id.clone(),
@@ -939,7 +960,7 @@ impl RequestStatusHandler {
if accept != APP_JSON && accept != ACCEPT_ALL {
// TODO set the public error code and message and return Err(e).
let e = Error::with_public_msg(format!("Unsupported Accept: {:?}", accept));
error!("{e:?}");
error!("{e}");
return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
}
let _body_data = hyper::body::to_bytes(body).await?;

91
httpret/src/download.rs Normal file
View File

@@ -0,0 +1,91 @@
use std::pin::Pin;
use crate::err::Error;
use crate::response;
use futures_util::{Stream, TryStreamExt};
use http::{Method, StatusCode};
use hyper::{Body, Request, Response};
use netpod::{get_url_query_pairs, DiskIoTune, FromUrl, NodeConfigCached, ReadSys};
use url::Url;
/// Query parameters for the test file-download endpoint.
///
/// Parsed from the URL query string via the `FromUrl` impl; currently
/// carries only the disk I/O tuning knobs.
#[derive(Clone, Debug)]
pub struct DownloadQuery {
    // Effective tuning assembled from `ReadSys`/`ReadBufferLen`/`ReadQueueLen`.
    disk_io_tune: DiskIoTune,
}
impl FromUrl for DownloadQuery {
    /// Parse the download tuning parameters from the URL query string.
    ///
    /// Recognized keys (all optional):
    /// - `ReadSys`: read backend selector, default "TokioAsyncRead".
    /// - `ReadBufferLen`: read buffer length in bytes, default 4 KiB.
    /// - `ReadQueueLen`: read queue depth, default 8.
    ///
    /// Missing and unparsable values both fall back to the default
    /// (same behavior as before; previously forced through a "xx"
    /// sentinel string fed to `parse`).
    fn from_url(url: &Url) -> Result<Self, ::err::Error> {
        let pairs = get_url_query_pairs(url);
        let read_sys = pairs
            .get("ReadSys")
            .map(String::as_str)
            .unwrap_or("TokioAsyncRead")
            .into();
        let read_buffer_len = pairs
            .get("ReadBufferLen")
            .and_then(|x| x.parse().ok())
            .unwrap_or(1024 * 4);
        let read_queue_len = pairs
            .get("ReadQueueLen")
            .and_then(|x| x.parse().ok())
            .unwrap_or(8);
        let disk_io_tune = DiskIoTune {
            read_sys,
            read_buffer_len,
            read_queue_len,
        };
        Ok(Self { disk_io_tune })
    }
}
/// Test endpoint that streams a raw file from the sf-databuffer base
/// directory, with disk I/O tuning taken from the URL query string.
pub struct DownloadHandler {}

impl DownloadHandler {
    /// URL prefix routed to this handler; the path remainder is the file
    /// path relative to the databuffer base directory.
    pub fn path_prefix() -> &'static str {
        "/api/4/test/download/"
    }

    /// Route matcher: `Some(handler)` when the request path is under our prefix.
    pub fn handler(req: &Request<Body>) -> Option<Self> {
        if req.uri().path().starts_with(Self::path_prefix()) {
            Some(Self {})
        } else {
            None
        }
    }

    /// Open the requested file and stream its contents, using the read
    /// backend selected by `ReadSys` in the query string.
    pub async fn get(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
        let (head, _body) = req.into_parts();
        // File path relative to the configured base directory.
        let p2 = &head.uri.path()[Self::path_prefix().len()..];
        let base = match &node_config.node.sf_databuffer {
            Some(k) => k.data_base_path.clone(),
            // No databuffer configured: deliberate bogus path, the open below fails.
            None => "/UNDEFINED".into(),
        };
        // `head.uri` is path+query only; add a dummy scheme/host so `Url` parses it.
        let url = url::Url::parse(&format!("http://dummy{}", head.uri))?;
        let query = DownloadQuery::from_url(&url)?;
        let file = tokio::fs::OpenOptions::new().read(true).open(base.join(p2)).await?;
        let s = match query.disk_io_tune.read_sys {
            ReadSys::TokioAsyncRead => {
                let s = disk::file_content_stream(file, query.disk_io_tune.clone()).map_ok(|x| x.into_buf());
                Box::pin(s) as Pin<Box<dyn Stream<Item = _> + Send>>
            }
            ReadSys::Read3 => {
                let s = disk::file_content_stream_2(file, query.disk_io_tune.clone()).map_ok(|x| x.into_buf());
                Box::pin(s) as _
            }
        };
        // Fix: a successful download must be 200 OK; this previously
        // returned METHOD_NOT_ALLOWED (405) together with the stream body.
        Ok(response(StatusCode::OK).body(Body::wrap_stream(s))?)
    }

    /// Dispatch by HTTP method; only GET is supported.
    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
        if req.method() == Method::GET {
            self.get(req, node_config).await
        } else {
            Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
        }
    }
}

View File

@@ -145,11 +145,14 @@ impl ChannelExecFunction for EvInfoFunc {
let _ = byte_order;
let _ = event_value_shape;
let perf_opts = PerfOpts { inmem_bufcap: 4096 };
// TODO let PlainEventsJsonQuery provide the tune
let mut disk_io_tune = netpod::DiskIoTune::default();
disk_io_tune.read_buffer_len = self.query.disk_io_buffer_size();
let evq = RawEventsQuery {
channel: self.query.channel().clone(),
range: self.query.range().clone(),
agg_kind: AggKind::Plain,
disk_io_buffer_size: self.query.disk_io_buffer_size(),
disk_io_tune,
do_decompress: true,
};

View File

@@ -1,6 +1,7 @@
pub mod api1;
pub mod channelarchiver;
pub mod channelconfig;
pub mod download;
pub mod err;
pub mod events;
pub mod evinfo;
@@ -8,6 +9,7 @@ pub mod gather;
pub mod proxy;
pub mod pulsemap;
pub mod search;
pub mod settings;
use crate::err::Error;
use crate::gather::gather_get_json;
@@ -301,6 +303,10 @@ async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) ->
} else {
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
}
} else if let Some(h) = download::DownloadHandler::handler(&req) {
h.handle(req, &node_config).await
} else if let Some(h) = settings::SettingsThreadsMaxHandler::handler(&req) {
h.handle(req, &node_config).await
} else if let Some(h) = api1::Api1EventsBinaryHandler::handler(&req) {
h.handle(req, &node_config).await
} else if let Some(h) = evinfo::EventInfoScan::handler(&req) {

62
httpret/src/settings.rs Normal file
View File

@@ -0,0 +1,62 @@
use crate::err::Error;
use crate::response;
use http::{Method, StatusCode};
use hyper::{Body, Request, Response};
use netpod::log::*;
use netpod::NodeConfigCached;
use netpod::{ACCEPT_ALL, APP_JSON};
/// Runtime settings endpoint for the read3 worker-thread cap.
pub struct SettingsThreadsMaxHandler {}

impl SettingsThreadsMaxHandler {
    /// Path this handler is mounted at.
    pub fn path_prefix() -> &'static str {
        "/api/4/settings/read3/threads_max"
    }

    /// Route matcher: `Some(handler)` when the request path starts with our prefix.
    pub fn handler(req: &Request<Body>) -> Option<Self> {
        req.uri()
            .path()
            .starts_with(Self::path_prefix())
            .then(|| Self {})
    }

    /// PUT: parse the request body as a decimal integer and apply it as the
    /// new thread cap. Responds 204 on success, 406 on unsupported Accept.
    pub async fn put(&self, req: Request<Body>, _node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
        let (head, payload) = req.into_parts();
        let accept = head
            .headers
            .get(http::header::ACCEPT)
            .map_or(Ok(ACCEPT_ALL), |k| k.to_str())
            .map_err(|e| Error::with_msg_no_trace(format!("{e:?}")))?
            .to_owned();
        let acceptable = accept == APP_JSON || accept == ACCEPT_ALL;
        if !acceptable {
            // TODO set the public error code and message and return Err(e).
            let e = Error::with_public_msg(format!("Unsupported Accept: {:?}", accept));
            error!("{e}");
            return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
        }
        let body = hyper::body::to_bytes(payload).await?;
        let threads_max: usize = String::from_utf8_lossy(&body).parse()?;
        info!("threads_max {threads_max}");
        disk::read3::Read3::get().set_threads_max(threads_max);
        Ok(response(StatusCode::NO_CONTENT).body(Body::empty())?)
    }

    /// GET: report the current thread cap as plain text.
    pub async fn get(&self, _req: Request<Body>, _node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
        let threads_max = disk::read3::Read3::get().threads_max();
        Ok(response(StatusCode::OK).body(Body::from(format!("{threads_max}")))?)
    }

    /// Dispatch by HTTP method; GET and PUT are supported.
    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
        let meth = req.method().clone();
        if meth == Method::GET {
            self.get(req, node_config).await
        } else if meth == Method::PUT {
            self.put(req, node_config).await
        } else {
            Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
        }
    }
}