rustc panics

This commit is contained in:
Dominik Werder
2023-07-18 11:27:39 +02:00
parent 2054f6c23f
commit 91947dec0f
49 changed files with 982 additions and 679 deletions

View File

@@ -1,8 +1,8 @@
use crate::bodystream::response;
use crate::bodystream::ToPublicResponse;
use crate::channelconfig::ch_conf_from_binned;
use crate::err::Error;
use crate::response_err;
use err::Error;
use http::Method;
use http::Request;
use http::Response;
@@ -20,7 +20,8 @@ use tracing::Instrument;
use url::Url;
async fn binned_json(url: Url, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
debug!("httpret plain_events_json req: {:?}", req);
debug!("{:?}", req);
let reqid = crate::status_board()?.new_status_id();
let (_head, _body) = req.into_parts();
let query = BinnedQuery::from_url(&url).map_err(|e| {
error!("binned_json: {e:?}");
@@ -41,7 +42,7 @@ async fn binned_json(url: Url, req: Request<Body>, node_config: &NodeConfigCache
span1.in_scope(|| {
debug!("begin");
});
let item = streams::timebinnedjson::timebinned_json(query, ch_conf, node_config.node_config.cluster.clone())
let item = streams::timebinnedjson::timebinned_json(query, ch_conf, reqid, node_config.node_config.cluster.clone())
.instrument(span1)
.await?;
let buf = serde_json::to_vec(&item)?;

View File

@@ -1,33 +1,40 @@
use crate::bodystream::response;
use crate::bodystream::BodyStream;
use crate::response_err;
use async_channel::Receiver;
use async_channel::Sender;
use bytes::Bytes;
use err::thiserror;
use err::ThisError;
use err::ToPublicError;
use futures_util::Stream;
use futures_util::StreamExt;
use http::Method;
use http::Request;
use http::Response;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::Node;
use netpod::NodeConfigCached;
use netpod::ACCEPT_ALL;
use netpod::APP_JSON;
use serde::Serialize;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use url::Url;
#[derive(Debug, thiserror::Error)]
#[derive(Debug, ThisError)]
pub enum FindActiveError {
#[error("HttpBadAccept")]
HttpBadAccept,
#[error("HttpBadUrl")]
HttpBadUrl,
#[error("{0}")]
#[error("Error({0})")]
Error(Box<dyn ToPublicError>),
#[error("{0}")]
#[error("UrlError({0})")]
UrlError(#[from] url::ParseError),
#[error("InternalError")]
InternalError,
IO(#[from] std::io::Error),
}
impl ToPublicError for FindActiveError {
@@ -36,8 +43,9 @@ impl ToPublicError for FindActiveError {
FindActiveError::HttpBadAccept => format!("{self}"),
FindActiveError::HttpBadUrl => format!("{self}"),
FindActiveError::Error(e) => e.to_public_error(),
FindActiveError::UrlError(e) => format!("can not parse url: {e}"),
FindActiveError::UrlError(_) => format!("{self}"),
FindActiveError::InternalError => format!("{self}"),
FindActiveError::IO(_) => format!("{self}"),
}
}
}
@@ -46,7 +54,7 @@ pub struct FindActiveHandler {}
impl FindActiveHandler {
pub fn handler(req: &Request<Body>) -> Option<Self> {
if req.uri().path() == "/api/4/tools/databuffer/findActive" {
if req.uri().path() == "/api/4/tool/sfdatabuffer/find/channel/active" {
Some(Self {})
} else {
None
@@ -83,28 +91,222 @@ impl FindActiveHandler {
};
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
type _A = netpod::BodyStream;
Ok(Response::builder()
.status(StatusCode::OK)
.body(BodyStream::wrapped(Box::pin(DummyStream::new()), "find_active".into()))
.map_err(|_| FindActiveError::InternalError)?)
let stream = FindActiveStream::new(40, 2, ncc);
let stream = stream.chain(FindActiveStream::new(40, 3, ncc));
let stream = stream
.map(|item| match item {
Ok(item) => {
let mut s = serde_json::to_vec(&item).unwrap();
s.push(0x0a);
s
}
Err(e) => {
error!("ERROR in http body stream after headers: {e}");
Vec::new()
}
})
.map(|x| Ok::<_, String>(Bytes::from(x)));
let body = Body::wrap_stream(Box::pin(stream));
Ok(Response::builder().status(StatusCode::OK).body(body).unwrap())
} else {
Err(FindActiveError::HttpBadAccept)
}
}
}
// NOTE(review): old-side relic of this diff — its `new`/`Stream` impls are
// removed by this change and nothing visible here constructs it anymore.
struct DummyStream {}
// One record describing a channel found to be actively written to; serialized
// to JSON and streamed newline-delimited in the HTTP response body.
#[derive(Debug, Serialize)]
struct ActiveChannelDesc {
    // Keyspace the channel was found under (passed through from the scan).
    ks: u32,
    // Channel name, taken from the directory name (lossy UTF-8 conversion).
    name: String,
    // Total byte count summed over the matching time-bin directory contents.
    totlen: u64,
}
impl DummyStream {
pub fn new() -> Self {
todo!()
// Sum the sizes in bytes of all files up to two directory levels below
// `path`: files inside immediate subdirectories, plus (unexpectedly present)
// plain files directly in `path`, which are logged but still counted.
//
// Entries whose name starts with ".." are skipped as observed filesystem
// artifacts. Returns the accumulated byte count, or the first I/O error.
//
// Fix vs. original: `x.file_type().await.unwrap()` panicked on an I/O error
// while every other call in this function propagates via `?`; it now
// propagates as `FindActiveError` too.
async fn sum_dir_contents(path: PathBuf) -> Result<u64, FindActiveError> {
    let mut sum = 0;
    let mut dir_stream = tokio::fs::read_dir(path).await?;
    while let Some(x) = dir_stream.next_entry().await? {
        if x.file_name().to_string_lossy().starts_with("..") {
            debug!("INCONVENIENT: {x:?}");
        } else if x.file_type().await?.is_dir() {
            // One level deeper: add up the sizes of this directory's files.
            let mut dir_stream_2 = tokio::fs::read_dir(x.path()).await?;
            while let Some(y) = dir_stream_2.next_entry().await? {
                let md = y.metadata().await?;
                sum += md.len();
            }
        } else {
            error!("unexpected file: {:?}", x.file_name());
            sum += x.metadata().await?.len();
        }
    }
    Ok(sum)
}
/// Tiny deterministic PRNG (Marsaglia's xorshift32), used here to assign
/// random sort keys to channel directories without pulling in a rand crate.
struct XorShift32 {
    state: u32,
}

impl XorShift32 {
    /// Create a generator seeded with `state`.
    /// Note: a seed of zero stays zero forever — xorshift of 0 is 0.
    fn new(state: u32) -> Self {
        Self { state }
    }

    /// Advance the internal state and return the next pseudo-random value.
    fn next(&mut self) -> u32 {
        let s = self.state;
        let s = s ^ (s << 13);
        let s = s ^ (s >> 17);
        let s = s ^ (s << 5);
        self.state = s;
        s
    }
}
impl Stream for DummyStream {
type Item = Result<Bytes, crate::err::Error>;
// Scan the databuffer tree of keyspace `ks` on `node` and emit, through `tx`,
// up to `max` channels that have data in the current day's time bin with more
// than 10 MiB of content.
//
// Channel directories are visited in pseudo-random order (seeded from the
// wall clock) so repeated calls sample different channels first. The scan
// stops early once `max` matches were sent or the receiver was dropped.
//
// Fixes vs. original: the `Result` of `tx.send` was ignored (the scan kept
// walking the filesystem after the consumer was gone); and the separate
// `starts_with("..")` branch was unreachable, shadowed by `starts_with(".")`.
async fn find_active_inner(
    max: usize,
    ks: u32,
    // TODO currently unused; presumably intended to restrict the scan to the
    // given splits — confirm against the caller before wiring it up.
    splits: &[u64],
    node: Node,
    tx: Sender<Result<ActiveChannelDesc, FindActiveError>>,
) -> Result<(), FindActiveError> {
    let _ = splits;
    let mut count = 0;
    let now_sec = std::time::SystemTime::now()
        .duration_since(std::time::SystemTime::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    // Seed from the clock; burn some outputs to decorrelate nearby seeds.
    let mut rng = XorShift32::new(now_sec as u32);
    for _ in 0..64 {
        rng.next();
    }
    // Expected time bin: days since the unix epoch.
    // TODO bin-size depends on channel config.
    let tb_exp = now_sec / 60 / 60 / 24;
    // Time-bin directories carry 19-digit zero-padded names.
    let re_tb = regex::Regex::new(r"(0000\d{15})").unwrap();
    let path = disk::paths::datapath_for_keyspace(ks, &node);
    let mut dir_stream = tokio::fs::read_dir(path).await?;
    // Collect channel directories tagged with a random sort key.
    let mut channel_dirs = Vec::new();
    while let Some(x) = dir_stream.next_entry().await? {
        // Covers ".." as well, which the original checked redundantly.
        if x.file_name().to_string_lossy().starts_with(".") {
            debug!("INCONVENIENT: {x:?}");
        } else {
            channel_dirs.push((rng.next(), x));
        }
    }
    // Randomize the visiting order via the random keys.
    channel_dirs.sort_by_key(|x| x.0);
    let channel_dirs = channel_dirs;
    'outer: for (_, chdir) in channel_dirs {
        let ft = chdir.file_type().await?;
        if !ft.is_dir() {
            error!("unexpected file {chdir:?}");
            continue;
        }
        let mut dir_stream = tokio::fs::read_dir(chdir.path()).await?;
        while let Some(e) = dir_stream.next_entry().await? {
            let x = e.file_name();
            let s = x.to_string_lossy();
            if re_tb.captures(&s).is_none() {
                continue;
            }
            let chn1 = chdir.file_name();
            let chname = chn1.to_string_lossy();
            // Leading zeros parse fine; non-numeric names are just skipped.
            if let Ok(tb) = s.parse::<u64>() {
                if tb == tb_exp {
                    let sum = sum_dir_contents(e.path()).await?;
                    // Only report channels with sizable content (> 10 MiB).
                    if sum > 1024 * 1024 * 10 {
                        let desc = ActiveChannelDesc {
                            ks,
                            name: chname.into(),
                            totlen: sum,
                        };
                        // Receiver gone: stop the whole scan, not just this send.
                        if tx.send(Ok(desc)).await.is_err() {
                            break 'outer;
                        }
                        count += 1;
                        if count >= max {
                            break 'outer;
                        }
                    }
                }
            }
        }
    }
    Ok(())
}
fn poll_next(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context) -> std::task::Poll<Option<Self::Item>> {
todo!()
// Spawned task entry point: run the scan and, on failure, forward the error
// to the consumer as the final item of the stream.
//
// Fix vs. original: the `Result` of the error-forwarding `send` was silently
// ignored without acknowledgement; it is now explicitly best-effort, since
// the receiver may already have been dropped.
async fn find_active(
    max: usize,
    ks: u32,
    splits: Vec<u64>,
    node: Node,
    tx: Sender<Result<ActiveChannelDesc, FindActiveError>>,
) {
    let tx2 = tx.clone();
    if let Err(e) = find_active_inner(max, ks, &splits, node, tx).await {
        // Best effort: a closed channel just means nobody is listening.
        let _ = tx2.send(Err(e)).await;
    }
}
// Stream of active-channel records backed by a small bounded channel; a
// spawned background task walks the filesystem and feeds the sender side.
struct FindActiveStream {
    rx: Receiver<Result<ActiveChannelDesc, FindActiveError>>,
}

impl FindActiveStream {
    // Spawn the scan task for keyspace `ks` on this node, emitting at most
    // `max` results through the returned stream.
    //
    // Panics if the node has no sf_databuffer configuration — the bare
    // `unwrap()` is replaced by an `expect` stating that invariant.
    pub fn new(max: usize, ks: u32, ncc: &NodeConfigCached) -> Self {
        // Bounded at 4 so the scan applies backpressure to a slow client.
        let (tx, rx) = async_channel::bounded(4);
        let splits = ncc
            .node
            .sf_databuffer
            .as_ref()
            .expect("find_active requires sf_databuffer configuration")
            .splits
            .clone()
            .unwrap_or_default();
        let _jh = taskrun::spawn(find_active(max, ks, splits, ncc.node.clone(), tx));
        Self { rx }
    }
}
impl Stream for FindActiveStream {
    type Item = Result<ActiveChannelDesc, FindActiveError>;

    // Delegate directly to the channel receiver: the original matched every
    // `Poll` variant only to return it unchanged (an identity match).
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        self.rx.poll_next_unpin(cx)
    }
}

View File

@@ -1,9 +1,8 @@
use crate::channelconfig::chconf_from_events_v1;
use crate::err::Error;
use crate::response;
use crate::response_err;
use crate::BodyStream;
use crate::ToPublicResponse;
use err::Error;
use futures_util::stream;
use futures_util::TryStreamExt;
use http::Method;
@@ -72,15 +71,12 @@ async fn plain_events_binary(
req: Request<Body>,
node_config: &NodeConfigCached,
) -> Result<Response<Body>, Error> {
debug!("plain_events_binary req: {:?}", req);
debug!("{:?}", req);
let query = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let ch_conf = chconf_from_events_v1(&query, node_config).await?;
info!("plain_events_binary chconf_from_events_v1: {ch_conf:?}");
let s = stream::iter([Ok::<_, Error>(String::from("TODO_PREBINNED_BINARY_STREAM"))]);
let ret = response(StatusCode::OK).body(BodyStream::wrapped(
s.map_err(Error::from),
format!("plain_events_binary"),
))?;
let ret = response(StatusCode::OK).body(Body::wrap_stream(s.map_err(Error::from)))?;
Ok(ret)
}
@@ -89,6 +85,7 @@ async fn plain_events_json(
req: Request<Body>,
node_config: &NodeConfigCached,
) -> Result<Response<Body>, Error> {
let reqid = crate::status_board()?.new_status_id();
info!("plain_events_json req: {:?}", req);
let (_head, _body) = req.into_parts();
let query = PlainEventsQuery::from_url(&url)?;
@@ -99,7 +96,8 @@ async fn plain_events_json(
.map_err(Error::from)?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("plain_events_json chconf_from_events_v1: {ch_conf:?}");
let item = streams::plaineventsjson::plain_events_json(&query, ch_conf, &node_config.node_config.cluster).await;
let item =
streams::plaineventsjson::plain_events_json(&query, ch_conf, reqid, &node_config.node_config.cluster).await;
let item = match item {
Ok(item) => item,
Err(e) => {

View File

@@ -1,6 +1,6 @@
use crate::bodystream::response;
use crate::bodystream::ToPublicResponse;
use crate::Error;
use err::Error;
use http::Method;
use http::Request;
use http::Response;

View File

@@ -1,6 +1,6 @@
use crate::bodystream::response;
use crate::err::Error;
use crate::ReqCtx;
use crate::RetrievalError;
use http::Request;
use http::Response;
use http::StatusCode;
@@ -14,7 +14,7 @@ use std::collections::VecDeque;
use std::time::Duration;
#[allow(unused)]
async fn table_sizes(node_config: &NodeConfigCached) -> Result<TableSizes, Error> {
async fn table_sizes(node_config: &NodeConfigCached) -> Result<TableSizes, RetrievalError> {
let ret = dbconn::table_sizes(node_config).await?;
Ok(ret)
}
@@ -39,12 +39,12 @@ impl StatusNodesRecursive {
req: Request<Body>,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<Response<Body>, Error> {
) -> Result<Response<Body>, RetrievalError> {
let res = tokio::time::timeout(Duration::from_millis(1200), self.status(req, ctx, node_config)).await;
let res = match res {
Ok(res) => res,
Err(e) => {
let e = Error::from(e).add_public_msg("see timeout");
let e = RetrievalError::from(e).add_public_msg("see timeout");
return Ok(crate::bodystream::ToPublicResponse::to_public_response(&e));
}
};
@@ -67,7 +67,7 @@ impl StatusNodesRecursive {
req: Request<Body>,
_ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<NodeStatus, Error> {
) -> Result<NodeStatus, RetrievalError> {
let (_head, _body) = req.into_parts();
let archiver_appliance_status = match node_config.node.archiver_appliance.as_ref() {
Some(k) => {