Report unknown channel count in status response
@@ -61,6 +61,7 @@ use tokio::io::AsyncReadExt;
 use tokio::io::AsyncSeekExt;
 use tokio::io::ReadBuf;
 use tokio::sync::mpsc;
+use tracing::Instrument;

 // TODO move to databuffer-specific crate
 // TODO duplicate of SfChFetchInfo?
@@ -347,8 +348,10 @@ fn start_read5(
 }
 }
 let n = pos - pos_beg;
-info!("read5 done {n}");
+debug!("read5 done {n}");
 };
+let span = tracing::span!(tracing::Level::INFO, "read5", reqid);
+let fut = fut.instrument(span);
 tokio::task::spawn(fut);
 Ok(())
 }

@@ -186,8 +186,7 @@ impl Stream for EventChunkerMultifile {
 let file = ofs.files.pop().unwrap();
 let path = file.path;
 let msg = format!("handle OFS {:?}", ofs);
-debug!("{}", msg);
-let item = LogItem::quick(Level::INFO, msg);
+let item = LogItem::quick(Level::DEBUG, msg);
 match file.file {
 Some(file) => {
 let inp = Box::pin(crate::file_content_stream(
@@ -212,16 +211,12 @@ impl Stream for EventChunkerMultifile {
 Ready(Some(Ok(StreamItem::Log(item))))
 } else if ofs.files.len() == 0 {
 let msg = format!("handle OFS {:?} NO FILES", ofs);
-debug!("{}", msg);
-let item = LogItem::quick(Level::INFO, msg);
+let item = LogItem::quick(Level::DEBUG, msg);
 Ready(Some(Ok(StreamItem::Log(item))))
 } else {
-let msg = format!("handle OFS MERGED timebin {}", ofs.timebin);
-info!("{}", msg);
-for x in &ofs.files {
-info!("  path {:?}", x.path);
-}
-let item = LogItem::quick(Level::INFO, msg);
+let paths: Vec<_> = ofs.files.iter().map(|x| &x.path).collect();
+let msg = format!("handle OFS MERGED timebin {} {:?}", ofs.timebin, paths);
+let item = LogItem::quick(Level::DEBUG, msg);
 let mut chunkers = Vec::new();
 for of in ofs.files {
 if let Some(file) = of.file {
@@ -256,7 +251,7 @@ impl Stream for EventChunkerMultifile {
 Ready(None) => {
 self.done = true;
 let item = LogItem::quick(
-Level::INFO,
+Level::DEBUG,
 format!(
 "EventChunkerMultifile used {} datafiles beg {} end {} node_ix {}",
 self.files_count,

@@ -96,7 +96,7 @@ impl Drop for EventChunker {
 warn!("config_mismatch_discard {}", self.config_mismatch_discard);
 }
 debug!(
-"EventChunker Drop Stats:\ndecomp_dt_histo: {:?}\nitem_len_emit_histo: {:?}",
+"EventChunker-stats {{ decomp_dt_histo: {:?}, item_len_emit_histo: {:?} }}",
 self.decomp_dt_histo, self.item_len_emit_histo
 );
 }
@@ -164,7 +164,7 @@ impl EventChunker {
 dbg_path: PathBuf,
 expand: bool,
 ) -> Self {
-info!("{}::{}", Self::self_name(), "from_start");
+debug!("{}::{}", Self::self_name(), "from_start");
 let need_min_max = match fetch_info.shape() {
 Shape::Scalar => 1024 * 8,
 Shape::Wave(_) => 1024 * 32,
@@ -210,7 +210,7 @@ impl EventChunker {
 dbg_path: PathBuf,
 expand: bool,
 ) -> Self {
-info!("{}::{}", Self::self_name(), "from_event_boundary");
+debug!("{}::{}", Self::self_name(), "from_event_boundary");
 let mut ret = Self::from_start(inp, fetch_info, range, stats_conf, dbg_path, expand);
 ret.state = DataFileState::Event;
 ret.need_min = 4;
@@ -440,7 +440,7 @@ impl EventChunker {
 if discard {
 self.discard_count += 1;
 } else {
-ret.add_event(
+ret.push(
 ts,
 pulse,
 databuf.to_vec(),

@@ -74,7 +74,7 @@ impl EventBlobsGeneratorI32Test00 {
 }
 let pulse = ts;
 let value = (ts / (MS * 100) % 1000) as T;
-item.add_event(
+item.push(
 ts,
 pulse,
 value.to_be_bytes().to_vec(),
@@ -174,7 +174,7 @@ impl EventBlobsGeneratorI32Test01 {
 }
 let pulse = ts;
 let value = (ts / self.dts) as T;
-item.add_event(
+item.push(
 ts,
 pulse,
 value.to_be_bytes().to_vec(),

@@ -466,6 +466,12 @@ impl From<&Error> for PublicError {
 }
 }

+impl fmt::Display for PublicError {
+fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+write!(fmt, "{}", self.msg)
+}
+}
+
 pub fn todo() {
 let bt = backtrace::Backtrace::new();
 eprintln!("TODO\n{bt:?}");

@@ -1,3 +1,4 @@
+use crate::err::Error;
 use crate::gather::gather_get_json_generic;
 use crate::gather::SubRes;
 use crate::response;
@@ -7,11 +8,8 @@ use bytes::BytesMut;
 use disk::eventchunker::EventChunkerConf;
 use disk::merge::mergedblobsfromremotes::MergedBlobsFromRemotes;
 use disk::raw::conn::make_local_event_blobs_stream;
-use err::Error;
 use futures_util::FutureExt;
 use futures_util::Stream;
 use futures_util::StreamExt;
 use futures_util::TryStreamExt;
 use http::Method;
 use http::StatusCode;
 use hyper::Body;
@@ -28,6 +26,7 @@ use netpod::log::*;
 use netpod::query::api1::Api1Query;
 use netpod::range::evrange::NanoRange;
 use netpod::timeunits::SEC;
+use netpod::Api1WarningStats;
 use netpod::ByteSize;
 use netpod::ChannelSearchQuery;
 use netpod::ChannelSearchResult;
@@ -456,7 +455,7 @@ pub async fn gather_json_2_v1(
 struct Jres {
 hosts: Vec<Hres>,
 }
-let mut a = vec![];
+let mut a = Vec::new();
 for tr in spawned {
 let res = match tr.1.await {
 Ok(k) => match k {
@@ -501,8 +500,6 @@ async fn process_answer(res: Response<Body>) -> Result<JsonValue, Error> {
 s1
 )))
 } else {
-//use snafu::IntoError;
-//Err(Bad{msg:format!("API error")}.into_error(NoneError)).ctxb(SE!(AddPos))
 Ok(JsonValue::String(format!("status {}", pre.status.as_str())))
 }
 } else {
@@ -516,15 +513,6 @@ async fn process_answer(res: Response<Body>) -> Result<JsonValue, Error> {
 }
 }

-async fn find_ch_conf(
-range: NanoRange,
-channel: SfDbChannel,
-ncc: NodeConfigCached,
-) -> Result<Option<ChannelTypeConfigGen>, Error> {
-let ret = nodenet::channelconfig::channel_config(range, channel, &ncc).await?;
-Ok(ret)
-}
-
 pub struct DataApiPython3DataStream {
 range: NanoRange,
 channels: VecDeque<ChannelTypeConfigGen>,
@@ -533,7 +521,6 @@ pub struct DataApiPython3DataStream {
 current_fetch_info: Option<SfChFetchInfo>,
 node_config: NodeConfigCached,
 chan_stream: Option<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>>,
-config_fut: Option<Pin<Box<dyn Future<Output = Result<Option<ChannelTypeConfigGen>, Error>> + Send>>>,
 disk_io_tune: DiskIoTune,
 do_decompress: bool,
 event_count: usize,
@@ -543,6 +530,7 @@ pub struct DataApiPython3DataStream {
 ping_last: Instant,
 data_done: bool,
 completed: bool,
+stats: Api1WarningStats,
 }

 impl DataApiPython3DataStream {
@@ -564,7 +552,6 @@ impl DataApiPython3DataStream {
 current_fetch_info: None,
 node_config,
 chan_stream: None,
-config_fut: None,
 disk_io_tune,
 do_decompress,
 event_count: 0,
@@ -574,9 +561,16 @@ impl DataApiPython3DataStream {
 ping_last: Instant::now(),
 data_done: false,
 completed: false,
+stats: Api1WarningStats::new(),
 }
 }

+fn channel_finished(&mut self) {
+self.chan_stream = None;
+self.header_out = false;
+self.event_count = 0;
+}
+
 fn convert_item(
 b: EventFull,
 channel: &ChannelTypeConfigGen,
@@ -588,7 +582,7 @@ impl DataApiPython3DataStream {
 let shape = fetch_info.shape();
 let mut d = BytesMut::new();
 for i1 in 0..b.len() {
-const EVIMAX: usize = 6;
+const EVIMAX: usize = 20;
 if *count_events < EVIMAX {
 debug!(
 "ev info {}/{} bloblen {:?} BE {:?} scalar-type {:?} shape {:?} comps {:?}",
@@ -662,57 +656,83 @@ impl DataApiPython3DataStream {
 Ok(d)
 }

-fn handle_chan_stream_ready(&mut self, item: Sitemty<EventFull>) -> Option<Result<BytesMut, Error>> {
-match item {
-Ok(k) => {
-let n = Instant::now();
-if n.duration_since(self.ping_last) >= Duration::from_millis(2000) {
-let mut sb = crate::status_board().unwrap();
-sb.mark_alive(self.reqctx.reqid());
-self.ping_last = n;
-}
-match k {
-StreamItem::DataItem(k) => match k {
-RangeCompletableItem::RangeComplete => todo!(),
-RangeCompletableItem::Data(k) => {
-let item = Self::convert_item(
-k,
-self.current_channel.as_ref().unwrap(),
-self.current_fetch_info.as_ref().unwrap(),
-self.do_decompress,
-&mut self.header_out,
-&mut self.event_count,
-)?;
-todo!()
+fn handle_chan_stream_ready(&mut self, item: Sitemty<EventFull>) -> Result<BytesMut, Error> {
+let ret = match item {
+Ok(k) => match k {
+StreamItem::DataItem(k) => match k {
+RangeCompletableItem::RangeComplete => {
+debug!("sees RangeComplete");
+Ok(BytesMut::new())
+}
+RangeCompletableItem::Data(k) => {
+self.event_count += k.len();
+if self.events_max != 0 && self.event_count >= self.events_max as usize {
+return Err(Error::with_msg_no_trace(format!(
+"events_max reached {} {}",
+self.event_count, self.events_max
+)));
+}
-},
-StreamItem::Log(k) => todo!(),
-StreamItem::Stats(k) => todo!(),
+
+// NOTE needed because the databuffer actually doesn't write
+// the correct shape per event.
+let mut k = k;
+if let Some(fi) = self.current_fetch_info.as_ref() {
+if let Shape::Scalar = fi.shape() {
+} else {
+k.overwrite_all_shapes(fi.shape());
+}
+}
+let k = k;
+
+let item = Self::convert_item(
+k,
+self.current_channel.as_ref().unwrap(),
+self.current_fetch_info.as_ref().unwrap(),
+self.do_decompress,
+&mut self.header_out,
+&mut self.event_count,
+)?;
+Ok(item)
+}
+},
+StreamItem::Log(k) => {
+let nodeix = k.node_ix;
+if k.level == Level::ERROR {
+tracing::event!(Level::ERROR, nodeix, message = k.msg);
+} else if k.level == Level::WARN {
+tracing::event!(Level::WARN, nodeix, message = k.msg);
+} else if k.level == Level::INFO {
+tracing::event!(Level::INFO, nodeix, message = k.msg);
+} else if k.level == Level::DEBUG {
+tracing::event!(Level::DEBUG, nodeix, message = k.msg);
+} else if k.level == Level::TRACE {
+tracing::event!(Level::TRACE, nodeix, message = k.msg);
+} else {
+tracing::event!(Level::TRACE, nodeix, message = k.msg);
+}
+Ok(BytesMut::new())
+}
-}
+StreamItem::Stats(k) => {
+//
+Ok(BytesMut::new())
+}
+},
 Err(e) => {
-error!("DataApiPython3DataStream emit error: {e:?}");
-self.chan_stream = None;
-self.current_channel = None;
-self.current_fetch_info = None;
-self.data_done = true;
-let mut sb = crate::status_board().unwrap();
-sb.add_error(self.reqctx.reqid(), e);
-if false {
-// TODO format as python data api error frame:
-let mut buf = BytesMut::with_capacity(1024);
-buf.put_slice("".as_bytes());
-Some(Ok(buf))
-} else {
-None
-}
+error!("DataApiPython3DataStream emit error: {e}");
+Err(e.into())
 }
+};
+let tsnow = Instant::now();
+if tsnow.duration_since(self.ping_last) >= Duration::from_millis(500) {
+self.ping_last = tsnow;
+let mut sb = crate::status_board().unwrap();
+sb.mark_alive(self.reqctx.reqid());
+}
+ret
 }

 // TODO this stream can currently only handle sf-databuffer type backend anyway.
 fn handle_config_fut_ready(&mut self, fetch_info: SfChFetchInfo) -> Result<(), Error> {
-self.config_fut = None;
 let select = EventsSubQuerySelect::new(
 ChannelTypeConfigGen::SfDatabuffer(fetch_info.clone()),
 self.range.clone().into(),
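A side note on the Log branch above: tracing's event! macro needs the level as a compile-time constant, which is why the new code branches once per level instead of passing k.level through. A minimal standalone sketch of that pattern (assuming the tracing crate; the LogItem fields mirror the diff):

use tracing::Level;

// Hypothetical stand-in for the LogItem carried across nodes.
struct LogItem {
    node_ix: u32,
    level: Level,
    msg: String,
}

// Re-emit a log item received from a remote node on the local subscriber.
// tracing::event! requires a const level, hence one branch per level.
fn forward_log(k: LogItem) {
    let nodeix = k.node_ix;
    if k.level == Level::ERROR {
        tracing::event!(Level::ERROR, nodeix, message = k.msg.as_str());
    } else if k.level == Level::WARN {
        tracing::event!(Level::WARN, nodeix, message = k.msg.as_str());
    } else {
        tracing::event!(Level::TRACE, nodeix, message = k.msg.as_str());
    }
}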
@@ -726,7 +746,7 @@ impl DataApiPython3DataStream {
 debug!("TODO add timeout option to data api3 download");
 // TODO is this a good to place decide this?
 let stream = if self.node_config.node_config.cluster.is_central_storage {
-info!("Set up central storage stream");
+debug!("set up central storage stream");
 // TODO pull up this config
 let event_chunker_conf = EventChunkerConf::new(ByteSize::from_kb(1024));
 let s = make_local_event_blobs_stream(
@@ -744,11 +764,6 @@ impl DataApiPython3DataStream {
 let s = MergedBlobsFromRemotes::new(subq, self.node_config.node_config.cluster.clone());
 Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
 };
-let evm = if self.events_max == 0 {
-usize::MAX
-} else {
-self.events_max as usize
-};
 self.chan_stream = Some(Box::pin(stream));
 self.current_fetch_info = Some(fetch_info);
 Ok(())
@@ -769,63 +784,52 @@ impl Stream for DataApiPython3DataStream {
 } else {
 if let Some(stream) = &mut self.chan_stream {
 match stream.poll_next_unpin(cx) {
-Ready(Some(k)) => Ready(self.handle_chan_stream_ready(k)),
-Ready(None) => {
-self.chan_stream = None;
-continue;
-}
-Pending => Pending,
-}
-} else if let Some(fut) = &mut self.config_fut {
-match fut.poll_unpin(cx) {
-Ready(Ok(Some(k))) => match k {
-ChannelTypeConfigGen::Scylla(_) => {
-let e = Error::with_msg_no_trace("scylla");
+Ready(Some(k)) => match self.handle_chan_stream_ready(k) {
+Ok(k) => Ready(Some(Ok(k))),
+Err(e) => {
+error!("{e}");
+self.chan_stream = None;
+self.current_channel = None;
+self.current_fetch_info = None;
+self.data_done = true;
+let mut sb = crate::status_board().unwrap();
+sb.add_error(self.reqctx.reqid(), e.0.clone());
 Ready(Some(Err(e)))
 }
-ChannelTypeConfigGen::SfDatabuffer(k) => match self.handle_config_fut_ready(k) {
-Ok(()) => continue,
-Err(e) => {
-self.config_fut = None;
-self.data_done = true;
-error!("api1_binary_events error {:?}", e);
-Ready(Some(Err(e)))
-}
-},
 },
-Ready(Ok(None)) => {
-warn!("logic error");
-self.config_fut = None;
+Ready(None) => {
+self.channel_finished();
 continue;
 }
-Ready(Err(e)) => {
-self.data_done = true;
-Ready(Some(Err(e)))
-}
 Pending => Pending,
 }
 } else {
-if let Some(channel) = self.channels.pop_front() {
-self.current_channel = Some(channel.clone());
-if false {
-self.config_fut = Some(Box::pin(find_ch_conf(
-self.range.clone(),
-err::todoval(),
-self.node_config.clone(),
-)));
+if let Some(chconf) = self.channels.pop_front() {
+match &chconf {
+ChannelTypeConfigGen::Scylla(_) => {
+// TODO count
+continue;
+}
+ChannelTypeConfigGen::SfDatabuffer(k) => match self.handle_config_fut_ready(k.clone()) {
+Ok(()) => {
+self.current_channel = Some(chconf.clone());
+continue;
+}
+Err(e) => {
+error!("api1_binary_events error {:?}", e);
+self.stats.subreq_fail += 1;
+continue;
+}
+},
+}
-self.config_fut = Some(Box::pin(futures_util::future::ready(Ok(Some(channel)))));
 continue;
 } else {
 self.data_done = true;
 {
 let n = Instant::now();
+self.ping_last = n;
 let mut sb = crate::status_board().unwrap();
 sb.mark_alive(self.reqctx.reqid());
-self.ping_last = n;
-sb.mark_ok(self.reqctx.reqid());
+sb.mark_done(self.reqctx.reqid());
 }
 continue;
 }
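The poll loop above drops the stored config future entirely: channel configs now arrive pre-resolved in self.channels, Scylla-backed channels are skipped (with a TODO to count them), and a failed per-channel setup bumps stats.subreq_fail and moves on instead of aborting the whole response. A reduced sketch of the "loop + continue" poll shape used here (assuming the futures-util crate; all names illustrative, not the real types):

use std::pin::Pin;
use std::task::{Context, Poll};
use futures_util::Stream;

// A loop with `continue` lets one poll call advance through several
// states (current stream ready, stream finished, pick next source)
// and return only when an item or Pending is available.
struct MultiSource<S> {
    current: Option<S>,
    queue: Vec<S>,
    done: bool,
}

impl<S: Stream<Item = u8> + Unpin> Stream for MultiSource<S> {
    type Item = u8;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<u8>> {
        loop {
            if self.done {
                return Poll::Ready(None);
            } else if let Some(stream) = self.current.as_mut() {
                match Pin::new(stream).poll_next(cx) {
                    Poll::Ready(Some(x)) => return Poll::Ready(Some(x)),
                    Poll::Ready(None) => {
                        // Current source exhausted; try the next one.
                        self.current = None;
                        continue;
                    }
                    Poll::Pending => return Poll::Pending,
                }
            } else if let Some(next) = self.queue.pop() {
                self.current = Some(next);
                continue;
            } else {
                self.done = true;
                continue;
            }
        }
    }
}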
@@ -958,7 +962,14 @@ impl Api1EventsBinaryHandler {
 }
 None => {
 // TODO count in request ctx.
+// TODO must already here have the final stats counter container.
+// This means, the request status must provide these counters.
 error!("no config quorum found for {ch:?}");
+let mut sb = crate::status_board().unwrap();
+sb.mark_alive(reqctx.reqid());
+if let Some(e) = sb.get_entry(reqctx.reqid()) {
+e.channel_not_found_inc();
+}
 }
 }
 }
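This hunk is the change the commit title refers to: when no config quorum is found for a channel, the request's status-board entry now counts it via channel_not_found_inc, and that count is later surfaced in the status response. A minimal runnable sketch of the counting flow (types reduced to the relevant fields; the real StatusBoard lives in httpret and holds more state):

use std::collections::BTreeMap;

#[derive(Default)]
struct Entry {
    channel_not_found: usize,
}

impl Entry {
    fn channel_not_found_inc(&mut self) {
        self.channel_not_found += 1;
    }
}

#[derive(Default)]
struct Board {
    entries: BTreeMap<String, Entry>,
}

impl Board {
    fn get_entry(&mut self, status_id: &str) -> Option<&mut Entry> {
        self.entries.get_mut(status_id)
    }
}

fn main() {
    let mut sb = Board::default();
    sb.entries.insert("0000abcd".into(), Entry::default());
    // As in the handler above: on a missing config quorum, bump the counter.
    if let Some(e) = sb.get_entry("0000abcd") {
        e.channel_not_found_inc();
    }
    assert_eq!(sb.entries["0000abcd"].channel_not_found, 1);
}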
@@ -1005,7 +1016,7 @@ impl RequestStatusHandler {
 }
 }

-pub async fn handle(&self, req: Request<Body>, _node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+pub async fn handle(&self, req: Request<Body>, _ncc: &NodeConfigCached) -> Result<Response<Body>, Error> {
 let (head, body) = req.into_parts();
 if head.method != Method::GET {
 return Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?);
@@ -1024,8 +1035,9 @@ impl RequestStatusHandler {
 }
 let _body_data = hyper::body::to_bytes(body).await?;
 let status_id = &head.uri.path()[Self::path_prefix().len()..];
-info!("RequestStatusHandler status_id {:?}", status_id);
-let s = crate::status_board()?.status_as_json(status_id);
+debug!("RequestStatusHandler status_id {:?}", status_id);
+let status = crate::status_board()?.status_as_json(status_id);
+let s = serde_json::to_string(&status)?;
 let ret = response(StatusCode::OK).body(Body::from(s))?;
 Ok(ret)
 }

@@ -1,8 +1,8 @@
 use crate::bodystream::response;
 use crate::bodystream::ToPublicResponse;
 use crate::channelconfig::ch_conf_from_binned;
+use crate::err::Error;
 use crate::response_err;
-use err::Error;
 use http::Method;
 use http::Request;
 use http::Response;
@@ -21,7 +21,9 @@ use url::Url;

 async fn binned_json(url: Url, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
 debug!("{:?}", req);
-let reqid = crate::status_board()?.new_status_id();
+let reqid = crate::status_board()
+.map_err(|e| Error::with_msg_no_trace(e.to_string()))?
+.new_status_id();
 let (_head, _body) = req.into_parts();
 let query = BinnedQuery::from_url(&url).map_err(|e| {
 error!("binned_json: {e:?}");

@@ -1,8 +1,8 @@
 use crate::channelconfig::chconf_from_events_v1;
+use crate::err::Error;
 use crate::response;
 use crate::response_err;
 use crate::ToPublicResponse;
-use err::Error;
 use futures_util::stream;
 use futures_util::TryStreamExt;
 use http::Method;

@@ -1,6 +1,6 @@
 use crate::bodystream::response;
 use crate::bodystream::ToPublicResponse;
-use err::Error;
+use crate::err::Error;
 use http::Method;
 use http::Request;
 use http::Response;

@@ -1,6 +1,6 @@
 use crate::bodystream::response;
+use crate::err::Error;
 use crate::ReqCtx;
-use crate::RetrievalError;
 use http::Request;
 use http::Response;
 use http::StatusCode;
@@ -14,7 +14,7 @@ use std::collections::VecDeque;
 use std::time::Duration;

 #[allow(unused)]
-async fn table_sizes(node_config: &NodeConfigCached) -> Result<TableSizes, RetrievalError> {
+async fn table_sizes(node_config: &NodeConfigCached) -> Result<TableSizes, Error> {
 let ret = dbconn::table_sizes(node_config).await?;
 Ok(ret)
 }
@@ -39,12 +39,12 @@ impl StatusNodesRecursive {
 req: Request<Body>,
 ctx: &ReqCtx,
 node_config: &NodeConfigCached,
-) -> Result<Response<Body>, RetrievalError> {
+) -> Result<Response<Body>, Error> {
 let res = tokio::time::timeout(Duration::from_millis(1200), self.status(req, ctx, node_config)).await;
 let res = match res {
 Ok(res) => res,
 Err(e) => {
-let e = RetrievalError::from(e).add_public_msg("see timeout");
+let e = Error::from(e).add_public_msg("see timeout");
 return Ok(crate::bodystream::ToPublicResponse::to_public_response(&e));
 }
 };
@@ -67,7 +67,7 @@ impl StatusNodesRecursive {
 req: Request<Body>,
 _ctx: &ReqCtx,
 node_config: &NodeConfigCached,
-) -> Result<NodeStatus, RetrievalError> {
+) -> Result<NodeStatus, Error> {
 let (_head, _body) = req.into_parts();
 let archiver_appliance_status = match node_config.node.archiver_appliance.as_ref() {
 Some(k) => {
@@ -93,7 +93,7 @@ impl StatusNodesRecursive {
 }
 None => None,
 };
-let database_size = dbconn::database_size(node_config).await.map_err(|e| format!("{e:?}"));
+let database_size = dbconn::database_size(node_config).await.map_err(|e| format!("{e}"));
 let ret = NodeStatus {
 name: format!("{}:{}", node_config.node.host, node_config.node.port),
 version: core::env!("CARGO_PKG_VERSION").into(),

@@ -1,4 +1,4 @@
-use err::Error;
+use crate::err::Error;
 use futures_util::StreamExt;
 use http::HeaderMap;
 use http::Response;
@@ -28,6 +28,34 @@ impl ToPublicResponse for Error {
 }
 }

+impl ToPublicResponse for ::err::Error {
+fn to_public_response(&self) -> Response<Body> {
+use err::Reason;
+let e = self.to_public_error();
+let status = match e.reason() {
+Some(Reason::BadRequest) => StatusCode::BAD_REQUEST,
+Some(Reason::InternalError) => StatusCode::INTERNAL_SERVER_ERROR,
+_ => StatusCode::INTERNAL_SERVER_ERROR,
+};
+let msg = match serde_json::to_string(&e) {
+Ok(s) => s,
+Err(_) => "can not serialize error".into(),
+};
+match response(status)
+.header(http::header::ACCEPT, APP_JSON)
+.body(Body::from(msg))
+{
+Ok(res) => res,
+Err(e) => {
+error!("can not generate http error response {e:?}");
+let mut res = Response::new(Body::default());
+*res.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
+res
+}
+}
+}
+}
+
 struct BodyStreamWrap(netpod::BodyStream);

 impl hyper::body::HttpBody for BodyStreamWrap {

@@ -1,6 +1,6 @@
 use crate::bodystream::response;
+use crate::err::Error;
 use crate::ReqCtx;
-use err::Error;
 use futures_util::StreamExt;
 use http::Method;
 use http::Request;

@@ -1,7 +1,7 @@
+use crate::err::Error;
 use crate::response;
 use crate::ToPublicResponse;
 use dbconn::create_connection;
-use err::Error;
 use futures_util::StreamExt;
 use http::Method;
 use http::Request;

@@ -1,5 +1,5 @@
+use crate::err::Error;
 use crate::response;
-use crate::RetrievalError;
 use futures_util::TryStreamExt;
 use http::Method;
 use http::StatusCode;
@@ -67,11 +67,7 @@ impl DownloadHandler {
 }
 }

-pub async fn get(
-&self,
-req: Request<Body>,
-node_config: &NodeConfigCached,
-) -> Result<Response<Body>, RetrievalError> {
+pub async fn get(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
 let (head, _body) = req.into_parts();
 let p2 = &head.uri.path()[Self::path_prefix().len()..];
 let base = match &node_config.node.sf_databuffer {
@@ -88,11 +84,7 @@ impl DownloadHandler {
 Ok(response(StatusCode::OK).body(Body::wrap_stream(s))?)
 }

-pub async fn handle(
-&self,
-req: Request<Body>,
-node_config: &NodeConfigCached,
-) -> Result<Response<Body>, RetrievalError> {
+pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
 if req.method() == Method::GET {
 self.get(req, node_config).await
 } else {

@@ -37,6 +37,10 @@ impl Error {
 pub fn add_public_msg(self, msg: impl Into<String>) -> Self {
 Error(self.0.add_public_msg(msg))
 }
+
+pub fn from_to_string<E: ToString>(e: E) -> Self {
+Self::with_msg_no_trace(e.to_string())
+}
 }

 impl fmt::Debug for Error {
@@ -93,3 +97,4 @@ impl Convable for http::header::ToStrError {}
 impl Convable for hyper::Error {}
 impl Convable for std::array::TryFromSliceError {}
 impl Convable for err::anyhow::Error {}
+impl Convable for crate::RetrievalError {}

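The new from_to_string helper packages the map_err closure used throughout this commit (for example in binned_json above). A runnable sketch of how it tightens call sites, with Error reduced to a stand-in carrying only the constructor named in the diff:

#[derive(Debug)]
struct Error(String);

impl Error {
    fn with_msg_no_trace(msg: String) -> Self {
        Error(msg)
    }
    fn from_to_string<E: ToString>(e: E) -> Self {
        Self::with_msg_no_trace(e.to_string())
    }
}

fn parse_port(s: &str) -> Result<u16, Error> {
    // Instead of .map_err(|e| Error::with_msg_no_trace(e.to_string())):
    s.parse::<u16>().map_err(Error::from_to_string)
}

fn main() {
    assert!(parse_port("8080").is_ok());
    assert!(parse_port("no").is_err());
}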
@@ -1,5 +1,5 @@
+use crate::err::Error;
 use crate::response;
-use crate::RetrievalError;
 use futures_util::select;
 use futures_util::FutureExt;
 use http::Method;
@@ -33,7 +33,7 @@ struct GatherHost {
 inst: String,
 }

-async fn process_answer(res: Response<Body>) -> Result<JsonValue, RetrievalError> {
+async fn process_answer(res: Response<Body>) -> Result<JsonValue, Error> {
 let (pre, mut body) = res.into_parts();
 if pre.status != StatusCode::OK {
 use hyper::body::HttpBody;
@@ -55,14 +55,11 @@ async fn process_answer(res: Response<Body>) -> Result<JsonValue, RetrievalError
 Ok(k) => k,
 Err(_e) => JsonValue::String(String::from_utf8(body_all.to_vec())?),
 };
-Ok::<_, RetrievalError>(val)
+Ok::<_, Error>(val)
 }
 }

-pub async fn unused_gather_json_from_hosts(
-req: Request<Body>,
-pathpre: &str,
-) -> Result<Response<Body>, RetrievalError> {
+pub async fn unused_gather_json_from_hosts(req: Request<Body>, pathpre: &str) -> Result<Response<Body>, Error> {
 let (part_head, part_body) = req.into_parts();
 let bodyslice = hyper::body::to_bytes(part_body).await?;
 let gather_from: GatherFrom = serde_json::from_slice(&bodyslice)?;
@@ -82,7 +79,7 @@ pub async fn unused_gather_json_from_hosts(
 let task = tokio::spawn(async move {
 select! {
 _ = sleep(Duration::from_millis(1500)).fuse() => {
-Err(RetrievalError::with_msg("timeout"))
+Err(Error::with_msg_no_trace(format!("timeout")))
 }
 res = Client::new().request(req?).fuse() => Ok(process_answer(res?).await?)
 }
@@ -115,10 +112,7 @@ pub async fn unused_gather_json_from_hosts(
 Ok(res)
 }

-pub async fn gather_get_json(
-req: Request<Body>,
-node_config: &NodeConfigCached,
-) -> Result<Response<Body>, RetrievalError> {
+pub async fn gather_get_json(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
 let (head, body) = req.into_parts();
 let _bodyslice = hyper::body::to_bytes(body).await?;
 let pathpre = "/api/4/gather/";
@@ -136,7 +130,7 @@ pub async fn gather_get_json(req: Request<Body>, node_config: &NodeConfigCached)
 let task = tokio::spawn(async move {
 select! {
 _ = sleep(Duration::from_millis(1500)).fuse() => {
-Err(RetrievalError::with_msg("timeout"))
+Err(Error::with_msg_no_trace(format!("timeout")))
 }
 res = Client::new().request(req?).fuse() => Ok(process_answer(res?).await?)
 }
@@ -194,23 +188,23 @@ pub async fn gather_get_json_generic<SM, NT, FT, OUT>(
 // TODO use deadline instead.
 // TODO Wait a bit longer compared to remote to receive partial results.
 timeout: Duration,
-) -> Result<OUT, RetrievalError>
+) -> Result<OUT, Error>
 where
 SM: Send + 'static,
-NT: Fn(String, Response<Body>) -> Pin<Box<dyn Future<Output = Result<SubRes<SM>, RetrievalError>> + Send>>
+NT: Fn(String, Response<Body>) -> Pin<Box<dyn Future<Output = Result<SubRes<SM>, Error>> + Send>>
 + Send
 + Sync
 + Copy
 + 'static,
-FT: Fn(Vec<(Tag, Result<SubRes<SM>, RetrievalError>)>) -> Result<OUT, RetrievalError>,
+FT: Fn(Vec<(Tag, Result<SubRes<SM>, Error>)>) -> Result<OUT, Error>,
 {
 // TODO remove magic constant
 let extra_timeout = Duration::from_millis(3000);
 if urls.len() != bodies.len() {
-return Err(RetrievalError::TextError(format!("unequal numbers of urls and bodies")));
+return Err(Error::with_msg_no_trace(format!("unequal numbers of urls and bodies")));
 }
 if urls.len() != tags.len() {
-return Err(RetrievalError::TextError(format!("unequal numbers of urls and tags")));
+return Err(Error::with_msg_no_trace(format!("unequal numbers of urls and tags")));
 }
 let spawned: Vec<_> = urls
 .into_iter()
@@ -240,7 +234,7 @@ where
 select! {
 _ = sleep(timeout + extra_timeout).fuse() => {
 error!("PROXY TIMEOUT");
-Err(RetrievalError::TextError(format!("timeout")))
+Err(Error::with_msg_no_trace(format!("timeout")))
 }
 res = {
 let client = Client::new();

@@ -4,6 +4,7 @@ pub mod bodystream;
 pub mod channel_status;
 pub mod channelconfig;
 pub mod download;
+pub mod err;
 pub mod gather;
 pub mod prometheus;
 pub mod proxy;
@@ -12,10 +13,11 @@ pub mod settings;

 use self::bodystream::ToPublicResponse;
 use crate::bodystream::response;
+use crate::err::Error;
 use crate::gather::gather_get_json;
 use crate::pulsemap::UpdateTask;
-use err::thiserror;
-use err::ThisError;
+use ::err::thiserror;
+use ::err::ThisError;
 use futures_util::Future;
 use futures_util::FutureExt;
 use futures_util::StreamExt;
@@ -32,6 +34,7 @@ use net::SocketAddr;
 use netpod::is_false;
 use netpod::log::*;
 use netpod::query::prebinned::PreBinnedQuery;
+use netpod::CmpZero;
 use netpod::NodeConfigCached;
 use netpod::ProxyConfig;
 use netpod::APP_JSON;
@@ -61,7 +64,8 @@ pub const PSI_DAQBUFFER_SEEN_URL: &'static str = "PSI-Daqbuffer-Seen-Url";
|
||||
|
||||
#[derive(Debug, ThisError, Serialize, Deserialize)]
|
||||
pub enum RetrievalError {
|
||||
Error(#[from] err::Error),
|
||||
Error(#[from] ::err::Error),
|
||||
Error2(#[from] crate::err::Error),
|
||||
TextError(String),
|
||||
#[serde(skip)]
|
||||
Hyper(#[from] hyper::Error),
|
||||
@@ -79,6 +83,7 @@ trait IntoBoxedError: std::error::Error {}
 impl IntoBoxedError for net::AddrParseError {}
 impl IntoBoxedError for tokio::task::JoinError {}
 impl IntoBoxedError for api4::databuffer_tools::FindActiveError {}
+impl IntoBoxedError for std::string::FromUtf8Error {}

 impl<E> From<E> for RetrievalError
 where
@@ -89,6 +94,12 @@ where
 }
 }

+impl ::err::ToErr for RetrievalError {
+fn to_err(self) -> ::err::Error {
+::err::Error::with_msg_no_trace(self.to_string())
+}
+}
+
 pub async fn host(node_config: NodeConfigCached) -> Result<(), RetrievalError> {
 static STATUS_BOARD_INIT: Once = Once::new();
 STATUS_BOARD_INIT.call_once(|| {
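RetrievalError now absorbs both error types via thiserror's #[from], while the ToErr impl degrades back to a plain ::err::Error by message. A standalone sketch of that two-way pattern (assuming the thiserror crate; variant names and Display attributes are illustrative, the real enum lives in httpret):

use thiserror::Error as ThisError;

// #[from] gives `?` conversions into the enum; a to_err-style method
// degrades back to a message-only error (losing structure, keeping text).
#[derive(Debug, ThisError)]
enum RetrievalError {
    #[error("{0}")]
    Text(String),
    #[error(transparent)]
    Io(#[from] std::io::Error),
}

fn read(path: &str) -> Result<String, RetrievalError> {
    let s = std::fs::read_to_string(path)?; // io::Error -> RetrievalError via #[from]
    Ok(s)
}

fn to_plain_message(e: RetrievalError) -> String {
    e.to_string()
}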
@@ -114,11 +125,11 @@ pub async fn host(node_config: NodeConfigCached) -> Result<(), RetrievalError> {
 let node_config = node_config.clone();
 let addr = conn.remote_addr();
 async move {
-Ok::<_, RetrievalError>(service_fn({
+Ok::<_, Error>(service_fn({
 move |req| {
 // TODO send to logstash
 info!(
-"REQUEST {:?} - {:?} - {:?} - {:?}",
+"http-request {:?} - {:?} - {:?} - {:?}",
 addr,
 req.method(),
 req.uri(),
@@ -131,12 +142,15 @@ pub async fn host(node_config: NodeConfigCached) -> Result<(), RetrievalError> {
 }
 }
 });
-Server::bind(&addr).serve(make_service).await?;
+Server::bind(&addr)
+.serve(make_service)
+.await
+.map(|e| RetrievalError::TextError(format!("{e:?}")))?;
 rawjh.await??;
 Ok(())
 }

-async fn http_service(req: Request<Body>, node_config: NodeConfigCached) -> Result<Response<Body>, RetrievalError> {
+async fn http_service(req: Request<Body>, node_config: NodeConfigCached) -> Result<Response<Body>, Error> {
 match http_service_try(req, &node_config).await {
 Ok(k) => Ok(k),
 Err(e) => {
@@ -146,13 +160,14 @@ async fn http_service(req: Request<Body>, node_config: NodeConfigCached) -> Resu
 }
 }

+// TODO move this and related stuff to separate module
 struct Cont<F> {
 f: Pin<Box<F>>,
 }

 impl<F, I> Future for Cont<F>
 where
-F: Future<Output = Result<I, RetrievalError>>,
+F: Future<Output = Result<I, Error>>,
 {
 type Output = <F as Future>::Output;

@@ -162,13 +177,13 @@ where
 Ok(k) => k,
 Err(e) => {
 error!("Cont<F> catch_unwind {e:?}");
-match e.downcast_ref::<RetrievalError>() {
+match e.downcast_ref::<Error>() {
 Some(e) => {
 error!("Cont<F> catch_unwind is Error: {e:?}");
 }
 None => {}
 }
-Poll::Ready(Err(RetrievalError::TextError(format!("{e:?}"))))
+Poll::Ready(Err(Error::with_msg_no_trace(format!("{e:?}"))))
 }
 }
 }
@@ -271,10 +286,7 @@ macro_rules! static_http_api1 {
 };
 }

-async fn http_service_try(
-req: Request<Body>,
-node_config: &NodeConfigCached,
-) -> Result<Response<Body>, RetrievalError> {
+async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
 use http::HeaderValue;
 let mut urlmarks = Vec::new();
 urlmarks.push(format!("{}:{}", req.method(), req.uri()));
@@ -343,37 +355,37 @@ async fn http_service_inner(
 Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
 }
 } else if let Some(h) = api4::status::StatusNodesRecursive::handler(&req) {
-h.handle(req, ctx, &node_config).await
+Ok(h.handle(req, ctx, &node_config).await?)
 } else if let Some(h) = StatusBoardAllHandler::handler(&req) {
 h.handle(req, &node_config).await
 } else if let Some(h) = api4::databuffer_tools::FindActiveHandler::handler(&req) {
 Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = api4::search::ChannelSearchHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = api4::binned::BinnedHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::ChannelConfigQuorumHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::ChannelConfigsHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::ChannelConfigHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::ScyllaChannelsWithType::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::IocForChannel::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::ScyllaChannelsActive::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::ScyllaSeriesTsMsp::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channelconfig::AmbigiousChannelNames::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = api4::events::EventsHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = channel_status::ConnectionStatusEvents::handler(&req) {
-h.handle(req, ctx, &node_config).await
+Ok(h.handle(req, ctx, &node_config).await?)
 } else if let Some(h) = channel_status::ChannelStatusEvents::handler(&req) {
-h.handle(req, ctx, &node_config).await
+Ok(h.handle(req, ctx, &node_config).await?)
 } else if path == "/api/4/prebinned" {
 if req.method() == Method::GET {
 Ok(prebinned(req, ctx, &node_config).await?)
@@ -417,29 +429,29 @@ async fn http_service_inner(
 Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
 }
 } else if let Some(h) = download::DownloadHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = settings::SettingsThreadsMaxHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = api1::Api1EventsBinaryHandler::handler(&req) {
-h.handle(req, ctx, &node_config).await
+Ok(h.handle(req, ctx, &node_config).await?)
 } else if let Some(h) = pulsemap::MapPulseScyllaHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = pulsemap::IndexFullHttpFunction::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = pulsemap::MarkClosedHttpFunction::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = pulsemap::MapPulseLocalHttpFunction::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = pulsemap::MapPulseHistoHttpFunction::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = pulsemap::MapPulseHttpFunction::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = pulsemap::Api4MapPulse2HttpFunction::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = pulsemap::Api4MapPulseHttpFunction::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if let Some(h) = api1::RequestStatusHandler::handler(&req) {
-h.handle(req, &node_config).await
+Ok(h.handle(req, &node_config).await?)
 } else if path.starts_with("/api/1/documentation/") {
 if req.method() == Method::GET {
 api_1_docs(path)
@@ -670,26 +682,33 @@ async fn update_search_cache(
 Ok(ret)
 }

-#[derive(Serialize)]
+#[derive(Debug, Serialize)]
 pub struct StatusBoardEntry {
 #[allow(unused)]
 #[serde(serialize_with = "instant_serde::ser")]
 ts_created: SystemTime,
 #[serde(serialize_with = "instant_serde::ser")]
 ts_updated: SystemTime,
 #[serde(skip_serializing_if = "is_false")]
 is_error: bool,
 #[serde(skip_serializing_if = "is_false")]
 is_ok: bool,
+// #[serde(skip_serializing_if = "is_false")]
+done: bool,
+// #[serde(skip_serializing_if = "Vec::is_empty")]
+#[serde(skip)]
-errors: Vec<Box<dyn std::error::Error + Send>>,
+errors: Vec<::err::Error>,
+// TODO make this a better Stats container and remove pub access.
+// #[serde(default, skip_serializing_if = "CmpZero::is_zero")]
+error_count: usize,
+// #[serde(default, skip_serializing_if = "CmpZero::is_zero")]
+warn_count: usize,
+// #[serde(default, skip_serializing_if = "CmpZero::is_zero")]
+channel_not_found: usize,
+// #[serde(default, skip_serializing_if = "CmpZero::is_zero")]
+subreq_fail: usize,
 }

 mod instant_serde {
 use super::*;
 use netpod::DATETIME_FMT_3MS;
 use serde::Serializer;

 pub fn ser<S: Serializer>(x: &SystemTime, ser: S) -> Result<S::Ok, S::Error> {
 use chrono::LocalResult;
 let dur = x.duration_since(std::time::UNIX_EPOCH).unwrap();
@@ -713,14 +732,48 @@ impl StatusBoardEntry {
 Self {
 ts_created: SystemTime::now(),
 ts_updated: SystemTime::now(),
 is_error: false,
 is_ok: false,
+done: false,
 errors: Vec::new(),
+error_count: 0,
+warn_count: 0,
+channel_not_found: 0,
+subreq_fail: 0,
 }
 }

+pub fn warn_inc(&mut self) {
+self.warn_count += 1;
+}
+
+pub fn channel_not_found_inc(&mut self) {
+self.channel_not_found += 1;
+}
+}
+
+#[derive(Debug, Serialize)]
+pub struct StatusBoardEntryUser {
+// #[serde(default, skip_serializing_if = "CmpZero::is_zero")]
+error_count: usize,
+// #[serde(default, skip_serializing_if = "CmpZero::is_zero")]
+warn_count: usize,
+// #[serde(default, skip_serializing_if = "CmpZero::is_zero")]
+channel_not_found: usize,
+#[serde(skip_serializing_if = "Vec::is_empty")]
+errors: Vec<::err::PublicError>,
+}
+
+impl From<&StatusBoardEntry> for StatusBoardEntryUser {
+fn from(e: &StatusBoardEntry) -> Self {
+Self {
+error_count: e.error_count,
+warn_count: e.warn_count,
+channel_not_found: e.channel_not_found,
+errors: e.errors.iter().map(|e| e.to_public_error()).collect(),
+}
+}
+}
+
-#[derive(Serialize)]
+#[derive(Debug, Serialize)]
 pub struct StatusBoard {
 entries: BTreeMap<String, StatusBoardEntry>,
 }
@@ -741,7 +794,7 @@ impl StatusBoard {
 f.read_exact(&mut buf).unwrap();
 let n = u32::from_le_bytes(buf);
 let s = format!("{:08x}", n);
-info!("new_status_id {s}");
+debug!("new_status_id {s}");
 self.entries.insert(s.clone(), StatusBoardEntry::new());
 s
 }
@@ -757,6 +810,10 @@ impl StatusBoard {
 }
 }

+pub fn get_entry(&mut self, status_id: &str) -> Option<&mut StatusBoardEntry> {
+self.entries.get_mut(status_id)
+}
+
 pub fn mark_alive(&mut self, status_id: &str) {
 match self.entries.get_mut(status_id) {
 Some(e) => {
@@ -768,12 +825,25 @@ impl StatusBoard {
 }
 }

-pub fn mark_ok(&mut self, status_id: &str) {
+pub fn mark_done(&mut self, status_id: &str) {
 match self.entries.get_mut(status_id) {
 Some(e) => {
 e.ts_updated = SystemTime::now();
+if !e.is_error {
 e.is_ok = true;
+e.done = true;
 }
 None => {
 error!("can not find status id {}", status_id);
 }
 }
 }

+pub fn add_error(&mut self, status_id: &str, err: ::err::Error) {
+match self.entries.get_mut(status_id) {
+Some(e) => {
+e.ts_updated = SystemTime::now();
+if e.errors.len() < 100 {
+e.errors.push(err);
+e.error_count += 1;
+}
+}
+None => {
@@ -782,51 +852,18 @@ impl StatusBoard {
 }
 }

-pub fn add_error<E>(&mut self, status_id: &str, error: E)
-where
-E: Into<Box<dyn std::error::Error + Send>>,
-{
-match self.entries.get_mut(status_id) {
-Some(e) => {
-e.ts_updated = SystemTime::now();
-e.is_error = true;
-e.is_ok = false;
-e.errors.push(error.into());
-}
-None => {
-error!("can not find status id {}", status_id);
-}
-}
-}
-
-pub fn status_as_json(&self, status_id: &str) -> String {
-#[derive(Serialize)]
-struct StatJs {
-#[serde(skip_serializing_if = "Vec::is_empty")]
-errors: Vec<::err::PublicError>,
-}
+pub fn status_as_json(&self, status_id: &str) -> StatusBoardEntryUser {
 match self.entries.get(status_id) {
-Some(e) => {
-if e.is_ok {
-let js = StatJs { errors: Vec::new() };
-return serde_json::to_string(&js).unwrap();
-} else if e.is_error {
-// TODO
-// let errors = e.errors.iter().map(|e| (&e.0).into()).collect();
-let errors = vec![err::Error::with_msg_no_trace("TODO convert to user error").into()];
-let js = StatJs { errors };
-return serde_json::to_string(&js).unwrap();
-} else {
-warn!("requestStatus for unfinished {status_id}");
-let js = StatJs { errors: Vec::new() };
-return serde_json::to_string(&js).unwrap();
-}
-}
+Some(e) => e.into(),
 None => {
 error!("can not find status id {}", status_id);
-let e = ::err::Error::with_public_msg_no_trace(format!("Request status ID unknown {status_id}"));
-let js = StatJs { errors: vec![e.into()] };
-return serde_json::to_string(&js).unwrap();
+StatusBoardEntryUser {
+error_count: 1,
+warn_count: 0,
+channel_not_found: 0,
+errors: vec![::err::Error::with_public_msg_no_trace("request-id not found").into()],
+}
 }
 }
 }

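With this change status_as_json returns a typed StatusBoardEntryUser instead of a pre-serialized String, so the handler serializes exactly once at the edge and the channel_not_found counter travels along. A sketch of the resulting payload shape (assuming serde with derive plus serde_json; field names mirror StatusBoardEntryUser in the diff, errors reduced to strings for the example):

use serde::Serialize;

#[derive(Serialize)]
struct StatusBoardEntryUser {
    error_count: usize,
    warn_count: usize,
    channel_not_found: usize,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    errors: Vec<String>,
}

fn main() {
    let e = StatusBoardEntryUser {
        error_count: 0,
        warn_count: 2,
        channel_not_found: 1,
        errors: Vec::new(),
    };
    // Prints: {"error_count":0,"warn_count":2,"channel_not_found":1}
    println!("{}", serde_json::to_string(&e).unwrap());
}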
@@ -423,7 +423,7 @@ pub async fn host(bind: SocketAddr) -> Result<(), RetrievalError> {
 Ok::<_, RetrievalError>(service_fn({
 move |req| {
 info!(
-"REQUEST {:?} - {:?} - {:?} - {:?}",
+"http-request {:?} - {:?} - {:?} - {:?}",
 addr,
 req.method(),
 req.uri(),

@@ -6,6 +6,7 @@ use crate::api1::channel_search_list_v1;
 use crate::api1::gather_json_2_v1;
 use crate::api_1_docs;
 use crate::api_4_docs;
+use crate::err::Error;
 use crate::gather::gather_get_json_generic;
 use crate::gather::SubRes;
 use crate::pulsemap::MapPulseQuery;
@@ -14,7 +15,6 @@ use crate::response_err;
 use crate::Cont;
 use crate::ReqCtx;
 use crate::PSI_DAQBUFFER_SERVICE_MARK;
-use err::Error;
 use futures_util::pin_mut;
 use futures_util::Stream;
 use http::Method;
@@ -66,9 +66,8 @@ pub async fn proxy(proxy_config: ProxyConfig) -> Result<(), Error> {
 async move {
 Ok::<_, Error>(service_fn({
 move |req| {
-// TODO send to logstash
 info!(
-"REQUEST {:?} - {:?} - {:?} - {:?}",
+"http-request {:?} - {:?} - {:?} - {:?}",
 addr,
 req.method(),
 req.uri(),
@@ -159,13 +158,13 @@ async fn proxy_http_service_inner(
 Ok(proxy_single_backend_query::<ChannelConfigQuery>(req, ctx, proxy_config).await?)
 } else if path.starts_with("/api/1/documentation/") {
 if req.method() == Method::GET {
-api_1_docs(path)
+Ok(api_1_docs(path)?)
 } else {
 Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
 }
 } else if path.starts_with("/api/4/documentation/") {
 if req.method() == Method::GET {
-api_4_docs(path)
+Ok(api_4_docs(path)?)
 } else {
 Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
 }

@@ -1,8 +1,8 @@
 pub mod reqstatus;

 use crate::bodystream::response;
+use crate::err::Error;
 use crate::ReqCtx;
-use err::Error;
 use http::HeaderValue;
 use http::Method;
 use http::Request;

@@ -1,5 +1,5 @@
 use crate::bodystream::response;
-use err::Error;
+use crate::err::Error;
 use http::Method;
 use http::Request;
 use http::Response;
@@ -45,7 +45,7 @@ impl RequestStatusHandler {
 }
 let _body_data = hyper::body::to_bytes(body).await?;
 let status_id = &head.uri.path()[Self::path_prefix().len()..];
-info!("RequestStatusHandler status_id {:?}", status_id);
+debug!("RequestStatusHandler status_id {:?}", status_id);

 let back = {
 let mut ret = None;
@@ -59,7 +59,7 @@ impl RequestStatusHandler {
 };
 if let Some(back) = back {
 let url_str = format!("{}{}{}", back.url, Self::path_prefix(), status_id);
-info!("try to ask {url_str}");
+debug!("try to ask {url_str}");
 let req = Request::builder()
 .method(Method::GET)
 .uri(url_str)
@@ -71,7 +71,7 @@ impl RequestStatusHandler {
 error!("backend returned error: {head:?}");
 Ok(response(StatusCode::INTERNAL_SERVER_ERROR).body(Body::empty())?)
 } else {
-info!("backend returned OK");
+debug!("backend returned OK");
 Ok(response(StatusCode::OK).body(body)?)
 }
 } else {

@@ -1,12 +1,12 @@
 pub mod caioclookup;

 use crate::bodystream::ToPublicResponse;
+use crate::err::Error;
 use crate::gather::gather_get_json_generic;
 use crate::gather::SubRes;
 use crate::gather::Tag;
 use crate::response;
 use crate::ReqCtx;
-use err::Error;
 use futures_util::Future;
 use http::Method;
 use http::Request;

@@ -1,6 +1,6 @@
 use crate::bodystream::response;
+use crate::err::Error;
 use crate::ReqCtx;
-use err::Error;
 use http::Request;
 use http::Response;
 use http::StatusCode;

@@ -1,3 +1,4 @@
+use crate::err::Error;
 use crate::response;
 use async_channel::Receiver;
 use async_channel::Sender;
@@ -6,7 +7,6 @@ use bytes::BufMut;
 use bytes::BytesMut;
 use chrono::TimeZone;
 use chrono::Utc;
-use err::Error;
 use futures_util::stream::FuturesOrdered;
 use futures_util::stream::FuturesUnordered;
 use futures_util::FutureExt;

@@ -1,5 +1,5 @@
+use crate::err::Error;
 use crate::response;
-use err::Error;
 use http::Method;
 use http::StatusCode;
 use hyper::Body;

@@ -42,6 +42,7 @@ pub enum StatsItem {
 EventDataReadStats(EventDataReadStats),
 RangeFilterStats(RangeFilterStats),
 DiskStats(DiskStats),
+Warnings(),
 }

 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]

@@ -80,7 +80,7 @@ mod decomps_serde {
 }

 impl EventFull {
-pub fn add_event(
+pub fn push(
 &mut self,
 ts: u64,
 pulse: u64,
@@ -118,6 +118,13 @@ impl EventFull {
 self.shapes.truncate(nkeep);
 self.comps.truncate(nkeep);
 }
+
+// NOTE needed because the databuffer actually doesn't write the correct shape per event.
+pub fn overwrite_all_shapes(&mut self, shape: &Shape) {
+for u in &mut self.shapes {
+*u = shape.clone();
+}
+}
 }

 impl FrameTypeInnerStatic for EventFull {

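The new overwrite_all_shapes helper exists because databuffer files can carry a wrong per-event shape; the caller (handle_chan_stream_ready above) replaces every event's shape with the one from the channel config, except for scalars. A runnable sketch of the behavior on a reduced EventFull:

#[derive(Clone, Debug, PartialEq)]
enum Shape {
    Scalar,
    Wave(u32),
}

struct EventFull {
    shapes: Vec<Shape>,
}

impl EventFull {
    // NOTE needed because the databuffer actually doesn't write the
    // correct shape per event (comment carried over from the diff).
    fn overwrite_all_shapes(&mut self, shape: &Shape) {
        for u in &mut self.shapes {
            *u = shape.clone();
        }
    }
}

fn main() {
    let mut ev = EventFull {
        shapes: vec![Shape::Scalar, Shape::Wave(16)],
    };
    ev.overwrite_all_shapes(&Shape::Wave(2048));
    assert!(ev.shapes.iter().all(|s| *s == Shape::Wave(2048)));
}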
@@ -69,6 +69,11 @@ impl CmpZero for u32 {
 *self == 0
 }
 }
+impl CmpZero for usize {
+fn is_zero(&self) -> bool {
+*self == 0
+}
+}

 pub struct BodyStream {
 //pub receiver: async_channel::Receiver<Result<Bytes, Error>>,
@@ -2265,6 +2270,17 @@ impl ReadExactStats {
 }
 }

+#[derive(Debug, Serialize, Deserialize)]
+pub struct Api1WarningStats {
+pub subreq_fail: usize,
+}
+
+impl Api1WarningStats {
+pub fn new() -> Self {
+Self { subreq_fail: 0 }
+}
+}
+
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct ByteSize(pub u32);

@@ -237,7 +237,7 @@ async fn events_conn_handler_with_reqid(
 {
 let item = LogItem {
 node_ix: ncc.ix as _,
-level: Level::INFO,
+level: Level::DEBUG,
 msg: format!("buf_len_histo: {:?}", buf_len_histo),
 };
 let item: Sitemty<ChannelEvents> = Ok(StreamItem::Log(item));

@@ -128,6 +128,7 @@ impl Collect {
 //total_duration += k.duration;
 }
 },
+_ => {}
 }
 Ok(())
 }
@@ -281,6 +282,7 @@ where
 total_duration += k.duration;
 }
 },
+_ => {}
 }
 }
 },

@@ -40,7 +40,7 @@ impl NeedMinBuffer {
 // TODO collect somewhere else
 impl Drop for NeedMinBuffer {
 fn drop(&mut self) {
-debug!("NeedMinBuffer Drop Stats:\nbuf_len_histo: {:?}", self.buf_len_histo);
+debug!("NeedMinBuffer-drop {{ buf_len_histo: {:?} }}", self.buf_len_histo);
 }
 }