Clean up, collect with timeout
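In short: each proxied sub-request now races against the caller's timeout plus a small extra margin, and whatever arrives in time is collected while late or failed sub-requests are kept as errors, so the gather can still return partial results. Below is a minimal sketch of that collect-with-margin idea, assuming tokio; the fetch helper and the gather_with_margin name are illustrative stand-ins, not the crate's gather_get_json_generic API (which drives hyper requests through a fused select!), while the 3000 ms margin mirrors the extra_timeout introduced further down.

    use std::time::Duration;
    use tokio::time::sleep;

    // Stand-in for the real HTTP sub-request (the crate drives hyper::Client instead).
    async fn fetch(url: String) -> Result<String, String> {
        // ... issue the request and read the body; elided in this sketch
        Ok(format!("response from {url}"))
    }

    // Race every sub-request against the timeout plus a fixed extra margin and
    // collect whatever finished; timed-out or failed entries stay in the output
    // as errors instead of aborting the whole gather.
    async fn gather_with_margin(urls: Vec<String>, timeout: Duration) -> Vec<Result<String, String>> {
        let extra_timeout = Duration::from_millis(3000);
        let mut handles = Vec::new();
        for url in urls {
            handles.push(tokio::spawn(async move {
                tokio::select! {
                    _ = sleep(timeout + extra_timeout) => Err(format!("timeout for {url}")),
                    res = fetch(url.clone()) => res,
                }
            }));
        }
        let mut out = Vec::new();
        for h in handles {
            // A panicked or cancelled task is reported as an error as well.
            out.push(h.await.unwrap_or_else(|e| Err(format!("join error: {e}"))));
        }
        out
    }
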
@@ -37,7 +37,7 @@ async fn binned_json(url: Url, req: Request<Body>, node_config: &NodeConfigCache
 span1.in_scope(|| {
 debug!("begin");
 });
-let item = streams::timebinnedjson::timebinned_json(&query, &node_config.node_config.cluster)
+let item = streams::timebinnedjson::timebinned_json(&query, &chconf, &node_config.node_config.cluster)
 .instrument(span1)
 .await?;
 let buf = serde_json::to_vec(&item)?;

@@ -42,7 +42,7 @@ impl ChannelSearchHandler {
 Ok(response(StatusCode::OK).body(Body::from(buf))?)
 }
 Err(e) => {
-warn!("ChannelConfigHandler::handle: got error from channel_config: {e:?}");
+warn!("handle: got error from channel_search: {e:?}");
 Ok(e.to_public_response())
 }
 }

@@ -10,6 +10,7 @@ use netpod::NodeConfigCached;
 use netpod::NodeStatus;
 use netpod::NodeStatusArchiverAppliance;
 use netpod::TableSizes;
+use std::collections::VecDeque;
 use std::time::Duration;

 async fn table_sizes(node_config: &NodeConfigCached) -> Result<TableSizes, Error> {

@@ -94,13 +95,14 @@ impl StatusNodesRecursive {
 let database_size = dbconn::database_size(node_config).await.map_err(|e| format!("{e:?}"));
 let ret = NodeStatus {
 name: format!("{}:{}", node_config.node.host, node_config.node.port),
 version: core::env!("CARGO_PKG_VERSION").into(),
 is_sf_databuffer: node_config.node.sf_databuffer.is_some(),
 is_archiver_engine: node_config.node.channel_archiver.is_some(),
 is_archiver_appliance: node_config.node.archiver_appliance.is_some(),
 database_size: Some(database_size),
 table_sizes: Some(table_sizes(node_config).await.map_err(Into::into)),
 archiver_appliance_status,
-subs: None,
+subs: VecDeque::new(),
 };
 Ok(ret)
 }

@@ -1,7 +1,6 @@
 use crate::err::Error;
 use crate::{response, ToPublicResponse};
 use dbconn::channelconfig::chconf_from_database;
-use dbconn::channelconfig::ChConf;
 use dbconn::create_connection;
 use futures_util::StreamExt;
 use http::{Method, Request, Response, StatusCode};

@@ -11,6 +10,7 @@ use netpod::log::*;
 use netpod::query::prebinned::PreBinnedQuery;
 use netpod::query::{BinnedQuery, PlainEventsQuery};
 use netpod::timeunits::*;
+use netpod::ChConf;
 use netpod::{Channel, ChannelConfigQuery, FromUrl, ScalarType, Shape};
 use netpod::{ChannelConfigResponse, NodeConfigCached};
 use netpod::{ACCEPT_ALL, APP_JSON};

@@ -33,6 +33,7 @@ pub async fn chconf_from_events_json(q: &PlainEventsQuery, ncc: &NodeConfigCache

pub async fn chconf_from_prebinned(q: &PreBinnedQuery, _ncc: &NodeConfigCached) -> Result<ChConf, Error> {
let ret = ChConf {
backend: q.channel().backend().into(),
series: q
.channel()
.series()

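For orientation, the ChConf value assembled in chconf_from_prebinned above carries the backend name and the series id taken from the query's channel. A rough, self-contained sketch of that construction follows; the struct fields' types, the Channel accessors, and the error type are assumptions for illustration, not netpod's actual definitions.

    // Hypothetical, simplified shapes; the real ChConf and Channel live in netpod.
    struct ChConf {
        backend: String,
        series: u64,
    }

    struct Channel {
        backend: String,
        series: Option<u64>,
    }

    impl Channel {
        fn backend(&self) -> &str {
            &self.backend
        }
        fn series(&self) -> Option<u64> {
            self.series
        }
    }

    // Build the channel config from what the prebinned query already carries;
    // a query without a series id is reported as an error.
    fn chconf_from_channel(ch: &Channel) -> Result<ChConf, String> {
        Ok(ChConf {
            backend: ch.backend().into(),
            series: ch.series().ok_or_else(|| "channel has no series id".to_string())?,
        })
    }
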
@@ -34,7 +34,6 @@ impl EventsHandler {
 }

 async fn plain_events(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-info!("httpret plain_events req: {:?}", req);
 let accept_def = APP_JSON;
 let accept = req
 .headers()

@@ -83,20 +82,19 @@ async fn plain_events_json(
 req: Request<Body>,
 node_config: &NodeConfigCached,
 ) -> Result<Response<Body>, Error> {
-debug!("httpret plain_events_json req: {:?}", req);
+info!("httpret plain_events_json req: {:?}", req);
 let (_head, _body) = req.into_parts();
 let query = PlainEventsQuery::from_url(&url)?;
-let chconf = chconf_from_events_json(&query, node_config).await.map_err(|e| {
-error!("chconf_from_events_json {e:?}");
-e.add_public_msg(format!("Can not get channel information"))
-})?;
+let chconf = chconf_from_events_json(&query, node_config)
+.await
+.map_err(Error::from)?;
 // Update the series id since we don't require some unique identifier yet.
 let mut query = query;
 query.set_series_id(chconf.series);
 let query = query;
 // ---
 //let query = RawEventsQuery::new(query.channel().clone(), query.range().clone(), AggKind::Plain);
-let item = streams::plaineventsjson::plain_events_json(&query, &node_config.node_config.cluster).await;
+let item = streams::plaineventsjson::plain_events_json(&query, &chconf, &node_config.node_config.cluster).await;
 let item = match item {
 Ok(item) => item,
 Err(e) => {

@@ -178,7 +178,8 @@ pub async fn gather_get_json_generic<SM, NT, FT, OUT>(
 tags: Vec<String>,
 nt: NT,
 ft: FT,
-// TODO use deadline instead
+// TODO use deadline instead.
+// TODO Wait a bit longer compared to remote to receive partial results.
 timeout: Duration,
 ) -> Result<OUT, Error>
 where

@@ -190,6 +191,8 @@ where
 + 'static,
 FT: Fn(Vec<(Tag, Result<SubRes<SM>, Error>)>) -> Result<OUT, Error>,
 {
+// TODO remove magic constant
+let extra_timeout = Duration::from_millis(3000);
 if urls.len() != bodies.len() {
 return Err(Error::with_msg_no_trace("unequal numbers of urls and bodies"));
 }

@@ -222,14 +225,17 @@ where
 let tag2 = tag.clone();
 let jh = tokio::spawn(async move {
 select! {
-_ = sleep(timeout).fuse() => {
+_ = sleep(timeout + extra_timeout).fuse() => {
+error!("PROXY TIMEOUT");
 Err(Error::with_msg_no_trace("timeout"))
 }
 res = {
 let client = Client::new();
 client.request(req?).fuse()
 } => {
+info!("received result in time");
 let ret = nt(tag2, res?).await?;
+info!("transformed result in time");
 Ok(ret)
 }
 }

@@ -112,8 +112,8 @@ async fn proxy_http_service_inner(
 h.handle(req, ctx, &proxy_config).await
 } else if path == "/api/4/backends" {
 Ok(backends(req, proxy_config).await?)
-} else if path == "/api/4/search/channel" {
-Ok(api4::channel_search(req, proxy_config).await?)
+} else if let Some(h) = api4::ChannelSearchAggHandler::handler(&req) {
+h.handle(req, &proxy_config).await
 } else if path == "/api/4/events" {
 Ok(proxy_single_backend_query::<PlainEventsQuery>(req, ctx, proxy_config).await?)
 } else if path == "/api/4/status/connection/events" {

@@ -1,108 +1,147 @@
use crate::bodystream::ToPublicResponse;
use crate::err::Error;
use crate::gather::{gather_get_json_generic, SubRes, Tag};
use crate::{response, ReqCtx};
use crate::gather::gather_get_json_generic;
use crate::gather::SubRes;
use crate::gather::Tag;
use crate::response;
use crate::ReqCtx;
use futures_util::Future;
use http::{header, Request, Response, StatusCode};
use http::Method;
use http::Request;
use http::Response;
use http::StatusCode;
use hyper::Body;
use itertools::Itertools;
use netpod::log::*;
use netpod::ChannelSearchQuery;
use netpod::ChannelSearchResult;
use netpod::NodeStatus;
use netpod::NodeStatusSub;
use netpod::ProxyConfig;
use netpod::ACCEPT_ALL;
use netpod::{ChannelSearchQuery, ChannelSearchResult, ProxyConfig, APP_JSON};
use netpod::APP_JSON;
use serde_json::Value as JsVal;
use std::collections::BTreeMap;
use std::collections::VecDeque;
use std::pin::Pin;
use std::time::Duration;
use url::Url;

pub async fn channel_search(req: Request<Body>, proxy_config: &ProxyConfig) -> Result<Response<Body>, Error> {
// TODO model channel search according to StatusNodesRecursive.
// Make sure that backend handling is correct:
// The aggregator asks all backends, except if the user specifies some backend
// in which case it should only go to the matching backends.
// The aggregators and leaf nodes behind should as well not depend on backend,
// but simply answer all matching.

pub async fn channel_search(req: Request<Body>, proxy_config: &ProxyConfig) -> Result<ChannelSearchResult, Error> {
let (head, _body) = req.into_parts();
let vdef = header::HeaderValue::from_static(APP_JSON);
let v = head.headers.get(header::ACCEPT).unwrap_or(&vdef);
if v == APP_JSON || v == ACCEPT_ALL {
let inpurl = Url::parse(&format!("dummy:{}", head.uri))?;
let query = ChannelSearchQuery::from_url(&inpurl)?;
let mut bodies = vec![];
let urls = proxy_config
.backends
.iter()
.filter(|k| {
if let Some(back) = &query.backend {
back == &k.name
} else {
true
}
})
.map(|pb| match Url::parse(&format!("{}/api/4/search/channel", pb.url)) {
let inpurl = Url::parse(&format!("dummy:{}", head.uri))?;
let query = ChannelSearchQuery::from_url(&inpurl)?;
let mut urls = Vec::new();
let mut tags = Vec::new();
let mut bodies = Vec::new();
for pb in &proxy_config.backends {
if if let Some(b) = &query.backend {
pb.name.contains(b)
} else {
true
} {
match Url::parse(&format!("{}/api/4/search/channel", pb.url)) {
Ok(mut url) => {
query.append_to_url(&mut url);
Ok(url)
tags.push(url.to_string());
bodies.push(None);
urls.push(url);
}
Err(_) => return Err(Error::with_msg(format!("parse error for: {:?}", pb))),
}
}
}
let nt = |tag, res| {
let fut = async {
let body = hyper::body::to_bytes(res).await?;
info!("got a result {:?}", body);
let res: ChannelSearchResult = match serde_json::from_slice(&body) {
Ok(k) => k,
Err(_) => {
let msg = format!("can not parse result: {}", String::from_utf8_lossy(&body));
error!("{}", msg);
return Err(Error::with_msg_no_trace(msg));
}
Err(_) => Err(Error::with_msg(format!("parse error for: {:?}", pb))),
})
.fold_ok(vec![], |mut a, x| {
a.push(x);
bodies.push(None);
a
})?;
let tags = urls.iter().map(|k| k.to_string()).collect();
let nt = |tag, res| {
let fut = async {
let body = hyper::body::to_bytes(res).await?;
//info!("got a result {:?}", body);
let res: ChannelSearchResult = match serde_json::from_slice(&body) {
Ok(k) => k,
Err(_) => {
let msg = format!("can not parse result: {}", String::from_utf8_lossy(&body));
error!("{}", msg);
return Err(Error::with_msg_no_trace(msg));
}
};
let ret = SubRes {
tag,
status: StatusCode::OK,
val: res,
};
Ok(ret)
};
Box::pin(fut) as Pin<Box<dyn Future<Output = _> + Send>>
let ret = SubRes {
tag,
status: StatusCode::OK,
val: res,
};
Ok(ret)
};
let ft = |all: Vec<(Tag, Result<SubRes<ChannelSearchResult>, Error>)>| {
let mut res = Vec::new();
for (_tag, j) in all {
match j {
Ok(j) => {
for k in j.val.channels {
res.push(k);
}
}
Err(e) => {
warn!("{e}");
Box::pin(fut) as Pin<Box<dyn Future<Output = _> + Send>>
};
let ft = |all: Vec<(Tag, Result<SubRes<ChannelSearchResult>, Error>)>| {
let mut res = Vec::new();
for (_tag, j) in all {
match j {
Ok(j) => {
for k in j.val.channels {
res.push(k);
}
}
Err(e) => {
warn!("{e}");
}
}
let res = ChannelSearchResult { channels: res };
let res = response(StatusCode::OK)
.header(http::header::CONTENT_TYPE, APP_JSON)
.body(Body::from(serde_json::to_string(&res)?))
.map_err(Error::from)?;
Ok(res)
};
let ret = gather_get_json_generic(
http::Method::GET,
urls,
bodies,
tags,
nt,
ft,
Duration::from_millis(3000),
)
.await?;
Ok(ret)
} else {
Ok(response(StatusCode::NOT_ACCEPTABLE)
.body(Body::from(format!("{:?}", proxy_config.name)))
.map_err(Error::from)?)
}
let res = ChannelSearchResult { channels: res };
Ok(res)
};
let ret = gather_get_json_generic(
http::Method::GET,
urls,
bodies,
tags,
nt,
ft,
Duration::from_millis(3000),
)
.await?;
Ok(ret)
}

pub struct ChannelSearchAggHandler {}

impl ChannelSearchAggHandler {
pub fn handler(req: &Request<Body>) -> Option<Self> {
if req.uri().path() == "/api/4/search/channel" {
Some(Self {})
} else {
None
}
}

pub async fn handle(&self, req: Request<Body>, node_config: &ProxyConfig) -> Result<Response<Body>, Error> {
if req.method() == Method::GET {
let accept_def = APP_JSON;
let accept = req
.headers()
.get(http::header::ACCEPT)
.map_or(accept_def, |k| k.to_str().unwrap_or(accept_def));
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
match channel_search(req, node_config).await {
Ok(item) => {
let buf = serde_json::to_vec(&item)?;
Ok(response(StatusCode::OK).body(Body::from(buf))?)
}
Err(e) => {
warn!("ChannelConfigHandler::handle: got error from channel_config: {e:?}");
Ok(e.to_public_response())
}
}
} else {
Ok(response(StatusCode::BAD_REQUEST).body(Body::empty())?)
}
} else {
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
}
}
}

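The backend handling described in the comment at the top of channel_search reduces to a filter over the configured backends: with no backend named in the query, every backend is asked; with one named, only the backends whose name matches. A small sketch of that selection follows, with a hypothetical ProxyBackend type standing in for the proxy config entries; in the commit itself each selected backend then contributes one URL, one tag, and one empty body to gather_get_json_generic.

    // Hypothetical stand-in for one configured backend entry.
    struct ProxyBackend {
        name: String,
        url: String,
    }

    // Pick the backends a search is fanned out to: all of them when the query
    // names no backend, otherwise only those whose name matches the request.
    fn select_backends<'a>(backends: &'a [ProxyBackend], requested: Option<&str>) -> Vec<&'a ProxyBackend> {
        backends
            .iter()
            .filter(|pb| match requested {
                Some(b) => pb.name.contains(b),
                None => true,
            })
            .collect()
    }
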
@@ -183,27 +222,33 @@ impl StatusNodesRecursive {
 Box::pin(fut) as Pin<Box<dyn Future<Output = _> + Send>>
 };
 let ft = |all: Vec<(Tag, Result<SubRes<JsVal>, Error>)>| {
-let mut subs = BTreeMap::new();
+let mut subs = VecDeque::new();
 for (tag, sr) in all {
 match sr {
 Ok(sr) => {
 let s: Result<NodeStatus, _> = serde_json::from_value(sr.val).map_err(err::Error::from);
-subs.insert(tag.0, s);
+let sub = NodeStatusSub { url: tag.0, status: s };
+subs.push_back(sub);
 }
 Err(e) => {
-subs.insert(tag.0, Err(err::Error::from(e)));
+let sub = NodeStatusSub {
+url: tag.0,
+status: Err(err::Error::from(e)),
+};
+subs.push_back(sub);
 }
 }
 }
 let ret = NodeStatus {
 name: format!("{}:{}", proxy_config.name, proxy_config.port),
 version: core::env!("CARGO_PKG_VERSION").into(),
 is_sf_databuffer: false,
 is_archiver_engine: false,
 is_archiver_appliance: false,
 database_size: None,
 table_sizes: None,
 archiver_appliance_status: None,
-subs: Some(subs),
+subs,
 };
 Ok(ret)
 };
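The status aggregation above follows the same collect-everything shape: every tagged sub-result, successful or not, becomes one entry keyed by the URL it came from, so a failing node still shows up in the combined status. A condensed sketch with simplified stand-in types; the real code deserializes each value into netpod::NodeStatus and wraps failures in the crate's Error type.

    use std::collections::VecDeque;

    // Simplified stand-ins for the tagged sub-results and the per-node entry.
    struct Tag(String);

    struct NodeStatusSub {
        url: String,
        status: Result<String, String>,
    }

    // Fold every tagged result into one ordered list; failures are kept as
    // entries rather than dropped, so the aggregate shows which node failed.
    fn collect_subs(all: Vec<(Tag, Result<String, String>)>) -> VecDeque<NodeStatusSub> {
        let mut subs = VecDeque::new();
        for (tag, status) in all {
            subs.push_back(NodeStatusSub { url: tag.0, status });
        }
        subs
    }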