Thread ReqCtx through HTTP/proxy handlers; allocate request id once per request at the service entry points; factor out status_board_init

This commit is contained in:
Dominik Werder
2023-12-07 16:33:52 +01:00
parent f946d1b6d9
commit 90fe23b676
28 changed files with 365 additions and 205 deletions

View File

@@ -62,6 +62,7 @@ use std::any;
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use std::time::Duration;
@@ -527,7 +528,7 @@ pub struct DataApiPython3DataStream {
event_count: usize,
events_max: u64,
header_out: bool,
reqctx: ReqCtxArc,
ctx: ReqCtxArc,
ping_last: Instant,
data_done: bool,
completed: bool,
@@ -561,7 +562,7 @@ impl DataApiPython3DataStream {
event_count: 0,
events_max,
header_out: false,
reqctx,
ctx: reqctx,
ping_last: Instant::now(),
data_done: false,
completed: false,
@@ -737,7 +738,7 @@ impl DataApiPython3DataStream {
if tsnow.duration_since(self.ping_last) >= Duration::from_millis(500) {
self.ping_last = tsnow;
let mut sb = crate::status_board().unwrap();
sb.mark_alive(self.reqctx.reqid());
sb.mark_alive(self.ctx.reqid());
}
ret
}
@@ -749,7 +750,7 @@ impl DataApiPython3DataStream {
self.range.clone().into(),
TransformQuery::for_event_blobs(),
);
let subq = EventsSubQuery::from_parts(select, self.settings.clone(), self.reqctx.reqid().into());
let subq = EventsSubQuery::from_parts(select, self.settings.clone(), self.ctx.reqid().into());
debug!("query for event blobs retrieval subq {subq:?}");
// TODO important TODO
debug!("TODO fix magic inmem_bufcap");
@@ -757,10 +758,10 @@ impl DataApiPython3DataStream {
// TODO is this a good to place decide this?
let stream = if self.node_config.node_config.cluster.is_central_storage {
debug!("set up central storage stream");
disk::raw::conn::make_event_blobs_pipe(&subq, &fetch_info, self.reqctx.clone(), &self.node_config)?
disk::raw::conn::make_event_blobs_pipe(&subq, &fetch_info, self.ctx.clone(), &self.node_config)?
} else {
debug!("set up merged remote stream {}", fetch_info.name());
let s = MergedBlobsFromRemotes::new(subq, self.node_config.node_config.cluster.clone());
let s = MergedBlobsFromRemotes::new(subq, &self.ctx, self.node_config.node_config.cluster.clone());
Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
};
self.chan_stream = Some(Box::pin(stream));
@@ -779,7 +780,7 @@ impl Stream for DataApiPython3DataStream {
panic!("poll on completed")
} else if self.data_done {
self.completed = true;
let reqid = self.reqctx.reqid();
let reqid = self.ctx.reqid();
info!(
"{} response body sent {} bytes ({})",
reqid, self.count_bytes, self.count_emits
@@ -801,7 +802,7 @@ impl Stream for DataApiPython3DataStream {
self.current_fetch_info = None;
self.data_done = true;
let mut sb = crate::status_board().unwrap();
sb.add_error(self.reqctx.reqid(), e.0.clone());
sb.add_error(self.ctx.reqid(), e.0.clone());
Ready(Some(Err(e)))
}
},
@@ -836,8 +837,8 @@ impl Stream for DataApiPython3DataStream {
let n = Instant::now();
self.ping_last = n;
let mut sb = crate::status_board().unwrap();
sb.mark_alive(self.reqctx.reqid());
sb.mark_done(self.reqctx.reqid());
sb.mark_alive(self.ctx.reqid());
sb.mark_done(self.ctx.reqid());
}
continue;
}
@@ -877,7 +878,7 @@ impl Api1EventsBinaryHandler {
pub async fn handle(
&self,
req: Requ,
_ctx: &ReqCtx,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() != Method::POST {
@@ -902,8 +903,6 @@ impl Api1EventsBinaryHandler {
return Err(Error::with_msg_no_trace("can not parse query"));
}
};
let reqid = super::status_board()?.new_status_id();
let reqctx = netpod::ReqCtx::new(reqid);
let span = if qu.log_level() == "trace" {
debug!("enable trace for handler");
tracing::span!(tracing::Level::TRACE, "log_span_trace")
@@ -920,7 +919,9 @@ impl Api1EventsBinaryHandler {
.map_err(|e| e.add_public_msg(format!("Can not parse query url")))?
};
let disk_tune = DiskIoTune::from_url(&url)?;
let reqidspan = tracing::info_span!("api1query", reqid = reqctx.reqid());
let reqidspan = tracing::info_span!("api1query", reqid = ctx.reqid());
// TODO do not clone here
let reqctx = Arc::new(ctx.clone());
self.handle_for_query(
qu,
accept,
@@ -928,6 +929,7 @@ impl Api1EventsBinaryHandler {
&reqctx,
span.clone(),
reqidspan.clone(),
ctx,
node_config,
)
.instrument(span)
@@ -943,6 +945,7 @@ impl Api1EventsBinaryHandler {
reqctx: &ReqCtxArc,
span: tracing::Span,
reqidspan: tracing::Span,
ctx: &ReqCtx,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let self_name = any::type_name::<Self>();
@@ -973,7 +976,8 @@ impl Api1EventsBinaryHandler {
debug!("try to find config quorum for {ch:?}");
let ch = SfDbChannel::from_name(backend, ch.name());
let ch_conf =
nodenet::configquorum::find_config_basics_quorum(ch.clone(), range.clone().into(), ncc).await?;
nodenet::configquorum::find_config_basics_quorum(ch.clone(), range.clone().into(), ctx, ncc)
.await?;
match ch_conf {
Some(x) => {
debug!("found quorum {ch:?} {x:?}");

View File

@@ -14,11 +14,17 @@ use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ReqCtx;
use query::api4::binned::BinnedQuery;
use tracing::Instrument;
use url::Url;
async fn binned_json(url: Url, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn binned_json(
url: Url,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
debug!("{:?}", req);
let reqid = crate::status_board()
.map_err(|e| Error::with_msg_no_trace(e.to_string()))?
@@ -30,7 +36,7 @@ async fn binned_json(url: Url, req: Requ, node_config: &NodeConfigCached) -> Res
e.add_public_msg(msg)
})?;
// TODO handle None case better and return 404
let ch_conf = ch_conf_from_binned(&query, node_config)
let ch_conf = ch_conf_from_binned(&query, ctx, node_config)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
let span1 = span!(
@@ -44,14 +50,14 @@ async fn binned_json(url: Url, req: Requ, node_config: &NodeConfigCached) -> Res
span1.in_scope(|| {
debug!("begin");
});
let item = streams::timebinnedjson::timebinned_json(query, ch_conf, reqid, node_config.node_config.cluster.clone())
let item = streams::timebinnedjson::timebinned_json(query, ch_conf, ctx, node_config.node_config.cluster.clone())
.instrument(span1)
.await?;
let ret = response(StatusCode::OK).body(ToJsonBody::from(&item).into_body())?;
Ok(ret)
}
async fn binned(req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn binned(req: Requ, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
let url = {
let s1 = format!("dummy:{}", req.uri());
Url::parse(&s1)
@@ -66,7 +72,7 @@ async fn binned(req: Requ, node_config: &NodeConfigCached) -> Result<StreamRespo
Err(Error::with_msg_no_trace("hidden message").add_public_msg("PublicMessage"))?;
}
if crate::accepts_json(&req.headers()) {
Ok(binned_json(url, req, node_config).await?)
Ok(binned_json(url, req, ctx, node_config).await?)
} else if crate::accepts_octets(&req.headers()) {
Ok(response_err(
StatusCode::NOT_ACCEPTABLE,
@@ -92,11 +98,16 @@ impl BinnedHandler {
}
}
pub async fn handle(&self, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
pub async fn handle(
&self,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() != Method::GET {
return Ok(response(StatusCode::NOT_ACCEPTABLE).body(body_empty())?);
}
match binned(req, node_config).await {
match binned(req, ctx, node_config).await {
Ok(ret) => Ok(ret),
Err(e) => {
warn!("BinnedHandler handle sees: {e}");

View File

@@ -16,6 +16,7 @@ use httpclient::ToJsonBody;
use netpod::log::*;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ReqCtx;
use netpod::ACCEPT_ALL;
use netpod::APP_JSON;
use netpod::APP_OCTET;
@@ -33,11 +34,16 @@ impl EventsHandler {
}
}
pub async fn handle(&self, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
pub async fn handle(
&self,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() != Method::GET {
return Ok(response(StatusCode::NOT_ACCEPTABLE).body(body_empty())?);
}
match plain_events(req, node_config).await {
match plain_events(req, ctx, node_config).await {
Ok(ret) => Ok(ret),
Err(e) => {
error!("EventsHandler sees: {e}");
@@ -47,7 +53,7 @@ impl EventsHandler {
}
}
async fn plain_events(req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn plain_events(req: Requ, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
let accept_def = APP_JSON;
let accept = req
.headers()
@@ -60,19 +66,24 @@ async fn plain_events(req: Requ, node_config: &NodeConfigCached) -> Result<Strea
.map_err(|e| e.add_public_msg(format!("Can not parse query url")))?
};
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
Ok(plain_events_json(url, req, node_config).await?)
Ok(plain_events_json(url, req, ctx, node_config).await?)
} else if accept == APP_OCTET {
Ok(plain_events_binary(url, req, node_config).await?)
Ok(plain_events_binary(url, req, ctx, node_config).await?)
} else {
let ret = response_err(StatusCode::NOT_ACCEPTABLE, format!("Unsupported Accept: {:?}", accept))?;
Ok(ret)
}
}
async fn plain_events_binary(url: Url, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn plain_events_binary(
url: Url,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
debug!("{:?}", req);
let query = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let ch_conf = chconf_from_events_quorum(&query, node_config).await?;
let ch_conf = chconf_from_events_quorum(&query, ctx, node_config).await?;
info!("plain_events_binary chconf_from_events_quorum: {ch_conf:?}");
let s = stream::iter([Ok::<_, Error>(String::from("TODO_PREBINNED_BINARY_STREAM"))]);
let s = s.map_err(Error::from);
@@ -80,20 +91,24 @@ async fn plain_events_binary(url: Url, req: Requ, node_config: &NodeConfigCached
Ok(ret)
}
async fn plain_events_json(url: Url, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
let reqid = crate::status_board()?.new_status_id();
async fn plain_events_json(
url: Url,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
info!("plain_events_json req: {:?}", req);
let (_head, _body) = req.into_parts();
let query = PlainEventsQuery::from_url(&url)?;
info!("plain_events_json query {query:?}");
// TODO handle None case better and return 404
let ch_conf = chconf_from_events_quorum(&query, node_config)
let ch_conf = chconf_from_events_quorum(&query, ctx, node_config)
.await
.map_err(Error::from)?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("plain_events_json chconf_from_events_quorum: {ch_conf:?}");
let item =
streams::plaineventsjson::plain_events_json(&query, ch_conf, reqid, &node_config.node_config.cluster).await;
streams::plaineventsjson::plain_events_json(&query, ch_conf, ctx, &node_config.node_config.cluster).await;
let item = match item {
Ok(item) => item,
Err(e) => {

View File

@@ -20,6 +20,7 @@ use netpod::ChannelConfigResponse;
use netpod::ChannelTypeConfigGen;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ReqCtx;
use netpod::ScalarType;
use netpod::SfDbChannel;
use netpod::Shape;
@@ -36,25 +37,28 @@ use url::Url;
pub async fn chconf_from_events_quorum(
q: &PlainEventsQuery,
ctx: &ReqCtx,
ncc: &NodeConfigCached,
) -> Result<Option<ChannelTypeConfigGen>, Error> {
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ncc).await?;
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ctx, ncc).await?;
Ok(ret)
}
pub async fn chconf_from_prebinned(
q: &PreBinnedQuery,
ctx: &ReqCtx,
ncc: &NodeConfigCached,
) -> Result<Option<ChannelTypeConfigGen>, Error> {
let ret = find_config_basics_quorum(q.channel().clone(), q.patch().patch_range(), ncc).await?;
let ret = find_config_basics_quorum(q.channel().clone(), q.patch().patch_range(), ctx, ncc).await?;
Ok(ret)
}
pub async fn ch_conf_from_binned(
q: &BinnedQuery,
ctx: &ReqCtx,
ncc: &NodeConfigCached,
) -> Result<Option<ChannelTypeConfigGen>, Error> {
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ncc).await?;
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ctx, ncc).await?;
Ok(ret)
}
@@ -172,7 +176,12 @@ impl ChannelConfigQuorumHandler {
}
}
pub async fn handle(&self, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
pub async fn handle(
&self,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() == Method::GET {
let accept_def = APP_JSON;
let accept = req
@@ -180,7 +189,7 @@ impl ChannelConfigQuorumHandler {
.get(http::header::ACCEPT)
.map_or(accept_def, |k| k.to_str().unwrap_or(accept_def));
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
match self.channel_config_quorum(req, &node_config).await {
match self.channel_config_quorum(req, ctx, &node_config).await {
Ok(k) => Ok(k),
Err(e) => {
warn!("from channel_config_quorum: {e}");
@@ -195,12 +204,17 @@ impl ChannelConfigQuorumHandler {
}
}
async fn channel_config_quorum(&self, req: Requ, ncc: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn channel_config_quorum(
&self,
req: Requ,
ctx: &ReqCtx,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
info!("channel_config_quorum");
let url = Url::parse(&format!("dummy:{}", req.uri()))?;
let q = ChannelConfigQuery::from_url(&url)?;
info!("channel_config_quorum for q {q:?}");
let ch_confs = nodenet::configquorum::find_config_basics_quorum(q.channel, q.range.into(), ncc).await?;
let ch_confs = nodenet::configquorum::find_config_basics_quorum(q.channel, q.range.into(), ctx, ncc).await?;
let ret = response(StatusCode::OK)
.header(http::header::CONTENT_TYPE, APP_JSON)
.body(ToJsonBody::from(&ch_confs).into_body())?;

View File

@@ -5,8 +5,10 @@ use crate::response;
use crate::Requ;
use futures_util::select;
use futures_util::FutureExt;
use http::header;
use http::Method;
use http::StatusCode;
use http::Uri;
use httpclient::connect_client;
use httpclient::read_body_bytes;
use httpclient::IntoBody;
@@ -22,6 +24,7 @@ use netpod::APP_JSON;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value as JsonValue;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
@@ -69,9 +72,11 @@ pub async fn gather_get_json(req: Requ, node_config: &NodeConfigCached) -> Resul
.iter()
.filter_map(|node| {
let uri = format!("http://{}:{}/api/4/{}", node.host, node.port, pathsuf);
let req = Request::builder().method(Method::GET).uri(uri);
let req = req.header(http::header::HOST, &node.host);
let req = req.header(http::header::ACCEPT, APP_JSON);
let req = Request::builder()
.method(Method::GET)
.header(http::header::HOST, &node.host)
.header(http::header::ACCEPT, APP_JSON)
.uri(uri);
match req.body(body_empty()) {
Ok(req) => {
let task = tokio::spawn(async move {
@@ -133,6 +138,7 @@ pub async fn gather_get_json(req: Requ, node_config: &NodeConfigCached) -> Resul
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)]
pub struct Tag(pub String);
#[derive(Debug)]
pub struct SubRes<T> {
pub tag: String,
pub status: StatusCode,
@@ -158,6 +164,7 @@ where
+ Copy
+ 'static,
FT: Fn(Vec<(Tag, Result<SubRes<SM>, Error>)>) -> Result<OUT, Error>,
SubRes<SM>: fmt::Debug,
{
// TODO remove magic constant
let extra_timeout = Duration::from_millis(3000);
@@ -173,17 +180,26 @@ where
.zip(tags.into_iter())
.filter_map(move |((url, body), tag)| {
info!("Try gather from {}", url);
let url_str = url.as_str();
let req = if body.is_some() {
Request::builder().method(Method::POST).uri(url_str)
let uri: Uri = if let Ok(x) = url.as_str().parse() {
x
} else {
Request::builder().method(Method::GET).uri(url_str)
warn!("can not parse {url}");
return None;
};
let req = req.header(http::header::ACCEPT, APP_JSON);
let req = if body.is_some() {
req.header(http::header::CONTENT_TYPE, APP_JSON)
Request::builder()
.method(Method::POST)
.header(header::HOST, uri.host().unwrap())
.header(http::header::CONTENT_TYPE, APP_JSON)
.header(http::header::ACCEPT, APP_JSON)
.uri(uri)
} else {
req
Request::builder()
.method(Method::GET)
.header(header::HOST, uri.host().unwrap())
.header(http::header::CONTENT_TYPE, APP_JSON)
.header(http::header::ACCEPT, APP_JSON)
.uri(uri)
};
let body = match body {
None => body_empty(),
@@ -209,9 +225,9 @@ where
};
Ok(res)
}.fuse() => {
info!("received result in time");
debug!("received result in time {res:?}");
let ret = nt(tag2, res?).await?;
info!("transformed result in time");
debug!("transformed result in time {ret:?}");
Ok(ret)
}
}

View File

@@ -34,13 +34,12 @@ use httpclient::Requ;
use httpclient::StreamResponse;
use httpclient::ToJsonBody;
use hyper::service::service_fn;
use hyper::Request;
use hyper_util::rt::TokioIo;
use net::SocketAddr;
use netpod::log::*;
use netpod::query::prebinned::PreBinnedQuery;
use netpod::NodeConfigCached;
use netpod::ProxyConfig;
use netpod::ReqCtx;
use netpod::ServiceVersion;
use netpod::APP_JSON;
use netpod::APP_JSON_LINES;
@@ -64,9 +63,7 @@ use task::Context;
use task::Poll;
use taskrun::tokio;
use taskrun::tokio::net::TcpListener;
pub const PSI_DAQBUFFER_SERVICE_MARK: &'static str = "PSI-Daqbuffer-Service-Mark";
pub const PSI_DAQBUFFER_SEEN_URL: &'static str = "PSI-Daqbuffer-Seen-Url";
use tracing::Instrument;
#[derive(Debug, ThisError, Serialize, Deserialize)]
pub enum RetrievalError {
@@ -128,13 +125,7 @@ pub fn accepts_octets(hm: &http::HeaderMap) -> bool {
}
pub async fn host(node_config: NodeConfigCached, service_version: ServiceVersion) -> Result<(), RetrievalError> {
static STATUS_BOARD_INIT: Once = Once::new();
STATUS_BOARD_INIT.call_once(|| {
let b = StatusBoard::new();
let a = RwLock::new(b);
let x = Box::new(a);
STATUS_BOARD.store(Box::into_raw(x), Ordering::SeqCst);
});
status_board_init();
#[cfg(DISABLED)]
if let Some(bind) = node_config.node.prometheus_api_bind {
tokio::spawn(prometheus::host(bind));
@@ -179,6 +170,20 @@ async fn the_service_fn(
addr: SocketAddr,
node_config: NodeConfigCached,
service_version: ServiceVersion,
) -> Result<StreamResponse, Error> {
let ctx = ReqCtx::new(status_board()?.new_status_id()).with_node(&req, &node_config);
let reqid_span = span!(Level::INFO, "req", reqid = ctx.reqid());
let f = http_service(req, addr, ctx, node_config, service_version);
let f = Cont { f: Box::pin(f) };
f.instrument(reqid_span).await
}
async fn http_service(
req: Requ,
addr: SocketAddr,
ctx: ReqCtx,
node_config: NodeConfigCached,
service_version: ServiceVersion,
) -> Result<StreamResponse, Error> {
info!(
"http-request {:?} - {:?} - {:?} - {:?}",
@@ -187,17 +192,7 @@ async fn the_service_fn(
req.uri(),
req.headers()
);
let f = http_service(req, node_config, service_version).await;
// Cont { f: Box::pin(f) }
f
}
async fn http_service(
req: Requ,
node_config: NodeConfigCached,
service_version: ServiceVersion,
) -> Result<StreamResponse, Error> {
match http_service_try(req, &node_config, &service_version).await {
match http_service_try(req, ctx, &node_config, &service_version).await {
Ok(k) => Ok(k),
Err(e) => {
error!("daqbuffer node http_service sees error: {}", e);
@@ -237,41 +232,6 @@ where
impl<F> UnwindSafe for Cont<F> {}
pub struct ReqCtx {
pub marks: Vec<String>,
pub mark: String,
}
impl ReqCtx {
fn with_node<T>(req: &Request<T>, nc: &NodeConfigCached) -> Self {
let mut marks = Vec::new();
for (n, v) in req.headers().iter() {
if n == PSI_DAQBUFFER_SERVICE_MARK {
marks.push(String::from_utf8_lossy(v.as_bytes()).to_string());
}
}
Self {
marks,
mark: format!("{}:{}", nc.node_config.name, nc.node.port),
}
}
}
impl ReqCtx {
fn with_proxy<T>(req: &Request<T>, proxy: &ProxyConfig) -> Self {
let mut marks = Vec::new();
for (n, v) in req.headers().iter() {
if n == PSI_DAQBUFFER_SERVICE_MARK {
marks.push(String::from_utf8_lossy(v.as_bytes()).to_string());
}
}
Self {
marks,
mark: format!("{}:{}", proxy.name, proxy.port),
}
}
}
// TODO remove because I want error bodies to be json.
pub fn response_err<T>(status: StatusCode, msg: T) -> Result<StreamResponse, RetrievalError>
where
@@ -334,6 +294,7 @@ macro_rules! static_http_api1 {
async fn http_service_try(
req: Requ,
ctx: ReqCtx,
node_config: &NodeConfigCached,
service_version: &ServiceVersion,
) -> Result<StreamResponse, Error> {
@@ -341,23 +302,22 @@ async fn http_service_try(
let mut urlmarks = Vec::new();
urlmarks.push(format!("{}:{}", req.method(), req.uri()));
for (k, v) in req.headers() {
if k == PSI_DAQBUFFER_SEEN_URL {
if k == netpod::PSI_DAQBUFFER_SEEN_URL {
let s = String::from_utf8_lossy(v.as_bytes());
urlmarks.push(s.into());
}
}
let ctx = ReqCtx::with_node(&req, &node_config);
let mut res = http_service_inner(req, &ctx, node_config, service_version).await?;
let hm = res.headers_mut();
hm.append("Access-Control-Allow-Origin", "*".parse().unwrap());
hm.append("Access-Control-Allow-Headers", "*".parse().unwrap());
for m in &ctx.marks {
hm.append(PSI_DAQBUFFER_SERVICE_MARK, m.parse().unwrap());
for m in ctx.marks() {
hm.append(netpod::PSI_DAQBUFFER_SERVICE_MARK, m.parse().unwrap());
}
hm.append(PSI_DAQBUFFER_SERVICE_MARK, ctx.mark.parse().unwrap());
hm.append(netpod::PSI_DAQBUFFER_SERVICE_MARK, ctx.mark().parse().unwrap());
for s in urlmarks {
let v = HeaderValue::from_str(&s).unwrap_or_else(|_| HeaderValue::from_static("invalid"));
hm.append(PSI_DAQBUFFER_SEEN_URL, v);
hm.append(netpod::PSI_DAQBUFFER_SEEN_URL, v);
}
Ok(res)
}
@@ -427,9 +387,9 @@ async fn http_service_inner(
} else if let Some(h) = api4::search::ChannelSearchHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = api4::binned::BinnedHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
Ok(h.handle(req, ctx, &node_config).await?)
} else if let Some(h) = channelconfig::ChannelConfigQuorumHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
Ok(h.handle(req, ctx, &node_config).await?)
} else if let Some(h) = channelconfig::ChannelConfigsHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = channelconfig::ChannelConfigHandler::handler(&req) {
@@ -445,7 +405,7 @@ async fn http_service_inner(
} else if let Some(h) = channelconfig::AmbigiousChannelNames::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = api4::events::EventsHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
Ok(h.handle(req, ctx, &node_config).await?)
} else if let Some(h) = channel_status::ConnectionStatusEvents::handler(&req) {
Ok(h.handle(req, ctx, &node_config).await?)
} else if let Some(h) = channel_status::ChannelStatusEventsHandler::handler(&req) {
@@ -845,20 +805,14 @@ impl StatusBoard {
}
pub fn new_status_id(&mut self) -> String {
use std::fs::File;
use std::io::Read;
self.clean();
let mut f = File::open("/dev/urandom").unwrap();
let mut buf = [0; 4];
f.read_exact(&mut buf).unwrap();
let n = u32::from_le_bytes(buf);
self.clean_if_needed();
let n: u32 = rand::random();
let s = format!("{:08x}", n);
debug!("new_status_id {s}");
self.entries.insert(s.clone(), StatusBoardEntry::new());
s
}
pub fn clean(&mut self) {
pub fn clean_if_needed(&mut self) {
if self.entries.len() > 15000 {
let mut tss: Vec<_> = self.entries.values().map(|e| e.ts_updated).collect();
tss.sort_unstable();
@@ -916,7 +870,7 @@ impl StatusBoard {
Some(e) => e.into(),
None => {
error!("can not find status id {}", status_id);
let _e = ::err::Error::with_public_msg_no_trace(format!("Request status ID unknown {status_id}"));
let _e = ::err::Error::with_public_msg_no_trace(format!("request-id unknown {status_id}"));
StatusBoardEntryUser {
error_count: 1,
warn_count: 0,
@@ -937,3 +891,13 @@ pub fn status_board() -> Result<RwLockWriteGuard<'static, StatusBoard>, Retrieva
Err(e) => Err(RetrievalError::TextError(format!("{e}"))),
}
}
/// Install the process-wide status board, at most once.
///
/// The board is stored as a raw pointer in the `STATUS_BOARD` atomic; the
/// `Box` is intentionally leaked so the board stays valid for the whole
/// process lifetime. Repeated calls are no-ops thanks to `Once`.
pub fn status_board_init() {
    static STATUS_BOARD_INIT: Once = Once::new();
    STATUS_BOARD_INIT.call_once(|| {
        let board = Box::new(RwLock::new(StatusBoard::new()));
        STATUS_BOARD.store(Box::into_raw(board), Ordering::SeqCst);
    });
}

View File

@@ -12,9 +12,9 @@ use crate::gather::SubRes;
use crate::pulsemap::MapPulseQuery;
use crate::response;
use crate::response_err;
use crate::status_board;
use crate::status_board_init;
use crate::Cont;
use crate::ReqCtx;
use crate::PSI_DAQBUFFER_SERVICE_MARK;
use futures_util::pin_mut;
use futures_util::Stream;
use http::Method;
@@ -42,9 +42,11 @@ use netpod::FromUrl;
use netpod::HasBackend;
use netpod::HasTimeout;
use netpod::ProxyConfig;
use netpod::ReqCtx;
use netpod::ServiceVersion;
use netpod::ACCEPT_ALL;
use netpod::APP_JSON;
use netpod::PSI_DAQBUFFER_SERVICE_MARK;
use query::api4::binned::BinnedQuery;
use query::api4::events::PlainEventsQuery;
use serde::Deserialize;
@@ -61,19 +63,23 @@ use tokio::fs::File;
use tokio::io::AsyncRead;
use tokio::io::ReadBuf;
use tokio::net::TcpListener;
use tracing::Instrument;
use url::Url;
const DISTRI_PRE: &str = "/distri/";
pub async fn proxy(proxy_config: ProxyConfig, service_version: ServiceVersion) -> Result<(), Error> {
status_board_init();
use std::str::FromStr;
let bind_addr = SocketAddr::from_str(&format!("{}:{}", proxy_config.listen, proxy_config.port))?;
let listener = TcpListener::bind(bind_addr).await?;
loop {
let (stream, addr) = if let Ok(x) = listener.accept().await {
x
} else {
break;
let (stream, addr) = match listener.accept().await {
Ok(x) => x,
Err(e) => {
error!("{e}");
break;
}
};
debug!("new connection from {addr}");
let proxy_config = proxy_config.clone();
@@ -83,19 +89,7 @@ pub async fn proxy(proxy_config: ProxyConfig, service_version: ServiceVersion) -
let res = hyper::server::conn::http1::Builder::new()
.serve_connection(
io,
service_fn({
move |req| {
info!(
"http-request {:?} - {:?} - {:?} - {:?}",
bind_addr,
req.method(),
req.uri(),
req.headers()
);
let f = proxy_http_service(req, proxy_config.clone(), service_version.clone());
Cont { f: Box::pin(f) }
}
}),
service_fn(move |req| the_service_fn(req, addr, proxy_config.clone(), service_version.clone())),
)
.await;
match res {
@@ -106,15 +100,38 @@ pub async fn proxy(proxy_config: ProxyConfig, service_version: ServiceVersion) -
}
});
}
info!("proxy done");
Ok(())
}
/// Per-request entry point for the proxy: allocates a request id on the
/// shared status board, builds the request context from the incoming
/// headers and proxy identity, and runs the actual service future inside
/// a span that tags every log line with the request id.
async fn the_service_fn(
    req: Requ,
    addr: SocketAddr,
    proxy_config: ProxyConfig,
    service_version: ServiceVersion,
) -> Result<StreamResponse, Error> {
    // NOTE(review): unwrap panics if the status board was never initialized;
    // `proxy()` calls `status_board_init()` first, so this holds in practice.
    let ctx = ReqCtx::new(status_board().unwrap().new_status_id()).with_proxy(&req, &proxy_config);
    let reqid_span = span!(Level::INFO, "req", reqid = ctx.reqid());
    // `proxy_config` and `service_version` are owned here, so move them into
    // the service future instead of cloning (the borrows above have ended).
    let f = proxy_http_service(req, addr, ctx, proxy_config, service_version);
    let f = Cont { f: Box::pin(f) };
    f.instrument(reqid_span).await
}
async fn proxy_http_service(
req: Requ,
addr: SocketAddr,
ctx: ReqCtx,
proxy_config: ProxyConfig,
service_version: ServiceVersion,
) -> Result<StreamResponse, Error> {
match proxy_http_service_try(req, &proxy_config, &service_version).await {
info!(
"http-request {:?} - {:?} - {:?} - {:?}",
addr,
req.method(),
req.uri(),
req.headers()
);
match proxy_http_service_try(req, ctx, &proxy_config, &service_version).await {
Ok(k) => Ok(k),
Err(e) => {
error!("data_api_proxy sees error: {:?}", e);
@@ -125,18 +142,18 @@ async fn proxy_http_service(
async fn proxy_http_service_try(
req: Requ,
ctx: ReqCtx,
proxy_config: &ProxyConfig,
service_version: &ServiceVersion,
) -> Result<StreamResponse, Error> {
let ctx = ReqCtx::with_proxy(&req, proxy_config);
let mut res = proxy_http_service_inner(req, &ctx, proxy_config, &service_version).await?;
let hm = res.headers_mut();
hm.insert("Access-Control-Allow-Origin", "*".parse().unwrap());
hm.insert("Access-Control-Allow-Headers", "*".parse().unwrap());
for m in &ctx.marks {
for m in ctx.marks() {
hm.append(PSI_DAQBUFFER_SERVICE_MARK, m.parse().unwrap());
}
hm.append(PSI_DAQBUFFER_SERVICE_MARK, ctx.mark.parse().unwrap());
hm.append(PSI_DAQBUFFER_SERVICE_MARK, ctx.mark().parse().unwrap());
Ok(res)
}

View File

@@ -38,7 +38,7 @@ impl PythonDataApi1Query {
}
}
pub async fn handle(&self, req: Requ, _ctx: &ReqCtx, proxy_config: &ProxyConfig) -> Result<StreamResponse, Error> {
pub async fn handle(&self, req: Requ, ctx: &ReqCtx, proxy_config: &ProxyConfig) -> Result<StreamResponse, Error> {
if req.method() != Method::POST {
return Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(body_empty())?);
}
@@ -50,18 +50,16 @@ impl PythonDataApi1Query {
.map_err(|e| Error::with_msg_no_trace(format!("{e:?}")))?
.to_owned();
let body_data = read_body_bytes(body).await?;
if body_data.len() < 512 && body_data.first() == Some(&"{".as_bytes()[0]) {
info!("request body_data string: {}", String::from_utf8_lossy(&body_data));
}
let qu = match serde_json::from_slice::<Api1Query>(&body_data) {
Ok(qu) => qu,
Err(e) => {
error!("got body_data: {:?}", String::from_utf8_lossy(&body_data[..]));
let buf = &body_data[..body_data.len().min(200)];
error!("got body_data: {:?}", String::from_utf8_lossy(buf));
error!("can not parse: {e}");
return Err(Error::with_msg_no_trace("can not parse query"));
}
};
info!("Proxy sees request: {qu:?}");
info!("{qu:?}");
let back = {
let mut ret = None;
for b in &proxy_config.backends {
@@ -73,12 +71,24 @@ impl PythonDataApi1Query {
ret
};
if let Some(back) = back {
// TODO remove special code, make it part of configuration
let back = if back.url.contains("sf-daqbuf-23.psi.ch") {
let id = 21 + rand::random::<u32>() % 13;
let url = back.url.replace("-23.", &format!("-{id}."));
netpod::ProxyBackend {
name: back.name.clone(),
url,
}
} else {
back.clone()
};
let url_str = format!("{}/api/1/query", back.url);
info!("try to ask {url_str}");
let uri: Uri = url_str.parse()?;
let req = Request::builder()
.method(Method::POST)
.header(header::HOST, uri.host().unwrap())
.header(ctx.header_name(), ctx.header_value())
.uri(&uri)
.body(body_bytes(body_data))?;
let mut client = connect_client(&uri).await?;

View File

@@ -835,11 +835,11 @@ impl FromUrl for MapPulseQuery {
fn from_url(url: &url::Url) -> Result<Self, err::Error> {
let mut pit = url
.path_segments()
.ok_or_else(|| Error::with_msg_no_trace("no path in url"))?
.ok_or_else(|| Error::with_msg_no_trace(format!("no path in url {url}")))?
.rev();
let pulsestr = pit
.next()
.ok_or_else(|| Error::with_msg_no_trace("no pulse in url path"))?;
.ok_or_else(|| Error::with_msg_no_trace(format!("no pulse in url path {pit:?}")))?;
let backend = pit.next().unwrap_or("sf-databuffer").into();
// TODO legacy: use a default backend if not specified.
let backend = if backend == "pulse" {