Refactor and add test for api1 binary query
@@ -1,6 +1,6 @@
 use crate::err::Error;
 use crate::gather::{gather_get_json_generic, SubRes};
-use crate::{response, BodyStream};
+use crate::{response, BodyStream, ReqCtx};
 use bytes::{BufMut, BytesMut};
 use futures_core::Stream;
 use futures_util::{FutureExt, StreamExt, TryFutureExt, TryStreamExt};
@@ -9,10 +9,10 @@ use hyper::{Body, Client, Request, Response};
 use items::eventfull::EventFull;
 use items::{RangeCompletableItem, Sitemty, StreamItem};
 use itertools::Itertools;
-use netpod::log::*;
 use netpod::query::api1::Api1Query;
 use netpod::query::RawEventsQuery;
 use netpod::timeunits::SEC;
+use netpod::{log::*, ScalarType};
 use netpod::{ByteSize, Channel, DiskIoTune, NanoRange, NodeConfigCached, PerfOpts, Shape};
 use netpod::{ChannelSearchQuery, ChannelSearchResult, ProxyConfig};
 use netpod::{ACCEPT_ALL, APP_JSON, APP_OCTET};
@@ -21,6 +21,7 @@ use parse::channelconfig::read_local_config;
 use parse::channelconfig::{Config, ConfigEntry, MatchingConfigEntry};
 use serde::{Deserialize, Serialize};
 use serde_json::Value as JsonValue;
+use std::fmt;
 use std::future::Future;
 use std::pin::Pin;
 use std::task::{Context, Poll};
@@ -468,17 +469,127 @@ async fn process_answer(res: Response<Body>) -> Result<JsonValue, Error> {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub enum Api1ScalarType {
+    #[serde(rename = "uint8")]
+    U8,
+    #[serde(rename = "uint16")]
+    U16,
+    #[serde(rename = "uint32")]
+    U32,
+    #[serde(rename = "uint64")]
+    U64,
+    #[serde(rename = "int8")]
+    I8,
+    #[serde(rename = "int16")]
+    I16,
+    #[serde(rename = "int32")]
+    I32,
+    #[serde(rename = "int64")]
+    I64,
+    #[serde(rename = "float32")]
+    F32,
+    #[serde(rename = "float64")]
+    F64,
+    #[serde(rename = "bool")]
+    BOOL,
+    #[serde(rename = "string")]
+    STRING,
+}
+
+impl Api1ScalarType {
+    pub fn to_str(&self) -> &'static str {
+        use Api1ScalarType as A;
+        match self {
+            A::U8 => "uint8",
+            A::U16 => "uint16",
+            A::U32 => "uint32",
+            A::U64 => "uint64",
+            A::I8 => "int8",
+            A::I16 => "int16",
+            A::I32 => "int32",
+            A::I64 => "int64",
+            A::F32 => "float32",
+            A::F64 => "float64",
+            A::BOOL => "bool",
+            A::STRING => "string",
+        }
+    }
+}
+
+impl fmt::Display for Api1ScalarType {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "{}", self.to_str())
+    }
+}
+
+impl From<&ScalarType> for Api1ScalarType {
+    fn from(k: &ScalarType) -> Self {
+        use Api1ScalarType as B;
+        use ScalarType as A;
+        match k {
+            A::U8 => B::U8,
+            A::U16 => B::U16,
+            A::U32 => B::U32,
+            A::U64 => B::U64,
+            A::I8 => B::I8,
+            A::I16 => B::I16,
+            A::I32 => B::I32,
+            A::I64 => B::I64,
+            A::F32 => B::F32,
+            A::F64 => B::F64,
+            A::BOOL => B::BOOL,
+            A::STRING => B::STRING,
+        }
+    }
+}
+
+impl From<ScalarType> for Api1ScalarType {
+    fn from(x: ScalarType) -> Self {
+        (&x).into()
+    }
+}
+
+#[test]
+fn test_custom_variant_name() {
+    let val = Api1ScalarType::F32;
+    assert_eq!(format!("{val:?}"), "F32");
+    assert_eq!(format!("{val}"), "float32");
+    let s = serde_json::to_string(&val).unwrap();
+    assert_eq!(s, "\"float32\"");
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub enum Api1ByteOrder {
+    #[serde(rename = "LITTLE_ENDIAN")]
+    Little,
+    #[serde(rename = "BIG_ENDIAN")]
+    Big,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct Api1ChannelHeader {
     name: String,
     #[serde(rename = "type")]
-    ty: String,
+    ty: Api1ScalarType,
     #[serde(rename = "byteOrder")]
-    byte_order: String,
+    byte_order: Api1ByteOrder,
     #[serde(default)]
     shape: Vec<u32>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    compression: Option<usize>,
 }
+
+impl Api1ChannelHeader {
+    pub fn name(&self) -> &str {
+        &self.name
+    }
+
+    pub fn ty(&self) -> Api1ScalarType {
+        self.ty.clone()
+    }
+}
 
 pub struct DataApiPython3DataStream {
     range: NanoRange,
     channels: Vec<Channel>,
@@ -560,11 +671,11 @@ impl DataApiPython3DataStream {
                 if !*header_out {
                     let head = Api1ChannelHeader {
                         name: channel.name.clone(),
-                        ty: b.scalar_types[i1].to_api3proto().into(),
+                        ty: (&b.scalar_types[i1]).into(),
                         byte_order: if b.be[i1] {
-                            "BIG_ENDIAN".into()
+                            Api1ByteOrder::Big
                        } else {
-                            "LITTLE_ENDIAN".into()
+                            Api1ByteOrder::Little
                        },
                        // The shape is inconsistent on the events.
                        // Seems like the config is to be trusted in this case.
@@ -576,39 +687,18 @@ impl DataApiPython3DataStream {
                    let l1 = 1 + h.as_bytes().len() as u32;
                    d.put_u32(l1);
                    d.put_u8(0);
                    debug!("header frame byte len {}", 4 + 1 + h.as_bytes().len());
                    d.extend_from_slice(h.as_bytes());
                    d.put_u32(l1);
                    *header_out = true;
                }
                {
-                    match &b.shapes[i1] {
-                        Shape::Image(_, _) => {
-                            let l1 = 17 + b.blobs[i1].len() as u32;
-                            d.put_u32(l1);
-                            d.put_u8(1);
-                            d.put_u64(b.tss[i1]);
-                            d.put_u64(b.pulses[i1]);
-                            d.put_slice(&b.blobs[i1]);
-                            d.put_u32(l1);
-                        }
-                        Shape::Wave(_) => {
-                            let l1 = 17 + b.blobs[i1].len() as u32;
-                            d.put_u32(l1);
-                            d.put_u8(1);
-                            d.put_u64(b.tss[i1]);
-                            d.put_u64(b.pulses[i1]);
-                            d.put_slice(&b.blobs[i1]);
-                            d.put_u32(l1);
-                        }
-                        _ => {
-                            let l1 = 17 + b.blobs[i1].len() as u32;
-                            d.put_u32(l1);
-                            d.put_u8(1);
-                            d.put_u64(b.tss[i1]);
-                            d.put_u64(b.pulses[i1]);
-                            d.put_slice(&b.blobs[i1]);
-                            d.put_u32(l1);
-                        }
+                    match &b.shapes[i1] {
+                        _ => {
+                            let l1 = 17 + b.blobs[i1].len() as u32;
+                            d.put_u32(l1);
+                            d.put_u8(1);
+                            d.put_u64(b.tss[i1]);
+                            d.put_u64(b.pulses[i1]);
+                            d.put_slice(&b.blobs[i1]);
+                        }
                    }
                    *count_events += 1;
@@ -810,7 +900,12 @@ impl Api1EventsBinaryHandler {
        }
    }

-    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    pub async fn handle(
+        &self,
+        req: Request<Body>,
+        _ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        if req.method() != Method::POST {
            return Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?);
        }

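For orientation, the stream that `Api1EventsBinaryHandler` serves (built by `DataApiPython3DataStream` above) is length-delimited: a JSON channel-header frame with type byte 0, then one frame per event with type byte 1. A minimal standalone sketch of that layout as it appears in this diff, assuming only the `bytes` crate; the helper names here are illustrative, not part of the commit:

```rust
// Illustrative sketch of the api1 binary framing visible in this diff.
use bytes::{BufMut, BytesMut};

// Header frame: u32 length, type byte 0, JSON header, trailing u32 length.
// The length covers the type byte plus the payload, not the length words.
fn write_header_frame(d: &mut BytesMut, header_json: &str) {
    let l1 = 1 + header_json.as_bytes().len() as u32;
    d.put_u32(l1);
    d.put_u8(0);
    d.extend_from_slice(header_json.as_bytes());
    d.put_u32(l1);
}

// Event frame: u32 length, type byte 1, u64 timestamp, u64 pulse id, blob.
// 17 = 1 (type byte) + 8 (timestamp) + 8 (pulse). Note that the refactored
// match arm above no longer writes the trailing length copy for event frames.
fn write_event_frame(d: &mut BytesMut, ts: u64, pulse: u64, blob: &[u8]) {
    let l1 = 17 + blob.len() as u32;
    d.put_u32(l1);
    d.put_u8(1);
    d.put_u64(ts);
    d.put_u64(pulse);
    d.put_slice(blob);
}

fn main() {
    let mut d = BytesMut::new();
    write_header_frame(&mut d, r#"{"name":"ch1","type":"float64","byteOrder":"LITTLE_ENDIAN","shape":[]}"#);
    write_event_frame(&mut d, 1_600_000_000_000_000_000, 42, &0f64.to_be_bytes());
    println!("{} bytes buffered", d.len());
}
```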
@@ -13,20 +13,12 @@ use std::task::{Context, Poll};
 use tracing::field::Empty;
 use tracing::{span, Level};
 
-fn proxy_mark() -> &'static str {
-    "7c5e408a"
-}
-
 pub fn response<T>(status: T) -> http::response::Builder
 where
     http::StatusCode: std::convert::TryFrom<T>,
     <http::StatusCode as std::convert::TryFrom<T>>::Error: Into<http::Error>,
 {
-    Response::builder()
-        .status(status)
-        .header("Access-Control-Allow-Origin", "*")
-        .header("Access-Control-Allow-Headers", "*")
-        .header("x-proxy-log-mark", proxy_mark())
+    Response::builder().status(status)
 }
 
 pub struct BodyStream<S> {

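The slimmed-down `response` helper keeps its generic status parameter; the CORS headers it used to attach are now added centrally in `http_service_try` (further down in this commit). A small self-contained sketch of why the bound works, using only the `http` crate:

```rust
// Sketch: `response` accepts anything convertible into a StatusCode,
// because http::response::Builder::status carries the same TryFrom bound.
use http::{Response, StatusCode};

pub fn response<T>(status: T) -> http::response::Builder
where
    StatusCode: TryFrom<T>,
    <StatusCode as TryFrom<T>>::Error: Into<http::Error>,
{
    Response::builder().status(status)
}

fn main() {
    // A StatusCode and a plain u16 both satisfy the bound.
    let a = response(StatusCode::NOT_FOUND).body(()).unwrap();
    let b = response(404u16).body(()).unwrap();
    assert_eq!(a.status(), b.status());
}
```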
@@ -1,5 +1,6 @@
 use crate::bodystream::response;
 use crate::err::Error;
+use crate::ReqCtx;
 use http::{Method, Request, Response, StatusCode};
 use hyper::Body;
 use netpod::query::ChannelStateEventsQuery;
@@ -17,7 +18,12 @@ impl ConnectionStatusEvents {
        }
    }

-    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    pub async fn handle(
+        &self,
+        req: Request<Body>,
+        _ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        if req.method() == Method::GET {
            let accept_def = APP_JSON;
            let accept = req
@@ -70,7 +76,12 @@ impl ChannelConnectionStatusEvents {
        }
    }

-    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    pub async fn handle(
+        &self,
+        req: Request<Body>,
+        _ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        if req.method() == Method::GET {
            let accept_def = APP_JSON;
            let accept = req

@@ -1,6 +1,6 @@
 use crate::channelconfig::{chconf_from_events_binary, chconf_from_events_json};
 use crate::err::Error;
-use crate::{response, response_err, BodyStream, ToPublicResponse};
+use crate::{response, response_err, BodyStream, ReqCtx, ToPublicResponse};
 use futures_util::{Stream, StreamExt, TryStreamExt};
 use http::{Method, Request, Response, StatusCode};
 use hyper::Body;
@@ -164,17 +164,27 @@ impl EventsHandlerScylla {
        }
    }

-    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    pub async fn handle(
+        &self,
+        req: Request<Body>,
+        ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        if req.method() != Method::GET {
            return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
        }
-        match self.fetch(req, node_config).await {
+        match self.fetch(req, ctx, node_config).await {
            Ok(ret) => Ok(ret),
            Err(e) => Ok(e.to_public_response()),
        }
    }

-    async fn fetch(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    async fn fetch(
+        &self,
+        req: Request<Body>,
+        ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        info!("EventsHandlerScylla req: {:?}", req);
        let accept_def = APP_JSON;
        let accept = req
@@ -182,14 +192,19 @@ impl EventsHandlerScylla {
            .get(http::header::ACCEPT)
            .map_or(accept_def, |k| k.to_str().unwrap_or(accept_def));
        if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
-            Ok(self.gather(req, node_config).await?)
+            Ok(self.gather(req, ctx, node_config).await?)
        } else {
            let ret = response_err(StatusCode::NOT_ACCEPTABLE, format!("Unsupported Accept: {:?}", accept))?;
            Ok(ret)
        }
    }

-    async fn gather(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    async fn gather(
+        &self,
+        req: Request<Body>,
+        _ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        let self_name = std::any::type_name::<Self>();
        let (head, _body) = req.into_parts();
        warn!("TODO PlainEventsQuery needs to take AggKind to do x-binning");
@@ -303,11 +318,16 @@ impl BinnedHandlerScylla {
        }
    }

-    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    pub async fn handle(
+        &self,
+        req: Request<Body>,
+        ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        if req.method() != Method::GET {
            return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
        }
-        match self.fetch(req, node_config).await {
+        match self.fetch(req, ctx, node_config).await {
            Ok(ret) => Ok(ret),
            Err(e) => {
                eprintln!("error: {e}");
@@ -316,7 +336,12 @@ impl BinnedHandlerScylla {
        }
    }

-    async fn fetch(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    async fn fetch(
+        &self,
+        req: Request<Body>,
+        ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        info!("BinnedHandlerScylla req: {:?}", req);
        let accept_def = APP_JSON;
        let accept = req
@@ -324,14 +349,19 @@ impl BinnedHandlerScylla {
            .get(http::header::ACCEPT)
            .map_or(accept_def, |k| k.to_str().unwrap_or(accept_def));
        if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
-            Ok(self.gather(req, node_config).await?)
+            Ok(self.gather(req, ctx, node_config).await?)
        } else {
            let ret = response_err(StatusCode::NOT_ACCEPTABLE, format!("Unsupported Accept: {:?}", accept))?;
            Ok(ret)
        }
    }

-    async fn gather(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    async fn gather(
+        &self,
+        req: Request<Body>,
+        _ctx: &ReqCtx,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
        let (head, _body) = req.into_parts();
        warn!("TODO BinnedQuery needs to take AggKind to do x-binngin");
        let s1 = format!("dummy:{}", head.uri);

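Both Scylla handlers repeat the same Accept-header negotiation before dispatching. A standalone sketch of that pattern; the constants mirror netpod's `APP_JSON`/`ACCEPT_ALL` and the function name is illustrative:

```rust
// Sketch of the content negotiation used by the handlers above: a missing
// Accept header falls back to JSON, otherwise the header must mention
// application/json or */*.
use http::Request;

const APP_JSON: &str = "application/json";
const ACCEPT_ALL: &str = "*/*";

fn accepts_json<B>(req: &Request<B>) -> bool {
    let accept = req
        .headers()
        .get(http::header::ACCEPT)
        .map_or(APP_JSON, |k| k.to_str().unwrap_or(APP_JSON));
    accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL)
}

fn main() {
    let req = Request::builder().body(()).unwrap();
    assert!(accepts_json(&req)); // no Accept header: the default applies
    let req = Request::builder()
        .header(http::header::ACCEPT, "text/html")
        .body(())
        .unwrap();
    assert!(!accepts_json(&req));
}
```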
@@ -30,6 +30,7 @@ use net::SocketAddr;
 use netpod::log::*;
 use netpod::query::BinnedQuery;
 use netpod::timeunits::SEC;
+use netpod::ProxyConfig;
 use netpod::{FromUrl, NodeConfigCached, NodeStatus, NodeStatusArchiverAppliance};
 use netpod::{ACCEPT_ALL, APP_JSON, APP_JSON_LINES, APP_OCTET};
 use nodenet::conn::events_service;
@@ -45,6 +46,8 @@ use task::{Context, Poll};
 use tracing::Instrument;
 use url::Url;
 
+pub const PSI_DAQBUFFER_SERVICE_MARK: &'static str = "PSI-Daqbuffer-Service-Mark";
+
 pub async fn host(node_config: NodeConfigCached) -> Result<(), Error> {
     static STATUS_BOARD_INIT: Once = Once::new();
     STATUS_BOARD_INIT.call_once(|| {
@@ -132,6 +135,41 @@
 
 impl<F> UnwindSafe for Cont<F> {}
 
+pub struct ReqCtx {
+    pub marks: Vec<String>,
+    pub mark: String,
+}
+
+impl ReqCtx {
+    fn with_node<T>(req: &Request<T>, nc: &NodeConfigCached) -> Self {
+        let mut marks = Vec::new();
+        for (n, v) in req.headers().iter() {
+            if n == PSI_DAQBUFFER_SERVICE_MARK {
+                marks.push(String::from_utf8_lossy(v.as_bytes()).to_string());
+            }
+        }
+        Self {
+            marks,
+            mark: format!("{}:{}", nc.node_config.name, nc.node.port),
+        }
+    }
+}
+
+impl ReqCtx {
+    fn with_proxy<T>(req: &Request<T>, proxy: &ProxyConfig) -> Self {
+        let mut marks = Vec::new();
+        for (n, v) in req.headers().iter() {
+            if n == PSI_DAQBUFFER_SERVICE_MARK {
+                marks.push(String::from_utf8_lossy(v.as_bytes()).to_string());
+            }
+        }
+        Self {
+            marks,
+            mark: format!("{}:{}", proxy.name, proxy.port),
+        }
+    }
+}
+
 // TODO remove because I want error bodies to be json.
 pub fn response_err<T>(status: StatusCode, msg: T) -> Result<Response<Body>, Error>
 where
@@ -193,11 +231,28 @@ macro_rules! static_http_api1 {
 }
 
 async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    let ctx = ReqCtx::with_node(&req, node_config);
+    let mut res = http_service_inner(req, &ctx, node_config).await?;
+    let hm = res.headers_mut();
+    hm.append("Access-Control-Allow-Origin", "*".parse().unwrap());
+    hm.append("Access-Control-Allow-Headers", "*".parse().unwrap());
+    for m in &ctx.marks {
+        hm.append(PSI_DAQBUFFER_SERVICE_MARK, m.parse().unwrap());
+    }
+    hm.append(PSI_DAQBUFFER_SERVICE_MARK, ctx.mark.parse().unwrap());
+    Ok(res)
+}
+
+async fn http_service_inner(
+    req: Request<Body>,
+    ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let uri = req.uri().clone();
     let path = uri.path();
     if path == "/api/4/node_status" {
         if req.method() == Method::GET {
-            Ok(node_status(req, &node_config).await?)
+            Ok(node_status(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
@@ -245,34 +300,34 @@ async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) ->
     } else if let Some(h) = events::EventsHandler::handler(&req) {
         h.handle(req, &node_config).await
     } else if let Some(h) = events::EventsHandlerScylla::handler(&req) {
-        h.handle(req, &node_config).await
+        h.handle(req, ctx, &node_config).await
     } else if let Some(h) = events::BinnedHandlerScylla::handler(&req) {
-        h.handle(req, &node_config).await
+        h.handle(req, ctx, &node_config).await
     } else if let Some(h) = channel_status::ConnectionStatusEvents::handler(&req) {
-        h.handle(req, &node_config).await
+        h.handle(req, ctx, &node_config).await
     } else if let Some(h) = channel_status::ChannelConnectionStatusEvents::handler(&req) {
-        h.handle(req, &node_config).await
+        h.handle(req, ctx, &node_config).await
     } else if path == "/api/4/binned" {
         if req.method() == Method::GET {
-            Ok(binned(req, node_config).await?)
+            Ok(binned(req, ctx, node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
     } else if path == "/api/4/prebinned" {
         if req.method() == Method::GET {
-            Ok(prebinned(req, &node_config).await?)
+            Ok(prebinned(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
     } else if path == "/api/4/table_sizes" {
         if req.method() == Method::GET {
-            Ok(table_sizes(req, &node_config).await?)
+            Ok(table_sizes(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
     } else if path == "/api/4/random/channel" {
         if req.method() == Method::GET {
-            Ok(random_channel(req, &node_config).await?)
+            Ok(random_channel(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
@@ -284,25 +339,25 @@ async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) ->
         }
     } else if path == "/api/4/clear_cache" {
         if req.method() == Method::GET {
-            Ok(clear_cache_all(req, &node_config).await?)
+            Ok(clear_cache_all(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
     } else if path == "/api/4/update_db_with_channel_names" {
         if req.method() == Method::GET {
-            Ok(update_db_with_channel_names(req, &node_config).await?)
+            Ok(update_db_with_channel_names(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
     } else if path == "/api/4/update_db_with_all_channel_configs" {
         if req.method() == Method::GET {
-            Ok(update_db_with_all_channel_configs(req, &node_config).await?)
+            Ok(update_db_with_all_channel_configs(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
     } else if path == "/api/4/update_search_cache" {
         if req.method() == Method::GET {
-            Ok(update_search_cache(req, &node_config).await?)
+            Ok(update_search_cache(req, ctx, &node_config).await?)
         } else {
             Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
         }
@@ -311,7 +366,7 @@ async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) ->
     } else if let Some(h) = settings::SettingsThreadsMaxHandler::handler(&req) {
         h.handle(req, &node_config).await
     } else if let Some(h) = api1::Api1EventsBinaryHandler::handler(&req) {
-        h.handle(req, &node_config).await
+        h.handle(req, ctx, &node_config).await
     } else if let Some(h) = evinfo::EventInfoScan::handler(&req) {
         h.handle(req, &node_config).await
     } else if let Some(h) = pulsemap::MapPulseScyllaHandler::handler(&req) {
@@ -387,8 +442,8 @@ impl StatusBoardAllHandler {
     }
 }
 
-async fn binned(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-    match binned_inner(req, node_config).await {
+async fn binned(req: Request<Body>, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    match binned_inner(req, ctx, node_config).await {
         Ok(ret) => Ok(ret),
         Err(e) => {
             error!("fn binned: {e:?}");
@@ -397,7 +452,11 @@ async fn binned(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Re
     }
 }
 
-async fn binned_inner(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+async fn binned_inner(
+    req: Request<Body>,
+    ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let (head, _body) = req.into_parts();
     let url = Url::parse(&format!("dummy:{}", head.uri))?;
     let query = BinnedQuery::from_url(&url).map_err(|e| {
@@ -416,8 +475,8 @@ async fn binned_inner(req: Request<Body>, node_config: &NodeConfigCached) -> Res
         debug!("binned STARTING {:?}", query);
     });
     match head.headers.get(http::header::ACCEPT) {
-        Some(v) if v == APP_OCTET => binned_binary(query, chconf, node_config).await,
-        Some(v) if v == APP_JSON || v == ACCEPT_ALL => binned_json(query, chconf, node_config).await,
+        Some(v) if v == APP_OCTET => binned_binary(query, chconf, &ctx, node_config).await,
+        Some(v) if v == APP_JSON || v == ACCEPT_ALL => binned_json(query, chconf, &ctx, node_config).await,
         _ => Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?),
     }
 }
@@ -425,6 +484,7 @@ async fn binned_inner(req: Request<Body>, node_config: &NodeConfigCached) -> Res
 async fn binned_binary(
     query: BinnedQuery,
     chconf: ChConf,
+    _ctx: &ReqCtx,
     node_config: &NodeConfigCached,
 ) -> Result<Response<Body>, Error> {
     let body_stream =
@@ -439,6 +499,7 @@ async fn binned_binary(
 async fn binned_json(
     query: BinnedQuery,
     chconf: ChConf,
+    _ctx: &ReqCtx,
     node_config: &NodeConfigCached,
 ) -> Result<Response<Body>, Error> {
     let body_stream = disk::binned::binned_json(&query, chconf.scalar_type, chconf.shape, node_config).await?;
@@ -449,8 +510,8 @@ async fn binned_json(
     Ok(res)
 }
 
-async fn prebinned(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-    match prebinned_inner(req, node_config).await {
+async fn prebinned(req: Request<Body>, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+    match prebinned_inner(req, ctx, node_config).await {
         Ok(ret) => Ok(ret),
         Err(e) => {
             error!("fn prebinned: {e:?}");
@@ -459,7 +520,11 @@ async fn prebinned(req: Request<Body>, node_config: &NodeConfigCached) -> Result
     }
 }
 
-async fn prebinned_inner(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+async fn prebinned_inner(
+    req: Request<Body>,
+    _ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let (head, _body) = req.into_parts();
     let query = PreBinnedQuery::from_request(&head)?;
     let desc = format!(
@@ -486,7 +551,11 @@ async fn prebinned_inner(req: Request<Body>, node_config: &NodeConfigCached) ->
     Ok(ret)
 }
 
-async fn node_status(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+async fn node_status(
+    req: Request<Body>,
+    _ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let (_head, _body) = req.into_parts();
     let archiver_appliance_status = match node_config.node.archiver_appliance.as_ref() {
         Some(k) => {
@@ -525,7 +594,11 @@ async fn node_status(req: Request<Body>, node_config: &NodeConfigCached) -> Resu
     Ok(ret)
 }
 
-async fn table_sizes(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+async fn table_sizes(
+    req: Request<Body>,
+    _ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let (_head, _body) = req.into_parts();
     let sizes = dbconn::table_sizes(node_config).await?;
     let mut ret = String::new();
@@ -537,14 +610,22 @@ async fn table_sizes(req: Request<Body>, node_config: &NodeConfigCached) -> Resu
     Ok(ret)
 }
 
-pub async fn random_channel(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+pub async fn random_channel(
+    req: Request<Body>,
+    _ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let (_head, _body) = req.into_parts();
     let ret = dbconn::random_channel(node_config).await?;
     let ret = response(StatusCode::OK).body(Body::from(ret))?;
     Ok(ret)
 }
 
-pub async fn clear_cache_all(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+pub async fn clear_cache_all(
+    req: Request<Body>,
+    _ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let (head, _body) = req.into_parts();
     let dry = match head.uri.query() {
         Some(q) => q.contains("dry"),
@@ -559,6 +640,7 @@ pub async fn clear_cache_all(req: Request<Body>, node_config: &NodeConfigCached)
 
 pub async fn update_db_with_channel_names(
     req: Request<Body>,
+    _ctx: &ReqCtx,
     node_config: &NodeConfigCached,
 ) -> Result<Response<Body>, Error> {
     let (head, _body) = req.into_parts();
@@ -594,6 +676,7 @@ pub async fn update_db_with_channel_names(
 
 pub async fn update_db_with_channel_names_3(
     req: Request<Body>,
+    _ctx: &ReqCtx,
    node_config: &NodeConfigCached,
 ) -> Result<Response<Body>, Error> {
     let (head, _body) = req.into_parts();
@@ -616,6 +699,7 @@ pub async fn update_db_with_channel_names_3(
 
 pub async fn update_db_with_all_channel_configs(
     req: Request<Body>,
+    _ctx: &ReqCtx,
     node_config: &NodeConfigCached,
 ) -> Result<Response<Body>, Error> {
     let (head, _body) = req.into_parts();
@@ -636,7 +720,11 @@ pub async fn update_db_with_all_channel_configs(
     Ok(ret)
 }
 
-pub async fn update_search_cache(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
+pub async fn update_search_cache(
+    req: Request<Body>,
+    _ctx: &ReqCtx,
+    node_config: &NodeConfigCached,
+) -> Result<Response<Body>, Error> {
     let (head, _body) = req.into_parts();
     let _dry = match head.uri.query() {
         Some(q) => q.contains("dry"),

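The point of `ReqCtx` is traceability: each hop (node or proxy) collects the marks it received and appends its own, so a response that crossed several services lists the whole chain in `PSI-Daqbuffer-Service-Mark` headers. A self-contained sketch of that round trip; the constructor and `apply` helper here are illustrative, not the commit's exact API:

```rust
// Sketch of the ReqCtx idea introduced in this commit: gather incoming
// service-mark headers, then echo them plus this node's own mark on the
// response headers.
use http::{HeaderMap, Request, Response};

pub const PSI_DAQBUFFER_SERVICE_MARK: &str = "PSI-Daqbuffer-Service-Mark";

struct ReqCtx {
    marks: Vec<String>,
    mark: String,
}

impl ReqCtx {
    fn new<T>(req: &Request<T>, node_name: &str, port: u16) -> Self {
        let mut marks = Vec::new();
        for (n, v) in req.headers().iter() {
            // Header name comparison is case-insensitive in the http crate.
            if n == PSI_DAQBUFFER_SERVICE_MARK {
                marks.push(String::from_utf8_lossy(v.as_bytes()).to_string());
            }
        }
        Self {
            marks,
            mark: format!("{}:{}", node_name, port),
        }
    }

    fn apply(&self, hm: &mut HeaderMap) {
        for m in &self.marks {
            hm.append(PSI_DAQBUFFER_SERVICE_MARK, m.parse().unwrap());
        }
        hm.append(PSI_DAQBUFFER_SERVICE_MARK, self.mark.parse().unwrap());
    }
}

fn main() {
    // Hypothetical names and ports, purely for illustration.
    let req = Request::builder()
        .header(PSI_DAQBUFFER_SERVICE_MARK, "proxy0:8371")
        .body(())
        .unwrap();
    let ctx = ReqCtx::new(&req, "node1", 8372);
    let mut res = Response::new(());
    ctx.apply(res.headers_mut());
    // The response now carries the upstream mark plus this node's mark.
    assert_eq!(res.headers().get_all(PSI_DAQBUFFER_SERVICE_MARK).iter().count(), 2);
}
```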
@@ -4,7 +4,7 @@ use crate::api1::{channel_search_configs_v1, channel_search_list_v1, gather_json
 use crate::err::Error;
 use crate::gather::{gather_get_json_generic, SubRes};
 use crate::pulsemap::MapPulseQuery;
-use crate::{api_1_docs, api_4_docs, response, response_err, Cont};
+use crate::{api_1_docs, api_4_docs, response, response_err, Cont, ReqCtx, PSI_DAQBUFFER_SERVICE_MARK};
 use futures_core::Stream;
 use futures_util::pin_mut;
 use http::{Method, StatusCode};
@@ -14,10 +14,9 @@ use hyper_tls::HttpsConnector;
 use itertools::Itertools;
 use netpod::log::*;
 use netpod::query::{BinnedQuery, PlainEventsQuery};
-use netpod::{
-    AppendToUrl, ChannelConfigQuery, ChannelSearchQuery, ChannelSearchResult, ChannelSearchSingleResult, FromUrl,
-    HasBackend, HasTimeout, ProxyConfig, ACCEPT_ALL, APP_JSON,
-};
+use netpod::{AppendToUrl, ChannelConfigQuery, FromUrl, HasBackend, HasTimeout, ProxyConfig};
+use netpod::{ChannelSearchQuery, ChannelSearchResult, ChannelSearchSingleResult};
+use netpod::{ACCEPT_ALL, APP_JSON};
 use serde::{Deserialize, Serialize};
 use serde_json::Value as JsonValue;
 use std::future::Future;
@@ -70,6 +69,23 @@ async fn proxy_http_service(req: Request<Body>, proxy_config: ProxyConfig) -> Re
 }
 
 async fn proxy_http_service_try(req: Request<Body>, proxy_config: &ProxyConfig) -> Result<Response<Body>, Error> {
+    let ctx = ReqCtx::with_proxy(&req, proxy_config);
+    let mut res = proxy_http_service_inner(req, &ctx, proxy_config).await?;
+    let hm = res.headers_mut();
+    hm.insert("Access-Control-Allow-Origin", "*".parse().unwrap());
+    hm.insert("Access-Control-Allow-Headers", "*".parse().unwrap());
+    for m in &ctx.marks {
+        hm.append(PSI_DAQBUFFER_SERVICE_MARK, m.parse().unwrap());
+    }
+    hm.append(PSI_DAQBUFFER_SERVICE_MARK, ctx.mark.parse().unwrap());
+    Ok(res)
+}
+
+async fn proxy_http_service_inner(
+    req: Request<Body>,
+    ctx: &ReqCtx,
+    proxy_config: &ProxyConfig,
+) -> Result<Response<Body>, Error> {
     let uri = req.uri().clone();
     let path = uri.path();
     if path == "/api/1/channels" {
@@ -84,16 +100,15 @@ async fn proxy_http_service_try(req: Request<Body>, proxy_config: &ProxyConfig)
         Ok(proxy_api1_single_backend_query(req, proxy_config).await?)
     } else if path.starts_with("/api/1/map/pulse/") {
         warn!("/api/1/map/pulse/ DEPRECATED");
-        Ok(proxy_api1_map_pulse(req, proxy_config).await?)
+        Ok(proxy_api1_map_pulse(req, ctx, proxy_config).await?)
     } else if path.starts_with("/api/1/gather/") {
         Ok(gather_json_2_v1(req, "/api/1/gather/", proxy_config).await?)
     } else if path == "/api/4/version" {
         if req.method() == Method::GET {
             let ret = serde_json::json!({
-                //"data_api_version": "4.0.0-beta",
                 "data_api_version": {
                     "major": 4,
-                    "minor": 0,
+                    "minor": 1,
                 },
             });
             Ok(response(StatusCode::OK).body(Body::from(serde_json::to_vec(&ret)?))?)
@@ -107,13 +122,13 @@ async fn proxy_http_service_try(req: Request<Body>, proxy_config: &ProxyConfig)
     } else if path == "/api/4/search/channel" {
         Ok(api4::channel_search(req, proxy_config).await?)
     } else if path == "/api/4/events" {
-        Ok(proxy_single_backend_query::<PlainEventsQuery>(req, proxy_config).await?)
+        Ok(proxy_single_backend_query::<PlainEventsQuery>(req, ctx, proxy_config).await?)
     } else if path.starts_with("/api/4/map/pulse/") {
-        Ok(proxy_single_backend_query::<MapPulseQuery>(req, proxy_config).await?)
+        Ok(proxy_single_backend_query::<MapPulseQuery>(req, ctx, proxy_config).await?)
     } else if path == "/api/4/binned" {
-        Ok(proxy_single_backend_query::<BinnedQuery>(req, proxy_config).await?)
+        Ok(proxy_single_backend_query::<BinnedQuery>(req, ctx, proxy_config).await?)
     } else if path == "/api/4/channel/config" {
-        Ok(proxy_single_backend_query::<ChannelConfigQuery>(req, proxy_config).await?)
+        Ok(proxy_single_backend_query::<ChannelConfigQuery>(req, ctx, proxy_config).await?)
     } else if path.starts_with("/api/1/documentation/") {
         if req.method() == Method::GET {
             api_1_docs(path)
@@ -149,12 +164,18 @@ async fn proxy_http_service_try(req: Request<Body>, proxy_config: &ProxyConfig)
     } else if path.starts_with(DISTRI_PRE) {
         proxy_distribute_v2(req).await
     } else {
-        Ok(response(StatusCode::NOT_FOUND).body(Body::from(format!(
-            "Sorry, proxy can not find: {:?} {:?} {:?}",
-            req.method(),
-            req.uri().path(),
-            req.uri().query(),
-        )))?)
+        use std::fmt::Write;
+        let mut body = String::new();
+        let out = &mut body;
+        write!(out, "METHOD {:?}<br>\n", req.method())?;
+        write!(out, "URI {:?}<br>\n", req.uri())?;
+        write!(out, "HOST {:?}<br>\n", req.uri().host())?;
+        write!(out, "PORT {:?}<br>\n", req.uri().port())?;
+        write!(out, "PATH {:?}<br>\n", req.uri().path())?;
+        for (hn, hv) in req.headers() {
+            write!(out, "HEADER {hn:?}: {hv:?}<br>\n")?;
+        }
+        Ok(response(StatusCode::NOT_FOUND).body(Body::from(body))?)
     }
 }
 
@@ -381,7 +402,11 @@ pub async fn channel_search(req: Request<Body>, proxy_config: &ProxyConfig) -> R
     }
 }
 
-pub async fn proxy_api1_map_pulse(req: Request<Body>, proxy_config: &ProxyConfig) -> Result<Response<Body>, Error> {
+pub async fn proxy_api1_map_pulse(
+    req: Request<Body>,
+    _ctx: &ReqCtx,
+    proxy_config: &ProxyConfig,
+) -> Result<Response<Body>, Error> {
     let s2 = format!("http://dummy/{}", req.uri());
     info!("s2: {:?}", s2);
     let url = Url::parse(&s2)?;
@@ -505,6 +530,7 @@ pub async fn proxy_api1_single_backend_query(
 
 pub async fn proxy_single_backend_query<QT>(
     req: Request<Body>,
+    _ctx: &ReqCtx,
     proxy_config: &ProxyConfig,
 ) -> Result<Response<Body>, Error>
 where
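Finally, a quick check of what the retyped header serializes to: with the serde renames above, the api1 JSON wire format is unchanged. A trimmed-down sketch with only two variants reproduced; the channel name is made up:

```rust
// Sketch: serializing the typed header yields the same JSON field names
// and values as the old String-based fields did.
use serde::Serialize;

#[derive(Serialize)]
enum Api1ScalarType {
    #[serde(rename = "float64")]
    F64,
}

#[derive(Serialize)]
enum Api1ByteOrder {
    #[serde(rename = "LITTLE_ENDIAN")]
    Little,
}

#[derive(Serialize)]
struct Api1ChannelHeader {
    name: String,
    #[serde(rename = "type")]
    ty: Api1ScalarType,
    #[serde(rename = "byteOrder")]
    byte_order: Api1ByteOrder,
    shape: Vec<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    compression: Option<usize>,
}

fn main() {
    let h = Api1ChannelHeader {
        name: "ch1".into(),
        ty: Api1ScalarType::F64,
        byte_order: Api1ByteOrder::Little,
        shape: vec![],
        compression: None,
    };
    println!("{}", serde_json::to_string(&h).unwrap());
    // {"name":"ch1","type":"float64","byteOrder":"LITTLE_ENDIAN","shape":[]}
}
```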