Remove ChannelExecFunction

This commit is contained in:
Dominik Werder
2022-12-05 17:51:23 +01:00
parent 821caddf63
commit ed723b634b
14 changed files with 39 additions and 1124 deletions

View File

@@ -2,8 +2,7 @@ use crate::err::Error;
 use crate::gather::{gather_get_json_generic, SubRes};
 use crate::{response, BodyStream, ReqCtx};
 use bytes::{BufMut, BytesMut};
-use futures_core::Stream;
-use futures_util::{FutureExt, StreamExt, TryFutureExt, TryStreamExt};
+use futures_util::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
 use http::{Method, StatusCode};
 use hyper::{Body, Client, Request, Response};
 use items::eventfull::EventFull;

View File

@@ -1,7 +1,6 @@
 use crate::err::Error;
 use bytes::Bytes;
-use futures_core::Stream;
-use futures_util::StreamExt;
+use futures_util::{Stream, StreamExt};
 use http::HeaderMap;
 use http::{Response, StatusCode};
 use hyper::Body;

View File

@@ -1,11 +1,11 @@
 use crate::err::Error;
 use crate::{response, ToPublicResponse};
 use dbconn::{create_connection, create_scylla_connection};
-use disk::binned::query::PreBinnedQuery;
 use futures_util::StreamExt;
 use http::{Method, Request, Response, StatusCode};
 use hyper::Body;
 use netpod::log::*;
+use netpod::query::prebinned::PreBinnedQuery;
 use netpod::query::{BinnedQuery, PlainEventsQuery};
 use netpod::timeunits::*;
 use netpod::{get_url_query_pairs, Channel, ChannelConfigQuery, Database, FromUrl, ScalarType, ScyllaConfig, Shape};

View File

@@ -1,14 +1,14 @@
 use crate::channelconfig::{chconf_from_events_binary, chconf_from_events_json};
 use crate::err::Error;
 use crate::{response, response_err, BodyStream, ReqCtx, ToPublicResponse};
-use futures_util::{Stream, StreamExt, TryStreamExt};
+use futures_util::{stream, Stream, StreamExt, TryStreamExt};
 use http::{Method, Request, Response, StatusCode};
 use hyper::Body;
 use items_2::channelevents::ChannelEvents;
 use items_2::merger_cev::ChannelEventsMerger;
 use items_2::{binned_collected, empty_events_dyn, empty_events_dyn_2};
-use netpod::log::*;
 use netpod::query::{BinnedQuery, ChannelStateEventsQuery, PlainEventsQuery, RawEventsQuery};
+use netpod::{log::*, HasBackend};
 use netpod::{AggKind, BinnedRange, FromUrl, NodeConfigCached};
 use netpod::{ACCEPT_ALL, APP_JSON, APP_OCTET};
 use scyllaconn::create_scy_session;
@@ -72,25 +72,13 @@ async fn plain_events_binary(
debug!("httpret plain_events_binary req: {:?}", req);
let query = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let chconf = chconf_from_events_binary(&query, node_config).await?;
// Update the series id since we don't require some unique identifier yet.
let mut query = query;
query.set_series_id(chconf.series);
let query = query;
// ---
let op = disk::channelexec::PlainEvents::new(query.channel().clone(), query.range().clone(), node_config.clone());
let s = disk::channelexec::channel_exec(
op,
query.channel(),
query.range(),
chconf.scalar_type,
chconf.shape,
AggKind::Plain,
node_config,
)
.await?;
let s = s.map(|item| item.make_frame());
let _ = query;
let s = stream::iter([Ok::<_, Error>(String::from("TODO_PREBINNED_BINARY_STREAM"))]);
let ret = response(StatusCode::OK).body(BodyStream::wrapped(
s.map_err(Error::from),
format!("plain_events_binary"),
@@ -110,46 +98,16 @@ async fn plain_events_json(
error!("chconf_from_events_json {e:?}");
e.add_public_msg(format!("Can not get channel information"))
})?;
// Update the series id since we don't require some unique identifier yet.
let mut query = query;
query.set_series_id(chconf.series);
let query = query;
// ---
if true || query.backend().starts_with("test-") {
let query = RawEventsQuery::new(query.channel().clone(), query.range().clone(), AggKind::Plain);
let item = streams::plaineventsjson::plain_events_json(query, &node_config.node_config.cluster).await?;
let buf = serde_json::to_vec(&item)?;
let ret = response(StatusCode::OK).body(Body::from(buf))?;
Ok(ret)
} else {
let op = disk::channelexec::PlainEventsJson::new(
// TODO pass only the query, not channel, range again:
query.clone(),
query.channel().clone(),
query.range().clone(),
query.timeout(),
node_config.clone(),
query.events_max().unwrap_or(u64::MAX),
query.do_log(),
);
let s = disk::channelexec::channel_exec(
op,
query.channel(),
query.range(),
chconf.scalar_type,
chconf.shape,
AggKind::Plain,
node_config,
)
.await?;
let ret = response(StatusCode::OK).body(BodyStream::wrapped(
s.map_err(Error::from),
format!("plain_events_json"),
))?;
Ok(ret)
}
let query = RawEventsQuery::new(query.channel().clone(), query.range().clone(), AggKind::Plain);
let item = streams::plaineventsjson::plain_events_json(query, &node_config.node_config.cluster).await?;
let buf = serde_json::to_vec(&item)?;
let ret = response(StatusCode::OK).body(Body::from(buf))?;
Ok(ret)
}
pub struct EventsHandlerScylla {}

View File

@@ -1,170 +1 @@
use crate::channelconfig::chconf_from_events_json;
use crate::err::Error;
use crate::response;
use bytes::Bytes;
use disk::channelexec::channel_exec;
use disk::channelexec::ChannelExecFunction;
use disk::decode::Endianness;
use disk::decode::EventValueFromBytes;
use disk::decode::EventValueShape;
use disk::decode::NumFromBytes;
use disk::merge::mergedfromremotes::MergedFromRemotes;
use futures_util::FutureExt;
use futures_util::Stream;
use futures_util::TryStreamExt;
use http::{Method, StatusCode};
use hyper::{Body, Request, Response};
use items::numops::NumOps;
use items::streams::collect_plain_events_json;
use items::streams::Collectable;
use items::Clearable;
use items::EventsNodeProcessor;
use items::Framable;
use items::FrameType;
use items::PushableIndex;
use items::Sitemty;
use items::TimeBinnableType;
use netpod::log::*;
use netpod::query::{PlainEventsQuery, RawEventsQuery};
use netpod::{AggKind, Channel, FromUrl, NanoRange, NodeConfigCached, PerfOpts, ScalarType, Shape};
use serde::de::DeserializeOwned;
use std::fmt::Debug;
use std::pin::Pin;
use std::time::Duration;
use url::Url;
pub struct EventInfoScan {}
impl EventInfoScan {
pub fn handler(req: &Request<Body>) -> Option<Self> {
if req.uri().path().starts_with("/api/4/event/info") {
Some(Self {})
} else {
None
}
}
pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
info!("EventInfoScan::handle");
if req.method() != Method::GET {
return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
}
let (head, _body) = req.into_parts();
let url = Url::parse(&format!("dummy:{}", head.uri))?;
let query = PlainEventsQuery::from_url(&url)?;
let ret = match Self::exec(&query, node_config).await {
Ok(stream) => {
//
let stream = stream.map_ok(|_| Bytes::new());
response(StatusCode::OK).body(Body::wrap_stream(stream))?
}
Err(e) => response(StatusCode::INTERNAL_SERVER_ERROR).body(Body::from(format!("{:?}", e)))?,
};
Ok(ret)
}
pub async fn exec(
query: &PlainEventsQuery,
node_config: &NodeConfigCached,
) -> Result<Pin<Box<dyn Stream<Item = Result<Bytes, Error>> + Send>>, Error> {
let chconf = chconf_from_events_json(&query, node_config).await?;
let ret = channel_exec(
EvInfoFunc::new(
query.clone(),
query.timeout(),
query.events_max().unwrap_or(u64::MAX),
node_config.clone(),
),
query.channel(),
query.range(),
chconf.scalar_type,
chconf.shape,
AggKind::Stats1,
node_config,
)
.await?;
Ok(Box::pin(ret.map_err(Error::from)))
}
}
pub struct EvInfoFunc {
query: PlainEventsQuery,
timeout: Duration,
node_config: NodeConfigCached,
events_max: u64,
}
impl EvInfoFunc {
pub fn new(query: PlainEventsQuery, timeout: Duration, events_max: u64, node_config: NodeConfigCached) -> Self {
Self {
query,
timeout,
events_max,
node_config,
}
}
pub fn channel(&self) -> &Channel {
&self.query.channel()
}
pub fn range(&self) -> &NanoRange {
&self.query.range()
}
}
impl ChannelExecFunction for EvInfoFunc {
type Output = Pin<Box<dyn Stream<Item = Result<Bytes, Error>> + Send>>;
fn exec<NTY, END, EVS, ENP>(
self,
byte_order: END,
_scalar_type: ScalarType,
_shape: Shape,
event_value_shape: EVS,
_events_node_proc: ENP,
) -> Result<Self::Output, ::err::Error>
where
NTY: NumOps + NumFromBytes<NTY, END> + 'static,
END: Endianness + 'static,
EVS: EventValueShape<NTY, END> + EventValueFromBytes<NTY, END> + 'static,
ENP: EventsNodeProcessor<Input = <EVS as EventValueFromBytes<NTY, END>>::Batch> + 'static,
// TODO require these things in general?
<ENP as EventsNodeProcessor>::Output: Debug + Collectable + PushableIndex + Clearable,
<<ENP as EventsNodeProcessor>::Output as TimeBinnableType>::Output: Debug
+ TimeBinnableType<Output = <<ENP as EventsNodeProcessor>::Output as TimeBinnableType>::Output>
+ Collectable
+ Unpin,
Sitemty<<ENP as EventsNodeProcessor>::Output>: FrameType + Framable + 'static,
Sitemty<<<ENP as EventsNodeProcessor>::Output as TimeBinnableType>::Output>:
FrameType + Framable + DeserializeOwned,
{
let _ = byte_order;
let _ = event_value_shape;
let perf_opts = PerfOpts { inmem_bufcap: 4096 };
// TODO let PlainEventsJsonQuery provide the tune and pass to RawEventsQuery:
let evq = RawEventsQuery::new(self.query.channel().clone(), self.query.range().clone(), AggKind::Plain);
// TODO Use a Merged-From-Multiple-Local-Splits.
// TODO Pass the read buffer size from query parameter: GPFS needs a larger buffer..
// TODO Must issue multiple reads to GPFS, keep futures in a ordered queue.
let s = MergedFromRemotes::<ENP>::new(evq, perf_opts, self.node_config.node_config.cluster);
let f = collect_plain_events_json(s, self.timeout, 0, self.events_max, self.query.do_log());
let f = FutureExt::map(f, |item| match item {
Ok(item) => {
// TODO add channel entry info here?
//let obj = item.as_object_mut().unwrap();
//obj.insert("channelName", JsonValue::String(en));
Ok(Bytes::from(serde_json::to_vec(&item)?))
}
Err(e) => Err(e.into()),
});
let s = futures_util::stream::once(f);
Ok(Box::pin(s))
}
fn empty() -> Self::Output {
Box::pin(futures_util::stream::empty())
}
}

View File

@@ -6,7 +6,6 @@ pub mod channelconfig;
pub mod download;
pub mod err;
pub mod events;
pub mod evinfo;
pub mod gather;
pub mod prometheus;
pub mod proxy;
@@ -19,33 +18,32 @@ use crate::bodystream::response;
use crate::err::Error;
use crate::gather::gather_get_json;
use crate::pulsemap::UpdateTask;
use channelconfig::{chconf_from_binned, ChConf};
use disk::binned::query::PreBinnedQuery;
use future::Future;
use futures_util::{FutureExt, StreamExt, TryStreamExt};
use futures_util::{Future, FutureExt, StreamExt};
use http::{Method, StatusCode};
use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{server::Server, Body, Request, Response};
use net::SocketAddr;
use netpod::log::*;
use netpod::query::BinnedQuery;
use netpod::query::prebinned::PreBinnedQuery;
use netpod::timeunits::SEC;
use netpod::ProxyConfig;
use netpod::{FromUrl, NodeConfigCached, NodeStatus, NodeStatusArchiverAppliance};
use netpod::{ACCEPT_ALL, APP_JSON, APP_JSON_LINES, APP_OCTET};
use netpod::{NodeConfigCached, NodeStatus, NodeStatusArchiverAppliance};
use netpod::{APP_JSON, APP_JSON_LINES};
use nodenet::conn::events_service;
use panic::{AssertUnwindSafe, UnwindSafe};
use pin::Pin;
use serde::Serialize;
use std::collections::BTreeMap;
use std::net;
use std::panic;
use std::pin;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::sync::{Once, RwLock, RwLockWriteGuard};
use std::task;
use std::time::SystemTime;
use std::{future, net, panic, pin, task};
use task::{Context, Poll};
use tracing::Instrument;
use url::Url;
use task::Context;
use task::Poll;
pub const PSI_DAQBUFFER_SERVICE_MARK: &'static str = "PSI-Daqbuffer-Service-Mark";
pub const PSI_DAQBUFFER_SEEN_URL: &'static str = "PSI-Daqbuffer-Seen-Url";
@@ -324,12 +322,6 @@ async fn http_service_inner(
h.handle(req, ctx, &node_config).await
} else if let Some(h) = api4::binned::BinnedHandler::handler(&req) {
h.handle(req, &node_config).await
} else if path == "/api/4/binned" {
if req.method() == Method::GET {
Ok(binned(req, ctx, node_config).await?)
} else {
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
}
} else if path == "/api/4/prebinned" {
if req.method() == Method::GET {
Ok(prebinned(req, ctx, &node_config).await?)
@@ -384,8 +376,6 @@ async fn http_service_inner(
h.handle(req, &node_config).await
} else if let Some(h) = api1::Api1EventsBinaryHandler::handler(&req) {
h.handle(req, ctx, &node_config).await
} else if let Some(h) = evinfo::EventInfoScan::handler(&req) {
h.handle(req, &node_config).await
} else if let Some(h) = pulsemap::MapPulseScyllaHandler::handler(&req) {
h.handle(req, &node_config).await
} else if let Some(h) = pulsemap::IndexFullHttpFunction::handler(&req) {
@@ -468,74 +458,6 @@ impl StatusBoardAllHandler {
}
}
async fn binned(req: Request<Body>, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
match binned_inner(req, ctx, node_config).await {
Ok(ret) => Ok(ret),
Err(e) => {
error!("fn binned: {e:?}");
Ok(e.to_public_response())
}
}
}
async fn binned_inner(
req: Request<Body>,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<Response<Body>, Error> {
let (head, _body) = req.into_parts();
let url = Url::parse(&format!("dummy:{}", head.uri))?;
let query = BinnedQuery::from_url(&url).map_err(|e| {
let msg = format!("can not parse query: {}", e.msg());
e.add_public_msg(msg)
})?;
let chconf = chconf_from_binned(&query, node_config).await?;
// Update the series id since we don't require some unique identifier yet.
let mut query = query;
query.set_series_id(chconf.series);
let query = query;
// ---
let desc = format!("binned-BEG-{}-END-{}", query.range().beg / SEC, query.range().end / SEC);
let span1 = span!(Level::INFO, "httpret::binned", desc = &desc.as_str());
span1.in_scope(|| {
debug!("binned STARTING {:?}", query);
});
match head.headers.get(http::header::ACCEPT) {
Some(v) if v == APP_OCTET => binned_binary(query, chconf, &ctx, node_config).await,
Some(v) if v == APP_JSON || v == ACCEPT_ALL => binned_json(query, chconf, &ctx, node_config).await,
_ => Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?),
}
}
async fn binned_binary(
query: BinnedQuery,
chconf: ChConf,
_ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<Response<Body>, Error> {
let body_stream =
disk::binned::binned_bytes_for_http(&query, chconf.scalar_type, chconf.shape, node_config).await?;
let res = response(StatusCode::OK).body(BodyStream::wrapped(
body_stream.map_err(Error::from),
format!("binned_binary"),
))?;
Ok(res)
}
async fn binned_json(
query: BinnedQuery,
chconf: ChConf,
_ctx: &ReqCtx,
node_config: &NodeConfigCached,
) -> Result<Response<Body>, Error> {
let body_stream = disk::binned::binned_json(&query, chconf.scalar_type, chconf.shape, node_config).await?;
let res = response(StatusCode::OK).body(BodyStream::wrapped(
body_stream.map_err(Error::from),
format!("binned_json"),
))?;
Ok(res)
}
async fn prebinned(req: Request<Body>, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
match prebinned_inner(req, ctx, node_config).await {
Ok(ret) => Ok(ret),
@@ -549,10 +471,11 @@ async fn prebinned(req: Request<Body>, ctx: &ReqCtx, node_config: &NodeConfigCac
async fn prebinned_inner(
req: Request<Body>,
_ctx: &ReqCtx,
node_config: &NodeConfigCached,
_node_config: &NodeConfigCached,
) -> Result<Response<Body>, Error> {
let (head, _body) = req.into_parts();
let query = PreBinnedQuery::from_request(&head)?;
let url: url::Url = format!("dummy://{}", head.uri).parse()?;
let query = PreBinnedQuery::from_url(&url)?;
let desc = format!(
"pre-W-{}-B-{}",
query.patch().bin_t_len() / SEC,
@@ -560,21 +483,10 @@ async fn prebinned_inner(
);
let span1 = span!(Level::INFO, "httpret::prebinned", desc = &desc.as_str());
span1.in_scope(|| {
debug!("prebinned STARTING");
debug!("begin");
});
let fut = disk::binned::prebinned::pre_binned_bytes_for_http(node_config, &query).instrument(span1);
let ret = match fut.await {
Ok(s) => response(StatusCode::OK).body(BodyStream::wrapped(s.map_err(Error::from), desc))?,
Err(e) => {
if query.report_error() {
response(StatusCode::INTERNAL_SERVER_ERROR).body(Body::from(format!("{:?}", e)))?
} else {
error!("fn prebinned: {:?}", e);
response(StatusCode::INTERNAL_SERVER_ERROR).body(Body::empty())?
}
}
};
Ok(ret)
//let fut = disk::binned::prebinned::pre_binned_bytes_for_http(node_config, &query).instrument(span1);
todo!()
}
async fn node_status(

View File

@@ -5,8 +5,7 @@ use crate::err::Error;
 use crate::gather::{gather_get_json_generic, SubRes};
 use crate::pulsemap::MapPulseQuery;
 use crate::{api_1_docs, api_4_docs, response, response_err, Cont, ReqCtx, PSI_DAQBUFFER_SERVICE_MARK};
-use futures_core::Stream;
-use futures_util::pin_mut;
+use futures_util::{pin_mut, Stream};
 use http::{Method, StatusCode};
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Request, Response, Server};

View File

@@ -1,7 +1,7 @@
 use crate::err::Error;
 use crate::gather::{gather_get_json_generic, SubRes};
 use crate::response;
-use futures_core::Future;
+use futures_util::Future;
 use http::{header, Request, Response, StatusCode};
 use hyper::Body;
 use itertools::Itertools;