Refactor series lookup

This commit is contained in:
Dominik Werder
2024-05-16 23:33:34 +02:00
parent 82455a2b16
commit 6224df534a
41 changed files with 762 additions and 562 deletions

View File

@@ -6,6 +6,7 @@ use crate::gather::SubRes;
use crate::response;
use crate::ReqCtx;
use crate::Requ;
use crate::ServiceSharedResources;
use bytes::BufMut;
use bytes::BytesMut;
use disk::merge::mergedblobsfromremotes::MergedBlobsFromRemotes;
@@ -896,6 +897,7 @@ impl Api1EventsBinaryHandler {
&self,
req: Requ,
ctx: &ReqCtx,
shared_res: &ServiceSharedResources,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() != Method::POST {
@@ -942,6 +944,7 @@ impl Api1EventsBinaryHandler {
span.clone(),
reqidspan.clone(),
ctx,
shared_res,
node_config,
)
.instrument(span)
@@ -958,6 +961,7 @@ impl Api1EventsBinaryHandler {
span: tracing::Span,
reqidspan: tracing::Span,
ctx: &ReqCtx,
shared_res: &ServiceSharedResources,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let self_name = any::type_name::<Self>();
@@ -983,9 +987,14 @@ impl Api1EventsBinaryHandler {
for ch in qu.channels() {
debug!("try to find config quorum for {ch:?}");
let ch = SfDbChannel::from_name(backend, ch.name());
let ch_conf =
nodenet::configquorum::find_config_basics_quorum(ch.clone(), range.clone().into(), ctx, ncc)
.await?;
let ch_conf = nodenet::configquorum::find_config_basics_quorum(
ch.clone(),
range.clone().into(),
ctx,
&shared_res.pgqueue,
ncc,
)
.await?;
match ch_conf {
Some(x) => {
debug!("found quorum {ch:?} {x:?}");

View File

@@ -72,8 +72,7 @@ impl AccountingIngestedBytes {
let scyco = ncc
.node_config
.cluster
.scylla
.as_ref()
.scylla_st()
.ok_or_else(|| Error::with_public_msg_no_trace(format!("no scylla configured")))?;
let scy = scyllaconn::conn::create_scy_session(scyco).await?;
let mut stream = scyllaconn::accounting::totals::AccountingStreamScylla::new(q.range().try_into()?, scy);
@@ -136,16 +135,16 @@ impl AccountingToplistCounts {
_ctx: &ReqCtx,
ncc: &NodeConfigCached,
) -> Result<Toplist, Error> {
// TODO assumes that accounting data is in the LT keyspace
let scyco = ncc
.node_config
.cluster
.scylla
.as_ref()
.ok_or_else(|| Error::with_public_msg_no_trace(format!("no scylla configured")))?;
.scylla_lt()
.ok_or_else(|| Error::with_public_msg_no_trace(format!("no lt scylla configured")))?;
let scy = scyllaconn::conn::create_scy_session(scyco).await?;
let pgconf = &ncc.node_config.cluster.database;
let pg = dbconn::create_connection(&pgconf).await?;
let mut top1 = scyllaconn::accounting::toplist::read_ts(qu.ts().0, scy).await?;
let (pg, pgjh) = dbconn::create_connection(&pgconf).await?;
let mut top1 = scyllaconn::accounting::toplist::read_ts(qu.ts().ns(), scy).await?;
top1.sort_by_bytes();
let mut ret = Toplist { toplist: Vec::new() };
let series_ids: Vec<_> = top1.usage().iter().take(qu.limit() as _).map(|x| x.0).collect();

View File

@@ -5,6 +5,8 @@ use crate::channelconfig::ch_conf_from_binned;
use crate::err::Error;
use crate::requests::accepts_json_or_all;
use crate::requests::accepts_octets;
use crate::ServiceSharedResources;
use dbconn::worker::PgQueue;
use http::Method;
use http::StatusCode;
use httpclient::body_empty;
@@ -23,7 +25,13 @@ use query::api4::binned::BinnedQuery;
use tracing::Instrument;
use url::Url;
async fn binned_json(url: Url, req: Requ, ctx: &ReqCtx, ncc: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn binned_json(
url: Url,
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
debug!("{:?}", req);
let reqid = crate::status_board()
.map_err(|e| Error::with_msg_no_trace(e.to_string()))?
@@ -35,7 +43,7 @@ async fn binned_json(url: Url, req: Requ, ctx: &ReqCtx, ncc: &NodeConfigCached)
e.add_public_msg(msg)
})?;
// TODO handle None case better and return 404
let ch_conf = ch_conf_from_binned(&query, ctx, ncc)
let ch_conf = ch_conf_from_binned(&query, ctx, pgqueue, ncc)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
let span1 = span!(
@@ -58,7 +66,7 @@ async fn binned_json(url: Url, req: Requ, ctx: &ReqCtx, ncc: &NodeConfigCached)
Ok(ret)
}
async fn binned(req: Requ, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn binned(req: Requ, ctx: &ReqCtx, pgqueue: &PgQueue, ncc: &NodeConfigCached) -> Result<StreamResponse, Error> {
let url = req_uri_to_url(req.uri())?;
if req
.uri()
@@ -68,7 +76,7 @@ async fn binned(req: Requ, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Resu
Err(Error::with_msg_no_trace("hidden message").add_public_msg("PublicMessage"))?;
}
if accepts_json_or_all(&req.headers()) {
Ok(binned_json(url, req, ctx, node_config).await?)
Ok(binned_json(url, req, ctx, pgqueue, ncc).await?)
} else if accepts_octets(&req.headers()) {
Ok(response_err_msg(
StatusCode::NOT_ACCEPTABLE,
@@ -98,12 +106,13 @@ impl BinnedHandler {
&self,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
shared_res: &ServiceSharedResources,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() != Method::GET {
return Ok(response(StatusCode::NOT_ACCEPTABLE).body(body_empty())?);
}
match binned(req, ctx, node_config).await {
match binned(req, ctx, &shared_res.pgqueue, ncc).await {
Ok(ret) => Ok(ret),
Err(e) => {
warn!("BinnedHandler handle sees: {e}");

View File

@@ -1,6 +1,7 @@
use crate::bodystream::response_err_msg;
use crate::response;
use crate::ReqCtx;
use crate::ServiceSharedResources;
use err::thiserror;
use err::PublicError;
use err::ThisError;
@@ -15,6 +16,7 @@ use httpclient::StreamResponse;
use netpod::log::*;
use netpod::NodeConfigCached;
use netpod::ServiceVersion;
use std::sync::Arc;
#[derive(Debug, ThisError)]
pub enum EventDataError {
@@ -50,14 +52,14 @@ impl EventDataHandler {
req: Requ,
_ctx: &ReqCtx,
ncc: &NodeConfigCached,
_service_version: &ServiceVersion,
shared_res: Arc<ServiceSharedResources>,
) -> Result<StreamResponse, EventDataError> {
if req.method() != Method::POST {
Ok(response(StatusCode::NOT_ACCEPTABLE)
.body(body_empty())
.map_err(|_| EventDataError::InternalError)?)
} else {
match Self::handle_req(req, ncc).await {
match Self::handle_req(req, ncc, shared_res).await {
Ok(ret) => Ok(ret),
Err(e) => {
error!("{e}");
@@ -69,7 +71,11 @@ impl EventDataHandler {
}
}
async fn handle_req(req: Requ, ncc: &NodeConfigCached) -> Result<StreamResponse, EventDataError> {
async fn handle_req(
req: Requ,
ncc: &NodeConfigCached,
shared_res: Arc<ServiceSharedResources>,
) -> Result<StreamResponse, EventDataError> {
let (_head, body) = req.into_parts();
let body = read_body_bytes(body)
.await

View File

@@ -5,9 +5,11 @@ use crate::requests::accepts_cbor_framed;
use crate::requests::accepts_json_framed;
use crate::requests::accepts_json_or_all;
use crate::response;
use crate::ServiceSharedResources;
use crate::ToPublicResponse;
use bytes::Bytes;
use bytes::BytesMut;
use dbconn::worker::PgQueue;
use futures_util::future;
use futures_util::stream;
use futures_util::Stream;
@@ -44,12 +46,13 @@ impl EventsHandler {
&self,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
shared_res: &ServiceSharedResources,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() != Method::GET {
return Ok(response(StatusCode::NOT_ACCEPTABLE).body(body_empty())?);
}
match plain_events(req, ctx, node_config).await {
match plain_events(req, ctx, &shared_res.pgqueue, ncc).await {
Ok(ret) => Ok(ret),
Err(e) => {
error!("EventsHandler sees: {e}");
@@ -59,14 +62,19 @@ impl EventsHandler {
}
}
async fn plain_events(req: Requ, ctx: &ReqCtx, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn plain_events(
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let url = req_uri_to_url(req.uri())?;
if accepts_cbor_framed(req.headers()) {
Ok(plain_events_cbor_framed(url, req, ctx, node_config).await?)
Ok(plain_events_cbor_framed(url, req, ctx, pgqueue, ncc).await?)
} else if accepts_json_framed(req.headers()) {
Ok(plain_events_json_framed(url, req, ctx, node_config).await?)
Ok(plain_events_json_framed(url, req, ctx, pgqueue, ncc).await?)
} else if accepts_json_or_all(req.headers()) {
Ok(plain_events_json(url, req, ctx, node_config).await?)
Ok(plain_events_json(url, req, ctx, pgqueue, ncc).await?)
} else {
let ret = response_err_msg(StatusCode::NOT_ACCEPTABLE, format!("unsupported accept {:?}", req))?;
Ok(ret)
@@ -77,10 +85,11 @@ async fn plain_events_cbor_framed(
url: Url,
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let evq = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let ch_conf = chconf_from_events_quorum(&evq, ctx, ncc)
let ch_conf = chconf_from_events_quorum(&evq, ctx, pgqueue, ncc)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("plain_events_cbor_framed chconf_from_events_quorum: {ch_conf:?} {req:?}");
@@ -115,10 +124,11 @@ async fn plain_events_json_framed(
url: Url,
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let evq = PlainEventsQuery::from_url(&url).map_err(|e| e.add_public_msg(format!("Can not understand query")))?;
let ch_conf = chconf_from_events_quorum(&evq, ctx, ncc)
let ch_conf = chconf_from_events_quorum(&evq, ctx, pgqueue, ncc)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("plain_events_json_framed chconf_from_events_quorum: {ch_conf:?} {req:?}");
@@ -133,7 +143,8 @@ async fn plain_events_json(
url: Url,
req: Requ,
ctx: &ReqCtx,
node_config: &NodeConfigCached,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let self_name = "plain_events_json";
info!("{self_name} req: {:?}", req);
@@ -141,17 +152,17 @@ async fn plain_events_json(
let query = PlainEventsQuery::from_url(&url)?;
info!("{self_name} query {query:?}");
// TODO handle None case better and return 404
let ch_conf = chconf_from_events_quorum(&query, ctx, node_config)
let ch_conf = chconf_from_events_quorum(&query, ctx, pgqueue, ncc)
.await
.map_err(Error::from)?
.ok_or_else(|| Error::with_msg_no_trace("channel not found"))?;
info!("{self_name} chconf_from_events_quorum: {ch_conf:?}");
let open_bytes = OpenBoxedBytesViaHttp::new(node_config.node_config.cluster.clone());
let open_bytes = OpenBoxedBytesViaHttp::new(ncc.node_config.cluster.clone());
let item = streams::plaineventsjson::plain_events_json(
&query,
ch_conf,
ctx,
&node_config.node_config.cluster,
&ncc.node_config.cluster,
Box::pin(open_bytes),
)
.await;

View File

@@ -1,6 +1,7 @@
use crate::bodystream::response;
use crate::err::Error;
use crate::ReqCtx;
use crate::ServiceSharedResources;
use futures_util::StreamExt;
use http::Method;
use http::StatusCode;
@@ -37,7 +38,8 @@ impl ConnectionStatusEvents {
&self,
req: Requ,
_ctx: &ReqCtx,
node_config: &NodeConfigCached,
shared_res: &ServiceSharedResources,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() == Method::GET {
let accept_def = APP_JSON;
@@ -48,7 +50,7 @@ impl ConnectionStatusEvents {
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
let url = req_uri_to_url(req.uri())?;
let q = ChannelStateEventsQuery::from_url(&url)?;
match self.fetch_data(&q, node_config).await {
match self.fetch_data(&q, shared_res, ncc).await {
Ok(k) => {
let body = ToJsonBody::from(&k).into_body();
Ok(response(StatusCode::OK).body(body)?)
@@ -70,17 +72,18 @@ impl ConnectionStatusEvents {
async fn fetch_data(
&self,
q: &ChannelStateEventsQuery,
node_config: &NodeConfigCached,
shared_res: &ServiceSharedResources,
ncc: &NodeConfigCached,
) -> Result<Vec<ConnStatusEvent>, Error> {
let scyco = node_config
let scyco = ncc
.node_config
.cluster
.scylla
.as_ref()
.scylla_st()
.ok_or_else(|| Error::with_public_msg_no_trace(format!("no scylla configured")))?;
let _scy = scyllaconn::conn::create_scy_session(scyco).await?;
let _chconf =
nodenet::channelconfig::channel_config(q.range().clone(), q.channel().clone(), node_config).await?;
nodenet::channelconfig::channel_config(q.range().clone(), q.channel().clone(), &shared_res.pgqueue, ncc)
.await?;
let _do_one_before_range = true;
let ret = Vec::new();
if true {
@@ -111,7 +114,8 @@ impl ChannelStatusEventsHandler {
&self,
req: Requ,
_ctx: &ReqCtx,
node_config: &NodeConfigCached,
shared_res: &ServiceSharedResources,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() == Method::GET {
let accept_def = APP_JSON;
@@ -122,7 +126,7 @@ impl ChannelStatusEventsHandler {
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
let url = req_uri_to_url(req.uri())?;
let q = ChannelStateEventsQuery::from_url(&url)?;
match self.fetch_data(&q, node_config).await {
match self.fetch_data(&q, shared_res, ncc).await {
Ok(k) => {
let body = ToJsonBody::from(&k).into_body();
Ok(response(StatusCode::OK).body(body)?)
@@ -144,20 +148,25 @@ impl ChannelStatusEventsHandler {
async fn fetch_data(
&self,
q: &ChannelStateEventsQuery,
node_config: &NodeConfigCached,
shared_res: &ServiceSharedResources,
ncc: &NodeConfigCached,
) -> Result<ChannelStatusEvents, Error> {
let scyco = node_config
let scyco = ncc
.node_config
.cluster
.scylla
.as_ref()
.scylla_st()
.ok_or_else(|| Error::with_public_msg_no_trace(format!("no scylla configured")))?;
let scy = scyllaconn::conn::create_scy_session(scyco).await?;
let do_one_before_range = true;
if false {
let chconf = nodenet::channelconfig::channel_config(q.range().clone(), q.channel().clone(), node_config)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel config not found"))?;
let chconf = nodenet::channelconfig::channel_config(
q.range().clone(),
q.channel().clone(),
&shared_res.pgqueue,
ncc,
)
.await?
.ok_or_else(|| Error::with_msg_no_trace("channel config not found"))?;
use netpod::ChannelTypeConfigGen;
match chconf {
ChannelTypeConfigGen::Scylla(_x) => todo!(),

View File

@@ -2,6 +2,7 @@ use crate::err::Error;
use crate::response;
use crate::ToPublicResponse;
use dbconn::create_connection;
use dbconn::worker::PgQueue;
use futures_util::StreamExt;
use http::Method;
use http::StatusCode;
@@ -38,27 +39,30 @@ use url::Url;
pub async fn chconf_from_events_quorum(
q: &PlainEventsQuery,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<Option<ChannelTypeConfigGen>, Error> {
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ctx, ncc).await?;
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ctx, pgqueue, ncc).await?;
Ok(ret)
}
pub async fn chconf_from_prebinned(
q: &PreBinnedQuery,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<Option<ChannelTypeConfigGen>, Error> {
let ret = find_config_basics_quorum(q.channel().clone(), q.patch().patch_range(), ctx, ncc).await?;
let ret = find_config_basics_quorum(q.channel().clone(), q.patch().patch_range(), ctx, pgqueue, ncc).await?;
Ok(ret)
}
pub async fn ch_conf_from_binned(
q: &BinnedQuery,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<Option<ChannelTypeConfigGen>, Error> {
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ctx, ncc).await?;
let ret = find_config_basics_quorum(q.channel().clone(), q.range().clone(), ctx, pgqueue, ncc).await?;
Ok(ret)
}
@@ -73,7 +77,12 @@ impl ChannelConfigHandler {
}
}
pub async fn handle(&self, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
pub async fn handle(
&self,
req: Requ,
pgqueue: &PgQueue,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() == Method::GET {
let accept_def = APP_JSON;
let accept = req
@@ -81,7 +90,7 @@ impl ChannelConfigHandler {
.get(http::header::ACCEPT)
.map_or(accept_def, |k| k.to_str().unwrap_or(accept_def));
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
match self.channel_config(req, &node_config).await {
match self.channel_config(req, pgqueue, &node_config).await {
Ok(k) => Ok(k),
Err(e) => {
warn!("ChannelConfigHandler::handle: got error from channel_config: {e:?}");
@@ -96,10 +105,16 @@ impl ChannelConfigHandler {
}
}
async fn channel_config(&self, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
async fn channel_config(
&self,
req: Requ,
pgqueue: &PgQueue,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
let url = req_uri_to_url(req.uri())?;
let q = ChannelConfigQuery::from_url(&url)?;
let conf = nodenet::channelconfig::channel_config(q.range.clone(), q.channel.clone(), node_config).await?;
let conf =
nodenet::channelconfig::channel_config(q.range.clone(), q.channel.clone(), pgqueue, node_config).await?;
match conf {
Some(conf) => {
let res: ChannelConfigResponse = conf.into();
@@ -180,6 +195,7 @@ impl ChannelConfigQuorumHandler {
&self,
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
node_config: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
if req.method() == Method::GET {
@@ -189,7 +205,7 @@ impl ChannelConfigQuorumHandler {
.get(http::header::ACCEPT)
.map_or(accept_def, |k| k.to_str().unwrap_or(accept_def));
if accept.contains(APP_JSON) || accept.contains(ACCEPT_ALL) {
match self.channel_config_quorum(req, ctx, &node_config).await {
match self.channel_config_quorum(req, ctx, pgqueue, &node_config).await {
Ok(k) => Ok(k),
Err(e) => {
warn!("from channel_config_quorum: {e}");
@@ -208,13 +224,15 @@ impl ChannelConfigQuorumHandler {
&self,
req: Requ,
ctx: &ReqCtx,
pgqueue: &PgQueue,
ncc: &NodeConfigCached,
) -> Result<StreamResponse, Error> {
info!("channel_config_quorum");
let url = req_uri_to_url(req.uri())?;
let q = ChannelConfigQuery::from_url(&url)?;
info!("channel_config_quorum for q {q:?}");
let ch_confs = nodenet::configquorum::find_config_basics_quorum(q.channel, q.range.into(), ctx, ncc).await?;
let ch_confs =
nodenet::configquorum::find_config_basics_quorum(q.channel, q.range.into(), ctx, pgqueue, ncc).await?;
let ret = response(StatusCode::OK)
.header(http::header::CONTENT_TYPE, APP_JSON)
.body(ToJsonBody::from(&ch_confs).into_body())?;
@@ -386,8 +404,7 @@ impl ScyllaChannelsActive {
let scyco = node_config
.node_config
.cluster
.scylla
.as_ref()
.scylla_st()
.ok_or_else(|| Error::with_public_msg_no_trace(format!("No Scylla configured")))?;
let scy = scyllaconn::conn::create_scy_session(scyco).await?;
// Database stores tsedge/ts_msp in units of (10 sec), and we additionally map to the grid.
@@ -494,7 +511,7 @@ impl IocForChannel {
node_config: &NodeConfigCached,
) -> Result<Option<IocForChannelRes>, Error> {
let dbconf = &node_config.node_config.cluster.database;
let pg_client = create_connection(dbconf).await?;
let (pg_client, pgjh) = create_connection(dbconf).await?;
let rows = pg_client
.query(
"select addr from ioc_by_channel where facility = $1 and channel = $2",
@@ -583,8 +600,7 @@ impl ScyllaSeriesTsMsp {
let scyco = node_config
.node_config
.cluster
.scylla
.as_ref()
.scylla_st()
.ok_or_else(|| Error::with_public_msg_no_trace(format!("No Scylla configured")))?;
let scy = scyllaconn::conn::create_scy_session(scyco).await?;
let mut ts_msps = Vec::new();
@@ -626,7 +642,7 @@ impl AmbigiousChannelNames {
}
}
pub async fn handle(&self, req: Requ, node_config: &NodeConfigCached) -> Result<StreamResponse, Error> {
pub async fn handle(&self, req: Requ, ncc: &NodeConfigCached) -> Result<StreamResponse, Error> {
if req.method() == Method::GET {
let accept_def = APP_JSON;
let accept = req
@@ -634,7 +650,7 @@ impl AmbigiousChannelNames {
.get(http::header::ACCEPT)
.map_or(accept_def, |k| k.to_str().unwrap_or(accept_def));
if accept == APP_JSON || accept == ACCEPT_ALL {
match self.process(node_config).await {
match self.process(ncc).await {
Ok(k) => {
let body = ToJsonBody::from(&k).into_body();
Ok(response(StatusCode::OK).body(body)?)
@@ -650,9 +666,9 @@ impl AmbigiousChannelNames {
}
}
async fn process(&self, node_config: &NodeConfigCached) -> Result<AmbigiousChannelNamesResponse, Error> {
let dbconf = &node_config.node_config.cluster.database;
let pg_client = create_connection(dbconf).await?;
async fn process(&self, ncc: &NodeConfigCached) -> Result<AmbigiousChannelNamesResponse, Error> {
let dbconf = &ncc.node_config.cluster.database;
let (pg_client, pgjh) = create_connection(dbconf).await?;
let rows = pg_client
.query(
"select t2.series, t2.channel, t2.scalar_type, t2.shape_dims, t2.agg_kind from series_by_channel t1, series_by_channel t2 where t2.channel = t1.channel and t2.series != t1.series",
@@ -747,9 +763,7 @@ impl GenerateScyllaTestData {
}
async fn process(&self, node_config: &NodeConfigCached) -> Result<(), Error> {
let dbconf = &node_config.node_config.cluster.database;
let _pg_client = create_connection(dbconf).await?;
let scyconf = node_config.node_config.cluster.scylla.as_ref().unwrap();
let scyconf = node_config.node_config.cluster.scylla_st().unwrap();
let scy = scyllaconn::conn::create_scy_session(scyconf).await?;
let series: u64 = 42001;
// TODO query `ts_msp` for all MSP values und use that to delete from event table first.

View File

@@ -19,6 +19,8 @@ use crate::bodystream::response;
use crate::err::Error;
use ::err::thiserror;
use ::err::ThisError;
use dbconn::worker::PgQueue;
use dbconn::worker::PgWorker;
use futures_util::Future;
use futures_util::FutureExt;
use http::Method;
@@ -37,6 +39,7 @@ use netpod::query::prebinned::PreBinnedQuery;
use netpod::req_uri_to_url;
use netpod::status_board;
use netpod::status_board_init;
use netpod::Database;
use netpod::NodeConfigCached;
use netpod::ReqCtx;
use netpod::ServiceVersion;
@@ -49,6 +52,7 @@ use serde::Serialize;
use std::net;
use std::panic;
use std::pin;
use std::sync::Arc;
use std::task;
use task::Context;
use task::Poll;
@@ -79,6 +83,7 @@ impl IntoBoxedError for tokio::task::JoinError {}
impl IntoBoxedError for api4::databuffer_tools::FindActiveError {}
impl IntoBoxedError for std::string::FromUtf8Error {}
impl IntoBoxedError for std::io::Error {}
impl IntoBoxedError for dbconn::worker::Error {}
impl<E> From<E> for RetrievalError
where
@@ -95,16 +100,29 @@ impl ::err::ToErr for RetrievalError {
}
}
pub async fn host(node_config: NodeConfigCached, service_version: ServiceVersion) -> Result<(), RetrievalError> {
/// Long-lived resources shared by all HTTP request handlers of this service.
/// Constructed once in `host` after spawning the `PgWorker`, then passed to
/// handlers wrapped in an `Arc` (see the `shared_res.clone()` per connection).
pub struct ServiceSharedResources {
// Handle for submitting database work to the shared Postgres worker task.
pgqueue: PgQueue,
}
impl ServiceSharedResources {
/// Bundles the given `PgQueue` into the shared-resources struct.
pub fn new(pgqueue: PgQueue) -> Self {
Self { pgqueue }
}
}
pub async fn host(ncc: NodeConfigCached, service_version: ServiceVersion) -> Result<(), RetrievalError> {
status_board_init();
#[cfg(DISABLED)]
if let Some(bind) = node_config.node.prometheus_api_bind {
if let Some(bind) = ncc.node.prometheus_api_bind {
tokio::spawn(prometheus::host(bind));
}
// let rawjh = taskrun::spawn(nodenet::conn::events_service(node_config.clone()));
let (pgqueue, pgworker) = PgWorker::new(&ncc.node_config.cluster.database).await?;
let pgworker_jh = taskrun::spawn(pgworker.work());
let shared_res = ServiceSharedResources::new(pgqueue);
let shared_res = Arc::new(shared_res);
use std::str::FromStr;
let bind_addr = SocketAddr::from_str(&format!("{}:{}", node_config.node.listen(), node_config.node.port))?;
let bind_addr = SocketAddr::from_str(&format!("{}:{}", ncc.node.listen(), ncc.node.port))?;
// tokio::net::TcpSocket::new_v4()?.listen(200)?
let listener = TcpListener::bind(bind_addr).await?;
loop {
@@ -114,14 +132,24 @@ pub async fn host(node_config: NodeConfigCached, service_version: ServiceVersion
break;
};
debug!("new connection from {addr}");
let node_config = node_config.clone();
let node_config = ncc.clone();
let service_version = service_version.clone();
let io = TokioIo::new(stream);
let shared_res = shared_res.clone();
// let shared_res = &shared_res;
tokio::task::spawn(async move {
let res = hyper::server::conn::http1::Builder::new()
.serve_connection(
io,
service_fn(move |req| the_service_fn(req, addr, node_config.clone(), service_version.clone())),
service_fn(move |req| {
the_service_fn(
req,
addr,
node_config.clone(),
service_version.clone(),
shared_res.clone(),
)
}),
)
.await;
match res {
@@ -132,7 +160,7 @@ pub async fn host(node_config: NodeConfigCached, service_version: ServiceVersion
}
});
}
info!("http host done");
// rawjh.await??;
Ok(())
}
@@ -142,10 +170,11 @@ async fn the_service_fn(
addr: SocketAddr,
node_config: NodeConfigCached,
service_version: ServiceVersion,
shared_res: Arc<ServiceSharedResources>,
) -> Result<StreamResponse, Error> {
let ctx = ReqCtx::new_with_node(&req, &node_config);
let reqid_span = span!(Level::INFO, "req", reqid = ctx.reqid());
let f = http_service(req, addr, ctx, node_config, service_version);
let f = http_service(req, addr, ctx, node_config, service_version, shared_res);
let f = Cont { f: Box::pin(f) };
f.instrument(reqid_span).await
}
@@ -156,6 +185,7 @@ async fn http_service(
ctx: ReqCtx,
node_config: NodeConfigCached,
service_version: ServiceVersion,
shared_res: Arc<ServiceSharedResources>,
) -> Result<StreamResponse, Error> {
info!(
"http-request {:?} - {:?} - {:?} - {:?}",
@@ -164,7 +194,7 @@ async fn http_service(
req.uri(),
req.headers()
);
match http_service_try(req, ctx, &node_config, &service_version).await {
match http_service_try(req, ctx, &node_config, &service_version, shared_res).await {
Ok(k) => Ok(k),
Err(e) => {
error!("daqbuffer node http_service sees error from http_service_try: {}", e);
@@ -209,6 +239,7 @@ async fn http_service_try(
ctx: ReqCtx,
node_config: &NodeConfigCached,
service_version: &ServiceVersion,
shared_res: Arc<ServiceSharedResources>,
) -> Result<StreamResponse, Error> {
use http::HeaderValue;
let mut urlmarks = Vec::new();
@@ -221,7 +252,7 @@ async fn http_service_try(
}
}
}
let mut res = http_service_inner(req, &ctx, node_config, service_version).await?;
let mut res = http_service_inner(req, &ctx, node_config, service_version, shared_res).await?;
let hm = res.headers_mut();
hm.append("Access-Control-Allow-Origin", "*".parse().unwrap());
hm.append("Access-Control-Allow-Headers", "*".parse().unwrap());
@@ -243,6 +274,7 @@ async fn http_service_inner(
ctx: &ReqCtx,
node_config: &NodeConfigCached,
service_version: &ServiceVersion,
shared_res: Arc<ServiceSharedResources>,
) -> Result<StreamResponse, RetrievalError> {
let uri = req.uri().clone();
let path = uri.path();
@@ -291,7 +323,7 @@ async fn http_service_inner(
Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(body_empty())?)
}
} else if let Some(h) = api4::eventdata::EventDataHandler::handler(&req) {
Ok(h.handle(req, ctx, &node_config, service_version)
Ok(h.handle(req, ctx, &node_config, shared_res)
.await
.map_err(|e| Error::with_msg_no_trace(e.to_string()))?)
} else if let Some(h) = api4::status::StatusNodesRecursive::handler(&req) {
@@ -303,19 +335,19 @@ async fn http_service_inner(
} else if let Some(h) = api4::search::ChannelSearchHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = channel_status::ConnectionStatusEvents::handler(&req) {
Ok(h.handle(req, ctx, &node_config).await?)
Ok(h.handle(req, ctx, &shared_res, &node_config).await?)
} else if let Some(h) = channel_status::ChannelStatusEventsHandler::handler(&req) {
Ok(h.handle(req, ctx, &node_config).await?)
Ok(h.handle(req, ctx, &shared_res, &node_config).await?)
} else if let Some(h) = api4::events::EventsHandler::handler(&req) {
Ok(h.handle(req, ctx, &node_config).await?)
Ok(h.handle(req, ctx, &shared_res, &node_config).await?)
} else if let Some(h) = api4::binned::BinnedHandler::handler(&req) {
Ok(h.handle(req, ctx, &node_config).await?)
Ok(h.handle(req, ctx, &shared_res, &node_config).await?)
} else if let Some(h) = channelconfig::ChannelConfigQuorumHandler::handler(&req) {
Ok(h.handle(req, ctx, &node_config).await?)
Ok(h.handle(req, ctx, &shared_res.pgqueue, &node_config).await?)
} else if let Some(h) = channelconfig::ChannelConfigsHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = channelconfig::ChannelConfigHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
Ok(h.handle(req, &shared_res.pgqueue, &node_config).await?)
} else if let Some(h) = channelconfig::IocForChannel::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = channelconfig::ScyllaChannelsActive::handler(&req) {
@@ -357,7 +389,7 @@ async fn http_service_inner(
} else if let Some(h) = settings::SettingsThreadsMaxHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = api1::Api1EventsBinaryHandler::handler(&req) {
Ok(h.handle(req, ctx, &node_config).await?)
Ok(h.handle(req, ctx, &shared_res, &node_config).await?)
} else if let Some(h) = pulsemap::MapPulseScyllaHandler::handler(&req) {
Ok(h.handle(req, &node_config).await?)
} else if let Some(h) = pulsemap::IndexChannelHttpFunction::handler(&req) {

View File

@@ -425,7 +425,7 @@ impl IndexChannelHttpFunction {
async fn index(req: Requ, do_print: bool, node_config: &NodeConfigCached) -> Result<String, Error> {
// TODO avoid double-insert on central storage.
let pgc = dbconn::create_connection(&node_config.node_config.cluster.database).await?;
let (pgc, pgjh) = dbconn::create_connection(&node_config.node_config.cluster.database).await?;
// TODO remove update of static columns when older clients are removed.
let sql = "insert into map_pulse_files (channel, split, timebin, pulse_min, pulse_max, hostname, ks) values ($1, $2, $3, $4, $5, $6, $7) on conflict (channel, split, timebin) do update set pulse_min = $4, pulse_max = $5, upc1 = map_pulse_files.upc1 + 1, hostname = $6";
let insert_01 = pgc.prepare(sql).await?;
@@ -936,7 +936,7 @@ impl MapPulseScyllaHandler {
let url = req_uri_to_url(req.uri())?;
let query = MapPulseQuery::from_url(&url)?;
let pulse = query.pulse;
let scyconf = if let Some(x) = node_config.node_config.cluster.scylla.as_ref() {
let scyconf = if let Some(x) = node_config.node_config.cluster.scylla_st() {
x
} else {
return Err(Error::with_public_msg_no_trace("no scylla configured"));
@@ -1017,7 +1017,7 @@ impl MapPulseLocalHttpFunction {
})
.unwrap_or_else(|| String::from("missing x-req-from"));
let ts1 = Instant::now();
let conn = dbconn::create_connection(&node_config.node_config.cluster.database).await?;
let (conn, pgjh) = dbconn::create_connection(&node_config.node_config.cluster.database).await?;
let sql = "select channel, hostname, timebin, split, ks from map_pulse_files where hostname = $1 and pulse_min <= $2 and (pulse_max >= $2 or closed = 0)";
let rows = conn.query(sql, &[&node_config.node.host, &(pulse as i64)]).await?;
let cands: Vec<_> = rows
@@ -1516,7 +1516,7 @@ impl MarkClosedHttpFunction {
}
pub async fn mark_closed(node_config: &NodeConfigCached) -> Result<(), Error> {
let conn = dbconn::create_connection(&node_config.node_config.cluster.database).await?;
let (conn, pgjh) = dbconn::create_connection(&node_config.node_config.cluster.database).await?;
let sql = "select distinct channel from map_pulse_files order by channel";
let rows = conn.query(sql, &[]).await?;
let chns: Vec<_> = rows.iter().map(|r| r.get::<_, String>(0)).collect();