Restructure configuration handling across the different backend kinds

This commit is contained in:
Dominik Werder
2022-02-18 19:24:14 +01:00
parent b7aaad7a7b
commit 96fa8b5b09
20 changed files with 195 additions and 138 deletions

View File

@@ -18,8 +18,8 @@ use commonio::StatsChannel;
use err::{ErrStr, Error};
use futures_util::StreamExt;
use items::{StreamItem, WithLen};
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::{log::*, Database};
use netpod::{ChannelArchiver, ChannelConfigQuery, ChannelConfigResponse};
use netpod::{ScalarType, Shape};
use serde::Serialize;
@@ -178,9 +178,10 @@ impl From<ErrWrap> for Error {
pub async fn channel_config_from_db(
q: &ChannelConfigQuery,
conf: &ChannelArchiver,
_conf: &ChannelArchiver,
database: &Database,
) -> Result<ChannelConfigResponse, Error> {
let dbc = database_connect(&conf.database).await?;
let dbc = database_connect(database).await?;
let sql = "select config from channels where name = $1";
let rows = dbc.query(sql, &[&q.channel.name()]).await.errstr()?;
if let Some(row) = rows.first() {
@@ -217,10 +218,14 @@ pub async fn channel_config_from_db(
}
}
pub async fn channel_config(q: &ChannelConfigQuery, conf: &ChannelArchiver) -> Result<ChannelConfigResponse, Error> {
pub async fn channel_config(
q: &ChannelConfigQuery,
_conf: &ChannelArchiver,
database: &Database,
) -> Result<ChannelConfigResponse, Error> {
let _timed = Timed::new("channel_config");
let mut type_info = None;
let ixpaths = indexfiles::index_file_path_list(q.channel.clone(), conf.database.clone()).await?;
let ixpaths = indexfiles::index_file_path_list(q.channel.clone(), database.clone()).await?;
info!("got categorized ixpaths: {:?}", ixpaths);
let ixpath = ixpaths.first().unwrap().clone();
let stream = blockrefstream::blockref_stream(q.channel.clone(), q.range.clone(), q.expand, ixpath.clone());

View File

@@ -2,7 +2,7 @@ use crate::archeng::indexfiles::database_connect;
use err::{ErrStr, Error};
use futures_core::{Future, Stream};
use futures_util::{FutureExt, StreamExt};
use netpod::log::*;
use netpod::{log::*, NodeConfigCached};
use netpod::{Channel, ChannelArchiver, ChannelConfigQuery, ChannelConfigResponse, Database, NanoRange};
use serde::Serialize;
use serde_json::Value as JsVal;
@@ -126,6 +126,7 @@ pub enum ConfigItem {
}
pub struct ConfigStream {
node: NodeConfigCached,
conf: ChannelArchiver,
inp: ChannelNameStream,
inp_done: bool,
@@ -136,8 +137,9 @@ pub struct ConfigStream {
}
impl ConfigStream {
pub fn new(inp: ChannelNameStream, conf: ChannelArchiver) -> Self {
pub fn new(inp: ChannelNameStream, node: NodeConfigCached, conf: ChannelArchiver) -> Self {
Self {
node,
conf,
inp,
inp_done: false,
@@ -178,7 +180,7 @@ impl Stream for ConfigStream {
Ready(Ok(Res::Response(item))) => {
self.get_fut = None;
let name = item.channel.name.clone();
let dbconf = self.conf.database.clone();
let dbconf = self.node.node_config.cluster.database.clone();
let config = serde_json::to_value(&item)?;
let fut = async move {
let dbc = database_connect(&dbconf).await?;
@@ -193,7 +195,7 @@ impl Stream for ConfigStream {
}
Ready(Ok(Res::TimedOut(name))) => {
self.get_fut = None;
let dbconf = self.conf.database.clone();
let dbconf = self.node.node_config.cluster.database.clone();
let config = serde_json::to_value(&"TimedOut")?;
let fut = async move {
let dbc = database_connect(&dbconf).await?;
@@ -220,6 +222,7 @@ impl Stream for ConfigStream {
match self.inp.poll_next_unpin(cx) {
Ready(Some(Ok(item))) => {
let conf = self.conf.clone();
let database = self.node.node_config.cluster.database.clone();
let fut = async move {
let channel = Channel {
name: item,
@@ -236,7 +239,7 @@ impl Stream for ConfigStream {
range: NanoRange { beg, end },
expand: true,
};
let fut = super::channel_config(&q, &conf);
let fut = super::channel_config(&q, &conf, &database);
let fut = tokio::time::timeout(Duration::from_millis(2000), fut);
match fut.await {
Ok(Ok(k)) => Ok(Res::Response(k)),

View File

@@ -6,6 +6,7 @@ use err::{ErrStr, Error};
use futures_core::{Future, Stream};
use futures_util::stream::unfold;
use netpod::log::*;
use netpod::NodeConfigCached;
use netpod::{Channel, ChannelArchiver, Database};
use regex::Regex;
use std::collections::BTreeMap;
@@ -161,13 +162,15 @@ enum ScanIndexFilesSteps {
}
struct ScanIndexFiles {
node: NodeConfigCached,
conf: ChannelArchiver,
steps: ScanIndexFilesSteps,
}
impl ScanIndexFiles {
fn new(conf: ChannelArchiver) -> Self {
fn new(conf: ChannelArchiver, node: NodeConfigCached) -> Self {
Self {
node,
conf,
steps: ScanIndexFilesSteps::Level0,
}
@@ -184,7 +187,7 @@ impl ScanIndexFiles {
ScanIndexFilesSteps::Level1(paths) => {
let paths = get_level_1(paths).await?;
info!("collected {} level 1 paths", paths.len());
let dbc = database_connect(&self.conf.database).await?;
let dbc = database_connect(&self.node.node_config.cluster.database).await?;
for p in paths {
let ps = p.to_string_lossy();
let rows = dbc
@@ -238,8 +241,8 @@ impl UnfoldExec for ScanIndexFiles {
}
}
pub fn scan_index_files(conf: ChannelArchiver) -> impl Stream<Item = Result<String, Error>> {
unfold_stream(ScanIndexFiles::new(conf.clone()))
pub fn scan_index_files(conf: ChannelArchiver, node: NodeConfigCached) -> impl Stream<Item = Result<String, Error>> {
unfold_stream(ScanIndexFiles::new(conf.clone(), node))
/*
enum UnfoldState {
Running(ScanIndexFiles),
@@ -302,13 +305,16 @@ enum ScanChannelsSteps {
}
struct ScanChannels {
node: NodeConfigCached,
#[allow(unused)]
conf: ChannelArchiver,
steps: ScanChannelsSteps,
}
impl ScanChannels {
fn new(conf: ChannelArchiver) -> Self {
fn new(node: NodeConfigCached, conf: ChannelArchiver) -> Self {
Self {
node,
conf,
steps: ScanChannelsSteps::Start,
}
@@ -322,7 +328,7 @@ impl ScanChannels {
Ok(Some((format!("Start"), self)))
}
SelectIndexFile => {
let dbc = database_connect(&self.conf.database).await?;
let dbc = database_connect(&self.node.node_config.cluster.database).await?;
let sql =
"select path from indexfiles where ts_last_channel_search < now() - interval '1 hour' limit 1";
let rows = dbc.query(sql, &[]).await.errstr()?;
@@ -337,7 +343,7 @@ impl ScanChannels {
ReadChannels(mut paths) => {
// TODO stats
let stats = &StatsChannel::dummy();
let dbc = database_connect(&self.conf.database).await?;
let dbc = database_connect(&self.node.node_config.cluster.database).await?;
if let Some(path) = paths.pop() {
let rows = dbc
.query("select rowid from indexfiles where path = $1", &[&path])
@@ -411,8 +417,8 @@ impl UnfoldExec for ScanChannels {
}
}
pub fn scan_channels(conf: ChannelArchiver) -> impl Stream<Item = Result<String, Error>> {
unfold_stream(ScanChannels::new(conf.clone()))
pub fn scan_channels(node: NodeConfigCached, conf: ChannelArchiver) -> impl Stream<Item = Result<String, Error>> {
unfold_stream(ScanChannels::new(node, conf.clone()))
}
#[derive(Debug)]

View File

@@ -8,14 +8,16 @@ use items::eventsitem::EventsItem;
use items::plainevents::{PlainEvents, WavePlainEvents};
use items::waveevents::{WaveNBinner, WaveXBinner};
use items::{EventsNodeProcessor, Framable, LogItem, RangeCompletableItem, StreamItem};
use netpod::log::*;
use netpod::query::RawEventsQuery;
use netpod::{log::*, AggKind, Shape};
use netpod::{AggKind, NodeConfigCached, Shape};
use netpod::{ChannelArchiver, ChannelConfigQuery};
use std::pin::Pin;
use streams::rangefilter::RangeFilter;
pub async fn make_event_pipe(
evq: &RawEventsQuery,
node: NodeConfigCached,
conf: ChannelArchiver,
) -> Result<Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>, Error> {
debug!("make_event_pipe {:?}", evq);
@@ -25,10 +27,14 @@ pub async fn make_event_pipe(
range: evq.range.clone(),
expand: evq.agg_kind.need_expand(),
};
crate::archeng::channel_config_from_db(&q, &conf).await?
crate::archeng::channel_config_from_db(&q, &conf, &node.node_config.cluster.database).await?
};
debug!("Channel config: {:?}", channel_config);
let ixpaths = crate::archeng::indexfiles::index_file_path_list(evq.channel.clone(), conf.database.clone()).await?;
let ixpaths = crate::archeng::indexfiles::index_file_path_list(
evq.channel.clone(),
node.node_config.cluster.database.clone(),
)
.await?;
debug!("got categorized ixpaths: {:?}", ixpaths);
let ixpath = if let Some(x) = ixpaths.first() {
x.clone()