Remove RawEventsQuery

This commit is contained in:
Dominik Werder
2022-12-09 20:46:23 +01:00
parent f1292a5b32
commit b9607f27d2
21 changed files with 542 additions and 431 deletions

View File

@@ -5,7 +5,7 @@ use futures_util::{Future, Stream, StreamExt};
use items::binsdim0::MinMaxAvgDim0Bins;
use items::{empty_binned_dyn, empty_events_dyn, RangeCompletableItem, StreamItem, TimeBinned};
use netpod::log::*;
use netpod::query::{CacheUsage, RawEventsQuery};
use netpod::query::{CacheUsage, PlainEventsQuery};
use netpod::timeunits::*;
use netpod::{
AggKind, ChannelTyped, PreBinnedPatchCoord, PreBinnedPatchIterator, PreBinnedPatchRange, ScalarType, ScyllaConfig,
@@ -348,7 +348,14 @@ pub async fn fetch_uncached_binned_events(
let deadline = deadline
.checked_add(Duration::from_millis(6000))
.ok_or_else(|| Error::with_msg_no_trace(format!("deadline overflow")))?;
let evq = RawEventsQuery::new(chn.channel.clone(), coord.patch_range(), agg_kind);
let evq = PlainEventsQuery::new(
chn.channel.clone(),
coord.patch_range(),
agg_kind,
Duration::from_millis(6000),
None,
true,
);
let mut events_dyn = EventsStreamScylla::new(series, &evq, chn.scalar_type.clone(), chn.shape.clone(), scy, false);
let mut complete = false;
loop {

137
dbconn/src/channelconfig.rs Normal file
View File

@@ -0,0 +1,137 @@
use err::Error;
use netpod::log::*;
use netpod::{Channel, NodeConfigCached, ScalarType, Shape};
use crate::ErrConv;
/// Resolved storage configuration for a single channel.
pub struct ChConf {
    /// Series id identifying this channel's event series in storage.
    pub series: u64,
    /// Primitive type of one event value as stored (e.g. I32, F64).
    pub scalar_type: ScalarType,
    /// Dimensionality of each event (scalar, waveform of fixed length, ...).
    pub shape: Shape,
}
/// It is an unsolved question as to how we want to uniquely address channels.
/// Currently, the usual (backend, channelname) works in 99% of the cases, but the edge-cases
/// are not solved. At the same time, it is desirable to avoid to complicate things for users.
/// Current state:
/// If the series id is given, we take that.
/// Otherwise we try to uniquely identify the series id from the given information.
/// In the future, we can even try to involve time range information for that, but backends like
/// old archivers and sf databuffer do not support such lookup.
pub async fn chconf_from_database(channel: &Channel, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
if channel.backend != ncc.node_config.cluster.backend {
warn!(
"mismatched backend {} vs {}",
channel.backend, ncc.node_config.cluster.backend
);
}
if channel.backend() == "test-inmem" {
let ret = if channel.name() == "inmem-d0-i32" {
let ret = ChConf {
series: 1,
scalar_type: ScalarType::I32,
shape: Shape::Scalar,
};
Ok(ret)
} else {
error!("no test information");
Err(Error::with_msg_no_trace(format!("no test information"))
.add_public_msg("No channel config for test channel {:?}"))
};
return ret;
}
if channel.backend() == "test-disk-databuffer" {
// TODO the series-ids here are just random. Need to integrate with better test setup.
let ret = if channel.name() == "scalar-i32-be" {
let ret = ChConf {
series: 1,
scalar_type: ScalarType::I32,
shape: Shape::Scalar,
};
Ok(ret)
} else if channel.name() == "wave-f64-be-n21" {
let ret = ChConf {
series: 2,
scalar_type: ScalarType::F64,
shape: Shape::Wave(21),
};
Ok(ret)
} else if channel.name() == "const-regular-scalar-i32-be" {
let ret = ChConf {
series: 3,
scalar_type: ScalarType::I32,
shape: Shape::Scalar,
};
Ok(ret)
} else {
error!("no test information");
Err(Error::with_msg_no_trace(format!("no test information"))
.add_public_msg("No channel config for test channel {:?}"))
};
return ret;
}
// TODO use a common already running worker pool for these queries:
let dbconf = &ncc.node_config.cluster.database;
let dburl = format!(
"postgresql://{}:{}@{}:{}/{}",
dbconf.user, dbconf.pass, dbconf.host, dbconf.port, dbconf.name
);
let (pgclient, pgconn) = tokio_postgres::connect(&dburl, tokio_postgres::NoTls)
.await
.err_conv()?;
tokio::spawn(pgconn);
if let Some(series) = channel.series() {
let res = pgclient
.query(
"select scalar_type, shape_dims from series_by_channel where series = $1",
&[&(series as i64)],
)
.await
.err_conv()?;
if res.len() < 1 {
warn!("can not find channel information for series {series} given through {channel:?}");
let e = Error::with_public_msg_no_trace(format!("can not find channel information for {channel:?}"));
Err(e)
} else {
let row = res.first().unwrap();
let scalar_type = ScalarType::from_dtype_index(row.get::<_, i32>(0) as u8)?;
// TODO can I get a slice from psql driver?
let shape = Shape::from_scylla_shape_dims(&row.get::<_, Vec<i32>>(1))?;
let ret = ChConf {
series,
scalar_type,
shape,
};
Ok(ret)
}
} else {
let res = pgclient
.query(
"select series, scalar_type, shape_dims from series_by_channel where facility = $1 and channel = $2",
&[&channel.backend(), &channel.name()],
)
.await
.err_conv()?;
if res.len() < 1 {
warn!("can not find channel information for {channel:?}");
let e = Error::with_public_msg_no_trace(format!("can not find channel information for {channel:?}"));
Err(e)
} else if res.len() > 1 {
warn!("ambigious channel {channel:?}");
let e = Error::with_public_msg_no_trace(format!("ambigious channel {channel:?}"));
Err(e)
} else {
let row = res.first().unwrap();
let series = row.get::<_, i64>(0) as u64;
let scalar_type = ScalarType::from_dtype_index(row.get::<_, i32>(1) as u8)?;
// TODO can I get a slice from psql driver?
let shape = Shape::from_scylla_shape_dims(&row.get::<_, Vec<i32>>(2))?;
let ret = ChConf {
series,
scalar_type,
shape,
};
Ok(ret)
}
}
}

View File

@@ -5,14 +5,17 @@ pub mod search;
pub mod pg {
pub use tokio_postgres::{Client, Error};
}
pub mod channelconfig;
use err::Error;
use netpod::log::*;
use netpod::{log::*, ScalarType, Shape};
use netpod::{Channel, Database, NodeConfigCached, ScyllaConfig};
use scylla::frame::response::cql_to_rust::FromRowError as ScyFromRowError;
use scylla::transport::errors::{NewSessionError as ScyNewSessionError, QueryError as ScyQueryError};
use scylla::Session as ScySession;
use std::sync::Arc;
use std::time::Duration;
use tokio_postgres::{Client, NoTls};
use tokio_postgres::{Client, Client as PgClient, NoTls};
trait ErrConv<T> {
fn err_conv(self) -> Result<T, Error>;
@@ -188,3 +191,40 @@ pub async fn insert_channel(name: String, facility: i64, dbc: &Client) -> Result
}
Ok(())
}
/// Resolve the (series id, scalar type, shape) for `channel` from the
/// `series_by_channel` table. A series id carried by the channel is used
/// directly; otherwise the lookup goes through (facility, channel name).
/// Fails when no match exists or when the name resolves ambiguously.
pub async fn find_series(channel: &Channel, pgclient: Arc<PgClient>) -> Result<(u64, ScalarType, Shape), Error> {
    info!("find_series channel {:?}", channel);
    let rows = match channel.series() {
        Some(series) => {
            let q = "select series, facility, channel, scalar_type, shape_dims from series_by_channel where series = $1";
            pgclient.query(q, &[&(series as i64)]).await.err_conv()?
        }
        None => {
            let q = "select series, facility, channel, scalar_type, shape_dims from series_by_channel where facility = $1 and channel = $2";
            pgclient
                .query(q, &[&channel.backend(), &channel.name()])
                .await
                .err_conv()?
        }
    };
    if rows.is_empty() {
        return Err(Error::with_public_msg_no_trace(format!(
            "No series found for {channel:?}"
        )));
    }
    if rows.len() > 1 {
        error!("Multiple series found for {channel:?}");
        return Err(Error::with_public_msg_no_trace(
            "Multiple series found for channel, can not return data for ambiguous series",
        ));
    }
    // Exactly one row remains at this point; the error branch is a safety net.
    let row = rows
        .into_iter()
        .next()
        .ok_or_else(|| Error::with_public_msg_no_trace(format!("can not find series for channel")))?;
    let series = row.get::<_, i64>(0) as u64;
    let _facility: String = row.get(1);
    let _channel: String = row.get(2);
    let scalar_type = ScalarType::from_scylla_i32(row.get::<_, i32>(3))?;
    let shape = Shape::from_scylla_shape_dims(&row.get::<_, Vec<i32>>(4))?;
    Ok((series, scalar_type, shape))
}

View File

@@ -5,9 +5,9 @@ use items::scalarevents::ScalarEvents;
use items::waveevents::WaveEvents;
use items::{EventsDyn, RangeCompletableItem, Sitemty, StreamItem};
use netpod::log::*;
use netpod::query::{ChannelStateEventsQuery, RawEventsQuery};
use netpod::query::{ChannelStateEventsQuery, PlainEventsQuery};
use netpod::timeunits::DAY;
use netpod::{Database, NanoRange, ScalarType, ScyllaConfig, Shape};
use netpod::{NanoRange, ScalarType, ScyllaConfig, Shape};
use scylla::Session as ScySession;
use std::collections::VecDeque;
use std::pin::Pin;
@@ -126,8 +126,6 @@ enum FrState {
pub struct EventsStreamScylla {
state: FrState,
series: u64,
#[allow(unused)]
evq: RawEventsQuery,
scalar_type: ScalarType,
shape: Shape,
range: NanoRange,
@@ -139,7 +137,7 @@ pub struct EventsStreamScylla {
impl EventsStreamScylla {
pub fn new(
series: u64,
evq: &RawEventsQuery,
evq: &PlainEventsQuery,
scalar_type: ScalarType,
shape: Shape,
scy: Arc<ScySession>,
@@ -148,10 +146,9 @@ impl EventsStreamScylla {
Self {
state: FrState::New,
series,
evq: evq.clone(),
scalar_type,
shape,
range: evq.range.clone(),
range: evq.range().clone(),
ts_msps: VecDeque::new(),
scy,
do_test_stream_error,
@@ -476,18 +473,6 @@ read_next_scalar_values!(read_next_values_scalar_f64, f64, f64, "events_scalar_f
read_next_array_values!(read_next_values_array_u16, u16, i16, "events_wave_u16");
// TODO remove
#[allow(unused)]
async fn make_scylla_stream(
_evq: &RawEventsQuery,
_scyco: &ScyllaConfig,
_dbconf: Database,
_do_test_stream_error: bool,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<Box<dyn EventsDyn>>> + Send>>, Error> {
error!("forward call to crate scyllaconn");
err::todoval()
}
pub async fn channel_state_events(
evq: &ChannelStateEventsQuery,
scyco: &ScyllaConfig,