Remove unused

This commit is contained in:
Dominik Werder
2022-12-13 08:33:32 +01:00
parent e81337c22f
commit dfadb530d5
18 changed files with 97 additions and 1473 deletions

View File

@@ -1,470 +0,0 @@
use crate::events_scylla::EventsStreamScylla;
use crate::ErrConv;
use err::Error;
use futures_util::{Future, Stream, StreamExt};
use items::binsdim0::MinMaxAvgDim0Bins;
use items::{empty_binned_dyn, empty_events_dyn, RangeCompletableItem, StreamItem, TimeBinned};
use netpod::log::*;
use netpod::query::{CacheUsage, PlainEventsQuery};
use netpod::timeunits::*;
use netpod::{
AggKind, ChannelTyped, PreBinnedPatchCoord, PreBinnedPatchIterator, PreBinnedPatchRange, ScalarType, ScyllaConfig,
Shape,
};
use scylla::Session as ScySession;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
/// Try to read an already cached pre-binned patch from Scylla.
///
/// Looks up the row keyed by (series, bin length, patch length, patch index)
/// in `binned_scalar_f32`. Returns `Ok(Some(bins))` when a row exists and the
/// channel is a scalar f64 channel (the only combination restorable so far),
/// `Ok(None)` when the cache has no row for this patch. Other type/shape
/// combinations currently hit `err::todoval()`.
///
/// Fix: the bin-edge slicing previously computed `edges.len() - 1` before
/// clamping, which underflows and panics on an empty edge list; now uses
/// `saturating_sub`. Also drops a no-op identity `map` on `avgs`.
pub async fn read_cached_scylla(
    series: u64,
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    _agg_kind: AggKind,
    scy: &ScySession,
) -> Result<Option<Box<dyn TimeBinned>>, Error> {
    // Cache key: times are stored in whole seconds, offset is the patch index.
    let vals = (
        series as i64,
        (coord.bin_t_len() / SEC) as i32,
        (coord.patch_t_len() / SEC) as i32,
        coord.ix() as i64,
    );
    let res = scy
        .query_iter(
            "select counts, avgs, mins, maxs from binned_scalar_f32 where series = ? and bin_len_sec = ? and patch_len_sec = ? and agg_kind = 'dummy-agg-kind' and offset = ?",
            vals,
        )
        .await;
    let mut res = res.err_conv().map_err(|e| {
        error!("can not read from cache");
        e
    })?;
    // At most one row is expected for the key; the first row decides the outcome.
    while let Some(item) = res.next().await {
        let row = item.err_conv()?;
        let edges = coord.edges();
        let (counts, avgs, mins, maxs): (Vec<i64>, Vec<f32>, Vec<f32>, Vec<f32>) = row.into_typed().err_conv()?;
        // Sanity checks: the stored arrays must match the patch geometry.
        let mut counts_mismatch = false;
        if edges.len() != counts.len() + 1 {
            counts_mismatch = true;
        }
        if counts.len() != avgs.len() {
            counts_mismatch = true;
        }
        // Bin begin/end timestamps from the edge list.
        // saturating_sub avoids an underflow panic on an empty edge list.
        let ts1s = edges[..edges.len().saturating_sub(1)].to_vec();
        let ts2s = edges[1.min(edges.len())..].to_vec();
        if ts1s.len() != ts2s.len() {
            error!("ts1s vs ts2s mismatch");
            counts_mismatch = true;
        }
        if ts1s.len() != counts.len() {
            counts_mismatch = true;
        }
        let mins = mins.into_iter().map(|x| x as _).collect::<Vec<_>>();
        let maxs = maxs.into_iter().map(|x| x as _).collect::<Vec<_>>();
        if counts_mismatch {
            // Log and continue: the caller treats the returned data as best-effort.
            error!(
                "mismatch: edges {} ts1s {} ts2s {} counts {} avgs {} mins {} maxs {}",
                edges.len(),
                ts1s.len(),
                ts2s.len(),
                counts.len(),
                avgs.len(),
                mins.len(),
                maxs.len(),
            );
        }
        let counts: Vec<_> = counts.into_iter().map(|x| x as u64).collect();
        // TODO construct a dyn TimeBinned using the scalar type and shape information.
        // TODO place the values with little copying into the TimeBinned.
        use ScalarType::*;
        use Shape::*;
        match &chn.shape {
            Scalar => match &chn.scalar_type {
                F64 => {
                    let ret = MinMaxAvgDim0Bins::<f64> {
                        ts1s,
                        ts2s,
                        counts,
                        avgs,
                        mins,
                        maxs,
                    };
                    return Ok(Some(Box::new(ret)));
                }
                _ => {
                    error!("TODO can not yet restore {:?} {:?}", chn.scalar_type, chn.shape);
                    err::todoval()
                }
            },
            _ => {
                error!("TODO can not yet restore {:?} {:?}", chn.scalar_type, chn.shape);
                err::todoval()
            }
        }
    }
    Ok(None)
}
// Placeholder future for writing a time-binned patch to the cache.
// Currently a no-op: `poll` completes immediately with `Ok(())` and the
// captured references are never used (hence the allow(unused)). The actual
// write happens in `write_cached_scylla`.
#[allow(unused)]
struct WriteFut<'a> {
    // Channel identity plus scalar type and shape.
    chn: &'a ChannelTyped,
    // Patch coordinate: bin length, patch length and patch index.
    coord: &'a PreBinnedPatchCoord,
    // The binned data that would be written.
    data: &'a dyn TimeBinned,
    // Session that would receive the write.
    scy: &'a ScySession,
}
impl<'a> WriteFut<'a> {
    // Bundle the borrowed inputs; no work happens until the future is polled.
    fn new(
        chn: &'a ChannelTyped,
        coord: &'a PreBinnedPatchCoord,
        data: &'a dyn TimeBinned,
        scy: &'a ScySession,
    ) -> Self {
        Self { chn, coord, data, scy }
    }
}
impl<'a> Future for WriteFut<'a> {
    type Output = Result<(), Error>;
    // Resolves immediately without touching the database.
    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        let _ = cx;
        Poll::Ready(Ok(()))
    }
}
/// Write one pre-binned patch into the Scylla cache table `binned_scalar_f32`.
///
/// Returns a boxed future so callers can store it without naming the type.
/// The future borrows `coord`, `data` and `scy` for `'a`. The previous
/// implementation erased the `chn`/`data`/`scy` lifetimes through raw-pointer
/// casts (`unsafe { &*(x as *const _) }`), which is unsound if the future
/// outlives those borrows; tying them to `'a` expresses the same requirement
/// safely and lets the compiler check it.
pub fn write_cached_scylla<'a>(
    series: u64,
    chn: &ChannelTyped,
    coord: &'a PreBinnedPatchCoord,
    data: &'a dyn TimeBinned,
    scy: &'a ScySession,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> {
    // The channel is currently not needed for the write itself.
    let _ = chn;
    let fut = async move {
        let bin_len_sec = (coord.bin_t_len() / SEC) as i32;
        let patch_len_sec = (coord.patch_t_len() / SEC) as i32;
        let offset = coord.ix();
        warn!(
            "write_cached_scylla len {} where series = {} and bin_len_sec = {} and patch_len_sec = {} and agg_kind = 'dummy-agg-kind' and offset = {}",
            data.counts().len(),
            series,
            bin_len_sec,
            patch_len_sec,
            offset,
        );
        // TODO cache the prepared statement instead of re-preparing on every call.
        let stmt = scy.prepare("insert into binned_scalar_f32 (series, bin_len_sec, patch_len_sec, agg_kind, offset, counts, avgs, mins, maxs) values (?, ?, ?, 'dummy-agg-kind', ?, ?, ?, ?, ?)").await.err_conv()?;
        scy.execute(
            &stmt,
            (
                series as i64,
                bin_len_sec,
                patch_len_sec,
                offset as i64,
                // Counts are stored as signed in Scylla.
                data.counts().iter().map(|x| *x as i64).collect::<Vec<_>>(),
                data.avgs(),
                data.mins(),
                data.maxs(),
            ),
        )
        .await
        .err_conv()
        .map_err(|e| {
            error!("can not write to cache");
            e
        })?;
        Ok(())
    };
    Box::pin(fut)
}
/// Produce the bins for one patch without consulting the cache for the patch
/// itself, then (depending on `cache_usage`) store the result.
///
/// Strategy: prefer aggregating from a finer pre-binned grid that covers the
/// patch; if none exists, bin raw events. Returns the bins plus a flag that
/// indicates whether the source range was complete.
pub async fn fetch_uncached_data(
    series: u64,
    chn: ChannelTyped,
    coord: PreBinnedPatchCoord,
    agg_kind: AggKind,
    cache_usage: CacheUsage,
    scy: Arc<ScySession>,
) -> Result<Option<(Box<dyn TimeBinned>, bool)>, Error> {
    info!("fetch_uncached_data {coord:?}");
    // Try to find a higher resolution pre-binned grid which covers the requested patch.
    let (bin, complete) = match PreBinnedPatchRange::covering_range(coord.patch_range(), coord.bin_count() + 1) {
        Ok(Some(range)) => {
            if coord.patch_range() != range.range() {
                // Not fatal: the finer grid may legitimately extend beyond the patch.
                error!(
                    "The chosen covering range does not exactly cover the requested patch {:?} vs {:?}",
                    coord.patch_range(),
                    range.range()
                );
            }
            fetch_uncached_higher_res_prebinned(
                series,
                &chn,
                coord.clone(),
                range,
                agg_kind,
                cache_usage.clone(),
                scy.clone(),
            )
            .await
        }
        // No finer grid available: fall back to binning the raw events.
        Ok(None) => fetch_uncached_binned_events(series, &chn, coord.clone(), agg_kind, scy.clone()).await,
        Err(e) => Err(e),
    }?;
    // NOTE(review): `true ||` forces the write path even for incomplete bins —
    // presumably a temporary override; confirm before removing.
    if true || complete {
        let edges = coord.edges();
        if edges.len() < bin.len() + 1 {
            // More bins than edges: refuse to cache malformed data.
            error!(
                "attempt to write overfull bin to cache edges {} bin {}",
                edges.len(),
                bin.len()
            );
            return Err(Error::with_msg_no_trace(format!(
                "attempt to write overfull bin to cache"
            )));
        } else if edges.len() > bin.len() + 1 {
            // Fewer bins than expected: log but cache anyway.
            let missing = edges.len() - bin.len() - 1;
            error!("attempt to write incomplete bin to cache missing {missing}");
        }
        if let CacheUsage::Use | CacheUsage::Recreate = &cache_usage {
            // WriteFut is currently a no-op placeholder; the real write follows.
            WriteFut::new(&chn, &coord, bin.as_ref(), &scy).await?;
            write_cached_scylla(series, &chn, &coord, bin.as_ref(), &scy).await?;
        }
    }
    Ok(Some((bin, complete)))
}
/// Boxed adapter around [`fetch_uncached_data`].
///
/// Clones the borrowed inputs so the returned future is `'static` + `Send`,
/// which allows it to be stored (e.g. for the mutual recursion through
/// `pre_binned_value_stream_with_scy`).
pub fn fetch_uncached_data_box(
    series: u64,
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    agg_kind: AggKind,
    cache_usage: CacheUsage,
    scy: Arc<ScySession>,
) -> Pin<Box<dyn Future<Output = Result<Option<(Box<dyn TimeBinned>, bool)>, Error>> + Send>> {
    // Take owned copies so nothing borrowed escapes into the future.
    let chn = chn.clone();
    let coord = coord.clone();
    let fut = fetch_uncached_data(series, chn, coord, agg_kind, cache_usage, scy);
    Box::pin(fut)
}
/// Build the bins for `coord` by aggregating from a finer pre-binned grid.
///
/// Iterates over all patches of `range`, fetches each (possibly from cache),
/// feeds them into a time binner configured with this patch's bin edges, and
/// finally drains the binner. The bool in the result is true only if every
/// contributing patch reported a complete range.
pub async fn fetch_uncached_higher_res_prebinned(
    series: u64,
    chn: &ChannelTyped,
    coord: PreBinnedPatchCoord,
    range: PreBinnedPatchRange,
    agg_kind: AggKind,
    cache_usage: CacheUsage,
    scy: Arc<ScySession>,
) -> Result<(Box<dyn TimeBinned>, bool), Error> {
    let edges = coord.edges();
    // TODO refine the AggKind scheme or introduce a new BinningOpts type and get time-weight from there.
    let do_time_weight = true;
    // We must produce some result with correct types even if upstream delivers nothing at all.
    let bin0 = empty_binned_dyn(&chn.scalar_type, &chn.shape, &agg_kind);
    let mut time_binner = bin0.time_binner_new(edges.clone(), do_time_weight);
    let mut complete = true;
    let patch_it = PreBinnedPatchIterator::from_range(range.clone());
    for patch_coord in patch_it {
        // We request data here for a Coord, meaning that we expect to receive multiple bins.
        // The expectation is that we receive a single TimeBinned which contains all bins of that PatchCoord.
        //let patch_coord = PreBinnedPatchCoord::new(patch.bin_t_len(), patch.patch_t_len(), patch.ix());
        let (bin, comp) = pre_binned_value_stream_with_scy(
            series,
            chn,
            &patch_coord,
            agg_kind.clone(),
            cache_usage.clone(),
            scy.clone(),
        )
        .await?;
        if let Err(msg) = bin.validate() {
            // Diagnostic only; the bin is still ingested.
            error!(
                "pre-binned intermediate issue {} coord {:?} patch_coord {:?}",
                msg, coord, patch_coord
            );
        }
        complete = complete && comp;
        time_binner.ingest(bin.as_time_binnable_dyn());
    }
    // Fixed limit to defend against a malformed implementation:
    // cycle the binner until enough bins are ready, but never loop forever.
    let mut i = 0;
    while i < 80000 && time_binner.bins_ready_count() < coord.bin_count() as usize {
        let n1 = time_binner.bins_ready_count();
        if false {
            trace!(
                "pre-binned extra cycle {} {} {}",
                i,
                time_binner.bins_ready_count(),
                coord.bin_count()
            );
        }
        time_binner.cycle();
        i += 1;
        // A cycle that produced no new bin means no further progress is possible.
        if time_binner.bins_ready_count() <= n1 {
            warn!("pre-binned cycle did not add another bin, break");
            break;
        }
    }
    if time_binner.bins_ready_count() < coord.bin_count() as usize {
        return Err(Error::with_msg_no_trace(format!(
            "pre-binned unable to produce all bins for the patch bins_ready {} coord.bin_count {} edges.len {}",
            time_binner.bins_ready_count(),
            coord.bin_count(),
            edges.len(),
        )));
    }
    let ready = time_binner
        .bins_ready()
        .ok_or_else(|| Error::with_msg_no_trace(format!("unable to produce any bins for the patch range")))?;
    if let Err(msg) = ready.validate() {
        error!("pre-binned final issue {} coord {:?}", msg, coord);
    }
    Ok((ready, complete))
}
/// Build the bins for `coord` directly from raw events in Scylla.
///
/// Streams events for the patch range with a fixed 6 s deadline, feeds them
/// into a time binner, then drains the binner. The bool in the result is true
/// iff the event stream signalled `RangeComplete`.
pub async fn fetch_uncached_binned_events(
    series: u64,
    chn: &ChannelTyped,
    coord: PreBinnedPatchCoord,
    agg_kind: AggKind,
    scy: Arc<ScySession>,
) -> Result<(Box<dyn TimeBinned>, bool), Error> {
    let edges = coord.edges();
    // TODO refine the AggKind scheme or introduce a new BinningOpts type and get time-weight from there.
    let do_time_weight = true;
    // We must produce some result with correct types even if upstream delivers nothing at all.
    let bin0 = empty_events_dyn(&chn.scalar_type, &chn.shape, &agg_kind);
    let mut time_binner = bin0.time_binner_new(edges.clone(), do_time_weight);
    // Hard deadline for the whole event read (starts counting now).
    let deadline = Instant::now();
    let deadline = deadline
        .checked_add(Duration::from_millis(6000))
        .ok_or_else(|| Error::with_msg_no_trace(format!("deadline overflow")))?;
    let evq = PlainEventsQuery::new(
        chn.channel.clone(),
        coord.patch_range(),
        agg_kind,
        Duration::from_millis(6000),
        None,
        true,
    );
    let mut events_dyn = EventsStreamScylla::new(series, &evq, chn.scalar_type.clone(), chn.shape.clone(), scy, false);
    let mut complete = false;
    loop {
        // Each poll of the stream races against the shared deadline.
        let item = tokio::time::timeout_at(deadline.into(), events_dyn.next()).await;
        let item = match item {
            Ok(Some(k)) => k,
            // Stream exhausted: all events seen.
            Ok(None) => break,
            Err(_) => {
                error!("fetch_uncached_binned_events timeout");
                return Err(Error::with_msg_no_trace(format!(
                    "TODO handle fetch_uncached_binned_events timeout"
                )));
            }
        };
        match item {
            Ok(StreamItem::DataItem(RangeCompletableItem::Data(item))) => {
                time_binner.ingest(item.as_time_binnable_dyn());
                // TODO could also ask the binner here whether we are "complete" to stop sending useless data.
            }
            Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)) => {
                complete = true;
            }
            Ok(StreamItem::Stats(_item)) => {
                warn!("TODO forward in stream bincache stats");
            }
            Ok(StreamItem::Log(item)) => {
                warn!("TODO forward in stream bincache log msg {}", item.msg);
            }
            Err(e) => return Err(e),
        }
    }
    // Fixed limit to defend against a malformed implementation:
    // cycle the binner until enough bins are ready, but never loop forever.
    let mut i = 0;
    while i < 80000 && time_binner.bins_ready_count() < coord.bin_count() as usize {
        let n1 = time_binner.bins_ready_count();
        if false {
            trace!(
                "events extra cycle {} {} {}",
                i,
                time_binner.bins_ready_count(),
                coord.bin_count()
            );
        }
        time_binner.cycle();
        i += 1;
        // A cycle that produced no new bin means no further progress is possible.
        if time_binner.bins_ready_count() <= n1 {
            warn!("events cycle did not add another bin, break");
            break;
        }
    }
    if time_binner.bins_ready_count() < coord.bin_count() as usize {
        return Err(Error::with_msg_no_trace(format!(
            "events unable to produce all bins for the patch bins_ready {} coord.bin_count {} edges.len {}",
            time_binner.bins_ready_count(),
            coord.bin_count(),
            edges.len(),
        )));
    }
    let ready = time_binner
        .bins_ready()
        .ok_or_else(|| Error::with_msg_no_trace(format!("unable to produce any bins for the patch")))?;
    if let Err(msg) = ready.validate() {
        error!("time binned invalid {} coord {:?}", msg, coord);
    }
    Ok((ready, complete))
}
/// Produce the bins for one patch, consulting the cache first when allowed.
///
/// Fix: the cache was previously queried unconditionally and the result only
/// used when `cache_usage` was `Use` — a wasted round-trip (and a possible
/// spurious error) for `Recreate`/off modes. The read is now gated on
/// `CacheUsage::Use`.
///
/// A cache hit is treated as complete (`true`); otherwise the data is built
/// via `fetch_uncached_data_box` (which may itself write to the cache).
pub async fn pre_binned_value_stream_with_scy(
    series: u64,
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    agg_kind: AggKind,
    cache_usage: CacheUsage,
    scy: Arc<ScySession>,
) -> Result<(Box<dyn TimeBinned>, bool), Error> {
    trace!("pre_binned_value_stream_with_scy {chn:?} {coord:?}");
    // Only touch the cache when the caller actually wants to use it.
    let cached = if let CacheUsage::Use = &cache_usage {
        read_cached_scylla(series, chn, coord, agg_kind.clone(), &scy).await?
    } else {
        None
    };
    if let Some(item) = cached {
        info!("+++++++++++++ GOOD READ");
        Ok((item, true))
    } else {
        if let CacheUsage::Use = &cache_usage {
            warn!("--+--+--+--+--+--+ NOT YET CACHED");
        }
        let res = fetch_uncached_data_box(series, chn, coord, agg_kind, cache_usage, scy).await?;
        let (bin, complete) =
            res.ok_or_else(|| Error::with_msg_no_trace(format!("pre_binned_value_stream_with_scy got None bin")))?;
        Ok((bin, complete))
    }
}
/// Entry point: open a Scylla session and return the patch bins as a
/// one-element stream.
///
/// The completeness flag from the underlying fetch is intentionally dropped
/// here; the stream yields exactly the single `TimeBinned` result.
pub async fn pre_binned_value_stream(
    series: u64,
    chn: &ChannelTyped,
    coord: &PreBinnedPatchCoord,
    agg_kind: AggKind,
    cache_usage: CacheUsage,
    scyconf: &ScyllaConfig,
) -> Result<Pin<Box<dyn Stream<Item = Result<Box<dyn TimeBinned>, Error>> + Send>>, Error> {
    trace!("pre_binned_value_stream series {series} {chn:?} {coord:?} {scyconf:?}");
    // Fresh session against the configured cluster and keyspace.
    let session = scylla::SessionBuilder::new()
        .known_nodes(&scyconf.hosts)
        .use_keyspace(&scyconf.keyspace, true)
        .build()
        .await
        .err_conv()?;
    let scy = Arc::new(session);
    let (bin, _complete) = pre_binned_value_stream_with_scy(series, chn, coord, agg_kind, cache_usage, scy).await?;
    // Wrap the single result in a one-element stream for the caller.
    let stream = futures_util::stream::iter([Ok(bin)]);
    Ok(Box::pin(stream))
}

View File

@@ -1,18 +1,15 @@
pub mod bincache;
pub mod events_scylla;
pub mod channelconfig;
pub mod scan;
pub mod search;
// Re-export the postgres client and error types under a short alias module.
pub mod pg {
    pub use tokio_postgres::{Client, Error};
}
pub mod channelconfig;
use err::Error;
use netpod::{log::*, ScalarType, Shape};
use netpod::{Channel, Database, NodeConfigCached, ScyllaConfig};
use scylla::frame::response::cql_to_rust::FromRowError as ScyFromRowError;
use scylla::transport::errors::{NewSessionError as ScyNewSessionError, QueryError as ScyQueryError};
use scylla::Session as ScySession;
use netpod::log::*;
use netpod::{Channel, Database, NodeConfigCached};
use netpod::{ScalarType, Shape};
use std::sync::Arc;
use std::time::Duration;
use tokio_postgres::{Client, Client as PgClient, NoTls};
@@ -38,32 +35,6 @@ impl<T, A> ErrConv<T> for Result<T, async_channel::SendError<A>> {
}
}
}
impl<T> ErrConv<T> for Result<T, ScyQueryError> {
fn err_conv(self) -> Result<T, Error> {
match self {
Ok(k) => Ok(k),
Err(e) => Err(Error::with_msg_no_trace(format!("{e:?}"))),
}
}
}
impl<T> ErrConv<T> for Result<T, ScyNewSessionError> {
fn err_conv(self) -> Result<T, Error> {
match self {
Ok(k) => Ok(k),
Err(e) => Err(Error::with_msg_no_trace(format!("{e:?}"))),
}
}
}
impl<T> ErrConv<T> for Result<T, ScyFromRowError> {
fn err_conv(self) -> Result<T, Error> {
match self {
Ok(k) => Ok(k),
Err(e) => Err(Error::with_msg_no_trace(format!("{e:?}"))),
}
}
}
pub async fn delay_us(mu: u64) {
tokio::time::sleep(Duration::from_micros(mu)).await;
@@ -95,16 +66,6 @@ pub async fn create_connection(db_config: &Database) -> Result<Client, Error> {
Ok(cl)
}
/// Open a Scylla session for the configured hosts and keyspace.
///
/// The keyspace is selected case-insensitively (second argument `true`).
pub async fn create_scylla_connection(scyconf: &ScyllaConfig) -> Result<ScySession, Error> {
    let builder = scylla::SessionBuilder::new()
        .known_nodes(&scyconf.hosts)
        .use_keyspace(&scyconf.keyspace, true);
    builder.build().await.err_conv()
}
pub async fn channel_exists(channel: &Channel, node_config: &NodeConfigCached) -> Result<bool, Error> {
let cl = create_connection(&node_config.node_config.cluster.database).await?;
let rows = cl

View File

@@ -1,518 +0,0 @@
use crate::ErrConv;
use err::Error;
use futures_util::{Future, FutureExt, Stream, StreamExt};
use items::scalarevents::ScalarEvents;
use items::waveevents::WaveEvents;
use items::{EventsDyn, RangeCompletableItem, Sitemty, StreamItem};
use netpod::log::*;
use netpod::query::{ChannelStateEventsQuery, PlainEventsQuery};
use netpod::timeunits::DAY;
use netpod::{NanoRange, ScalarType, ScyllaConfig, Shape};
use scylla::Session as ScySession;
use std::collections::VecDeque;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
// Glue macro: call the given typed reader function and erase its concrete
// event container to `Box<dyn EventsDyn>`, boxed as a Send future. Used by
// `ReadValues::make_fut` to dispatch on scalar type / shape.
macro_rules! read_values {
    ($fname:ident, $self:expr, $ts_msp:expr) => {{
        let fut = $fname($self.series, $ts_msp, $self.range.clone(), $self.fwd, $self.scy.clone());
        let fut = fut.map(|x| {
            match x {
                Ok(k) => {
                    // TODO why static needed?
                    let b = Box::new(k) as Box<dyn EventsDyn + 'static>;
                    Ok(b)
                }
                Err(e) => Err(e),
            }
        });
        let fut = Box::pin(fut) as Pin<Box<dyn Future<Output = Result<Box<dyn EventsDyn>, Error>> + Send>>;
        fut
    }};
}
// Driver for reading event values MSP-by-MSP from Scylla.
// Holds the currently in-flight read future plus the queue of remaining MSPs.
struct ReadValues {
    // Series id, as stored (signed) in Scylla.
    series: i64,
    scalar_type: ScalarType,
    shape: Shape,
    // Requested time range; used to bound the per-MSP queries.
    range: NanoRange,
    // Remaining MSP partition keys still to be read.
    ts_msps: VecDeque<u64>,
    // true: read forward within the range; false: read back the last event before it.
    fwd: bool,
    // The in-flight read for the current MSP.
    fut: Pin<Box<dyn Future<Output = Result<Box<dyn EventsDyn>, Error>> + Send>>,
    scy: Arc<ScySession>,
}
impl ReadValues {
    // Construct and immediately start the read for the first MSP.
    // `fut` is seeded with a ready error future only as a placeholder; it is
    // replaced by `next()` before the value is ever polled (unless `ts_msps`
    // is empty, in which case the caller must not poll it).
    fn new(
        series: i64,
        scalar_type: ScalarType,
        shape: Shape,
        range: NanoRange,
        ts_msps: VecDeque<u64>,
        fwd: bool,
        scy: Arc<ScySession>,
    ) -> Self {
        let mut ret = Self {
            series,
            scalar_type,
            shape,
            range,
            ts_msps,
            fwd,
            fut: Box::pin(futures_util::future::ready(Err(Error::with_msg_no_trace(
                "future not initialized",
            )))),
            scy,
        };
        ret.next();
        ret
    }
    // Advance to the next MSP: pop it and install its read future.
    // Returns false when all MSPs are exhausted.
    fn next(&mut self) -> bool {
        if let Some(ts_msp) = self.ts_msps.pop_front() {
            self.fut = self.make_fut(ts_msp, self.ts_msps.len() > 1);
            true
        } else {
            false
        }
    }
    // Dispatch to the concrete typed reader for this scalar type and shape.
    // Unsupported combinations currently hit `err::todoval()`.
    fn make_fut(
        &mut self,
        ts_msp: u64,
        _has_more_msp: bool,
    ) -> Pin<Box<dyn Future<Output = Result<Box<dyn EventsDyn>, Error>> + Send>> {
        let fut = match &self.shape {
            Shape::Scalar => match &self.scalar_type {
                ScalarType::I8 => {
                    read_values!(read_next_values_scalar_i8, self, ts_msp)
                }
                ScalarType::I16 => {
                    read_values!(read_next_values_scalar_i16, self, ts_msp)
                }
                ScalarType::I32 => {
                    read_values!(read_next_values_scalar_i32, self, ts_msp)
                }
                ScalarType::F32 => {
                    read_values!(read_next_values_scalar_f32, self, ts_msp)
                }
                ScalarType::F64 => {
                    read_values!(read_next_values_scalar_f64, self, ts_msp)
                }
                _ => err::todoval(),
            },
            Shape::Wave(_) => match &self.scalar_type {
                ScalarType::U16 => {
                    read_values!(read_next_values_array_u16, self, ts_msp)
                }
                _ => err::todoval(),
            },
            _ => err::todoval(),
        };
        fut
    }
}
// States of the event-reading stream.
enum FrState {
    // Not started yet; first poll kicks off the MSP lookup.
    New,
    // Resolving the list of MSP partition keys for the range.
    FindMsp(Pin<Box<dyn Future<Output = Result<VecDeque<u64>, Error>> + Send>>),
    // Reading backward in the largest MSP before the range (one-before event).
    ReadBack1(ReadValues),
    // Fallback: reading backward in the 2nd largest MSP before the range.
    ReadBack2(ReadValues),
    // Reading forward through all MSPs within the range.
    ReadValues(ReadValues),
    Done,
}
// Stream of event batches for one series, read from Scylla.
// Emits the last event before the range first (if any), then the in-range
// events MSP by MSP.
pub struct EventsStreamScylla {
    state: FrState,
    series: u64,
    scalar_type: ScalarType,
    shape: Shape,
    range: NanoRange,
    // MSP partition keys discovered for this series/range.
    ts_msps: VecDeque<u64>,
    scy: Arc<ScySession>,
    // When set, the first poll yields a synthetic error (test hook).
    do_test_stream_error: bool,
}
impl EventsStreamScylla {
pub fn new(
series: u64,
evq: &PlainEventsQuery,
scalar_type: ScalarType,
shape: Shape,
scy: Arc<ScySession>,
do_test_stream_error: bool,
) -> Self {
Self {
state: FrState::New,
series,
scalar_type,
shape,
range: evq.range().clone(),
ts_msps: VecDeque::new(),
scy,
do_test_stream_error,
}
}
fn ts_msps_found(&mut self, ts_msps: VecDeque<u64>) {
info!("found ts_msps {ts_msps:?}");
self.ts_msps = ts_msps;
// Find the largest MSP which can potentially contain some event before the range.
let befores: Vec<_> = self
.ts_msps
.iter()
.map(|x| *x)
.filter(|x| *x < self.range.beg)
.collect();
if befores.len() >= 1 {
let st = ReadValues::new(
self.series as i64,
self.scalar_type.clone(),
self.shape.clone(),
self.range.clone(),
[befores[befores.len() - 1]].into(),
false,
self.scy.clone(),
);
self.state = FrState::ReadBack1(st);
} else if self.ts_msps.len() >= 1 {
let st = ReadValues::new(
self.series as i64,
self.scalar_type.clone(),
self.shape.clone(),
self.range.clone(),
self.ts_msps.clone(),
true,
self.scy.clone(),
);
self.state = FrState::ReadValues(st);
} else {
self.state = FrState::Done;
}
}
fn back_1_done(&mut self, item: Box<dyn EventsDyn>) -> Option<Box<dyn EventsDyn>> {
info!("back_1_done len {}", item.len());
if item.len() == 0 {
// Find the 2nd largest MSP which can potentially contain some event before the range.
let befores: Vec<_> = self
.ts_msps
.iter()
.map(|x| *x)
.filter(|x| *x < self.range.beg)
.collect();
if befores.len() >= 2 {
let st = ReadValues::new(
self.series as i64,
self.scalar_type.clone(),
self.shape.clone(),
self.range.clone(),
[befores[befores.len() - 2]].into(),
false,
self.scy.clone(),
);
self.state = FrState::ReadBack2(st);
None
} else if self.ts_msps.len() >= 1 {
let st = ReadValues::new(
self.series as i64,
self.scalar_type.clone(),
self.shape.clone(),
self.range.clone(),
self.ts_msps.clone(),
true,
self.scy.clone(),
);
self.state = FrState::ReadValues(st);
None
} else {
self.state = FrState::Done;
None
}
} else {
if self.ts_msps.len() > 0 {
let st = ReadValues::new(
self.series as i64,
self.scalar_type.clone(),
self.shape.clone(),
self.range.clone(),
self.ts_msps.clone(),
true,
self.scy.clone(),
);
self.state = FrState::ReadValues(st);
Some(item)
} else {
self.state = FrState::Done;
Some(item)
}
}
}
fn back_2_done(&mut self, item: Box<dyn EventsDyn>) -> Option<Box<dyn EventsDyn>> {
info!("back_2_done len {}", item.len());
if self.ts_msps.len() >= 1 {
let st = ReadValues::new(
self.series as i64,
self.scalar_type.clone(),
self.shape.clone(),
self.range.clone(),
self.ts_msps.clone(),
true,
self.scy.clone(),
);
self.state = FrState::ReadValues(st);
} else {
self.state = FrState::Done;
}
if item.len() > 0 {
Some(item)
} else {
None
}
}
}
// Poll-driven state machine: FindMsp -> (ReadBack1 -> ReadBack2)? -> ReadValues -> Done.
// Each arm either yields an item or transitions and `continue`s the loop.
impl Stream for EventsStreamScylla {
    type Item = Sitemty<Box<dyn EventsDyn>>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        if self.do_test_stream_error {
            // Test hook: emit a synthetic error on every poll.
            let e = Error::with_msg(format!("Test PRIVATE STREAM error."))
                .add_public_msg(format!("Test PUBLIC STREAM error."));
            return Ready(Some(Err(e)));
        }
        loop {
            break match self.state {
                FrState::New => {
                    // Kick off the MSP partition-key lookup.
                    let fut = find_ts_msp(self.series as i64, self.range.clone(), self.scy.clone());
                    let fut = Box::pin(fut);
                    self.state = FrState::FindMsp(fut);
                    continue;
                }
                FrState::FindMsp(ref mut fut) => match fut.poll_unpin(cx) {
                    Ready(Ok(ts_msps)) => {
                        self.ts_msps_found(ts_msps);
                        continue;
                    }
                    Ready(Err(e)) => {
                        self.state = FrState::Done;
                        Ready(Some(Err(e)))
                    }
                    Pending => Pending,
                },
                FrState::ReadBack1(ref mut st) => match st.fut.poll_unpin(cx) {
                    Ready(Ok(item)) => {
                        // back_1_done transitions the state and decides whether to emit.
                        if let Some(item) = self.back_1_done(item) {
                            item.verify();
                            item.output_info();
                            Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))))
                        } else {
                            continue;
                        }
                    }
                    Ready(Err(e)) => {
                        self.state = FrState::Done;
                        Ready(Some(Err(e)))
                    }
                    Pending => Pending,
                },
                FrState::ReadBack2(ref mut st) => match st.fut.poll_unpin(cx) {
                    Ready(Ok(item)) => {
                        if let Some(item) = self.back_2_done(item) {
                            item.verify();
                            item.output_info();
                            Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))))
                        } else {
                            continue;
                        }
                    }
                    Ready(Err(e)) => {
                        self.state = FrState::Done;
                        Ready(Some(Err(e)))
                    }
                    Pending => Pending,
                },
                FrState::ReadValues(ref mut st) => match st.fut.poll_unpin(cx) {
                    Ready(Ok(item)) => {
                        info!("read values");
                        item.verify();
                        item.output_info();
                        // Advance to the next MSP; finish when exhausted.
                        if !st.next() {
                            info!("ReadValues exhausted");
                            self.state = FrState::Done;
                        }
                        Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))))
                    }
                    Ready(Err(e)) => Ready(Some(Err(e))),
                    Pending => Pending,
                },
                FrState::Done => Ready(None),
            };
        }
    }
}
async fn find_ts_msp(_series: i64, _range: NanoRange, _scy: Arc<ScySession>) -> Result<VecDeque<u64>, Error> {
// TODO remove
panic!()
}
// Generate an async reader for scalar events of one type.
// $fname: generated fn name; $st: Rust value type; $scyty: Scylla cell type;
// $table_name: the events table to query.
// Forward mode reads all events of one MSP clipped to the range; backward
// mode reads only the single latest event before the range begin.
macro_rules! read_next_scalar_values {
    ($fname:ident, $st:ty, $scyty:ty, $table_name:expr) => {
        async fn $fname(
            series: i64,
            ts_msp: u64,
            range: NanoRange,
            fwd: bool,
            scy: Arc<ScySession>,
        ) -> Result<ScalarEvents<$st>, Error> {
            type ST = $st;
            type SCYTY = $scyty;
            if ts_msp >= range.end {
                warn!("given ts_msp {} >= range.end {}", ts_msp, range.end);
            }
            // LSP values are bound as i64 below; reject ranges that would overflow.
            if range.end > i64::MAX as u64 {
                return Err(Error::with_msg_no_trace(format!("range.end overflows i64")));
            }
            let res = if fwd {
                // Clip the LSP window to the requested range, relative to this MSP.
                let ts_lsp_min = if ts_msp < range.beg { range.beg - ts_msp } else { 0 };
                let ts_lsp_max = if ts_msp < range.end { range.end - ts_msp } else { 0 };
                trace!(
                    "FWD ts_msp {} ts_lsp_min {} ts_lsp_max {} {}",
                    ts_msp,
                    ts_lsp_min,
                    ts_lsp_max,
                    stringify!($fname)
                );
                // TODO use prepared!
                let cql = concat!(
                    "select ts_lsp, pulse, value from ",
                    $table_name,
                    " where series = ? and ts_msp = ? and ts_lsp >= ? and ts_lsp < ?"
                );
                scy.query(cql, (series, ts_msp as i64, ts_lsp_min as i64, ts_lsp_max as i64))
                    .await
                    .err_conv()?
            } else {
                // Backward: fetch only the newest event before the range begin.
                let ts_lsp_max = if ts_msp < range.beg { range.beg - ts_msp } else { 0 };
                info!(
                    "BCK ts_msp {} ts_lsp_max {} range beg {} end {} {}",
                    ts_msp,
                    ts_lsp_max,
                    range.beg,
                    range.end,
                    stringify!($fname)
                );
                // TODO use prepared!
                let cql = concat!(
                    "select ts_lsp, pulse, value from ",
                    $table_name,
                    " where series = ? and ts_msp = ? and ts_lsp < ? order by ts_lsp desc limit 1"
                );
                scy.query(cql, (series, ts_msp as i64, ts_lsp_max as i64))
                    .await
                    .err_conv()?
            };
            let mut ret = ScalarEvents::<ST>::empty();
            for row in res.rows_typed_or_empty::<(i64, i64, SCYTY)>() {
                let row = row.err_conv()?;
                // Full timestamp = MSP partition key + LSP offset.
                let ts = ts_msp + row.0 as u64;
                let pulse = row.1 as u64;
                let value = row.2 as ST;
                ret.push(ts, pulse, value);
            }
            trace!("found in total {} events ts_msp {}", ret.tss.len(), ts_msp);
            Ok(ret)
        }
    };
}
// Generate an async reader for waveform (array) events of one type.
// NOTE: currently disabled — the body returns an error immediately
// ("redo based on scalar case"); the code below it is unreachable.
macro_rules! read_next_array_values {
    ($fname:ident, $st:ty, $scyty:ty, $table_name:expr) => {
        async fn $fname(
            series: i64,
            ts_msp: u64,
            _range: NanoRange,
            _fwd: bool,
            scy: Arc<ScySession>,
        ) -> Result<WaveEvents<$st>, Error> {
            if true {
                return Err(Error::with_msg_no_trace("redo based on scalar case"));
            }
            type ST = $st;
            type SCYTY = $scyty;
            info!("{} series {} ts_msp {}", stringify!($fname), series, ts_msp);
            let cql = concat!(
                "select ts_lsp, pulse, value from ",
                $table_name,
                " where series = ? and ts_msp = ?"
            );
            let res = scy.query(cql, (series, ts_msp as i64)).await.err_conv()?;
            let mut ret = WaveEvents::<ST>::empty();
            for row in res.rows_typed_or_empty::<(i64, i64, Vec<SCYTY>)>() {
                let row = row.err_conv()?;
                // Full timestamp = MSP partition key + LSP offset.
                let ts = ts_msp + row.0 as u64;
                let pulse = row.1 as u64;
                let value = row.2.into_iter().map(|x| x as ST).collect();
                ret.push(ts, pulse, value);
            }
            info!("found in total {} events ts_msp {}", ret.tss.len(), ts_msp);
            Ok(ret)
        }
    };
}
// Concrete per-type readers generated from the macros above:
// (fn name, Rust value type, Scylla cell type, table name).
read_next_scalar_values!(read_next_values_scalar_i8, i8, i8, "events_scalar_i8");
read_next_scalar_values!(read_next_values_scalar_i16, i16, i16, "events_scalar_i16");
read_next_scalar_values!(read_next_values_scalar_i32, i32, i32, "events_scalar_i32");
read_next_scalar_values!(read_next_values_scalar_f32, f32, f32, "events_scalar_f32");
read_next_scalar_values!(read_next_values_scalar_f64, f64, f64, "events_scalar_f64");
// u16 waveform values are stored as signed i16 cells in Scylla.
read_next_array_values!(read_next_values_array_u16, u16, i16, "events_wave_u16");
pub async fn channel_state_events(
evq: &ChannelStateEventsQuery,
scyco: &ScyllaConfig,
) -> Result<Vec<(u64, u32)>, Error> {
let scy = scylla::SessionBuilder::new()
.known_nodes(&scyco.hosts)
.use_keyspace(&scyco.keyspace, true)
.build()
.await
.err_conv()?;
let scy = Arc::new(scy);
let mut ret = Vec::new();
let div = DAY;
let mut ts_msp = evq.range().beg / div * div;
loop {
let series = (evq
.channel()
.series()
.ok_or(Error::with_msg_no_trace(format!("series id not given"))))?;
let params = (series as i64, ts_msp as i64);
let mut res = scy
.query_iter(
"select ts_lsp, kind from channel_status where series = ? and ts_msp = ?",
params,
)
.await
.err_conv()?;
while let Some(row) = res.next().await {
let row = row.err_conv()?;
let (ts_lsp, kind): (i64, i32) = row.into_typed().err_conv()?;
let ts = ts_msp + ts_lsp as u64;
let kind = kind as u32;
if ts >= evq.range().beg && ts < evq.range().end {
ret.push((ts, kind));
}
}
ts_msp += div;
if ts_msp >= evq.range().end {
break;
}
}
Ok(ret)
}