List indexfiles in directories
@@ -5,7 +5,7 @@ authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2018"

[dependencies]
tokio = { version = "1.7.1", features = ["io-util", "net", "time", "sync", "fs"] }
tokio = { version = "1.7.1", features = ["io-util", "net", "time", "sync", "fs", "parking_lot"] }
tracing = "0.1.26"
futures-core = "0.3.15"
futures-util = "0.3.15"
@@ -17,6 +17,10 @@ bincode = "1.3.3"
chrono = "0.4.19"
protobuf = "2.24.1"
async-channel = "1.6"
parking_lot = "0.11.2"
crc32fast = "1.2.1"
regex = "1.5.4"
tokio-postgres = { version = "0.7.4", features = ["runtime", "with-chrono-0_4", "with-serde_json-1"] }
archapp_xc = { path = "../archapp_xc" }
err = { path = "../err" }
taskrun = { path = "../taskrun" }

File diff suppressed because it is too large
@@ -1,17 +1,21 @@
use crate::archeng::{
open_read, read_channel, read_data_1, read_datafile_header, read_index_datablockref, search_record,
index_file_path_list, open_read, read_channel, read_data_1, read_datafile_header, read_index_datablockref,
search_record, search_record_expand, StatsChannel,
};
use crate::EventsItem;
use crate::eventsitem::EventsItem;
use crate::storagemerge::StorageMerge;
use crate::timed::Timed;
use async_channel::{Receiver, Sender};
use err::Error;
use futures_core::{Future, Stream};
use futures_util::{FutureExt, StreamExt};
use items::{RangeCompletableItem, Sitemty, StreamItem, WithLen};
use items::{inspect_timestamps, RangeCompletableItem, Sitemty, StreamItem, WithLen};
use netpod::{log::*, DataHeaderPos, FilePos, Nanos};
use netpod::{Channel, NanoRange};
use std::collections::VecDeque;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::{Context, Poll};

type FR = (Option<Sitemty<EventsItem>>, Box<dyn FretCb>);
@@ -20,77 +24,133 @@ trait FretCb {
fn call(&mut self, stream: &mut Pin<&mut DatablockStream>);
}

async fn datablock_stream(
range: NanoRange,
channel: Channel,
base_dirs: VecDeque<PathBuf>,
expand: bool,
tx: Sender<Sitemty<EventsItem>>,
) {
match datablock_stream_inner(range, channel, base_dirs, expand, tx.clone()).await {
Ok(_) => {}
Err(e) => match tx.send(Err(e)).await {
Ok(_) => {}
Err(e) => {
if false {
error!("can not send. error: {}", e);
}
}
},
static CHANNEL_SEND_ERROR: AtomicUsize = AtomicUsize::new(0);

fn channel_send_error() {
let c = CHANNEL_SEND_ERROR.fetch_add(1, Ordering::AcqRel);
if c < 10 {
error!("CHANNEL_SEND_ERROR {}", c);
}
}

async fn datablock_stream_inner(
async fn datablock_stream(
range: NanoRange,
channel: Channel,
base_dirs: VecDeque<PathBuf>,
index_files_index_path: PathBuf,
_base_dirs: VecDeque<PathBuf>,
expand: bool,
tx: Sender<Sitemty<EventsItem>>,
max_events: u64,
) {
match datablock_stream_inner(range, channel, expand, index_files_index_path, tx.clone(), max_events).await {
Ok(_) => {}
Err(e) => {
if let Err(_) = tx.send(Err(e)).await {
channel_send_error();
}
}
}
}

async fn datablock_stream_inner_single_index(
range: NanoRange,
channel: Channel,
index_path: PathBuf,
expand: bool,
tx: Sender<Sitemty<EventsItem>>,
max_events: u64,
) -> Result<(), Error> {
let basename = channel
.name()
.split("-")
.next()
.ok_or(Error::with_msg_no_trace("can not find base for channel"))?;
for base in base_dirs {
debug!(
"search for {:?} with basename: {} in path {:?}",
channel, basename, base
);
// TODO need to try both:
let index_path = base.join(format!("archive_{}_SH", basename)).join("index");
let res = open_read(index_path.clone()).await;
debug!("tried to open index file: {:?}", res);
if let Ok(mut index_file) = res {
if let Some(basics) = read_channel(&mut index_file, channel.name()).await? {
let mut events_tot = 0;
let stats = &StatsChannel::new(tx.clone());
debug!("try to open index file: {:?}", index_path);
let res = open_read(index_path.clone(), stats).await;
debug!("opened index file: {:?} {:?}", index_path, res);
match res {
Ok(mut index_file) => {
if let Some(basics) = read_channel(&mut index_file, channel.name(), stats).await? {
let beg = Nanos { ns: range.beg };
let mut expand_beg = expand;
let mut index_ts_max = 0;
let mut search_ts = beg.clone();
let mut last_data_file_path = PathBuf::new();
let mut last_data_file_pos = DataHeaderPos(0);
loop {
// TODO for expand mode, this needs another search function.
let (res, _stats) =
search_record(&mut index_file, basics.rtree_m, basics.rtree_start_pos, search_ts).await?;
let timed_search = Timed::new("search next record");
let (res, _stats) = if expand_beg {
// TODO even though this is an entry in the index, it may reference
// non-existent blocks.
// Therefore, lower expand_beg flag at some later stage only if we've really
// found at least one event in the block.
expand_beg = false;
search_record_expand(
&mut index_file,
basics.rtree_m,
basics.rtree_start_pos,
search_ts,
stats,
)
.await?
} else {
search_record(
&mut index_file,
basics.rtree_m,
basics.rtree_start_pos,
search_ts,
stats,
)
.await?
};
drop(timed_search);
if let Some(nrec) = res {
let rec = nrec.rec();
trace!("found record: {:?}", rec);
let pos = FilePos { pos: rec.child_or_id };
// TODO rename Datablock? → IndexNodeDatablock
trace!("READ Datablock FROM {:?}\n", pos);
let datablock = read_index_datablockref(&mut index_file, pos).await?;
let datablock = read_index_datablockref(&mut index_file, pos, stats).await?;
trace!("Datablock: {:?}\n", datablock);
let data_path = index_path.parent().unwrap().join(datablock.file_name());
if data_path == last_data_file_path && datablock.data_header_pos() == last_data_file_pos {
debug!("skipping because it is the same block");
} else {
trace!("try to open data_path: {:?}", data_path);
match open_read(data_path.clone()).await {
match open_read(data_path.clone(), stats).await {
Ok(mut data_file) => {
let datafile_header =
read_datafile_header(&mut data_file, datablock.data_header_pos()).await?;
read_datafile_header(&mut data_file, datablock.data_header_pos(), stats)
.await?;
trace!("datafile_header -------------- HEADER\n{:?}", datafile_header);
let events = read_data_1(&mut data_file, &datafile_header).await?;
let events =
read_data_1(&mut data_file, &datafile_header, range.clone(), expand_beg, stats)
.await?;
if false {
let msg = inspect_timestamps(&events, range.clone());
trace!("datablock_stream_inner_single_index read_data_1\n{}", msg);
}
{
let mut ts_max = 0;
use items::WithTimestamps;
for i in 0..events.len() {
let ts = events.ts(i);
if ts < ts_max {
error!("unordered event within block at ts {}", ts);
break;
} else {
ts_max = ts;
}
if ts < index_ts_max {
error!(
"unordered event in index branch ts {} index_ts_max {}",
ts, index_ts_max
);
break;
} else {
index_ts_max = ts;
}
}
}
trace!("Was able to read data: {} events", events.len());
events_tot += events.len() as u64;
let item = Ok(StreamItem::DataItem(RangeCompletableItem::Data(events)));
tx.send(item).await?;
}
@@ -111,12 +171,59 @@ async fn datablock_stream_inner(
warn!("nothing found, break");
break;
}
if events_tot >= max_events {
warn!("reached events_tot {} max_events {}", events_tot, max_events);
break;
}
}
} else {
warn!("can not read channel basics from {:?}", index_path);
}
} else {
Ok(())
}
Err(e) => {
warn!("can not find index file at {:?}", index_path);
Err(Error::with_msg_no_trace(format!("can not open index file: {}", e)))
}
}
}

async fn datablock_stream_inner(
range: NanoRange,
channel: Channel,
expand: bool,
index_files_index_path: PathBuf,
tx: Sender<Sitemty<EventsItem>>,
max_events: u64,
) -> Result<(), Error> {
let stats = &StatsChannel::new(tx.clone());
let index_file_path_list = index_file_path_list(channel.clone(), index_files_index_path, stats).await?;
let mut inner_rxs = vec![];
let mut names = vec![];
for index_path in index_file_path_list {
let (tx, rx) = async_channel::bounded(2);
let task = datablock_stream_inner_single_index(
range.clone(),
channel.clone(),
(&index_path).into(),
expand,
tx,
max_events,
);
taskrun::spawn(task);
inner_rxs.push(Box::pin(rx) as Pin<Box<dyn Stream<Item = Sitemty<EventsItem>> + Send>>);
names.push(index_path.to_str().unwrap().into());
}
let task = async move {
let mut inp = StorageMerge::new(inner_rxs, names, range.clone());
while let Some(k) = inp.next().await {
if let Err(_) = tx.send(k).await {
channel_send_error();
break;
}
}
};
taskrun::spawn(task);
Ok(())
}

@@ -132,14 +239,22 @@ pub struct DatablockStream {
}

impl DatablockStream {
pub fn for_channel_range(range: NanoRange, channel: Channel, base_dirs: VecDeque<PathBuf>, expand: bool) -> Self {
pub fn for_channel_range(
range: NanoRange,
channel: Channel,
base_dirs: VecDeque<PathBuf>,
expand: bool,
max_events: u64,
) -> Self {
let (tx, rx) = async_channel::bounded(1);
taskrun::spawn(datablock_stream(
range.clone(),
channel.clone(),
"/index/c5mapped".into(),
base_dirs.clone(),
expand.clone(),
tx,
max_events,
));
let ret = Self {
range,
@@ -151,6 +266,10 @@ impl DatablockStream {
done: false,
complete: false,
};
// TODO keeping for compatibility at the moment:
let _ = &ret.range;
let _ = &ret.channel;
let _ = &ret.expand;
ret
}

@@ -169,7 +288,7 @@ impl DatablockStream {
(None, Box::new(Cb {}))
}

async fn start_with_base_dir(path: PathBuf) -> FR {
async fn start_with_base_dir(_path: PathBuf) -> FR {
warn!("start_with_base_dir");
struct Cb {}
impl FretCb for Cb {
@@ -199,9 +318,7 @@ impl Stream for DatablockStream {
} else if self.done {
self.complete = true;
Ready(None)
} else if true {
self.rx.poll_next_unpin(cx)
} else {
} else if false {
match self.fut.poll_unpin(cx) {
Ready((k, mut fr)) => {
fr.call(&mut self);
@@ -212,6 +329,8 @@ impl Stream for DatablockStream {
}
Pending => Pending,
}
} else {
self.rx.poll_next_unpin(cx)
};
}
}
@@ -219,9 +338,8 @@ impl Stream for DatablockStream {

#[cfg(test)]
mod test {
use crate::EventsItem;

use super::DatablockStream;
use crate::eventsitem::EventsItem;
use chrono::{DateTime, Utc};
use err::Error;
use futures_util::StreamExt;
@@ -258,7 +376,7 @@ mod test {
.map(PathBuf::from)
.collect();
let expand = false;
let datablocks = DatablockStream::for_channel_range(range.clone(), channel, base_dirs, expand);
let datablocks = DatablockStream::for_channel_range(range.clone(), channel, base_dirs, expand, u64::MAX);
let filtered = RangeFilter::<_, EventsItem>::new(datablocks, range, expand);
let mut stream = filtered;
while let Some(block) = stream.next().await {

@@ -1,17 +0,0 @@
use crate::EventsItem;
use futures_core::Stream;
use items::Sitemty;
use std::pin::Pin;
use std::task::{Context, Poll};

pub struct DataStream {}

impl Stream for DataStream {
type Item = Sitemty<EventsItem>;

fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let _ = self;
let _ = cx;
todo!()
}
}
archapp/src/archeng/indexfiles.rs (new file, 312 lines)
@@ -0,0 +1,312 @@
use crate::wrap_task;
use async_channel::Receiver;
use err::Error;
use futures_core::Future;
use futures_core::Stream;
use futures_util::stream::unfold;
use futures_util::FutureExt;
use netpod::log::*;
use netpod::ChannelArchiver;
use netpod::Database;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::fs::read_dir;
use tokio_postgres::Client as PgClient;

pub fn list_index_files(node: &ChannelArchiver) -> Receiver<Result<PathBuf, Error>> {
let node = node.clone();
let (tx, rx) = async_channel::bounded(4);
let tx2 = tx.clone();
let task = async move {
for bp in &node.data_base_paths {
let mut rd = read_dir(bp).await?;
while let Some(e) = rd.next_entry().await? {
let ft = e.file_type().await?;
if ft.is_dir() {
let mut rd = read_dir(e.path()).await?;
while let Some(e) = rd.next_entry().await? {
let ft = e.file_type().await?;
if false && ft.is_dir() {
let mut rd = read_dir(e.path()).await?;
while let Some(e) = rd.next_entry().await? {
let ft = e.file_type().await?;
if ft.is_file() {
if e.file_name().to_string_lossy() == "index" {
tx.send(Ok(e.path())).await?;
}
}
}
} else if ft.is_file() {
if e.file_name().to_string_lossy() == "index" {
tx.send(Ok(e.path())).await?;
}
}
}
} else if ft.is_file() {
if e.file_name().to_string_lossy() == "index" {
tx.send(Ok(e.path())).await?;
}
}
}
}
Ok::<_, Error>(())
};
wrap_task(task, tx2);
rx
}

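`list_index_files` above walks up to two directory levels under each configured base path and sends every file literally named "index" through a bounded channel; `wrap_task` forwards any walk error into the same channel. A minimal consumer sketch (not part of this commit; the `node` value and the printing are illustrative assumptions):

    let rx = list_index_files(&node);
    while let Ok(res) = rx.recv().await {
        match res {
            Ok(path) => println!("found index file: {:?}", path),
            Err(e) => eprintln!("scan error: {}", e),
        }
    }
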
pub struct ScanIndexFiles0 {}

impl Stream for ScanIndexFiles0 {
type Item = ();

fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let _ = cx;
todo!()
}
}

pub async fn get_level_0(conf: ChannelArchiver) -> Result<Vec<PathBuf>, Error> {
let mut ret = vec![];
for bp in &conf.data_base_paths {
let mut rd = read_dir(bp).await?;
while let Some(e) = rd.next_entry().await? {
if e.file_name().to_string_lossy().contains("index") {
warn!("Top-level data path contains `index` entry");
}
let ft = e.file_type().await?;
if ft.is_dir() {
ret.push(e.path());
}
}
}
Ok(ret)
}

pub async fn get_level_1(lev0: Vec<PathBuf>) -> Result<Vec<PathBuf>, Error> {
let mut ret = vec![];
for bp in lev0 {
let mut rd = read_dir(bp).await?;
while let Some(e) = rd.next_entry().await? {
let ft = e.file_type().await?;
if ft.is_file() {
if e.file_name().to_string_lossy() == "index" {
ret.push(e.path());
}
}
}
}
Ok(ret)
}

pub async fn database_connect(db_config: &Database) -> Result<PgClient, Error> {
let d = db_config;
let uri = format!("postgresql://{}:{}@{}:{}/{}", d.user, d.pass, d.host, 5432, d.name);
let (cl, conn) = tokio_postgres::connect(&uri, tokio_postgres::NoTls).await?;
// TODO monitor connection drop.
let _cjh = tokio::spawn(async move {
if let Err(e) = conn.await {
error!("connection error: {}", e);
}
Ok::<_, Error>(())
});
Ok(cl)
}

pub trait UnfoldExec {
type Output: Send;
fn exec(self) -> Pin<Box<dyn Future<Output = Result<Option<(Self::Output, Self)>, Error>> + Send>>
where
Self: Sized;
}

pub fn unfold_stream<St, T>(st: St) -> impl Stream<Item = Result<T, Error>>
where
St: UnfoldExec<Output = T> + Send,
T: Send,
{
enum UnfoldState<St> {
Running(St),
Done,
}
unfold(UnfoldState::Running(st), |st| async move {
match st {
UnfoldState::Running(st) => match st.exec().await {
Ok(Some((item, st))) => Some((Ok(item), UnfoldState::Running(st))),
Ok(None) => None,
Err(e) => Some((Err(e), UnfoldState::Done)),
},
UnfoldState::Done => None,
}
})
}

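`unfold_stream` turns any `UnfoldExec` state machine into a stream: each `exec` call either yields an item plus the successor state, ends the stream with `Ok(None)`, or emits one `Err` after which the `Done` arm stops iteration. A minimal conforming implementor as a sketch (the `Countdown` type is hypothetical, not from this codebase):

    struct Countdown(u32);

    impl UnfoldExec for Countdown {
        type Output = String;

        fn exec(self) -> Pin<Box<dyn Future<Output = Result<Option<(Self::Output, Self)>, Error>> + Send>> {
            Box::pin(async move {
                // One item per remaining step, then terminate with Ok(None).
                if self.0 == 0 {
                    Ok(None)
                } else {
                    Ok(Some((format!("step {}", self.0), Countdown(self.0 - 1))))
                }
            })
        }
    }

`unfold_stream(Countdown(3))` would yield "step 3", "step 2", "step 1" and then complete; `ScanIndexFiles` and `ScanChannels` below follow the same pattern with real work in each step.
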
enum ScanIndexFilesSteps {
Level0,
Level1(Vec<PathBuf>),
Done,
}

struct ScanIndexFiles {
conf: ChannelArchiver,
steps: ScanIndexFilesSteps,
}

impl ScanIndexFiles {
fn new(conf: ChannelArchiver) -> Self {
Self {
conf,
steps: ScanIndexFilesSteps::Level0,
}
}

async fn exec(mut self) -> Result<Option<(String, Self)>, Error> {
match self.steps {
ScanIndexFilesSteps::Level0 => {
let res = get_level_0(self.conf.clone()).await?;
self.steps = ScanIndexFilesSteps::Level1(res);
let item = format!("level 0 done");
Ok(Some((item, self)))
}
ScanIndexFilesSteps::Level1(paths) => {
let paths = get_level_1(paths).await?;
info!("collected {} level 1 paths", paths.len());
let dbc = database_connect(&self.conf.database).await?;
for p in paths {
let sql = "insert into indexfiles (path) values ($1) on conflict do nothing";
dbc.query(sql, &[&p.to_string_lossy()]).await?;
}
self.steps = ScanIndexFilesSteps::Done;
let item = format!("level 1 done");
Ok(Some((item, self)))
}
ScanIndexFilesSteps::Done => Ok(None),
}
}
}

impl UnfoldExec for ScanIndexFiles {
type Output = String;

fn exec(self) -> Pin<Box<dyn Future<Output = Result<Option<(Self::Output, Self)>, Error>> + Send>>
where
Self: Sized,
{
Box::pin(self.exec())
}
}

pub fn scan_index_files(conf: ChannelArchiver) -> impl Stream<Item = Result<String, Error>> {
unfold_stream(ScanIndexFiles::new(conf.clone()))
/*
enum UnfoldState {
Running(ScanIndexFiles),
Done,
}
unfold(UnfoldState::Running(ScanIndexFiles::new(conf)), |st| async move {
match st {
UnfoldState::Running(st) => match st.exec().await {
Ok(Some((item, st))) => Some((Ok(item), UnfoldState::Running(st))),
Ok(None) => None,
Err(e) => {
error!("{}", e);
Some((Err(e), UnfoldState::Done))
}
},
UnfoldState::Done => None,
}
})
*/
}

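A sketch of driving the scan to completion (the `conf` value and the logging are assumptions for illustration):

    use futures_util::StreamExt;

    let mut scan = Box::pin(scan_index_files(conf));
    while let Some(step) = scan.next().await {
        match step {
            // Expected progress items: "level 0 done", then "level 1 done".
            Ok(msg) => info!("{}", msg),
            Err(e) => {
                error!("scan failed: {}", e);
                break;
            }
        }
    }
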
pub fn unfold1() -> impl Stream<Item = String> {
unfold(123u32, |st| async move { Some((format!("{}", st), st)) })
}

pub fn unfold2(_conf: ChannelArchiver) -> () {
/*let f1 = async move {
let _list = get_level_0(conf).await?;
let yld = format!("level 0 done");
let fut = async { Ok(None) };
Ok(Some((yld, Box::pin(fut))))
};
unfold(
Box::pin(f1) as Pin<Box<dyn Future<Output = Result<Option<(String, _)>, Error>>>>,
|st| async {
match st.await {
Ok(None) => None,
Ok(Some((item, st))) => {
//Some((item, st));
//Some((String::new(), Box::pin(async { Ok(None) })))
None
}
Err(e) => {
error!("{}", e);
None
}
}
},
)*/
err::todoval()
}

// -------------------------------------------------

enum ScanChannelsSteps {
Start,
SelectIndexFile,
Done,
}

struct ScanChannels {
conf: ChannelArchiver,
steps: ScanChannelsSteps,
}

impl ScanChannels {
fn new(conf: ChannelArchiver) -> Self {
Self {
conf,
steps: ScanChannelsSteps::Start,
}
}

async fn exec(mut self) -> Result<Option<(String, Self)>, Error> {
use ScanChannelsSteps::*;
match self.steps {
Start => {
self.steps = SelectIndexFile;
Ok(Some((format!("Start"), self)))
}
SelectIndexFile => {
let dbc = database_connect(&self.conf.database).await?;
let sql =
"select path from indexfiles where ts_last_channel_search < now() - interval '1 hour' limit 1";
let rows = dbc.query(sql, &[]).await?;
let mut paths = vec![];
for row in rows {
paths.push(row.get::<_, String>(0));
}
self.steps = Done;
Ok(Some((format!("SelectIndexFile {:?}", paths), self)))
}
Done => Ok(None),
}
}
}

impl UnfoldExec for ScanChannels {
type Output = String;

fn exec(self) -> Pin<Box<dyn Future<Output = Result<Option<(Self::Output, Self)>, Error>> + Send>>
where
Self: Sized,
{
Box::pin(self.exec())
}
}

pub fn scan_channels(conf: ChannelArchiver) -> impl Stream<Item = Result<String, Error>> {
unfold_stream(ScanChannels::new(conf.clone()))
}
@@ -4,6 +4,7 @@ use err::Error;
use futures_core::Stream;
use futures_util::StreamExt;
use items::Framable;
use netpod::ChannelConfigQuery;
use netpod::{query::RawEventsQuery, ChannelArchiver};
use std::pin::Pin;
use streams::rangefilter::RangeFilter;
@@ -12,27 +13,35 @@ pub async fn make_event_pipe(
evq: &RawEventsQuery,
conf: &ChannelArchiver,
) -> Result<Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>, Error> {
// In order to extract something from the channel, need to look up first the type of the channel.
//let ci = channel_info(&evq.channel, aa).await?;
/*let mut inps = vec![];
for p1 in &aa.data_base_paths {
let p2 = p1.clone();
let p3 = make_single_event_pipe(evq, p2).await?;
inps.push(p3);
}
let sm = StorageMerge {
inprng: inps.len() - 1,
current_inp_item: (0..inps.len()).into_iter().map(|_| None).collect(),
completed_inps: vec![false; inps.len()],
inps,
};*/
let range = evq.range.clone();
let channel = evq.channel.clone();
let expand = evq.agg_kind.need_expand();
let data = DatablockStream::for_channel_range(range.clone(), channel, conf.data_base_paths.clone().into(), expand);

// TODO I need the numeric type here which I expect for that channel in order to construct FrameMaker.
// TODO Need to pass that requirement down to disk reader: error if type changes.

let channel_config = {
let q = ChannelConfigQuery {
channel: channel.clone(),
range: range.clone(),
};
crate::archeng::channel_config(&q, conf).await?
};

let data = DatablockStream::for_channel_range(
range.clone(),
channel,
conf.data_base_paths.clone().into(),
expand,
u64::MAX,
);
let filtered = RangeFilter::new(data, range, expand);
let stream = filtered;
let mut frame_maker = Box::new(FrameMaker::untyped(evq.agg_kind.clone())) as Box<dyn FrameMakerTrait>;
let mut frame_maker = Box::new(FrameMaker::with_item_type(
channel_config.scalar_type.clone(),
channel_config.shape.clone(),
evq.agg_kind.clone(),
)) as Box<dyn FrameMakerTrait>;
let ret = stream.map(move |j| frame_maker.make_frame(j));
Ok(Box::pin(ret))
}

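A sketch of how the rebuilt pipe might be consumed (the `evq` and `conf` values and the write step are assumptions for illustration):

    use futures_util::StreamExt;

    let mut pipe = make_event_pipe(&evq, &conf).await?;
    while let Some(frame) = pipe.next().await {
        // Each item is a Box<dyn Framable>, ready to be encoded and written out.
        let _ = frame;
    }
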
archapp/src/binnedevents.rs (new file, 465 lines)
@@ -0,0 +1,465 @@
use items::{
xbinnedscalarevents::XBinnedScalarEvents, xbinnedwaveevents::XBinnedWaveEvents, Appendable, Clearable,
PushableIndex, WithLen, WithTimestamps,
};
use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape};

use crate::{
eventsitem::EventsItem,
plainevents::{PlainEvents, ScalarPlainEvents},
};

#[derive(Debug)]
pub enum SingleBinWaveEvents {
Byte(XBinnedScalarEvents<i8>),
Short(XBinnedScalarEvents<i16>),
Int(XBinnedScalarEvents<i32>),
Float(XBinnedScalarEvents<f32>),
Double(XBinnedScalarEvents<f64>),
}

impl SingleBinWaveEvents {
pub fn variant_name(&self) -> String {
use SingleBinWaveEvents::*;
match self {
Byte(_) => format!("Byte"),
Short(_) => format!("Short"),
Int(_) => format!("Int"),
Float(_) => format!("Float"),
Double(_) => format!("Double"),
}
}

fn x_aggregate(self, ak: &AggKind) -> EventsItem {
use SingleBinWaveEvents::*;
match self {
Byte(k) => match ak {
AggKind::EventBlobs => panic!(),
AggKind::Plain => EventsItem::XBinnedEvents(XBinnedEvents::SingleBinWave(SingleBinWaveEvents::Byte(k))),
AggKind::TimeWeightedScalar => err::todoval(),
AggKind::DimXBins1 => err::todoval(),
AggKind::DimXBinsN(_) => EventsItem::Plain(PlainEvents::Wave(err::todoval())),
},
_ => err::todoval(),
}
}
}

impl Clearable for SingleBinWaveEvents {
fn clear(&mut self) {
match self {
SingleBinWaveEvents::Byte(k) => k.clear(),
SingleBinWaveEvents::Short(k) => k.clear(),
SingleBinWaveEvents::Int(k) => k.clear(),
SingleBinWaveEvents::Float(k) => k.clear(),
SingleBinWaveEvents::Double(k) => k.clear(),
}
}
}

impl Appendable for SingleBinWaveEvents {
fn empty_like_self(&self) -> Self {
match self {
Self::Byte(k) => Self::Byte(k.empty_like_self()),
Self::Short(k) => Self::Short(k.empty_like_self()),
Self::Int(k) => Self::Int(k.empty_like_self()),
Self::Float(k) => Self::Float(k.empty_like_self()),
Self::Double(k) => Self::Double(k.empty_like_self()),
}
}

fn append(&mut self, src: &Self) {
match self {
Self::Byte(k) => match src {
Self::Byte(j) => k.append(j),
_ => panic!(),
},
Self::Short(k) => match src {
Self::Short(j) => k.append(j),
_ => panic!(),
},
Self::Int(k) => match src {
Self::Int(j) => k.append(j),
_ => panic!(),
},
Self::Float(k) => match src {
Self::Float(j) => k.append(j),
_ => panic!(),
},
Self::Double(k) => match src {
Self::Double(j) => k.append(j),
_ => panic!(),
},
}
}
}

impl PushableIndex for SingleBinWaveEvents {
fn push_index(&mut self, src: &Self, ix: usize) {
match self {
Self::Byte(k) => match src {
Self::Byte(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Short(k) => match src {
Self::Short(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Int(k) => match src {
Self::Int(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Float(k) => match src {
Self::Float(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Double(k) => match src {
Self::Double(j) => k.push_index(j, ix),
_ => panic!(),
},
}
}
}

impl WithLen for SingleBinWaveEvents {
fn len(&self) -> usize {
use SingleBinWaveEvents::*;
match self {
Byte(j) => j.len(),
Short(j) => j.len(),
Int(j) => j.len(),
Float(j) => j.len(),
Double(j) => j.len(),
}
}
}

impl WithTimestamps for SingleBinWaveEvents {
fn ts(&self, ix: usize) -> u64 {
use SingleBinWaveEvents::*;
match self {
Byte(j) => j.ts(ix),
Short(j) => j.ts(ix),
Int(j) => j.ts(ix),
Float(j) => j.ts(ix),
Double(j) => j.ts(ix),
}
}
}

impl HasShape for SingleBinWaveEvents {
fn shape(&self) -> Shape {
use SingleBinWaveEvents::*;
match self {
Byte(_) => Shape::Scalar,
Short(_) => Shape::Scalar,
Int(_) => Shape::Scalar,
Float(_) => Shape::Scalar,
Double(_) => Shape::Scalar,
}
}
}

impl HasScalarType for SingleBinWaveEvents {
fn scalar_type(&self) -> ScalarType {
use SingleBinWaveEvents::*;
match self {
Byte(_) => ScalarType::I8,
Short(_) => ScalarType::I16,
Int(_) => ScalarType::I32,
Float(_) => ScalarType::F32,
Double(_) => ScalarType::F64,
}
}
}

#[derive(Debug)]
pub enum MultiBinWaveEvents {
Byte(XBinnedWaveEvents<i8>),
Short(XBinnedWaveEvents<i16>),
Int(XBinnedWaveEvents<i32>),
Float(XBinnedWaveEvents<f32>),
Double(XBinnedWaveEvents<f64>),
}

impl MultiBinWaveEvents {
pub fn variant_name(&self) -> String {
use MultiBinWaveEvents::*;
match self {
Byte(_) => format!("Byte"),
Short(_) => format!("Short"),
Int(_) => format!("Int"),
Float(_) => format!("Float"),
Double(_) => format!("Double"),
}
}

fn x_aggregate(self, ak: &AggKind) -> EventsItem {
use MultiBinWaveEvents::*;
match self {
Byte(k) => match ak {
AggKind::EventBlobs => panic!(),
AggKind::Plain => EventsItem::XBinnedEvents(XBinnedEvents::MultiBinWave(MultiBinWaveEvents::Byte(k))),
AggKind::TimeWeightedScalar => err::todoval(),
AggKind::DimXBins1 => err::todoval(),
AggKind::DimXBinsN(_) => EventsItem::Plain(PlainEvents::Wave(err::todoval())),
},
_ => err::todoval(),
}
}
}

impl Clearable for MultiBinWaveEvents {
fn clear(&mut self) {
match self {
MultiBinWaveEvents::Byte(k) => k.clear(),
MultiBinWaveEvents::Short(k) => k.clear(),
MultiBinWaveEvents::Int(k) => k.clear(),
MultiBinWaveEvents::Float(k) => k.clear(),
MultiBinWaveEvents::Double(k) => k.clear(),
}
}
}

impl Appendable for MultiBinWaveEvents {
fn empty_like_self(&self) -> Self {
match self {
Self::Byte(k) => Self::Byte(k.empty_like_self()),
Self::Short(k) => Self::Short(k.empty_like_self()),
Self::Int(k) => Self::Int(k.empty_like_self()),
Self::Float(k) => Self::Float(k.empty_like_self()),
Self::Double(k) => Self::Double(k.empty_like_self()),
}
}

fn append(&mut self, src: &Self) {
match self {
Self::Byte(k) => match src {
Self::Byte(j) => k.append(j),
_ => panic!(),
},
Self::Short(k) => match src {
Self::Short(j) => k.append(j),
_ => panic!(),
},
Self::Int(k) => match src {
Self::Int(j) => k.append(j),
_ => panic!(),
},
Self::Float(k) => match src {
Self::Float(j) => k.append(j),
_ => panic!(),
},
Self::Double(k) => match src {
Self::Double(j) => k.append(j),
_ => panic!(),
},
}
}
}

impl PushableIndex for MultiBinWaveEvents {
fn push_index(&mut self, src: &Self, ix: usize) {
match self {
Self::Byte(k) => match src {
Self::Byte(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Short(k) => match src {
Self::Short(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Int(k) => match src {
Self::Int(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Float(k) => match src {
Self::Float(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Double(k) => match src {
Self::Double(j) => k.push_index(j, ix),
_ => panic!(),
},
}
}
}

impl WithLen for MultiBinWaveEvents {
fn len(&self) -> usize {
use MultiBinWaveEvents::*;
match self {
Byte(j) => j.len(),
Short(j) => j.len(),
Int(j) => j.len(),
Float(j) => j.len(),
Double(j) => j.len(),
}
}
}

impl WithTimestamps for MultiBinWaveEvents {
fn ts(&self, ix: usize) -> u64 {
use MultiBinWaveEvents::*;
match self {
Byte(j) => j.ts(ix),
Short(j) => j.ts(ix),
Int(j) => j.ts(ix),
Float(j) => j.ts(ix),
Double(j) => j.ts(ix),
}
}
}

impl HasShape for MultiBinWaveEvents {
fn shape(&self) -> Shape {
use MultiBinWaveEvents::*;
match self {
Byte(_) => Shape::Scalar,
Short(_) => Shape::Scalar,
Int(_) => Shape::Scalar,
Float(_) => Shape::Scalar,
Double(_) => Shape::Scalar,
}
}
}

impl HasScalarType for MultiBinWaveEvents {
fn scalar_type(&self) -> ScalarType {
use MultiBinWaveEvents::*;
match self {
Byte(_) => ScalarType::I8,
Short(_) => ScalarType::I16,
Int(_) => ScalarType::I32,
Float(_) => ScalarType::F32,
Double(_) => ScalarType::F64,
}
}
}

#[derive(Debug)]
pub enum XBinnedEvents {
Scalar(ScalarPlainEvents),
SingleBinWave(SingleBinWaveEvents),
MultiBinWave(MultiBinWaveEvents),
}

impl XBinnedEvents {
pub fn variant_name(&self) -> String {
use XBinnedEvents::*;
match self {
Scalar(h) => format!("Scalar({})", h.variant_name()),
SingleBinWave(h) => format!("SingleBinWave({})", h.variant_name()),
MultiBinWave(h) => format!("MultiBinWave({})", h.variant_name()),
}
}

pub fn x_aggregate(self, ak: &AggKind) -> EventsItem {
use XBinnedEvents::*;
match self {
Scalar(k) => EventsItem::Plain(PlainEvents::Scalar(k)),
SingleBinWave(k) => k.x_aggregate(ak),
MultiBinWave(k) => k.x_aggregate(ak),
}
}
}

impl Clearable for XBinnedEvents {
fn clear(&mut self) {
match self {
XBinnedEvents::Scalar(k) => k.clear(),
XBinnedEvents::SingleBinWave(k) => k.clear(),
XBinnedEvents::MultiBinWave(k) => k.clear(),
}
}
}

impl Appendable for XBinnedEvents {
fn empty_like_self(&self) -> Self {
match self {
Self::Scalar(k) => Self::Scalar(k.empty_like_self()),
Self::SingleBinWave(k) => Self::SingleBinWave(k.empty_like_self()),
Self::MultiBinWave(k) => Self::MultiBinWave(k.empty_like_self()),
}
}

fn append(&mut self, src: &Self) {
match self {
Self::Scalar(k) => match src {
Self::Scalar(j) => k.append(j),
_ => panic!(),
},
Self::SingleBinWave(k) => match src {
Self::SingleBinWave(j) => k.append(j),
_ => panic!(),
},
Self::MultiBinWave(k) => match src {
Self::MultiBinWave(j) => k.append(j),
_ => panic!(),
},
}
}
}

impl PushableIndex for XBinnedEvents {
fn push_index(&mut self, src: &Self, ix: usize) {
match self {
Self::Scalar(k) => match src {
Self::Scalar(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::SingleBinWave(k) => match src {
Self::SingleBinWave(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::MultiBinWave(k) => match src {
Self::MultiBinWave(j) => k.push_index(j, ix),
_ => panic!(),
},
}
}
}

impl WithLen for XBinnedEvents {
fn len(&self) -> usize {
use XBinnedEvents::*;
match self {
Scalar(j) => j.len(),
SingleBinWave(j) => j.len(),
MultiBinWave(j) => j.len(),
}
}
}

impl WithTimestamps for XBinnedEvents {
fn ts(&self, ix: usize) -> u64 {
use XBinnedEvents::*;
match self {
Scalar(j) => j.ts(ix),
SingleBinWave(j) => j.ts(ix),
MultiBinWave(j) => j.ts(ix),
}
}
}

impl HasShape for XBinnedEvents {
fn shape(&self) -> Shape {
use XBinnedEvents::*;
match self {
Scalar(h) => h.shape(),
SingleBinWave(h) => h.shape(),
MultiBinWave(h) => h.shape(),
}
}
}

impl HasScalarType for XBinnedEvents {
fn scalar_type(&self) -> ScalarType {
use XBinnedEvents::*;
match self {
Scalar(h) => h.scalar_type(),
SingleBinWave(h) => h.scalar_type(),
MultiBinWave(h) => h.scalar_type(),
}
}
}
@@ -1,9 +1,10 @@
use crate::binnedevents::{MultiBinWaveEvents, SingleBinWaveEvents, XBinnedEvents};
use crate::eventsitem::EventsItem;
use crate::generated::EPICSEvent::PayloadType;
use crate::parse::multi::parse_all_ts;
use crate::parse::PbFileReader;
use crate::{
EventsItem, MultiBinWaveEvents, PlainEvents, ScalarPlainEvents, SingleBinWaveEvents, WavePlainEvents, XBinnedEvents,
};
use crate::plainevents::{PlainEvents, ScalarPlainEvents, WavePlainEvents};
use crate::storagemerge::StorageMerge;
use chrono::{TimeZone, Utc};
use err::Error;
use futures_core::Stream;
@@ -22,7 +23,6 @@ use serde_json::Value as JsonValue;
use std::io::SeekFrom;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::fs::{read_dir, File};
use tokio::io::{AsyncReadExt, AsyncSeekExt};

@@ -51,125 +51,13 @@ pub fn parse_data_filename(s: &str) -> Result<DataFilename, Error> {
Ok(ret)
}

struct StorageMerge {
inps: Vec<Pin<Box<dyn Stream<Item = Sitemty<EventsItem>> + Send>>>,
completed_inps: Vec<bool>,
current_inp_item: Vec<Option<EventsItem>>,
inprng: usize,
}

impl StorageMerge {
fn refill_if_needed(mut self: Pin<&mut Self>, cx: &mut Context) -> Result<(Pin<&mut Self>, bool), Error> {
use Poll::*;
let mut is_pending = false;
for i in 0..self.inps.len() {
if self.current_inp_item[i].is_none() && self.completed_inps[i] == false {
match self.inps[i].poll_next_unpin(cx) {
Ready(j) => {
//
match j {
Some(j) => match j {
Ok(j) => match j {
StreamItem::DataItem(j) => match j {
RangeCompletableItem::Data(j) => {
self.current_inp_item[i] = Some(j);
}
RangeCompletableItem::RangeComplete => {}
},
StreamItem::Log(_) => {}
StreamItem::Stats(_) => {}
},
Err(e) => {
self.completed_inps[i] = true;
error!("inp err {:?}", e);
}
},
None => {
//
self.completed_inps[i] = true;
}
}
}
Pending => {
is_pending = true;
}
}
}
}
Ok((self, is_pending))
}

fn decide_next_item(&mut self) -> Result<Option<Sitemty<EventsItem>>, Error> {
let not_found = 999;
let mut i1 = self.inprng;
let mut j1 = not_found;
let mut tsmin = u64::MAX;
#[allow(unused)]
use items::{WithLen, WithTimestamps};
loop {
if self.completed_inps[i1] {
} else {
match self.current_inp_item[i1].as_ref() {
None => panic!(),
Some(j) => {
if j.len() == 0 {
j1 = i1;
break;
} else {
let ts = j.ts(0);
if ts < tsmin {
tsmin = ts;
j1 = i1;
self.inprng = i1;
} else {
}
}
}
}
}
i1 -= 1;
if i1 == 0 {
break;
}
}
if j1 >= not_found {
Ok(None)
} else {
let j = self.current_inp_item[j1]
.take()
.map(|j| Ok(StreamItem::DataItem(RangeCompletableItem::Data(j))));
Ok(j)
}
}
}

impl Stream for StorageMerge {
type Item = Sitemty<EventsItem>;

fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
let (mut self2, is_pending) = self.refill_if_needed(cx).unwrap();
if is_pending {
Pending
} else {
match self2.decide_next_item() {
Ok(j) => Ready(j),
Err(e) => {
error!("impl Stream for StorageMerge {:?}", e);
panic!()
}
}
}
}
}

pub trait FrameMakerTrait: Send {
fn make_frame(&mut self, ei: Sitemty<EventsItem>) -> Box<dyn Framable>;
}

pub struct FrameMaker {
scalar_type: Option<ScalarType>,
shape: Option<Shape>,
scalar_type: ScalarType,
shape: Shape,
agg_kind: AggKind,
}

@@ -184,19 +72,11 @@ impl FrameMaker {

pub fn with_item_type(scalar_type: ScalarType, shape: Shape, agg_kind: AggKind) -> Self {
Self {
scalar_type: Some(scalar_type),
shape: Some(shape),
scalar_type: scalar_type,
shape: shape,
agg_kind: agg_kind,
}
}

pub fn untyped(agg_kind: AggKind) -> Self {
Self {
scalar_type: None,
shape: None,
agg_kind,
}
}
}

#[allow(unused_macros)]
@@ -351,82 +231,16 @@ macro_rules! arm1 {

impl FrameMakerTrait for FrameMaker {
fn make_frame(&mut self, item: Sitemty<EventsItem>) -> Box<dyn Framable> {
// Take from `self` the expected inner type.
// If `ei` is not some data, then I can't dynamically determine the expected T of Sitemty.
// Therefore, I need to decide that based on given parameters.
// see also channel_info in this mod.
if self.scalar_type.is_none() || self.shape.is_none() {
//let scalar_type = &ScalarType::I8;
//let shape = &Shape::Scalar;
//let agg_kind = &self.agg_kind;
let (scalar_type, shape) = match &item {
Ok(k) => match k {
StreamItem::DataItem(k) => match k {
RangeCompletableItem::RangeComplete => (ScalarType::I8, Shape::Scalar),
RangeCompletableItem::Data(k) => match k {
EventsItem::Plain(k) => match k {
PlainEvents::Scalar(k) => match k {
ScalarPlainEvents::Byte(_) => (ScalarType::I8, Shape::Scalar),
ScalarPlainEvents::Short(_) => (ScalarType::I16, Shape::Scalar),
ScalarPlainEvents::Int(_) => (ScalarType::I32, Shape::Scalar),
ScalarPlainEvents::Float(_) => (ScalarType::F32, Shape::Scalar),
ScalarPlainEvents::Double(_) => (ScalarType::F64, Shape::Scalar),
},
PlainEvents::Wave(k) => match k {
WavePlainEvents::Byte(k) => (ScalarType::I8, Shape::Wave(k.vals[0].len() as u32)),
WavePlainEvents::Short(k) => (ScalarType::I16, Shape::Wave(k.vals[0].len() as u32)),
WavePlainEvents::Int(k) => (ScalarType::I32, Shape::Wave(k.vals[0].len() as u32)),
WavePlainEvents::Float(k) => (ScalarType::F32, Shape::Wave(k.vals[0].len() as u32)),
WavePlainEvents::Double(k) => {
(ScalarType::F64, Shape::Wave(k.vals[0].len() as u32))
}
},
},
EventsItem::XBinnedEvents(k) => match k {
XBinnedEvents::Scalar(k) => match k {
ScalarPlainEvents::Byte(_) => (ScalarType::I8, Shape::Scalar),
ScalarPlainEvents::Short(_) => (ScalarType::I16, Shape::Scalar),
ScalarPlainEvents::Int(_) => (ScalarType::I32, Shape::Scalar),
ScalarPlainEvents::Float(_) => (ScalarType::F32, Shape::Scalar),
ScalarPlainEvents::Double(_) => (ScalarType::F64, Shape::Scalar),
},
XBinnedEvents::SingleBinWave(k) => match k {
SingleBinWaveEvents::Byte(_) => todo!(),
SingleBinWaveEvents::Short(_) => todo!(),
SingleBinWaveEvents::Int(_) => todo!(),
SingleBinWaveEvents::Float(_) => todo!(),
SingleBinWaveEvents::Double(_) => todo!(),
},
XBinnedEvents::MultiBinWave(k) => match k {
MultiBinWaveEvents::Byte(_) => todo!(),
MultiBinWaveEvents::Short(_) => todo!(),
MultiBinWaveEvents::Int(_) => todo!(),
MultiBinWaveEvents::Float(_) => todo!(),
MultiBinWaveEvents::Double(_) => todo!(),
},
},
},
},
StreamItem::Log(_) => (ScalarType::I8, Shape::Scalar),
StreamItem::Stats(_) => (ScalarType::I8, Shape::Scalar),
},
Err(_) => (ScalarType::I8, Shape::Scalar),
};
self.scalar_type = Some(scalar_type);
self.shape = Some(shape);
}
{
let scalar_type = self.scalar_type.as_ref().unwrap();
let shape = self.shape.as_ref().unwrap();
let agg_kind = &self.agg_kind;
match scalar_type {
ScalarType::I8 => arm1!(item, i8, Byte, shape, agg_kind),
ScalarType::I16 => arm1!(item, i16, Short, shape, agg_kind),
ScalarType::I32 => arm1!(item, i32, Int, shape, agg_kind),
ScalarType::F32 => arm1!(item, f32, Float, shape, agg_kind),
ScalarType::F64 => arm1!(item, f64, Double, shape, agg_kind),
_ => err::todoval(),
}
let scalar_type = &self.scalar_type;
let shape = &self.shape;
let agg_kind = &self.agg_kind;
match scalar_type {
ScalarType::I8 => arm1!(item, i8, Byte, shape, agg_kind),
ScalarType::I16 => arm1!(item, i16, Short, shape, agg_kind),
ScalarType::I32 => arm1!(item, i32, Int, shape, agg_kind),
ScalarType::F32 => arm1!(item, f32, Float, shape, agg_kind),
ScalarType::F64 => arm1!(item, f64, Double, shape, agg_kind),
_ => err::todoval(),
}
}
}
@@ -437,17 +251,14 @@ pub async fn make_event_pipe(
) -> Result<Pin<Box<dyn Stream<Item = Box<dyn Framable>> + Send>>, Error> {
let ci = channel_info(&evq.channel, aa).await?;
let mut inps = vec![];
let mut names = vec![];
for p1 in &aa.data_base_paths {
let p2 = p1.clone();
let p3 = make_single_event_pipe(evq, p2).await?;
inps.push(p3);
names.push(p1.to_str().unwrap().into());
}
let sm = StorageMerge {
inprng: inps.len() - 1,
current_inp_item: (0..inps.len()).into_iter().map(|_| None).collect(),
completed_inps: vec![false; inps.len()],
inps,
};
let sm = StorageMerge::new(inps, names, evq.range.clone());
let mut frame_maker = Box::new(FrameMaker::with_item_type(
ci.scalar_type.clone(),
ci.shape.clone(),
@@ -462,7 +273,6 @@ pub async fn make_single_event_pipe(
base_path: PathBuf,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventsItem>> + Send>>, Error> {
// TODO must apply the proper x-binning depending on the requested AggKind.

info!("make_event_pipe {:?}", evq);
let evq = evq.clone();
let DirAndPrefix { dir, prefix } = directory_for_channel_files(&evq.channel, base_path)?;

archapp/src/eventsitem.rs (new file, 156 lines)
@@ -0,0 +1,156 @@
use items::{Appendable, Clearable, PushableIndex, WithLen, WithTimestamps};
use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape};

use crate::{
binnedevents::XBinnedEvents,
plainevents::{PlainEvents, ScalarPlainEvents, WavePlainEvents},
};

#[derive(Debug)]
pub enum EventsItem {
Plain(PlainEvents),
XBinnedEvents(XBinnedEvents),
}

impl EventsItem {
pub fn is_wave(&self) -> bool {
use EventsItem::*;
match self {
Plain(h) => h.is_wave(),
XBinnedEvents(h) => {
if let Shape::Wave(_) = h.shape() {
true
} else {
false
}
}
}
}

pub fn variant_name(&self) -> String {
use EventsItem::*;
match self {
Plain(h) => format!("Plain({})", h.variant_name()),
XBinnedEvents(h) => format!("Plain({})", h.variant_name()),
}
}

pub fn x_aggregate(self, ak: &AggKind) -> Self {
use EventsItem::*;
match self {
Plain(k) => k.x_aggregate(ak),
XBinnedEvents(k) => k.x_aggregate(ak),
}
}

pub fn type_info(&self) -> (ScalarType, Shape) {
match self {
EventsItem::Plain(k) => match k {
PlainEvents::Scalar(k) => match k {
ScalarPlainEvents::Byte(_) => (ScalarType::I8, Shape::Scalar),
ScalarPlainEvents::Short(_) => (ScalarType::I16, Shape::Scalar),
ScalarPlainEvents::Int(_) => (ScalarType::I32, Shape::Scalar),
ScalarPlainEvents::Float(_) => (ScalarType::F32, Shape::Scalar),
ScalarPlainEvents::Double(_) => (ScalarType::F64, Shape::Scalar),
},
PlainEvents::Wave(k) => match k {
// TODO
// Inherent issue for the non-static-type backends:
// there is a chance that we can't determine the shape here.
WavePlainEvents::Byte(k) => (ScalarType::I8, k.shape().unwrap()),
WavePlainEvents::Short(k) => (ScalarType::I16, k.shape().unwrap()),
WavePlainEvents::Int(k) => (ScalarType::I32, k.shape().unwrap()),
WavePlainEvents::Float(k) => (ScalarType::F32, k.shape().unwrap()),
WavePlainEvents::Double(k) => (ScalarType::F64, k.shape().unwrap()),
},
},
EventsItem::XBinnedEvents(_k) => panic!(),
}
}
}

impl WithLen for EventsItem {
fn len(&self) -> usize {
use EventsItem::*;
match self {
Plain(j) => j.len(),
XBinnedEvents(j) => j.len(),
}
}
}

impl WithTimestamps for EventsItem {
fn ts(&self, ix: usize) -> u64 {
use EventsItem::*;
match self {
Plain(j) => j.ts(ix),
XBinnedEvents(j) => j.ts(ix),
}
}
}

impl Appendable for EventsItem {
fn empty_like_self(&self) -> Self {
match self {
EventsItem::Plain(k) => EventsItem::Plain(k.empty_like_self()),
EventsItem::XBinnedEvents(k) => EventsItem::XBinnedEvents(k.empty_like_self()),
}
}

fn append(&mut self, src: &Self) {
match self {
Self::Plain(k) => match src {
Self::Plain(j) => k.append(j),
_ => panic!(),
},
Self::XBinnedEvents(k) => match src {
Self::XBinnedEvents(j) => k.append(j),
_ => panic!(),
},
}
}
}

impl PushableIndex for EventsItem {
fn push_index(&mut self, src: &Self, ix: usize) {
match self {
Self::Plain(k) => match src {
Self::Plain(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::XBinnedEvents(k) => match src {
Self::XBinnedEvents(j) => k.push_index(j, ix),
_ => panic!(),
},
}
}
}

impl Clearable for EventsItem {
fn clear(&mut self) {
match self {
EventsItem::Plain(k) => k.clear(),
EventsItem::XBinnedEvents(k) => k.clear(),
}
}
}

impl HasShape for EventsItem {
fn shape(&self) -> Shape {
use EventsItem::*;
match self {
Plain(h) => h.shape(),
XBinnedEvents(h) => h.shape(),
}
}
}

impl HasScalarType for EventsItem {
fn scalar_type(&self) -> ScalarType {
use EventsItem::*;
match self {
Plain(h) => h.scalar_type(),
XBinnedEvents(h) => h.scalar_type(),
}
}
}
archapp/src/lib.rs (1114 lines changed)
File diff suppressed because it is too large
@@ -1,8 +1,10 @@
pub mod multi;

use crate::events::parse_data_filename;
use crate::eventsitem::EventsItem;
use crate::generated::EPICSEvent::PayloadType;
use crate::{unescape_archapp_msg, EventsItem, PlainEvents, ScalarPlainEvents, WavePlainEvents};
use crate::plainevents::{PlainEvents, ScalarPlainEvents, WavePlainEvents};
use crate::unescape_archapp_msg;
use archapp_xc::*;
use async_channel::{bounded, Receiver};
use chrono::{TimeZone, Utc};

archapp/src/plainevents.rs (new file, 465 lines)
@@ -0,0 +1,465 @@
use crate::binnedevents::{SingleBinWaveEvents, XBinnedEvents};
use crate::eventsitem::EventsItem;
use err::Error;
use items::eventvalues::EventValues;
use items::waveevents::{WaveEvents, WaveXBinner};
use items::{Appendable, Clearable, EventsNodeProcessor, PushableIndex, WithLen, WithTimestamps};
use netpod::{AggKind, HasScalarType, HasShape, ScalarType, Shape};

#[derive(Debug)]
pub enum ScalarPlainEvents {
Byte(EventValues<i8>),
Short(EventValues<i16>),
Int(EventValues<i32>),
Float(EventValues<f32>),
Double(EventValues<f64>),
}

impl ScalarPlainEvents {
pub fn variant_name(&self) -> String {
use ScalarPlainEvents::*;
match self {
Byte(_) => format!("Byte"),
Short(_) => format!("Short"),
Int(_) => format!("Int"),
Float(_) => format!("Float"),
Double(_) => format!("Double"),
}
}
}

impl Clearable for ScalarPlainEvents {
fn clear(&mut self) {
match self {
ScalarPlainEvents::Byte(k) => k.clear(),
ScalarPlainEvents::Short(k) => k.clear(),
ScalarPlainEvents::Int(k) => k.clear(),
ScalarPlainEvents::Float(k) => k.clear(),
ScalarPlainEvents::Double(k) => k.clear(),
}
}
}

impl Appendable for ScalarPlainEvents {
fn empty_like_self(&self) -> Self {
match self {
Self::Byte(k) => Self::Byte(k.empty_like_self()),
Self::Short(k) => Self::Short(k.empty_like_self()),
Self::Int(k) => Self::Int(k.empty_like_self()),
Self::Float(k) => Self::Float(k.empty_like_self()),
Self::Double(k) => Self::Double(k.empty_like_self()),
}
}

fn append(&mut self, src: &Self) {
match self {
Self::Byte(k) => match src {
Self::Byte(j) => k.append(j),
_ => panic!(),
},
Self::Short(k) => match src {
Self::Short(j) => k.append(j),
_ => panic!(),
},
Self::Int(k) => match src {
Self::Int(j) => k.append(j),
_ => panic!(),
},
Self::Float(k) => match src {
Self::Float(j) => k.append(j),
_ => panic!(),
},
Self::Double(k) => match src {
Self::Double(j) => k.append(j),
_ => panic!(),
},
}
}
}

impl PushableIndex for ScalarPlainEvents {
fn push_index(&mut self, src: &Self, ix: usize) {
match self {
Self::Byte(k) => match src {
Self::Byte(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Short(k) => match src {
Self::Short(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Int(k) => match src {
Self::Int(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Float(k) => match src {
Self::Float(j) => k.push_index(j, ix),
_ => panic!(),
},
Self::Double(k) => match src {
Self::Double(j) => k.push_index(j, ix),
_ => panic!(),
},
}
}
}

impl WithLen for ScalarPlainEvents {
fn len(&self) -> usize {
use ScalarPlainEvents::*;
match self {
Byte(j) => j.len(),
Short(j) => j.len(),
Int(j) => j.len(),
Float(j) => j.len(),
Double(j) => j.len(),
}
}
}

impl WithTimestamps for ScalarPlainEvents {
fn ts(&self, ix: usize) -> u64 {
use ScalarPlainEvents::*;
match self {
Byte(j) => j.ts(ix),
Short(j) => j.ts(ix),
Int(j) => j.ts(ix),
Float(j) => j.ts(ix),
Double(j) => j.ts(ix),
}
}
}

impl HasShape for ScalarPlainEvents {
fn shape(&self) -> Shape {
match self {
_ => Shape::Scalar,
}
}
}

impl HasScalarType for ScalarPlainEvents {
fn scalar_type(&self) -> ScalarType {
use ScalarPlainEvents::*;
match self {
Byte(_) => ScalarType::I8,
Short(_) => ScalarType::I16,
Int(_) => ScalarType::I32,
Float(_) => ScalarType::F32,
Double(_) => ScalarType::F64,
}
}
}

#[derive(Debug)]
pub enum WavePlainEvents {
Byte(WaveEvents<i8>),
Short(WaveEvents<i16>),
Int(WaveEvents<i32>),
Float(WaveEvents<f32>),
Double(WaveEvents<f64>),
}

impl WavePlainEvents {
pub fn shape(&self) -> Result<Shape, Error> {
match self {
WavePlainEvents::Byte(k) => k.shape(),
WavePlainEvents::Short(k) => k.shape(),
WavePlainEvents::Int(k) => k.shape(),
WavePlainEvents::Float(k) => k.shape(),
WavePlainEvents::Double(k) => k.shape(),
}
}
}

macro_rules! wagg1 {
($k:expr, $ak:expr, $shape:expr, $sty:ident) => {
match $ak {
AggKind::EventBlobs => panic!(),
AggKind::Plain => EventsItem::Plain(PlainEvents::Wave(WavePlainEvents::$sty($k))),
AggKind::TimeWeightedScalar => {
let p = WaveXBinner::create($shape, $ak.clone());
let j = p.process($k);
EventsItem::XBinnedEvents(XBinnedEvents::SingleBinWave(SingleBinWaveEvents::$sty(j)))
}
AggKind::DimXBins1 => {
let p = WaveXBinner::create($shape, $ak.clone());
let j = p.process($k);
EventsItem::XBinnedEvents(XBinnedEvents::SingleBinWave(SingleBinWaveEvents::$sty(j)))
}
AggKind::DimXBinsN(_) => EventsItem::Plain(PlainEvents::Wave(err::todoval())),
}
};
}

impl WavePlainEvents {
pub fn variant_name(&self) -> String {
use WavePlainEvents::*;
match self {
Byte(h) => format!("Byte({})", h.vals.first().map_or(0, |j| j.len())),
Short(h) => format!("Short({})", h.vals.first().map_or(0, |j| j.len())),
Int(h) => format!("Int({})", h.vals.first().map_or(0, |j| j.len())),
Float(h) => format!("Float({})", h.vals.first().map_or(0, |j| j.len())),
Double(h) => format!("Double({})", h.vals.first().map_or(0, |j| j.len())),
}
}

fn x_aggregate(self, ak: &AggKind) -> EventsItem {
|
||||
use WavePlainEvents::*;
|
||||
let shape = self.shape().unwrap();
|
||||
match self {
|
||||
Byte(k) => wagg1!(k, ak, shape, Byte),
|
||||
Short(k) => wagg1!(k, ak, shape, Short),
|
||||
Int(k) => wagg1!(k, ak, shape, Int),
|
||||
Float(k) => wagg1!(k, ak, shape, Float),
|
||||
Double(k) => wagg1!(k, ak, shape, Double),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clearable for WavePlainEvents {
|
||||
fn clear(&mut self) {
|
||||
match self {
|
||||
WavePlainEvents::Byte(k) => k.clear(),
|
||||
WavePlainEvents::Short(k) => k.clear(),
|
||||
WavePlainEvents::Int(k) => k.clear(),
|
||||
WavePlainEvents::Float(k) => k.clear(),
|
||||
WavePlainEvents::Double(k) => k.clear(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Appendable for WavePlainEvents {
|
||||
fn empty_like_self(&self) -> Self {
|
||||
match self {
|
||||
Self::Byte(k) => Self::Byte(k.empty_like_self()),
|
||||
Self::Short(k) => Self::Short(k.empty_like_self()),
|
||||
Self::Int(k) => Self::Int(k.empty_like_self()),
|
||||
Self::Float(k) => Self::Float(k.empty_like_self()),
|
||||
Self::Double(k) => Self::Double(k.empty_like_self()),
|
||||
}
|
||||
}
|
||||
|
||||
fn append(&mut self, src: &Self) {
|
||||
match self {
|
||||
Self::Byte(k) => match src {
|
||||
Self::Byte(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Short(k) => match src {
|
||||
Self::Short(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Int(k) => match src {
|
||||
Self::Int(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Float(k) => match src {
|
||||
Self::Float(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Double(k) => match src {
|
||||
Self::Double(j) => k.append(j),
|
||||
_ => panic!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PushableIndex for WavePlainEvents {
|
||||
fn push_index(&mut self, src: &Self, ix: usize) {
|
||||
match self {
|
||||
Self::Byte(k) => match src {
|
||||
Self::Byte(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Short(k) => match src {
|
||||
Self::Short(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Int(k) => match src {
|
||||
Self::Int(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Float(k) => match src {
|
||||
Self::Float(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
Self::Double(k) => match src {
|
||||
Self::Double(j) => k.push_index(j, ix),
|
||||
_ => panic!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithLen for WavePlainEvents {
|
||||
fn len(&self) -> usize {
|
||||
use WavePlainEvents::*;
|
||||
match self {
|
||||
Byte(j) => j.len(),
|
||||
Short(j) => j.len(),
|
||||
Int(j) => j.len(),
|
||||
Float(j) => j.len(),
|
||||
Double(j) => j.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTimestamps for WavePlainEvents {
|
||||
fn ts(&self, ix: usize) -> u64 {
|
||||
use WavePlainEvents::*;
|
||||
match self {
|
||||
Byte(j) => j.ts(ix),
|
||||
Short(j) => j.ts(ix),
|
||||
Int(j) => j.ts(ix),
|
||||
Float(j) => j.ts(ix),
|
||||
Double(j) => j.ts(ix),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasShape for WavePlainEvents {
|
||||
fn shape(&self) -> Shape {
|
||||
/*use WavePlainEvents::*;
|
||||
match self {
|
||||
Byte(h) => Shape::Wave(h.vals.first().map_or(0, |x| x.len() as u32)),
|
||||
Short(h) => Shape::Wave(h.vals.first().map_or(0, |x| x.len() as u32)),
|
||||
Int(h) => Shape::Wave(h.vals.first().map_or(0, |x| x.len() as u32)),
|
||||
Float(h) => Shape::Wave(h.vals.first().map_or(0, |x| x.len() as u32)),
|
||||
Double(h) => Shape::Wave(h.vals.first().map_or(0, |x| x.len() as u32)),
|
||||
}*/
|
||||
self.shape().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl HasScalarType for WavePlainEvents {
|
||||
fn scalar_type(&self) -> ScalarType {
|
||||
use WavePlainEvents::*;
|
||||
match self {
|
||||
Byte(_) => ScalarType::I8,
|
||||
Short(_) => ScalarType::I16,
|
||||
Int(_) => ScalarType::I32,
|
||||
Float(_) => ScalarType::F32,
|
||||
Double(_) => ScalarType::F64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum PlainEvents {
|
||||
Scalar(ScalarPlainEvents),
|
||||
Wave(WavePlainEvents),
|
||||
}
|
||||
|
||||
impl PlainEvents {
|
||||
pub fn is_wave(&self) -> bool {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(_) => false,
|
||||
Wave(_) => true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn variant_name(&self) -> String {
|
||||
use PlainEvents::*;
|
||||
match self {
|
||||
Scalar(h) => format!("Scalar({})", h.variant_name()),
|
||||
Wave(h) => format!("Scalar({})", h.variant_name()),
|
||||
        }
    }

    pub fn x_aggregate(self, ak: &AggKind) -> EventsItem {
        use PlainEvents::*;
        match self {
            Scalar(k) => EventsItem::Plain(PlainEvents::Scalar(k)),
            Wave(k) => k.x_aggregate(ak),
        }
    }
}

impl Clearable for PlainEvents {
    fn clear(&mut self) {
        match self {
            PlainEvents::Scalar(k) => k.clear(),
            PlainEvents::Wave(k) => k.clear(),
        }
    }
}

impl Appendable for PlainEvents {
    fn empty_like_self(&self) -> Self {
        match self {
            Self::Scalar(k) => Self::Scalar(k.empty_like_self()),
            Self::Wave(k) => Self::Wave(k.empty_like_self()),
        }
    }

    fn append(&mut self, src: &Self) {
        match self {
            PlainEvents::Scalar(k) => match src {
                Self::Scalar(j) => k.append(j),
                _ => panic!(),
            },
            PlainEvents::Wave(k) => match src {
                Self::Wave(j) => k.append(j),
                _ => panic!(),
            },
        }
    }
}

impl PushableIndex for PlainEvents {
    fn push_index(&mut self, src: &Self, ix: usize) {
        match self {
            Self::Scalar(k) => match src {
                Self::Scalar(j) => k.push_index(j, ix),
                _ => panic!(),
            },
            Self::Wave(k) => match src {
                Self::Wave(j) => k.push_index(j, ix),
                _ => panic!(),
            },
        }
    }
}

impl WithLen for PlainEvents {
    fn len(&self) -> usize {
        use PlainEvents::*;
        match self {
            Scalar(j) => j.len(),
            Wave(j) => j.len(),
        }
    }
}

impl WithTimestamps for PlainEvents {
    fn ts(&self, ix: usize) -> u64 {
        use PlainEvents::*;
        match self {
            Scalar(j) => j.ts(ix),
            Wave(j) => j.ts(ix),
        }
    }
}

impl HasShape for PlainEvents {
    fn shape(&self) -> Shape {
        use PlainEvents::*;
        match self {
            Scalar(h) => HasShape::shape(h),
            Wave(h) => HasShape::shape(h),
        }
    }
}

impl HasScalarType for PlainEvents {
    fn scalar_type(&self) -> ScalarType {
        use PlainEvents::*;
        match self {
            Scalar(h) => h.scalar_type(),
            Wave(h) => h.scalar_type(),
        }
    }
}
archapp/src/storagemerge.rs (new file, 310 lines)
@@ -0,0 +1,310 @@
use err::Error;
use futures_core::Stream;
use futures_util::StreamExt;
use items::{
    inspect_timestamps, Appendable, LogItem, PushableIndex, RangeCompletableItem, Sitemty, StatsItem, StreamItem,
};
use netpod::log::*;
use netpod::{NanoRange, Nanos};
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{SystemTime, UNIX_EPOCH};

use crate::eventsitem::EventsItem;

/**
Priority-merge events from different candidate sources.

Backends like Channel Archiver store the compacted "medium/long-term" data of a channel
in logically unrelated locations on disk, with unspecified semantics and without a
common index over the "short + medium + long term" data.
In order to deliver data even across the boundaries of such (possibly overlapping) data
sources without common lookup tables, the best we can do is fetch the data from all
sources and combine them. StorageMerge performs this combination.
*/
pub struct StorageMerge {
    inps: Vec<Pin<Box<dyn Stream<Item = Sitemty<EventsItem>> + Send>>>,
    names: Vec<String>,
    range: NanoRange,
    completed_inps: Vec<bool>,
    range_complete: Vec<bool>,
    current_inp_item: Vec<Option<EventsItem>>,
    error_items: VecDeque<Error>,
    log_items: VecDeque<LogItem>,
    stats_items: VecDeque<StatsItem>,
    ourname: String,
    inprng: usize,
    data_done: bool,
    done: bool,
    complete: bool,
}
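
The merge policy is easiest to see in isolation: among all inputs that still have a buffered item, pick the one with the smallest head timestamp, emit its events, and cut the emitted block off where another input's head timestamp would interleave. A minimal sketch of that policy follows (illustration only; the impl below does the same over async streams, with logging, stats and error plumbing):

// Sketch only, not part of the commit: the same policy over plain,
// already-sorted (ts, value) vectors instead of async Sitemty streams.
fn merge_by_head_ts(mut srcs: Vec<Vec<(u64, f64)>>) -> Vec<(u64, f64)> {
    let mut out = Vec::new();
    loop {
        // Pick the source whose buffered head timestamp is smallest.
        let i = match srcs
            .iter()
            .enumerate()
            .filter(|(_, s)| !s.is_empty())
            .min_by_key(|(_, s)| s[0].0)
            .map(|(i, _)| i)
        {
            Some(i) => i,
            None => return out,
        };
        // Emit from that source, truncated where the head of any other
        // source would interleave (cf. decide_next_item below; on a tie
        // the picked source emits first).
        let limit = srcs
            .iter()
            .enumerate()
            .filter(|(j, s)| *j != i && !s.is_empty())
            .map(|(_, s)| s[0].0)
            .min()
            .unwrap_or(u64::MAX);
        while let Some(&(ts, v)) = srcs[i].first() {
            if ts > limit {
                break;
            }
            out.push((ts, v));
            srcs[i].remove(0);
        }
    }
}
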
impl StorageMerge {
    pub fn new(
        inps: Vec<Pin<Box<dyn Stream<Item = Sitemty<EventsItem>> + Send>>>,
        names: Vec<String>,
        range: NanoRange,
    ) -> Self {
        assert_eq!(inps.len(), names.len());
        let n = inps.len();
        let mut h = crc32fast::Hasher::new();
        h.update(
            &SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .subsec_nanos()
                .to_le_bytes(),
        );
        let ourname = format!("{:08x}", h.finalize());
        for (i, n) in names.iter().enumerate() {
            debug!("[{}] {} {}", ourname, i, n);
        }
        Self {
            inps,
            names,
            range,
            completed_inps: vec![false; n],
            range_complete: vec![false; n],
            current_inp_item: (0..n).into_iter().map(|_| None).collect(),
            error_items: VecDeque::new(),
            log_items: VecDeque::new(),
            stats_items: VecDeque::new(),
            inprng: n - 1,
            data_done: false,
            done: false,
            complete: false,
            ourname,
        }
    }

    fn refill_if_needed(self: &mut Pin<&mut Self>, cx: &mut Context) -> Result<bool, Error> {
        use Poll::*;
        let mut is_pending = false;
        for i in 0..self.inps.len() {
            while self.current_inp_item[i].is_none() && self.completed_inps[i] == false {
                match self.inps[i].poll_next_unpin(cx) {
                    Ready(j) => match j {
                        Some(j) => match j {
                            Ok(j) => match j {
                                StreamItem::DataItem(j) => match j {
                                    RangeCompletableItem::Data(j) => {
                                        self.current_inp_item[i] = Some(j);
                                    }
                                    RangeCompletableItem::RangeComplete => {
                                        self.range_complete[i] = true;
                                    }
                                },
                                StreamItem::Log(k) => {
                                    self.log_items.push_back(k);
                                }
                                StreamItem::Stats(k) => {
                                    self.stats_items.push_back(k);
                                }
                            },
                            Err(e) => {
                                error!("inp err input {} {:?}", i, e);
                                self.error_items.push_back(e);
                            }
                        },
                        None => {
                            self.completed_inps[i] = true;
                        }
                    },
                    Pending => {
                        is_pending = true;
                        break;
                    }
                }
            }
        }
        Ok(is_pending)
    }

    fn decide_next_item(&mut self) -> Result<Option<Sitemty<EventsItem>>, Error> {
        let not_found = 99999;
        let mut i1 = self.inprng;
        let mut j1 = not_found;
        let mut tsmin = u64::MAX;
        let mut tsend = u64::MAX;
        #[allow(unused)]
        use items::{WithLen, WithTimestamps};
        loop {
            if self.completed_inps[i1] {
            } else {
                match self.current_inp_item[i1].as_ref() {
                    None => panic!(),
                    Some(j) => {
                        if j.len() == 0 {
                            j1 = i1;
                            break;
                        } else {
                            let ts1 = j.ts(0);
                            let ts2 = j.ts(j.len() - 1);
                            if ts1 == u64::MAX || ts2 == u64::MAX {
                                panic!();
                            }
                            trace!("[{}] consider {} {:?}", self.ourname, i1, Nanos::from_ns(ts1));
                            if ts1 <= tsmin {
                                tsmin = ts1;
                                tsend = ts2;
                                j1 = i1;
                                trace!(
                                    "[{}] switch to source {} / {} {}",
                                    self.ourname,
                                    i1,
                                    self.inps.len(),
                                    self.names[i1]
                                );
                                self.inprng = i1;
                            } else {
                            }
                        }
                    }
                }
            }
            if i1 == 0 {
                break;
            }
            i1 -= 1;
        }
        let i1 = ();
        let _ = i1;
        if j1 >= not_found {
            Ok(None)
        } else {
            trace!("[{}] decide for source {}", self.ourname, j1);
            trace!("[{}] decided tsmin {:?}", self.ourname, Nanos::from_ns(tsmin));
            trace!("[{}] decided tsend {:?}", self.ourname, Nanos::from_ns(tsend));
            let mut j5 = not_found;
            let mut tsmin2 = u64::MAX;
            if self.inprng > 0 {
                trace!("[{}] locate the next earliest timestamp", self.ourname);
                let mut i5 = self.inprng - 1;
                loop {
                    if self.completed_inps[i5] {
                    } else {
                        let j = self.current_inp_item[i5].as_ref().unwrap();
                        if j.len() != 0 {
                            let ts1 = j.ts(0);
                            if ts1 == u64::MAX {
                                panic!();
                            }
                            trace!(
                                "[{}] consider {} {:?} for next earliest",
                                self.ourname,
                                i5,
                                Nanos::from_ns(ts1)
                            );
                            if ts1 <= tsmin2 {
                                tsmin2 = ts1;
                                j5 = i5;
                            }
                        }
                    }
                    if i5 == 0 {
                        break;
                    }
                    i5 -= 1;
                }
            }
            trace!(
                "[{}] decided tsmin2 {:?} next earliest timestamp source {}",
                self.ourname,
                Nanos::from_ns(tsmin2),
                j5
            );
            let item = self.current_inp_item[j1].take().unwrap();
            let item = if j5 != not_found && tsmin2 != u64::MAX {
                if tsend >= tsmin2 {
                    {
                        let tsmin = Nanos::from_ns(tsmin);
                        let tsend = Nanos::from_ns(tsend);
                        let tsmin2 = Nanos::from_ns(tsmin2);
                        trace!(
                            "[{}] NEED TO TRUNCATE THE BLOCK tsmin {:?} tsend {:?} tsmin2 {:?}",
                            self.ourname,
                            tsmin,
                            tsend,
                            tsmin2
                        );
                    }
                    let mut out = item.empty_like_self();
                    for i in 0..item.len() {
                        let ts = item.ts(i);
                        if ts < tsmin2 {
                            out.push_index(&item, i);
                        }
                    }
                    out
                } else {
                    item
                }
            } else {
                item
            };
            trace!("[{}] emit {} events", self.ourname, item.len());
            if false {
                let s = inspect_timestamps(&item, self.range.clone());
                trace!("[{}] timestamps:\n{}", self.ourname, s);
            }
            Ok(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))))
        }
    }
}

impl Stream for StorageMerge {
    type Item = Sitemty<EventsItem>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        loop {
            break if self.complete {
                panic!()
            } else if self.done {
                self.complete = true;
                Ready(None)
            } else if let Some(k) = self.error_items.pop_front() {
                Ready(Some(Err(k)))
            } else if let Some(k) = self.log_items.pop_front() {
                Ready(Some(Ok(StreamItem::Log(k))))
            } else if let Some(k) = self.stats_items.pop_front() {
                Ready(Some(Ok(StreamItem::Stats(k))))
            } else if self.data_done {
                self.done = true;
                continue;
            } else {
                match self.refill_if_needed(cx) {
                    Ok(is_pending) => {
                        if is_pending {
                            if self.log_items.len() == 0 && self.stats_items.len() == 0 {
                                Pending
                            } else {
                                continue;
                            }
                        } else if self.error_items.len() != 0 {
                            continue;
                        } else {
                            match self.decide_next_item() {
                                Ok(Some(j)) => Ready(Some(j)),
                                Ok(None) => {
                                    self.data_done = true;
                                    continue;
                                }
                                Err(e) => {
                                    error!("impl Stream for StorageMerge {:?}", e);
                                    Ready(Some(Err(e)))
                                }
                            }
                        }
                    }
                    Err(e) => {
                        error!("{}", e);
                        self.done = true;
                        Ready(Some(Err(e)))
                    }
                }
            };
        }
    }
}
archapp/src/timed.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
use std::time::Instant;

use netpod::log::*;

pub struct Timed {
    name: String,
    ts1: Instant,
}

impl Timed {
    pub fn new<T>(name: T) -> Self
    where
        T: ToString,
    {
        Self {
            name: name.to_string(),
            ts1: Instant::now(),
        }
    }
}

impl Drop for Timed {
    fn drop(&mut self) {
        let ts2 = Instant::now();
        let dt = ts2.duration_since(self.ts1);
        info!("Timed {} {:?}", self.name, dt);
    }
}
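
Timed is a scope guard: Drop logs the elapsed time since construction. A minimal usage sketch (hypothetical call site; binding to `_t` rather than `_` keeps the guard alive until the end of the block):

fn scan_index_dir() {
    // logged on scope exit as: "Timed scan-index-dir <elapsed>"
    let _t = Timed::new("scan-index-dir");
    // ... the work to be timed ...
}
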
@@ -18,7 +18,7 @@ bytes = "1.0.1"
pin-project = "1.0.7"
#async-channel = "1"
#dashmap = "3"
tokio-postgres = { version = "0.7.2", features = ["runtime", "with-chrono-0_4", "with-serde_json-1"] }
tokio-postgres = { version = "0.7.4", features = ["runtime", "with-chrono-0_4", "with-serde_json-1"] }
async-channel = "1.6"
chrono = "0.4"
regex = "1.5.4"

@@ -335,8 +335,6 @@ pub async fn update_db_with_channel_names(
    let tx2 = tx.clone();
    let db_config = db_config.clone();
    let block1 = async move {
        //return Err(Error::with_msg("some test error1"));
        //tx.send(Err(Error::with_msg("some test error2"))).await?;
        let dbc = crate::create_connection(&db_config).await?;
        let node_disk_ident = get_node_disk_ident(&node_config, &dbc).await?;
        let c1 = Arc::new(RwLock::new(0u32));

@@ -332,6 +332,7 @@ where
    let mut collector = <T as Collectable>::new_collector(bin_count_exp);
    let mut i1 = 0;
    let mut stream = stream;
    let mut total_duration = Duration::ZERO;
    loop {
        let item = if i1 == 0 {
            stream.next().await
@@ -357,11 +358,24 @@ where
                    info!("collect_plain_events_json log {:?}", item);
                }
            }
            StreamItem::Stats(item) => {
                if do_log {
                    info!("collect_plain_events_json stats {:?}", item);
                }
            }
            StreamItem::Stats(item) => match item {
                items::StatsItem::EventDataReadStats(_) => {}
                items::StatsItem::RangeFilterStats(_) => {}
                items::StatsItem::DiskStats(item) => match item {
                    netpod::DiskStats::OpenStats(k) => {
                        total_duration += k.duration;
                    }
                    netpod::DiskStats::SeekStats(k) => {
                        total_duration += k.duration;
                    }
                    netpod::DiskStats::ReadStats(k) => {
                        total_duration += k.duration;
                    }
                    netpod::DiskStats::ReadExactStats(k) => {
                        total_duration += k.duration;
                    }
                },
            },
            StreamItem::DataItem(item) => match item {
                RangeCompletableItem::RangeComplete => {
                    collector.set_range_complete();
@@ -382,6 +396,7 @@ where
        }
    }
    let ret = serde_json::to_value(collector.result()?)?;
    info!("Total duration: {:?}", total_duration);
    Ok(ret)
}

@@ -157,6 +157,9 @@ where
            Ready(Some(Ok(StreamItem::Log(item))))
        } else if let Some(item) = self.stats_items.pop_front() {
            Ready(Some(Ok(StreamItem::Stats(item))))
        } else if self.range_complete_observed_all_emitted {
            self.completed = true;
            Ready(None)
        } else if self.data_emit_complete {
            if self.range_complete_observed_all {
                if self.range_complete_observed_all_emitted {
@@ -213,10 +216,16 @@ where
                    continue 'outer;
                }
            } else {
                assert!(lowest_ts >= self.ts_last_emit);
                // TODO unordered cases
                if lowest_ts < self.ts_last_emit {
                    self.errored = true;
                    let msg = format!("unordered event at lowest_ts {}", lowest_ts);
                    return Ready(Some(Err(Error::with_msg(msg))));
                } else {
                    self.ts_last_emit = self.ts_last_emit.max(lowest_ts);
                }
                {
                    let batch = self.batch.take();
                    self.ts_last_emit = lowest_ts;
                    let rix = self.ixs[lowest_ix];
                    match &self.current[lowest_ix] {
                        MergedCurVal::Val(val) => {

@@ -1,8 +1,26 @@
use crate::response;
use err::Error;
use futures_core::Stream;
use futures_util::StreamExt;
use http::{header, Method, Request, Response, StatusCode};
use hyper::Body;
use netpod::{log::*, NodeConfigCached, APP_JSON_LINES};
use netpod::log::*;
use netpod::{NodeConfigCached, APP_JSON_LINES};
use serde::Serialize;

fn json_lines_stream<S, I>(stream: S) -> impl Stream<Item = Result<Vec<u8>, Error>>
where
    S: Stream<Item = Result<I, Error>>,
    I: Serialize,
{
    stream.map(|k| {
        k.map(|k| {
            let mut a = serde_json::to_vec(&k).unwrap();
            a.push(0xa);
            a
        })
    })
}
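
Every Ok item becomes one JSON document terminated by a 0x0a byte, which is what the APP_JSON_LINES content type promises. A usage sketch with made-up payloads (the real handlers below feed it the scan streams):

// Sketch only: any Stream of Result<T, Error> with T: Serialize works.
fn example_body() -> Body {
    let input = futures_util::stream::iter(vec![
        Ok::<_, Error>(serde_json::json!({"index": "a.idx"})),
        Ok(serde_json::json!({"index": "b.idx"})),
    ]);
    // the response body carries two newline-terminated JSON lines
    Body::wrap_stream(json_lines_stream(input))
}
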
pub struct ListIndexFilesHttpFunction {}

@@ -35,7 +53,7 @@ impl ListIndexFilesHttpFunction {
            .ok_or(Error::with_msg_no_trace(
                "this node is not configured as channel archiver",
            ))?;
        let s = archapp_wrap::archapp::archeng::list_index_files(conf);
        let s = archapp_wrap::archapp::archeng::indexfiles::list_index_files(conf);
        let s = futures_util::stream::unfold(s, |mut st| async move {
            use futures_util::StreamExt;
            let x = st.next().await;
@@ -60,6 +78,84 @@ impl ListIndexFilesHttpFunction {
    }
}

pub struct ScanIndexFiles {}

impl ScanIndexFiles {
    pub fn prefix() -> &'static str {
        "/api/4/channelarchiver/scan/indexfiles"
    }

    pub fn name() -> &'static str {
        "ScanIndexFiles"
    }

    pub fn should_handle(path: &str) -> Option<Self> {
        if path.starts_with(Self::prefix()) {
            Some(Self {})
        } else {
            None
        }
    }

    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
        if req.method() != Method::GET {
            return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
        }
        info!("{} handle uri: {:?}", Self::name(), req.uri());
        let conf = node_config
            .node
            .channel_archiver
            .as_ref()
            .ok_or(Error::with_msg_no_trace(
                "this node is not configured as channel archiver",
            ))?;
        let s = archapp_wrap::archapp::archeng::indexfiles::scan_index_files(conf.clone());
        let s = json_lines_stream(s);
        Ok(response(StatusCode::OK)
            .header(header::CONTENT_TYPE, APP_JSON_LINES)
            .body(Body::wrap_stream(s))?)
    }
}

pub struct ScanChannels {}

impl ScanChannels {
    pub fn prefix() -> &'static str {
        "/api/4/channelarchiver/scan/channels"
    }

    pub fn name() -> &'static str {
        "ScanChannels"
    }

    pub fn should_handle(path: &str) -> Option<Self> {
        if path.starts_with(Self::prefix()) {
            Some(Self {})
        } else {
            None
        }
    }

    pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
        if req.method() != Method::GET {
            return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
        }
        info!("{} handle uri: {:?}", Self::name(), req.uri());
        let conf = node_config
            .node
            .channel_archiver
            .as_ref()
            .ok_or(Error::with_msg_no_trace(
                "this node is not configured as channel archiver",
            ))?;
        let s = archapp_wrap::archapp::archeng::indexfiles::scan_channels(conf.clone());
        let s = json_lines_stream(s);
        Ok(response(StatusCode::OK)
            .header(header::CONTENT_TYPE, APP_JSON_LINES)
            .body(Body::wrap_stream(s))?)
    }
}

pub struct ListChannelsHttpFunction {}

impl ListChannelsHttpFunction {

@@ -286,6 +286,10 @@ async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) ->
        h.handle(req, &node_config).await
    } else if let Some(h) = channelarchiver::ListChannelsHttpFunction::should_handle(path) {
        h.handle(req, &node_config).await
    } else if let Some(h) = channelarchiver::ScanIndexFiles::should_handle(path) {
        h.handle(req, &node_config).await
    } else if let Some(h) = channelarchiver::ScanChannels::should_handle(path) {
        h.handle(req, &node_config).await
    } else if path.starts_with("/api/1/requestStatus/") {
        info!("{}", path);
        Ok(response(StatusCode::OK).body(Body::from("{}"))?)

@@ -4,8 +4,8 @@ use bytes::BytesMut;
use chrono::{TimeZone, Utc};
use err::Error;
use netpod::timeunits::{MS, SEC};
use netpod::RangeFilterStats;
use netpod::{log::Level, AggKind, EventDataReadStats, EventQueryJsonStringFrame, NanoRange, Shape};
use netpod::{DiskStats, RangeFilterStats};
use serde::de::{self, DeserializeOwned, Visitor};
use serde::{Deserialize, Serialize, Serializer};
use std::fmt;
@@ -52,6 +52,7 @@ pub enum RangeCompletableItem<T> {
pub enum StatsItem {
    EventDataReadStats(EventDataReadStats),
    RangeFilterStats(RangeFilterStats),
    DiskStats(DiskStats),
}

#[derive(Debug, Serialize, Deserialize)]
@@ -437,3 +438,23 @@ pub trait TimeBinnableTypeAggregator: Send {
    fn ingest(&mut self, item: &Self::Input);
    fn result_reset(&mut self, range: NanoRange, expand: bool) -> Self::Output;
}

pub trait TimestampInspectable: WithTimestamps + WithLen {}

impl<T> TimestampInspectable for T where T: WithTimestamps + WithLen {}

pub fn inspect_timestamps(events: &dyn TimestampInspectable, range: NanoRange) -> String {
    use fmt::Write;
    let rd = range.delta();
    let mut buf = String::new();
    let n = events.len();
    for i in 0..n {
        if i < 3 || i > (n - 4) {
            let ts = events.ts(i);
            let z = ts - range.beg;
            let z = z as f64 / rd as f64 * 2.0 - 1.0;
            write!(&mut buf, "i {:3} tt {:6.3}\n", i, z).unwrap();
        }
    }
    buf
}
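
The printed `tt` value maps each timestamp linearly onto [-1, 1] within the requested range: range.beg prints as -1.000, the midpoint as 0.000 and range.end as +1.000, so anything beyond ±1 marks an event outside the range. For example, with beg = 1000, end = 2000 and ts = 1250: z = 250 / 1000 * 2 - 1 = -0.5. Only the first three and last three events are printed; note that `ts - range.beg` is u64 arithmetic and would underflow for an event before the range.
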
@@ -126,6 +126,7 @@ pub struct ArchiverAppliance {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChannelArchiver {
    pub data_base_paths: Vec<PathBuf>,
    pub database: Database,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
@@ -298,6 +299,12 @@ pub struct Nanos {
    pub ns: u64,
}

impl Nanos {
    pub fn from_ns(ns: u64) -> Self {
        Self { ns }
    }
}

impl fmt::Debug for Nanos {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let ts = chrono::Utc.timestamp((self.ns / SEC) as i64, (self.ns % SEC) as u32);
@@ -970,6 +977,58 @@ impl RangeFilterStats {
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum DiskStats {
    OpenStats(OpenStats),
    SeekStats(SeekStats),
    ReadStats(ReadStats),
    ReadExactStats(ReadExactStats),
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct OpenStats {
    pub duration: Duration,
}

impl OpenStats {
    pub fn new(duration: Duration) -> Self {
        Self { duration }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SeekStats {
    pub duration: Duration,
}

impl SeekStats {
    pub fn new(duration: Duration) -> Self {
        Self { duration }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ReadStats {
    pub duration: Duration,
}

impl ReadStats {
    pub fn new(duration: Duration) -> Self {
        Self { duration }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ReadExactStats {
    pub duration: Duration,
}

impl ReadExactStats {
    pub fn new(duration: Duration) -> Self {
        Self { duration }
    }
}
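
All four DiskStats payloads wrap nothing but a Duration; that uniformity is what allows the collector hunk further up to fold every variant into a single total_duration, whichever I/O operation produced it.
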
#[derive(Clone, Debug)]
pub struct PerfOpts {
    pub inmem_bufcap: usize,

@@ -1,9 +1,10 @@
use err::Error;
use futures_core::Stream;
use futures_util::StreamExt;
use items::StatsItem;
use items::{Appendable, Clearable, PushableIndex, RangeCompletableItem, Sitemty, StreamItem, WithTimestamps};
use netpod::NanoRange;
use netpod::{log::*, RangeFilterStats};
use netpod::{NanoRange, Nanos};
use std::pin::Pin;
use std::task::{Context, Poll};

@@ -12,6 +13,7 @@ pub struct RangeFilter<S, ITY> {
    range: NanoRange,
    range_str: String,
    expand: bool,
    ts_max: u64,
    stats: RangeFilterStats,
    prerange: Option<ITY>,
    have_pre: bool,
@@ -34,6 +36,7 @@ where
            range_str: format!("{:?}", range),
            range,
            expand,
            ts_max: 0,
            stats: RangeFilterStats::new(),
            prerange: None,
            have_pre: false,
@@ -79,6 +82,19 @@ where
                let mut ret = item.empty_like_self();
                for i1 in 0..item.len() {
                    let ts = item.ts(i1);
                    if ts < self.ts_max {
                        self.done = true;
                        let msg = format!(
                            "unordered event i1 {} / {} ts {:?} ts_max {:?}",
                            i1,
                            item.len(),
                            Nanos::from_ns(ts),
                            Nanos::from_ns(self.ts_max)
                        );
                        error!("{}", msg);
                        return Ready(Some(Err(Error::with_msg(msg))));
                    }
                    self.ts_max = ts;
                    if ts < self.range.beg {
                        if self.expand {
                            let mut prerange = if let Some(prerange) = self.prerange.take() {

@@ -231,6 +231,9 @@ pub fn append_inner(dirname: &str, mut stdin: Stdin, _stderr: Stderr) -> Result<
                fout = next_file(&dir)?;
            };
        }
        if n1 == 0 {
            break Ok(());
        }
    }
}

@@ -90,7 +90,14 @@ pub fn tracing_init() {
        .with_thread_names(true)
        //.with_max_level(tracing::Level::INFO)
        .with_env_filter(tracing_subscriber::EnvFilter::new(
            ["info", "archapp::archeng=info", "daqbuffer::test=trace"].join(","),
            [
                "info",
                "archapp::archeng=info",
                "archapp::archeng::datablockstream=info",
                "archapp::storagemerge=info",
                "daqbuffer::test=trace",
            ]
            .join(","),
        ))
        .init();
    *g = 1;