Improve search API

This commit is contained in:
Dominik Werder
2021-11-03 16:00:53 +01:00
parent 4d3965660c
commit 96e0473392
21 changed files with 1285 additions and 279 deletions

View File

@@ -2,6 +2,7 @@ pub mod backreadbuf;
pub mod blockrefstream;
pub mod blockstream;
pub mod bufminread;
pub mod configs;
pub mod datablock;
pub mod datablockstream;
pub mod diskio;
@@ -18,7 +19,7 @@ use crate::wrap_task;
use async_channel::{Receiver, Sender};
use err::Error;
use futures_util::StreamExt;
use items::{RangeCompletableItem, Sitemty, StatsItem, StreamItem};
use items::{RangeCompletableItem, Sitemty, StatsItem, StreamItem, WithLen};
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::{
@@ -329,26 +330,26 @@ pub fn list_all_channels(node: &ChannelArchiver) -> Receiver<Result<ListChannelI
pub async fn channel_config(q: &ChannelConfigQuery, conf: &ChannelArchiver) -> Result<ChannelConfigResponse, Error> {
let _timed = Timed::new("channel_config");
let mut type_info = None;
let mut stream = datablockstream::DatablockStream::for_channel_range(
q.range.clone(),
q.channel.clone(),
conf.data_base_paths.clone().into(),
true,
1,
);
let stream = blockrefstream::blockref_stream(q.channel.clone(), q.range.clone().clone(), conf.clone());
let stream = Box::pin(stream);
let stream = blockstream::BlockStream::new(stream, q.range.clone(), 1);
let mut stream = stream;
let timed_expand = Timed::new("channel_config EXPAND");
while let Some(item) = stream.next().await {
use blockstream::BlockItem::*;
match item {
Ok(k) => match k {
StreamItem::DataItem(k) => match k {
RangeCompletableItem::RangeComplete => (),
RangeCompletableItem::Data(k) => {
type_info = Some(k.type_info());
EventsItem(item) => {
if item.len() > 0 {
type_info = Some(item.type_info());
break;
}
},
StreamItem::Log(_) => (),
StreamItem::Stats(_) => (),
}
JsVal(jsval) => {
if false {
info!("jsval: {}", serde_json::to_string(&jsval)?);
}
}
},
Err(e) => {
error!("{}", e);
@@ -360,25 +361,25 @@ pub async fn channel_config(q: &ChannelConfigQuery, conf: &ChannelArchiver) -> R
if type_info.is_none() {
let timed_normal = Timed::new("channel_config NORMAL");
warn!("channel_config expand mode returned none");
let mut stream = datablockstream::DatablockStream::for_channel_range(
q.range.clone(),
q.channel.clone(),
conf.data_base_paths.clone().into(),
false,
u64::MAX,
);
let stream = blockrefstream::blockref_stream(q.channel.clone(), q.range.clone().clone(), conf.clone());
let stream = Box::pin(stream);
let stream = blockstream::BlockStream::new(stream, q.range.clone(), 1);
let mut stream = stream;
while let Some(item) = stream.next().await {
use blockstream::BlockItem::*;
match item {
Ok(k) => match k {
StreamItem::DataItem(k) => match k {
RangeCompletableItem::RangeComplete => (),
RangeCompletableItem::Data(k) => {
type_info = Some(k.type_info());
EventsItem(item) => {
if item.len() > 0 {
type_info = Some(item.type_info());
break;
}
},
StreamItem::Log(_) => (),
StreamItem::Stats(_) => (),
}
JsVal(jsval) => {
if false {
info!("jsval: {}", serde_json::to_string(&jsval)?);
}
}
},
Err(e) => {
error!("{}", e);

View File

@@ -123,6 +123,6 @@ impl<F> fmt::Debug for BackReadBuf<F> {
impl<F> Drop for BackReadBuf<F> {
fn drop(&mut self) {
info!("Drop {:?}", self);
trace!("Drop {:?}", self);
}
}

View File

@@ -1,22 +1,21 @@
use crate::archeng::backreadbuf::BackReadBuf;
use crate::archeng::datablock::{read_data2, read_data_1, read_datafile_header, read_datafile_header2};
use crate::archeng::datablock::{read_data2, read_datafile_header2};
use crate::archeng::indexfiles::{database_connect, unfold_stream, UnfoldExec};
use crate::archeng::indextree::{
read_datablockref, read_datablockref2, DataheaderPos, Dataref, HeaderVersion, IndexFileBasics, RecordIter,
RecordTarget,
read_datablockref2, DataheaderPos, Dataref, HeaderVersion, IndexFileBasics, RecordIter, RecordTarget,
};
use crate::archeng::ringbuf::RingBuf;
use crate::archeng::{open_read, seek, StatsChannel};
use crate::archeng::{open_read, StatsChannel};
use err::Error;
use futures_core::{Future, Stream};
use items::WithLen;
#[allow(unused)]
use netpod::log::*;
use netpod::{Channel, ChannelArchiver, FilePos, NanoRange};
use netpod::{Channel, ChannelArchiver, NanoRange};
#[allow(unused)]
use serde::Serialize;
use serde_json::Value as JsVal;
use std::collections::{BTreeMap, VecDeque};
use std::io::SeekFrom;
use std::path::PathBuf;
use std::pin::Pin;
use tokio::fs::File;
@@ -83,7 +82,7 @@ impl BlockrefStream {
match self.steps {
Start => {
self.steps = SelectIndexFile;
Ok(Some((BlockrefItem::JsVal(JsVal::Null), self)))
Ok(Some((BlockrefItem::JsVal(JsVal::String(format!("START"))), self)))
}
SelectIndexFile => {
let dbc = database_connect(&self.conf.database).await?;
@@ -93,7 +92,7 @@ impl BlockrefStream {
self.paths.push_back(row.try_get(0)?);
}
self.steps = SetupNextPath;
Ok(Some((BlockrefItem::JsVal(JsVal::String(format!("INIT"))), self)))
Ok(Some((BlockrefItem::JsVal(JsVal::String(format!("DBQUERY"))), self)))
}
SetupNextPath => {
let stats = &StatsChannel::dummy();
@@ -115,8 +114,11 @@ impl BlockrefStream {
};
Ok(Some((BlockrefItem::JsVal(JsVal::String(format!("NEXTPATH"))), self)))
} else {
self.steps = Done;
Ok(Some((BlockrefItem::JsVal(JsVal::String(format!("DONE"))), self)))
self.steps = SelectIndexFile;
Ok(Some((
BlockrefItem::JsVal(JsVal::String(format!("PATHQUEUEEMPTY"))),
self,
)))
}
}
ReadBlocks(ref mut iter, ref hver, ref indexpath) => {
@@ -208,7 +210,7 @@ impl BlockrefStream {
panic!();
}
} else {
info!(
debug!(
"data_bytes_read: {} same_dfh_count: {}",
self.data_bytes_read, self.same_dfh_count
);

View File

@@ -58,6 +58,7 @@ struct Reader {
impl Reader {}
struct FutAItem {
#[allow(unused)]
fname: String,
path: PathBuf,
dfnotfound: bool,
@@ -67,6 +68,7 @@ struct FutAItem {
events: Option<EventsItem>,
}
#[allow(unused)]
pub struct FutA {
fname: String,
pos: DataheaderPos,
@@ -76,13 +78,17 @@ pub struct FutA {
impl Future for FutA {
type Output = Result<JsVal, Error>;
#[allow(unused)]
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
use Poll::*;
err::todoval()
}
}
pub enum BlockItem {}
pub enum BlockItem {
EventsItem(EventsItem),
JsVal(JsVal),
}
pub struct BlockStream<S> {
inp: S,
@@ -115,7 +121,7 @@ impl<S> BlockStream<S> {
range,
dfnotfound: BTreeMap::new(),
block_reads: FuturesOrdered::new(),
max_reads,
max_reads: max_reads.max(1),
readers: VecDeque::new(),
last_dfname: String::new(),
last_dfhpos: DataheaderPos(u64::MAX),
@@ -143,7 +149,7 @@ impl<S> Stream for BlockStream<S>
where
S: Stream<Item = Result<BlockrefItem, Error>> + Unpin,
{
type Item = Result<JsVal, Error>;
type Item = Result<BlockItem, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
@@ -195,7 +201,7 @@ where
Some(reader)
} else {
let stats = StatsChannel::dummy();
info!("open new reader file {:?}", dpath);
debug!("open new reader file {:?}", dpath);
match open_read(dpath.clone(), &stats).await {
Ok(file) => {
//
@@ -212,9 +218,17 @@ where
let rp1 = reader.rb.bytes_read();
let dfheader =
read_datafile_header2(&mut reader.rb, pos).await?;
// TODO handle expand
let expand = false;
let data =
read_data2(&mut reader.rb, &dfheader, range, false)
.await?;
read_data2(&mut reader.rb, &dfheader, range, expand)
.await
.map_err(|e| {
Error::with_msg_no_trace(format!(
"dpath {:?} error {}",
dpath, e
))
})?;
let rp2 = reader.rb.bytes_read();
let bytes_read = rp2 - rp1;
let ret = FutAItem {
@@ -248,7 +262,7 @@ where
}
Int::Empty
}
BlockrefItem::JsVal(_jsval) => Int::Empty,
BlockrefItem::JsVal(jsval) => Int::Item(Ok(BlockItem::JsVal(jsval))),
},
Err(e) => {
self.done = true;
@@ -271,7 +285,6 @@ where
} else {
match self.block_reads.poll_next_unpin(cx) {
Ready(Some(Ok(item))) => {
//
if item.dfnotfound {
self.dfnotfound.insert(item.path, true);
}
@@ -297,23 +310,35 @@ where
item.events.is_some(),
item.events_read
));
let _ = item;
}
if self.acc.older(Duration::from_millis(1000)) {
let ret = std::mem::replace(&mut self.acc, StatsAcc::new());
match serde_json::to_value((ret, self.block_reads.len(), self.readers.len())) {
Ok(item) => Int::Item(Ok(item)),
Err(e) => {
self.done = true;
return Ready(Some(Err(e.into())));
if false {
// TODO emit proper variant for optional performance measurement.
if self.acc.older(Duration::from_millis(1000)) {
let ret = std::mem::replace(&mut self.acc, StatsAcc::new());
match serde_json::to_value((ret, self.block_reads.len(), self.readers.len())) {
Ok(item) => Int::Item(Ok::<_, Error>(item)),
Err(e) => {
self.done = true;
return Ready(Some(Err(e.into())));
}
}
}
} else {
//Int::Item(Ok(item))
Int::Empty
};
err::todoval()
} else {
//Int::Item(Ok(item))
Int::Empty
if let Some(events) = item.events {
Int::Item(Ok(BlockItem::EventsItem(events)))
} else {
Int::Empty
}
}
}
Ready(Some(Err(e))) => {
self.done = true;
error!("{}", e);
Int::Item(Err(e))
}
Ready(None) => {
@@ -364,6 +389,6 @@ impl<S> fmt::Debug for BlockStream<S> {
impl<S> Drop for BlockStream<S> {
fn drop(&mut self) {
info!("Drop {:?}", self);
trace!("Drop {:?}", self);
}
}

View File

@@ -0,0 +1,259 @@
use crate::archeng::indexfiles::database_connect;
use err::Error;
use futures_core::{Future, Stream};
use futures_util::{FutureExt, StreamExt};
use netpod::log::*;
use netpod::{Channel, ChannelArchiver, ChannelConfigQuery, ChannelConfigResponse, Database, NanoRange};
use serde::Serialize;
use serde_json::Value as JsVal;
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, SystemTime};
use tokio_postgres::{Client, Row};
/// Streams the names of all channels whose stored `config` column is still the
/// empty JSON object, fetched from the database in pages of 1000.
pub struct ChannelNameStream {
    db_config: Database,
    // Row offset into the ordered result set; advanced by each fetched page.
    off: u64,
    // Set once a SELECT returns zero rows, i.e. the result set is exhausted.
    db_done: bool,
    // Names already fetched but not yet yielded to the consumer.
    batch: VecDeque<String>,
    // In-flight database connection attempt, if any.
    connect_fut: Option<Pin<Box<dyn Future<Output = Result<Client, Error>> + Send>>>,
    // In-flight SELECT for the next page, if any.
    select_fut: Option<Pin<Box<dyn Future<Output = Result<Vec<Row>, Error>> + Send>>>,
    // Stream has decided to end; the next poll yields `None`.
    done: bool,
    // `None` has been yielded; any further poll panics.
    complete: bool,
}
impl ChannelNameStream {
    /// Creates the stream; no database connection is made until first poll.
    pub fn new(db_config: Database) -> Self {
        Self {
            db_config,
            off: 0,
            db_done: false,
            batch: VecDeque::new(),
            connect_fut: None,
            select_fut: None,
            done: false,
            complete: false,
        }
    }
}
impl Stream for ChannelNameStream {
    type Item = Result<String, Error>;

    // Poll-driven state machine, checked in priority order:
    //   1. terminal states (complete / done)
    //   2. drain the local `batch`
    //   3. drive a pending SELECT
    //   4. drive a pending connect
    //   5. start a new connect, or finish once the db is exhausted
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        loop {
            break if self.complete {
                panic!("poll on complete")
            } else if self.done {
                self.complete = true;
                Ready(None)
            } else if let Some(item) = self.batch.pop_front() {
                Ready(Some(Ok(item)))
            } else if let Some(fut) = &mut self.select_fut {
                match fut.poll_unpin(cx) {
                    Ready(Ok(rows)) => {
                        self.select_fut = None;
                        self.off += rows.len() as u64;
                        if rows.len() == 0 {
                            // Empty page: no more matching channels.
                            self.db_done = true;
                        }
                        for row in rows {
                            // Column 1 is `name` (column 0 is `rowid`).
                            self.batch.push_back(row.get(1));
                        }
                        continue;
                    }
                    Ready(Err(e)) => {
                        self.select_fut = None;
                        self.done = true;
                        Ready(Some(Err(e)))
                    }
                    Pending => Pending,
                }
            } else if let Some(fut) = &mut self.connect_fut {
                match fut.poll_unpin(cx) {
                    Ready(Ok(dbc)) => {
                        self.connect_fut = None;
                        let off = self.off as i64;
                        // Fetch the next page of channels whose config is still
                        // the empty object, ordered by name for stable paging.
                        // NOTE(review): the consumer fills in configs while this
                        // paginates, so rows stop matching the predicate as the
                        // offset grows — offset-based paging can skip channels.
                        // Confirm this is intended.
                        let fut = async move {
                            let rows = dbc
                                .query(
                                    "select rowid, name from channels where config = '{}'::jsonb order by name offset $1 limit 1000",
                                    &[&off],
                                )
                                .await?;
                            Ok::<_, Error>(rows)
                        };
                        self.select_fut = Some(Box::pin(fut));
                        continue;
                    }
                    Ready(Err(e)) => {
                        self.connect_fut = None;
                        self.done = true;
                        Ready(Some(Err(e)))
                    }
                    Pending => Pending,
                }
            } else {
                if self.db_done {
                    self.done = true;
                    info!("db_done");
                    continue;
                } else {
                    // A fresh connection is made for every page.
                    let db = self.db_config.clone();
                    let fut = async move { database_connect(&db).await };
                    self.connect_fut = Some(Box::pin(fut));
                    continue;
                }
            };
        }
    }
}
/// Outcome of a single `channel_config` probe driven by `ConfigStream`.
enum Res {
    /// The probe did not finish within the timeout; carries the channel name
    /// so the timeout can be recorded against that channel.
    TimedOut(String),
    /// The probe produced a configuration response.
    Response(ChannelConfigResponse),
}
/// Item type yielded by `ConfigStream`.
#[derive(Debug, Serialize)]
pub enum ConfigItem {
    /// A successfully determined channel configuration.
    Config(ChannelConfigResponse),
    /// Auxiliary JSON payload.
    // NOTE(review): no producer of this variant is visible in this file —
    // confirm it is emitted elsewhere or reserved for future use.
    JsVal(JsVal),
}
/// Stream adapter that, for each channel name from `inp`, probes the channel's
/// configuration (with a 2 s timeout) and writes the result back into the
/// `channels` table, yielding a `ConfigItem::Config` for each success.
pub struct ConfigStream {
    conf: ChannelArchiver,
    // Source of channel names that still lack a stored config.
    inp: ChannelNameStream,
    // Input stream returned `None`; drain remaining futures then finish.
    inp_done: bool,
    // In-flight config probe (channel_config wrapped in a timeout), if any.
    get_fut: Option<Pin<Box<dyn Future<Output = Result<Res, Error>> + Send>>>,
    // In-flight database write-back of the last probe result, if any.
    update_fut: Option<Pin<Box<dyn Future<Output = Result<(), Error>> + Send>>>,
    // Stream has decided to end; the next poll yields `None`.
    done: bool,
    // `None` has been yielded; any further poll panics.
    complete: bool,
}
impl ConfigStream {
    /// Creates the stream; work starts on first poll, driven by names from `inp`.
    pub fn new(inp: ChannelNameStream, conf: ChannelArchiver) -> Self {
        Self {
            conf,
            inp,
            inp_done: false,
            get_fut: None,
            update_fut: None,
            done: false,
            complete: false,
        }
    }
}
impl Stream for ConfigStream {
    type Item = Result<ConfigItem, Error>;

    // Poll-driven state machine, checked in priority order:
    //   1. terminal states (complete / done)
    //   2. drive a pending db write-back (`update_fut`)
    //   3. drive a pending config probe (`get_fut`)
    //   4. pull the next channel name from `inp` and start a probe
    // At most one probe and one write-back are in flight at a time.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        loop {
            break if self.complete {
                panic!("poll on complete")
            } else if self.done {
                self.complete = true;
                Ready(None)
            } else if let Some(fut) = &mut self.update_fut {
                match fut.poll_unpin(cx) {
                    Ready(Ok(_)) => {
                        self.update_fut = None;
                        continue;
                    }
                    Ready(Err(e)) => {
                        self.update_fut = None;
                        self.done = true;
                        Ready(Some(Err(e)))
                    }
                    Pending => Pending,
                }
            } else if let Some(fut) = &mut self.get_fut {
                match fut.poll_unpin(cx) {
                    Ready(Ok(Res::Response(item))) => {
                        self.get_fut = None;
                        let name = item.channel.name.clone();
                        let dbconf = self.conf.database.clone();
                        // `?` here maps a serialization error into
                        // `Ready(Some(Err(..)))` via `Poll`'s `Try` impl.
                        let config = serde_json::to_value(&item)?;
                        // Persist the discovered config, then yield it.
                        let fut = async move {
                            let dbc = database_connect(&dbconf).await?;
                            dbc.query("update channels set config = $2 where name = $1", &[&name, &config])
                                .await?;
                            Ok(())
                        };
                        self.update_fut = Some(Box::pin(fut));
                        let item = ConfigItem::Config(item);
                        Ready(Some(Ok(item)))
                    }
                    Ready(Ok(Res::TimedOut(name))) => {
                        self.get_fut = None;
                        let dbconf = self.conf.database.clone();
                        // Record the timeout as the channel's config so the
                        // channel is not re-selected; nothing is yielded.
                        let config = serde_json::to_value(&"TimedOut")?;
                        let fut = async move {
                            let dbc = database_connect(&dbconf).await?;
                            dbc.query("update channels set config = $2 where name = $1", &[&name, &config])
                                .await?;
                            Ok(())
                        };
                        self.update_fut = Some(Box::pin(fut));
                        continue;
                    }
                    Ready(Err(e)) => {
                        self.get_fut = None;
                        self.done = true;
                        Ready(Some(Err(e)))
                    }
                    Pending => Pending,
                }
            } else {
                if self.inp_done {
                    self.done = true;
                    continue;
                } else {
                    match self.inp.poll_next_unpin(cx) {
                        Ready(Some(Ok(item))) => {
                            let conf = self.conf.clone();
                            let fut = async move {
                                let channel = Channel {
                                    name: item,
                                    backend: "".into(),
                                };
                                let now = SystemTime::now()
                                    .duration_since(std::time::UNIX_EPOCH)
                                    .unwrap()
                                    .as_secs();
                                // Probe window: 1000 h back, 4 h ahead.
                                // NOTE(review): `now` is in seconds but the range
                                // type is `NanoRange` — confirm the expected unit
                                // (a `* SEC` factor may be missing).
                                let beg = now - 60 * 60 * 1000;
                                let end = now + 60 * 60 * 4;
                                let q = ChannelConfigQuery {
                                    channel,
                                    range: NanoRange { beg, end },
                                };
                                let fut = super::channel_config(&q, &conf);
                                // Bound the probe to 2 s; slow channels are
                                // reported as `Res::TimedOut`.
                                let fut = tokio::time::timeout(Duration::from_millis(2000), fut);
                                match fut.await {
                                    Ok(Ok(k)) => Ok(Res::Response(k)),
                                    Ok(Err(e)) => Err(e),
                                    Err(_) => Ok(Res::TimedOut(q.channel.name)),
                                }
                            };
                            self.get_fut = Some(Box::pin(fut));
                            continue;
                        }
                        Ready(Some(Err(e))) => {
                            self.done = true;
                            Ready(Some(Err(e)))
                        }
                        Ready(None) => {
                            self.inp_done = true;
                            info!("ConfigStream input done.");
                            continue;
                        }
                        Pending => Pending,
                    }
                }
            };
        }
    }
}

View File

@@ -1,9 +1,12 @@
use super::format_hex_block;
use super::indextree::DataheaderPos;
use crate::archeng::ringbuf::RingBuf;
use crate::archeng::{read_exact, read_string, readf64, readu16, readu32, seek, StatsChannel, EPICS_EPOCH_OFFSET};
use crate::eventsitem::EventsItem;
use crate::plainevents::{PlainEvents, ScalarPlainEvents};
use crate::plainevents::{PlainEvents, ScalarPlainEvents, WavePlainEvents};
use err::Error;
use items::eventvalues::EventValues;
use items::waveevents::WaveEvents;
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::{NanoRange, Nanos};
@@ -11,13 +14,17 @@ use std::convert::TryInto;
use std::io::SeekFrom;
use tokio::fs::File;
use super::indextree::DataheaderPos;
#[derive(Debug)]
#[derive(Clone, Debug)]
enum DbrType {
DbrString = 0,
DbrInt = 1,
DbrShort = 1,
DbrStsFloat = 9,
DbrTimeString = 14,
DbrTimeShort = 15,
DbrTimeFloat = 16,
DbrTimeEnum = 17,
DbrTimeChar = 18,
DbrTimeLong = 19,
DbrTimeDouble = 20,
}
@@ -26,8 +33,14 @@ impl DbrType {
use DbrType::*;
let res = match k {
0 => DbrString,
1 => DbrInt,
1 => DbrShort,
9 => DbrStsFloat,
14 => DbrTimeString,
15 => DbrTimeShort,
16 => DbrTimeFloat,
17 => DbrTimeEnum,
18 => DbrTimeChar,
19 => DbrTimeLong,
20 => DbrTimeDouble,
_ => {
let msg = format!("not a valid/supported dbr type: {}", k);
@@ -37,16 +50,60 @@ impl DbrType {
Ok(res)
}
#[allow(dead_code)]
fn byte_len(&self) -> usize {
fn meta_len(&self) -> usize {
use DbrType::*;
match self {
DbrString => 0,
DbrInt => 4,
DbrStsFloat => 1,
DbrTimeDouble => 16,
DbrShort => 0,
DbrStsFloat => 4,
DbrTimeString => 12,
DbrTimeShort => 12,
DbrTimeFloat => 12,
DbrTimeEnum => 12,
DbrTimeChar => 12,
DbrTimeLong => 12,
DbrTimeDouble => 12,
}
}
    /// Alignment padding in bytes between the meta section and the value
    /// payload for this dbr type.
    // NOTE(review): values assumed to match the Channel Archiver on-disk
    // layout — confirm against the archiver format specification.
    fn pad_meta(&self) -> usize {
        use DbrType::*;
        match self {
            DbrString => 0,
            DbrShort => 0,
            DbrStsFloat => 0,
            DbrTimeString => 0,
            DbrTimeShort => 2,
            DbrTimeFloat => 0,
            DbrTimeEnum => 2,
            DbrTimeChar => 3,
            DbrTimeLong => 0,
            DbrTimeDouble => 4,
        }
    }

    /// Size in bytes of a single value element of this dbr type.
    fn val_len(&self) -> usize {
        use DbrType::*;
        match self {
            DbrString => 40,
            DbrShort => 2,
            DbrStsFloat => 4,
            DbrTimeString => 40,
            DbrTimeShort => 2,
            DbrTimeFloat => 4,
            DbrTimeEnum => 2,
            DbrTimeChar => 1,
            DbrTimeLong => 4,
            DbrTimeDouble => 8,
        }
    }

    /// Total on-disk size of one message carrying `count` values:
    /// meta + padding + values, rounded up to the next multiple of 8 bytes.
    fn msg_len(&self, count: usize) -> usize {
        let n = self.meta_len() + self.pad_meta() + count * self.val_len();
        let r = n % 8;
        let n = if r == 0 { n } else { n + 8 - r };
        n
    }
}
#[derive(Debug)]
@@ -204,6 +261,148 @@ pub async fn read_datafile_header2(rb: &mut RingBuf<File>, pos: DataheaderPos) -
Ok(ret)
}
/// Strategy for decoding the per-sample meta section (status/severity/
/// timestamp) at the start of a dbr message.
/// Returns `(timestamp, meta_bytes_consumed)`.
trait MetaParse {
    fn parse_meta(buf: &[u8]) -> (u64, usize);
}
/// Meta parser for dbr types without a per-sample meta section: consumes
/// zero bytes and reports timestamp 0.
struct NoneMetaParse;
impl MetaParse for NoneMetaParse {
    #[inline(always)]
    fn parse_meta(_buf: &[u8]) -> (u64, usize) {
        (0, 0)
    }
}
/// Meta parser for `DbrTime*` messages: bytes 0..4 hold status/severity
/// (ignored here), bytes 4..12 hold the big-endian EPICS timestamp as a
/// (seconds, remainder) pair, shifted by `EPICS_EPOCH_OFFSET`.
struct TimeMetaParse;

impl MetaParse for TimeMetaParse {
    #[inline(always)]
    fn parse_meta(buf: &[u8]) -> (u64, usize) {
        // Meta layout: status (2) + severity (2) + secs (4) + sub-secs (4).
        const META_LEN: usize = 12;
        let secs = u32::from_be_bytes(buf[4..8].try_into().unwrap()) as u64;
        let frac = u32::from_be_bytes(buf[8..12].try_into().unwrap()) as u64;
        (secs * SEC + frac + EPICS_EPOCH_OFFSET, META_LEN)
    }
}
/// Parses one dbr message from `buf`: first the meta section via the
/// `MetaParse` impl selected by `_meta_parse` (the value is only a
/// type-selector and never used), then the values via `valf` after skipping
/// the type's alignment padding.
///
/// Returns `(timestamp, value_or_values, meta_len)`; the third element is the
/// byte count consumed by the meta parser only (padding and values excluded)
/// and is currently ignored by the callers in this file.
#[inline(always)]
fn parse_msg<MP: MetaParse, F, VT>(
    buf: &[u8],
    _meta_parse: MP,
    dbrt: DbrType,
    dbrcount: usize,
    valf: F,
) -> Result<(u64, VT, usize), Error>
where
    F: Fn(&[u8], usize) -> VT,
{
    let (ts, n) = MP::parse_meta(buf);
    // Values start after the meta section plus type-specific padding.
    let buf = &buf[n + dbrt.pad_meta()..];
    Ok((ts, valf(buf, dbrcount), n))
}
/// Defines a scalar extractor `fn $n(buf, _dbrcount) -> $sty` that decodes a
/// single big-endian value from the front of `buf` (panics if `buf` is too
/// short, as slice indexing does).
macro_rules! ex_s {
    ($sty:ident, $n:ident) => {
        fn $n(buf: &[u8], _dbrcount: usize) -> $sty {
            const W: usize = std::mem::size_of::<$sty>();
            <$sty>::from_be_bytes(buf[..W].try_into().unwrap())
        }
    };
}

/// Defines a vector extractor `fn $n(buf, dbrcount) -> Vec<$sty>` that decodes
/// `dbrcount` consecutive big-endian values from the front of `buf`.
macro_rules! ex_v {
    ($sty:ident, $n:ident) => {
        fn $n(buf: &[u8], dbrcount: usize) -> Vec<$sty> {
            const W: usize = std::mem::size_of::<$sty>();
            (0..dbrcount)
                .map(|i| <$sty>::from_be_bytes(buf[i * W..(i + 1) * W].try_into().unwrap()))
                .collect()
        }
    };
}

// One extractor pair per supported element type.
ex_s!(i8, ex_s_i8);
ex_s!(i16, ex_s_i16);
ex_s!(i32, ex_s_i32);
ex_s!(f32, ex_s_f32);
ex_s!(f64, ex_s_f64);
ex_v!(i8, ex_v_i8);
ex_v!(i16, ex_v_i16);
ex_v!(i32, ex_v_i32);
ex_v!(f32, ex_v_f32);
ex_v!(f64, ex_v_f64);
/// Reads `$numsamples` dbr messages of `$msglen` bytes each from ring buffer
/// `$rb` and assembles them into an `EventsItem`.
///
/// `$dbrcount == 1` produces scalar events (`ScalarPlainEvents::$evvar` over
/// `EventValues`), any other count produces waveform events
/// (`WavePlainEvents::$evvar` over `WaveEvents`). `$exfs` / `$exfv` are the
/// scalar / vector value extractors for element type `$sty`.
/// Must be expanded inside an `async fn` returning `Result` (uses `.await?`).
macro_rules! read_msg {
    ($sty:ident, $exfs:ident, $exfv:ident, $evvar:ident, $rb:expr, $msglen:expr, $numsamples:expr, $dbrt:expr, $dbrcount:ident) => {
        if $dbrcount == 1 {
            let mut evs = EventValues::empty();
            for _ in 0..$numsamples {
                // Make sure one full message is buffered before parsing.
                $rb.fill_min($msglen).await?;
                let buf = $rb.data();
                let (ts, val, _) = parse_msg(buf, TimeMetaParse, $dbrt.clone(), $dbrcount, $exfs)?;
                evs.tss.push(ts);
                evs.values.push(val);
                // Advance by the fixed on-disk message length, not by the
                // bytes the parser reported.
                $rb.adv($msglen);
            }
            let evs = ScalarPlainEvents::$evvar(evs);
            let plain = PlainEvents::Scalar(evs);
            let item = EventsItem::Plain(plain);
            item
        } else {
            let mut evs = WaveEvents::empty();
            for _ in 0..$numsamples {
                $rb.fill_min($msglen).await?;
                let buf = $rb.data();
                let (ts, val, _) = parse_msg(buf, TimeMetaParse, $dbrt.clone(), $dbrcount, $exfv)?;
                evs.tss.push(ts);
                evs.vals.push(val);
                $rb.adv($msglen);
            }
            let evs = WavePlainEvents::$evvar(evs);
            let plain = PlainEvents::Wave(evs);
            let item = EventsItem::Plain(plain);
            item
        }
    };
}
/// Debug helper: buffers 10 KiB, hex-dumps 19 overlapping 512-byte windows
/// (stride 512) at error level, then aborts with an `EXIT` error.
/// Only for ad-hoc inspection of raw data blocks; never call in production.
async fn _format_debug_1(rb: &mut RingBuf<File>, dbrcount: usize) -> Result<(), Error> {
    rb.fill_min(1024 * 10).await?;
    for i1 in 0..19 {
        let hex = format_hex_block(&rb.data()[512 * i1..], 512);
        error!("dbrcount {} block\n{}", dbrcount, hex);
    }
    return Err(Error::with_msg_no_trace("EXIT"));
}
/// Debug helper: logs all timestamps of a waveform batch plus an abbreviated
/// view of the values — only the first and last 3 waveforms, and within each
/// waveform only the first and last 3 samples (waveforms of length <= 6 are
/// shown whole).
fn _format_debug_2(evs: WaveEvents<i32>) -> Result<(), Error> {
    info!("tss: {:?}", evs.tss);
    let n = evs.vals.len();
    let vals: Vec<_> = evs
        .vals
        .iter()
        .enumerate()
        // Keep only the leading and trailing 3 waveforms.
        .filter(|&(i, _)| i < 3 || i + 3 >= n)
        .map(|(_i, j)| {
            if j.len() > 6 {
                // Abbreviate long waveforms to first 3 + last 3 samples.
                let mut a = j[0..3].to_vec();
                a.extend_from_slice(&j[j.len() - 3..]);
                a.to_vec()
            } else {
                j.to_vec()
            }
        })
        .collect();
    info!("vals: {:?}", vals);
    Ok(())
}
pub async fn read_data2(
rb: &mut RingBuf<File>,
datafile_header: &DatafileHeader,
@@ -211,58 +410,55 @@ pub async fn read_data2(
_expand: bool,
) -> Result<EventsItem, Error> {
// TODO handle expand mode
//let dhpos = datafile_header.pos.0 + DATA_HEADER_LEN_ON_DISK as u64;
//seek(file, SeekFrom::Start(dhpos), stats).await?;
let res = match &datafile_header.dbr_type {
DbrType::DbrTimeDouble => {
if datafile_header.dbr_count == 1 {
trace!("~~~~~~~~~~~~~~~~~~~~~ read scalar DbrTimeDouble");
let mut evs = EventValues {
tss: vec![],
values: vec![],
};
let n1 = datafile_header.num_samples as usize;
//let n2 = datafile_header.dbr_type.byte_len();
let n2 = 2 + 2 + 4 + 4 + (4) + 8;
let n3 = n1 * n2;
rb.fill_min(n3).await?;
//let mut buf = vec![0; n3];
//read_exact(file, &mut buf, stats).await?;
let buf = rb.data();
let mut p1 = 0;
let mut ntot = 0;
while p1 < n3 - n2 {
let _status = u16::from_be_bytes(buf[p1..p1 + 2].try_into().unwrap());
p1 += 2;
let _severity = u16::from_be_bytes(buf[p1..p1 + 2].try_into().unwrap());
p1 += 2;
let ts1a = u32::from_be_bytes(buf[p1..p1 + 4].try_into().unwrap());
p1 += 4;
let ts1b = u32::from_be_bytes(buf[p1..p1 + 4].try_into().unwrap());
p1 += 4;
let ts1 = ts1a as u64 * SEC + ts1b as u64 + EPICS_EPOCH_OFFSET;
p1 += 4;
let value = f64::from_be_bytes(buf[p1..p1 + 8].try_into().unwrap());
p1 += 8;
ntot += 1;
if ts1 >= range.beg && ts1 < range.end {
evs.tss.push(ts1);
evs.values.push(value);
}
}
rb.adv(n3);
//info!("parsed block with {} / {} events", ntot, evs.tss.len());
let evs = ScalarPlainEvents::Double(evs);
{
let dpos = datafile_header.pos.0 + DATA_HEADER_LEN_ON_DISK as u64;
if rb.rp_abs() != dpos {
warn!("read_data2 rb not positioned {} vs {}", rb.rp_abs(), dpos);
rb.seek(dpos).await?;
}
}
let numsamples = datafile_header.num_samples as usize;
let dbrcount = datafile_header.dbr_count;
let dbrt = datafile_header.dbr_type.clone();
let dbrt = if let DbrType::DbrTimeEnum = dbrt {
DbrType::DbrTimeShort
} else {
dbrt
};
let msg_len = dbrt.msg_len(dbrcount);
{
if (datafile_header.buf_size as usize) < numsamples * msg_len {
return Err(Error::with_msg_no_trace(format!(
"buffer too small for data {} {} {}",
datafile_header.buf_size, numsamples, msg_len
)));
}
}
if dbrcount == 0 {
return Err(Error::with_msg_no_trace(format!("unexpected dbrcount {}", dbrcount)));
}
let res = match &dbrt {
DbrType::DbrTimeChar => read_msg!(i8, ex_s_i8, ex_v_i8, Byte, rb, msg_len, numsamples, dbrt, dbrcount),
DbrType::DbrTimeShort => read_msg!(i16, ex_s_i16, ex_v_i16, Short, rb, msg_len, numsamples, dbrt, dbrcount),
DbrType::DbrTimeLong => read_msg!(i32, ex_s_i32, ex_v_i32, Int, rb, msg_len, numsamples, dbrt, dbrcount),
DbrType::DbrTimeFloat => read_msg!(f32, ex_s_f32, ex_v_f32, Float, rb, msg_len, numsamples, dbrt, dbrcount),
DbrType::DbrTimeDouble => read_msg!(f64, ex_s_f64, ex_v_f64, Double, rb, msg_len, numsamples, dbrt, dbrcount),
DbrType::DbrTimeString => {
if dbrcount == 1 {
// TODO
let evs = ScalarPlainEvents::Byte(EventValues::empty());
let plain = PlainEvents::Scalar(evs);
let item = EventsItem::Plain(plain);
item
} else {
let msg = format!("dbr_count {:?} not yet supported", datafile_header.dbr_count);
error!("{}", msg);
return Err(Error::with_msg_no_trace(msg));
// TODO
let evs = WavePlainEvents::Double(WaveEvents::empty());
let plain = PlainEvents::Wave(evs);
let item = EventsItem::Plain(plain);
item
}
}
_ => {
DbrType::DbrTimeEnum | DbrType::DbrShort | DbrType::DbrString | DbrType::DbrStsFloat => {
let msg = format!("Type {:?} not yet supported", datafile_header.dbr_type);
error!("{}", msg);
return Err(Error::with_msg_no_trace(msg));

View File

@@ -200,7 +200,6 @@ impl ScanIndexFiles {
}
} else if rows.len() == 1 {
let rid = rows[0].try_get(0)?;
info!("select done: {}", rid);
rid
} else {
return Err(Error::with_msg("not unique"));
@@ -360,7 +359,6 @@ impl ScanChannels {
}
} else if rows.len() == 1 {
let rid = rows[0].try_get(0)?;
info!("select done: {}", rid);
rid
} else {
return Err(Error::with_msg("not unique"));

View File

@@ -150,6 +150,6 @@ impl<F> fmt::Debug for RingBuf<F> {
impl<F> Drop for RingBuf<F> {
fn drop(&mut self) {
info!("Drop {:?}", self);
trace!("Drop {:?}", self);
}
}