Factor usage of the common error type out further

This commit is contained in:
Dominik Werder
2021-12-08 13:20:07 +01:00
parent c39af81097
commit 3c64eafd14
56 changed files with 751 additions and 354 deletions

View File

@@ -15,7 +15,7 @@ use crate::timed::Timed;
use crate::wrap_task;
use async_channel::{Receiver, Sender};
use commonio::StatsChannel;
use err::Error;
use err::{ErrStr, Error};
use futures_util::StreamExt;
use items::{StreamItem, WithLen};
use netpod::log::*;
@@ -151,7 +151,7 @@ pub fn list_all_channels(node: &ChannelArchiver) -> Receiver<Result<ListChannelI
index_path: index_path.to_str().unwrap().into(),
matches: mm,
};
tx.send(Ok(item)).await?;
tx.send(Ok(item)).await.errstr()?;
//info!("{:?} parent {:?} channel {}", index_path, index_path.parent(), ch);
//break;
}
@@ -162,15 +162,29 @@ pub fn list_all_channels(node: &ChannelArchiver) -> Receiver<Result<ListChannelI
rx
}
struct ErrWrap(tokio_postgres::Error);
impl From<tokio_postgres::Error> for ErrWrap {
fn from(x: tokio_postgres::Error) -> Self {
Self(x)
}
}
impl From<ErrWrap> for Error {
fn from(_: ErrWrap) -> Self {
todo!()
}
}
pub async fn channel_config_from_db(
q: &ChannelConfigQuery,
conf: &ChannelArchiver,
) -> Result<ChannelConfigResponse, Error> {
let dbc = database_connect(&conf.database).await?;
let sql = "select config from channels where name = $1";
let rows = dbc.query(sql, &[&q.channel.name()]).await?;
let rows = dbc.query(sql, &[&q.channel.name()]).await.errstr()?;
if let Some(row) = rows.first() {
let cfg: JsVal = row.try_get(0)?;
let cfg: JsVal = row.try_get(0).errstr()?;
let val = cfg
.get("shape")
.ok_or_else(|| Error::with_msg_no_trace("shape not found on config"))?;

View File

@@ -193,7 +193,7 @@ mod test {
use netpod::Database;
#[test]
fn find_ref_1() -> Result<(), Error> {
fn find_ref_1() -> Result<(), err::Error> {
let fut = async move {
let channel = Channel {
backend: "sls-archive".into(),

View File

@@ -1,5 +1,5 @@
use crate::archeng::indexfiles::database_connect;
use err::Error;
use err::{ErrStr, Error};
use futures_core::{Future, Stream};
use futures_util::{FutureExt, StreamExt};
use netpod::log::*;
@@ -85,7 +85,7 @@ impl Stream for ChannelNameStream {
"select rowid, name from channels where config = '{}'::jsonb and name > $1 order by name limit 64",
&[&max_name],
)
.await?;
.await.errstr()?;
Ok::<_, Error>(rows)
};
self.select_fut = Some(Box::pin(fut));
@@ -183,7 +183,8 @@ impl Stream for ConfigStream {
let fut = async move {
let dbc = database_connect(&dbconf).await?;
dbc.query("update channels set config = $2 where name = $1", &[&name, &config])
.await?;
.await
.errstr()?;
Ok(())
};
self.update_fut = Some(Box::pin(fut));
@@ -197,7 +198,8 @@ impl Stream for ConfigStream {
let fut = async move {
let dbc = database_connect(&dbconf).await?;
dbc.query("update channels set config = $2 where name = $1", &[&name, &config])
.await?;
.await
.errstr()?;
Ok(())
};
self.update_fut = Some(Box::pin(fut));

View File

@@ -109,24 +109,36 @@ impl DbrType {
/// Parsed header of one data block in a Channel Archiver data file.
///
/// Many fields are read from disk but not yet consumed anywhere in the code,
/// so unused-field warnings are silenced once at the struct level instead of
/// repeating `#[allow(unused)]` on every field (matches the
/// `#[allow(dead_code)]`-on-struct style used elsewhere in this crate).
#[allow(unused)]
#[derive(Debug)]
pub struct DatafileHeader {
    // Position of this header within the data file.
    pos: DataheaderPos,
    dir_offset: u32,
    // Should be the absolute file position of the next data header,
    // together with `fname_next`.
    // But unfortunately not always set?
    next_offset: u32,
    prev_offset: u32,
    curr_offset: u32,
    pub num_samples: u32,
    ctrl_info_offset: u32,
    buf_size: u32,
    buf_free: u32,
    dbr_type: DbrType,
    dbr_count: usize,
    period: f64,
    ts_beg: Nanos,
    ts_end: Nanos,
    ts_next_file: Nanos,
    fname_next: String,
    fname_prev: String,
}

View File

@@ -2,7 +2,7 @@ use crate::timed::Timed;
use crate::wrap_task;
use async_channel::Receiver;
use commonio::{open_read, read, StatsChannel};
use err::Error;
use err::{ErrStr, Error};
use futures_core::{Future, Stream};
use futures_util::stream::unfold;
use netpod::log::*;
@@ -35,19 +35,19 @@ pub fn list_index_files(node: &ChannelArchiver) -> Receiver<Result<PathBuf, Erro
let ft = e.file_type().await?;
if ft.is_file() {
if e.file_name().to_string_lossy() == "index" {
tx.send(Ok(e.path())).await?;
tx.send(Ok(e.path())).await.errstr()?;
}
}
}
} else if ft.is_file() {
if e.file_name().to_string_lossy() == "index" {
tx.send(Ok(e.path())).await?;
tx.send(Ok(e.path())).await.errstr()?;
}
}
}
} else if ft.is_file() {
if e.file_name().to_string_lossy() == "index" {
tx.send(Ok(e.path())).await?;
tx.send(Ok(e.path())).await.errstr()?;
}
}
}
@@ -105,7 +105,7 @@ pub async fn get_level_1(lev0: Vec<PathBuf>) -> Result<Vec<PathBuf>, Error> {
pub async fn database_connect(db_config: &Database) -> Result<PgClient, Error> {
let d = db_config;
let uri = format!("postgresql://{}:{}@{}:{}/{}", d.user, d.pass, d.host, 5432, d.name);
let (cl, conn) = tokio_postgres::connect(&uri, tokio_postgres::NoTls).await?;
let (cl, conn) = tokio_postgres::connect(&uri, tokio_postgres::NoTls).await.errstr()?;
// TODO monitor connection drop.
let _cjh = tokio::spawn(async move {
if let Err(e) = conn.await {
@@ -179,27 +179,29 @@ impl ScanIndexFiles {
let ps = p.to_string_lossy();
let rows = dbc
.query("select rowid from indexfiles where path = $1", &[&ps])
.await?;
.await
.errstr()?;
let rid: i64 = if rows.len() == 0 {
let rows = dbc
.query(
"insert into indexfiles (path) values ($1) on conflict do nothing returning rowid",
&[&ps],
)
.await?;
.await
.errstr()?;
if rows.len() == 0 {
error!("insert failed, maybe concurrent insert?");
// TODO try this channel again? or the other process handled it?
err::todoval()
} else if rows.len() == 1 {
let rid = rows[0].try_get(0)?;
let rid = rows[0].try_get(0).errstr()?;
info!("insert done: {}", rid);
rid
} else {
return Err(Error::with_msg("not unique"));
}
} else if rows.len() == 1 {
let rid = rows[0].try_get(0)?;
let rid = rows[0].try_get(0).errstr()?;
rid
} else {
return Err(Error::with_msg("not unique"));
@@ -313,7 +315,7 @@ impl ScanChannels {
let dbc = database_connect(&self.conf.database).await?;
let sql =
"select path from indexfiles where ts_last_channel_search < now() - interval '1 hour' limit 1";
let rows = dbc.query(sql, &[]).await?;
let rows = dbc.query(sql, &[]).await.errstr()?;
let mut paths = vec![];
for row in rows {
paths.push(row.get::<_, String>(0));
@@ -329,36 +331,38 @@ impl ScanChannels {
if let Some(path) = paths.pop() {
let rows = dbc
.query("select rowid from indexfiles where path = $1", &[&path])
.await?;
.await
.errstr()?;
if rows.len() == 1 {
let indexfile_rid: i64 = rows[0].try_get(0)?;
let indexfile_rid: i64 = rows[0].try_get(0).errstr()?;
let mut file = open_read(path.clone().into(), stats).await?;
let mut basics = super::indextree::IndexFileBasics::from_file(path, &mut file, stats).await?;
let entries = basics.all_channel_entries(&mut file, stats).await?;
for entry in entries {
let rows = dbc
.query("select rowid from channels where name = $1", &[&entry.channel_name()])
.await?;
.await
.errstr()?;
let rid: i64 = if rows.len() == 0 {
let rows = dbc
.query(
"insert into channels (name) values ($1) on conflict do nothing returning rowid",
&[&entry.channel_name()],
)
.await?;
.await.errstr()?;
if rows.len() == 0 {
error!("insert failed, maybe concurrent insert?");
// TODO try this channel again? or the other process handled it?
err::todoval()
} else if rows.len() == 1 {
let rid = rows[0].try_get(0)?;
let rid = rows[0].try_get(0).errstr()?;
info!("insert done: {}", rid);
rid
} else {
return Err(Error::with_msg("not unique"));
}
} else if rows.len() == 1 {
let rid = rows[0].try_get(0)?;
let rid = rows[0].try_get(0).errstr()?;
rid
} else {
return Err(Error::with_msg("not unique"));
@@ -367,13 +371,15 @@ impl ScanChannels {
"insert into channel_index_map (channel, index) values ($1, $2) on conflict do nothing",
&[&rid, &indexfile_rid],
)
.await?;
.await
.errstr()?;
}
dbc.query(
"update indexfiles set ts_last_channel_search = now() where rowid = $1",
&[&indexfile_rid],
)
.await?;
.await
.errstr()?;
}
}
self.steps = Done;
@@ -410,8 +416,14 @@ enum RetClass {
#[derive(Debug)]
enum IndexCat {
Machine { rc: RetClass },
Beamline { rc: RetClass, name: String },
Machine {
rc: RetClass,
},
#[allow(unused)]
Beamline {
rc: RetClass,
name: String,
},
}
#[derive(Debug)]
@@ -532,10 +544,10 @@ fn categorize_index_files(list: &Vec<String>) -> Result<Vec<IndexFile>, Error> {
pub async fn index_file_path_list(channel: Channel, dbconf: Database) -> Result<Vec<PathBuf>, Error> {
let dbc = database_connect(&dbconf).await?;
let sql = "select i.path from indexfiles i, channels c, channel_index_map m where c.name = $1 and m.channel = c.rowid and i.rowid = m.index";
let rows = dbc.query(sql, &[&channel.name()]).await?;
let rows = dbc.query(sql, &[&channel.name()]).await.errstr()?;
let mut index_paths = vec![];
for row in rows {
index_paths.push(row.try_get(0)?);
index_paths.push(row.try_get(0).errstr()?);
}
let list = categorize_index_files(&index_paths)?;
let ret = list.into_iter().map(|k| k.path).collect();

View File

@@ -1,17 +1,17 @@
use crate::archeng::backreadbuf::BackReadBuf;
use crate::archeng::{format_hex_block, name_hash, readu16, readu32, readu64, StatsChannel, EPICS_EPOCH_OFFSET};
use commonio::open_read;
use commonio::ringbuf::RingBuf;
use err::Error;
use netpod::{log::*, NanoRange};
use netpod::{timeunits::SEC, FilePos, Nanos};
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::{FilePos, NanoRange, Nanos};
use std::collections::VecDeque;
use std::fmt;
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};
use tokio::fs::File;
use super::backreadbuf::BackReadBuf;
pub trait HeaderVersion: Send + Sync + fmt::Debug {
fn version(&self) -> u8;
fn read_offset(&self, buf: &[u8], pos: usize) -> u64;
@@ -71,6 +71,7 @@ pub struct NamedHashChannelEntry {
next: u64,
id_rtree_pos: u64,
channel_name: String,
#[allow(dead_code)]
id_txt: String,
}
@@ -80,6 +81,7 @@ impl NamedHashChannelEntry {
}
}
#[allow(dead_code)]
#[derive(Debug)]
pub struct IndexFileBasics {
path: PathBuf,
@@ -778,7 +780,9 @@ pub async fn read_rtree_entrypoint(
/// Statistics collected during an index-tree search: elapsed wall time and the
/// number of tree nodes read. Fields are currently only inspected via `Debug`
/// output, hence the struct-level `#[allow(dead_code)]` (replaces the two
/// per-field attributes).
#[allow(dead_code)]
#[derive(Debug)]
pub struct TreeSearchStats {
    duration: Duration,
    node_reads: usize,
}

49
archapp/src/err.rs Normal file
View File

@@ -0,0 +1,49 @@
use std::fmt;
/// Crate-local error type: a thin newtype over the workspace-wide `err::Error`.
/// Exists so this crate can define its own `From` conversions for third-party
/// error types (which the orphan rule would forbid on `err::Error` itself).
pub struct Error(err::Error);
impl Error {
    /// Build an error carrying the given message.
    /// NOTE(review): presumably this variant also captures a trace, in
    /// contrast to `with_msg_no_trace` — confirm against `err::Error`.
    pub fn with_msg<S: Into<String>>(s: S) -> Self {
        Self(err::Error::with_msg(s))
    }
    /// Build an error carrying the given message, skipping trace capture.
    pub fn with_msg_no_trace<S: Into<String>>(s: S) -> Self {
        Self(err::Error::with_msg_no_trace(s))
    }
}
impl fmt::Debug for Error {
    // Delegate formatting to the wrapped inner error.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(fmt)
    }
}
// Unwrap back into the workspace-wide error type at crate boundaries.
impl From<Error> for err::Error {
    fn from(x: Error) -> Self {
        x.0
    }
}
// The conversions below all stringify the source error via `Display`.
impl From<std::string::FromUtf8Error> for Error {
    fn from(k: std::string::FromUtf8Error) -> Self {
        Self::with_msg(k.to_string())
    }
}
impl From<std::io::Error> for Error {
    fn from(k: std::io::Error) -> Self {
        Self::with_msg(k.to_string())
    }
}
impl<T> From<async_channel::SendError<T>> for Error {
    fn from(k: async_channel::SendError<T>) -> Self {
        Self::with_msg(k.to_string())
    }
}
impl From<serde_json::Error> for Error {
    fn from(k: serde_json::Error) -> Self {
        Self::with_msg(k.to_string())
    }
}

View File

@@ -3,7 +3,7 @@ use crate::parse::multi::parse_all_ts;
use crate::parse::PbFileReader;
use crate::storagemerge::StorageMerge;
use chrono::{TimeZone, Utc};
use err::Error;
use err::{ErrStr, Error};
use futures_core::Stream;
use futures_util::StreamExt;
use items::binnedevents::{MultiBinWaveEvents, SingleBinWaveEvents, XBinnedEvents};
@@ -393,7 +393,7 @@ pub async fn make_single_event_pipe(
}
let ei2 = ei.x_aggregate(&evq.agg_kind);
let g = Ok(StreamItem::DataItem(RangeCompletableItem::Data(ei2)));
tx.send(g).await?;
tx.send(g).await.errstr()?;
if let Some(t) = tslast {
if t >= evq.range.end {
info!("after requested range, break");

View File

@@ -3,6 +3,7 @@ pub mod generated;
#[cfg(not(feature = "devread"))]
pub mod generated {}
pub mod archeng;
pub mod err;
pub mod events;
#[cfg(feature = "devread")]
pub mod parse;
@@ -14,14 +15,13 @@ pub mod storagemerge;
pub mod test;
pub mod timed;
use std::sync::atomic::{AtomicUsize, Ordering};
use ::err::Error;
use async_channel::Sender;
use err::Error;
use futures_core::Future;
use netpod::log::*;
#[cfg(not(feature = "devread"))]
pub use parsestub as parse;
use std::sync::atomic::{AtomicUsize, Ordering};
fn unescape_archapp_msg(inp: &[u8], mut ret: Vec<u8>) -> Result<Vec<u8>, Error> {
ret.clear();

View File

@@ -6,7 +6,7 @@ use crate::unescape_archapp_msg;
use archapp_xc::*;
use async_channel::{bounded, Receiver};
use chrono::{TimeZone, Utc};
use err::Error;
use err::{ErrStr, Error};
use items::eventsitem::EventsItem;
use items::eventvalues::EventValues;
use items::plainevents::{PlainEvents, ScalarPlainEvents, WavePlainEvents};
@@ -445,7 +445,7 @@ pub async fn scan_files_inner(
}
},
Err(e) => {
tx.send(Err(e.into())).await?;
tx.send(Err(e.into())).await.errstr()?;
}
}
}
@@ -465,12 +465,15 @@ pub async fn scan_files_inner(
if channel_path != normalized_channel_name {
{
let s = format!("{} - {}", channel_path, normalized_channel_name);
tx.send(Ok(Box::new(serde_json::to_value(&s)?) as ItemSerBox)).await?;
tx.send(Ok(Box::new(serde_json::to_value(&s)?) as ItemSerBox))
.await
.errstr()?;
}
tx.send(Ok(
Box::new(JsonValue::String(format!("MISMATCH --------------------"))) as ItemSerBox,
))
.await?;
.await
.errstr()?;
} else {
if false {
dbconn::insert_channel(channel_path.into(), ndi.facility, &dbc).await?;
@@ -484,7 +487,8 @@ pub async fn scan_files_inner(
pbr.channel_name(),
msg.variant_name()
))?) as ItemSerBox))
.await?;
.await
.errstr()?;
}
}
}

View File

@@ -1,6 +1,5 @@
use std::time::Instant;
use netpod::log::*;
use std::time::Instant;
pub struct Timed {
name: String,