Reduce log chatter

Dominik Werder
2021-11-09 19:18:03 +01:00
parent fa86c7ab7d
commit 2f608a8a4e
32 changed files with 388 additions and 194 deletions
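
The pattern throughout this commit is to demote per-request log statements from info! (and one warn!) to debug!, so they no longer appear under a normal production filter. Assuming netpod::log re-exports standard log/tracing-style level macros (an assumption, not confirmed by this diff), a debug! record is discarded by a subscriber configured at the default INFO level and only shows up when debug output is explicitly enabled. A minimal stand-alone sketch using the log facade with env_logger, illustrative only and not this repository's actual setup:

use log::{debug, info};

fn main() {
    // Default to "info" unless RUST_LOG overrides it: Debug and Trace records
    // are discarded, which is what demoting info! to debug! relies on.
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();

    info!("covering range found");  // printed at the default level
    debug!("covering range found"); // only printed with e.g. RUST_LOG=debug
}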

View File

@@ -2,7 +2,10 @@
name = "disk"
version = "0.0.1-a.1"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2018"
edition = "2021"
[lib]
path = "src/disk.rs"
[dependencies]
serde = { version = "1.0", features = ["derive"] }

View File

@@ -82,7 +82,7 @@ impl ChannelExecFunction for BinnedBinaryChannelExec {
let perf_opts = PerfOpts { inmem_bufcap: 512 };
let souter = match PreBinnedPatchRange::covering_range(self.query.range().clone(), self.query.bin_count()) {
Ok(Some(pre_range)) => {
info!("BinnedBinaryChannelExec found pre_range: {:?}", pre_range);
debug!("BinnedBinaryChannelExec found pre_range: {:?}", pre_range);
if range.grid_spec.bin_t_len() < pre_range.grid_spec.bin_t_len() {
let msg = format!(
"BinnedBinaryChannelExec incompatible ranges:\npre_range: {:?}\nrange: {:?}",
@@ -109,7 +109,7 @@ impl ChannelExecFunction for BinnedBinaryChannelExec {
Ok(Box::pin(s) as Pin<Box<dyn Stream<Item = Result<Bytes, Error>> + Send>>)
}
Ok(None) => {
-info!(
+debug!(
"BinnedBinaryChannelExec no covering range for prebinned, merge from remotes instead {:?}",
range
);
@@ -326,7 +326,7 @@ impl ChannelExecFunction for BinnedJsonChannelExec {
let perf_opts = PerfOpts { inmem_bufcap: 512 };
let souter = match PreBinnedPatchRange::covering_range(self.query.range().clone(), self.query.bin_count()) {
Ok(Some(pre_range)) => {
info!("BinnedJsonChannelExec found pre_range: {:?}", pre_range);
debug!("BinnedJsonChannelExec found pre_range: {:?}", pre_range);
if range.grid_spec.bin_t_len() < pre_range.grid_spec.bin_t_len() {
let msg = format!(
"BinnedJsonChannelExec incompatible ranges:\npre_range: {:?}\nrange: {:?}",
@@ -354,7 +354,7 @@ impl ChannelExecFunction for BinnedJsonChannelExec {
Ok(Box::pin(s) as Pin<Box<dyn Stream<Item = Result<Bytes, Error>> + Send>>)
}
Ok(None) => {
-info!(
+debug!(
"BinnedJsonChannelExec no covering range for prebinned, merge from remotes instead {:?}",
range
);

View File

@@ -342,7 +342,7 @@ impl FromUrl for BinnedQuery {
.parse()
.map_err(|e| Error::with_msg(format!("can not parse doLog {:?}", e)))?,
};
info!("BinnedQuery::from_url {:?}", ret);
debug!("BinnedQuery::from_url {:?}", ret);
Ok(ret)
}
}

View File

@@ -355,7 +355,7 @@ where
Ok(item) => match item {
StreamItem::Log(item) => {
if do_log {
info!("collect_plain_events_json log {:?}", item);
debug!("collect_plain_events_json log {:?}", item);
}
}
StreamItem::Stats(item) => match item {
@@ -396,7 +396,7 @@ where
}
}
let ret = serde_json::to_value(collector.result()?)?;
info!("Total duration: {:?}", total_duration);
debug!("Total duration: {:?}", total_duration);
Ok(ret)
}

View File

@@ -247,7 +247,7 @@ async fn open_files_inner(
}
}
let h = OpenedFileSet { timebin: tb, files: a };
-info!(
+debug!(
"----- open_files_inner giving OpenedFileSet with {} files",
h.files.len()
);
@@ -351,13 +351,13 @@ async fn open_expanded_files_inner(
for path in paths::datapaths_for_timebin(tb, &channel_config, &node).await? {
let w = position_file(&path, range, true, false).await?;
if w.found {
info!("----- open_expanded_files_inner w.found for {:?}", path);
debug!("----- open_expanded_files_inner w.found for {:?}", path);
a.push(w.file);
found_pre = true;
}
}
let h = OpenedFileSet { timebin: tb, files: a };
-info!(
+debug!(
"----- open_expanded_files_inner giving OpenedFileSet with {} files",
h.files.len()
);
@@ -387,7 +387,8 @@ async fn open_expanded_files_inner(
p1 += 1;
}
} else {
info!("Could not find some event before the requested range, fall back to standard file list.");
// TODO emit statsfor this or log somewhere?
debug!("Could not find some event before the requested range, fall back to standard file list.");
// Try to locate files according to non-expand-algorithm.
open_files_inner(chtx, range, &channel_config, node).await?;
}
@@ -423,7 +424,7 @@ fn expanded_file_list() {
while let Some(file) = files.next().await {
match file {
Ok(k) => {
info!("opened file: {:?}", k);
debug!("opened file: {:?}", k);
paths.push(k.files);
}
Err(e) => {

View File

@@ -300,10 +300,10 @@ impl NeedMinBuffer {
}
}
-// TODO remove this again
+// TODO collect somewhere else
impl Drop for NeedMinBuffer {
fn drop(&mut self) {
info!("NeedMinBuffer Drop Stats:\nbuf_len_histo: {:?}", self.buf_len_histo);
debug!("NeedMinBuffer Drop Stats:\nbuf_len_histo: {:?}", self.buf_len_histo);
}
}
@@ -355,7 +355,8 @@ impl Stream for NeedMinBuffer {
Ready(Some(Err(e.into())))
}
Ready(None) => {
info!("NeedMinBuffer histo: {:?}", self.buf_len_histo);
// TODO collect somewhere
debug!("NeedMinBuffer histo: {:?}", self.buf_len_histo);
Ready(None)
}
Pending => Pending,
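
Several hunks in this commit demote Drop-time statistics to debug! and leave a "TODO collect somewhere" note. One possible direction for that TODO, shown purely as a hypothetical sketch with illustrative names (this is not the crate's API), is to merge each stream's histogram into a shared sink on drop instead of logging it:

use std::sync::{Arc, Mutex};

// Hypothetical aggregate; the real code might instead route this through the
// stats items it already sends downstream.
#[derive(Default)]
struct BufLenStats {
    histo: Vec<u64>,
}

struct NeedMinBufferLike {
    buf_len_histo: Vec<u64>,
    stats: Arc<Mutex<BufLenStats>>,
}

impl Drop for NeedMinBufferLike {
    fn drop(&mut self) {
        // Accumulate into the sink instead of logging, keeping the numbers
        // without emitting a log line for every dropped stream.
        if let Ok(mut s) = self.stats.lock() {
            if s.histo.len() < self.buf_len_histo.len() {
                s.histo.resize(self.buf_len_histo.len(), 0);
            }
            for (acc, v) in s.histo.iter_mut().zip(&self.buf_len_histo) {
                *acc += *v;
            }
        }
    }
}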

View File

@@ -114,7 +114,7 @@ impl Stream for EventChunkerMultifile {
let file = ofs.files.pop().unwrap();
let path = file.path;
let msg = format!("handle OFS {:?}", ofs);
info!("{}", msg);
debug!("{}", msg);
let item = LogItem::quick(Level::INFO, msg);
match file.file {
Some(file) => {
@@ -141,12 +141,12 @@ impl Stream for EventChunkerMultifile {
Ready(Some(Ok(StreamItem::Log(item))))
} else if ofs.files.len() == 0 {
let msg = format!("handle OFS {:?} NO FILES", ofs);
info!("{}", msg);
debug!("{}", msg);
let item = LogItem::quick(Level::INFO, msg);
Ready(Some(Ok(StreamItem::Log(item))))
} else {
let msg = format!("handle OFS MERGED {:?}", ofs);
warn!("{}", msg);
debug!("{}", msg);
let item = LogItem::quick(Level::INFO, msg);
let mut chunkers = vec![];
for of in ofs.files {
@@ -255,7 +255,8 @@ mod test {
Ok(item) => match item {
StreamItem::DataItem(item) => match item {
RangeCompletableItem::Data(item) => {
info!("item: {:?}", item.tss.iter().map(|x| x / MS).collect::<Vec<_>>());
// TODO assert more
debug!("item: {:?}", item.tss.iter().map(|x| x / MS).collect::<Vec<_>>());
event_count += item.tss.len();
for ts in item.tss {
tss.push(ts);
@@ -280,7 +281,8 @@ mod test {
end: DAY + MS * 100,
};
let res = read_expanded_for_range(range, 0)?;
info!("got {:?}", res.1);
// TODO assert more
debug!("got {:?}", res.1);
if res.0 != 3 {
Err(Error::with_msg(format!("unexpected number of events: {}", res.0)))?;
}

View File

@@ -46,10 +46,10 @@ pub struct EventChunker {
unordered_warn_count: usize,
}
-// TODO remove again, use it explicitly
impl Drop for EventChunker {
fn drop(&mut self) {
-info!(
+// TODO collect somewhere
+debug!(
"EventChunker Drop Stats:\ndecomp_dt_histo: {:?}\nitem_len_emit_histo: {:?}",
self.decomp_dt_histo, self.item_len_emit_histo
);

View File

@@ -39,10 +39,10 @@ pub struct MergedStream<S, ITY> {
stats_items: VecDeque<StatsItem>,
}
-// TODO get rid, log info explicitly.
impl<S, ITY> Drop for MergedStream<S, ITY> {
fn drop(&mut self) {
-info!(
+// TODO collect somewhere
+debug!(
"MergedStream Drop Stats:\nbatch_len_emit_histo: {:?}",
self.batch_len_emit_histo
);
@@ -204,7 +204,7 @@ where
for ii in 0..batch.len() {
aa.push(batch.ts(ii));
}
info!("MergedBlobsStream A emits {} events tss {:?}", batch.len(), aa);
debug!("MergedBlobsStream A emits {} events tss {:?}", batch.len(), aa);
};
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(batch)))))
} else {
@@ -265,7 +265,7 @@ where
for ii in 0..batch.len() {
aa.push(batch.ts(ii));
}
info!("MergedBlobsStream B emits {} events tss {:?}", batch.len(), aa);
debug!("MergedBlobsStream B emits {} events tss {:?}", batch.len(), aa);
};
Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(batch)))))
} else {
@@ -374,9 +374,10 @@ mod test {
let mut merged = MergedStream::new(inps);
let mut cevs = CollectedEvents { tss: vec![] };
let mut i1 = 0;
+// TODO assert more
while let Some(item) = merged.next().await {
if let Ok(StreamItem::DataItem(RangeCompletableItem::Data(item))) = item {
info!("item: {:?}", item);
debug!("item: {:?}", item);
for ts in item.tss {
cevs.tss.push(ts);
}
@@ -386,8 +387,8 @@ mod test {
break;
}
}
info!("read {} data items", i1);
info!("cevs: {:?}", cevs);
debug!("read {} data items", i1);
debug!("cevs: {:?}", cevs);
Ok(cevs)
}

View File

@@ -32,7 +32,7 @@ where
Sitemty<<ENP as EventsNodeProcessor>::Output>: FrameType,
{
pub fn new(evq: RawEventsQuery, perf_opts: PerfOpts, cluster: Cluster) -> Self {
info!("MergedFromRemotes evq {:?}", evq);
debug!("MergedFromRemotes evq {:?}", evq);
let mut tcp_establish_futs = vec![];
for node in &cluster.nodes {
let f = x_processed_stream_from_node::<ENP>(evq.clone(), perf_opts.clone(), node.clone());

View File

@@ -28,7 +28,7 @@ where
<ENP as EventsNodeProcessor>::Output: Unpin + 'static,
Result<StreamItem<RangeCompletableItem<<ENP as EventsNodeProcessor>::Output>>, err::Error>: FrameType,
{
netpod::log::info!("x_processed_stream_from_node to: {}:{}", node.host, node.port_raw);
netpod::log::debug!("x_processed_stream_from_node to: {}:{}", node.host, node.port_raw);
let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
let qjs = serde_json::to_string(&query)?;
let (netin, mut netout) = net.into_split();
@@ -48,7 +48,7 @@ pub async fn x_processed_event_blobs_stream_from_node(
perf_opts: PerfOpts,
node: Node,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>, Error> {
-netpod::log::info!(
+netpod::log::debug!(
"x_processed_event_blobs_stream_from_node to: {}:{}",
node.host,
node.port_raw