Imagebuffer reads, time binning

This commit is contained in:
Dominik Werder
2023-01-19 20:05:25 +01:00
parent 9c68476626
commit 8495853f8e
26 changed files with 341 additions and 127 deletions

View File

@@ -17,11 +17,12 @@ pub mod read3;
pub mod read4;
pub mod streamlog;
use bytes::{Bytes, BytesMut};
use bytes::Bytes;
use bytes::BytesMut;
use err::Error;
use futures_core::Stream;
use futures_util::future::FusedFuture;
use futures_util::{FutureExt, TryFutureExt};
use futures_util::{FutureExt, StreamExt, TryFutureExt};
use netpod::log::*;
use netpod::ReadSys;
use netpod::{ChannelConfig, DiskIoTune, Node, Shape};
@@ -32,12 +33,15 @@ use std::mem;
use std::os::unix::prelude::AsRawFd;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::task::Context;
use std::task::Poll;
use std::time::Instant;
use streams::dtflags::{ARRAY, BIG_ENDIAN, COMPRESSION, SHAPE};
use streams::filechunkread::FileChunkRead;
use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, ReadBuf};
use tokio::fs::File;
use tokio::fs::OpenOptions;
use tokio::io::ReadBuf;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt};
use tokio::sync::mpsc;
// TODO transform this into a self-test or remove.
@@ -230,6 +234,55 @@ impl Stream for FileContentStream {
}
}
/// Spawn a detached background task that reads `file` in 256 KiB chunks and
/// forwards them over `tx`.
///
/// The task stops on EOF, on the first read error, or when the receiving side
/// of the channel is dropped. On EOF a final empty chunk is still sent (as the
/// original read loop did) so downstream consumers that use an empty chunk as
/// an end-of-file sentinel keep working; the channel then closes when `tx` is
/// dropped, ending the stream.
fn start_read5(file: File, tx: async_channel::Sender<Result<FileChunkRead, Error>>) -> Result<(), Error> {
    let fut = async move {
        info!("start_read5 BEGIN");
        let mut file = file;
        loop {
            let mut buf = BytesMut::new();
            buf.resize(1024 * 256, 0);
            match file.read(&mut buf).await {
                Ok(n) => {
                    buf.truncate(n);
                    let item = FileChunkRead::with_buf(buf);
                    if tx.send(Ok(item)).await.is_err() {
                        // Receiver was dropped; nobody is listening any more.
                        break;
                    }
                    // BUGFIX: a zero-length read means EOF. The previous
                    // version kept looping here, sending empty chunks forever.
                    if n == 0 {
                        break;
                    }
                }
                Err(e) => {
                    // Forward the error (best effort) and stop: retrying the
                    // same failing read would spin and flood the channel.
                    let _ = tx.send(Err(e.into())).await;
                    break;
                }
            }
        }
        info!("start_read5 DONE");
    };
    tokio::task::spawn(fut);
    Ok(())
}
/// File-chunk stream backed by a bounded channel that is fed by a background
/// reader task (spawned via `start_read5`).
pub struct FileContentStream5 {
    // Receiving end of the channel; the sending end lives in the spawned
    // reader task, and the stream ends once that task drops it.
    rx: async_channel::Receiver<Result<FileChunkRead, Error>>,
}
impl FileContentStream5 {
    /// Set up a bounded channel (capacity 32) and hand the sending side to a
    /// background reader task; the returned value streams the chunks.
    ///
    /// `_disk_io_tune` is accepted for interface parity with the other
    /// `FileContentStream*` variants but is not used by this implementation.
    pub fn new(file: File, _disk_io_tune: DiskIoTune) -> Result<Self, Error> {
        let (tx, rx) = async_channel::bounded(32);
        start_read5(file, tx)?;
        Ok(Self { rx })
    }
}
impl Stream for FileContentStream5 {
type Item = Result<FileChunkRead, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
self.rx.poll_next_unpin(cx)
}
}
enum FCS2 {
Idle,
Reading(
@@ -622,6 +675,10 @@ pub fn file_content_stream(
let s = FileContentStream4::new(file, disk_io_tune);
Box::pin(s) as _
}
ReadSys::Read5 => {
let s = FileContentStream5::new(file, disk_io_tune).unwrap();
Box::pin(s) as _
}
}
}

View File

@@ -47,6 +47,7 @@ impl EventChunkerMultifile {
expand: bool,
do_decompress: bool,
) -> Self {
info!("EventChunkerMultifile do_decompress {do_decompress}");
let file_chan = if expand {
open_expanded_files(&range, &channel_config, node)
} else {
@@ -186,8 +187,11 @@ impl Stream for EventChunkerMultifile {
let item = LogItem::quick(Level::INFO, msg);
Ready(Some(Ok(StreamItem::Log(item))))
} else {
let msg = format!("handle OFS MERGED {:?}", ofs);
let msg = format!("handle OFS MERGED timebin {}", ofs.timebin);
info!("{}", msg);
for x in &ofs.files {
info!(" path {:?}", x.path);
}
let item = LogItem::quick(Level::INFO, msg);
let mut chunkers = vec![];
for of in ofs.files {

View File

@@ -353,6 +353,7 @@ pub async fn make_event_blobs_pipe(
evq: &PlainEventsQuery,
node_config: &NodeConfigCached,
) -> Result<Pin<Box<dyn Stream<Item = Box<dyn Framable + Send>> + Send>>, Error> {
info!("make_event_blobs_pipe {evq:?}");
if false {
match dbconn::channel_exists(evq.channel(), &node_config).await {
Ok(_) => (),