Dominik Werder
2022-03-11 20:52:08 +01:00
parent 04def20be3
commit c67e8e4dbb
11 changed files with 163 additions and 54 deletions

View File

@@ -313,7 +313,7 @@ async fn get_timebins(channel_config: &ChannelConfig, node: Node) -> Result<Vec<
Ok(timebins)
}
Err(e) => {
info!(
debug!(
"get_timebins no timebins for {:?} {:?} p0 {:?}",
channel_config, e, p0
);

View File

@@ -39,7 +39,7 @@ use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use std::{fmt, mem};
use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncSeekExt, ReadBuf};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, ReadBuf};
use tokio::sync::mpsc;
// TODO transform this into a self-test or remove.
@@ -259,6 +259,89 @@ impl Stream for FileContentStream {
}
}
enum FCS2 {
Idle,
Reading(
(
Box<BytesMut>,
Pin<Box<dyn Future<Output = Result<usize, Error>> + Send>>,
),
),
}
pub struct FileContentStream2 {
fcs: FCS2,
file: Pin<Box<File>>,
disk_io_tune: DiskIoTune,
done: bool,
complete: bool,
}
impl FileContentStream2 {
pub fn new(file: File, disk_io_tune: DiskIoTune) -> Self {
let file = Box::pin(file);
Self {
fcs: FCS2::Idle,
file,
disk_io_tune,
done: false,
complete: false,
}
}
fn make_reading(&mut self) {
let mut buf = Box::new(BytesMut::with_capacity(self.disk_io_tune.read_buffer_len));
// Safety: these raw borrows alias the heap allocations behind `buf` (a Box)
// and `self.file` (a Pin<Box>), whose addresses are stable; the future built
// from them is stored right next to `buf` in FCS2::Reading, so both referents
// stay alive for as long as that future can be polled.
let bufref = unsafe { &mut *((&mut buf as &mut BytesMut) as *mut BytesMut) };
let fileref = unsafe { &mut *((&mut self.file) as *mut Pin<Box<File>>) };
let fut = AsyncReadExt::read_buf(fileref, bufref).map_err(|e| e.into());
self.fcs = FCS2::Reading((buf, Box::pin(fut)));
}
}
impl Stream for FileContentStream2 {
type Item = Result<FileChunkRead, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
loop {
break if self.complete {
panic!("poll_next on complete")
} else if self.done {
self.complete = true;
Ready(None)
} else {
match self.fcs {
FCS2::Idle => {
self.make_reading();
continue;
}
FCS2::Reading((ref mut buf, ref mut fut)) => match fut.poll_unpin(cx) {
Ready(Ok(n)) => {
let mut buf2 = BytesMut::new();
std::mem::swap(buf as &mut BytesMut, &mut buf2);
let item = FileChunkRead {
buf: buf2,
duration: Duration::from_millis(0),
};
if n == 0 {
self.done = true;
} else {
self.make_reading();
}
Ready(Some(Ok(item)))
}
Ready(Err(e)) => {
self.done = true;
Ready(Some(Err(e.into())))
}
Pending => Pending,
},
}
};
}
}
}
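The Reading variant keeps a boxed buffer next to a boxed future that borrows it through raw pointers; the addresses are stable, but the pattern is easy to get wrong. For comparison only, not part of this commit, the same chunked reads can be driven without any aliasing by polling the file directly with a ReadBuf over the buffer's spare capacity. FileChunkRead, Error and DiskIoTune are the types already used in this file; the struct name and everything else below are made up for the sketch.
// Hedged sketch of a poll_read based variant, not the implementation in this commit.
struct FileContentStreamPolled {
    file: Pin<Box<File>>,
    buf: BytesMut,
    read_buffer_len: usize,
    done: bool,
}
impl FileContentStreamPolled {
    fn new(file: File, disk_io_tune: DiskIoTune) -> Self {
        Self {
            file: Box::pin(file),
            buf: BytesMut::new(),
            read_buffer_len: disk_io_tune.read_buffer_len,
            done: false,
        }
    }
}
impl Stream for FileContentStreamPolled {
    type Item = Result<FileChunkRead, Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        // All fields are Unpin, so the pin can be dropped here.
        let this = self.get_mut();
        if this.done {
            return Ready(None);
        }
        if this.buf.capacity() == 0 {
            this.buf = BytesMut::with_capacity(this.read_buffer_len);
        }
        // Read into the spare capacity of the owned buffer; nothing borrowed
        // is stored across polls, so no self-referential future is needed.
        let mut rb = ReadBuf::uninit(this.buf.spare_capacity_mut());
        match this.file.as_mut().poll_read(cx, &mut rb) {
            Ready(Ok(())) => {
                let n = rb.filled().len();
                // Safety: poll_read initialized exactly `n` bytes of the spare capacity.
                unsafe { this.buf.set_len(n) };
                if n == 0 {
                    this.done = true;
                }
                let buf = std::mem::replace(&mut this.buf, BytesMut::new());
                Ready(Some(Ok(FileChunkRead {
                    buf,
                    duration: Duration::from_millis(0),
                })))
            }
            Ready(Err(e)) => {
                this.done = true;
                Ready(Some(Err(e.into())))
            }
            Pending => Pending,
        }
    }
}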
enum FCS3 {
GetPosition,
ReadingSimple,
@@ -566,12 +649,16 @@ pub fn file_content_stream(
file: File,
disk_io_tune: DiskIoTune,
) -> Pin<Box<dyn Stream<Item = Result<FileChunkRead, Error>> + Send>> {
warn!("file_content_stream disk_io_tune {disk_io_tune:?}");
debug!("file_content_stream disk_io_tune {disk_io_tune:?}");
match &disk_io_tune.read_sys {
ReadSys::TokioAsyncRead => {
let s = FileContentStream::new(file, disk_io_tune);
Box::pin(s) as Pin<Box<dyn Stream<Item = _> + Send>>
}
ReadSys::Read2 => {
let s = FileContentStream2::new(file, disk_io_tune);
Box::pin(s) as _
}
ReadSys::Read3 => {
let s = FileContentStream3::new(file, disk_io_tune);
Box::pin(s) as _

View File

@@ -109,7 +109,7 @@ impl Stream for EventChunkerMultifile {
self.max_ts = g;
const EMIT_COUNT_MAX: usize = 10;
if self.emit_count < EMIT_COUNT_MAX {
info!(
debug!(
"EventChunkerMultifile emit {}/{} events {}",
self.emit_count,
EMIT_COUNT_MAX,
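The guard above caps log volume per stream instance: only the first EMIT_COUNT_MAX emits are reported, now at debug level. A minimal standalone version of that pattern, assuming the same debug! macro is in scope; the struct and method names are made up for the sketch.
#[derive(Default)]
struct EmitLimiter {
    emit_count: usize,
}
impl EmitLimiter {
    const EMIT_COUNT_MAX: usize = 10;
    fn emit(&mut self, msg: &str) {
        // Same guard as above: report only the first EMIT_COUNT_MAX occurrences.
        if self.emit_count < Self::EMIT_COUNT_MAX {
            self.emit_count += 1;
            debug!("emit {}/{} {}", self.emit_count, Self::EMIT_COUNT_MAX, msg);
        }
    }
}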

View File

@@ -25,7 +25,7 @@ pub struct MergedBlobsFromRemotes {
impl MergedBlobsFromRemotes {
pub fn new(evq: RawEventsQuery, perf_opts: PerfOpts, cluster: Cluster) -> Self {
info!("MergedBlobsFromRemotes evq {:?}", evq);
debug!("MergedBlobsFromRemotes evq {:?}", evq);
let mut tcp_establish_futs = vec![];
for node in &cluster.nodes {
let f = x_processed_event_blobs_stream_from_node(evq.clone(), perf_opts.clone(), node.clone());

View File

@@ -12,6 +12,7 @@ use err::Error;
use futures_core::Stream;
use items::frame::{make_frame, make_term_frame};
use items::{EventsNodeProcessor, FrameType, RangeCompletableItem, Sitemty, StreamItem};
use netpod::log::*;
use netpod::query::RawEventsQuery;
use netpod::{EventQueryJsonStringFrame, Node, PerfOpts};
use std::pin::Pin;
@@ -28,7 +29,7 @@ where
<ENP as EventsNodeProcessor>::Output: Unpin + 'static,
Result<StreamItem<RangeCompletableItem<<ENP as EventsNodeProcessor>::Output>>, err::Error>: FrameType,
{
netpod::log::debug!("x_processed_stream_from_node to: {}:{}", node.host, node.port_raw);
debug!("x_processed_stream_from_node to: {}:{}", node.host, node.port_raw);
let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
let qjs = serde_json::to_string(&query)?;
let (netin, mut netout) = net.into_split();
@@ -48,10 +49,9 @@ pub async fn x_processed_event_blobs_stream_from_node(
perf_opts: PerfOpts,
node: Node,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>, Error> {
netpod::log::debug!(
debug!(
"x_processed_event_blobs_stream_from_node to: {}:{}",
node.host,
node.port_raw
node.host, node.port_raw
);
let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
let qjs = serde_json::to_string(&query)?;
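For orientation, this is roughly how a caller such as MergedBlobsFromRemotes ends up using the function above. The variant names inside StreamItem are not shown in this diff, so items are only counted here; futures_util::StreamExt for next() and the function name of the sketch are assumptions.
use futures_util::StreamExt;
// Hedged sketch: fetch the event-blob stream from one node and drain it.
async fn drain_one_node(evq: RawEventsQuery, perf_opts: PerfOpts, node: Node) -> Result<usize, Error> {
    let mut stream = x_processed_event_blobs_stream_from_node(evq, perf_opts, node).await?;
    let mut count = 0;
    while let Some(item) = stream.next().await {
        // Each item is a Sitemty<EventFull>; an error item ends the transfer here.
        let _item = item?;
        count += 1;
    }
    Ok(count)
}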

View File

@@ -130,23 +130,6 @@ impl Read4 {
}
return;
}
if false {
let ec = unsafe { libc::madvise(fd, 0, libc::SEEK_CUR) };
if ec == -1 {
let errno = unsafe { *libc::__errno_location() };
let msg = format!("seek error wid {wid} fd {fd} errno {errno}");
error!("{}", msg);
let e = Error::with_msg_no_trace(msg);
match rt.results.blocking_send(Err(e)) {
Ok(_) => {}
Err(_) => {
self.can_not_publish.fetch_add(1, Ordering::AcqRel);
error!("Can not publish error");
}
}
return;
}
}
let mut rpos = ec as u64;
let mut apos = rpos / rt.buflen * rt.buflen;
let mut prc = 0;
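The surviving context lines round the current read position down to a multiple of the read buffer length. A small self-contained illustration of that round-down step; the function and test names are made up for this sketch.
// Round a file position down to the containing block boundary,
// as done above with rpos / rt.buflen * rt.buflen.
fn align_down(pos: u64, buflen: u64) -> u64 {
    pos / buflen * buflen
}
#[cfg(test)]
mod align_down_tests {
    use super::align_down;
    #[test]
    fn rounds_down_to_block_boundary() {
        assert_eq!(align_down(10_000, 4096), 8192);
        assert_eq!(align_down(8192, 4096), 8192);
        assert_eq!(align_down(0, 4096), 0);
    }
}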