Read2
Add a second file-read implementation (`FileContentStream2`, selectable as `ReadSys::Read2`), expose the read system and a per-request log level through `Api1Query`, and downgrade chatty per-event `info!` logging to `debug!`/`trace!`.
@@ -313,7 +313,7 @@ async fn get_timebins(channel_config: &ChannelConfig, node: Node) -> Result<Vec<
             Ok(timebins)
         }
         Err(e) => {
-            info!(
+            debug!(
                 "get_timebins no timebins for {:?} {:?} p0 {:?}",
                 channel_config, e, p0
             );
@@ -39,7 +39,7 @@ use std::task::{Context, Poll};
 use std::time::{Duration, Instant};
 use std::{fmt, mem};
 use tokio::fs::{File, OpenOptions};
-use tokio::io::{AsyncRead, AsyncSeekExt, ReadBuf};
+use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, ReadBuf};
 use tokio::sync::mpsc;
 
 // TODO transform this into a self-test or remove.
@@ -259,6 +259,89 @@ impl Stream for FileContentStream {
     }
 }
 
+enum FCS2 {
+    Idle,
+    Reading(
+        (
+            Box<BytesMut>,
+            Pin<Box<dyn Future<Output = Result<usize, Error>> + Send>>,
+        ),
+    ),
+}
+
+pub struct FileContentStream2 {
+    fcs: FCS2,
+    file: Pin<Box<File>>,
+    disk_io_tune: DiskIoTune,
+    done: bool,
+    complete: bool,
+}
+
+impl FileContentStream2 {
+    pub fn new(file: File, disk_io_tune: DiskIoTune) -> Self {
+        let file = Box::pin(file);
+        Self {
+            fcs: FCS2::Idle,
+            file,
+            disk_io_tune,
+            done: false,
+            complete: false,
+        }
+    }
+
+    fn make_reading(&mut self) {
+        let mut buf = Box::new(BytesMut::with_capacity(self.disk_io_tune.read_buffer_len));
+        let bufref = unsafe { &mut *((&mut buf as &mut BytesMut) as *mut BytesMut) };
+        let fileref = unsafe { &mut *((&mut self.file) as *mut Pin<Box<File>>) };
+        let fut = AsyncReadExt::read_buf(fileref, bufref).map_err(|e| e.into());
+        self.fcs = FCS2::Reading((buf, Box::pin(fut)));
+    }
+}
+
+impl Stream for FileContentStream2 {
+    type Item = Result<FileChunkRead, Error>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
+        use Poll::*;
+        loop {
+            break if self.complete {
+                panic!("poll_next on complete")
+            } else if self.done {
+                self.complete = true;
+                Ready(None)
+            } else {
+                match self.fcs {
+                    FCS2::Idle => {
+                        self.make_reading();
+                        continue;
+                    }
+                    FCS2::Reading((ref mut buf, ref mut fut)) => match fut.poll_unpin(cx) {
+                        Ready(Ok(n)) => {
+                            let mut buf2 = BytesMut::new();
+                            std::mem::swap(buf as &mut BytesMut, &mut buf2);
+                            let item = FileChunkRead {
+                                buf: buf2,
+                                duration: Duration::from_millis(0),
+                            };
+                            if n == 0 {
+                                self.done = true;
+                            } else {
+                                self.make_reading();
+                            }
+                            Ready(Some(Ok(item)))
+                        }
+                        Ready(Err(e)) => {
+                            self.done = true;
+                            Ready(Some(Err(e.into())))
+                        }
+                        Pending => Pending,
+                    },
+                }
+            };
+        }
+    }
+}
+
 enum FCS3 {
     GetPosition,
     ReadingSimple,
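The `make_reading` above aliases the pinned file and the fresh buffer through raw pointers so the boxed future can borrow state that the stream still owns; the state machine must then be careful to drop the future before touching the buffer again. A minimal sketch (not part of this commit) of the same read step with ownership moved into the future and handed back on completion, which needs no `unsafe`:

```rust
use bytes::BytesMut;
use futures_util::FutureExt;
use std::future::Future;
use std::pin::Pin;
use tokio::fs::File;
use tokio::io::AsyncReadExt;

// The future owns both file and buffer and returns them together with the
// number of bytes read, so no stream-owned state is aliased.
type ReadFut = Pin<Box<dyn Future<Output = std::io::Result<(File, BytesMut, usize)>> + Send>>;

fn make_reading_owned(mut file: File, read_buffer_len: usize) -> ReadFut {
    let mut buf = BytesMut::with_capacity(read_buffer_len);
    async move {
        let n = file.read_buf(&mut buf).await?;
        Ok((file, buf, n))
    }
    .boxed()
}
```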
@@ -566,12 +649,16 @@ pub fn file_content_stream(
     file: File,
     disk_io_tune: DiskIoTune,
 ) -> Pin<Box<dyn Stream<Item = Result<FileChunkRead, Error>> + Send>> {
-    warn!("file_content_stream disk_io_tune {disk_io_tune:?}");
+    debug!("file_content_stream disk_io_tune {disk_io_tune:?}");
     match &disk_io_tune.read_sys {
         ReadSys::TokioAsyncRead => {
             let s = FileContentStream::new(file, disk_io_tune);
             Box::pin(s) as Pin<Box<dyn Stream<Item = _> + Send>>
         }
+        ReadSys::Read2 => {
+            let s = FileContentStream2::new(file, disk_io_tune);
+            Box::pin(s) as _
+        }
         ReadSys::Read3 => {
             let s = FileContentStream3::new(file, disk_io_tune);
             Box::pin(s) as _
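A hypothetical call site for the new `Read2` path. `DiskIoTune`'s `Default` impl and exact field set are assumed from what this diff shows (`read_sys`, `read_buffer_len`, `read_queue_len`); the crate-local items (`DiskIoTune`, `ReadSys`, `FileChunkRead`, `Error`, `file_content_stream`) are assumed to be in scope:

```rust
async fn read2_stream(path: &str) -> Result<(), Error> {
    let file = tokio::fs::File::open(path).await?;
    // Select the read implementation added by this commit.
    let mut tune = DiskIoTune::default();
    tune.read_sys = ReadSys::Read2;
    tune.read_buffer_len = 1024 * 16;
    let mut stream = file_content_stream(file, tune);
    // Pin<Box<dyn Stream>> is Unpin, so StreamExt::next works directly.
    while let Some(chunk) = futures_util::StreamExt::next(&mut stream).await {
        let chunk = chunk?;
        let _ = chunk.buf.len(); // process the chunk here
    }
    Ok(())
}
```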
@@ -109,7 +109,7 @@ impl Stream for EventChunkerMultifile {
                         self.max_ts = g;
                         const EMIT_COUNT_MAX: usize = 10;
                         if self.emit_count < EMIT_COUNT_MAX {
-                            info!(
+                            debug!(
                                 "EventChunkerMultifile emit {}/{} events {}",
                                 self.emit_count,
                                 EMIT_COUNT_MAX,
@@ -25,7 +25,7 @@ pub struct MergedBlobsFromRemotes {
 
 impl MergedBlobsFromRemotes {
     pub fn new(evq: RawEventsQuery, perf_opts: PerfOpts, cluster: Cluster) -> Self {
-        info!("MergedBlobsFromRemotes evq {:?}", evq);
+        debug!("MergedBlobsFromRemotes evq {:?}", evq);
         let mut tcp_establish_futs = vec![];
         for node in &cluster.nodes {
             let f = x_processed_event_blobs_stream_from_node(evq.clone(), perf_opts.clone(), node.clone());
@@ -12,6 +12,7 @@ use err::Error;
 use futures_core::Stream;
 use items::frame::{make_frame, make_term_frame};
 use items::{EventsNodeProcessor, FrameType, RangeCompletableItem, Sitemty, StreamItem};
+use netpod::log::*;
 use netpod::query::RawEventsQuery;
 use netpod::{EventQueryJsonStringFrame, Node, PerfOpts};
 use std::pin::Pin;
@@ -28,7 +29,7 @@ where
     <ENP as EventsNodeProcessor>::Output: Unpin + 'static,
     Result<StreamItem<RangeCompletableItem<<ENP as EventsNodeProcessor>::Output>>, err::Error>: FrameType,
 {
-    netpod::log::debug!("x_processed_stream_from_node to: {}:{}", node.host, node.port_raw);
+    debug!("x_processed_stream_from_node to: {}:{}", node.host, node.port_raw);
     let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
     let qjs = serde_json::to_string(&query)?;
     let (netin, mut netout) = net.into_split();
@@ -48,10 +49,9 @@ pub async fn x_processed_event_blobs_stream_from_node(
     perf_opts: PerfOpts,
     node: Node,
 ) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>, Error> {
-    netpod::log::debug!(
+    debug!(
         "x_processed_event_blobs_stream_from_node to: {}:{}",
-        node.host,
-        node.port_raw
+        node.host, node.port_raw
     );
     let net = TcpStream::connect(format!("{}:{}", node.host, node.port_raw)).await?;
     let qjs = serde_json::to_string(&query)?;
@@ -130,23 +130,6 @@ impl Read4 {
             }
             return;
         }
-        if false {
-            let ec = unsafe { libc::madvise(fd, 0, libc::SEEK_CUR) };
-            if ec == -1 {
-                let errno = unsafe { *libc::__errno_location() };
-                let msg = format!("seek error wid {wid} fd {fd} errno {errno}");
-                error!("{}", msg);
-                let e = Error::with_msg_no_trace(msg);
-                match rt.results.blocking_send(Err(e)) {
-                    Ok(_) => {}
-                    Err(_) => {
-                        self.can_not_publish.fetch_add(1, Ordering::AcqRel);
-                        error!("Can not publish error");
-                    }
-                }
-                return;
-            }
-        }
         let mut rpos = ec as u64;
         let mut apos = rpos / rt.buflen * rt.buflen;
         let mut prc = 0;
@@ -18,7 +18,8 @@ hyper-tls = { version="0.5.0" }
 bytes = "1.0.1"
 futures-core = "0.3.14"
 futures-util = "0.3.14"
-tracing = "0.1.25"
+tracing = "0.1"
+tracing-futures = "0.2"
 async-channel = "1.6"
 itertools = "0.10.1"
 chrono = "0.4.19"
@@ -11,7 +11,7 @@ use items::{RangeCompletableItem, Sitemty, StreamItem};
 use itertools::Itertools;
 use netpod::query::RawEventsQuery;
 use netpod::timeunits::SEC;
-use netpod::{log::*, DiskIoTune, ACCEPT_ALL};
+use netpod::{log::*, DiskIoTune, ReadSys, ACCEPT_ALL};
 use netpod::{ByteSize, Channel, FileIoBufferSize, NanoRange, NodeConfigCached, PerfOpts, Shape, APP_OCTET};
 use netpod::{ChannelSearchQuery, ChannelSearchResult, ProxyConfig, APP_JSON};
 use parse::channelconfig::{
@@ -23,6 +23,7 @@ use std::future::Future;
 use std::pin::Pin;
 use std::task::{Context, Poll};
 use std::time::{Duration, Instant};
+use tracing_futures::Instrument;
 use url::Url;
 
 pub trait BackendAware {
@@ -485,6 +486,10 @@ pub struct Api1Query {
     events_max: u64,
     #[serde(default)]
     io_queue_len: u64,
+    #[serde(default)]
+    log_level: String,
+    #[serde(default)]
+    read_sys: String,
 }
 
 impl Api1Query {
@@ -496,6 +501,8 @@ impl Api1Query {
         if self.io_queue_len != 0 {
             k.read_queue_len = self.io_queue_len as usize;
         }
+        let read_sys: ReadSys = self.read_sys.as_str().into();
+        k.read_sys = read_sys;
         k
     }
 }
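A hypothetical request body exercising the new knobs. The wire names assume the struct's snake_case fields are serialized verbatim (no `#[serde(rename_all)]` is visible in this diff), and the channel/range fields are illustrative only:

```rust
// Both new fields fall back to "" via #[serde(default)], so existing
// clients that omit them keep working unchanged.
let body = serde_json::json!({
    "channels": [{ "backend": "some-backend", "name": "some-channel" }],
    "range": { "beg_date": "2021-01-01T00:00:00Z", "end_date": "2021-01-01T01:00:00Z" },
    "events_max": 10000,
    "io_queue_len": 4,
    "log_level": "debug",
    "read_sys": "Read2"
});
```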
@@ -576,7 +583,7 @@ impl DataApiPython3DataStream {
                     for i1 in 0..b.tss.len() {
                         const EVIMAX: usize = 6;
                         if *count_events < EVIMAX {
-                            info!(
+                            debug!(
                                 "ev info {}/{} decomps len {:?} BE {:?} scalar-type {:?} shape {:?} comps {:?}",
                                 *count_events + 1,
                                 EVIMAX,
@@ -611,7 +618,7 @@ impl DataApiPython3DataStream {
                         compression,
                     };
                     let h = serde_json::to_string(&head)?;
-                    info!("sending channel header {}", h);
+                    debug!("sending channel header {}", h);
                     let l1 = 1 + h.as_bytes().len() as u32;
                     d.put_u32(l1);
                     d.put_u8(0);
@@ -722,7 +729,7 @@ impl Stream for DataApiPython3DataStream {
                             MatchingConfigEntry::Entry(entry) => entry.clone(),
                         };
                         let channel = self.channels[self.chan_ix - 1].clone();
-                        info!("found channel_config for {}: {:?}", channel.name, entry);
+                        debug!("found channel_config for {}: {:?}", channel.name, entry);
                         let evq = RawEventsQuery {
                             channel,
                             range: self.range.clone(),
@@ -856,11 +863,6 @@ impl Api1EventsBinaryHandler {
     }
 
     pub async fn handle(&self, req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-        info!(
-            "Api1EventsBinaryHandler::handle uri: {:?} headers: {:?}",
-            req.uri(),
-            req.headers()
-        );
         if req.method() != Method::POST {
            return Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?);
         }
@@ -872,18 +874,34 @@ impl Api1EventsBinaryHandler {
             .map_err(|e| Error::with_msg_no_trace(format!("{e:?}")))?
             .to_owned();
         let body_data = hyper::body::to_bytes(body).await?;
-        info!(
-            "Api1EventsBinaryHandler query json: {}",
-            String::from_utf8_lossy(&body_data)
-        );
         let qu: Api1Query = if let Ok(qu) = serde_json::from_slice(&body_data) {
             qu
         } else {
+            error!("got body_data: {:?}", String::from_utf8(body_data[..].to_vec()));
             return Err(Error::with_msg_no_trace("can not parse query"));
         };
+        let span = if qu.log_level == "trace" {
+            tracing::span!(tracing::Level::TRACE, "log_span_t")
+        } else if qu.log_level == "debug" {
+            tracing::span!(tracing::Level::DEBUG, "log_span_d")
+        } else {
+            tracing::Span::none()
+        };
+        self.handle_for_query(qu, accept, span.clone(), node_config)
+            .instrument(span)
+            .await
+    }
+
+    pub async fn handle_for_query(
+        &self,
+        qu: Api1Query,
+        accept: String,
+        span: tracing::Span,
+        node_config: &NodeConfigCached,
+    ) -> Result<Response<Body>, Error> {
         // TODO this should go to usage statistics:
         info!(
-            "Api1Query {:?} {} {:?}",
+            "Handle Api1Query {:?} {} {:?}",
             qu.range,
             qu.channels.len(),
             qu.channels.first()
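The per-request span logic in isolation: a minimal sketch using `tracing`'s own `Instrument` (this commit pulls in `tracing-futures`, whose `Instrument` also covers `Stream`s, as used further down for the response body):

```rust
use tracing::Instrument;

async fn respond() {
    tracing::debug!("emitted inside the request span");
}

// log_level comes from the parsed query; unknown values disable the span,
// so per-request verbosity costs nothing unless a client asks for it.
async fn demo(log_level: &str) {
    let span = match log_level {
        "trace" => tracing::span!(tracing::Level::TRACE, "log_span_t"),
        "debug" => tracing::span!(tracing::Level::DEBUG, "log_span_d"),
        _ => tracing::Span::none(),
    };
    respond().instrument(span).await;
}
```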
@@ -892,7 +910,7 @@ impl Api1EventsBinaryHandler {
         let end_date = chrono::DateTime::parse_from_rfc3339(&qu.range.end_date);
         let beg_date = beg_date?;
         let end_date = end_date?;
-        info!("Api1Query beg_date {:?} end_date {:?}", beg_date, end_date);
+        trace!("Api1Query beg_date {:?} end_date {:?}", beg_date, end_date);
         //let url = Url::parse(&format!("dummy:{}", req.uri()))?;
         //let query = PlainEventsBinaryQuery::from_url(&url)?;
         if accept != APP_OCTET && accept != ACCEPT_ALL {
@@ -925,8 +943,10 @@ impl Api1EventsBinaryHandler {
             status_id.clone(),
             node_config.clone(),
         );
+        let s = s.instrument(span);
+        let body = BodyStream::wrapped(s, format!("Api1EventsBinaryHandler"));
         let ret = response(StatusCode::OK).header("x-daqbuffer-request-id", status_id);
-        let ret = ret.body(BodyStream::wrapped(s, format!("Api1EventsBinaryHandler")))?;
+        let ret = ret.body(body)?;
         Ok(ret)
     }
 }
@@ -1335,6 +1335,7 @@ impl Default for FileIoBufferSize {
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum ReadSys {
     TokioAsyncRead,
+    Read2,
     Read3,
     Read4,
 }
@@ -1349,6 +1350,8 @@ impl From<&str> for ReadSys {
     fn from(k: &str) -> Self {
         if k == "TokioAsyncRead" {
             Self::TokioAsyncRead
+        } else if k == "Read2" {
+            Self::Read2
         } else if k == "Read3" {
             Self::Read3
         } else if k == "Read4" {
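A quick check of the new mapping; the fallback arm for unrecognized strings (including the empty default from `Api1Query`) sits outside this hunk, so its exact variant is not shown here:

```rust
// "Read2" now maps to the variant added above.
let rs = ReadSys::from("Read2");
assert!(matches!(rs, ReadSys::Read2));

// Equivalent via Into, as the Api1Query hunk above uses it.
let rs2: ReadSys = "Read2".into();
assert!(matches!(rs2, ReadSys::Read2));
```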
@@ -21,6 +21,11 @@ impl Buffer {
         }
     }
 
+    pub fn reset(&mut self) {
+        self.rp = 0;
+        self.wp = 0;
+    }
+
     pub fn len(&self) -> usize {
         self.wp - self.rp
     }
@@ -86,7 +91,7 @@ impl Buffer {
             self.rp = 0;
             self.wp = ll;
         } else if self.wp == self.buf.len() {
-            eprintln!("ERROR no more space in buffer");
+            //eprintln!("ERROR no more space in buffer");
         }
     }
 }
@@ -193,12 +198,21 @@ pub fn append_inner(dirname: &str, mut stdin: Stdin) -> Result<(), Error> {
     let mut buf = Buffer::new();
     loop {
         // Get some more data.
-        let b = buf.writable();
+        let mut b = buf.writable();
         if false {
             write!(&mut fout, "[APPEND-WRITABLE] {} writable bytes\n", b.len())?;
         }
-        if b.len() < 1 {
-            eprintln!("ERROR attempt to read with zero length buffer");
+        if b.len() == 0 {
+            write!(&mut fout, "[DISCARD] {} discarded bytes\n", b.len())?;
+            buf.reset();
+            b = buf.writable();
         }
+        let b = b;
+        if b.len() == 0 {
+            let msg = format!("[ERROR DISCARD] still no space wp {} rp {}\n", buf.wp, buf.rp);
+            write!(&mut fout, "{}", msg)?;
+            let e = Error::with_msg_no_trace(msg);
+            return Err(e);
+        }
         let n1 = stdin.read(b)?;
         buf.inc_wp(n1);
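The recovery path in isolation, with a minimal stand-in `Buffer` (hypothetical; the real one lives in this crate): a buffer with no writable space left is discarded via the new `reset()` and the read retried, and only a buffer that is still full after a reset is a hard error:

```rust
struct Buffer {
    buf: Vec<u8>,
    rp: usize,
    wp: usize,
}

impl Buffer {
    fn reset(&mut self) {
        self.rp = 0;
        self.wp = 0;
    }

    fn writable(&mut self) -> &mut [u8] {
        &mut self.buf[self.wp..]
    }
}

fn main() {
    // Simulated stuck state: everything written, no writable space left.
    let mut buf = Buffer { buf: vec![0; 16], rp: 16, wp: 16 };
    if buf.writable().is_empty() {
        buf.reset(); // discard buffered bytes; space is available again
    }
    assert_eq!(buf.writable().len(), 16);
}
```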
@@ -232,6 +246,12 @@ pub fn append_inner(dirname: &str, mut stdin: Stdin) -> Result<(), Error> {
                     bytes_written += j.len() as u64 + 1;
                 }
                 buf.advance(n2);
+                if buf.len() > 256 {
+                    write!(&mut fout, "[TRUNCATED LINE FOLLOWS]\n")?;
+                    fout.write_all(&buf.readable()[..256])?;
+                    fout.write_all(b"\n")?;
+                    buf.reset();
+                }
             }
             Err(e) => {
                 eprintln!("ERROR parse fail: {e}");
@@ -101,13 +101,8 @@ pub fn tracing_init() {
                 "archapp::archeng::backreadbuf=info",
                 "archapp::archeng::pipe=debug",
                 "archapp::storagemerge=info",
-                "streams::rangefilter=info",
-                "items::eventvalues=info",
-                "items::xbinnedscalarevents=info",
-                "disk=debug",
-                "nodenet::conn=info",
-                "daqbuffer::test=info",
-                "dq=info",
+                "[log_span_d]=debug",
+                "[log_span_t]=trace",
             ]
             .join(","),
         ))
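The bracketed entries are `tracing_subscriber::EnvFilter` span directives: `[log_span_d]=debug` enables debug-level events inside any span named `log_span_d`, which is exactly the span the handler now opens per request. A minimal sketch (hypothetical, assuming the `env-filter` feature of `tracing-subscriber`) wiring up the same directives:

```rust
use tracing_subscriber::EnvFilter;

fn tracing_init_sketch() {
    // Global level info, but debug/trace inside the per-request spans.
    let filter = EnvFilter::new("info,[log_span_d]=debug,[log_span_t]=trace");
    tracing_subscriber::fmt().with_env_filter(filter).init();
}
```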