Clean up leftover todo items

Dominik Werder
2021-06-10 10:38:51 +02:00
parent da50d772a0
commit 9c1cf3bba3
14 changed files with 288 additions and 143 deletions

View File

@@ -93,21 +93,10 @@ async fn go() -> Result<(), Error> {
#[test]
fn simple_fetch() {
use netpod::Nanos;
use netpod::{
timeunits::*, ByteOrder, Channel, ChannelConfig, Cluster, Database, Node, NodeConfig, ScalarType, Shape,
};
use netpod::{timeunits::*, ByteOrder, Channel, ChannelConfig, ScalarType, Shape};
taskrun::run(async {
let _rh = daqbuffer::nodes::require_test_hosts_running()?;
let t1 = chrono::Utc::now();
let node = Node {
host: "localhost".into(),
listen: "0.0.0.0".into(),
port: 8360,
port_raw: 8360 + 100,
data_base_path: err::todoval(),
ksprefix: "daq_swissfel".into(),
split: 0,
backend: "testbackend".into(),
};
let query = netpod::AggQuerySingleChannel {
channel_config: ChannelConfig {
channel: Channel {
@@ -118,7 +107,7 @@ fn simple_fetch() {
time_bin_size: Nanos { ns: DAY },
array: true,
scalar_type: ScalarType::F64,
shape: Shape::Wave(err::todoval()),
shape: Shape::Wave(42),
byte_order: ByteOrder::big_endian(),
compression: true,
},
@@ -126,23 +115,7 @@ fn simple_fetch() {
tb_file_count: 1,
buffer_size: 1024 * 8,
};
let cluster = Cluster {
nodes: vec![node],
database: Database {
name: "daqbuffer".into(),
host: "localhost".into(),
user: "daqbuffer".into(),
pass: "daqbuffer".into(),
},
};
let node_config = NodeConfig {
name: format!("{}:{}", cluster.nodes[0].host, cluster.nodes[0].port),
cluster,
};
let node_config: Result<NodeConfigCached, Error> = node_config.into();
let node_config = node_config?;
let query_string = serde_json::to_string(&query).unwrap();
let host = tokio::spawn(httpret::host(node_config.clone()));
let req = hyper::Request::builder()
.method(http::Method::POST)
.uri("http://localhost:8360/api/4/parsed_raw")
@@ -176,8 +149,6 @@ fn simple_fetch() {
ntot / 1024 / 1024,
throughput
);
drop(host);
//Err::<(), _>(format!("test error").into())
Ok(())
})
.unwrap();
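
Note: the Node and Cluster construction deleted from this test moved into the shared helper added in daqbuffer/src/nodes.rs below. Every test in this commit now follows roughly this pattern (a sketch composed from calls shown in this diff):

taskrun::run(async {
    // Hosts are spawned once per process; later tests reuse them.
    let rh = daqbuffer::nodes::require_test_hosts_running()?;
    let cluster = &rh.cluster;
    // ... issue HTTP requests against cluster.nodes[0] ...
    Ok(())
})
.unwrap();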

View File

@@ -6,6 +6,7 @@ use tracing::{debug, error, info, trace, warn};
pub mod cli;
pub mod client;
pub mod nodes;
#[cfg(test)]
pub mod test;

daqbuffer/src/nodes.rs Normal file (57 lines)
View File

@@ -0,0 +1,57 @@
use crate::spawn_test_hosts;
use err::Error;
use netpod::{Cluster, Database, Node};
use std::sync::{Arc, Mutex};
use tokio::task::JoinHandle;
pub struct RunningHosts {
pub cluster: Cluster,
_jhs: Vec<JoinHandle<Result<(), Error>>>,
}
lazy_static::lazy_static! {
static ref HOSTS_RUNNING: Mutex<Option<Arc<RunningHosts>>> = Mutex::new(None);
}
pub fn require_test_hosts_running() -> Result<Arc<RunningHosts>, Error> {
let mut g = HOSTS_RUNNING.lock().unwrap();
match g.as_ref() {
None => {
let cluster = test_cluster();
let jhs = spawn_test_hosts(cluster.clone());
let ret = RunningHosts {
cluster: cluster.clone(),
_jhs: jhs,
};
let a = Arc::new(ret);
*g = Some(a.clone());
Ok(a)
}
Some(gg) => Ok(gg.clone()),
}
}
fn test_cluster() -> Cluster {
let nodes = (0..3)
.into_iter()
.map(|id| Node {
host: "localhost".into(),
listen: "0.0.0.0".into(),
port: 8360 + id as u16,
port_raw: 8360 + id as u16 + 100,
data_base_path: format!("../tmpdata/node{:02}", id).into(),
ksprefix: "ks".into(),
split: id,
backend: "testbackend".into(),
})
.collect();
Cluster {
nodes: nodes,
database: Database {
name: "daqbuffer".into(),
host: "localhost".into(),
user: "daqbuffer".into(),
pass: "daqbuffer".into(),
},
}
}
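
Note: require_test_hosts_running keeps the spawned hosts in a process-wide singleton: a lazy_static Mutex<Option<Arc<_>>> that the first caller fills and every later caller clones. Holding the JoinHandles in _jhs keeps the server tasks owned for the rest of the test process. The same pattern, reduced to a self-contained sketch (Resource and all names here are placeholders):

use std::sync::{Arc, Mutex};

struct Resource;

lazy_static::lazy_static! {
    static ref SHARED: Mutex<Option<Arc<Resource>>> = Mutex::new(None);
}

fn require_shared() -> Arc<Resource> {
    let mut g = SHARED.lock().unwrap();
    match g.as_ref() {
        // First caller constructs the resource and publishes the Arc.
        None => {
            let a = Arc::new(Resource);
            *g = Some(a.clone());
            a
        }
        // Every later caller receives a clone of the same handle.
        Some(a) => a.clone(),
    }
}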

View File

@@ -1,4 +1,4 @@
use crate::spawn_test_hosts;
use crate::nodes::require_test_hosts_running;
use bytes::BytesMut;
use chrono::{DateTime, Utc};
use disk::agg::streams::{StatsItem, StreamItem};
@@ -14,68 +14,14 @@ use futures_util::TryStreamExt;
use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, Channel, Cluster, Database, HostPort, NanoRange, Node, PerfOpts};
use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange, PerfOpts};
use serde::de::DeserializeOwned;
use std::fmt::Debug;
use std::future::ready;
use std::sync::{Arc, Mutex};
use tokio::io::AsyncRead;
use tokio::task::JoinHandle;
pub mod json;
struct RunningHosts {
cluster: Cluster,
_jhs: Vec<JoinHandle<Result<(), Error>>>,
}
lazy_static::lazy_static! {
static ref HOSTS_RUNNING: Mutex<Option<Arc<RunningHosts>>> = Mutex::new(None);
}
fn require_test_hosts_running() -> Result<Arc<RunningHosts>, Error> {
let mut g = HOSTS_RUNNING.lock().unwrap();
match g.as_ref() {
None => {
let cluster = test_cluster();
let jhs = spawn_test_hosts(cluster.clone());
let ret = RunningHosts {
cluster: cluster.clone(),
_jhs: jhs,
};
let a = Arc::new(ret);
*g = Some(a.clone());
Ok(a)
}
Some(gg) => Ok(gg.clone()),
}
}
fn test_cluster() -> Cluster {
let nodes = (0..3)
.into_iter()
.map(|id| Node {
host: "localhost".into(),
listen: "0.0.0.0".into(),
port: 8360 + id as u16,
port_raw: 8360 + id as u16 + 100,
data_base_path: format!("../tmpdata/node{:02}", id).into(),
ksprefix: "ks".into(),
split: id,
backend: "testbackend".into(),
})
.collect();
Cluster {
nodes: nodes,
database: Database {
name: "daqbuffer".into(),
host: "localhost".into(),
user: "daqbuffer".into(),
pass: "daqbuffer".into(),
},
}
}
#[test]
fn get_binned_binary() {
taskrun::run(get_binned_binary_inner()).unwrap();

View File

@@ -1,4 +1,4 @@
use crate::test::require_test_hosts_running;
use crate::nodes::require_test_hosts_running;
use chrono::{DateTime, Utc};
use disk::binned::query::BinnedQuery;
use err::Error;
@@ -6,6 +6,7 @@ use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::{AggKind, Channel, Cluster, HostPort, NanoRange};
use std::time::Duration;
#[test]
fn get_binned_json_0() {
@@ -15,22 +16,46 @@ fn get_binned_json_0() {
async fn get_binned_json_0_inner() -> Result<(), Error> {
let rh = require_test_hosts_running()?;
let cluster = &rh.cluster;
get_binned_json_0_inner2(
"wave-f64-be-n21",
get_binned_json_common(
"scalar-i32-be",
"1970-01-01T00:20:10.000Z",
"1970-01-01T01:20:30.000Z",
10,
cluster,
13,
true,
)
.await
}
async fn get_binned_json_0_inner2(
#[test]
fn get_binned_json_1() {
taskrun::run(get_binned_json_1_inner()).unwrap();
}
async fn get_binned_json_1_inner() -> Result<(), Error> {
let rh = require_test_hosts_running()?;
let cluster = &rh.cluster;
get_binned_json_common(
"wave-f64-be-n21",
"1970-01-01T00:20:10.000Z",
"1970-01-01T01:20:45.000Z",
10,
cluster,
13,
true,
)
.await
}
async fn get_binned_json_common(
channel_name: &str,
beg_date: &str,
end_date: &str,
bin_count: u32,
cluster: &Cluster,
expect_bin_count: u32,
expect_finalised_range: bool,
) -> Result<(), Error> {
let t1 = Utc::now();
let agg_kind = AggKind::DimXBins1;
@@ -43,7 +68,8 @@ async fn get_binned_json_0_inner2(
name: channel_name.into(),
};
let range = NanoRange::from_date_time(beg_date, end_date);
let query = BinnedQuery::new(channel, range, bin_count, agg_kind);
let mut query = BinnedQuery::new(channel, range, bin_count, agg_kind);
query.set_timeout(Duration::from_millis(15000));
let url = query.url(&HostPort::from_node(node0));
info!("get_binned_json_0 get {}", url);
let req = hyper::Request::builder()
@@ -57,14 +83,38 @@ async fn get_binned_json_0_inner2(
error!("client response {:?}", res);
}
let res = hyper::body::to_bytes(res.into_body()).await?;
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
info!("get_binned_json_0 DONE time {} ms", ms);
let res = String::from_utf8(res.to_vec())?;
let res: serde_json::Value = serde_json::from_str(res.as_str())?;
info!(
"result from endpoint: --------------\n{}\n--------------",
serde_json::to_string_pretty(&res)?
);
let t2 = chrono::Utc::now();
let ms = t2.signed_duration_since(t1).num_milliseconds() as u64;
info!("get_binned_json_0 DONE time {} ms", ms);
if expect_finalised_range {
if !res
.get("finalisedRange")
.ok_or(Error::with_msg("missing finalisedRange"))?
.as_bool()
.ok_or(Error::with_msg("key finalisedRange not bool"))?
{
return Err(Error::with_msg("expected finalisedRange"));
}
} else if res.get("finalisedRange").is_some() {
return Err(Error::with_msg("expect absent finalisedRange"));
}
if res.get("counts").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
}
if res.get("mins").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
}
if res.get("maxs").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
}
if res.get("avgs").unwrap().as_array().unwrap().len() != expect_bin_count as usize {
return Err(Error::with_msg(format!("expect_bin_count {}", expect_bin_count)));
}
Ok(())
}
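
Note: for reference, the response body these assertions inspect has roughly this shape. The key names are exactly the ones queried above; the values are made up, and counts, mins, maxs and avgs each carry expect_bin_count entries. The bin-edge array from MinMaxAvgBinsCollectedResult further down is omitted because its serialized key name is not visible in this diff.

{
  "finalisedRange": true,
  "counts": [3, 5, 4],
  "mins": [0.1, 0.0, 0.2],
  "maxs": [0.9, 1.0, 0.8],
  "avgs": [0.5, 0.5, 0.5]
}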

View File

@@ -1,5 +1,6 @@
use crate::agg::binnedt::{TimeBinnableType, TimeBinnableTypeAggregator};
use crate::agg::streams::Appendable;
use crate::agg::{Fits, FitsInside};
use crate::binned::{
EventsNodeProcessor, FilterFittingInside, MinMaxAvgBins, NumOps, PushableIndex, RangeOverlapInfo, ReadPbv,
ReadableFromFile, WithLen, WithTimestamps,
@@ -83,9 +84,36 @@ impl<NTY> RangeOverlapInfo for XBinnedScalarEvents<NTY> {
}
}
impl<NTY> FitsInside for XBinnedScalarEvents<NTY> {
fn fits_inside(&self, range: NanoRange) -> Fits {
if self.tss.is_empty() {
Fits::Empty
} else {
let t1 = *self.tss.first().unwrap();
let t2 = *self.tss.last().unwrap();
if t2 < range.beg {
Fits::Lower
} else if t1 > range.end {
Fits::Greater
} else if t1 < range.beg && t2 > range.end {
Fits::PartlyLowerAndGreater
} else if t1 < range.beg {
Fits::PartlyLower
} else if t2 > range.end {
Fits::PartlyGreater
} else {
Fits::Inside
}
}
}
}
impl<NTY> FilterFittingInside for XBinnedScalarEvents<NTY> {
fn filter_fitting_inside(self, _fit_range: NanoRange) -> Option<Self> {
todo!()
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
match self.fits_inside(fit_range) {
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
_ => None,
}
}
}
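
Note: the classification looks only at the first and last timestamp. Reduced to a self-contained sketch with plain u64 timestamps in place of the crate types:

#[derive(Debug, PartialEq)]
enum Fits {
    Empty,
    Lower,
    Greater,
    PartlyLower,
    PartlyGreater,
    PartlyLowerAndGreater,
    Inside,
}

fn fits(tss: &[u64], beg: u64, end: u64) -> Fits {
    match (tss.first(), tss.last()) {
        (None, _) | (_, None) => Fits::Empty,
        (Some(&t1), Some(&t2)) => {
            if t2 < beg {
                Fits::Lower
            } else if t1 > end {
                Fits::Greater
            } else if t1 < beg && t2 > end {
                Fits::PartlyLowerAndGreater
            } else if t1 < beg {
                Fits::PartlyLower
            } else if t2 > end {
                Fits::PartlyGreater
            } else {
                Fits::Inside
            }
        }
    }
}

fn main() {
    assert_eq!(fits(&[], 100, 200), Fits::Empty);
    assert_eq!(fits(&[10, 50], 100, 200), Fits::Lower);
    assert_eq!(fits(&[50, 250], 100, 200), Fits::PartlyLowerAndGreater);
    assert_eq!(fits(&[120, 180], 100, 200), Fits::Inside);
}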

View File

@@ -16,7 +16,7 @@ pub enum StreamItem<T> {
Stats(StatsItem),
}
pub trait Collector {
pub trait Collector: WithLen {
type Input: Collectable;
type Output: Serialize;
fn ingest(&mut self, src: &Self::Input);
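
Note: the new WithLen supertrait exists so that collect_all (changed later in this commit) can compare the number of bins already ingested against abortAfterBinCount. A self-contained sketch of the relationship (trait bodies trimmed, VecCollector is illustrative):

trait WithLen {
    fn len(&self) -> usize;
}

trait Collector: WithLen {
    type Input;
    fn ingest(&mut self, src: &Self::Input);
}

struct VecCollector(Vec<f64>);

impl WithLen for VecCollector {
    fn len(&self) -> usize {
        self.0.len()
    }
}

impl Collector for VecCollector {
    type Input = f64;
    fn ingest(&mut self, src: &Self::Input) {
        self.0.push(*src);
    }
}

fn main() {
    let mut c = VecCollector(Vec::new());
    c.ingest(&1.0);
    assert_eq!(c.len(), 1);
}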

View File

@@ -220,16 +220,28 @@ fn make_num_pipeline_entry<PPP>(
) -> Result<BinnedResponseDyn, Error>
where
PPP: PipelinePostProcessA,
PPP: PipelinePostProcessB<MinMaxAvgBins<u8>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<u16>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<u32>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<u64>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i8>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i16>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i32>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i64>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<f32>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<f64>>,
{
match scalar_type {
ScalarType::U8 => match_end!(u8, byte_order, shape, query, ppp, node_config),
ScalarType::U16 => match_end!(u16, byte_order, shape, query, ppp, node_config),
ScalarType::U32 => match_end!(u32, byte_order, shape, query, ppp, node_config),
ScalarType::U64 => match_end!(u64, byte_order, shape, query, ppp, node_config),
ScalarType::I8 => match_end!(i8, byte_order, shape, query, ppp, node_config),
ScalarType::I16 => match_end!(i16, byte_order, shape, query, ppp, node_config),
ScalarType::I32 => match_end!(i32, byte_order, shape, query, ppp, node_config),
ScalarType::I64 => match_end!(i64, byte_order, shape, query, ppp, node_config),
ScalarType::F32 => match_end!(f32, byte_order, shape, query, ppp, node_config),
ScalarType::F64 => match_end!(f64, byte_order, shape, query, ppp, node_config),
// TODO complete set
_ => todo!(),
}
}
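
Note: each PipelinePostProcessB<MinMaxAvgBins<T>> bound gives the match one monomorphized arm per scalar type. The dispatch shape, as a self-contained sketch with three variants instead of ten (all names illustrative except ScalarType):

enum ScalarType {
    U8,
    I32,
    F64,
}

trait Pipeline<T> {
    fn run(&self) -> &'static str;
}

struct Ppp;

impl Pipeline<u8> for Ppp {
    fn run(&self) -> &'static str {
        "u8"
    }
}

impl Pipeline<i32> for Ppp {
    fn run(&self) -> &'static str {
        "i32"
    }
}

impl Pipeline<f64> for Ppp {
    fn run(&self) -> &'static str {
        "f64"
    }
}

// One bound per supported numeric type, mirroring the PPP bounds above.
fn dispatch<P>(st: ScalarType, p: P) -> &'static str
where
    P: Pipeline<u8> + Pipeline<i32> + Pipeline<f64>,
{
    match st {
        ScalarType::U8 => <P as Pipeline<u8>>::run(&p),
        ScalarType::I32 => <P as Pipeline<i32>>::run(&p),
        ScalarType::F64 => <P as Pipeline<f64>>::run(&p),
    }
}

fn main() {
    assert_eq!(dispatch(ScalarType::U8, Ppp), "u8");
    assert_eq!(dispatch(ScalarType::I32, Ppp), "i32");
    assert_eq!(dispatch(ScalarType::F64, Ppp), "f64");
}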
@@ -240,8 +252,15 @@ async fn make_num_pipeline<PPP>(
) -> Result<BinnedResponseDyn, Error>
where
PPP: PipelinePostProcessA,
PPP: PipelinePostProcessB<MinMaxAvgBins<u8>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<u16>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<u32>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<u64>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i8>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i16>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i32>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<i64>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<f32>>,
PPP: PipelinePostProcessB<MinMaxAvgBins<f64>>,
{
if query.channel().backend != node_config.node.backend {
@@ -306,11 +325,17 @@ where
}
}
struct CollectForJson {}
struct CollectForJson {
timeout: Duration,
abort_after_bin_count: u32,
}
impl CollectForJson {
pub fn new() -> Self {
Self {}
pub fn new(timeout: Duration, abort_after_bin_count: u32) -> Self {
Self {
timeout,
abort_after_bin_count,
}
}
}
@@ -322,11 +347,16 @@ pub struct JsonCollector {
}
impl JsonCollector {
pub fn new<NTY>(inp: Pin<Box<dyn Stream<Item = Sitemty<MinMaxAvgBins<NTY>>> + Send>>, bin_count_exp: u32) -> Self
pub fn new<NTY>(
inp: Pin<Box<dyn Stream<Item = Sitemty<MinMaxAvgBins<NTY>>> + Send>>,
bin_count_exp: u32,
timeout: Duration,
abort_after_bin_count: u32,
) -> Self
where
NTY: NumOps + Serialize + 'static,
{
let fut = collect_all(inp, bin_count_exp);
let fut = collect_all(inp, bin_count_exp, timeout, abort_after_bin_count);
let fut = Box::pin(fut);
Self { fut, done: false }
}
@@ -397,7 +427,7 @@ where
inp: Pin<Box<dyn Stream<Item = Sitemty<MinMaxAvgBins<NTY>>> + Send>>,
bin_count_exp: u32,
) -> Pin<Box<dyn Stream<Item = Box<dyn BinnedResponseItem>> + Send>> {
let s = JsonCollector::new(inp, bin_count_exp);
let s = JsonCollector::new(inp, bin_count_exp, self.timeout, self.abort_after_bin_count);
Box::pin(s)
}
}
@@ -488,12 +518,17 @@ impl Serialize for IsoDateTime {
}
}
pub async fn collect_all<T, S>(stream: S, bin_count_exp: u32) -> Result<serde_json::Value, Error>
pub async fn collect_all<T, S>(
stream: S,
bin_count_exp: u32,
timeout: Duration,
abort_after_bin_count: u32,
) -> Result<serde_json::Value, Error>
where
S: Stream<Item = Sitemty<T>> + Unpin,
T: Collectable,
{
let deadline = tokio::time::Instant::now() + Duration::from_millis(1000);
let deadline = tokio::time::Instant::now() + timeout;
let mut collector = <T as Collectable>::new_collector(bin_count_exp);
let mut i1 = 0;
let mut stream = stream;
@@ -501,11 +536,15 @@ where
let item = if i1 == 0 {
stream.next().await
} else {
match tokio::time::timeout_at(deadline, stream.next()).await {
Ok(k) => k,
Err(_) => {
collector.set_timed_out();
None
if abort_after_bin_count > 0 && collector.len() >= abort_after_bin_count as usize {
None
} else {
match tokio::time::timeout_at(deadline, stream.next()).await {
Ok(k) => k,
Err(_) => {
collector.set_timed_out();
None
}
}
}
};
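
Note: the reworked loop awaits the first item without a deadline, races every later item against the deadline, and short-circuits once a non-zero abortAfterBinCount is reached. A runnable sketch with a Vec standing in for the collector (stream contents and thresholds made up):

use futures_util::{stream, StreamExt};
use std::time::Duration;

#[tokio::main]
async fn main() {
    let timeout = Duration::from_millis(2000);
    let abort_after_bin_count: usize = 3;
    let deadline = tokio::time::Instant::now() + timeout;
    let mut collected: Vec<u64> = Vec::new();
    let mut stream = stream::iter(0u64..10);
    let mut i1 = 0;
    loop {
        let item = if i1 == 0 {
            // First item: wait without a deadline.
            stream.next().await
        } else if abort_after_bin_count > 0 && collected.len() >= abort_after_bin_count {
            // Enough bins collected: stop early.
            None
        } else {
            // Race the next item against the shared deadline.
            match tokio::time::timeout_at(deadline, stream.next()).await {
                Ok(k) => k,
                // Deadline elapsed; the real collector calls set_timed_out() here.
                Err(_) => None,
            }
        };
        match item {
            Some(x) => {
                collected.push(x);
                i1 += 1;
            }
            None => break,
        }
    }
    assert_eq!(collected, vec![0, 1, 2]);
}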
@@ -542,7 +581,12 @@ pub async fn binned_json(
node_config: &NodeConfigCached,
query: &BinnedQuery,
) -> Result<Pin<Box<dyn Stream<Item = Result<Bytes, Error>> + Send>>, Error> {
let pl = make_num_pipeline(query, CollectForJson::new(), node_config).await?;
let pl = make_num_pipeline(
query,
CollectForJson::new(query.timeout(), query.abort_after_bin_count()),
node_config,
)
.await?;
let ret = pl.stream.map(|item| {
let fr = item.to_json_result()?;
let buf = fr.to_json_bytes()?;
@@ -933,6 +977,15 @@ impl<NTY> MinMaxAvgBinsCollector<NTY> {
}
}
impl<NTY> WithLen for MinMaxAvgBinsCollector<NTY>
where
NTY: NumOps + Serialize,
{
fn len(&self) -> usize {
self.vals.ts1s.len()
}
}
impl<NTY> Collector for MinMaxAvgBinsCollector<NTY>
where
NTY: NumOps + Serialize,
@@ -974,7 +1027,7 @@ where
};
let ret = MinMaxAvgBinsCollectedResult::<NTY> {
ts_bin_edges: tsa,
counts: vec![],
counts: self.vals.counts,
mins: self.vals.mins,
maxs: self.vals.maxs,
avgs: self.vals.avgs,

View File

@@ -3,6 +3,7 @@ use err::Error;
use netpod::log::*;
use netpod::{AggKind, ByteSize, Channel, HostPort, NanoRange, PreBinnedPatchCoord, ToNanos};
use std::collections::BTreeMap;
use std::time::Duration;
#[derive(Clone, Debug)]
pub struct PreBinnedQuery {
@@ -171,6 +172,8 @@ pub struct BinnedQuery {
cache_usage: CacheUsage,
disk_stats_every: ByteSize,
report_error: bool,
timeout: Duration,
abort_after_bin_count: u32,
}
impl BinnedQuery {
@@ -183,6 +186,8 @@ impl BinnedQuery {
cache_usage: CacheUsage::Use,
disk_stats_every: ByteSize(1024 * 1024 * 4),
report_error: false,
timeout: Duration::from_millis(2000),
abort_after_bin_count: 0,
}
}
@@ -217,6 +222,17 @@ impl BinnedQuery {
.map_or("false", |k| k)
.parse()
.map_err(|e| Error::with_msg(format!("can not parse reportError {:?}", e)))?,
timeout: params
.get("timeout")
.map_or("2000", |k| k)
.parse::<u64>()
.map(|k| Duration::from_millis(k))
.map_err(|e| Error::with_msg(format!("can not parse timeout {:?}", e)))?,
abort_after_bin_count: params
.get("abortAfterBinCount")
.map_or("0", |k| k)
.parse()
.map_err(|e| Error::with_msg(format!("can not parse abortAfterBinCount {:?}", e)))?,
};
info!("BinnedQuery::from_request {:?}", ret);
Ok(ret)
@@ -250,6 +266,14 @@ impl BinnedQuery {
self.report_error
}
pub fn timeout(&self) -> Duration {
self.timeout
}
pub fn abort_after_bin_count(&self) -> u32 {
self.abort_after_bin_count
}
pub fn set_cache_usage(&mut self, k: CacheUsage) {
self.cache_usage = k;
}
@@ -258,12 +282,16 @@ impl BinnedQuery {
self.disk_stats_every = k;
}
pub fn set_timeout(&mut self, k: Duration) {
self.timeout = k;
}
// TODO the BinnedQuery itself should maybe already carry the full HostPort?
// On the other hand, want to keep the flexibility for the fail over possibility..
pub fn url(&self, host: &HostPort) -> String {
let date_fmt = "%Y-%m-%dT%H:%M:%S.%3fZ";
format!(
"http://{}:{}/api/4/binned?cacheUsage={}&channelBackend={}&channelName={}&binCount={}&begDate={}&endDate={}&diskStatsEveryKb={}",
"http://{}:{}/api/4/binned?cacheUsage={}&channelBackend={}&channelName={}&binCount={}&begDate={}&endDate={}&diskStatsEveryKb={}&timeout={}&abortAfterBinCount={}",
host.host,
host.port,
self.cache_usage,
@@ -273,6 +301,8 @@ impl BinnedQuery {
Utc.timestamp_nanos(self.range.beg as i64).format(date_fmt),
Utc.timestamp_nanos(self.range.end as i64).format(date_fmt),
self.disk_stats_every.bytes() / 1024,
self.timeout.as_millis(),
self.abort_after_bin_count,
)
}
}
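
Note: with the defaults above, a URL produced by BinnedQuery::url now ends in the two new parameters. For illustration only (host, channel and the textual rendering of cacheUsage are assumptions):

http://localhost:8360/api/4/binned?cacheUsage=use&channelBackend=testbackend&channelName=scalar-i32-be&binCount=10&begDate=1970-01-01T00:20:10.000Z&endDate=1970-01-01T01:20:30.000Z&diskStatsEveryKb=4096&timeout=2000&abortAfterBinCount=0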

View File

@@ -76,15 +76,6 @@ impl AsyncRead for HttpBodyAsAsyncRead {
}
}
pub struct BytesWrap {}
impl From<BytesWrap> for Bytes {
fn from(_k: BytesWrap) -> Self {
error!("TODO convert result to octets");
todo!("TODO convert result to octets")
}
}
pub fn node_ix_for_patch(patch_coord: &PreBinnedPatchCoord, channel: &Channel, cluster: &Cluster) -> u32 {
let mut hash = tiny_keccak::Sha3::v256();
hash.update(channel.backend.as_bytes());

View File

@@ -1,6 +1,7 @@
use crate::agg::binnedt::TimeBinnableType;
use crate::agg::enp::{Identity, WaveXBinner};
use crate::agg::streams::{Appendable, StreamItem};
use crate::agg::{Fits, FitsInside};
use crate::binned::{
EventValuesAggregator, EventsNodeProcessor, FilterFittingInside, MinMaxAvgBins, NumOps, PushableIndex,
RangeCompletableItem, RangeOverlapInfo, ReadPbv, ReadableFromFile, WithLen, WithTimestamps,
@@ -213,9 +214,36 @@ impl<VT> RangeOverlapInfo for EventValues<VT> {
}
}
impl<VT> FitsInside for EventValues<VT> {
fn fits_inside(&self, range: NanoRange) -> Fits {
if self.tss.is_empty() {
Fits::Empty
} else {
let t1 = *self.tss.first().unwrap();
let t2 = *self.tss.last().unwrap();
if t2 < range.beg {
Fits::Lower
} else if t1 > range.end {
Fits::Greater
} else if t1 < range.beg && t2 > range.end {
Fits::PartlyLowerAndGreater
} else if t1 < range.beg {
Fits::PartlyLower
} else if t2 > range.end {
Fits::PartlyGreater
} else {
Fits::Inside
}
}
}
}
impl<VT> FilterFittingInside for EventValues<VT> {
fn filter_fitting_inside(self, _fit_range: NanoRange) -> Option<Self> {
todo!()
fn filter_fitting_inside(self, fit_range: NanoRange) -> Option<Self> {
match self.fits_inside(fit_range) {
Fits::Inside | Fits::PartlyGreater | Fits::PartlyLower | Fits::PartlyLowerAndGreater => Some(self),
_ => None,
}
}
}

View File

@@ -91,8 +91,8 @@ impl Stream for FileReader {
}
}
#[allow(dead_code)]
struct Fopen1 {
#[allow(dead_code)]
opts: OpenOptions,
fut: Pin<Box<dyn Future<Output = Result<File, std::io::Error>>>>,
term: bool,
@@ -104,8 +104,6 @@ impl Fopen1 {
let mut o1 = OpenOptions::new();
let o2 = o1.read(true);
let res = o2.open(path);
//() == res;
//todo!()
res.await
}) as Pin<Box<dyn Future<Output = Result<File, std::io::Error>>>>;
let _fut2: Box<dyn Future<Output = u32>> = Box::new(async { 123 });

View File

@@ -4,13 +4,13 @@ Error handling and reporting.
use http::uri::InvalidUri;
use nom::error::ErrorKind;
use serde::{Deserialize, Serialize, Serializer};
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
use std::net::AddrParseError;
use std::num::{ParseFloatError, ParseIntError};
use std::string::FromUtf8Error;
use tokio::task::JoinError;
use std::sync::PoisonError;
use tokio::task::JoinError;
/**
The common error type for this application.
@@ -23,14 +23,6 @@ pub struct Error {
trace_str: Option<String>,
}
#[allow(dead_code)]
fn ser_trace<S>(_: &Option<backtrace::Backtrace>, _: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
todoval()
}
impl Error {
pub fn with_msg<S: Into<String>>(s: S) -> Self {
Self {

View File

@@ -257,8 +257,8 @@ impl hyper::body::HttpBody for BodyStreamWrap {
type Data = bytes::Bytes;
type Error = Error;
fn poll_data(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Result<Self::Data, Self::Error>>> {
todo!()
fn poll_data(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Result<Self::Data, Self::Error>>> {
self.0.inner.poll_next_unpin(cx)
}
fn poll_trailers(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<Option<HeaderMap>, Self::Error>> {