After rustfmt

This commit is contained in:
Dominik Werder
2021-04-16 14:43:21 +02:00
parent 1150bb3c55
commit 3afcddb1c7
16 changed files with 951 additions and 832 deletions

View File

@@ -1,20 +1,23 @@
#[allow(unused_imports)]
use tracing::{error, warn, info, debug, trace};
use crate::agg::MinMaxAvgScalarBinBatch;
use bytes::{BufMut, Bytes, BytesMut};
use chrono::{DateTime, Utc};
use err::Error;
use futures_core::Stream;
use futures_util::{pin_mut, FutureExt, StreamExt, TryFutureExt};
use http::uri::Scheme;
use netpod::{
AggKind, Channel, Cluster, NanoRange, Node, NodeConfig, PreBinnedPatchCoord,
PreBinnedPatchGridSpec, PreBinnedPatchIterator, PreBinnedPatchRange, ToNanos,
};
use serde::{Deserialize, Serialize};
use std::future::{ready, Future};
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::Stream;
use futures_util::{StreamExt, FutureExt, pin_mut, TryFutureExt};
use bytes::{Bytes, BytesMut, BufMut};
use chrono::{DateTime, Utc};
use netpod::{Node, Cluster, AggKind, NanoRange, ToNanos, PreBinnedPatchRange, PreBinnedPatchIterator, PreBinnedPatchCoord, Channel, NodeConfig, PreBinnedPatchGridSpec};
use crate::agg::MinMaxAvgScalarBinBatch;
use http::uri::Scheme;
use tiny_keccak::Hasher;
use serde::{Serialize, Deserialize};
use std::sync::Arc;
use std::task::{Context, Poll};
use tiny_keccak::Hasher;
use tokio::fs::OpenOptions;
#[allow(unused_imports)]
use tracing::{debug, error, info, trace, warn};
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Query {
@@ -25,17 +28,24 @@ pub struct Query {
}
impl Query {
pub fn from_request(req: &http::request::Parts) -> Result<Self, Error> {
let params = netpod::query_params(req.uri.query());
let beg_date = params.get("beg_date").ok_or(Error::with_msg("missing beg_date"))?;
let end_date = params.get("end_date").ok_or(Error::with_msg("missing end_date"))?;
let beg_date = params
.get("beg_date")
.ok_or(Error::with_msg("missing beg_date"))?;
let end_date = params
.get("end_date")
.ok_or(Error::with_msg("missing end_date"))?;
let ret = Query {
range: NanoRange {
beg: beg_date.parse::<DateTime<Utc>>()?.to_nanos(),
end: end_date.parse::<DateTime<Utc>>()?.to_nanos(),
},
count: params.get("bin_count").ok_or(Error::with_msg("missing beg_date"))?.parse().unwrap(),
count: params
.get("bin_count")
.ok_or(Error::with_msg("missing beg_date"))?
.parse()
.unwrap(),
agg_kind: AggKind::DimXBins1,
channel: Channel {
backend: params.get("channel_backend").unwrap().into(),
@@ -45,11 +55,12 @@ impl Query {
info!("Query::from_request {:?}", ret);
Ok(ret)
}
}
pub fn binned_bytes_for_http(node_config: Arc<NodeConfig>, query: &Query) -> Result<BinnedBytesForHttpStream, Error> {
pub fn binned_bytes_for_http(
node_config: Arc<NodeConfig>,
query: &Query,
) -> Result<BinnedBytesForHttpStream, Error> {
let agg_kind = AggKind::DimXBins1;
// TODO
@@ -59,15 +70,18 @@ pub fn binned_bytes_for_http(node_config: Arc<NodeConfig>, query: &Query) -> Res
Some(spec) => {
info!("GOT PreBinnedPatchGridSpec: {:?}", spec);
warn!("Pass here to BinnedStream what kind of Agg, range, ...");
let s1 = BinnedStream::new(PreBinnedPatchIterator::from_range(spec), query.channel.clone(), agg_kind, node_config.clone());
let s1 = BinnedStream::new(
PreBinnedPatchIterator::from_range(spec),
query.channel.clone(),
agg_kind,
node_config.clone(),
);
// Iterate over the patches.
// Request the patch from each node.
// Merge.
// Agg+Bin.
// Deliver.
let ret = BinnedBytesForHttpStream {
inp: s1,
};
let ret = BinnedBytesForHttpStream { inp: s1 };
Ok(ret)
}
None => {
@@ -78,13 +92,11 @@ pub fn binned_bytes_for_http(node_config: Arc<NodeConfig>, query: &Query) -> Res
}
}
/// Adapter that drives an inner `BinnedStream` and re-emits its output as
/// `Result<Bytes, Error>` items (see the `Stream` impl below), so the binned
/// data can be served as an HTTP response body.
pub struct BinnedBytesForHttpStream {
// Source of binned batches; polled and converted to `Bytes` on demand.
inp: BinnedStream,
}
impl BinnedBytesForHttpStream {
}
impl BinnedBytesForHttpStream {}
impl Stream for BinnedBytesForHttpStream {
type Item = Result<Bytes, Error>;
@@ -102,13 +114,8 @@ impl Stream for BinnedBytesForHttpStream {
Pending => Pending,
}
}
}
#[derive(Clone, Debug)]
pub struct PreBinnedQuery {
patch: PreBinnedPatchCoord,
@@ -117,7 +124,6 @@ pub struct PreBinnedQuery {
}
impl PreBinnedQuery {
pub fn from_request(req: &http::request::Parts) -> Result<Self, Error> {
let params = netpod::query_params(req.uri.query());
let ret = PreBinnedQuery {
@@ -131,34 +137,44 @@ impl PreBinnedQuery {
info!("PreBinnedQuery::from_request {:?}", ret);
Ok(ret)
}
}
// NOTE This answers a request for a single valid pre-binned patch.
// A user must first make sure that the grid spec is valid, and that this node is responsible for it.
// Otherwise it is an error.
pub fn pre_binned_bytes_for_http(node_config: Arc<NodeConfig>, query: &PreBinnedQuery) -> Result<PreBinnedValueByteStream, Error> {
info!("pre_binned_bytes_for_http {:?} {:?}", query, node_config.node);
let ret = PreBinnedValueByteStream::new(query.patch.clone(), query.channel.clone(), query.agg_kind.clone(), node_config);
pub fn pre_binned_bytes_for_http(
node_config: Arc<NodeConfig>,
query: &PreBinnedQuery,
) -> Result<PreBinnedValueByteStream, Error> {
info!(
"pre_binned_bytes_for_http {:?} {:?}",
query, node_config.node
);
let ret = PreBinnedValueByteStream::new(
query.patch.clone(),
query.channel.clone(),
query.agg_kind.clone(),
node_config,
);
Ok(ret)
}
/// Byte-oriented wrapper around a `PreBinnedValueStream`, constructed by
/// `pre_binned_bytes_for_http` to answer a request for a single
/// pre-binned patch.
pub struct PreBinnedValueByteStream {
// Underlying value stream; its items are serialized when this stream is polled.
inp: PreBinnedValueStream,
}
impl PreBinnedValueByteStream {
pub fn new(patch: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, node_config: Arc<NodeConfig>) -> Self {
pub fn new(
patch: PreBinnedPatchCoord,
channel: Channel,
agg_kind: AggKind,
node_config: Arc<NodeConfig>,
) -> Self {
warn!("PreBinnedValueByteStream");
Self {
inp: PreBinnedValueStream::new(patch, channel, agg_kind, node_config),
}
}
}
impl Stream for PreBinnedValueByteStream {
@@ -177,25 +193,25 @@ impl Stream for PreBinnedValueByteStream {
Pending => Pending,
}
}
}
pub struct PreBinnedValueStream {
patch_coord: PreBinnedPatchCoord,
channel: Channel,
agg_kind: AggKind,
node_config: Arc<NodeConfig>,
open_check_local_file: Option<Pin<Box<dyn Future<Output=Result<tokio::fs::File, std::io::Error>> + Send>>>,
fut2: Option<Pin<Box<dyn Stream<Item=Result<MinMaxAvgScalarBinBatch, Error>> + Send>>>,
open_check_local_file:
Option<Pin<Box<dyn Future<Output = Result<tokio::fs::File, std::io::Error>> + Send>>>,
fut2: Option<Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarBinBatch, Error>> + Send>>>,
}
impl PreBinnedValueStream {
pub fn new(patch_coord: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, node_config: Arc<NodeConfig>) -> Self {
pub fn new(
patch_coord: PreBinnedPatchCoord,
channel: Channel,
agg_kind: AggKind,
node_config: Arc<NodeConfig>,
) -> Self {
let node_ix = node_ix_for_patch(&patch_coord, &channel, &node_config.cluster);
assert!(node_ix == node_config.node.id);
Self {
@@ -209,7 +225,10 @@ impl PreBinnedValueStream {
}
fn try_setup_fetch_prebinned_higher_res(&mut self) {
info!("try to find a next better granularity for {:?}", self.patch_coord);
info!(
"try to find a next better granularity for {:?}",
self.patch_coord
);
let g = self.patch_coord.bin_t_len();
let range = NanoRange {
beg: self.patch_coord.patch_beg(),
@@ -218,7 +237,14 @@ impl PreBinnedValueStream {
match PreBinnedPatchRange::covering_range(range, self.patch_coord.bin_count() + 1) {
Some(range) => {
let h = range.grid_spec.bin_t_len();
info!("FOUND NEXT GRAN g {} h {} ratio {} mod {} {:?}", g, h, g/h, g%h, range);
info!(
"FOUND NEXT GRAN g {} h {} ratio {} mod {} {:?}",
g,
h,
g / h,
g % h,
range
);
assert!(g / h > 1);
assert!(g / h < 20);
assert!(g % h == 0);
@@ -228,14 +254,19 @@ impl PreBinnedValueStream {
let node_config = self.node_config.clone();
let mut patch_it = PreBinnedPatchIterator::from_range(range);
let s = futures_util::stream::iter(patch_it)
.map(move |coord| {
PreBinnedValueFetchedStream::new(coord, channel.clone(), agg_kind.clone(), node_config.clone())
})
.flatten()
.map(move |k| {
info!("ITEM from sub res bin_size {} {:?}", bin_size, k);
k
});
.map(move |coord| {
PreBinnedValueFetchedStream::new(
coord,
channel.clone(),
agg_kind.clone(),
node_config.clone(),
)
})
.flatten()
.map(move |k| {
info!("ITEM from sub res bin_size {} {:?}", bin_size, k);
k
});
self.fut2 = Some(Box::pin(s));
}
None => {
@@ -250,7 +281,6 @@ impl PreBinnedValueStream {
}
}
}
}
impl Stream for PreBinnedValueStream {
@@ -262,48 +292,41 @@ impl Stream for PreBinnedValueStream {
'outer: loop {
break if let Some(fut) = self.fut2.as_mut() {
fut.poll_next_unpin(cx)
}
else if let Some(fut) = self.open_check_local_file.as_mut() {
} else if let Some(fut) = self.open_check_local_file.as_mut() {
match fut.poll_unpin(cx) {
Ready(Ok(file)) => {
todo!("IMPLEMENT READ FROM LOCAL CACHE");
Pending
}
Ready(Err(e)) => {
match e.kind() {
std::io::ErrorKind::NotFound => {
warn!("TODO LOCAL CACHE FILE NOT FOUND");
self.try_setup_fetch_prebinned_higher_res();
continue 'outer;
}
_ => {
error!("File I/O error: {:?}", e);
Ready(Some(Err(e.into())))
}
Ready(Err(e)) => match e.kind() {
std::io::ErrorKind::NotFound => {
warn!("TODO LOCAL CACHE FILE NOT FOUND");
self.try_setup_fetch_prebinned_higher_res();
continue 'outer;
}
}
_ => {
error!("File I/O error: {:?}", e);
Ready(Some(Err(e.into())))
}
},
Pending => Pending,
}
}
else {
} else {
use std::os::unix::fs::OpenOptionsExt;
let mut opts = std::fs::OpenOptions::new();
opts.read(true);
let fut = async {
tokio::fs::OpenOptions::from(opts)
.open("/DOESNOTEXIST").await
.open("/DOESNOTEXIST")
.await
};
self.open_check_local_file = Some(Box::pin(fut));
continue 'outer;
}
};
}
}
}
pub struct PreBinnedValueFetchedStream {
uri: http::Uri,
patch_coord: PreBinnedPatchCoord,
@@ -312,8 +335,12 @@ pub struct PreBinnedValueFetchedStream {
}
impl PreBinnedValueFetchedStream {
pub fn new(patch_coord: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, node_config: Arc<NodeConfig>) -> Self {
pub fn new(
patch_coord: PreBinnedPatchCoord,
channel: Channel,
agg_kind: AggKind,
node_config: Arc<NodeConfig>,
) -> Self {
let nodeix = node_ix_for_patch(&patch_coord, &channel, &node_config.cluster);
let node = &node_config.cluster.nodes[nodeix as usize];
warn!("TODO defining property of a PreBinnedPatchCoord? patchlen + ix? binsize + patchix? binsize + patchsize + patchix?");
@@ -325,7 +352,9 @@ impl PreBinnedValueFetchedStream {
channel.backend,
channel.name,
agg_kind,
).parse().unwrap();
)
.parse()
.unwrap();
Self {
uri,
patch_coord,
@@ -333,7 +362,6 @@ impl PreBinnedValueFetchedStream {
res: None,
}
}
}
impl Stream for PreBinnedValueFetchedStream {
@@ -351,79 +379,72 @@ impl Stream for PreBinnedValueFetchedStream {
pin_mut!(res);
use hyper::body::HttpBody;
match res.poll_data(cx) {
Ready(Some(Ok(k))) => {
Pending
}
Ready(Some(Ok(k))) => Pending,
Ready(Some(Err(e))) => Ready(Some(Err(e.into()))),
Ready(None) => Ready(None),
Pending => Pending,
}
}
None => {
match self.resfut.as_mut() {
Some(mut resfut) => {
match resfut.poll_unpin(cx) {
Ready(res) => {
match res {
Ok(res) => {
info!("GOT result from SUB REQUEST: {:?}", res);
self.res = Some(res);
continue 'outer;
}
Err(e) => {
error!("PreBinnedValueStream error in stream {:?}", e);
Ready(Some(Err(e.into())))
}
}
}
Pending => {
Pending
}
None => match self.resfut.as_mut() {
Some(mut resfut) => match resfut.poll_unpin(cx) {
Ready(res) => match res {
Ok(res) => {
info!("GOT result from SUB REQUEST: {:?}", res);
self.res = Some(res);
continue 'outer;
}
}
None => {
let req = hyper::Request::builder()
Err(e) => {
error!("PreBinnedValueStream error in stream {:?}", e);
Ready(Some(Err(e.into())))
}
},
Pending => Pending,
},
None => {
let req = hyper::Request::builder()
.method(http::Method::GET)
.uri(&self.uri)
.body(hyper::Body::empty())?;
let client = hyper::Client::new();
info!("START REQUEST FOR {:?}", req);
self.resfut = Some(client.request(req));
continue 'outer;
}
let client = hyper::Client::new();
info!("START REQUEST FOR {:?}", req);
self.resfut = Some(client.request(req));
continue 'outer;
}
}
}
},
};
}
}
}
pub struct BinnedStream {
inp: Pin<Box<dyn Stream<Item=Result<MinMaxAvgScalarBinBatch, Error>> + Send>>,
inp: Pin<Box<dyn Stream<Item = Result<MinMaxAvgScalarBinBatch, Error>> + Send>>,
}
impl BinnedStream {
pub fn new(patch_it: PreBinnedPatchIterator, channel: Channel, agg_kind: AggKind, node_config: Arc<NodeConfig>) -> Self {
pub fn new(
patch_it: PreBinnedPatchIterator,
channel: Channel,
agg_kind: AggKind,
node_config: Arc<NodeConfig>,
) -> Self {
warn!("BinnedStream will open a PreBinnedValueStream");
let mut patch_it = patch_it;
let inp = futures_util::stream::iter(patch_it)
.map(move |coord| {
PreBinnedValueFetchedStream::new(coord, channel.clone(), agg_kind.clone(), node_config.clone())
})
.flatten()
.map(|k| {
info!("ITEM {:?}", k);
k
});
Self {
inp: Box::pin(inp),
}
.map(move |coord| {
PreBinnedValueFetchedStream::new(
coord,
channel.clone(),
agg_kind.clone(),
node_config.clone(),
)
})
.flatten()
.map(|k| {
info!("ITEM {:?}", k);
k
});
Self { inp: Box::pin(inp) }
}
}
impl Stream for BinnedStream {
@@ -433,33 +454,28 @@ impl Stream for BinnedStream {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
use Poll::*;
match self.inp.poll_next_unpin(cx) {
Ready(Some(Ok(k))) => {
Ready(Some(Ok(k)))
}
Ready(Some(Ok(k))) => Ready(Some(Ok(k))),
Ready(Some(Err(e))) => Ready(Some(Err(e))),
Ready(None) => Ready(None),
Pending => Pending,
}
}
}
/// Placeholder result type; its conversion to `Bytes` is not implemented yet
/// (see the `From` impl below, which currently panics via `todo!`).
pub struct SomeReturnThing {}
impl From<SomeReturnThing> for Bytes {
/// Stub conversion: logs an error and then panics via `todo!`.
/// NOTE(review): callers must not invoke this yet; it never returns.
fn from(k: SomeReturnThing) -> Self {
error!("TODO convert result to octets");
todo!("TODO convert result to octets")
}
}
pub fn node_ix_for_patch(patch_coord: &PreBinnedPatchCoord, channel: &Channel, cluster: &Cluster) -> u32 {
pub fn node_ix_for_patch(
patch_coord: &PreBinnedPatchCoord,
channel: &Channel,
cluster: &Cluster,
) -> u32 {
let mut hash = tiny_keccak::Sha3::v256();
hash.update(channel.backend.as_bytes());
hash.update(channel.name.as_bytes());