WIP
@@ -5,12 +5,12 @@ authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2018"

[dependencies]
+http = "0.2"
+hyper = { version = "0.14", features = ["http1", "http2", "client", "server", "tcp", "stream"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
-http = "0.2"
chrono = { version = "0.4.19", features = ["serde"] }
-tokio = { version = "1.4.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
+tokio = { version = "1.5.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
-hyper = { version = "0.14", features = ["http1", "http2", "client", "server", "tcp", "stream"] }
async-channel = "1.6"
bytes = "1.0.1"
byteorder = "1.4.3"
@@ -9,6 +9,7 @@ use futures_util::{pin_mut, StreamExt, future::ready};
use netpod::{Channel, ChannelConfig, ScalarType, Shape, Node, timeunits::*};
use crate::merge::MergeDim1F32Stream;
use netpod::BinSpecDimT;
+use std::sync::Arc;

pub trait AggregatorTdim {
    type InputValue;
@@ -725,6 +726,7 @@ fn agg_x_dim_0() {

async fn agg_x_dim_0_inner() {
    let node = make_test_node(0);
+   let node = Arc::new(node);
    let query = netpod::AggQuerySingleChannel {
        channel_config: ChannelConfig {
            channel: Channel {
@@ -745,7 +747,7 @@ async fn agg_x_dim_0_inner() {
    let bin_count = 20;
    let ts1 = query.timebin as u64 * query.channel_config.time_bin_size;
    let ts2 = ts1 + HOUR * 24;
-   let fut1 = crate::EventBlobsComplete::new(&query, query.channel_config.clone(), &node)
+   let fut1 = crate::EventBlobsComplete::new(&query, query.channel_config.clone(), node)
    .into_dim_1_f32_stream()
    //.take(1000)
    .map(|q| {
@@ -779,6 +781,7 @@ async fn agg_x_dim_1_inner() {
    // /data/sf-databuffer/daq_swissfel/daq_swissfel_3/byTime/S10BC01-DBAM070\:BAM_CH1_NORM/*
    // S10BC01-DBAM070:BAM_CH1_NORM
    let node = make_test_node(0);
+   let node = Arc::new(node);
    let query = netpod::AggQuerySingleChannel {
        channel_config: ChannelConfig {
            channel: Channel {
@@ -799,7 +802,7 @@ async fn agg_x_dim_1_inner() {
    let bin_count = 10;
    let ts1 = query.timebin as u64 * query.channel_config.time_bin_size;
    let ts2 = ts1 + HOUR * 24;
-   let fut1 = crate::EventBlobsComplete::new(&query, query.channel_config.clone(), &node)
+   let fut1 = crate::EventBlobsComplete::new(&query, query.channel_config.clone(), node)
    .into_dim_1_f32_stream()
    //.take(1000)
    .map(|q| {
@@ -850,7 +853,8 @@ async fn merge_0_inner() {
        make_test_node(k)
    })
    .map(|node| {
-       crate::EventBlobsComplete::new(&query, query.channel_config.clone(), &node)
+       let node = Arc::new(node);
+       crate::EventBlobsComplete::new(&query, query.channel_config.clone(), node)
        .into_dim_1_f32_stream()
    })
    .collect();
@@ -8,12 +8,14 @@ use futures_core::Stream;
use futures_util::{StreamExt, FutureExt, pin_mut};
use bytes::{Bytes, BytesMut, BufMut};
use chrono::{DateTime, Utc};
-use netpod::{Node, Cluster, AggKind, NanoRange, ToNanos, PreBinnedPatchGridSpec, PreBinnedPatchIterator, PreBinnedPatchCoord, Channel};
+use netpod::{Node, Cluster, AggKind, NanoRange, ToNanos, PreBinnedPatchGridSpec, PreBinnedPatchIterator, PreBinnedPatchCoord, Channel, NodeConfig};
use crate::agg::MinMaxAvgScalarBinBatch;
use http::uri::Scheme;
use tiny_keccak::Hasher;
+use serde::{Serialize, Deserialize};
+use std::sync::Arc;

-#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Query {
    range: NanoRange,
    count: u64,
@@ -47,12 +49,7 @@ impl Query {
    }


-pub struct BinParams {
-    pub node: Node,
-    pub cluster: Cluster,
-}
-
-pub fn binned_bytes_for_http(params: BinParams, query: &Query) -> Result<BinnedBytesForHttpStream, Error> {
+pub fn binned_bytes_for_http(node_config: Arc<NodeConfig>, query: &Query) -> Result<BinnedBytesForHttpStream, Error> {
    let agg_kind = AggKind::DimXBins1;

    // TODO
@@ -62,7 +59,7 @@ pub fn binned_bytes_for_http(params: BinParams, query: &Query) -> Result<BinnedB
        Some(spec) => {
            info!("GOT PreBinnedPatchGridSpec: {:?}", spec);
            warn!("Pass here to BinnedStream what kind of Agg, range, ...");
-           let s1 = BinnedStream::new(PreBinnedPatchIterator::from_range(spec), query.channel.clone(), agg_kind, params.cluster.clone());
+           let s1 = BinnedStream::new(PreBinnedPatchIterator::from_range(spec), query.channel.clone(), agg_kind, node_config.cluster.clone());
            // Iterate over the patches.
            // Request the patch from each node.
            // Merge.
@@ -75,6 +72,7 @@ pub fn binned_bytes_for_http(params: BinParams, query: &Query) -> Result<BinnedB
        }
        None => {
            // Merge raw data
+           error!("TODO merge raw data");
            todo!()
        }
    }
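
The BinParams carrier struct is gone: the handler now receives one shared Arc<NodeConfig> that bundles the local node with the cluster topology, and each task clones the handle instead of copying the data. A reduced sketch of that shape (field and type names simplified from the diff, not the real netpod types):

use std::sync::Arc;

// Reduced sketch of the NodeConfig threading this commit introduces:
// one Arc handle carries both the local node and the cluster, and
// cloning it is a pointer bump rather than a deep copy.
struct NodeInfo { port: u16 }
struct ClusterInfo { nodes: Vec<Arc<NodeInfo>> }
struct NodeCfg {
    node: Arc<NodeInfo>,
    cluster: Arc<ClusterInfo>,
}

fn handler(node_config: Arc<NodeCfg>) -> u16 {
    // Every consumer down the call chain gets its own cheap clone.
    let cfg = node_config.clone();
    cfg.node.port
}
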
@@ -108,6 +106,78 @@ impl Stream for BinnedBytesForHttpStream {
}


+#[derive(Clone, Debug)]
+pub struct PreBinnedQuery {
+    patch: PreBinnedPatchCoord,
+    agg_kind: AggKind,
+    channel: Channel,
+}
+
+impl PreBinnedQuery {
+
+    pub fn from_request(req: &http::request::Parts) -> Result<Self, Error> {
+        let params = netpod::query_params(req.uri.query());
+        let ret = PreBinnedQuery {
+            patch: PreBinnedPatchCoord {
+                range: NanoRange {
+                    beg: params.get("beg").ok_or(Error::with_msg("missing beg"))?.parse()?,
+                    end: params.get("end").ok_or(Error::with_msg("missing end"))?.parse()?,
+                },
+            },
+            agg_kind: AggKind::DimXBins1,
+            channel: Channel {
+                backend: params.get("channel_backend").unwrap().into(),
+                keyspace: params.get("channel_keyspace").unwrap().parse().unwrap(),
+                name: params.get("channel_name").unwrap().into(),
+            },
+        };
+        info!("PreBinnedQuery::from_request {:?}", ret);
+        Ok(ret)
+    }
+
+}
+
+// NOTE This answers a request for a single valid pre-binned patch.
+// A user must first make sure that the grid spec is valid, and that this node is responsible for it.
+// Otherwise it is an error.
+pub fn pre_binned_bytes_for_http(node_config: Arc<NodeConfig>, query: &PreBinnedQuery) -> Result<PreBinnedValueByteStream, Error> {
+    info!("pre_binned_bytes_for_http {:?} {:?}", query, node_config.node);
+    let ret = PreBinnedValueByteStream::new(query.patch.clone(), query.channel.clone(), query.agg_kind.clone(), node_config);
+    Ok(ret)
+}
+
+pub struct PreBinnedValueByteStream {
+}
+
+impl PreBinnedValueByteStream {
+
+    pub fn new(patch: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, node_config: Arc<NodeConfig>) -> Self {
+        Self {
+        }
+    }
+
+}
+
+impl Stream for PreBinnedValueByteStream {
+    type Item = Result<Bytes, Error>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
+        error!("PreBinnedValueByteStream poll_next");
+        todo!()
+    }
+
+}


pub struct PreBinnedValueStream {
    uri: http::Uri,
    patch_coord: PreBinnedPatchCoord,
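
For orientation, PreBinnedQuery::from_request implies a query-string shape like the sketch below. The parameter names come straight from the parsing code above; the host, port and concrete values are placeholders that merely echo the test request elsewhere in this commit:

// Sketch of a URL that PreBinnedQuery::from_request would accept.
// beg/end are nanosecond timestamps; the other parameters name the channel.
fn prebinned_url() -> String {
    format!(
        "http://{}:{}/api/1/prebinned?beg={}&end={}&channel_backend={}&channel_keyspace={}&channel_name={}",
        "localhost", 8360, 0u64, 3_600_000_000_000u64, "testbackend", 3, "wave1"
    )
}
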
@@ -117,7 +187,7 @@ pub struct PreBinnedValueStream {

impl PreBinnedValueStream {

-   pub fn new(patch_coord: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, cluster: Cluster) -> Self {
+   pub fn new(patch_coord: PreBinnedPatchCoord, channel: Channel, agg_kind: AggKind, cluster: Arc<Cluster>) -> Self {
        let nodeix = node_ix_for_patch(&patch_coord, &channel, &cluster);
        let node = &cluster.nodes[nodeix];
        let uri: hyper::Uri = format!(
@@ -142,8 +212,16 @@ impl PreBinnedValueStream {

pub fn node_ix_for_patch(patch_coord: &PreBinnedPatchCoord, channel: &Channel, cluster: &Cluster) -> usize {
    let mut hash = tiny_keccak::Sha3::v256();
    hash.update(channel.backend.as_bytes());
    hash.update(channel.name.as_bytes());
-   0
+   hash.update(&patch_coord.range.beg.to_le_bytes());
+   hash.update(&patch_coord.range.end.to_le_bytes());
+   let mut out = [0; 32];
+   hash.finalize(&mut out);
+   let mut a = [out[0], out[1], out[2], out[3]];
+   let ix = u32::from_le_bytes(a) % cluster.nodes.len() as u32;
+   info!("node_ix_for_patch {}", ix);
+   ix as usize
}

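
node_ix_for_patch, fleshed out above, is what makes patch ownership deterministic: every node hashes the same (backend, channel, range) key and reduces it modulo the node count, so any node can compute the owner without coordination. A self-contained sketch of the same idea, using the tiny_keccak 2.x Sha3 API that the diff itself uses (the function name and inputs here are placeholders):

use tiny_keccak::{Hasher, Sha3};

// Deterministically map a (backend, channel, time range) key onto one of
// node_count nodes. Equal inputs yield equal indices on every node.
fn owner_index(backend: &str, channel: &str, beg: u64, end: u64, node_count: usize) -> usize {
    let mut hash = Sha3::v256();
    hash.update(backend.as_bytes());
    hash.update(channel.as_bytes());
    hash.update(&beg.to_le_bytes());
    hash.update(&end.to_le_bytes());
    let mut out = [0u8; 32];
    hash.finalize(&mut out);
    // Fold the first four digest bytes into a u32, then reduce modulo the node count.
    let ix = u32::from_le_bytes([out[0], out[1], out[2], out[3]]) % node_count as u32;
    ix as usize
}
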
@@ -177,7 +255,7 @@ impl Stream for PreBinnedValueStream {
                Ready(res) => {
                    match res {
                        Ok(res) => {
-                           info!("Got result from subrequest: {:?}", res);
+                           info!("GOT result from SUB REQUEST: {:?}", res);
                            self.res = Some(res);
                            continue 'outer;
                        }
@@ -190,12 +268,12 @@
                    }
                }
                None => {
-
                    let req = hyper::Request::builder()
                        .method(http::Method::GET)
                        .uri(&self.uri)
                        .body(hyper::Body::empty())?;
                    let client = hyper::Client::new();
+                   info!("START REQUEST FOR {:?}", req);
                    self.resfut = Some(client.request(req));
                    continue 'outer;
                }
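
The None arm above is the lazy-subrequest pattern: on first poll the stream builds a GET to the owning node, stashes hyper's ResponseFuture in self.resfut, and later polls keep driving that same stored future. A minimal sketch of that shape under hyper 0.14 (struct and method names here are placeholders, not this commit's code):

use hyper::{Body, Client, Method, Request, client::ResponseFuture};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

struct SubRequest {
    uri: String,
    resfut: Option<ResponseFuture>,
}

impl SubRequest {
    fn poll_step(&mut self, cx: &mut Context) -> Poll<hyper::Result<hyper::Response<Body>>> {
        if self.resfut.is_none() {
            // First poll: start the request and remember the in-flight future.
            let req = Request::builder()
                .method(Method::GET)
                .uri(self.uri.as_str())
                .body(Body::empty())
                .unwrap();
            self.resfut = Some(Client::new().request(req));
        }
        // Subsequent polls drive the same stored future to completion.
        Pin::new(self.resfut.as_mut().unwrap()).poll(cx)
    }
}
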
@@ -214,13 +292,17 @@ pub struct BinnedStream {

impl BinnedStream {

-   pub fn new(patch_it: PreBinnedPatchIterator, channel: Channel, agg_kind: AggKind, cluster: Cluster) -> Self {
+   pub fn new(patch_it: PreBinnedPatchIterator, channel: Channel, agg_kind: AggKind, cluster: Arc<Cluster>) -> Self {
        let mut patch_it = patch_it;
        let inp = futures_util::stream::iter(patch_it)
            .map(move |coord| {
                PreBinnedValueStream::new(coord, channel.clone(), agg_kind.clone(), cluster.clone())
            })
-           .flatten();
+           .flatten()
+           .map(|k| {
+               info!("ITEM {:?}", k);
+               k
+           });
        Self {
            inp: Box::pin(inp),
        }
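
BinnedStream::new turns the patch iterator into one continuous stream: each coordinate is mapped to its own sub-stream, and flatten() chains them in order. A runnable miniature of that iterate-map-flatten shape (plain integers stand in for patch coordinates and values):

use futures_util::StreamExt;

// Each element of the iterator becomes its own stream; flatten() chains
// them into a single ordered sequence, like BinnedStream::new above.
async fn demo() {
    let inp = futures_util::stream::iter(0..3)
        .map(|patch| futures_util::stream::iter(vec![(patch, 0), (patch, 1)]))
        .flatten()
        .map(|item| {
            // Tap point, analogous to the info!("ITEM {:?}", ...) above.
            item
        });
    let all: Vec<(i32, i32)> = inp.collect().await;
    assert_eq!(all.len(), 6);
}
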
@@ -253,6 +335,7 @@ pub struct SomeReturnThing {}
impl From<SomeReturnThing> for Bytes {

    fn from(k: SomeReturnThing) -> Self {
+       error!("TODO convert result to octets");
        todo!("TODO convert result to octets")
    }

@@ -13,6 +13,7 @@ use bytes::{Bytes, BytesMut, Buf};
use std::path::PathBuf;
use bitshuffle::bitshuffle_decompress;
use netpod::{ScalarType, Shape, Node, ChannelConfig};
+use std::sync::Arc;

pub mod agg;
pub mod gen;
@@ -20,8 +21,8 @@ pub mod merge;
pub mod cache;


-pub async fn read_test_1(query: &netpod::AggQuerySingleChannel, node: &Node) -> Result<netpod::BodyStream, Error> {
-   let path = datapath(query.timebin as u64, &query.channel_config, node);
+pub async fn read_test_1(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> Result<netpod::BodyStream, Error> {
+   let path = datapath(query.timebin as u64, &query.channel_config, &node);
    debug!("try path: {:?}", path);
    let fin = OpenOptions::new()
        .read(true)
@@ -143,7 +144,7 @@ impl FusedFuture for Fopen1 {
unsafe impl Send for Fopen1 {}


-pub fn raw_concat_channel_read_stream_try_open_in_background(query: &netpod::AggQuerySingleChannel, node: &Node) -> impl Stream<Item=Result<Bytes, Error>> + Send {
+pub fn raw_concat_channel_read_stream_try_open_in_background(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> impl Stream<Item=Result<Bytes, Error>> + Send {
    let mut query = query.clone();
    let node = node.clone();
    async_stream::stream! {
@@ -268,11 +269,11 @@ pub fn raw_concat_channel_read_stream_try_open_in_background(query: &netpod::Agg
}


-pub fn raw_concat_channel_read_stream_file_pipe(query: &netpod::AggQuerySingleChannel, node: &netpod::Node) -> impl Stream<Item=Result<BytesMut, Error>> + Send {
+pub fn raw_concat_channel_read_stream_file_pipe(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> impl Stream<Item=Result<BytesMut, Error>> + Send {
    let query = query.clone();
    let node = node.clone();
    async_stream::stream! {
-       let chrx = open_files(&query, &node);
+       let chrx = open_files(&query, node.clone());
        while let Ok(file) = chrx.recv().await {
            let mut file = match file {
                Ok(k) => k,
@@ -294,7 +295,7 @@ pub fn raw_concat_channel_read_stream_file_pipe(query: &netpod::AggQuerySingleCh
    }
}

-fn open_files(query: &netpod::AggQuerySingleChannel, node: &netpod::Node) -> async_channel::Receiver<Result<tokio::fs::File, Error>> {
+fn open_files(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> async_channel::Receiver<Result<tokio::fs::File, Error>> {
    let (chtx, chrx) = async_channel::bounded(2);
    let mut query = query.clone();
    let node = node.clone();
@@ -346,11 +347,11 @@ pub fn file_content_stream(mut file: tokio::fs::File, buffer_size: usize) -> imp
}


-pub fn parsed1(query: &netpod::AggQuerySingleChannel, node: &netpod::Node) -> impl Stream<Item=Result<Bytes, Error>> + Send {
+pub fn parsed1(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> impl Stream<Item=Result<Bytes, Error>> + Send {
    let query = query.clone();
    let node = node.clone();
    async_stream::stream! {
-       let filerx = open_files(&query, &node);
+       let filerx = open_files(&query, node.clone());
        while let Ok(fileres) = filerx.recv().await {
            match fileres {
                Ok(file) => {
@@ -392,7 +393,8 @@ pub struct EventBlobsComplete {
}

impl EventBlobsComplete {
-   pub fn new(query: &netpod::AggQuerySingleChannel, channel_config: ChannelConfig, node: &netpod::Node) -> Self {
+
+   pub fn new(query: &netpod::AggQuerySingleChannel, channel_config: ChannelConfig, node: Arc<Node>) -> Self {
        Self {
            file_chan: open_files(query, node),
            evs: None,
@@ -400,6 +402,7 @@ impl EventBlobsComplete {
            channel_config,
        }
    }
+
}

impl Stream for EventBlobsComplete {
@@ -446,11 +449,11 @@ impl Stream for EventBlobsComplete {
}


-pub fn event_blobs_complete(query: &netpod::AggQuerySingleChannel, node: &netpod::Node) -> impl Stream<Item=Result<EventFull, Error>> + Send {
+pub fn event_blobs_complete(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> impl Stream<Item=Result<EventFull, Error>> + Send {
    let query = query.clone();
    let node = node.clone();
    async_stream::stream! {
-       let filerx = open_files(&query, &node);
+       let filerx = open_files(&query, node.clone());
        while let Ok(fileres) = filerx.recv().await {
            match fileres {
                Ok(file) => {
@@ -782,7 +785,7 @@ impl NeedMinBuffer {
impl Stream for NeedMinBuffer {
    type Item = Result<BytesMut, Error>;

-   fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+   fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        loop {
            let mut again = false;
            let g = &mut self.inp;
@@ -833,7 +836,7 @@ impl Stream for NeedMinBuffer {



-pub fn raw_concat_channel_read_stream(query: &netpod::AggQuerySingleChannel, node: &netpod::Node) -> impl Stream<Item=Result<Bytes, Error>> + Send {
+pub fn raw_concat_channel_read_stream(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> impl Stream<Item=Result<Bytes, Error>> + Send {
    let mut query = query.clone();
    let node = node.clone();
    async_stream::stream! {
@@ -841,7 +844,7 @@ pub fn raw_concat_channel_read_stream(query: &netpod::AggQuerySingleChannel, nod
        loop {
            let timebin = 18700 + i1;
            query.timebin = timebin;
-           let s2 = raw_concat_channel_read_stream_timebin(&query, &node);
+           let s2 = raw_concat_channel_read_stream_timebin(&query, node.clone());
            pin_mut!(s2);
            while let Some(item) = s2.next().await {
                yield item;
@@ -855,7 +858,7 @@ pub fn raw_concat_channel_read_stream(query: &netpod::AggQuerySingleChannel, nod
}


-pub fn raw_concat_channel_read_stream_timebin(query: &netpod::AggQuerySingleChannel, node: &netpod::Node) -> impl Stream<Item=Result<Bytes, Error>> {
+pub fn raw_concat_channel_read_stream_timebin(query: &netpod::AggQuerySingleChannel, node: Arc<Node>) -> impl Stream<Item=Result<Bytes, Error>> {
    let query = query.clone();
    let node = node.clone();
    async_stream::stream! {
@@ -885,7 +888,7 @@ pub fn raw_concat_channel_read_stream_timebin(query: &netpod::AggQuerySingleChan
}


-fn datapath(timebin: u64, config: &netpod::ChannelConfig, node: &netpod::Node) -> PathBuf {
+fn datapath(timebin: u64, config: &netpod::ChannelConfig, node: &Node) -> PathBuf {
    //let pre = "/data/sf-databuffer/daq_swissfel";
    node.data_base_path
        .join(format!("{}_{}", node.ksprefix, config.channel.keyspace))
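
The recurring &Node to Arc<Node> change in this file is what lets the async_stream blocks compile as owned, 'static streams: the returned stream outlives the function call, so it cannot borrow the node, but it can own a cheap shared handle. A reduced sketch (Node2 is a stand-in for netpod's Node):

use std::sync::Arc;

#[derive(Debug)]
struct Node2 { host: String }

// The stream is returned to the caller, so it must own what it captures.
// Moving an Arc into the generator keeps the node alive for the stream's
// whole lifetime without copying it.
fn events(node: Arc<Node2>) -> impl futures_core::Stream<Item = String> + Send + 'static {
    async_stream::stream! {
        yield format!("reading from {}", node.host);
    }
}
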
@@ -1,3 +1,5 @@
+use std::num::ParseIntError;
+
#[derive(Debug)]
pub struct Error {
    msg: String,
@@ -75,3 +77,11 @@ impl From<chrono::format::ParseError> for Error {
        }
    }
}
+
+impl From<ParseIntError> for Error {
+    fn from(k: ParseIntError) -> Self {
+        Self {
+            msg: k.to_string(),
+        }
+    }
+}
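
With this From impl in place, the ? operator converts integer-parse failures into the crate's Error automatically, which is exactly what the beg/end parsing in PreBinnedQuery::from_request relies on. A small sketch (the function name is made up):

// ParseIntError -> Error happens implicitly at the `?`.
fn parse_beg(s: &str) -> Result<u64, Error> {
    let beg: u64 = s.parse()?;
    Ok(beg)
}
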
@@ -9,7 +9,8 @@ hyper = { version = "0.14", features = ["http1", "http2", "client", "server", "t
http = "0.2"
url = "2.2"
bytes = "1.0.1"
-futures-core = "0.3.12"
+futures-core = "0.3.14"
futures-util = "0.3.14"
tracing = "0.1.25"
serde_json = "1.0"
+async-channel = "1.6"
@@ -8,23 +8,23 @@ use hyper::server::Server;
use hyper::service::{make_service_fn, service_fn};
use std::task::{Context, Poll};
use std::pin::Pin;
-use netpod::{Node, Cluster, AggKind};
-use disk::cache::BinParams;
+use futures_util::FutureExt;
+use netpod::{Node, Cluster, AggKind, NodeConfig};
+use std::sync::Arc;
+use disk::cache::PreBinnedQuery;
+use std::future::Future;
+use std::panic::UnwindSafe;

-pub async fn host(node: Node, cluster: Cluster) -> Result<(), Error> {
-   let addr = SocketAddr::from(([0, 0, 0, 0], node.port));
+pub async fn host(node_config: Arc<NodeConfig>) -> Result<(), Error> {
+   let addr = SocketAddr::from(([0, 0, 0, 0], node_config.node.port));
    let make_service = make_service_fn({
        move |_conn| {
-           let node = node.clone();
-           let cluster = cluster.clone();
+           let node_config = node_config.clone();
            async move {
                Ok::<_, Error>(service_fn({
                    move |req| {
-                       let hc = HostConf {
-                           node: node.clone(),
-                           cluster: cluster.clone(),
-                       };
-                       data_api_proxy(req, hc)
+                       let f = data_api_proxy(req, node_config.clone());
+                       Cont { f: Box::pin(f) }
                    }
                }))
            }
@@ -34,8 +34,8 @@ pub async fn host(node: Node, cluster: Cluster) -> Result<(), Error> {
    Ok(())
}

-async fn data_api_proxy(req: Request<Body>, hconf: HostConf) -> Result<Response<Body>, Error> {
-   match data_api_proxy_try(req, hconf).await {
+async fn data_api_proxy(req: Request<Body>, node_config: Arc<NodeConfig>) -> Result<Response<Body>, Error> {
+   match data_api_proxy_try(req, node_config).await {
        Ok(k) => { Ok(k) }
        Err(e) => {
            error!("{:?}", e);
@@ -44,7 +44,30 @@ async fn data_api_proxy(req: Request<Body>, hconf: HostConf) -> Result<Response<
    }
}

-async fn data_api_proxy_try(req: Request<Body>, hconf: HostConf) -> Result<Response<Body>, Error> {
+struct Cont<F> {
+    f: Pin<Box<F>>,
+}
+
+impl<F> Future for Cont<F> where F: Future {
+    type Output = <F as Future>::Output;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        /*let h = std::panic::catch_unwind(|| {
+            self.f.poll_unpin(cx)
+        });
+        match h {
+            Ok(k) => k,
+            Err(e) => todo!(),
+        }*/
+        self.f.poll_unpin(cx)
+    }
+
+}
+
+impl<F> UnwindSafe for Cont<F> {}
+
+
+async fn data_api_proxy_try(req: Request<Body>, node_config: Arc<NodeConfig>) -> Result<Response<Body>, Error> {
    let uri = req.uri().clone();
    let path = uri.path();
    if path == "/api/1/parsed_raw" {
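
Cont is scaffolding for trapping panics per request; the commented-out catch_unwind in poll shows the intent. One way to get a similar effect is futures' own catch_unwind combinator at the async level rather than inside poll. In this sketch, handle and the bare 500 response are placeholders, not this commit's code:

use futures_util::FutureExt;
use std::panic::AssertUnwindSafe;

async fn serve_guarded(req: hyper::Request<hyper::Body>) -> hyper::Response<hyper::Body> {
    match AssertUnwindSafe(handle(req)).catch_unwind().await {
        Ok(res) => res,
        // A panic in the handler becomes a plain 500 instead of tearing
        // down the connection task.
        Err(_panic) => hyper::Response::builder()
            .status(500)
            .body(hyper::Body::empty())
            .unwrap(),
    }
}

async fn handle(_req: hyper::Request<hyper::Body>) -> hyper::Response<hyper::Body> {
    hyper::Response::new(hyper::Body::from("ok"))
}
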
@@ -57,7 +80,15 @@ async fn data_api_proxy_try(req: Request<Body>, hconf: HostConf) -> Result<Respo
    }
    else if path == "/api/1/binned" {
        if req.method() == Method::GET {
-           Ok(binned(req, hconf).await?)
+           Ok(binned(req, node_config.clone()).await?)
        }
        else {
            Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
        }
    }
+   else if path == "/api/1/prebinned" {
+       if req.method() == Method::GET {
+           Ok(prebinned(req, node_config.clone()).await?)
+       }
+       else {
+           Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
@@ -87,7 +118,7 @@ async fn parsed_raw(req: Request<Body>) -> Result<Response<Body>, Error> {
    let query: AggQuerySingleChannel = serde_json::from_slice(&bodyslice)?;
    //let q = disk::read_test_1(&query).await?;
    //let s = q.inner;
-   let s = disk::parsed1(&query, &node);
+   let s = disk::parsed1(&query, node);
    let res = response(StatusCode::OK)
        .body(Body::wrap_stream(s))?;
    /*
@@ -131,7 +162,7 @@ impl hyper::body::HttpBody for BodyStreamWrap {
}


-async fn binned(req: Request<Body>, hconf: HostConf) -> Result<Response<Body>, Error> {
+async fn binned(req: Request<Body>, node_config: Arc<NodeConfig>) -> Result<Response<Body>, Error> {
    let (head, body) = req.into_parts();
    //let params = netpod::query_params(head.uri.query());

@@ -142,11 +173,7 @@ async fn binned(req: Request<Body>, hconf: HostConf) -> Result<Response<Body>, E
    // Extract the relevant channel config entry.

    let query = disk::cache::Query::from_request(&head)?;
-   let params = BinParams {
-       node: hconf.node.clone(),
-       cluster: hconf.cluster.clone(),
-   };
-   let ret = match disk::cache::binned_bytes_for_http(params, &query) {
+   let ret = match disk::cache::binned_bytes_for_http(node_config, &query) {
        Ok(s) => {
            response(StatusCode::OK)
                .body(Body::wrap_stream(s))?
@@ -160,18 +187,13 @@ async fn binned(req: Request<Body>, hconf: HostConf) -> Result<Response<Body>, E
}


-async fn prebinned(req: Request<Body>, hconf: HostConf) -> Result<Response<Body>, Error> {
+async fn prebinned(req: Request<Body>, node_config: Arc<NodeConfig>) -> Result<Response<Body>, Error> {
    let (head, body) = req.into_parts();
-   todo!("create a new PreBinnedQuery and let extract from query");
-   let params = BinParams {
-       node: hconf.node.clone(),
-       cluster: hconf.cluster.clone(),
-   };
-   todo!("create this new entry point in disk::cache");
-   let ret = match Ok(()) {
+   let q = PreBinnedQuery::from_request(&head)?;
+   let ret = match disk::cache::pre_binned_bytes_for_http(node_config, &q) {
        Ok(s) => {
            response(StatusCode::OK)
-               .body(Body::wrap_stream(______))?
+               .body(Body::wrap_stream(s))?
        }
        Err(e) => {
            error!("{:?}", e);
@@ -180,11 +202,3 @@ async fn prebinned(req: Request<Body>, hconf: HostConf) -> Result<Response<Body>
    };
    Ok(ret)
}
-
-
-
-#[derive(Clone)]
-pub struct HostConf {
-    node: Node,
-    cluster: Cluster,
-}
@@ -4,6 +4,7 @@ use serde::{Serialize, Deserialize};
use err::Error;
use std::path::PathBuf;
use chrono::{DateTime, Utc, TimeZone};
+use std::sync::Arc;


#[derive(Clone, Debug, Serialize, Deserialize)]
@@ -90,7 +91,7 @@ impl ScalarType {

}

-#[derive(Clone)]
+#[derive(Debug)]
pub struct Node {
    pub host: String,
    pub port: u16,
@@ -106,9 +107,16 @@ impl Node {
}


-#[derive(Clone)]
+#[derive(Debug)]
pub struct Cluster {
-   pub nodes: Vec<Node>,
+   pub nodes: Vec<Arc<Node>>,
}


+#[derive(Debug)]
+pub struct NodeConfig {
+    pub node: Arc<Node>,
+    pub cluster: Arc<Cluster>,
+}
+
+
@@ -255,9 +263,9 @@ impl BinSpecDimT {

#[derive(Clone, Debug)]
pub struct PreBinnedPatchGridSpec {
-   range: NanoRange,
-   bs: u64,
-   count: u64,
+   pub range: NanoRange,
+   pub bs: u64,
+   pub count: u64,
}

impl PreBinnedPatchGridSpec {
@@ -317,6 +325,14 @@ pub struct PreBinnedPatchCoord {
    pub range: NanoRange,
}

+impl PreBinnedPatchCoord {
+
+    pub fn bs(&self) -> u64 {
+        self.range.end - self.range.beg
+    }
+
+}
+
pub struct PreBinnedPatchIterator {
    spec: PreBinnedPatchGridSpec,
    agg_kind: AggKind,
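
The new bs() accessor reports a patch's bin size as the plain width of its nanosecond range, for example (NanoRange and HOUR as defined in netpod; the values are made up):

// A patch spanning one hour reports bs() == HOUR.
fn bs_example() -> u64 {
    let coord = PreBinnedPatchCoord {
        range: NanoRange { beg: 2 * HOUR, end: 3 * HOUR },
    };
    coord.bs() // == HOUR
}
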
@@ -1,7 +1,8 @@
#[allow(unused_imports)]
use tracing::{error, warn, info, debug, trace};
use err::Error;
-use netpod::{ChannelConfig, Channel, timeunits::*, ScalarType, Shape, Node, Cluster};
+use netpod::{ChannelConfig, Channel, timeunits::*, ScalarType, Shape, Node, Cluster, NodeConfig};
+use std::sync::Arc;

pub fn main() {
    match taskrun::run(go()) {
@@ -39,6 +40,7 @@ fn simple_fetch() {
        ksprefix: "daq_swissfel".into(),
        split: 0,
    };
+   let node = Arc::new(node);
    let query = netpod::AggQuerySingleChannel {
        channel_config: ChannelConfig {
            channel: Channel {
@@ -59,8 +61,14 @@ fn simple_fetch() {
    let cluster = Cluster {
        nodes: vec![node],
    };
+   let cluster = Arc::new(cluster);
+   let node_config = NodeConfig {
+       cluster: cluster,
+       node: cluster.nodes[0].clone(),
+   };
+   let node_config = Arc::new(node_config);
    let query_string = serde_json::to_string(&query).unwrap();
-   let _host = tokio::spawn(httpret::host(cluster.nodes[0].clone(), cluster.clone()));
+   let _host = tokio::spawn(httpret::host(node_config));
    let req = hyper::Request::builder()
        .method(http::Method::POST)
        .uri("http://localhost:8360/api/1/parsed_raw")
@@ -2,8 +2,9 @@
use tracing::{error, warn, info, debug, trace};
use err::Error;
use tokio::task::JoinHandle;
-use netpod::{Node, Cluster};
+use netpod::{Node, Cluster, NodeConfig};
use hyper::Body;
+use std::sync::Arc;

pub mod cli;
@@ -15,9 +16,9 @@ fn get_cached_0() {
#[cfg(test)]
async fn get_cached_0_inner() -> Result<(), Error> {
    let t1 = chrono::Utc::now();
-   let cluster = test_cluster();
+   let cluster = Arc::new(test_cluster());
    let node0 = &cluster.nodes[0];
-   let hosts = spawn_test_hosts(&cluster);
+   let hosts = spawn_test_hosts(cluster.clone());
    let req = hyper::Request::builder()
        .method(http::Method::GET)
        .uri(format!("http://{}:{}/api/1/binned?channel_backend=testbackend&channel_keyspace=3&channel_name=wave1&bin_count=4&beg_date=1970-01-01T00:00:10.000Z&end_date=1970-01-01T00:00:51.000Z", node0.host, node0.port))
@@ -54,13 +55,14 @@ async fn get_cached_0_inner() -> Result<(), Error> {

fn test_cluster() -> Cluster {
    let nodes = (0..1).into_iter().map(|k| {
-       Node {
+       let node = Node {
            host: "localhost".into(),
            port: 8360 + k,
            data_base_path: format!("../tmpdata/node{:02}", k).into(),
            ksprefix: "ks".into(),
            split: 0,
-       }
+       };
+       Arc::new(node)
    })
    .collect();
    Cluster {
@@ -68,10 +70,14 @@ fn test_cluster() -> Cluster {
    }
}

-fn spawn_test_hosts(cluster: &Cluster) -> Vec<JoinHandle<Result<(), Error>>> {
+fn spawn_test_hosts(cluster: Arc<Cluster>) -> Vec<JoinHandle<Result<(), Error>>> {
    let mut ret = vec![];
    for node in &cluster.nodes {
-       let h = tokio::spawn(httpret::host(node.clone(), cluster.clone()));
+       let node_config = NodeConfig {
+           cluster: cluster.clone(),
+           node: node.clone(),
+       };
+       let h = tokio::spawn(httpret::host(Arc::new(node_config)));
        ret.push(h);
    }
    ret
@@ -5,7 +5,7 @@ authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2018"

[dependencies]
-tokio = { version = "1.4.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
+tokio = { version = "1.5.0", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "fs"] }
tracing = "0.1.25"
tracing-subscriber = "0.2.17"
err = { path = "../err" }