Rename Channel to SfDbChannel
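The diff below renames netpod's `Channel` struct to `SfDbChannel` and threads the new name through all call sites; along the way it reworks the EventChunker shape-mismatch handling, refactors the API-1 stream's poll loop into helper methods, and pins the error type of several async test blocks. A minimal before/after sketch of the rename (the channel name is a placeholder):

    // Before:
    let ch = Channel { series: None, backend: "sf-databuffer".into(), name: "MY-CHANNEL".into() };
    // After: same fields, new type name.
    let ch = SfDbChannel { series: None, backend: "sf-databuffer".into(), name: "MY-CHANNEL".into() };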
@@ -19,9 +19,7 @@ use tokio::io::AsyncReadExt;
 pub fn main() {
     match taskrun::run(go()) {
-        Ok(k) => {
-            info!("{:?}", k);
-        }
+        Ok(()) => {}
         Err(k) => {
             error!("{:?}", k);
         }
@@ -115,13 +113,10 @@ async fn go() -> Result<(), Error> {
        SubCmd::GenerateTestData => {
            disk::gen::gen_test_data().await?;
        }
        SubCmd::Logappend(k) => {
            let jh = tokio::task::spawn_blocking(move || {
                taskrun::append::append(&k.dir, k.total_size_max_bytes(), std::io::stdin()).unwrap();
            });
            jh.await.map_err(Error::from_string)?;
        }
        SubCmd::Test => (),
        SubCmd::Version => {
            println!("{}", clap::crate_version!());
        }
    }
    Ok(())
}
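Note on the Logappend arm: `append` reads from `std::io::stdin()`, which blocks, so it runs under `tokio::task::spawn_blocking` to keep the async worker threads free. A minimal standalone sketch of the same pattern (the closure body is a placeholder):

    // Run blocking stdin IO on the blocking-thread pool, not the async executor.
    let jh = tokio::task::spawn_blocking(move || {
        let mut line = String::new();
        std::io::stdin().read_line(&mut line).unwrap(); // blocking call
        line
    });
    let line = jh.await?; // the JoinHandle yields the closure's return value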
@@ -133,15 +128,15 @@ fn simple_fetch() {
    use daqbuffer::err::ErrConv;
    use netpod::timeunits::*;
    use netpod::ByteOrder;
-   use netpod::Channel;
    use netpod::ScalarType;
+   use netpod::SfDbChannel;
    use netpod::Shape;
    taskrun::run(async {
        let _rh = daqbufp2::nodes::require_test_hosts_running()?;
        let t1 = chrono::Utc::now();
        let query = AggQuerySingleChannel {
            channel_config: SfDbChConf {
-               channel: Channel {
+               channel: SfDbChannel {
                    backend: "sf-databuffer".into(),
                    name: "S10BC01-DBAM070:BAM_CH1_NORM".into(),
                    series: None,
@@ -193,7 +188,7 @@ fn simple_fetch() {
            ntot / 1024 / 1024,
            throughput
        );
-       Ok(())
+       Ok::<_, Error>(())
    })
    .unwrap();
}
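The repeated `Ok(())` to `Ok::<_, Error>(())` edits in this commit all fix the same inference problem: when nothing else in the async block names the error type, `Result<(), E>` is ambiguous and the turbofish pins `E`. Sketch (`do_work` is a placeholder):

    taskrun::run(async {
        do_work().await?;
        // Without the turbofish, rustc cannot infer E in the block's Result<(), E>.
        Ok::<_, Error>(())
    })
    .unwrap();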
@@ -16,8 +16,8 @@ pub enum SubCmd {
    Proxy(Proxy),
    Client(Client),
    GenerateTestData,
    Logappend(Logappend),
    Test,
    Version,
}

#[derive(Debug, Parser)]
@@ -73,17 +73,3 @@ pub struct BinnedClient {
    #[arg(long, default_value = "1048576")]
    pub disk_stats_every_kb: u32,
}
-
-#[derive(Debug, Parser)]
-pub struct Logappend {
-    #[arg(long)]
-    pub dir: String,
-    #[arg(long)]
-    pub total_mb: Option<u64>,
-}
-
-impl Logappend {
-    pub fn total_size_max_bytes(&self) -> u64 {
-        1024 * 1024 * self.total_mb.unwrap_or(20)
-    }
-}
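`total_size_max_bytes` converts the optional MiB argument to bytes and defaults to 20 MiB: with `total_mb` unset it returns 1024 * 1024 * 20 = 20_971_520 bytes. For example (the dir value is a placeholder):

    let la = Logappend { dir: "/tmp/logs".into(), total_mb: None };
    assert_eq!(la.total_size_max_bytes(), 20 * 1024 * 1024);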
@@ -13,9 +13,9 @@ use netpod::query::CacheUsage;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
use netpod::ByteSize;
-use netpod::Channel;
use netpod::HostPort;
use netpod::PerfOpts;
+use netpod::SfDbChannel;
use netpod::APP_OCTET;
use query::api4::binned::BinnedQuery;
use streams::frames::inmem::InMemoryFrameAsyncReadStream;

@@ -61,7 +61,7 @@ pub async fn get_binned(
    info!("end {}", end_date);
    info!("-------");
    let t1 = Utc::now();
-   let channel = Channel {
+   let channel = SfDbChannel {
        backend: channel_backend.clone(),
        name: channel_name.into(),
        series: None,
@@ -6,9 +6,9 @@ use http::StatusCode;
use hyper::Body;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
-use netpod::Channel;
use netpod::Cluster;
use netpod::HostPort;
+use netpod::SfDbChannel;
use netpod::APP_JSON;
use url::Url;

@@ -16,7 +16,7 @@ const TEST_BACKEND: &str = "testbackend-00";

// Fetches all data, not streaming, meant for basic test cases that fit in memory.
async fn fetch_data_api_python_blob(
-   channels: Vec<Channel>,
+   channels: Vec<SfDbChannel>,
    beg_date: &str,
    end_date: &str,
    cluster: &Cluster,
@@ -67,7 +67,7 @@ fn api3_hdf_dim0_00() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = fetch_data_api_python_blob(
-       vec![Channel {
+       vec![SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-i32-dim0-v00".into(),
            series: None,
@@ -13,9 +13,9 @@ use items_2::binsdim0::BinsDim0CollectedResult;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
-use netpod::Channel;
use netpod::Cluster;
use netpod::HostPort;
+use netpod::SfDbChannel;
use netpod::APP_JSON;
use query::api4::binned::BinnedQuery;
use serde_json::Value as JsonValue;
@@ -29,7 +29,7 @@ fn make_query<S: Into<String>>(
    end_date: &str,
    bin_count_min: u32,
) -> Result<BinnedQuery, Error> {
-   let channel = Channel {
+   let channel = SfDbChannel {
        backend: TEST_BACKEND.into(),
        name: name.into(),
        series: None,
@@ -47,7 +47,7 @@ fn binned_d0_json_00() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = get_binned_json(
-       Channel {
+       SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-i32-dim0-v01".into(),
            series: None,
@@ -104,7 +104,7 @@ fn binned_d0_json_01a() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = get_binned_json(
-       Channel {
+       SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-i32-dim0-v01".into(),
            series: None,
@@ -162,7 +162,7 @@ fn binned_d0_json_01b() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = get_binned_json(
-       Channel {
+       SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-i32-dim0-v01".into(),
            series: None,
@@ -220,7 +220,7 @@ fn binned_d0_json_02() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = get_binned_json(
-       Channel {
+       SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-f64-dim1-v00".into(),
            series: None,
@@ -279,7 +279,7 @@ fn binned_d0_json_03() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = get_binned_json(
-       Channel {
+       SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-f64-dim1-v00".into(),
            series: None,
@@ -312,7 +312,7 @@ fn binned_d0_json_04() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = get_binned_json(
-       Channel {
+       SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-i32-dim0-v01".into(),
            series: None,
@@ -365,7 +365,7 @@ fn binned_d0_json_04() -> Result<(), Error> {
}

async fn get_binned_json(
-   channel: Channel,
+   channel: SfDbChannel,
    beg_date: &str,
    end_date: &str,
    bin_count: u32,
@@ -10,9 +10,9 @@ use items_2::eventsdim0::EventsDim0CollectorOutput;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
-use netpod::Channel;
use netpod::Cluster;
use netpod::HostPort;
+use netpod::SfDbChannel;
use netpod::APP_JSON;
use query::api4::events::PlainEventsQuery;
use serde_json::Value as JsonValue;
@@ -21,7 +21,7 @@ use url::Url;
const TEST_BACKEND: &str = "testbackend-00";

fn make_query<S: Into<String>>(name: S, beg_date: &str, end_date: &str) -> Result<PlainEventsQuery, Error> {
-   let channel = Channel {
+   let channel = SfDbChannel {
        backend: TEST_BACKEND.into(),
        name: name.into(),
        series: None,
@@ -59,7 +59,7 @@ fn events_plain_json_02_range_incomplete() -> Result<(), Error> {
    let rh = require_test_hosts_running()?;
    let cluster = &rh.cluster;
    let jsv = events_plain_json(
-       Channel {
+       SfDbChannel {
            backend: TEST_BACKEND.into(),
            name: "test-gen-i32-dim0-v01".into(),
            series: None,
@@ -79,7 +79,7 @@ fn events_plain_json_02_range_incomplete() -> Result<(), Error> {

// TODO improve by a more information-rich return type.
async fn events_plain_json(
-   channel: Channel,
+   channel: SfDbChannel,
    beg_date: &str,
    end_date: &str,
    cluster: &Cluster,
@@ -6,13 +6,13 @@ use items_0::WithLen;
use items_2::eventsdim0::EventsDim0CollectorOutput;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
-use netpod::Channel;
+use netpod::SfDbChannel;
use query::api4::events::PlainEventsQuery;

const BACKEND: &str = "testbackend-00";

pub fn make_query<S: Into<String>>(name: S, beg_date: &str, end_date: &str) -> Result<PlainEventsQuery, Error> {
-   let channel = Channel {
+   let channel = SfDbChannel {
        backend: BACKEND.into(),
        name: name.into(),
        series: None,
@@ -8,8 +8,8 @@ use netpod::log::*;
use netpod::query::CacheUsage;
use netpod::range::evrange::NanoRange;
use netpod::AppendToUrl;
-use netpod::Channel;
use netpod::Cluster;
+use netpod::SfDbChannel;
use netpod::APP_JSON;
use query::api4::binned::BinnedQuery;
use std::time::Duration;
@@ -39,7 +39,7 @@ async fn get_json_common(
    let beg_date: DateTime<Utc> = beg_date.parse()?;
    let end_date: DateTime<Utc> = end_date.parse()?;
    let channel_backend = TEST_BACKEND;
-   let channel = Channel {
+   let channel = SfDbChannel {
        backend: channel_backend.into(),
        name: channel_name.into(),
        series: None,
@@ -2,9 +2,9 @@ use crate::ErrConv;
use err::Error;
use netpod::log::*;
use netpod::ChConf;
-use netpod::Channel;
use netpod::NodeConfigCached;
use netpod::ScalarType;
+use netpod::SfDbChannel;
use netpod::Shape;

/// It is an unsolved question as to how we want to uniquely address channels.
@@ -15,7 +15,7 @@ use netpod::Shape;
/// Otherwise we try to uniquely identify the series id from the given information.
/// In the future, we can even try to involve time range information for that, but backends like
/// old archivers and sf databuffer do not support such lookup.
-pub async fn chconf_from_scylla_type_backend(channel: &Channel, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
+pub async fn chconf_from_scylla_type_backend(channel: &SfDbChannel, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
    if channel.backend != ncc.node_config.cluster.backend {
        warn!(
            "mismatched backend {} vs {}",
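The doc comment above gives the resolution order: trust an explicit series id if present, otherwise derive it from backend plus channel name. A sketch of that rule (the lookup helper is hypothetical):

    let series = match channel.series() {
        Some(series) => series,                          // explicit id wins
        None => lookup_series_by_name(&channel).await?,  // hypothetical: resolve via backend + name
    };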
@@ -12,7 +12,7 @@ use err::Error;
use err::Res2;
use netpod::log::*;
use netpod::TableSizes;
-use netpod::{Channel, Database, NodeConfigCached};
+use netpod::{Database, NodeConfigCached, SfDbChannel};
use netpod::{ScalarType, Shape};
use pg::{Client as PgClient, NoTls};
use std::sync::Arc;
@@ -71,7 +71,7 @@ pub async fn create_connection(db_config: &Database) -> Result<PgClient, Error>
    Ok(cl)
}

-pub async fn channel_exists(channel: &Channel, node_config: &NodeConfigCached) -> Result<bool, Error> {
+pub async fn channel_exists(channel: &SfDbChannel, node_config: &NodeConfigCached) -> Result<bool, Error> {
    let cl = create_connection(&node_config.node_config.cluster.database).await?;
    let rows = cl
        .query("select rowid from channels where name = $1::text", &[&channel.name])
@@ -155,7 +155,7 @@ pub async fn insert_channel(name: String, facility: i64, dbc: &PgClient) -> Resu
}

// Currently only for scylla type backends
-pub async fn find_series(channel: &Channel, pgclient: Arc<PgClient>) -> Result<(u64, ScalarType, Shape), Error> {
+pub async fn find_series(channel: &SfDbChannel, pgclient: Arc<PgClient>) -> Result<(u64, ScalarType, Shape), Error> {
    info!("find_series channel {:?}", channel);
    let rows = if let Some(series) = channel.series() {
        let q = "select series, facility, channel, scalar_type, shape_dims from series_by_channel where series = $1";
@@ -194,7 +194,7 @@ pub async fn find_series(channel: &SfDbChannel, pgclient: Arc<PgClient>) -> Result<(

// Currently only for sf-databuffer type backends
// Note: we currently treat the channels primary key as series-id for sf-databuffer type backends.
-pub async fn find_series_sf_databuffer(channel: &Channel, pgclient: Arc<PgClient>) -> Res2<u64> {
+pub async fn find_series_sf_databuffer(channel: &SfDbChannel, pgclient: Arc<PgClient>) -> Res2<u64> {
    info!("find_series channel {:?}", channel);
    let sql = "select rowid from facilities where name = $1";
    let rows = pgclient.query(sql, &[&channel.backend()]).await.err_conv()?;
@@ -2,11 +2,14 @@ use crate::create_connection;
use crate::ErrConv;
use err::Error;
use netpod::log::*;
-use netpod::Channel;
use netpod::NodeConfigCached;
+use netpod::SfDbChannel;

// For sf-databuffer backend, given a Channel, try to complete the information if only id is given.
-pub async fn sf_databuffer_fetch_channel_by_series(channel: Channel, ncc: &NodeConfigCached) -> Result<Channel, Error> {
+pub async fn sf_databuffer_fetch_channel_by_series(
+    channel: SfDbChannel,
+    ncc: &NodeConfigCached,
+) -> Result<SfDbChannel, Error> {
    info!("sf_databuffer_fetch_channel_by_series");
    // TODO should not be needed at some point.
    if channel.backend().is_empty() || channel.name().is_empty() {
@@ -29,7 +32,7 @@ pub async fn sf_databuffer_fetch_channel_by_series(channel: SfDbChannel, ncc: &NodeC
    if let Some(row) = rows.pop() {
        info!("sf_databuffer_fetch_channel_by_series got a row {row:?}");
        let name: String = row.get(0);
-       let channel = Channel {
+       let channel = SfDbChannel {
            series: channel.series,
            backend: ncc.node_config.cluster.backend.clone(),
            name,
@@ -483,8 +483,8 @@ async fn update_db_with_channel_config(
    } else {
        return Ok(UpdateChannelConfigResult::NotFound);
    };
-   if meta.len() > 8 * 1024 * 1024 {
-       return Err(Error::with_msg("meta data too long"));
+   if meta.len() > 40 * 1024 * 1024 {
+       return Err(Error::with_msg(format!("meta data too long, len {}", meta.len())));
    }
    let rows = dbc
        .query(
@@ -2,16 +2,17 @@ use crate::eventblobs::EventChunkerMultifile;
use crate::eventchunker::EventChunkerConf;
use crate::AggQuerySingleChannel;
use crate::SfDbChConf;
use err::Error;
use netpod::range::evrange::NanoRange;
use netpod::test_data_base_path_databuffer;
use netpod::timeunits::*;
use netpod::ByteOrder;
use netpod::ByteSize;
-use netpod::Channel;
use netpod::DiskIoTune;
use netpod::Node;
use netpod::ScalarType;
use netpod::SfDatabuffer;
+use netpod::SfDbChannel;
use netpod::Shape;
use netpod::TsNano;

@@ -38,7 +39,7 @@ pub fn make_test_node(id: u32) -> Node {
fn agg_x_dim_0() {
    taskrun::run(async {
        agg_x_dim_0_inner().await;
-       Ok(())
+       Ok::<_, Error>(())
    })
    .unwrap();
}
@@ -47,7 +48,7 @@ async fn agg_x_dim_0_inner() {
    let node = make_test_node(0);
    let query = AggQuerySingleChannel {
        channel_config: SfDbChConf {
-           channel: Channel {
+           channel: SfDbChannel {
                backend: "sf-databuffer".into(),
                name: "S10BC01-DBAM070:EOM1_T1".into(),
                series: None,
@@ -92,7 +93,7 @@ async fn agg_x_dim_0_inner() {
fn agg_x_dim_1() {
    taskrun::run(async {
        agg_x_dim_1_inner().await;
-       Ok(())
+       Ok::<_, Error>(())
    })
    .unwrap();
}
@@ -104,7 +105,7 @@ async fn agg_x_dim_1_inner() {
    let node = make_test_node(0);
    let query = AggQuerySingleChannel {
        channel_config: SfDbChConf {
-           channel: Channel {
+           channel: SfDbChannel {
                backend: "ks".into(),
                name: "wave1".into(),
                series: None,
@@ -3,10 +3,10 @@ use err::Error;
use netpod::log::*;
use netpod::timeunits::SEC;
use netpod::AggKind;
-use netpod::Channel;
use netpod::Cluster;
use netpod::NodeConfigCached;
use netpod::PreBinnedPatchCoordEnum;
+use netpod::SfDbChannel;
use serde::Deserialize;
use serde::Serialize;
use std::collections::VecDeque;
@@ -18,7 +18,7 @@ use tiny_keccak::Hasher;

// For file-based caching, this determined the node where the cache file is located.
// No longer needed for scylla-based caching.
-pub fn node_ix_for_patch(patch_coord: &PreBinnedPatchCoordEnum, channel: &Channel, cluster: &Cluster) -> u32 {
+pub fn node_ix_for_patch(patch_coord: &PreBinnedPatchCoordEnum, channel: &SfDbChannel, cluster: &Cluster) -> u32 {
    let mut hash = tiny_keccak::Sha3::v256();
    hash.update(channel.backend.as_bytes());
    hash.update(channel.name.as_bytes());
@@ -36,13 +36,13 @@ pub fn node_ix_for_patch(patch_coord: &PreBinnedPatchCoordEnum, channel: &Channe
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CacheFileDesc {
    // What identifies a cached file?
-   channel: Channel,
+   channel: SfDbChannel,
    patch: PreBinnedPatchCoordEnum,
    agg_kind: AggKind,
}

impl CacheFileDesc {
-   pub fn new(channel: Channel, patch: PreBinnedPatchCoordEnum, agg_kind: AggKind) -> Self {
+   pub fn new(channel: SfDbChannel, patch: PreBinnedPatchCoordEnum, agg_kind: AggKind) -> Self {
        Self {
            channel,
            patch,
@@ -104,7 +104,7 @@ pub async fn write_pb_cache_min_max_avg_scalar<T>(
    values: T,
    patch: PreBinnedPatchCoordEnum,
    agg_kind: AggKind,
-   channel: Channel,
+   channel: SfDbChannel,
    node_config: NodeConfigCached,
) -> Result<WrittenPbCache, Error>
where
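`node_ix_for_patch` maps a (channel, patch) pair to a stable node index by hashing the channel identity and patch coordinate with SHA3-256. A condensed sketch of the idea, assuming the final reduction is modulo the cluster size (the exact tail of the function is not shown above):

    use tiny_keccak::Hasher; // provides update/finalize for Sha3
    let mut hash = tiny_keccak::Sha3::v256();
    hash.update(channel.backend.as_bytes());
    hash.update(channel.name.as_bytes());
    let mut out = [0u8; 32];
    hash.finalize(&mut out);
    let ix = u32::from_le_bytes(out[0..4].try_into().unwrap()) % cluster.nodes.len() as u32;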
@@ -1,14 +1,18 @@
use crate::SfDbChConf;
use err::Error;
use netpod::range::evrange::NanoRange;
-use netpod::Channel;
use netpod::NodeConfigCached;
+use netpod::SfDbChannel;
use parse::channelconfig::extract_matching_config_entry;
use parse::channelconfig::read_local_config;
use parse::channelconfig::ChannelConfigs;
use parse::channelconfig::MatchingConfigEntry;

-pub async fn config(range: NanoRange, channel: Channel, node_config: &NodeConfigCached) -> Result<SfDbChConf, Error> {
+pub async fn config(
+    range: NanoRange,
+    channel: SfDbChannel,
+    node_config: &NodeConfigCached,
+) -> Result<SfDbChConf, Error> {
    let channel_configs = read_local_config(channel.clone(), node_config.clone()).await?;
    let entry_res = match extract_matching_config_entry(&range, &channel_configs) {
        Ok(k) => k,
@@ -46,6 +50,6 @@ pub async fn config(range: NanoRange, channel: SfDbChannel, node_config: &NodeConfig
    Ok(channel_config)
}

-pub async fn configs(channel: Channel, node_config: &NodeConfigCached) -> Result<ChannelConfigs, Error> {
+pub async fn configs(channel: SfDbChannel, node_config: &NodeConfigCached) -> Result<ChannelConfigs, Error> {
    read_local_config(channel.clone(), node_config.clone()).await
}
@@ -1,6 +1,5 @@
-use crate::SfDbChConf;
-
use super::paths;
+use crate::SfDbChConf;
use bytes::BytesMut;
use err::ErrStr;
use err::Error;
@@ -445,7 +444,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 23);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -463,7 +462,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -482,7 +481,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 23);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -501,7 +500,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 179);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -520,7 +519,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -539,7 +538,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -557,7 +556,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -576,7 +575,7 @@ mod test {
        assert_eq!(res.file.index, true);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 184);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -594,7 +593,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, true);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -612,7 +611,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, true);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -631,7 +630,7 @@ mod test {
        assert_eq!(res.file.index, true);
        assert_eq!(res.file.positioned, false);
        assert_eq!(res.file.pos, 0);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -655,10 +654,10 @@ mod test {
        .await?;
        assert_eq!(res.1, true);
        assert_eq!(res.3, 75);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
-   Ok(())
+   Ok::<_, Error>(())
}

#[test]
@@ -673,7 +672,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -692,7 +691,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 23);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -711,7 +710,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 75);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -731,7 +730,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 2995171);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -750,7 +749,7 @@ mod test {
        assert_eq!(res.found, false);
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, false);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -770,7 +769,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 23);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -790,7 +789,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 75);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -810,7 +809,7 @@ mod test {
        assert_eq!(res.file.index, false);
        assert_eq!(res.file.positioned, true);
        assert_eq!(res.file.pos, 127);
-       Ok(())
+       Ok::<_, Error>(())
    };
    taskrun::run(fut)?;
    Ok(())
@@ -822,7 +821,7 @@ mod test {
        beg: DAY + HOUR * 5,
        end: DAY + HOUR * 8,
    };
-   let chn = netpod::Channel {
+   let chn = netpod::SfDbChannel {
        backend: BACKEND.into(),
        name: "scalar-i32-be".into(),
        series: None,
@@ -860,7 +859,7 @@ mod test {
            n = paths.len()
        )));
    }
-   Ok(())
+   Ok::<_, Error>(())
    };
    taskrun::run(task).unwrap();
}
@@ -27,11 +27,11 @@ use futures_util::StreamExt;
use futures_util::TryFutureExt;
use netpod::log::*;
use netpod::ByteOrder;
-use netpod::Channel;
use netpod::DiskIoTune;
use netpod::Node;
use netpod::ReadSys;
use netpod::ScalarType;
+use netpod::SfDbChannel;
use netpod::Shape;
use netpod::TsNano;
use serde::Deserialize;
@@ -63,7 +63,7 @@ use tokio::sync::mpsc;
// TODO move to databuffer-specific crate
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SfDbChConf {
-   pub channel: Channel,
+   pub channel: SfDbChannel,
    pub keyspace: u8,
    pub time_bin_size: TsNano,
    pub scalar_type: ScalarType,

@@ -298,7 +298,7 @@ mod test {
    const BACKEND: &str = "testbackend-00";

    fn read_expanded_for_range(range: NanoRange, nodeix: usize) -> Result<(usize, Vec<u64>), Error> {
-       let chn = netpod::Channel {
+       let chn = netpod::SfDbChannel {
            backend: BACKEND.into(),
            name: "scalar-i32-be".into(),
            series: None,
@@ -53,11 +53,15 @@ pub struct EventChunker {
    seen_after_range_count: usize,
    unordered_warn_count: usize,
    repeated_ts_warn_count: usize,
+   config_mismatch_discard: usize,
}

impl Drop for EventChunker {
    fn drop(&mut self) {
        // TODO collect somewhere
+       if self.config_mismatch_discard != 0 {
+           warn!("config_mismatch_discard {}", self.config_mismatch_discard);
+       }
        debug!(
            "EventChunker Drop Stats:\ndecomp_dt_histo: {:?}\nitem_len_emit_histo: {:?}",
            self.decomp_dt_histo, self.item_len_emit_histo
@@ -124,6 +128,7 @@ impl EventChunker {
            seen_after_range_count: 0,
            unordered_warn_count: 0,
            repeated_ts_warn_count: 0,
+           config_mismatch_discard: 0,
        }
    }
@@ -352,73 +357,111 @@ impl EventChunker {
        let type_size = scalar_type.bytes() as u32;
        let ele_count = value_bytes / type_size as u64;
        let ele_size = type_size;
-       match self.channel_config.shape {
+       let config_matches = match self.channel_config.shape {
            Shape::Scalar => {
                if is_array {
-                   Err(Error::with_msg(format!(
-                       "ChannelConfig expects Scalar but we find event is_array"
-                   )))?;
+                   if false {
+                       error!(
+                           "channel config mismatch {:?} {:?} {:?} {:?}",
+                           self.channel_config, is_array, ele_count, self.dbg_path,
+                       );
+                   }
+                   if false {
+                       return Err(Error::with_msg(format!(
+                           "ChannelConfig expects {:?} but we find event is_array",
+                           self.channel_config,
+                       )));
+                   }
+                   false
+               } else {
+                   true
                }
            }
            Shape::Wave(dim1count) => {
                if dim1count != ele_count as u32 {
-                   Err(Error::with_msg(format!(
-                       "ChannelConfig expects {:?} but event has ele_count {}",
-                       self.channel_config.shape, ele_count,
-                   )))?;
+                   if false {
+                       error!(
+                           "channel config mismatch {:?} {:?} {:?} {:?}",
+                           self.channel_config, is_array, ele_count, self.dbg_path,
+                       );
+                   }
+                   if false {
+                       return Err(Error::with_msg(format!(
+                           "ChannelConfig expects {:?} but event has ele_count {}",
+                           self.channel_config, ele_count,
+                       )));
+                   }
+                   false
+               } else {
+                   true
                }
            }
            Shape::Image(n1, n2) => {
                let nt = n1 as usize * n2 as usize;
                if nt != ele_count as usize {
-                   Err(Error::with_msg(format!(
-                       "ChannelConfig expects {:?} but event has ele_count {}",
-                       self.channel_config.shape, ele_count,
-                   )))?;
+                   if false {
+                       error!(
+                           "channel config mismatch {:?} {:?} {:?} {:?}",
+                           self.channel_config, is_array, ele_count, self.dbg_path,
+                       );
+                   }
+                   if false {
+                       return Err(Error::with_msg(format!(
+                           "ChannelConfig expects {:?} but event has ele_count {}",
+                           self.channel_config, ele_count,
+                       )));
+                   }
+                   false
+               } else {
+                   true
                }
            }
-       }
-       let data = &buf.as_ref()[(p1 as usize)..(p1 as usize + k1 as usize)];
-       let decomp = {
-           if self.do_decompress {
-               assert!(data.len() > 12);
-               let ts1 = Instant::now();
-               let decomp_bytes = (type_size * ele_count as u32) as usize;
-               let mut decomp = vec![0; decomp_bytes];
-               // TODO limit the buf slice range
-               match bitshuffle_decompress(
-                   &data[12..],
-                   &mut decomp,
-                   ele_count as usize,
-                   ele_size as usize,
-                   0,
-               ) {
-                   Ok(c1) => {
-                       assert!(c1 as u64 + 12 == k1);
-                       let ts2 = Instant::now();
-                       let dt = ts2.duration_since(ts1);
-                       // TODO analyze the histo
-                       self.decomp_dt_histo.ingest(dt.as_secs() as u32 + dt.subsec_micros());
-                       Some(decomp)
-                   }
-                   Err(e) => {
-                       return Err(Error::with_msg(format!("decompression failed {:?}", e)))?;
-                   }
-               }
-           } else {
-               None
-           }
-       };
-       ret.add_event(
-           ts,
-           pulse,
-           Some(data.to_vec()),
-           decomp,
-           ScalarType::from_dtype_index(type_index)?,
-           is_big_endian,
-           shape_this,
-           comp_this,
-       );
+       };
+       if config_matches {
+           let data = buf.as_ref()[(p1 as usize)..(p1 as usize + k1 as usize)].as_ref();
+           let decomp = {
+               if self.do_decompress {
+                   assert!(data.len() > 12);
+                   let ts1 = Instant::now();
+                   let decomp_bytes = (type_size * ele_count as u32) as usize;
+                   let mut decomp = vec![0; decomp_bytes];
+                   // TODO limit the buf slice range
+                   match bitshuffle_decompress(
+                       &data[12..],
+                       &mut decomp,
+                       ele_count as usize,
+                       ele_size as usize,
+                       0,
+                   ) {
+                       Ok(c1) => {
+                           assert!(c1 as u64 + 12 == k1);
+                           let ts2 = Instant::now();
+                           let dt = ts2.duration_since(ts1);
+                           // TODO analyze the histo
+                           self.decomp_dt_histo.ingest(dt.as_secs() as u32 + dt.subsec_micros());
+                           Some(decomp)
+                       }
+                       Err(e) => {
+                           return Err(Error::with_msg(format!("decompression failed {:?}", e)))?;
+                       }
+                   }
+               } else {
+                   None
+               }
+           };
+           ret.add_event(
+               ts,
+               pulse,
+               Some(data.to_vec()),
+               decomp,
+               ScalarType::from_dtype_index(type_index)?,
+               is_big_endian,
+               shape_this,
+               comp_this,
+           );
+       } else {
+           self.config_mismatch_discard += 1;
+       }
        } else {
            if len < p1 as u32 + 4 {
                let msg = format!("uncomp len: {} p1: {}", len, p1);
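The shape check now yields a boolean instead of erroring out: mismatching events are counted and dropped, and the counter is reported once from Drop, while the old strict paths stay behind `if false` for quick re-enabling. A condensed standalone sketch of the pattern (helper names hypothetical):

    let config_matches = match expected_shape {
        Shape::Scalar => !is_array,
        Shape::Wave(n) => n as u64 == ele_count,
        Shape::Image(a, b) => (a as u64) * (b as u64) == ele_count,
    };
    if config_matches {
        process_event(); // hypothetical: decompress and emit, as above
    } else {
        self.config_mismatch_discard += 1; // surfaced as a single warn! in Drop
    }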
@@ -7,11 +7,11 @@ use err::Error;
use netpod::log::*;
use netpod::timeunits::*;
use netpod::ByteOrder;
-use netpod::Channel;
use netpod::GenVar;
use netpod::Node;
use netpod::ScalarType;
use netpod::SfDatabuffer;
+use netpod::SfDbChannel;
use netpod::Shape;
use netpod::TsNano;
use std::path::Path;
@@ -34,7 +34,7 @@ pub async fn gen_test_data() -> Result<(), Error> {
    {
        let chn = ChannelGenProps {
            config: SfDbChConf {
-               channel: Channel {
+               channel: SfDbChannel {
                    backend: backend.clone(),
                    name: "scalar-i32-be".into(),
                    series: None,
@@ -53,7 +53,7 @@ pub async fn gen_test_data() -> Result<(), Error> {
        ensemble.channels.push(chn);
        let chn = ChannelGenProps {
            config: SfDbChConf {
-               channel: Channel {
+               channel: SfDbChannel {
                    backend: backend.clone(),
                    name: "wave-f64-be-n21".into(),
                    series: None,
@@ -72,7 +72,7 @@ pub async fn gen_test_data() -> Result<(), Error> {
        ensemble.channels.push(chn);
        let chn = ChannelGenProps {
            config: SfDbChConf {
-               channel: Channel {
+               channel: SfDbChannel {
                    backend: backend.clone(),
                    name: "wave-u16-le-n77".into(),
                    series: None,
@@ -91,7 +91,7 @@ pub async fn gen_test_data() -> Result<(), Error> {
        ensemble.channels.push(chn);
        let chn = ChannelGenProps {
            config: SfDbChConf {
-               channel: Channel {
+               channel: SfDbChannel {
                    backend: backend.clone(),
                    name: "tw-scalar-i32-be".into(),
                    series: None,
@@ -110,7 +110,7 @@ pub async fn gen_test_data() -> Result<(), Error> {
        ensemble.channels.push(chn);
        let chn = ChannelGenProps {
            config: SfDbChConf {
-               channel: Channel {
+               channel: SfDbChannel {
                    backend: backend.clone(),
                    name: "const-regular-scalar-i32-be".into(),
                    series: None,
@@ -17,9 +17,9 @@ use netpod::range::evrange::NanoRange;
use netpod::AggKind;
use netpod::ByteSize;
use netpod::ChConf;
-use netpod::Channel;
use netpod::DiskIoTune;
use netpod::NodeConfigCached;
+use netpod::SfDbChannel;
use parse::channelconfig::extract_matching_config_entry;
use parse::channelconfig::read_local_config;
use parse::channelconfig::ConfigEntry;
@@ -130,10 +130,10 @@ pub async fn make_event_pipe(

pub async fn get_applicable_entry(
    range: &NanoRange,
-   channel: Channel,
+   channel: SfDbChannel,
    node_config: &NodeConfigCached,
) -> Result<ConfigEntry, Error> {
-   info!("---------- disk::raw::conn::get_applicable_entry");
+   debug!("disk::raw::conn::get_applicable_entry");
    let channel_config = read_local_config(channel.clone(), node_config.clone()).await?;
    let entry_res = match extract_matching_config_entry(range, &channel_config) {
        Ok(k) => k,
@@ -159,7 +159,7 @@ pub async fn get_applicable_entry(

pub fn make_local_event_blobs_stream(
    range: NanoRange,
-   channel: Channel,
+   channel: SfDbChannel,
    entry: &ConfigEntry,
    expand: bool,
    do_decompress: bool,
@@ -208,7 +208,7 @@ pub fn make_local_event_blobs_stream(

pub fn make_remote_event_blobs_stream(
    range: NanoRange,
-   channel: Channel,
+   channel: SfDbChannel,
    entry: &ConfigEntry,
    expand: bool,
    do_decompress: bool,
@@ -216,7 +216,7 @@ pub fn make_remote_event_blobs_stream(
    disk_io_tune: DiskIoTune,
    node_config: &NodeConfigCached,
) -> Result<impl Stream<Item = Sitemty<EventFull>>, Error> {
-   info!("make_remote_event_blobs_stream");
+   debug!("make_remote_event_blobs_stream");
    let shape = match entry.to_shape() {
        Ok(k) => k,
        Err(e) => return Err(e)?,
@@ -363,7 +363,7 @@ pub async fn make_event_blobs_pipe(
    evq: &PlainEventsQuery,
    node_config: &NodeConfigCached,
) -> Result<Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>, Error> {
-   info!("make_event_blobs_pipe {evq:?}");
+   debug!("make_event_blobs_pipe {evq:?}");
    if evq.channel().backend() == TEST_BACKEND {
        make_event_blobs_pipe_test(evq, node_config).await
    } else {
@@ -9,7 +9,7 @@ use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::ByteOrder;
use netpod::ByteSize;
-use netpod::Channel;
+use netpod::SfDbChannel;
use netpod::Shape;
use std::path::PathBuf;
use tokio::fs::File;
@@ -81,7 +81,7 @@ pub fn main() -> Result<(), Error> {
    let inp = Box::pin(disk::file_content_stream(path.clone(), file, disk_io_tune));
    let ce = &config.entries[0];
    let channel_config = SfDbChConf {
-       channel: Channel {
+       channel: SfDbChannel {
            backend: String::new(),
            name: config.channel_name.clone(),
            series: None,
@@ -1,3 +1,5 @@
+pub mod configquorum;
+
use crate::err::Error;
use crate::gather::gather_get_json_generic;
use crate::gather::SubRes;
@@ -32,7 +34,6 @@ use netpod::query::api1::Api1Query;
use netpod::range::evrange::NanoRange;
use netpod::timeunits::SEC;
use netpod::ByteSize;
-use netpod::Channel;
use netpod::ChannelSearchQuery;
use netpod::ChannelSearchResult;
use netpod::DiskIoTune;
@@ -40,6 +41,7 @@ use netpod::NodeConfigCached;
use netpod::PerfOpts;
use netpod::ProxyConfig;
use netpod::ScalarType;
+use netpod::SfDbChannel;
use netpod::Shape;
use netpod::ACCEPT_ALL;
use netpod::APP_JSON;
@@ -53,6 +55,7 @@ use query::api4::events::PlainEventsQuery;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value as JsonValue;
+use std::collections::VecDeque;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
@@ -639,9 +642,9 @@ impl Api1ChannelHeader {

pub struct DataApiPython3DataStream {
    range: NanoRange,
-   channels: Vec<Channel>,
+   channels: VecDeque<SfDbChannel>,
+   current_channel: Option<SfDbChannel>,
    node_config: NodeConfigCached,
-   chan_ix: usize,
    chan_stream: Option<Pin<Box<dyn Stream<Item = Result<BytesMut, Error>> + Send>>>,
    config_fut: Option<Pin<Box<dyn Future<Output = Result<ChannelConfigs, Error>> + Send>>>,
    disk_io_tune: DiskIoTune,
@@ -658,7 +661,7 @@ pub struct DataApiPython3DataStream {
impl DataApiPython3DataStream {
    pub fn new(
        range: NanoRange,
-       channels: Vec<Channel>,
+       channels: Vec<SfDbChannel>,
        disk_io_tune: DiskIoTune,
        do_decompress: bool,
        events_max: u64,
@@ -667,9 +670,9 @@ impl DataApiPython3DataStream {
    ) -> Self {
        Self {
            range,
-           channels,
+           channels: channels.into_iter().collect(),
+           current_channel: None,
            node_config,
-           chan_ix: 0,
            chan_stream: None,
            config_fut: None,
            disk_io_tune,
@@ -685,7 +688,7 @@ impl DataApiPython3DataStream {

    fn convert_item(
        b: EventFull,
-       channel: &Channel,
+       channel: &SfDbChannel,
        entry: &ConfigEntry,
        header_out: &mut bool,
        count_events: &mut usize,
@@ -755,6 +758,136 @@ impl DataApiPython3DataStream {
        }
        Ok(d)
    }
+
+   fn handle_chan_stream_ready(&mut self, item: Result<BytesMut, Error>) -> Option<Result<BytesMut, Error>> {
+       match item {
+           Ok(k) => {
+               let n = Instant::now();
+               if n.duration_since(self.ping_last) >= Duration::from_millis(2000) {
+                   let mut sb = crate::status_board().unwrap();
+                   sb.mark_alive(&self.status_id);
+                   self.ping_last = n;
+               }
+               Some(Ok(k))
+           }
+           Err(e) => {
+               error!("DataApiPython3DataStream emit error: {e:?}");
+               self.chan_stream = None;
+               self.data_done = true;
+               let mut sb = crate::status_board().unwrap();
+               sb.add_error(&self.status_id, e);
+               if false {
+                   // TODO format as python data api error frame:
+                   let mut buf = BytesMut::with_capacity(1024);
+                   buf.put_slice("".as_bytes());
+                   Some(Ok(buf))
+               } else {
+                   None
+               }
+           }
+       }
+   }
+
+   fn handle_config_fut_ready(&mut self, item: Result<ChannelConfigs, Error>) -> Result<(), Error> {
+       match item {
+           Ok(config) => {
+               self.config_fut = None;
+               let entry_res = match extract_matching_config_entry(&self.range, &config) {
+                   Ok(k) => k,
+                   Err(e) => return Err(e)?,
+               };
+               match entry_res {
+                   MatchingConfigEntry::None => {
+                       warn!("DataApiPython3DataStream no config entry found for {:?}", config);
+                       self.chan_stream = Some(Box::pin(stream::empty()));
+                       Ok(())
+                   }
+                   MatchingConfigEntry::Multiple => {
+                       warn!(
+                           "DataApiPython3DataStream multiple config entries found for {:?}",
+                           config
+                       );
+                       self.chan_stream = Some(Box::pin(stream::empty()));
+                       Ok(())
+                   }
+                   MatchingConfigEntry::Entry(entry) => {
+                       let entry = entry.clone();
+                       let channel = self.current_channel.as_ref().unwrap();
+                       debug!("found channel_config for {}: {:?}", channel.name, entry);
+                       let evq = PlainEventsQuery::new(channel.clone(), self.range.clone()).for_event_blobs();
+                       debug!("query for event blobs retrieval: evq {evq:?}");
+                       // TODO important TODO
+                       debug!("TODO fix magic inmem_bufcap");
+                       debug!("TODO add timeout option to data api3 download");
+                       let perf_opts = PerfOpts::default();
+                       // TODO is this a good to place decide this?
+                       let s = if self.node_config.node_config.cluster.is_central_storage {
+                           info!("Set up central storage stream");
+                           // TODO pull up this config
+                           let event_chunker_conf = EventChunkerConf::new(ByteSize::kb(1024));
+                           let s = make_local_event_blobs_stream(
+                               evq.range().try_into()?,
+                               evq.channel().clone(),
+                               &entry,
+                               evq.one_before_range(),
+                               self.do_decompress,
+                               event_chunker_conf,
+                               self.disk_io_tune.clone(),
+                               &self.node_config,
+                           )?;
+                           Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
+                       } else {
+                           if let Some(sh) = &entry.shape {
+                               if sh.len() > 1 {
+                                   warn!("Remote stream fetch for shape {sh:?}");
+                               }
+                           }
+                           debug!("Set up merged remote stream");
+                           let s = MergedBlobsFromRemotes::new(
+                               evq,
+                               perf_opts,
+                               self.node_config.node_config.cluster.clone(),
+                           );
+                           Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
+                       };
+                       let s = s.map({
+                           let mut header_out = false;
+                           let mut count_events = 0;
+                           let channel = self.current_channel.clone().unwrap();
+                           move |b| {
+                               let ret = match b {
+                                   Ok(b) => {
+                                       let f = match b {
+                                           StreamItem::DataItem(RangeCompletableItem::Data(b)) => Self::convert_item(
+                                               b,
+                                               &channel,
+                                               &entry,
+                                               &mut header_out,
+                                               &mut count_events,
+                                           )?,
+                                           _ => BytesMut::new(),
+                                       };
+                                       Ok(f)
+                                   }
+                                   Err(e) => Err(e),
+                               };
+                               ret
+                           }
+                       });
+                       //let _ = Box::new(s) as Box<dyn Stream<Item = Result<BytesMut, Error>> + Unpin>;
+                       let evm = if self.events_max == 0 {
+                           usize::MAX
+                       } else {
+                           self.events_max as usize
+                       };
+                       self.chan_stream = Some(Box::pin(s.map_err(Error::from).take(evm)));
+                       Ok(())
+                   }
+               }
+           }
+           Err(e) => Err(Error::with_msg_no_trace(format!("can not parse channel config {e}"))),
+       }
+   }
}

impl Stream for DataApiPython3DataStream {
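The two helpers above absorb the long inline match arms that previously lived in `poll_next`, so the poll loop below shrinks to thin state transitions. A minimal sketch of the delegation pattern (types simplified, not this crate's exact signatures):

    // Inside a loop in poll_next: each arm either returns a Poll value
    // or mutates state and continues; the item handling lives in a &mut self helper.
    match stream.poll_next_unpin(cx) {
        Ready(Some(item)) => Ready(self.handle_chan_stream_ready(item)),
        Ready(None) => {
            self.chan_stream = None;
            continue; // advance to the next channel/state
        }
        Pending => Pending,
    }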
@@ -771,148 +904,33 @@ impl Stream for DataApiPython3DataStream {
        } else {
            if let Some(stream) = &mut self.chan_stream {
                match stream.poll_next_unpin(cx) {
-                   Ready(k) => match k {
-                       Some(k) => match k {
-                           Ok(k) => {
-                               let n = Instant::now();
-                               if n.duration_since(self.ping_last) >= Duration::from_millis(2000) {
-                                   let mut sb = crate::status_board().unwrap();
-                                   sb.mark_alive(&self.status_id);
-                                   self.ping_last = n;
-                               }
-                               Ready(Some(Ok(k)))
-                           }
-                           Err(e) => {
-                               error!("DataApiPython3DataStream emit error: {e:?}");
-                               self.data_done = true;
-                               let mut sb = crate::status_board().unwrap();
-                               sb.add_error(&self.status_id, e);
-                               if false {
-                                   // TODO format as python data api error frame:
-                                   let mut buf = BytesMut::with_capacity(1024);
-                                   buf.put_slice("".as_bytes());
-                                   Ready(Some(Ok(buf)))
-                               } else {
-                                   self.data_done = true;
-                                   Ready(None)
-                               }
-                           }
-                       },
-                       None => {
-                           self.chan_stream = None;
-                           continue;
-                       }
-                   },
+                   Ready(Some(k)) => Ready(self.handle_chan_stream_ready(k)),
+                   Ready(None) => {
+                       self.chan_stream = None;
+                       continue;
+                   }
                    Pending => Pending,
                }
            } else if let Some(fut) = &mut self.config_fut {
                match fut.poll_unpin(cx) {
-                   Ready(Ok(config)) => {
-                       self.config_fut = None;
-                       let entry_res = match extract_matching_config_entry(&self.range, &config) {
-                           Ok(k) => k,
-                           Err(e) => return Err(e)?,
-                       };
-                       match entry_res {
-                           MatchingConfigEntry::None => {
-                               warn!("DataApiPython3DataStream no config entry found for {:?}", config);
-                               self.chan_stream = Some(Box::pin(stream::empty()));
-                               continue;
-                           }
-                           MatchingConfigEntry::Multiple => {
-                               warn!(
-                                   "DataApiPython3DataStream multiple config entries found for {:?}",
-                                   config
-                               );
-                               self.chan_stream = Some(Box::pin(stream::empty()));
-                               continue;
-                           }
-                           MatchingConfigEntry::Entry(entry) => {
-                               let entry = entry.clone();
-                               let channel = self.channels[self.chan_ix - 1].clone();
-                               debug!("found channel_config for {}: {:?}", channel.name, entry);
-                               let evq = PlainEventsQuery::new(channel, self.range.clone()).for_event_blobs();
-                               info!("query for event blobs retrieval: evq {evq:?}");
-                               warn!("TODO fix magic inmem_bufcap");
-                               warn!("TODO add timeout option to data api3 download");
-                               let perf_opts = PerfOpts::default();
-                               // TODO is this a good to place decide this?
-                               let s = if self.node_config.node_config.cluster.is_central_storage {
-                                   info!("Set up central storage stream");
-                                   // TODO pull up this config
-                                   let event_chunker_conf = EventChunkerConf::new(ByteSize::kb(1024));
-                                   let s = make_local_event_blobs_stream(
-                                       evq.range().try_into()?,
-                                       evq.channel().clone(),
-                                       &entry,
-                                       evq.one_before_range(),
-                                       self.do_decompress,
-                                       event_chunker_conf,
-                                       self.disk_io_tune.clone(),
-                                       &self.node_config,
-                                   )?;
-                                   Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
-                               } else {
-                                   if let Some(sh) = &entry.shape {
-                                       if sh.len() > 1 {
-                                           warn!("Remote stream fetch for shape {sh:?}");
-                                       }
-                                   }
-                                   debug!("Set up merged remote stream");
-                                   let s = MergedBlobsFromRemotes::new(
-                                       evq,
-                                       perf_opts,
-                                       self.node_config.node_config.cluster.clone(),
-                                   );
-                                   Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
-                               };
-                               let s = s.map({
-                                   let mut header_out = false;
-                                   let mut count_events = 0;
-                                   let channel = self.channels[self.chan_ix - 1].clone();
-                                   move |b| {
-                                       let ret = match b {
-                                           Ok(b) => {
-                                               let f = match b {
-                                                   StreamItem::DataItem(RangeCompletableItem::Data(b)) => {
-                                                       Self::convert_item(
-                                                           b,
-                                                           &channel,
-                                                           &entry,
-                                                           &mut header_out,
-                                                           &mut count_events,
-                                                       )?
-                                                   }
-                                                   _ => BytesMut::new(),
-                                               };
-                                               Ok(f)
-                                           }
-                                           Err(e) => Err(e),
-                                       };
-                                       ret
-                                   }
-                               });
-                               //let _ = Box::new(s) as Box<dyn Stream<Item = Result<BytesMut, Error>> + Unpin>;
-                               let evm = if self.events_max == 0 {
-                                   usize::MAX
-                               } else {
-                                   self.events_max as usize
-                               };
-                               self.chan_stream = Some(Box::pin(s.map_err(Error::from).take(evm)));
-                               continue;
-                           }
-                       }
-                   }
+                   Ready(k) => match self.handle_config_fut_ready(k) {
+                       Ok(()) => continue,
+                       Err(e) => {
+                           self.config_fut = None;
+                           self.data_done = true;
+                           error!("api1_binary_events error {:?}", e);
+                           Ready(Some(Err(e)))
+                       }
+                   },
-                   Ready(Err(e)) => {
-                       self.config_fut = None;
-                       self.data_done = true;
-                       error!("api1_binary_events error {:?}", e);
-                       Ready(Some(Err(Error::with_msg_no_trace("can not parse channel config"))))
-                   }
                    Pending => Pending,
                }
            } else {
-               if self.chan_ix >= self.channels.len() {
+               if let Some(channel) = self.channels.pop_front() {
+                   self.current_channel = Some(channel.clone());
+                   let fut = read_local_config(channel, self.node_config.clone()).map_err(Error::from);
+                   self.config_fut = Some(Box::pin(fut));
+                   continue;
+               } else {
                    self.data_done = true;
                    {
                        let n = Instant::now();
@@ -922,12 +940,6 @@ impl Stream for DataApiPython3DataStream {
                        sb.mark_ok(&self.status_id);
                    }
                    continue;
-               } else {
-                   let channel = self.channels[self.chan_ix].clone();
-                   self.chan_ix += 1;
-                   let fut = read_local_config(channel.clone(), self.node_config.clone()).map_err(Error::from);
-                   self.config_fut = Some(Box::pin(fut));
-                   continue;
-               }
                }
            }
        };
@@ -1034,7 +1046,7 @@ impl Api1EventsBinaryHandler {
        let chans = qu
            .channels()
            .iter()
-           .map(|ch| Channel {
+           .map(|ch| SfDbChannel {
                backend: backend.into(),
                name: ch.name().into(),
                series: None,
@@ -1058,9 +1070,9 @@ impl Api1EventsBinaryHandler {
            Ok(ret)
        } else {
            // TODO set the public error code and message and return Err(e).
-           let e = Error::with_public_msg(format!("Unsupported Accept: {:?}", accept));
-           error!("{e:?}");
-           return Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?);
+           let e = Error::with_public_msg(format!("Unsupported Accept: {}", accept));
+           error!("{e}");
+           Ok(response(StatusCode::NOT_ACCEPTABLE).body(Body::empty())?)
        }
    }
}
httpret/src/api1/configquorum.rs (new file)
@@ -0,0 +1,4 @@
+pub async fn find_config_quorum() {
+    // TODO create new endpoint which only returns the most matching config entry
+    // for some given channel and time range.
+}
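The stub only records intent; one possible eventual shape, purely illustrative (signature and return type are assumptions, not part of this commit):

    // Hypothetical: return only the best-matching config entry for a channel and range.
    pub async fn find_config_quorum(
        channel: SfDbChannel,
        range: NanoRange,
        ncc: &NodeConfigCached,
    ) -> Result<Option<ConfigEntry>, Error> {
        let configs = read_local_config(channel, ncc.clone()).await?;
        match extract_matching_config_entry(&range, &configs)? {
            MatchingConfigEntry::Entry(e) => Ok(Some(e.clone())),
            _ => Ok(None),
        }
    }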
@@ -13,12 +13,12 @@ use netpod::log::*;
use netpod::query::prebinned::PreBinnedQuery;
use netpod::timeunits::*;
use netpod::ChConf;
-use netpod::Channel;
use netpod::ChannelConfigQuery;
use netpod::ChannelConfigResponse;
use netpod::FromUrl;
use netpod::NodeConfigCached;
use netpod::ScalarType;
+use netpod::SfDbChannel;
use netpod::Shape;
use netpod::ACCEPT_ALL;
use netpod::APP_JSON;
@@ -100,7 +100,7 @@ impl ChannelConfigHandler {
        let conf = if let Some(_scyco) = &node_config.node_config.cluster.scylla {
            let c = nodenet::channelconfig::channel_config(q.range.clone(), q.channel.clone(), node_config).await?;
            ChannelConfigResponse {
-               channel: Channel {
+               channel: SfDbChannel {
                    series: c.series.clone(),
                    backend: q.channel.backend().into(),
                    name: c.name,
@@ -110,9 +110,9 @@ impl ChannelConfigHandler {
                shape: c.shape,
            }
        } else if let Some(_) = &node_config.node.channel_archiver {
-           return Err(Error::with_msg_no_trace("no archiver"));
+           return Err(Error::with_msg_no_trace("channel archiver not supported"));
        } else if let Some(_) = &node_config.node.archiver_appliance {
-           return Err(Error::with_msg_no_trace("no archapp"));
+           return Err(Error::with_msg_no_trace("archiver appliance not supported"));
        } else {
            parse::channelconfig::channel_config(&q, node_config).await?
        };
@@ -332,7 +332,7 @@ impl FromUrl for ChannelsWithTypeQuery {

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChannelListWithType {
-   channels: Vec<Channel>,
+   channels: Vec<SfDbChannel>,
}

pub struct ScyllaChannelsWithType {}
@@ -392,7 +392,7 @@ impl ScyllaChannelsWithType {
        let mut list = Vec::new();
        for row in res.rows_typed_or_empty::<(String, i64)>() {
            let (channel_name, series) = row.err_conv()?;
-           let ch = Channel {
+           let ch = SfDbChannel {
                backend: backend.into(),
                name: channel_name,
                series: Some(series as u64),
@@ -290,6 +290,25 @@ async fn http_service_inner(
        } else {
            Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
        }
+   } else if path.starts_with("/api/4/private/logtest/") {
+       if req.method() == Method::GET {
+           if path.ends_with("/trace") {
+               trace!("test trace log output");
+           } else if path.ends_with("/debug") {
+               debug!("test debug log output");
+           } else if path.ends_with("/info") {
+               info!("test info log output");
+           } else if path.ends_with("/warn") {
+               warn!("test warn log output");
+           } else if path.ends_with("/error") {
+               error!("test error log output");
+           } else {
+               error!("test unknown log output");
+           }
+           Ok(response(StatusCode::OK).body(Body::empty())?)
+       } else {
+           Ok(response(StatusCode::METHOD_NOT_ALLOWED).body(Body::empty())?)
+       }
    } else if let Some(h) = api4::status::StatusNodesRecursive::handler(&req) {
        h.handle(req, ctx, &node_config).await
    } else if let Some(h) = StatusBoardAllHandler::handler(&req) {
@@ -1,6 +1,6 @@
[package]
name = "items_proc"
-version = "0.0.1"
+version = "0.0.2"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2021"

@@ -9,4 +9,4 @@ path = "src/items_proc.rs"
proc-macro = true

[dependencies]
-syn = "1"
+syn = "2"
@@ -595,12 +595,12 @@ pub struct NodeStatus {
|
||||
pub subs: VecDeque<NodeStatusSub>,
|
||||
}
|
||||
|
||||
// Describes a "channel" which is a time-series with a unique name within a "backend".
|
||||
// Describes a swissfel-databuffer style "channel" which is a time-series with a unique name within a "backend".
|
||||
// Also the concept of "backend" could be split into "facility" and some optional other identifier
|
||||
// for cases like e.g. post-mortem, or to differentiate between channel-access and bsread for cases where
|
||||
// the same channel-name is delivered via different methods.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct Channel {
|
||||
pub struct SfDbChannel {
|
||||
pub series: Option<u64>,
|
||||
// "backend" is currently used in the existing systems for multiple purposes:
|
||||
// it can indicate the facility (eg. sf-databuffer, hipa, ...) but also
|
||||
@@ -609,7 +609,7 @@ pub struct Channel {
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
impl Channel {
|
||||
impl SfDbChannel {
|
||||
pub fn backend(&self) -> &str {
|
||||
&self.backend
|
||||
}
|
||||
@@ -623,14 +623,14 @@ impl Channel {
|
||||
}
|
||||
}
|
||||
|
||||
impl FromUrl for Channel {
|
||||
impl FromUrl for SfDbChannel {
|
||||
fn from_url(url: &Url) -> Result<Self, Error> {
|
||||
let pairs = get_url_query_pairs(url);
|
||||
Self::from_pairs(&pairs)
|
||||
}
|
||||
|
||||
fn from_pairs(pairs: &BTreeMap<String, String>) -> Result<Self, Error> {
|
||||
let ret = Channel {
|
||||
let ret = SfDbChannel {
|
||||
backend: pairs
|
||||
.get("backend")
|
||||
.ok_or(Error::with_public_msg("missing backend"))?
|
||||
@@ -654,7 +654,7 @@ impl FromUrl for Channel {
|
||||
}
|
||||
}
|
||||
|
||||
impl AppendToUrl for Channel {
|
||||
impl AppendToUrl for SfDbChannel {
|
||||
fn append_to_url(&self, url: &mut Url) {
|
||||
let mut g = url.query_pairs_mut();
|
||||
g.append_pair("backend", &self.backend);
|
||||
@@ -669,13 +669,13 @@ impl AppendToUrl for Channel {
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct ChannelTyped {
|
||||
pub channel: Channel,
|
||||
pub channel: SfDbChannel,
|
||||
pub scalar_type: ScalarType,
|
||||
pub shape: Shape,
|
||||
}
|
||||
|
||||
impl ChannelTyped {
|
||||
pub fn channel(&self) -> &Channel {
|
||||
pub fn channel(&self) -> &SfDbChannel {
|
||||
&self.channel
|
||||
}
|
||||
}
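
Note: a sketch of how the renamed type round-trips through URL query pairs via the FromUrl and AppendToUrl impls above (channel values and URL are placeholders; the query-pair names besides backend are elided in this hunk):

use netpod::{AppendToUrl, FromUrl, SfDbChannel};
use url::Url;

fn roundtrip_sketch() -> Result<(), err::Error> {
    let ch = SfDbChannel {
        series: None,
        backend: "sf-databuffer".into(),
        name: "S10BC01-DBAM070:BAM_CH1_NORM".into(),
    };
    let mut url = Url::parse("http://localhost/api/4/events").unwrap();
    ch.append_to_url(&mut url);
    // Parsing the same URL back should yield an equivalent channel.
    let back = SfDbChannel::from_url(&url)?;
    assert_eq!(back.backend(), "sf-databuffer");
    Ok(())
}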

@@ -2238,7 +2238,7 @@ pub fn get_url_query_pairs(url: &Url) -> BTreeMap<String, String> {
// The presence of a configuration in some range does not imply that there is any data available.
#[derive(Debug, Serialize, Deserialize)]
pub struct ChannelConfigQuery {
    pub channel: Channel,
    pub channel: SfDbChannel,
    pub range: NanoRange,
    pub expand: bool,
}
@@ -2265,14 +2265,14 @@ impl FromUrl for ChannelConfigQuery {
        let beg_date = pairs
            .get("begDate")
            .map(String::from)
            .unwrap_or_else(|| String::from("2000-01-01T00:00:00Z"));
            .unwrap_or_else(|| String::from("1970-01-01T00:00:00Z"));
        let end_date = pairs
            .get("endDate")
            .map(String::from)
            .unwrap_or_else(|| String::from("3000-01-01T00:00:00Z"));
        let expand = pairs.get("expand").map(|s| s == "true").unwrap_or(false);
        let ret = Self {
            channel: Channel::from_pairs(&pairs)?,
            channel: SfDbChannel::from_pairs(&pairs)?,
            range: NanoRange {
                beg: beg_date.parse::<DateTime<Utc>>()?.to_nanos(),
                end: end_date.parse::<DateTime<Utc>>()?.to_nanos(),
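
Note: with the new default, a config query URL that omits begDate falls back to the Unix epoch instead of 2000-01-01, so the begin of the range parses to zero nanoseconds. A sketch (URL path is a placeholder):

use netpod::{AppendToUrl, ChannelConfigQuery, FromUrl, SfDbChannel};
use url::Url;

fn default_range_sketch() -> Result<(), err::Error> {
    let ch = SfDbChannel {
        series: None,
        backend: "sf-databuffer".into(),
        name: "S10BC01-DBAM070:BAM_CH1_NORM".into(),
    };
    let mut url = Url::parse("http://localhost/api/4/channel/config").unwrap();
    ch.append_to_url(&mut url);
    // No begDate/endDate pairs: beg defaults to 1970-01-01, end to 3000-01-01.
    let q = ChannelConfigQuery::from_url(&url)?;
    assert_eq!(q.range.beg, 0);
    Ok(())
}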

@@ -2305,7 +2305,7 @@ impl AppendToUrl for ChannelConfigQuery {
#[derive(Debug, Serialize, Deserialize)]
pub struct ChannelConfigResponse {
    #[serde(rename = "channel")]
    pub channel: Channel,
    pub channel: SfDbChannel,
    #[serde(rename = "scalarType")]
    pub scalar_type: ScalarType,
    #[serde(rename = "byteOrder", default, skip_serializing_if = "Option::is_none")]

@@ -8,13 +8,13 @@ use crate::log::*;
use crate::AggKind;
use crate::AppendToUrl;
use crate::ByteSize;
use crate::Channel;
use crate::FromUrl;
use crate::HasBackend;
use crate::HasTimeout;
use crate::NanoRange;
use crate::PulseRange;
use crate::SeriesRange;
use crate::SfDbChannel;
use crate::ToNanos;
use chrono::DateTime;
use chrono::TimeZone;
@@ -252,12 +252,12 @@ pub fn agg_kind_from_binning_scheme(pairs: &BTreeMap<String, String>) -> Result<

#[derive(Clone, Debug)]
pub struct ChannelStateEventsQuery {
    channel: Channel,
    channel: SfDbChannel,
    range: NanoRange,
}

impl ChannelStateEventsQuery {
    pub fn new(channel: Channel, range: NanoRange) -> Self {
    pub fn new(channel: SfDbChannel, range: NanoRange) -> Self {
        Self { channel, range }
    }

@@ -265,7 +265,7 @@ impl ChannelStateEventsQuery {
        &self.range
    }

    pub fn channel(&self) -> &Channel {
    pub fn channel(&self) -> &SfDbChannel {
        &self.channel
    }

@@ -273,7 +273,7 @@ impl ChannelStateEventsQuery {
        self.channel.series = Some(series);
    }

    pub fn channel_mut(&mut self) -> &mut Channel {
    pub fn channel_mut(&mut self) -> &mut SfDbChannel {
        &mut self.channel
    }
}
@@ -300,7 +300,7 @@ impl FromUrl for ChannelStateEventsQuery {
        let beg_date = pairs.get("begDate").ok_or(Error::with_msg("missing begDate"))?;
        let end_date = pairs.get("endDate").ok_or(Error::with_msg("missing endDate"))?;
        let ret = Self {
            channel: Channel::from_pairs(&pairs)?,
            channel: SfDbChannel::from_pairs(&pairs)?,
            range: NanoRange {
                beg: beg_date.parse::<DateTime<Utc>>()?.to_nanos(),
                end: end_date.parse::<DateTime<Utc>>()?.to_nanos(),
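
Note: a sketch of constructing the state-events query with the renamed channel type (the module path and all values are assumptions for illustration):

use netpod::query::ChannelStateEventsQuery; // module path assumed
use netpod::range::evrange::NanoRange;
use netpod::SfDbChannel;

fn state_events_query_sketch() -> ChannelStateEventsQuery {
    let channel = SfDbChannel {
        series: None,
        backend: "sf-databuffer".into(),
        name: "S10BC01-DBAM070:BAM_CH1_NORM".into(),
    };
    // One hour expressed in nanoseconds since the epoch.
    let range = NanoRange { beg: 0, end: 3_600_000_000_000 };
    ChannelStateEventsQuery::new(channel, range)
}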

@@ -4,10 +4,10 @@ use super::CacheUsage;
use crate::AggKind;
use crate::AppendToUrl;
use crate::ByteSize;
use crate::Channel;
use crate::FromUrl;
use crate::PreBinnedPatchCoordEnum;
use crate::ScalarType;
use crate::SfDbChannel;
use crate::Shape;
use err::Error;
use std::collections::BTreeMap;
@@ -16,7 +16,7 @@ use url::Url;
#[derive(Clone, Debug)]
pub struct PreBinnedQuery {
    patch: PreBinnedPatchCoordEnum,
    channel: Channel,
    channel: SfDbChannel,
    scalar_type: ScalarType,
    shape: Shape,
    agg_kind: Option<AggKind>,
@@ -28,7 +28,7 @@ pub struct PreBinnedQuery {
impl PreBinnedQuery {
    pub fn new(
        patch: PreBinnedPatchCoordEnum,
        channel: Channel,
        channel: SfDbChannel,
        scalar_type: ScalarType,
        shape: Shape,
        agg_kind: Option<AggKind>,
@@ -64,7 +64,7 @@ impl PreBinnedQuery {
            .map(|x| Shape::from_url_str(&x))??;
        let ret = Self {
            patch: PreBinnedPatchCoordEnum::from_pairs(&pairs)?,
            channel: Channel::from_pairs(&pairs)?,
            channel: SfDbChannel::from_pairs(&pairs)?,
            scalar_type,
            shape,
            agg_kind: agg_kind_from_binning_scheme(&pairs)?,
@@ -85,7 +85,7 @@ impl PreBinnedQuery {
        &self.patch
    }

    pub fn channel(&self) -> &Channel {
    pub fn channel(&self) -> &SfDbChannel {
        &self.channel
    }

@@ -3,14 +3,14 @@ use err::Error;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::ChConf;
use netpod::Channel;
use netpod::NodeConfigCached;
use netpod::ScalarType;
use netpod::SfDbChannel;
use netpod::Shape;

const TEST_BACKEND: &str = "testbackend-00";

pub async fn channel_config(range: NanoRange, channel: Channel, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
pub async fn channel_config(range: NanoRange, channel: SfDbChannel, ncc: &NodeConfigCached) -> Result<ChConf, Error> {
    if channel.backend() == TEST_BACKEND {
        let backend = channel.backend().into();
        // TODO the series-ids here are just random. Need to integrate with better test setup.
@@ -75,14 +75,14 @@ pub async fn channel_config(range: NanoRange, channel: Channel, ncc: &NodeConfig
        };
        ret
    } else if ncc.node_config.cluster.scylla.is_some() {
        info!("try to get ChConf for scylla type backend");
        debug!("try to get ChConf for scylla type backend");
        let ret = dbconn::channelconfig::chconf_from_scylla_type_backend(&channel, ncc)
            .await
            .map_err(Error::from)?;
        Ok(ret)
    } else if ncc.node.sf_databuffer.is_some() {
        info!("channel_config BEFORE {channel:?}");
        info!("try to get ChConf for sf-databuffer type backend");
        debug!("channel_config BEFORE {channel:?}");
        debug!("try to get ChConf for sf-databuffer type backend");
        // TODO in the future we should not need this:
        let mut channel = sf_databuffer_fetch_channel_by_series(channel, ncc).await?;
        if channel.series.is_none() {
@@ -92,9 +92,9 @@ pub async fn channel_config(range: NanoRange, channel: Channel, ncc: &NodeConfig
            channel.series = Some(series);
        }
        let channel = channel;
        info!("channel_config AFTER {channel:?}");
        debug!("channel_config AFTER {channel:?}");
        let c1 = disk::channelconfig::config(range, channel.clone(), ncc).await?;
        info!("channel_config THEN {c1:?}");
        debug!("channel_config THEN {c1:?}");
        let ret = ChConf {
            backend: c1.channel.backend,
            series: channel.series,

@@ -1,4 +1,5 @@
use crate::conn::events_conn_handler;
use err::Error;
use futures_util::StreamExt;
use items_0::streamitem::sitem_data;
use items_0::streamitem::Sitemty;
@@ -13,7 +14,6 @@ use items_2::framable::Framable;
use items_2::frame::decode_frame;
use netpod::range::evrange::NanoRange;
use netpod::timeunits::SEC;
use netpod::Channel;
use netpod::Cluster;
use netpod::Database;
use netpod::FileIoBufferSize;
@@ -22,6 +22,7 @@ use netpod::NodeConfig;
use netpod::NodeConfigCached;
use netpod::PerfOpts;
use netpod::SfDatabuffer;
use netpod::SfDbChannel;
use query::api4::events::PlainEventsQuery;
use streams::frames::inmem::InMemoryFrameAsyncReadStream;
use tokio::io::AsyncWriteExt;
@@ -73,7 +74,7 @@ fn raw_data_00() {
        },
        ix: 0,
    };
    let channel = Channel {
    let channel = SfDbChannel {
        series: None,
        backend: TEST_BACKEND.into(),
        name: "scalar-i32".into(),
@@ -117,7 +118,7 @@ fn raw_data_00() {
            }
        }
        jh.await.unwrap().unwrap();
        Ok(())
        Ok::<_, Error>(())
    };
    taskrun::run(fut).unwrap();
}

@@ -1,13 +1,14 @@
use err::Error;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::timeunits::DAY;
use netpod::timeunits::MS;
use netpod::ByteOrder;
use netpod::Channel;
use netpod::ChannelConfigQuery;
use netpod::ChannelConfigResponse;
use netpod::NodeConfigCached;
use netpod::ScalarType;
use netpod::SfDbChannel;
use netpod::Shape;
use netpod::TsNano;
use nom::bytes::complete::take;
@@ -277,11 +278,17 @@ pub fn parse_config(inp: &[u8]) -> NRes<ChannelConfigs> {
        return mkerr(format!("no channel name. len1 {}", len1));
    }
    let (inp, chn) = take((len1 - 8) as usize)(inp)?;
    let channel_name = match String::from_utf8(chn.to_vec()) {
        Ok(k) => k,
        Err(e) => {
            return mkerr(format!("channelName utf8 error {:?}", e));
        }
    };
    let (inp, len2) = be_i32(inp)?;
    if len1 != len2 {
        return mkerr(format!("Mismatch len1 {} len2 {}", len1, len2));
    }
    let mut entries = vec![];
    let mut entries = Vec::new();
    let mut inp_a = inp;
    while inp_a.len() > 0 {
        let inp = inp_a;
@@ -291,16 +298,33 @@ pub fn parse_config(inp: &[u8]) -> NRes<ChannelConfigs> {
        }
        inp_a = inp;
    }
    let channel_name = match String::from_utf8(chn.to_vec()) {
        Ok(k) => k,
        Err(e) => {
            return mkerr(format!("channelName utf8 error {:?}", e));
        }
    };
    // TODO hack to accommodate for parts of databuffer nodes failing:
    if false && channel_name == "SARFE10-PSSS059:SPECTRUM_X" {
        warn!("apply hack for {channel_name}");
        entries = entries
            .into_iter()
            .map(|mut e| {
                e.is_array = true;
                e.shape = Some(vec![2560]);
                e
            })
            .collect();
    }
    if false && channel_name == "SARFE10-PSSS059:SPECTRUM_Y" {
        warn!("apply hack for {channel_name}");
        entries = entries
            .into_iter()
            .map(|mut e| {
                e.is_array = true;
                e.shape = Some(vec![2560]);
                e
            })
            .collect();
    }
    let ret = ChannelConfigs {
        format_version: ver,
        channel_name: channel_name,
        entries: entries,
        channel_name,
        entries,
    };
    Ok((inp, ret))
}
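
Note: the config header parsed above is length-delimited: a big-endian i32 total length, the payload, then the same length repeated as a trailer, with the 8 bytes of the two length fields included in the count. A standalone nom sketch of that framing (illustrative only, not the crate's actual parser):

use nom::bytes::complete::take;
use nom::error::{Error as NomError, ErrorKind};
use nom::number::complete::be_i32;
use nom::IResult;

// Parse one `len | payload | len` frame and return the payload slice.
fn length_delimited(inp: &[u8]) -> IResult<&[u8], &[u8]> {
    let (inp, len1) = be_i32(inp)?;
    if len1 < 8 {
        return Err(nom::Err::Error(NomError::new(inp, ErrorKind::Verify)));
    }
    let (inp, payload) = take((len1 - 8) as usize)(inp)?;
    let (inp, len2) = be_i32(inp)?;
    if len1 != len2 {
        // Prefix and trailer disagree: corrupt frame.
        return Err(nom::Err::Error(NomError::new(inp, ErrorKind::Verify)));
    }
    Ok((inp, payload))
}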

@@ -322,7 +346,7 @@ pub async fn channel_config(q: &ChannelConfigQuery, ncc: &NodeConfigCached) -> R
    Ok(ret)
}

async fn read_local_config_real(channel: Channel, ncc: &NodeConfigCached) -> Result<ChannelConfigs, Error> {
async fn read_local_config_real(channel: SfDbChannel, ncc: &NodeConfigCached) -> Result<ChannelConfigs, Error> {
    let path = ncc
        .node
        .sf_databuffer
@@ -356,7 +380,7 @@ async fn read_local_config_real(channel: Channel, ncc: &NodeConfigCached) -> Res
    Ok(config.1)
}

async fn read_local_config_test(channel: Channel, ncc: &NodeConfigCached) -> Result<ChannelConfigs, Error> {
async fn read_local_config_test(channel: SfDbChannel, ncc: &NodeConfigCached) -> Result<ChannelConfigs, Error> {
    if channel.name() == "test-gen-i32-dim0-v00" {
        let ret = ChannelConfigs {
            format_version: 0,
@@ -423,7 +447,7 @@ async fn read_local_config_test(channel: Channel, ncc: &NodeConfigCached) -> Res
}

// TODO can I take parameters as ref, even when used in custom streams?
pub async fn read_local_config(channel: Channel, ncc: NodeConfigCached) -> Result<ChannelConfigs, Error> {
pub async fn read_local_config(channel: SfDbChannel, ncc: NodeConfigCached) -> Result<ChannelConfigs, Error> {
    if channel.backend() == TEST_BACKEND {
        read_local_config_test(channel, &ncc).await
    } else {
@@ -442,6 +466,10 @@ pub fn extract_matching_config_entry<'a>(
    range: &NanoRange,
    channel_config: &'a ChannelConfigs,
) -> Result<MatchingConfigEntry<'a>, Error> {
    // TODO remove temporary
    if channel_config.channel_name == "SARFE10-PSSS059:SPECTRUM_X" {
        debug!("found config {:?}", channel_config);
    }
    let mut ixs = Vec::new();
    for i1 in 0..channel_config.entries.len() {
        let e1 = &channel_config.entries[i1];
@@ -461,7 +489,12 @@ pub fn extract_matching_config_entry<'a>(
    } else if ixs.len() > 1 {
        Ok(MatchingConfigEntry::Multiple)
    } else {
        Ok(MatchingConfigEntry::Entry(&channel_config.entries[ixs[0]]))
        let e = &channel_config.entries[ixs[0]];
        // TODO remove temporary
        if channel_config.channel_name == "SARFE10-PSSS059:SPECTRUM_X" {
            debug!("found matching entry {:?}", e);
        }
        Ok(MatchingConfigEntry::Entry(e))
    }
}

@@ -8,10 +8,10 @@ use netpod::query::TimeRangeQuery;
use netpod::range::evrange::SeriesRange;
use netpod::AppendToUrl;
use netpod::ByteSize;
use netpod::Channel;
use netpod::FromUrl;
use netpod::HasBackend;
use netpod::HasTimeout;
use netpod::SfDbChannel;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
@@ -20,7 +20,7 @@ use url::Url;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BinnedQuery {
    channel: Channel,
    channel: SfDbChannel,
    range: SeriesRange,
    bin_count: u32,
    #[serde(
@@ -43,7 +43,7 @@ pub struct BinnedQuery {
}

impl BinnedQuery {
    pub fn new(channel: Channel, range: SeriesRange, bin_count: u32) -> Self {
    pub fn new(channel: SfDbChannel, range: SeriesRange, bin_count: u32) -> Self {
        Self {
            channel,
            range,
@@ -62,7 +62,7 @@ impl BinnedQuery {
        &self.range
    }

    pub fn channel(&self) -> &Channel {
    pub fn channel(&self) -> &SfDbChannel {
        &self.channel
    }

@@ -115,7 +115,7 @@ impl BinnedQuery {
        self.channel.series = Some(series);
    }

    pub fn channel_mut(&mut self) -> &mut Channel {
    pub fn channel_mut(&mut self) -> &mut SfDbChannel {
        &mut self.channel
    }

@@ -169,7 +169,7 @@ impl FromUrl for BinnedQuery {
            return Err(Error::with_msg_no_trace("no series range in url"));
        };
        let ret = Self {
            channel: Channel::from_pairs(&pairs)?,
            channel: SfDbChannel::from_pairs(&pairs)?,
            range,
            bin_count: pairs
                .get("binCount")

@@ -9,10 +9,10 @@ use netpod::query::TimeRangeQuery;
use netpod::range::evrange::SeriesRange;
use netpod::AppendToUrl;
use netpod::ByteSize;
use netpod::Channel;
use netpod::FromUrl;
use netpod::HasBackend;
use netpod::HasTimeout;
use netpod::SfDbChannel;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
@@ -21,7 +21,7 @@ use url::Url;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PlainEventsQuery {
    channel: Channel,
    channel: SfDbChannel,
    range: SeriesRange,
    #[serde(default, skip_serializing_if = "is_false", rename = "oneBeforeRange")]
    one_before_range: bool,
@@ -51,7 +51,7 @@ pub struct PlainEventsQuery {
}

impl PlainEventsQuery {
    pub fn new<R>(channel: Channel, range: R) -> Self
    pub fn new<R>(channel: SfDbChannel, range: R) -> Self
    where
        R: Into<SeriesRange>,
    {
@@ -72,7 +72,7 @@ impl PlainEventsQuery {
        }
    }

    pub fn channel(&self) -> &Channel {
    pub fn channel(&self) -> &SfDbChannel {
        &self.channel
    }

@@ -189,7 +189,7 @@ impl FromUrl for PlainEventsQuery {
            return Err(Error::with_msg_no_trace("no series range in url"));
        };
        let ret = Self {
            channel: Channel::from_pairs(pairs)?,
            channel: SfDbChannel::from_pairs(pairs)?,
            range,
            one_before_range: pairs.get("oneBeforeRange").map_or("false", |x| x.as_ref()) == "true",
            transform: TransformQuery::from_pairs(pairs)?,
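
Note: the events-query analog, using the generic constructor, which accepts anything convertible into a SeriesRange (values are placeholders):

use netpod::range::evrange::NanoRange;
use netpod::SfDbChannel;
use query::api4::events::PlainEventsQuery;

fn events_query_sketch() -> PlainEventsQuery {
    let channel = SfDbChannel {
        series: None,
        backend: "sf-databuffer".into(),
        name: "scalar-i32".into(),
    };
    // NanoRange satisfies the Into<SeriesRange> bound on `new`.
    let range = NanoRange { beg: 0, end: 3_600_000_000_000 };
    PlainEventsQuery::new(channel, range)
}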

@@ -1,7 +1,7 @@
use crate::errconv::ErrConv;
use err::Error;
use futures_util::StreamExt;
use netpod::{log::*, Channel, ScalarType, Shape};
use netpod::{log::*, ScalarType, SfDbChannel, Shape};
use netpod::{ChannelConfigQuery, ChannelConfigResponse};
use scylla::Session as ScySession;
use std::sync::Arc;
@@ -19,7 +19,7 @@ pub async fn config_from_scylla(chq: ChannelConfigQuery, scy: Arc<ScySession>) -
    let cols = row.into_typed::<(i64, i32, Vec<i32>)>().err_conv()?;
    let scalar_type = ScalarType::from_scylla_i32(cols.1)?;
    let shape = Shape::from_scylla_shape_dims(&cols.2)?;
    let channel = Channel {
    let channel = SfDbChannel {
        series: Some(cols.0 as _),
        backend: chq.channel.backend().into(),
        name: chq.channel.name().into(),

@@ -141,7 +141,8 @@ where
            return Err(e);
        }
        self.inp_bytes_consumed += lentot as u64;
        trace!("parsed frame well len {}", len);
        // TODO metrics
        //trace!("parsed frame well len {}", len);
        let ret = InMemoryFrame {
            len,
            tyid,

@@ -1,328 +0,0 @@
use err::Error;
use std::borrow::Cow;
use std::fs;
use std::io::{BufWriter, Read, Seek, SeekFrom, Stdin, Write};
use std::path::{Path, PathBuf};

const MAX_PER_FILE: u64 = 1024 * 1024 * 2;

pub struct Buffer {
    buf: Vec<u8>,
    wp: usize,
    rp: usize,
}

const BUFFER_CAP: usize = 1024 * 8;

impl Buffer {
    pub fn new() -> Buffer {
        Self {
            buf: vec![0; BUFFER_CAP],
            wp: 0,
            rp: 0,
        }
    }

    pub fn reset(&mut self) {
        self.rp = 0;
        self.wp = 0;
    }

    pub fn len(&self) -> usize {
        self.wp - self.rp
    }

    pub fn check_invariant(&self) {
        if self.wp > self.buf.len() {
            eprintln!("ERROR wp {} rp {}", self.wp, self.rp);
        }
        if self.rp > self.wp {
            eprintln!("ERROR wp {} rp {}", self.wp, self.rp);
        }
        assert!(self.wp <= self.buf.len());
        assert!(self.rp <= self.wp);
    }

    pub fn writable(&mut self) -> &mut [u8] {
        self.check_invariant();
        self.wrap_if_needed();
        &mut self.buf[self.wp..]
    }

    pub fn readable(&self) -> &[u8] {
        self.check_invariant();
        &self.buf[self.rp..self.wp]
    }

    pub fn advance(&mut self, c: usize) {
        self.check_invariant();
        if c > self.len() {
            eprintln!("ERROR advance wp {} rp {} c {}", self.wp, self.rp, c);
        }
        assert!(c <= self.len());
        self.rp += c;
    }

    pub fn inc_wp(&mut self, c: usize) {
        self.check_invariant();
        if c > self.buf.len() - self.wp {
            eprintln!("ERROR inc_wp wp {} rp {} c {}", self.wp, self.rp, c);
        }
        assert!(c <= self.buf.len() - self.wp);
        self.wp += c;
    }

    fn wrap_if_needed(&mut self) {
        self.check_invariant();
        //eprintln!("wrap_if_needed wp {} rp {}", self.wp, self.rp);
        if self.wp == 0 {
        } else if self.rp == self.wp {
            self.rp = 0;
            self.wp = 0;
        } else if self.rp > self.buf.len() / 4 * 3 {
            if self.rp >= self.wp {
                eprintln!("ERROR wrap_if_needed wp {} rp {}", self.wp, self.rp);
            }
            assert!(self.rp < self.wp);
            let ll = self.len();
            unsafe {
                let src = &self.buf[self.rp..][0] as *const u8;
                let dst = &mut self.buf[..][0] as *mut u8;
                std::ptr::copy(src, dst, ll);
            }
            self.rp = 0;
            self.wp = ll;
        } else if self.wp == self.buf.len() {
            //eprintln!("ERROR no more space in buffer");
        }
    }
}

fn parse_lines(buf: &[u8]) -> Result<(Vec<Cow<str>>, usize), Error> {
    let mut ret = Vec::new();
    let mut i1 = 0;
    let mut i2 = 0;
    while i1 < buf.len() {
        if buf[i1] == 0xa {
            ret.push(String::from_utf8_lossy(&buf[i2..i1]));
            i1 += 1;
            i2 += 1;
        } else {
            break;
        }
    }
    while i1 < buf.len() {
        if buf[i1] == 0xa {
            const MAX: usize = 1024;
            if i2 + MAX < i1 {
                ret.push(String::from_utf8_lossy(&buf[i2..(i2 + MAX)]));
            } else {
                ret.push(String::from_utf8_lossy(&buf[i2..i1]));
            }
            i1 += 1;
            i2 = i1;
            while i1 < buf.len() {
                if buf[i1] == 0xa {
                    ret.push(String::from_utf8_lossy(&buf[i2..i1]));
                    i1 += 1;
                    i2 += 1;
                } else {
                    break;
                }
            }
        }
        i1 += 1;
    }
    Ok((ret, i2))
}

struct Fileinfo {
    path: PathBuf,
    name: String,
    len: u64,
}

fn file_list(dir: &Path) -> Result<Vec<Fileinfo>, Error> {
    let mut ret = Vec::new();
    let rd = fs::read_dir(&dir)?;
    for e in rd {
        let e = e?;
        let fnos = e.file_name();
        let fns = fnos.to_str().unwrap_or("");
        if fns.starts_with("info-20") && fns.ends_with(".log") {
            let meta = e.metadata()?;
            let info = Fileinfo {
                path: e.path(),
                name: fns.into(),
                len: meta.len(),
            };
            ret.push(info);
        }
    }
    ret.sort_by(|a, b| std::cmp::Ord::cmp(&a.name, &b.name));
    Ok(ret)
}

fn open_latest_or_new(dir: &Path) -> Result<BufWriter<fs::File>, Error> {
    let list = file_list(dir)?;
    if let Some(latest) = list.last() {
        if latest.len < MAX_PER_FILE {
            let ret = fs::OpenOptions::new().write(true).append(true).open(&latest.path)?;
            let ret = BufWriter::new(ret);
            return Ok(ret);
        }
    }
    next_file(dir)
}

fn next_file(dir: &Path) -> Result<BufWriter<fs::File>, Error> {
    let ts = chrono::Utc::now();
    let s = ts.format("%Y-%m-%d--%H-%M-%S").to_string();
    let mut ret = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .append(true)
        .open(dir.join(format!("info-{}.log", s)))?;
    if ret.seek(SeekFrom::Current(0))? != 0 {
        return Err(Error::with_msg_no_trace("new file already exists"));
    }
    let ret = BufWriter::new(ret);
    Ok(ret)
}

pub fn append_inner(dirname: &str, total_size_max: u64, mut stdin: Stdin) -> Result<(), Error> {
    let mut bytes_written = 0;
    let dir = PathBuf::from(dirname);
    let mut fout = open_latest_or_new(&dir)?;
    let mut buf = Buffer::new();
    loop {
        // Get some more data.
        let mut b = buf.writable();
        if false {
            write!(&mut fout, "[APPEND-WRITABLE] {} writable bytes\n", b.len())?;
        }
        if b.len() == 0 {
            write!(&mut fout, "[DISCARD] {} discarded bytes\n", b.len())?;
            buf.reset();
            b = buf.writable();
        }
        let b = b;
        if b.len() == 0 {
            let msg = format!("[ERROR DISCARD] still no space wp {} rp {}\n", buf.wp, buf.rp);
            write!(&mut fout, "{}", msg)?;
            let e = Error::with_msg_no_trace(msg);
            return Err(e);
        }
        let n1 = stdin.read(b)?;
        buf.inc_wp(n1);
        if false {
            eprintln!(
                "{} bytes read from stdin, total readable {} bytes",
                n1,
                buf.readable().len()
            );
        }
        if false {
            write!(
                &mut fout,
                "[APPEND-INFO] {} bytes read from stdin, total readable {} bytes\n",
                n1,
                buf.readable().len()
            )?;
        }
        match parse_lines(buf.readable()) {
            Ok((lines, n2)) => {
                if false {
                    eprintln!("parse_lines Ok n2 {n2} lines len {}", lines.len());
                }
                if false {
                    write!(&mut fout, "[APPEND-PARSED-LINES]: {}\n", lines.len())?;
                }
                for line in lines {
                    let j = line.as_bytes();
                    fout.write_all(j)?;
                    fout.write_all(b"\n")?;
                    bytes_written += j.len() as u64 + 1;
                }
                buf.advance(n2);
                if buf.len() > 256 {
                    write!(&mut fout, "[TRUNCATED LINE FOLLOWS]\n")?;
                    fout.write_all(&buf.readable()[..256])?;
                    fout.write_all(b"\n")?;
                    buf.reset();
                }
            }
            Err(e) => {
                eprintln!("ERROR parse fail: {e}");
                write!(&mut fout, "[APPEND-PARSE-ERROR]: {e}\n")?;
                return Ok(());
            }
        }
        fout.flush()?;
        if bytes_written >= (MAX_PER_FILE >> 3) {
            bytes_written = 0;
            let l1 = fout.seek(SeekFrom::End(0))?;
            if l1 >= MAX_PER_FILE {
                let rd = fs::read_dir(&dir)?;
                let mut w = Vec::new();
                for e in rd {
                    let e = e?;
                    let fnos = e.file_name();
                    let fns = fnos.to_str().unwrap();
                    if fns.starts_with("info-20") && fns.ends_with(".log") {
                        let meta = e.metadata()?;
                        w.push((e.path(), meta.len()));
                    }
                }
                w.sort_by(|a, b| std::cmp::Ord::cmp(a, b));
                for q in &w {
                    write!(&mut fout, "[APPEND-SEES-FILE] {}\n", q.0.to_string_lossy())?;
                }
                let mut lentot = w.iter().map(|g| g.1).fold(0, |a, x| a + x);
                write!(&mut fout, "[APPEND-LENTOT] {}\n", lentot)?;
                for q in w {
                    if lentot <= total_size_max {
                        break;
                    }
                    write!(&mut fout, "[APPEND-REMOVE] {} {}\n", q.1, q.0.to_string_lossy())?;
                    fs::remove_file(q.0)?;
                    if q.1 < lentot {
                        lentot -= q.1;
                    } else {
                        lentot = 0;
                    }
                }
                fout = next_file(&dir)?;
            };
        }
        if n1 == 0 {
            eprintln!("break because n1 == 0");
            break Ok(());
        }
    }
}

pub fn append(dirname: &str, total_size_max: u64, stdin: Stdin) -> Result<(), Error> {
    match append_inner(dirname, total_size_max, stdin) {
        Ok(k) => {
            eprintln!("append_inner has returned");
            Ok(k)
        }
        Err(e) => {
            eprintln!("ERROR append {e:?}");
            let dir = PathBuf::from(dirname);
            let mut fout = open_latest_or_new(&dir)?;
            let _ = write!(fout, "ERROR in append_inner: {e:?}");
            Err(e)
        }
    }
}

#[test]
fn test_vec_index() {
    let mut buf = vec![0u8; BUFFER_CAP];
    let a = &mut buf[BUFFER_CAP - 1..BUFFER_CAP];
    a[0] = 123;
    let a = &mut buf[BUFFER_CAP..];
    assert_eq!(a.len(), 0);
}

@@ -1,10 +1,10 @@
pub mod append;

use crate::log::*;
use err::Error;
use std::fmt;
use std::future::Future;
use std::panic;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::sync::Mutex;
use tokio::runtime::Runtime;
use tokio::task::JoinHandle;

@@ -72,22 +72,23 @@ pub fn get_runtime_opts(nworkers: usize, nblocking: usize) -> Arc<Runtime> {
    }
}

pub fn run<T, F>(fut: F) -> Result<T, Error>
pub fn run<T, E, F>(fut: F) -> Result<T, E>
where
    F: Future<Output = Result<T, Error>>,
    F: Future<Output = Result<T, E>>,
    E: fmt::Display,
{
    let runtime = get_runtime();
    match tracing_init() {
        Ok(_) => {}
        Err(e) => {
            eprintln!("TRACING: {e:?}");
        Err(()) => {
            eprintln!("ERROR tracing: can not init");
        }
    }
    let res = runtime.block_on(async { fut.await });
    match res {
        Ok(k) => Ok(k),
        Err(e) => {
            error!("Catched: {:?}", e);
            error!("ERROR catched: {e}");
            Err(e)
        }
    }
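
Note: run is now generic over the error type and requires only Display, so callers no longer need to funnel everything through taskrun's own Error. A sketch with a caller-defined error type:

use std::fmt;

#[derive(Debug)]
struct MyError(String);

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

fn main() {
    // The future's error type is inferred from the annotation.
    let res: Result<u32, MyError> = taskrun::run(async { Ok(42u32) });
    assert_eq!(res.unwrap(), 42);
}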

@@ -109,6 +110,7 @@ fn tracing_init_inner() -> Result<(), Error> {
    let fmt_layer = tracing_subscriber::fmt::Layer::new()
        .with_timer(timer)
        .with_target(true)
        .with_ansi(false)
        .with_thread_names(true)
        .with_filter(filter);
    let z = tracing_subscriber::registry().with(fmt_layer);
@@ -125,6 +127,7 @@ fn tracing_init_inner() -> Result<(), Error> {
    /*let fmt_layer = tracing_subscriber::fmt::Layer::new()
        .with_timer(timer)
        .with_target(true)
        .with_ansi(false)
        .with_thread_names(true)
        .with_filter(tracing_subscriber::EnvFilter::from_default_env());*/
    let url = "http://[::1]:6947";