Revive postingest

This commit is contained in:
Dominik Werder
2024-06-20 00:34:48 +02:00
parent ebc623436e
commit 995defaff3
18 changed files with 508 additions and 124 deletions

View File

@@ -165,6 +165,7 @@ pub enum ChannelConnectedInfo {
#[derive(Clone, Debug, Serialize)]
pub struct ChannelStateInfo {
pub stnow: SystemTime,
pub cssid: ChannelStatusSeriesId,
pub addr: SocketAddrV4,
pub series: Option<SeriesId>,
@@ -184,6 +185,10 @@ pub struct ChannelStateInfo {
pub item_recv_ivl_ema: Option<f32>,
pub interest_score: f32,
pub conf: ChannelConfig,
pub recv_last: SystemTime,
pub write_st_last: SystemTime,
pub write_mt_last: SystemTime,
pub write_lt_last: SystemTime,
}
mod ser_instant {
@@ -358,6 +363,7 @@ struct CreatedState {
ts_alive_last: Instant,
// Updated on monitoring, polling or when the channel config changes to reset the timeout
ts_activity_last: Instant,
st_activity_last: SystemTime,
ts_msp_last: u64,
ts_msp_grid_last: u32,
inserted_in_ts_msp: u64,
@@ -374,11 +380,15 @@ struct CreatedState {
account_emit_last: TsMs,
account_count: u64,
account_bytes: u64,
dw_st_last: SystemTime,
dw_mt_last: SystemTime,
dw_lt_last: SystemTime,
}
impl CreatedState {
fn dummy() -> Self {
let tsnow = Instant::now();
let stnow = SystemTime::now();
Self {
cssid: ChannelStatusSeriesId::new(0),
cid: Cid(0),
@@ -388,6 +398,7 @@ impl CreatedState {
ts_created: tsnow,
ts_alive_last: tsnow,
ts_activity_last: tsnow,
st_activity_last: stnow,
ts_msp_last: 0,
ts_msp_grid_last: 0,
inserted_in_ts_msp: 0,
@@ -404,6 +415,9 @@ impl CreatedState {
account_emit_last: TsMs(0),
account_count: 0,
account_bytes: 0,
dw_st_last: SystemTime::UNIX_EPOCH,
dw_mt_last: SystemTime::UNIX_EPOCH,
dw_lt_last: SystemTime::UNIX_EPOCH,
}
}
}
@@ -438,7 +452,13 @@ impl ChannelConf {
}
impl ChannelState {
fn to_info(&self, cssid: ChannelStatusSeriesId, addr: SocketAddrV4, conf: ChannelConfig) -> ChannelStateInfo {
fn to_info(
&self,
cssid: ChannelStatusSeriesId,
addr: SocketAddrV4,
conf: ChannelConfig,
stnow: SystemTime,
) -> ChannelStateInfo {
let channel_connected_info = match self {
ChannelState::Init(..) => ChannelConnectedInfo::Disconnected,
ChannelState::Creating { .. } => ChannelConnectedInfo::Connecting,
@@ -472,6 +492,19 @@ impl ChannelState {
ChannelState::Writable(s) => Some(s.channel.recv_bytes),
_ => None,
};
let (recv_last, write_st_last, write_mt_last, write_lt_last) = match self {
ChannelState::Writable(s) => {
let a = s.channel.st_activity_last;
let b = s.channel.dw_st_last;
let c = s.channel.dw_mt_last;
let d = s.channel.dw_lt_last;
(a, b, c, d)
}
_ => {
let a = SystemTime::UNIX_EPOCH;
(a, a, a, a)
}
};
let item_recv_ivl_ema = match self {
ChannelState::Writable(s) => {
let ema = s.channel.item_recv_ivl_ema.ema();
@@ -489,6 +522,7 @@ impl ChannelState {
};
let interest_score = 1. / item_recv_ivl_ema.unwrap_or(1e10).max(1e-6).min(1e10);
ChannelStateInfo {
stnow,
cssid,
addr,
series,
@@ -502,6 +536,10 @@ impl ChannelState {
item_recv_ivl_ema,
interest_score,
conf,
recv_last,
write_st_last,
write_mt_last,
write_lt_last,
}
}
@@ -749,7 +787,8 @@ pub enum CaConnEventValue {
pub enum EndOfStreamReason {
UnspecifiedReason,
Error(Error),
ConnectFail,
ConnectRefused,
ConnectTimeout,
OnCommand,
RemoteClosed,
IocTimeout,
@@ -910,8 +949,12 @@ impl CaConn {
fn trigger_shutdown(&mut self, reason: ShutdownReason) {
let channel_reason = match &reason {
ShutdownReason::ConnectFail => {
self.state = CaConnState::Shutdown(EndOfStreamReason::ConnectFail);
ShutdownReason::ConnectRefused => {
self.state = CaConnState::Shutdown(EndOfStreamReason::ConnectRefused);
ChannelStatusClosedReason::ConnectFail
}
ShutdownReason::ConnectTimeout => {
self.state = CaConnState::Shutdown(EndOfStreamReason::ConnectTimeout);
ChannelStatusClosedReason::ConnectFail
}
ShutdownReason::IoError => {
@@ -1319,7 +1362,7 @@ impl CaConn {
// return Err(Error::with_msg_no_trace());
return Ok(());
};
// debug!("handle_event_add_res {ev:?}");
trace!("handle_event_add_res {:?}", ch_s.cssid());
match ch_s {
ChannelState::Writable(st) => {
// debug!(
@@ -1608,10 +1651,10 @@ impl CaConn {
);
crst.ts_alive_last = tsnow;
crst.ts_activity_last = tsnow;
crst.st_activity_last = stnow;
crst.item_recv_ivl_ema.tick(tsnow);
crst.recv_count += 1;
crst.recv_bytes += payload_len as u64;
let series = writer.sid();
// TODO should attach these counters already to Writable state.
let ts_local = {
let epoch = stnow.duration_since(std::time::UNIX_EPOCH).unwrap_or(Duration::ZERO);
@@ -1631,7 +1674,16 @@ impl CaConn {
Self::check_ev_value_data(&value.data, &writer.scalar_type())?;
{
let val: DataValue = value.data.into();
writer.write(TsNano::from_ns(ts), TsNano::from_ns(ts_local), val, iqdqs)?;
let ((dwst, dwmt, dwlt),) = writer.write(TsNano::from_ns(ts), TsNano::from_ns(ts_local), val, iqdqs)?;
if dwst {
crst.dw_st_last = stnow;
}
if dwmt {
crst.dw_mt_last = stnow;
}
if dwlt {
crst.dw_lt_last = stnow;
}
}
}
if false {
@@ -1641,6 +1693,7 @@ impl CaConn {
if tsnow.duration_since(crst.insert_recv_ivl_last) >= Duration::from_millis(10000) {
crst.insert_recv_ivl_last = tsnow;
let ema = crst.insert_item_ivl_ema.ema();
let _ = ema;
}
if crst.muted_before == 0 {}
crst.muted_before = 1;
@@ -1668,6 +1721,7 @@ impl CaConn {
},
CaDataScalarValue::I16(..) => match &scalar_type {
ScalarType::I16 => {}
ScalarType::Enum => {}
_ => {
error!("MISMATCH got i16 exp {:?}", scalar_type);
}
@@ -1998,7 +2052,8 @@ impl CaConn {
// TODO count this unexpected case.
}
CaMsgTy::CreateChanRes(k) => {
self.handle_create_chan_res(k, tsnow)?;
let stnow = SystemTime::now();
self.handle_create_chan_res(k, tsnow, stnow)?;
cx.waker().wake_by_ref();
}
CaMsgTy::EventAddRes(ev) => {
@@ -2095,7 +2150,12 @@ impl CaConn {
res.map_err(Into::into)
}
fn handle_create_chan_res(&mut self, k: proto::CreateChanRes, tsnow: Instant) -> Result<(), Error> {
fn handle_create_chan_res(
&mut self,
k: proto::CreateChanRes,
tsnow: Instant,
stnow: SystemTime,
) -> Result<(), Error> {
let cid = Cid(k.cid);
let sid = Sid(k.sid);
let conf = if let Some(x) = self.channels.get_mut(&cid) {
@@ -2132,6 +2192,7 @@ impl CaConn {
ts_created: tsnow,
ts_alive_last: tsnow,
ts_activity_last: tsnow,
st_activity_last: stnow,
ts_msp_last: 0,
ts_msp_grid_last: 0,
inserted_in_ts_msp: u64::MAX,
@@ -2148,6 +2209,9 @@ impl CaConn {
account_emit_last: TsMs::from_ms_u64(0),
account_count: 0,
account_bytes: 0,
dw_st_last: SystemTime::UNIX_EPOCH,
dw_mt_last: SystemTime::UNIX_EPOCH,
dw_lt_last: SystemTime::UNIX_EPOCH,
};
*chst = ChannelState::MakingSeriesWriter(MakingSeriesWriterState { tsbeg: tsnow, channel });
let job = EstablishWorkerJob::new(
@@ -2218,6 +2282,7 @@ impl CaConn {
Ok(Ready(Some(())))
}
Ok(Err(e)) => {
use std::io::ErrorKind;
info!("error connect to {addr} {e}");
let addr = addr.clone();
self.iqdqs
@@ -2226,7 +2291,11 @@ impl CaConn {
addr,
status: ConnectionStatus::ConnectError,
}))?;
self.trigger_shutdown(ShutdownReason::IoError);
let reason = match e.kind() {
ErrorKind::ConnectionRefused => ShutdownReason::ConnectRefused,
_ => ShutdownReason::IoError,
};
self.trigger_shutdown(reason);
Ok(Ready(Some(())))
}
Err(e) => {
@@ -2239,7 +2308,7 @@ impl CaConn {
addr,
status: ConnectionStatus::ConnectTimeout,
}))?;
self.trigger_shutdown(ShutdownReason::IocTimeout);
self.trigger_shutdown(ShutdownReason::ConnectTimeout);
Ok(Ready(Some(())))
}
}
@@ -2372,10 +2441,11 @@ impl CaConn {
}
fn emit_channel_status(&mut self) -> Result<(), Error> {
let stnow = SystemTime::now();
let mut channel_statuses = BTreeMap::new();
for (_, conf) in self.channels.iter() {
let chst = &conf.state;
let chinfo = chst.to_info(chst.cssid(), self.remote_addr_dbg, conf.conf.clone());
let chinfo = chst.to_info(chst.cssid(), self.remote_addr_dbg, conf.conf.clone(), stnow);
channel_statuses.insert(chst.cssid(), chinfo);
}
// trace2!("{:?}", channel_statuses);
@@ -2407,21 +2477,21 @@ impl CaConn {
ChannelState::Writable(st1) => {
let ch = &mut st1.channel;
if ch.account_emit_last != msp {
ch.account_emit_last = msp;
if ch.account_count != 0 {
let series_id = ch.cssid.id();
let series = st1.writer.sid();
let count = ch.account_count as i64;
let bytes = ch.account_bytes as i64;
ch.account_count = 0;
ch.account_bytes = 0;
let item = QueryItem::Accounting(Accounting {
part: (series_id & 0xff) as i32,
part: (series.id() & 0xff) as i32,
ts: msp,
series: SeriesId::new(series_id),
series,
count,
bytes,
});
self.iqdqs.emit_status_item(item)?;
ch.account_emit_last = msp;
}
}
}

View File

@@ -946,7 +946,8 @@ impl CaConnSet {
warn!("received error {addr} {e}");
self.handle_connect_fail(addr)?
}
EndOfStreamReason::ConnectFail => self.handle_connect_fail(addr)?,
EndOfStreamReason::ConnectRefused => self.handle_connect_fail(addr)?,
EndOfStreamReason::ConnectTimeout => self.handle_connect_fail(addr)?,
EndOfStreamReason::OnCommand => {
// warn!("TODO make sure no channel is in state which could trigger health timeout")
}
@@ -1103,10 +1104,12 @@ impl CaConnSet {
let mut eos_reason = None;
while let Some(item) = conn.next().await {
trace!("ca_conn_item_merge_inner item {}", item.desc_short());
if let Some(x) = eos_reason {
let e = Error::with_msg_no_trace(format!("CaConn delivered already eos {addr} {x:?}"));
error!("{e}");
return Err(e);
if let Some(x) = &eos_reason {
// TODO enable again, should not happen.
// let e = Error::with_msg_no_trace(format!("CaConn delivered already eos {addr} {x:?}"));
// error!("{e}");
// return Err(e);
warn!("CaConn {addr} EOS reason [{x:?}] after [{eos_reason:?}]");
}
stats.item_count.inc();
match item.value {
@@ -1497,25 +1500,29 @@ impl CaConnSet {
fn try_push_ca_conn_cmds(&mut self, cx: &mut Context) -> Result<(), Error> {
use Poll::*;
for (_, v) in self.ca_conn_ress.iter_mut() {
for (addr, v) in self.ca_conn_ress.iter_mut() {
let tx = &mut v.sender;
loop {
if false {
if v.cmd_queue.len() != 0 || tx.is_sending() {
debug!("try_push_ca_conn_cmds {:?} {:?}", v.cmd_queue.len(), tx.len());
}
}
break if tx.is_sending() {
match tx.poll_unpin(cx) {
Ready(Ok(())) => {
self.stats.try_push_ca_conn_cmds_sent.inc();
continue;
}
Ready(Err(e)) => {
error!("try_push_ca_conn_cmds {e}");
return Err(Error::with_msg_no_trace(format!("{e}")));
}
Pending => (),
Ready(Err(e)) => match e {
scywr::senderpolling::Error::NoSendInProgress => {
error!("try_push_ca_conn_cmds {e}");
return Err(Error::with_msg_no_trace(format!("{e}")));
}
scywr::senderpolling::Error::Closed(_) => {
// TODO
// Should be nothing to do here.
// The connection ended, which CaConnSet notices anyway.
// self.handle_connect_fail(addr)?;
self.stats.try_push_ca_conn_cmds_closed().inc();
}
},
Pending => {}
}
} else if let Some(item) = v.cmd_queue.pop_front() {
tx.as_mut().send_pin(item);

View File

@@ -244,7 +244,7 @@ pub enum CaDataScalarValue {
I32(i32),
F32(f32),
F64(f64),
Enum(i16),
Enum(i16, String),
String(String),
// TODO remove, CA has no bool, make new enum for other use cases.
Bool(bool),
@@ -259,7 +259,7 @@ impl From<CaDataScalarValue> for scywr::iteminsertqueue::ScalarValue {
CaDataScalarValue::I32(x) => ScalarValue::I32(x),
CaDataScalarValue::F32(x) => ScalarValue::F32(x),
CaDataScalarValue::F64(x) => ScalarValue::F64(x),
CaDataScalarValue::Enum(x) => ScalarValue::Enum(x),
CaDataScalarValue::Enum(x, y) => ScalarValue::Enum(x, y),
CaDataScalarValue::String(x) => ScalarValue::String(x),
CaDataScalarValue::Bool(x) => ScalarValue::Bool(x),
}

View File

@@ -19,6 +19,7 @@ pub struct CaIngestOpts {
backend: String,
channels: Option<PathBuf>,
api_bind: String,
udp_broadcast_bind: Option<String>,
search: Vec<String>,
#[serde(default)]
search_blacklist: Vec<String>,
@@ -53,6 +54,10 @@ impl CaIngestOpts {
self.api_bind.clone()
}
pub fn udp_broadcast_bind(&self) -> Option<&str> {
self.udp_broadcast_bind.as_ref().map(String::as_str)
}
pub fn postgresql_config(&self) -> &Database {
&self.postgresql
}

View File

@@ -1,4 +1,5 @@
#![allow(unused)]
pub mod ingest;
pub mod postingest;
pub mod status;
@@ -18,14 +19,17 @@ use axum::http;
use axum::response::IntoResponse;
use axum::response::Response;
use bytes::Bytes;
use dbpg::seriesbychannel::ChannelInfoQuery;
use err::Error;
use http::Request;
use http::StatusCode;
use http_body::Body;
use log::*;
use scywr::insertqueues::InsertQueuesTx;
use scywr::iteminsertqueue::QueryItem;
use serde::Deserialize;
use serde::Serialize;
use serde_json::json;
use stats::CaConnSetStats;
use stats::CaConnStats;
use stats::CaConnStatsAgg;
@@ -37,12 +41,17 @@ use stats::IocFinderStats;
use stats::SeriesByChannelStats;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::pin::Pin;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use std::time::Duration;
use taskrun::tokio;
use taskrun::tokio::net::TcpListener;
struct PublicErrorMsg(String);
@@ -59,15 +68,45 @@ impl ToPublicErrorMsg for err::Error {
}
}
/// Minimal one-shot HTTP response body: yields its buffered bytes as a single
/// frame on the first poll, then reports end-of-stream.
pub struct Res123 {
    // Taken on the first poll; `None` afterwards marks the body as exhausted.
    content: Option<Bytes>,
}
impl http_body::Body for Res123 {
    type Data = Bytes;
    type Error = Error;
    /// Emit the buffered content once as a data frame; every later poll
    /// returns `Ready(None)` (body finished). Never returns `Pending`.
    fn poll_frame(
        mut self: Pin<&mut Self>,
        cx: &mut Context,
    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {
        use Poll::*;
        // `take` moves the bytes out, so the state transitions to `None`
        // without needing any extra flag.
        match self.content.take() {
            Some(x) => Ready(Some(Ok(http_body::Frame::data(x)))),
            None => Ready(None),
        }
    }
}
impl IntoResponse for PublicErrorMsg {
fn into_response(self) -> axum::response::Response {
let msgbytes = self.0.as_bytes();
let body = axum::body::Bytes::from(msgbytes.to_vec());
let body = axum::body::Full::new(body);
let body = body.map_err(|_| axum::Error::new(Error::from_string("error while trying to create fixed body")));
let body = axum::body::BoxBody::new(body);
let x = axum::response::Response::builder().status(500).body(body).unwrap();
x
// let body = axum::body::Bytes::from(msgbytes.to_vec());
// let body = http_body::Frame::data(body);
// let body = body.map_err(|_| axum::Error::new(Error::from_string("error while trying to create fixed body")));
// let body = http_body::combinators::BoxBody::new(body);
// let body = axum::body::Body::new(body);
// let x = axum::response::Response::builder().status(500).body(body).unwrap();
// return x;
// x
// let boddat = http_body::Empty::new();
let res: Res123 = Res123 {
content: Some(Bytes::from(self.0.as_bytes().to_vec())),
};
let bod = axum::body::Body::new(res);
// let ret: http::Response<Bytes> = todo!();
let ret = http::Response::builder().status(500).body(bod).unwrap();
ret
}
}
@@ -268,10 +307,30 @@ fn metrics(stats_set: &StatsSet) -> String {
[s1, s2, s3, s4, s5, s6, s7].join("")
}
fn make_routes(dcom: Arc<DaemonComm>, connset_cmd_tx: Sender<CaConnSetEvent>, stats_set: StatsSet) -> axum::Router {
/// Shared resources handed to the HTTP route handlers (cloned into closures
/// via `Arc<RoutesResources>` in `make_routes`).
pub struct RoutesResources {
    // Backend name passed to `SeriesWriter::establish` by the ingest handler.
    backend: String,
    // Queue towards the channel-info worker (series id lookup/creation).
    worker_tx: Sender<ChannelInfoQuery>,
    // Handle to the insert queues; cloned per request for `send_all`.
    iqtx: InsertQueuesTx,
}
impl RoutesResources {
    /// Bundle the handler resources; all arguments are moved in unchanged.
    pub fn new(backend: String, worker_tx: Sender<ChannelInfoQuery>, iqtx: InsertQueuesTx) -> Self {
        Self {
            backend,
            worker_tx,
            iqtx,
        }
    }
}
fn make_routes(
rres: Arc<RoutesResources>,
dcom: Arc<DaemonComm>,
connset_cmd_tx: Sender<CaConnSetEvent>,
stats_set: StatsSet,
) -> axum::Router {
use axum::extract;
use axum::routing::get;
use axum::routing::put;
use axum::routing::{get, post, put};
use axum::Router;
use http::StatusCode;
@@ -290,12 +349,51 @@ fn make_routes(dcom: Arc<DaemonComm>, connset_cmd_tx: Sender<CaConnSetEvent>, st
)
.route("/path3/", get(|| async { (StatusCode::OK, format!("Hello there!")) })),
)
.route(
"/daqingest/metrics",
get({
let stats_set = stats_set.clone();
|| async move { metrics(&stats_set) }
}),
.nest(
"/daqingest",
Router::new()
.fallback(|| async { axum::Json(json!({"subcommands":["channel", "metrics"]})) })
.nest(
"/metrics",
Router::new().fallback(|| async { StatusCode::NOT_FOUND }).route(
"/",
get({
let stats_set = stats_set.clone();
|| async move { metrics(&stats_set) }
}),
),
)
.nest(
"/channel",
Router::new()
.fallback(|| async { axum::Json(json!({"subcommands":["states"]})) })
.route(
"/states",
get({
let tx = connset_cmd_tx.clone();
|Query(params): Query<HashMap<String, String>>| status::channel_states(params, tx)
}),
)
.route(
"/add",
get({
let dcom = dcom.clone();
|Query(params): Query<HashMap<String, String>>| channel_add(params, dcom)
}),
),
)
.nest(
"/ingest",
Router::new().route(
"/v1",
post({
let rres = rres.clone();
move |(params, body): (Query<HashMap<String, String>>, axum::body::Body)| {
ingest::post_v01((params, body), rres)
}
}),
),
),
)
.route(
"/daqingest/metricbeat",
@@ -315,13 +413,6 @@ fn make_routes(dcom: Arc<DaemonComm>, connset_cmd_tx: Sender<CaConnSetEvent>, st
|Query(params): Query<HashMap<String, String>>| find_channel(params, dcom)
}),
)
.route(
"/daqingest/channel/states",
get({
let tx = connset_cmd_tx.clone();
|Query(params): Query<HashMap<String, String>>| status::channel_states(params, tx)
}),
)
.route(
"/daqingest/private/channel/states",
get({
@@ -329,13 +420,6 @@ fn make_routes(dcom: Arc<DaemonComm>, connset_cmd_tx: Sender<CaConnSetEvent>, st
|Query(params): Query<HashMap<String, String>>| private_channel_states(params, tx)
}),
)
.route(
"/daqingest/channel/add",
get({
let dcom = dcom.clone();
|Query(params): Query<HashMap<String, String>>| channel_add(params, dcom)
}),
)
.route(
"/daqingest/channel/remove",
get({
@@ -393,20 +477,18 @@ pub async fn metrics_service(
connset_cmd_tx: Sender<CaConnSetEvent>,
stats_set: StatsSet,
shutdown_signal: Receiver<u32>,
rres: Arc<RoutesResources>,
) -> Result<(), Error> {
info!("metrics service start {bind_to}");
let addr = bind_to.parse().map_err(Error::from_string)?;
let router = make_routes(dcom, connset_cmd_tx, stats_set).into_make_service();
axum::Server::bind(&addr)
.serve(router)
let addr: SocketAddr = bind_to.parse().map_err(Error::from_string)?;
let router = make_routes(rres, dcom, connset_cmd_tx, stats_set).into_make_service();
let listener = TcpListener::bind(addr).await?;
// into_make_service_with_connect_info
axum::serve(listener, router)
.with_graceful_shutdown(async move {
let _ = shutdown_signal.recv().await;
})
.await
.inspect(|x| {
info!("metrics service finished with {x:?}");
})
.map_err(Error::from_string)?;
.await?;
Ok(())
}

View File

@@ -0,0 +1,105 @@
use super::RoutesResources;
use axum::extract::FromRequest;
use axum::extract::Query;
use axum::Json;
use err::thiserror;
use err::ThisError;
use futures_util::StreamExt;
use futures_util::TryStreamExt;
use items_2::eventsdim0::EventsDim0;
use netpod::log::*;
use netpod::ScalarType;
use netpod::Shape;
use netpod::TsNano;
use scywr::insertqueues::InsertDeques;
use scywr::iteminsertqueue::DataValue;
use scywr::iteminsertqueue::QueryItem;
use scywr::iteminsertqueue::ScalarValue;
use serieswriter::writer::SeriesWriter;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::io::Cursor;
use std::sync::Arc;
use std::time::SystemTime;
use streams::framed_bytes::FramedBytesStream;
// use core::io::BorrowedBuf;
#[derive(Debug, ThisError)]
pub enum Error {
Logic,
SeriesWriter(#[from] serieswriter::writer::Error),
MissingChannelName,
SendError,
Decode,
FramedBytes(#[from] streams::framed_bytes::Error),
}
struct BodyRead {}
/// HTTP handler for POST /ingest/v1.
///
/// Delegates to `post_v01_try`; any error is reported back to the client as
/// a JSON string payload (status stays 200) rather than an HTTP error code.
pub async fn post_v01(
    (Query(params), body): (Query<HashMap<String, String>>, axum::body::Body),
    rres: Arc<RoutesResources>,
) -> Json<serde_json::Value> {
    let res = post_v01_try(params, body, rres).await;
    res.unwrap_or_else(|e| Json(serde_json::Value::String(e.to_string())))
}
/// Fallible core of the POST /ingest/v1 handler: establishes a series writer
/// for the channel named in the query string, decodes the request body as a
/// framed stream of CBOR-encoded `EventsDim0<i16>` batches, writes each event,
/// and flushes the insert queues after every frame and once more at the end.
///
/// Returns `{"result": true}` on success.
async fn post_v01_try(
    params: HashMap<String, String>,
    body: axum::body::Body,
    rres: Arc<RoutesResources>,
) -> Result<Json<serde_json::Value>, Error> {
    info!("params {:?}", params);
    let stnow = SystemTime::now();
    let worker_tx = rres.worker_tx.clone();
    let backend = rres.backend.clone();
    // `channelName` is the only required query parameter.
    let channel = params.get("channelName").ok_or(Error::MissingChannelName)?.into();
    // NOTE(review): scalar type and shape are hard-coded — only scalar i16
    // channels can be ingested through this endpoint for now; presumably
    // these should come from the request. Confirm.
    let scalar_type = ScalarType::I16;
    let shape = Shape::Scalar;
    info!("establishing...");
    let mut writer = SeriesWriter::establish(worker_tx, backend, channel, scalar_type, shape, stnow).await?;
    let mut iqdqs = InsertDeques::new();
    let mut iqtx = rres.iqtx.clone();
    // iqtx.send_all(&mut iqdqs).await.map_err(|_| Error::SendError)?;
    // let deque = &mut iqdqs.st_rf3_rx;
    // Body arrives as length-framed chunks; stream errors are mapped to the
    // framed-bytes error type so `?` can convert them.
    let mut frames = FramedBytesStream::new(body.into_data_stream().map_err(|_| streams::framed_bytes::Error::Logic));
    while let Some(frame) = frames.try_next().await? {
        info!("got frame len {}", frame.len());
        // Each frame is one CBOR-encoded batch of scalar i16 events.
        let evs: EventsDim0<i16> = ciborium::de::from_reader(Cursor::new(frame)).map_err(|_| Error::Decode)?;
        info!("see events {:?}", evs);
        let deque = &mut iqdqs.st_rf3_rx;
        for (i, (&ts, &val)) in evs.tss.iter().zip(evs.values.iter()).enumerate() {
            info!("ev {:6} {:20} {:20}", i, ts, val);
            let val = DataValue::Scalar(ScalarValue::I16(val));
            // Event timestamp is reused as the local receive timestamp.
            writer.write(TsNano::from_ns(ts), TsNano::from_ns(ts), val, deque)?;
        }
        // Flush accumulated items after every frame.
        iqtx.send_all(&mut iqdqs).await.map_err(|_| Error::SendError)?;
    }
    // Final tick/flush so items buffered inside the writer are emitted too.
    let deque = &mut iqdqs.st_rf3_rx;
    finish_writers(vec![&mut writer], deque)?;
    iqtx.send_all(&mut iqdqs).await.map_err(|_| Error::SendError)?;
    let ret = Json(serde_json::json!({
        "result": true,
    }));
    Ok(ret)
}
fn tick_writers(sws: Vec<&mut SeriesWriter>, deque: &mut VecDeque<QueryItem>) -> Result<(), Error> {
for sw in sws {
sw.tick(deque)?;
}
Ok(())
}
/// Final flush of the given writers before the response is produced.
///
/// NOTE(review): currently byte-identical to `tick_writers` — it only ticks
/// each writer; presumably a dedicated finish/flush call on `SeriesWriter`
/// is still to be added. Confirm intended semantics.
fn finish_writers(sws: Vec<&mut SeriesWriter>, deque: &mut VecDeque<QueryItem>) -> Result<(), Error> {
    for sw in sws {
        sw.tick(deque)?;
    }
    Ok(())
}

View File

@@ -7,6 +7,7 @@ use serde::Serialize;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::time::SystemTime;
#[derive(Debug, Serialize)]
pub struct ChannelStates {
@@ -20,6 +21,20 @@ struct ChannelState {
archiving_configuration: ChannelConfig,
recv_count: u64,
recv_bytes: u64,
#[serde(with = "humantime_serde", skip_serializing_if = "system_time_epoch")]
recv_last: SystemTime,
#[serde(with = "humantime_serde", skip_serializing_if = "system_time_epoch")]
write_st_last: SystemTime,
#[serde(with = "humantime_serde", skip_serializing_if = "system_time_epoch")]
write_mt_last: SystemTime,
#[serde(with = "humantime_serde", skip_serializing_if = "system_time_epoch")]
write_lt_last: SystemTime,
#[serde(with = "humantime_serde", skip_serializing_if = "system_time_epoch")]
updated: SystemTime,
}
/// Serde `skip_serializing_if` predicate: a timestamp equal to the Unix
/// epoch is used as a "never happened" placeholder and is omitted from the
/// serialized output.
fn system_time_epoch(x: &SystemTime) -> bool {
    SystemTime::UNIX_EPOCH == *x
}
#[derive(Debug, Serialize)]
@@ -62,6 +77,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}
@@ -72,6 +92,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}
@@ -85,6 +110,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}
@@ -98,6 +128,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}
@@ -111,6 +146,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}
@@ -128,6 +168,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count,
recv_bytes,
recv_last: st6.recv_last,
write_st_last: st6.write_st_last,
write_mt_last: st6.write_mt_last,
write_lt_last: st6.write_lt_last,
updated: st6.stnow,
};
states.channels.insert(k, chst);
}
@@ -138,6 +183,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count,
recv_bytes,
recv_last: st6.recv_last,
write_st_last: st6.write_st_last,
write_mt_last: st6.write_mt_last,
write_lt_last: st6.write_lt_last,
updated: st6.stnow,
};
states.channels.insert(k, chst);
}
@@ -148,6 +198,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count,
recv_bytes,
recv_last: st6.recv_last,
write_st_last: st6.write_st_last,
write_mt_last: st6.write_mt_last,
write_lt_last: st6.write_lt_last,
updated: st6.stnow,
};
states.channels.insert(k, chst);
}
@@ -158,6 +213,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count,
recv_bytes,
recv_last: st6.recv_last,
write_st_last: st6.write_st_last,
write_mt_last: st6.write_mt_last,
write_lt_last: st6.write_lt_last,
updated: st6.stnow,
};
states.channels.insert(k, chst);
}
@@ -174,6 +234,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}
@@ -184,6 +249,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}
@@ -194,6 +264,11 @@ pub async fn channel_states(params: HashMap<String, String>, tx: Sender<CaConnSe
archiving_configuration: st1.config,
recv_count: 0,
recv_bytes: 0,
recv_last: SystemTime::UNIX_EPOCH,
write_st_last: SystemTime::UNIX_EPOCH,
write_mt_last: SystemTime::UNIX_EPOCH,
write_lt_last: SystemTime::UNIX_EPOCH,
updated: SystemTime::UNIX_EPOCH,
};
states.channels.insert(k, chst);
}