Remove old stats struct

Drop IocFinderStats and its plumbing from the connection set, the IOC finder, and the daemon metrics code; the affected counter updates are left as commented-out calls for now.
@@ -2337,7 +2337,7 @@ impl CaConn {
 Self::check_ev_value_data(&value.data, &writer.scalar_type())?;
 crst.muted_before = 0;
 crst.insert_item_ivl_ema.tick(tsnow);
-// binwriter.ingest(tsev, value.f32_for_binning(), iqdqs)?;
+binwriter.ingest(tsev, value.f32_for_binning(), iqdqs)?;
 {
 let wres = writer.write(CaWriterValue::new(value, crst), tscaproto, tsev, iqdqs)?;
 crst.status_emit_count += wres.nstatus() as u64;

@@ -44,7 +44,6 @@ use statemap::ConnectionState;
 use statemap::ConnectionStateValue;
 use statemap::WithStatusSeriesIdState;
 use statemap::WithStatusSeriesIdStateInner;
-use stats::IocFinderStats;
 use std::collections::BTreeMap;
 use std::collections::VecDeque;
 use std::fmt;
@@ -247,7 +246,6 @@ pub enum CaConnSetItem {
 pub struct CaConnSetCtrl {
 tx: Sender<CaConnSetEvent>,
 rx: Receiver<CaConnSetItem>,
-ioc_finder_stats: Arc<IocFinderStats>,
 jh: JoinHandle<Result<(), Error>>,
 }

@@ -305,10 +303,6 @@ impl CaConnSetCtrl {
 self.jh.await??;
 Ok(())
 }
-
-pub fn ioc_finder_stats(&self) -> &Arc<IocFinderStats> {
-&self.ioc_finder_stats
-}
 }

 #[derive(Debug)]
@@ -432,14 +426,8 @@ impl CaConnSet {
 let (connset_inp_tx, connset_inp_rx) = async_channel::bounded(200);
 let (connset_out_tx, connset_out_rx) = async_channel::bounded(200);
 let (find_ioc_res_tx, find_ioc_res_rx) = async_channel::bounded(400);
-let ioc_finder_stats = Arc::new(IocFinderStats::new());
-let (find_ioc_query_tx, ioc_finder_jh) = super::finder::start_finder(
-find_ioc_res_tx.clone(),
-backend.clone(),
-ingest_opts,
-ioc_finder_stats.clone(),
-)
-.unwrap();
+let (find_ioc_query_tx, ioc_finder_jh) =
+super::finder::start_finder(find_ioc_res_tx.clone(), backend.clone(), ingest_opts).unwrap();
 let (channel_info_res_tx, channel_info_res_rx) = async_channel::bounded(400);
 let connset = Self {
 ticker: Self::new_self_ticker(),
@@ -485,7 +473,6 @@ impl CaConnSet {
 CaConnSetCtrl {
 tx: connset_inp_tx,
 rx: connset_out_rx,
-ioc_finder_stats,
 jh,
 }
 }

@@ -12,9 +12,7 @@ use dbpg::iocindex::IocSearchIndexWorker;
 use dbpg::postgres::Row as PgRow;
 use log::*;
 use netpod::Database;
-use stats::IocFinderStats;
 use std::collections::VecDeque;
-use std::sync::Arc;
 use std::time::Duration;
 use std::time::Instant;
 use taskrun::tokio;
@@ -75,10 +73,9 @@ pub fn start_finder(
 tx: Sender<VecDeque<FindIocRes>>,
 backend: String,
 opts: CaIngestOpts,
-stats: Arc<IocFinderStats>,
 ) -> Result<(Sender<IocAddrQuery>, JoinHandle<Result<(), Error>>), Error> {
 let (qtx, qrx) = async_channel::bounded(CURRENT_SEARCH_PENDING_MAX);
-let jh = taskrun::spawn(finder_full(qrx, tx, backend, opts, stats));
+let jh = taskrun::spawn(finder_full(qrx, tx, backend, opts));
 Ok((qtx, jh))
 }

@@ -87,17 +84,10 @@ async fn finder_full(
 tx: Sender<VecDeque<FindIocRes>>,
 backend: String,
 opts: CaIngestOpts,
-stats: Arc<IocFinderStats>,
 ) -> Result<(), Error> {
 let (tx1, rx1) = async_channel::bounded(20);
-let jh1 = taskrun::spawn(finder_worker(
-qrx,
-tx1,
-backend,
-opts.postgresql_config().clone(),
-stats.clone(),
-));
-let jh2 = taskrun::spawn(finder_network_if_not_found(rx1, tx, opts.clone(), stats));
+let jh1 = taskrun::spawn(finder_worker(qrx, tx1, backend, opts.postgresql_config().clone()));
+let jh2 = taskrun::spawn(finder_network_if_not_found(rx1, tx, opts.clone()));
 jh1.await??;
 trace!("finder::finder_full awaited A");
 jh2.await??;
@@ -111,7 +101,6 @@ async fn finder_worker(
 tx: Sender<VecDeque<FindIocRes>>,
 backend: String,
 db: Database,
-stats: Arc<IocFinderStats>,
 ) -> Result<(), Error> {
 // TODO do something with join handle
 let (batch_rx, jh_batch) =
@@ -123,7 +112,6 @@ async fn finder_worker(
 tx.clone(),
 backend.clone(),
 db.clone(),
-stats.clone(),
 ));
 jhs.push(jh);
 }
@@ -141,7 +129,6 @@ async fn finder_worker_single(
 tx: Sender<VecDeque<FindIocRes>>,
 backend: String,
 db: Database,
-stats: Arc<IocFinderStats>,
 ) -> Result<(), Error> {
 debug!("finder_worker_single make_pg_client");
 let (pg, jh) = make_pg_client(&db).await?;
@@ -159,8 +146,9 @@ async fn finder_worker_single(
 for e in batch.iter().filter(|x| series::dbg::dbg_chn(x.name())) {
 info!("searching database for {:?}", e);
 }
-stats.dbsearcher_batch_recv().inc();
-stats.dbsearcher_item_recv().add(batch.len() as _);
+// TODO
+// stats.dbsearcher_batch_recv().inc();
+// stats.dbsearcher_item_recv().add(batch.len() as _);
 let ts1 = Instant::now();
 let (batch, pass_through) = batch.into_iter().fold((Vec::new(), Vec::new()), |(mut a, mut b), x| {
 if x.use_cache() {
@@ -189,9 +177,9 @@ async fn finder_worker_single(
 }
 match qres {
 Ok(rows) => {
-stats.dbsearcher_select_res_0().add(rows.len() as _);
+// stats.dbsearcher_select_res_0().add(rows.len() as _);
 if rows.len() != batch.len() {
-stats.dbsearcher_select_error_len_mismatch().inc();
+// stats.dbsearcher_select_error_len_mismatch().inc();
 error!("query result len {} batch len {}", rows.len(), batch.len());
 tokio::time::sleep(Duration::from_millis(1000)).await;
 continue;
@@ -215,8 +203,9 @@ async fn finder_worker_single(
 let items_len = items.len();
 match tx.send(items).await {
 Ok(_) => {
-stats.dbsearcher_batch_send().inc();
-stats.dbsearcher_item_send().add(items_len as _);
+// TODO
+// stats.dbsearcher_batch_send().inc();
+// stats.dbsearcher_item_send().add(items_len as _);
 }
 Err(e) => {
 error!("finder sees: {}", e);
@@ -243,10 +232,9 @@ async fn finder_network_if_not_found(
 rx: Receiver<VecDeque<FindIocRes>>,
 tx: Sender<VecDeque<FindIocRes>>,
 opts: CaIngestOpts,
-stats: Arc<IocFinderStats>,
 ) -> Result<(), Error> {
 let self_name = "finder_network_if_not_found";
-let (net_tx, net_rx, jh_ca_search) = ca_search_workers_start(&opts, stats.clone()).await?;
+let (net_tx, net_rx, jh_ca_search) = ca_search_workers_start(&opts).await?;
 let jh2 = taskrun::spawn(process_net_result(net_rx, tx.clone(), opts.clone()));
 'outer: while let Ok(item) = rx.recv().await {
 let mut res = VecDeque::new();

@@ -9,13 +9,11 @@ use log::*;
 use proto::CaMsg;
 use proto::CaMsgTy;
 use proto::HeadInfo;
-use stats::IocFinderStats;
 use std::collections::BTreeMap;
 use std::collections::VecDeque;
 use std::net::Ipv4Addr;
 use std::net::SocketAddrV4;
 use std::pin::Pin;
-use std::sync::Arc;
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
 use std::task::Context;
@@ -120,7 +118,6 @@ pub struct FindIocStream {
 thr_msg_1: ThrottleTrace,
 #[allow(unused)]
 thr_msg_2: ThrottleTrace,
-stats: Arc<IocFinderStats>,
 }

 impl FindIocStream {
@@ -131,7 +128,6 @@ impl FindIocStream {
 batch_run_max: Duration,
 in_flight_max: usize,
 batch_size: usize,
-stats: Arc<IocFinderStats>,
 ) -> Self {
 let sock = unsafe { Self::create_socket() }.unwrap();
 let afd = AsyncFd::new(sock.0).unwrap();
@@ -159,7 +155,6 @@ impl FindIocStream {
 thr_msg_0: ThrottleTrace::new(Duration::from_millis(1000)),
 thr_msg_1: ThrottleTrace::new(Duration::from_millis(1000)),
 thr_msg_2: ThrottleTrace::new(Duration::from_millis(1000)),
-stats,
 }
 }

@@ -284,10 +279,7 @@ impl FindIocStream {
 Poll::Ready(Ok(()))
 }

-unsafe fn try_read(
-sock: i32,
-stats: &IocFinderStats,
-) -> Poll<Result<(SocketAddrV4, Vec<(SearchId, SocketAddrV4)>), Error>> {
+unsafe fn try_read(sock: i32) -> Poll<Result<(SocketAddrV4, Vec<(SearchId, SocketAddrV4)>), Error>> {
 let tsnow = Instant::now();
 let mut saddr_mem = [0u8; std::mem::size_of::<libc::sockaddr>()];
 let mut saddr_len: libc::socklen_t = saddr_mem.len() as _;
@@ -310,14 +302,14 @@ impl FindIocStream {
 return Poll::Ready(Err(Error::ReadFailure));
 }
 } else if ec < 0 {
-stats.ca_udp_io_error().inc();
+// stats.ca_udp_io_error().inc();
 error!("unexpected received {ec}");
 Poll::Ready(Err(Error::ReadFailure))
 } else if ec == 0 {
-stats.ca_udp_io_empty().inc();
+// stats.ca_udp_io_empty().inc();
 Poll::Ready(Err(Error::ReadEmpty))
 } else {
-stats.ca_udp_io_recv().inc();
+// stats.ca_udp_io_recv().inc();
 let saddr2: libc::sockaddr_in = unsafe { std::mem::transmute_copy(&saddr_mem) };
 let src_addr = Ipv4Addr::from(saddr2.sin_addr.s_addr.to_ne_bytes());
 let src_port = u16::from_be(saddr2.sin_port);
@@ -366,15 +358,15 @@ impl FindIocStream {
 accounted += 16 + hi.payload_len();
 }
 if accounted != ec as u32 {
-stats.ca_udp_unaccounted_data().inc();
+// stats.ca_udp_unaccounted_data().inc();
 debug!("unaccounted data ec {} accounted {}", ec, accounted);
 }
 if msgs.len() < 1 {
-stats.ca_udp_warn().inc();
+// stats.ca_udp_warn().inc();
 debug!("received answer without messages");
 }
 if msgs.len() == 1 {
-stats.ca_udp_warn().inc();
+// stats.ca_udp_warn().inc();
 debug!("received answer with single message: {msgs:?}");
 }
 let mut good = true;
@@ -384,7 +376,7 @@ impl FindIocStream {
 good = false;
 }
 } else {
-stats.ca_udp_first_msg_not_version().inc();
+// stats.ca_udp_first_msg_not_version().inc();
 }
 // trace2!("recv {:?} {:?}", src_addr, msgs);
 let mut res = Vec::new();
@@ -398,7 +390,7 @@ impl FindIocStream {
 res.push((SearchId(k.id), addr));
 }
 _ => {
-stats.ca_udp_error().inc();
+// stats.ca_udp_error().inc();
 warn!("try_read: unknown message received {:?}", msg.ty);
 }
 }
@@ -449,7 +441,7 @@ impl FindIocStream {
 };
 self.in_flight.insert(bid.clone(), batch);
 self.batch_send_queue.push_back(bid);
-self.stats.ca_udp_batch_created().inc();
+// stats.ca_udp_batch_created().inc();
 }

 fn handle_result(&mut self, src: SocketAddrV4, res: Vec<(SearchId, SocketAddrV4)>) {
@@ -477,11 +469,11 @@ impl FindIocStream {
 dt,
 };
 // trace!("udp search response {res:?}");
-self.stats.ca_udp_recv_result().inc();
+// stats.ca_udp_recv_result().inc();
 self.out_queue.push_back(res);
 }
 None => {
-self.stats.ca_udp_logic_error().inc();
+// stats.ca_udp_logic_error().inc();
 error!(
 "logic error batch sids / channels lens: {} vs {}",
 batch.sids.len(),
@@ -537,7 +529,7 @@ impl FindIocStream {
 sids.push(sid.clone());
 chns.push(batch.channels[i2].clone());
 dts.push(dt);
-self.stats.ca_udp_recv_timeout().inc();
+// stats.ca_udp_recv_timeout().inc();
 }
 }
 bids.push(bid.clone());
@@ -692,7 +684,7 @@ impl Stream for FindIocStream {
 break match self.afd.poll_read_ready(cx) {
 Ready(Ok(mut g)) => {
 // debug!("BLOCK AA");
-match unsafe { Self::try_read(self.sock.0, &self.stats) } {
+match unsafe { Self::try_read(self.sock.0) } {
 Ready(Ok((src, res))) => {
 self.handle_result(src, res);
 if self.ready_for_end_of_stream() {

@@ -4,12 +4,10 @@ use async_channel::Receiver;
 use async_channel::Sender;
 use futures_util::StreamExt;
 use log::*;
-use stats::IocFinderStats;
 use std::collections::VecDeque;
 use std::net::IpAddr;
 use std::net::SocketAddr;
 use std::net::SocketAddrV4;
-use std::sync::Arc;
 use std::time::Duration;
 use taskrun::tokio;
 use tokio::task::JoinHandle;
@@ -59,7 +57,6 @@ async fn resolve_address(addr_str: &str) -> Result<SocketAddr, Error> {

 pub async fn ca_search_workers_start(
 opts: &CaIngestOpts,
-stats: Arc<IocFinderStats>,
 ) -> Result<
 (
 Sender<String>,
@@ -72,7 +69,7 @@ pub async fn ca_search_workers_start(
 let batch_run_max = Duration::from_millis(800);
 let (inp_tx, inp_rx) = async_channel::bounded(256);
 let (out_tx, out_rx) = async_channel::bounded(256);
-let finder = FindIocStream::new(inp_rx, search_tgts, blacklist, batch_run_max, 20, 16, stats);
+let finder = FindIocStream::new(inp_rx, search_tgts, blacklist, batch_run_max, 20, 16);
 let jh = taskrun::spawn(finder_run(finder, out_tx));
 Ok((inp_tx, out_rx, jh))
 }

@@ -33,11 +33,6 @@ use scywr::iteminsertqueue::QueryItem;
 use serde::Deserialize;
 use serde::Serialize;
 use serde_json::json;
-use stats::CaProtoStats;
-use stats::DaemonStats;
-use stats::InsertWorkerStats;
-use stats::IocFinderStats;
-use stats::SeriesByChannelStats;
 use std::collections::BTreeMap;
 use std::collections::HashMap;
 use std::net::SocketAddr;
@@ -128,28 +123,12 @@ impl IntoResponse for CustomErrorResponse {

 #[derive(Clone)]
 pub struct StatsSet {
-daemon: Arc<DaemonStats>,
-insert_worker_stats: Arc<InsertWorkerStats>,
-series_by_channel_stats: Arc<SeriesByChannelStats>,
-ioc_finder_stats: Arc<IocFinderStats>,
 insert_frac: Arc<AtomicU64>,
 }

 impl StatsSet {
-pub fn new(
-daemon: Arc<DaemonStats>,
-insert_worker_stats: Arc<InsertWorkerStats>,
-series_by_channel_stats: Arc<SeriesByChannelStats>,
-ioc_finder_stats: Arc<IocFinderStats>,
-insert_frac: Arc<AtomicU64>,
-) -> Self {
-Self {
-daemon,
-insert_worker_stats,
-series_by_channel_stats,
-ioc_finder_stats,
-insert_frac,
-}
+pub fn new(insert_frac: Arc<AtomicU64>) -> Self {
+Self { insert_frac }
 }
 }

@@ -340,23 +319,12 @@ impl DaemonComm {

 fn metricbeat(stats_set: &StatsSet) -> axum::Json<serde_json::Value> {
 let mut map = serde_json::Map::new();
-map.insert("daemon".to_string(), stats_set.daemon.json());
-map.insert("insert_worker_stats".to_string(), stats_set.insert_worker_stats.json());
+// map.insert("insert_worker_stats".to_string(), stats_set.insert_worker_stats.json());
 let mut ret = serde_json::Map::new();
 ret.insert("daqingest".to_string(), serde_json::Value::Object(map));
 axum::Json(serde_json::Value::Object(ret))
 }

-fn metrics(stats_set: &StatsSet) -> String {
-let ss = [
-stats_set.daemon.prometheus(),
-stats_set.insert_worker_stats.prometheus(),
-stats_set.series_by_channel_stats.prometheus(),
-stats_set.ioc_finder_stats.prometheus(),
-];
-ss.join("")
-}
-
 pub struct RoutesResources {
 backend: String,
 worker_tx: Sender<ChannelInfoQuery>,
@@ -427,10 +395,8 @@ fn make_routes(
 let dcom = dcom.clone();
 let stats_set = stats_set.clone();
 || async move {
-let prom2 = metrics2(dcom).await.unwrap_or(String::new());
-let mut s = metrics(&stats_set);
-s.push_str(&prom2);
-s
+let prom = metrics2(dcom).await.unwrap_or(String::new());
+prom
 }
 }),
 ),