Reduce log chatter
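This demotes per-request info! events to debug! across httpret so that normal operation stays quiet in the logs. Assuming the crates use the tracing ecosystem (the span!/Level::INFO/.instrument usage below indicates they do), the demoted messages remain reachable at runtime through an env-filtered subscriber; a minimal sketch, with the filter string illustrative:

    use tracing_subscriber::EnvFilter;

    fn init_logging() {
        // Default to info-level output, but keep the messages this commit
        // demoted available for the httpret crate while debugging.
        let filter = EnvFilter::try_from_default_env()
            .unwrap_or_else(|_| EnvFilter::new("info,httpret=debug"));
        tracing_subscriber::fmt().with_env_filter(filter).init();
    }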
@@ -10,9 +10,7 @@ use hyper::{Body, Client, Request, Response};
 use items::{RangeCompletableItem, Sitemty, StreamItem};
 use itertools::Itertools;
 use netpod::query::RawEventsQuery;
-use netpod::{
-    log::*, ByteSize, Channel, FileIoBufferSize, NanoRange, NodeConfigCached, PerfOpts, ScalarType, Shape, APP_OCTET,
-};
+use netpod::{log::*, ByteSize, Channel, FileIoBufferSize, NanoRange, NodeConfigCached, PerfOpts, Shape, APP_OCTET};
 use netpod::{ChannelSearchQuery, ChannelSearchResult, ProxyConfig, APP_JSON};
 use parse::channelconfig::{
     extract_matching_config_entry, read_local_config, Config, ConfigEntry, MatchingConfigEntry,
@@ -575,7 +573,7 @@ impl DataApiPython3DataStream {
             if !*header_out {
                 let head = Api1ChannelHeader {
                     name: channel.name.clone(),
-                    ty: scalar_type_to_api3proto(&b.scalar_types[i1]).into(),
+                    ty: b.scalar_types[i1].to_api3proto().into(),
                     byte_order: if b.be[i1] {
                         "BIG_ENDIAN".into()
                     } else {
@@ -688,7 +686,7 @@ impl Stream for DataApiPython3DataStream {
         let perf_opts = PerfOpts { inmem_bufcap: 1024 * 4 };
         // TODO is this a good place to decide this?
         let s = if self.node_config.node_config.cluster.is_central_storage {
-            info!("Set up central storage stream");
+            debug!("Set up central storage stream");
             // TODO pull up this config
             let event_chunker_conf = EventChunkerConf::new(ByteSize::kb(1024));
             let s = disk::raw::conn::make_local_event_blobs_stream(
@@ -703,7 +701,7 @@ impl Stream for DataApiPython3DataStream {
             )?;
             Box::pin(s) as Pin<Box<dyn Stream<Item = Sitemty<EventFull>> + Send>>
         } else {
-            info!("Set up merged remote stream");
+            debug!("Set up merged remote stream");
             let s = disk::merge::mergedblobsfromremotes::MergedBlobsFromRemotes::new(
                 evq,
                 perf_opts,
@@ -769,7 +767,7 @@ impl Stream for DataApiPython3DataStream {
 }

 pub async fn api1_binary_events(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-    info!("api1_binary_events uri: {:?} headers: {:?}", req.uri(), req.headers());
+    debug!("api1_binary_events uri: {:?} headers: {:?}", req.uri(), req.headers());
     let accept_def = "";
     let accept = req
         .headers()
@@ -784,12 +782,12 @@ pub async fn api1_binary_events(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
         error!("got body_data: {:?}", String::from_utf8(body_data[..].to_vec()));
         return Err(Error::with_msg_no_trace("can not parse query"));
     };
-    info!("got Api1Query: {:?}", qu);
+    debug!("got Api1Query: {:?}", qu);
     let beg_date = chrono::DateTime::parse_from_rfc3339(&qu.range.start_date);
     let end_date = chrono::DateTime::parse_from_rfc3339(&qu.range.end_date);
     let beg_date = beg_date?;
     let end_date = end_date?;
-    info!("beg_date {:?} end_date {:?}", beg_date, end_date);
+    debug!("beg_date {:?} end_date {:?}", beg_date, end_date);
     //let url = Url::parse(&format!("dummy:{}", req.uri()))?;
     //let query = PlainEventsBinaryQuery::from_url(&url)?;
     // TODO add stricter check for types, check with client.
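Both parse_from_rfc3339 calls above return Result<DateTime<FixedOffset>, ParseError>; the follow-up `let beg_date = beg_date?;` lines simply propagate a parse failure after both strings have been attempted. A self-contained sketch of that chrono call (the sample input is made up):

    fn parse_range_date(s: &str) -> Result<chrono::DateTime<chrono::FixedOffset>, chrono::ParseError> {
        // Accepts RFC 3339 timestamps such as "1970-01-01T00:00:00Z".
        chrono::DateTime::parse_from_rfc3339(s)
    }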
@@ -832,22 +830,6 @@ pub async fn api1_binary_events(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
     return Ok(ret);
 }

-fn scalar_type_to_api3proto(sty: &ScalarType) -> &'static str {
-    match sty {
-        ScalarType::U8 => "uint8",
-        ScalarType::U16 => "uint16",
-        ScalarType::U32 => "uint32",
-        ScalarType::U64 => "uint64",
-        ScalarType::I8 => "int8",
-        ScalarType::I16 => "int16",
-        ScalarType::I32 => "int32",
-        ScalarType::I64 => "int64",
-        ScalarType::F32 => "float32",
-        ScalarType::F64 => "float64",
-        ScalarType::BOOL => "bool",
-    }
-}
-
 fn shape_to_api3proto(sh: &Option<Vec<u32>>) -> Vec<u32> {
     match sh {
         None => vec![],
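The deleted scalar_type_to_api3proto above matches the call-site change in the first hunk of this file, where scalar_type_to_api3proto(&b.scalar_types[i1]) became b.scalar_types[i1].to_api3proto(); it also explains why ScalarType is dropped from the netpod imports. Presumably the mapping moved into netpod as a method on ScalarType; a sketch of what that impl likely looks like, reusing the deleted mapping verbatim (the actual netpod code may differ):

    impl ScalarType {
        // Name of this scalar type in the api3 protocol channel header.
        pub fn to_api3proto(&self) -> &'static str {
            match self {
                ScalarType::U8 => "uint8",
                ScalarType::U16 => "uint16",
                ScalarType::U32 => "uint32",
                ScalarType::U64 => "uint64",
                ScalarType::I8 => "int8",
                ScalarType::I16 => "int16",
                ScalarType::I32 => "int32",
                ScalarType::I64 => "int64",
                ScalarType::F32 => "float32",
                ScalarType::F64 => "float64",
                ScalarType::BOOL => "bool",
            }
        }
    }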
@@ -50,7 +50,8 @@ pub async fn host(node_config: NodeConfigCached) -> Result<(), Error> {
     let addr = SocketAddr::from_str(&format!("{}:{}", node_config.node.listen, node_config.node.port))?;
     let make_service = make_service_fn({
         move |conn: &AddrStream| {
-            info!("new connection from {:?}", conn.remote_addr());
+            // TODO send to logstash
+            debug!("new connection from {:?}", conn.remote_addr());
             let node_config = node_config.clone();
             async move {
                 Ok::<_, Error>(service_fn({
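For context, make_service_fn with an &AddrStream argument is hyper 0.14's per-connection service factory, which is why the connection log line sits outside the inner service_fn. A minimal self-contained sketch of the same wiring (handler and address are illustrative, not the project's):

    use std::convert::Infallible;
    use std::net::SocketAddr;
    use hyper::server::conn::AddrStream;
    use hyper::service::{make_service_fn, service_fn};
    use hyper::{Body, Request, Response, Server};

    #[tokio::main]
    async fn main() -> Result<(), hyper::Error> {
        let addr = SocketAddr::from(([127, 0, 0, 1], 8080));
        let make_service = make_service_fn(|conn: &AddrStream| {
            // Runs once per accepted connection; a good place for
            // connection-level (not request-level) logging.
            let remote = conn.remote_addr();
            async move {
                Ok::<_, Infallible>(service_fn(move |_req: Request<Body>| async move {
                    Ok::<_, Infallible>(Response::new(Body::from(format!("hello {}", remote))))
                }))
            }
        });
        Server::bind(&addr).serve(make_service).await
    }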
@@ -167,7 +168,8 @@ macro_rules! static_http_api1 {
 }

 async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-    info!("http_service_try {:?}", req.uri());
+    // TODO send to logstash
+    debug!("http_service_try {:?}", req.uri());
     let uri = req.uri().clone();
     let path = uri.path();
     if path == "/api/4/node_status" {
@@ -299,7 +301,6 @@ async fn http_service_try(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
         } else if let Some(h) = channelarchiver::BlockStream::should_handle(path) {
             h.handle(req, &node_config).await
         } else if path.starts_with("/api/1/requestStatus/") {
-            info!("{}", path);
             Ok(response(StatusCode::OK).body(Body::from("{}"))?)
         } else if path.starts_with("/api/1/documentation/") {
             if req.method() == Method::GET {
@@ -424,7 +425,7 @@ async fn binned(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
     let desc = format!("binned-BEG-{}-END-{}", query.range().beg / SEC, query.range().end / SEC);
     let span1 = span!(Level::INFO, "httpret::binned", desc = &desc.as_str());
     span1.in_scope(|| {
-        info!("binned STARTING {:?}", query);
+        debug!("binned STARTING {:?}", query);
     });
     match head.headers.get(http::header::ACCEPT) {
         Some(v) if v == APP_OCTET => binned_binary(query, node_config).await,
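Note the pattern in this hunk and the next: the span stays at Level::INFO so request context still shows up in info-level traces, while the chatty event inside it is demoted to debug!. A standalone sketch of that pattern with the tracing crate (function and field names are illustrative):

    use tracing::{debug, span, Level};

    fn log_request_start(desc: &str) {
        // The span itself remains visible at info verbosity...
        let span1 = span!(Level::INFO, "httpret::binned", desc = desc);
        span1.in_scope(|| {
            // ...but the per-request message only appears at debug.
            debug!("binned STARTING");
        });
    }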
@@ -473,7 +474,7 @@ async fn prebinned(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
     );
     let span1 = span!(Level::INFO, "httpret::prebinned", desc = &desc.as_str());
     span1.in_scope(|| {
-        info!("prebinned STARTING");
+        debug!("prebinned STARTING");
     });
     let fut = disk::binned::prebinned::pre_binned_bytes_for_http(node_config, &query).instrument(span1);
     let ret = match fut.await {
@@ -491,7 +492,7 @@
 }

 async fn plain_events(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-    info!("httpret plain_events headers: {:?}", req.headers());
+    debug!("httpret plain_events headers: {:?}", req.headers());
     let accept_def = "";
     let accept = req
         .headers()
@@ -522,7 +523,7 @@ async fn plain_events(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
 }

 async fn plain_events_binary(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-    info!("httpret plain_events_binary req: {:?}", req);
+    debug!("httpret plain_events_binary req: {:?}", req);
     let url = Url::parse(&format!("dummy:{}", req.uri()))?;
     let query = PlainEventsBinaryQuery::from_url(&url)?;
     let op = disk::channelexec::PlainEvents::new(
@@ -538,7 +539,7 @@ async fn plain_events_binary(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
 }

 async fn plain_events_json(req: Request<Body>, node_config: &NodeConfigCached) -> Result<Response<Body>, Error> {
-    info!("httpret plain_events_json req: {:?}", req);
+    debug!("httpret plain_events_json req: {:?}", req);
     let (head, _body) = req.into_parts();
     let query = PlainEventsJsonQuery::from_request_head(&head)?;
     let op = disk::channelexec::PlainEventsJson::new(