Factored out more crates

This commit is contained in:
Dominik Werder
2024-11-07 21:16:55 +01:00
parent b913fcc8c6
commit 1d9250ef15
66 changed files with 17 additions and 16661 deletions

View File

@@ -1,14 +0,0 @@
[package]
name = "bitshuffle"
version = "0.0.2"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2021"
[lib]
path = "src/bitshuffle.rs"
[dependencies]
libc = "0.2.92"
[build-dependencies]
cc = "1.0.67"

View File

@@ -1,10 +0,0 @@
/// Build script: compile the bundled bitshuffle + LZ4 C sources into a
/// single static library ("bitshufbundled") that the Rust crate links to.
fn main() {
    let sources = [
        "src/bitshuffle.c",
        "src/bitshuffle_core.c",
        "src/iochain.c",
        "src/lz4.c",
    ];
    let mut build = cc::Build::new();
    for src in sources {
        build.file(src);
    }
    // The vendored C code triggers many benign warnings; silence them.
    build.include("src").warnings(false).compile("bitshufbundled");
}

View File

@@ -1,164 +0,0 @@
/*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include "bitshuffle.h"
#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"
#include "lz4.h"
#include <stdio.h>
#include <string.h>
// Constants.
// Use fast decompression instead of safe decompression for LZ4.
// #define BSHUF_LZ4_DECOMPRESS_FAST
// Macros.
#define CHECK_ERR_FREE_LZ(count, buf) if (count < 0) { \
free(buf); return count - 1000; }
/* Bitshuffle and compress a single block. */
/* Worker for one pipeline iteration: claims the next input slot from the
 * IO chain *C_ptr, bit-transposes `size` elements of `elem_size` bytes,
 * LZ4-compresses the transposed data, and writes a 4-byte big-endian
 * compressed-length header followed by the compressed payload into the
 * next output slot.
 * Returns bytes written to the output (compressed size + 4), -1 on
 * allocation failure, a negative transpose error code, or
 * (lz4 result - 1000) if compression fails. */
int64_t bshuf_compress_lz4_block(ioc_chain *C_ptr, \
const size_t size, const size_t elem_size) {
int64_t nbytes, count;
void *tmp_buf_bshuf;
void *tmp_buf_lz4;
size_t this_iter;
const void *in;
void *out;
/* Scratch for the bit-transposed data and for the LZ4 output. */
tmp_buf_bshuf = malloc(size * elem_size);
if (tmp_buf_bshuf == NULL) return -1;
tmp_buf_lz4 = malloc(LZ4_compressBound(size * elem_size));
if (tmp_buf_lz4 == NULL){
free(tmp_buf_bshuf);
return -1;
}
/* Claim this iteration's input and immediately publish the next input
 * pointer so the following thread can start. */
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter, (void*) ((char*) in + size * elem_size));
count = bshuf_trans_bit_elem(in, tmp_buf_bshuf, size, elem_size);
if (count < 0) {
free(tmp_buf_lz4);
free(tmp_buf_bshuf);
return count;
}
/* NOTE(review): LZ4_compress is the legacy (pre-1.7 naming) API;
 * presumably provided by the bundled lz4.c — LZ4_compress_default is
 * the modern equivalent. Verify against the bundled lz4 sources. */
nbytes = LZ4_compress((const char*) tmp_buf_bshuf, (char*) tmp_buf_lz4, size * elem_size);
free(tmp_buf_bshuf);
CHECK_ERR_FREE_LZ(nbytes, tmp_buf_lz4);
/* Output slot only becomes known once all previous blocks' compressed
 * sizes are known; publish the next output pointer right after. */
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter, (void *) ((char *) out + nbytes + 4));
/* 4-byte big-endian header with the compressed size of this block. */
bshuf_write_uint32_BE(out, nbytes);
memcpy((char *) out + 4, tmp_buf_lz4, nbytes);
free(tmp_buf_lz4);
return nbytes + 4;
}
/* Decompress and bitunshuffle a single block. */
/* Inverse of bshuf_compress_lz4_block: reads a 4-byte big-endian
 * compressed length from the input slot, LZ4-decompresses the payload,
 * then bit-untransposes size*elem_size bytes into the output slot.
 * Returns compressed bytes consumed (payload + 4), -1 on allocation
 * failure, -91 on a decompressed-size mismatch, or a propagated
 * negative error code.
 * NOTE(review): nbytes_from_header is taken straight from the (possibly
 * untrusted) input and used to advance the input cursor before any
 * validation — malformed data can move the cursor out of bounds even in
 * the LZ4_decompress_safe configuration. Confirm inputs are trusted. */
int64_t bshuf_decompress_lz4_block(ioc_chain *C_ptr,
const size_t size, const size_t elem_size) {
int64_t nbytes, count;
void *out, *tmp_buf;
const void *in;
size_t this_iter;
int32_t nbytes_from_header;
in = ioc_get_in(C_ptr, &this_iter);
nbytes_from_header = bshuf_read_uint32_BE(in);
/* Publish the next input pointer (this block's payload + 4-byte header). */
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + nbytes_from_header + 4));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
#ifdef BSHUF_LZ4_DECOMPRESS_FAST
/* Fast path (opt-in, see macro above): no bounds checking on input. */
nbytes = LZ4_decompress_fast((const char*) in + 4, (char*) tmp_buf, size * elem_size);
CHECK_ERR_FREE_LZ(nbytes, tmp_buf);
if (nbytes != nbytes_from_header) {
free(tmp_buf);
return -91;
}
#else
/* Safe path (default): bounds-checked decompression. */
nbytes = LZ4_decompress_safe((const char*) in + 4, (char *) tmp_buf, nbytes_from_header,
size * elem_size);
CHECK_ERR_FREE_LZ(nbytes, tmp_buf);
if (nbytes != size * elem_size) {
free(tmp_buf);
return -91;
}
/* Report compressed bytes consumed, not decompressed bytes produced. */
nbytes = nbytes_from_header;
#endif
count = bshuf_untrans_bit_elem(tmp_buf, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
nbytes += 4;
free(tmp_buf);
return nbytes;
}
/* ---- Public functions ----
*
* See header file for description and usage.
*
*/
/* Upper bound on the bytes bshuf_compress_lz4 may produce for `size`
 * elements of `elem_size` bytes with the given block size (0 selects the
 * default). Each block contributes its LZ4 worst case plus a 4-byte
 * header; trailing elements not filling a BSHUF_BLOCKED_MULT multiple
 * are stored uncompressed.
 * NOTE(review): on an invalid block_size this returns -81 although the
 * return type is size_t, so callers actually observe a huge positive
 * value — mirrors the upstream bitshuffle implementation; confirm
 * callers treat it accordingly. */
size_t bshuf_compress_lz4_bound(const size_t size,
const size_t elem_size, size_t block_size) {
size_t bound, leftover;
if (block_size == 0) {
block_size = bshuf_default_block_size(elem_size);
}
if (block_size % BSHUF_BLOCKED_MULT) return -81;
// Note that each block gets a 4 byte header.
// Size of full blocks.
bound = (LZ4_compressBound(block_size * elem_size) + 4) * (size / block_size);
// Size of partial blocks, if any.
leftover = ((size % block_size) / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT;
if (leftover) bound += LZ4_compressBound(leftover * elem_size) + 4;
// Size of uncompressed data not fitting into any blocks.
bound += (size % BSHUF_BLOCKED_MULT) * elem_size;
return bound;
}
/* Public entry point: bitshuffle then LZ4-compress `size` elements of
 * `elem_size` bytes from `in` into `out`, in blocks of `block_size`
 * elements (0 selects the default block size). Returns the number of
 * bytes written to `out`, or a negative error code. */
int64_t bshuf_compress_lz4(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {
    /* Delegate block iteration (and OpenMP parallelism) to the wrapper. */
    bshufBlockFunDef block_fun = bshuf_compress_lz4_block;
    return bshuf_blocked_wrap_fun(block_fun, in, out, size, elem_size,
            block_size);
}
/* Public entry point: LZ4-decompress then un-bitshuffle `size` elements
 * of `elem_size` bytes from `in` into `out`. The size/elem_size/
 * block_size arguments must match those used when compressing. Returns
 * the number of bytes consumed from `in`, or a negative error code. */
int64_t bshuf_decompress_lz4(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {
    /* Delegate block iteration (and OpenMP parallelism) to the wrapper. */
    bshufBlockFunDef block_fun = bshuf_decompress_lz4_block;
    return bshuf_blocked_wrap_fun(block_fun, in, out, size, elem_size,
            block_size);
}

View File

@@ -1,123 +0,0 @@
/*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* This file is part of Bitshuffle
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*
* Header File
*
* Worker routines return an int64_t which is the number of bytes processed
* if positive or an error code if negative.
*
* Error codes:
* -1 : Failed to allocate memory.
* -11 : Missing SSE.
* -12 : Missing AVX.
* -80 : Input size not a multiple of 8.
* -81 : block_size not multiple of 8.
* -91 : Decompression error, wrong number of bytes processed.
* -1YYY : Error internal to compression routine with error code -YYY.
*/
#ifndef BITSHUFFLE_H
#define BITSHUFFLE_H
#include <stdlib.h>
#include "bitshuffle_core.h"
#ifdef __cplusplus
extern "C" {
#endif
/* ---- bshuf_compress_lz4_bound ----
*
* Bound on size of data compressed with *bshuf_compress_lz4*.
*
* Parameters
* ----------
* size : number of elements in input
* elem_size : element size of typed data
* block_size : Process in blocks of this many elements. Pass 0 to
* select automatically (recommended).
*
* Returns
* -------
* Bound on compressed data size.
*
*/
size_t bshuf_compress_lz4_bound(const size_t size,
const size_t elem_size, size_t block_size);
/* ---- bshuf_compress_lz4 ----
*
* Bitshuffled and compress the data using LZ4.
*
* Transpose within elements, in blocks of data of *block_size* elements then
* compress the blocks using LZ4. In the output buffer, each block is prefixed
* by a 4 byte integer giving the compressed size of that block.
*
* Output buffer must be large enough to hold the compressed data. This could
* be in principle substantially larger than the input buffer. Use the routine
* *bshuf_compress_lz4_bound* to get an upper limit.
*
* Parameters
* ----------
* in : input buffer, must be of size * elem_size bytes
* out : output buffer, must be large enough to hold data.
* size : number of elements in input
* elem_size : element size of typed data
* block_size : Process in blocks of this many elements. Pass 0 to
* select automatically (recommended).
*
* Returns
* -------
* number of bytes used in output buffer, negative error-code if failed.
*
*/
int64_t bshuf_compress_lz4(const void* in, void* out, const size_t size, const size_t
elem_size, size_t block_size);
/* ---- bshuf_decompress_lz4 ----
*
* Undo compression and bitshuffling.
*
* Decompress data then un-bitshuffle it in blocks of *block_size* elements.
*
* To properly unshuffle bitshuffled data, *size*, *elem_size* and *block_size*
* must match the parameters used to compress the data.
*
* NOT TO BE USED WITH UNTRUSTED DATA: when compiled with
* BSHUF_LZ4_DECOMPRESS_FAST defined, this routine uses LZ4_decompress_fast,
* which does not protect against maliciously formed datasets. By modifying
* the compressed data, that function could be coerced into leaving the
* boundaries of the input buffer. (The default build uses the
* bounds-checked LZ4_decompress_safe.)
*
* Parameters
* ----------
* in : input buffer
* out : output buffer, must be of size * elem_size bytes
* size : number of elements in input
* elem_size : element size of typed data
* block_size : Process in blocks of this many elements. Pass 0 to
* select automatically (recommended).
*
* Returns
* -------
* number of bytes consumed in *input* buffer, negative error-code if failed.
*
*/
int64_t bshuf_decompress_lz4(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // BITSHUFFLE_H

View File

@@ -1,141 +0,0 @@
use libc::{c_int, size_t};

extern "C" {
    /// Bitshuffle + LZ4-compress `size` elements of `elem_size` bytes from
    /// `inp` into `out`, in blocks of `block_size` elements (0 = default).
    /// Returns bytes written to `out`, or a negative error code.
    ///
    /// `out` is written to by the C routine, so it is declared `*mut u8`
    /// (the previous `*const u8` declaration was misleading; callers
    /// already pass `as_mut_ptr()`).
    pub fn bshuf_compress_lz4(
        inp: *const u8,
        out: *mut u8,
        size: size_t,
        elem_size: size_t,
        block_size: size_t,
    ) -> i64;
    /// Inverse of `bshuf_compress_lz4`; the size/elem_size/block_size
    /// arguments must match those used when compressing.
    /// Returns bytes consumed from `inp`, or a negative error code.
    pub fn bshuf_decompress_lz4(
        inp: *const u8,
        out: *mut u8,
        size: size_t,
        elem_size: size_t,
        block_size: size_t,
    ) -> i64;
    /// Bounds-checked LZ4 block decompression from the bundled lz4.c.
    /// Returns bytes written to `dest`, or a negative error code.
    pub fn LZ4_decompress_safe(
        source: *const u8,
        dest: *mut u8,
        compressedSize: c_int,
        maxDecompressedSize: c_int,
    ) -> c_int;
}
/// Safe wrapper over the C `bshuf_compress_lz4` routine.
///
/// Bitshuffles and LZ4-compresses `size` elements of `elem_size` bytes
/// from `inp` into `out`, in blocks of `block_size` elements (0 selects
/// the default). Returns the number of bytes written to `out`, or the
/// negative error code from the C library.
pub fn bitshuffle_compress(
    inp: &[u8],
    out: &mut [u8],
    size: usize,
    elem_size: usize,
    block_size: usize,
) -> Result<usize, isize> {
    let ret =
        unsafe { bshuf_compress_lz4(inp.as_ptr(), out.as_mut_ptr(), size, elem_size, block_size) };
    match ret {
        n if n >= 0 => Ok(n as usize),
        err => Err(err as isize),
    }
}
/// Safe wrapper over the C `bshuf_decompress_lz4` routine.
///
/// Decompresses and un-bitshuffles data produced by `bitshuffle_compress`;
/// `size`, `elem_size` and `block_size` must match the compression call.
/// Returns the number of bytes consumed from `inp`, or the negative error
/// code from the C library.
pub fn bitshuffle_decompress(
    inp: &[u8],
    out: &mut [u8],
    size: usize,
    elem_size: usize,
    block_size: usize,
) -> Result<usize, isize> {
    let ret = unsafe {
        bshuf_decompress_lz4(inp.as_ptr(), out.as_mut_ptr(), size, elem_size, block_size)
    };
    match ret {
        n if n >= 0 => Ok(n as usize),
        err => Err(err as isize),
    }
}
/// Decompress a plain LZ4 block from `inp` into `out` using the
/// bounds-checked `LZ4_decompress_safe`. Returns the number of bytes
/// written to `out`, or the negative LZ4 error code.
pub fn lz4_decompress(inp: &[u8], out: &mut [u8]) -> Result<usize, isize> {
    let rc = unsafe {
        LZ4_decompress_safe(inp.as_ptr(), out.as_mut_ptr(), inp.len() as _, out.len() as _)
    };
    if rc >= 0 {
        Ok(rc as _)
    } else {
        Err(rc as _)
    }
}
#[cfg(test)]
mod _simd {
    // Kept for the commented-out experiments below; not used by the
    // active code paths.
    #[allow(unused_imports)]
    use std::arch::x86_64::_mm_loadu_si128;
    #[allow(unused_imports)]
    use std::arch::x86_64::_mm_shuffle_epi32;
    #[allow(unused_imports)]
    use std::ptr::null;

    /// Probe the CPU's SIMD feature flags and run a tiny kernel for each
    /// detected level. Every kernel is called only after its matching
    /// runtime feature check, so the test is safe on any x86_64 CPU.
    #[test]
    fn simd_test() {
        if is_x86_feature_detected!("sse") {
            eprintln!("have sse 1");
        }
        if is_x86_feature_detected!("sse2") {
            eprintln!("have sse 2");
            unsafe { simd_sse_2() };
        }
        if is_x86_feature_detected!("sse3") {
            eprintln!("have sse 3");
            unsafe { simd_sse_3() };
        }
        if is_x86_feature_detected!("sse4.1") {
            eprintln!("have sse 4.1");
            unsafe { simd_sse_4_1() };
        }
        if is_x86_feature_detected!("sse4.2") {
            eprintln!("have sse 4.2");
            unsafe { simd_sse_4_2() };
        }
        if is_x86_feature_detected!("sse4a") {
            eprintln!("have sse 4 a");
        }
        if is_x86_feature_detected!("avx") {
            eprintln!("have avx 1");
        }
        if is_x86_feature_detected!("avx2") {
            eprintln!("have avx 2");
            // BUGFIX: the 256-bit integer intrinsics (_mm256_xor_si256
            // etc.) previously ran inside the sse3-gated kernel, which
            // could execute illegal instructions on CPUs without AVX2.
            // They are now gated on the avx2 runtime check.
            unsafe { simd_avx2() };
        }
    }

    #[target_feature(enable = "sse2")]
    unsafe fn simd_sse_2() {
        // _mm_loadu_si128(null());
        eprintln!("sse 2 done");
    }

    #[target_feature(enable = "sse3")]
    unsafe fn simd_sse_3() {
        // core::arch::asm!();
        // core::arch::global_asm!();
        eprintln!("sse 3 done");
    }

    #[target_feature(enable = "sse4.1")]
    unsafe fn simd_sse_4_1() {
        // _mm_loadu_si128(null());
        eprintln!("sse 4.1 done");
    }

    #[target_feature(enable = "sse4.2")]
    unsafe fn simd_sse_4_2() {
        // _mm_loadu_si128(null());
        eprintln!("sse 4.2 done");
    }

    /// AVX2 scratch kernel: xor two 256-bit vectors and reload the result.
    #[target_feature(enable = "avx2")]
    unsafe fn simd_avx2() {
        let a = core::arch::x86_64::_mm256_setzero_si256();
        let b = core::arch::x86_64::_mm256_set_epi32(7, 3, 9, 11, 17, 13, 19, 21);
        let x = core::arch::x86_64::_mm256_xor_si256(a, b);
        core::arch::x86_64::_mm256_loadu_si256(&x as *const _);
        eprintln!("avx2 done");
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,159 +0,0 @@
/*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* This file is part of Bitshuffle
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*
* Header File
*
* Worker routines return an int64_t which is the number of bytes processed
* if positive or an error code if negative.
*
* Error codes:
* -1 : Failed to allocate memory.
* -11 : Missing SSE.
* -12 : Missing AVX.
* -13 : Missing Arm Neon.
* -80 : Input size not a multiple of 8.
* -81 : block_size not multiple of 8.
* -91 : Decompression error, wrong number of bytes processed.
* -1YYY : Error internal to compression routine with error code -YYY.
*/
#ifndef BITSHUFFLE_CORE_H
#define BITSHUFFLE_CORE_H
#include <stdint.h>
#if 0
// We assume GNU g++ defining `__cplusplus` has stdint.h
#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) || defined(__cplusplus)
#else
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef signed int int32_t;
typedef unsigned long long uint64_t;
typedef long long int64_t;
#endif
#endif
#include <stdlib.h>
// These are usually set in the setup.py.
#ifndef BSHUF_VERSION_MAJOR
#define BSHUF_VERSION_MAJOR 0
#define BSHUF_VERSION_MINOR 3
#define BSHUF_VERSION_POINT 5
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* --- bshuf_using_SSE2 ----
*
* Whether routines where compiled with the SSE2 instruction set.
*
* Returns
* -------
* 1 if using SSE2, 0 otherwise.
*
*/
int bshuf_using_SSE2(void);
/* ---- bshuf_using_AVX2 ----
*
* Whether routines where compiled with the AVX2 instruction set.
*
* Returns
* -------
* 1 if using AVX2, 0 otherwise.
*
*/
int bshuf_using_AVX2(void);
/* ---- bshuf_default_block_size ----
*
* The default block size as function of element size.
*
* This is the block size used by the blocked routines (any routine
* taking a *block_size* argument) when the block_size is not provided
* (zero is passed).
*
* The results of this routine are guaranteed to be stable such that
* shuffled/compressed data can always be decompressed.
*
* Parameters
* ----------
* elem_size : element size of data to be shuffled/compressed.
*
*/
size_t bshuf_default_block_size(const size_t elem_size);
/* ---- bshuf_bitshuffle ----
*
* Bitshuffle the data.
*
* Transpose the bits within elements, in blocks of *block_size*
* elements.
*
* Parameters
* ----------
* in : input buffer, must be of size * elem_size bytes
* out : output buffer, must be of size * elem_size bytes
* size : number of elements in input
* elem_size : element size of typed data
* block_size : Do transpose in blocks of this many elements. Pass 0 to
* select automatically (recommended).
*
* Returns
* -------
* number of bytes processed, negative error-code if failed.
*
*/
int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size);
/* ---- bshuf_bitunshuffle ----
*
* Unshuffle bitshuffled data.
*
* Untranspose the bits within elements, in blocks of *block_size*
* elements.
*
* To properly unshuffle bitshuffled data, *size*, *elem_size* and *block_size*
* must match the parameters used to shuffle the data.
*
* Parameters
* ----------
* in : input buffer, must be of size * elem_size bytes
* out : output buffer, must be of size * elem_size bytes
* size : number of elements in input
* elem_size : element size of typed data
* block_size : Do transpose in blocks of this many elements. Pass 0 to
* select automatically (recommended).
*
* Returns
* -------
* number of bytes processed, negative error-code if failed.
*
*/
int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // BITSHUFFLE_CORE_H

View File

@@ -1,77 +0,0 @@
/*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* This file is part of Bitshuffle
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*/
#ifndef BITSHUFFLE_INTERNALS_H
#define BITSHUFFLE_INTERNALS_H
#include <stdint.h>
#if 0
// We assume GNU g++ defining `__cplusplus` has stdint.h
#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) || defined(__cplusplus)
#else
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef signed int int32_t;
typedef unsigned long long uint64_t;
typedef long long int64_t;
#endif
#endif
#include <stdlib.h>
#include "iochain.h"
// Constants.
#ifndef BSHUF_MIN_RECOMMEND_BLOCK
#define BSHUF_MIN_RECOMMEND_BLOCK 128
#define BSHUF_BLOCKED_MULT 8 // Block sizes must be multiple of this.
#define BSHUF_TARGET_BLOCK_SIZE_B 8192
#endif
// Macros.
#define CHECK_ERR_FREE(count, buf) if (count < 0) { free(buf); return count; }
#ifdef __cplusplus
extern "C" {
#endif
/* ---- Utility functions for internal use only ---- */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size);
/* Read a 32 bit unsigned integer from a buffer big endian order. */
uint32_t bshuf_read_uint32_BE(const void* buf);
/* Write a 32 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num);
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size);
/* Function definition for worker functions that process a single block. */
typedef int64_t (*bshufBlockFunDef)(ioc_chain* C_ptr,
const size_t size, const size_t elem_size);
/* Wrap a function for processing a single block to process an entire buffer in
* parallel. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out,
const size_t size, const size_t elem_size, size_t block_size);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // BITSHUFFLE_INTERNALS_H

View File

@@ -1,89 +0,0 @@
/*
* IOchain - Distribute a chain of dependent IO events among threads.
*
* This file is part of Bitshuffle
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include <stdlib.h>
#include "iochain.h"
/* Prepare chain *C for use (call from a serial region): install the
 * initial input/output pointers, reset the iteration counter and, when
 * built with OpenMP, initialize every per-slot lock. */
void ioc_init(ioc_chain *C, const void *in_ptr_0, void *out_ptr_0) {
    C->next = 0;
    C->in_pl[0].ptr = in_ptr_0;
    C->out_pl[0].ptr = out_ptr_0;
#ifdef _OPENMP
    omp_init_lock(&C->next_lock);
    size_t slot = 0;
    while (slot < IOC_SIZE) {
        omp_init_lock(&(C->in_pl[slot].lock));
        omp_init_lock(&(C->out_pl[slot].lock));
        slot++;
    }
#endif
}
/* Tear down chain *C (call from a serial region). Only the OpenMP locks
 * need releasing; without OpenMP this is a no-op. */
void ioc_destroy(ioc_chain *C) {
#ifdef _OPENMP
    size_t slot = 0;
    omp_destroy_lock(&C->next_lock);
    while (slot < IOC_SIZE) {
        omp_destroy_lock(&(C->in_pl[slot].lock));
        omp_destroy_lock(&(C->out_pl[slot].lock));
        slot++;
    }
#endif
}
/* Claim the next iteration number (written to *this_iter) and return its
 * input pointer. Under OpenMP, acquiring this slot's own input lock
 * blocks until the previous iteration has published the pointer via
 * ioc_set_next_in. On return the caller also holds the locks for the
 * *next* input and output slots; they are released later in the same
 * iteration by ioc_set_next_in / ioc_set_next_out. */
const void * ioc_get_in(ioc_chain *C, size_t *this_iter) {
#ifdef _OPENMP
omp_set_lock(&C->next_lock);
#pragma omp flush
#endif
/* Hand out a unique iteration index while next_lock is held. */
*this_iter = C->next;
C->next ++;
#ifdef _OPENMP
omp_set_lock(&(C->in_pl[*this_iter % IOC_SIZE].lock));
omp_set_lock(&(C->in_pl[(*this_iter + 1) % IOC_SIZE].lock));
omp_set_lock(&(C->out_pl[(*this_iter + 1) % IOC_SIZE].lock));
omp_unset_lock(&C->next_lock);
#endif
return C->in_pl[*this_iter % IOC_SIZE].ptr;
}
/* Publish the input pointer for the following iteration and (under
 * OpenMP) release that slot's lock so the next thread's ioc_get_in can
 * proceed. */
void ioc_set_next_in(ioc_chain *C, size_t* this_iter, void* in_ptr) {
C->in_pl[(*this_iter + 1) % IOC_SIZE].ptr = in_ptr;
#ifdef _OPENMP
omp_unset_lock(&(C->in_pl[(*this_iter + 1) % IOC_SIZE].lock));
#endif
}
/* Return the output pointer for this thread's iteration. Under OpenMP
 * this blocks until the previous iteration has published it via
 * ioc_set_next_out; the slot's lock is taken and immediately released,
 * serving purely as a "pointer is ready" barrier. */
void * ioc_get_out(ioc_chain *C, size_t *this_iter) {
#ifdef _OPENMP
omp_set_lock(&(C->out_pl[(*this_iter) % IOC_SIZE].lock));
#pragma omp flush
#endif
void *out_ptr = C->out_pl[*this_iter % IOC_SIZE].ptr;
#ifdef _OPENMP
omp_unset_lock(&(C->out_pl[(*this_iter) % IOC_SIZE].lock));
#endif
return out_ptr;
}
/* Publish the output pointer for the following iteration and (under
 * OpenMP) release the locks that let the next thread proceed. */
void ioc_set_next_out(ioc_chain *C, size_t *this_iter, void* out_ptr) {
C->out_pl[(*this_iter + 1) % IOC_SIZE].ptr = out_ptr;
#ifdef _OPENMP
omp_unset_lock(&(C->out_pl[(*this_iter + 1) % IOC_SIZE].lock));
// *in_pl[this_iter]* lock released at the end of the iteration to avoid being
// overtaken by previous threads and having *out_pl[this_iter]* corrupted.
// Especially worried about thread 0, iteration 0.
omp_unset_lock(&(C->in_pl[(*this_iter) % IOC_SIZE].lock));
#endif
}

View File

@@ -1,93 +0,0 @@
/*
* IOchain - Distribute a chain of dependent IO events among threads.
*
* This file is part of Bitshuffle
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*
* Header File
*
* Similar in concept to a queue. Each task includes reading an input
* and writing output, but the location of the input/output (the pointers)
* depend on the previous item in the chain.
*
* This is designed for parallelizing blocked compression/decompression IO,
* where the destination of a compressed block depends on the compressed size
* of all previous blocks.
*
* Implemented with OpenMP locks.
*
*
* Usage
* -----
* - Call `ioc_init` in serial block.
* - Each thread should create a local variable *size_t this_iter* and
* pass its address to all function calls. Its value will be set
* inside the functions and is used to identify the thread.
* - Each thread must call each of the `ioc_get*` and `ioc_set*` methods
* exactly once per iteration, starting with `ioc_get_in` and ending
* with `ioc_set_next_out`.
* - The order (`ioc_get_in`, `ioc_set_next_in`, *work*, `ioc_get_out`,
* `ioc_set_next_out`, *work*) is most efficient.
* - Have each thread call `ioc_end_pop`.
* - `ioc_get_in` is blocked until the previous entry's
* `ioc_set_next_in` is called.
* - `ioc_get_out` is blocked until the previous entry's
* `ioc_set_next_out` is called.
* - There are no blocks on the very first iteration.
* - Call `ioc_destroy` in serial block.
* - Safe for num_threads >= IOC_SIZE (but less efficient).
*
*/
#ifndef IOCHAIN_H
#define IOCHAIN_H
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Number of slots in the IO chain ring. */
#define IOC_SIZE 33
/* One output slot: a writable data pointer, guarded by a per-slot lock
 * when built with OpenMP. */
typedef struct ioc_ptr_and_lock {
#ifdef _OPENMP
omp_lock_t lock;
#endif
void *ptr;
} ptr_and_lock;
/* One input slot: like ptr_and_lock but the payload is read-only. */
typedef struct ioc_const_ptr_and_lock {
#ifdef _OPENMP
omp_lock_t lock;
#endif
const void *ptr;
} const_ptr_and_lock;
/* The chain itself: ring buffers of input/output slots plus the counter
 * of the next iteration to hand out (guarded by next_lock under OpenMP). */
typedef struct ioc_chain {
#ifdef _OPENMP
omp_lock_t next_lock;
#endif
size_t next;
const_ptr_and_lock in_pl[IOC_SIZE];
ptr_and_lock out_pl[IOC_SIZE];
} ioc_chain;
void ioc_init(ioc_chain *C, const void *in_ptr_0, void *out_ptr_0);
void ioc_destroy(ioc_chain *C);
const void * ioc_get_in(ioc_chain *C, size_t *this_iter);
void ioc_set_next_in(ioc_chain *C, size_t* this_iter, void* in_ptr);
void * ioc_get_out(ioc_chain *C, size_t *this_iter);
void ioc_set_next_out(ioc_chain *C, size_t *this_iter, void* out_ptr);
#endif // IOCHAIN_H

File diff suppressed because it is too large Load Diff

View File

@@ -1,360 +0,0 @@
/*
LZ4 - Fast LZ compression algorithm
Header File
Copyright (C) 2011-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- LZ4 source repository : https://github.com/Cyan4973/lz4
- LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
#pragma once
#if defined (__cplusplus)
extern "C" {
#endif
/*
* lz4.h provides block compression functions, and gives full buffer control to programmer.
* If you need to generate inter-operable compressed data (respecting LZ4 frame specification),
* and can let the library handle its own memory, please use lz4frame.h instead.
*/
/**************************************
* Version
**************************************/
#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */
#define LZ4_VERSION_RELEASE 1 /* for tweaks, bug-fixes, or development */
#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
int LZ4_versionNumber (void);
/**************************************
* Tuning parameter
**************************************/
/*
* LZ4_MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
*/
#define LZ4_MEMORY_USAGE 14
/**************************************
* Simple Functions
**************************************/
int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize);
int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize);
/*
LZ4_compress_default() :
Compresses 'sourceSize' bytes from buffer 'source'
into already allocated 'dest' buffer of size 'maxDestSize'.
Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize).
It also runs faster, so it's a recommended setting.
If the function cannot compress 'source' into a more limited 'dest' budget,
compression stops *immediately*, and the function result is zero.
As a consequence, 'dest' content is not valid.
This function never writes outside 'dest' buffer, nor read outside 'source' buffer.
sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE
maxDestSize : full or partial size of buffer 'dest' (which must be already allocated)
return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize)
or 0 if compression fails
LZ4_decompress_safe() :
compressedSize : is the precise full size of the compressed block.
maxDecompressedSize : is the size of destination buffer, which must be already allocated.
return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize)
If destination buffer is not large enough, decoding will stop and output an error code (<0).
If the source stream is detected malformed, the function will stop decoding and return a negative result.
This function is protected against buffer overflow exploits, including malicious data packets.
It never writes outside output buffer, nor reads outside input buffer.
*/
/**************************************
* Advanced Functions
**************************************/
#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
/*
LZ4_compressBound() :
Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
This function is primarily useful for memory allocation purposes (destination buffer size).
Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize)
inputSize : max supported value is LZ4_MAX_INPUT_SIZE
return : maximum output size in a "worst case" scenario
or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
*/
int LZ4_compressBound(int inputSize);
/*
LZ4_compress_fast() :
Same as LZ4_compress_default(), but allows to select an "acceleration" factor.
The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
An acceleration value of "1" is the same as regular LZ4_compress_default()
Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1.
*/
int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration);
/*
LZ4_compress_fast_extState() :
Same compression function, just using an externally allocated memory space to store compression state.
Use LZ4_sizeofState() to know how much memory must be allocated,
and allocate it on 8-bytes boundaries (using malloc() typically).
Then, provide it as 'void* state' to compression function.
*/
int LZ4_sizeofState(void);
int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration);
/*
LZ4_compress_destSize() :
Reverse the logic, by compressing as much data as possible from 'source' buffer
into already allocated buffer 'dest' of size 'targetDestSize'.
This function either compresses the entire 'source' content into 'dest' if it's large enough,
or fill 'dest' buffer completely with as much data as possible from 'source'.
*sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'.
New value is necessarily <= old value.
return : Nb bytes written into 'dest' (necessarily <= targetDestSize)
or 0 if compression fails
*/
int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize);
/*
LZ4_decompress_fast() :
originalSize : is the original and therefore uncompressed size
return : the number of bytes read from the source buffer (in other words, the compressed size)
If the source stream is detected malformed, the function will stop decoding and return a negative result.
Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes.
note : This function fully respect memory boundaries for properly formed compressed data.
It is a bit faster than LZ4_decompress_safe().
However, it does not provide any protection against intentionally modified data stream (malicious input).
Use this function in trusted environment only (data to decode comes from a trusted source).
*/
int LZ4_decompress_fast (const char* source, char* dest, int originalSize);
/*
LZ4_decompress_safe_partial() :
This function decompress a compressed block of size 'compressedSize' at position 'source'
into destination buffer 'dest' of size 'maxDecompressedSize'.
The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached,
reducing decompression time.
return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize)
Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller.
Always control how many bytes were decoded.
If the source stream is detected malformed, the function will stop decoding and return a negative result.
This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets
*/
int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize);
/***********************************************
* Streaming Compression Functions
***********************************************/
#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4)
#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(long long))
/*
* LZ4_stream_t
* information structure to track an LZ4 stream.
* important : init this structure content before first use !
* note : only allocated directly the structure if you are statically linking LZ4
* If you are using liblz4 as a DLL, please use below construction methods instead.
*/
typedef struct { long long table[LZ4_STREAMSIZE_U64]; } LZ4_stream_t;
/*
* LZ4_resetStream
* Use this function to init an allocated LZ4_stream_t structure
*/
void LZ4_resetStream (LZ4_stream_t* streamPtr);
/*
* LZ4_createStream will allocate and initialize an LZ4_stream_t structure
* LZ4_freeStream releases its memory.
* In the context of a DLL (liblz4), please use these methods rather than the static struct.
* They are more future proof, in case of a change of LZ4_stream_t size.
*/
LZ4_stream_t* LZ4_createStream(void);
int LZ4_freeStream (LZ4_stream_t* streamPtr);
/*
* LZ4_loadDict
* Use this function to load a static dictionary into LZ4_stream.
* Any previous data will be forgotten, only 'dictionary' will remain in memory.
* Loading a size of 0 is allowed.
* Return : dictionary size, in bytes (necessarily <= 64 KB)
*/
int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
/*
* LZ4_compress_fast_continue
* Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve compression ratio.
* Important : Previous data blocks are assumed to still be present and unmodified !
* 'dst' buffer must be already allocated.
* If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
* If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function returns a zero.
*/
int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration);
/*
* LZ4_saveDict
* If previously compressed data block is not guaranteed to remain available at its memory location
* save it into a safer place (char* safeBuffer)
* Note : you don't need to call LZ4_loadDict() afterwards,
* dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue()
* Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error
*/
int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize);
/************************************************
* Streaming Decompression Functions
************************************************/
#define LZ4_STREAMDECODESIZE_U64 4
#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
typedef struct { unsigned long long table[LZ4_STREAMDECODESIZE_U64]; } LZ4_streamDecode_t;
/*
* LZ4_streamDecode_t
* information structure to track an LZ4 stream.
* init this structure content using LZ4_setStreamDecode or memset() before first use !
*
* In the context of a DLL (liblz4) please prefer usage of construction methods below.
* They are more future proof, in case of a change of LZ4_streamDecode_t size in the future.
* LZ4_createStreamDecode will allocate and initialize an LZ4_streamDecode_t structure
* LZ4_freeStreamDecode releases its memory.
*/
LZ4_streamDecode_t* LZ4_createStreamDecode(void);
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
/*
* LZ4_setStreamDecode
* Use this function to instruct where to find the dictionary.
* Setting a size of 0 is allowed (same effect as reset).
* Return : 1 if OK, 0 if error
*/
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
/*
*_continue() :
These decoding functions allow decompression of multiple blocks in "streaming" mode.
Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB)
In the case of a ring buffers, decoding buffer must be either :
- Exactly same size as encoding buffer, with same update rule (block boundaries at same positions)
In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB).
- Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block.
In which case, encoding and decoding buffers do not need to be synchronized,
and encoding ring buffer can have any size, including small ones ( < 64 KB).
- _At least_ 64 KB + 8 bytes + maxBlockSize.
In which case, encoding and decoding buffers do not need to be synchronized,
and encoding ring buffer can have any size, including larger than decoding buffer.
Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer,
and indicate where it is saved using LZ4_setStreamDecode()
*/
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize);
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize);
/*
Advanced decoding functions :
*_usingDict() :
These decoding functions work the same as
a combination of LZ4_setStreamDecode() followed by LZ4_decompress_x_continue()
They are stand-alone. They don't need nor update an LZ4_streamDecode_t structure.
*/
int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize);
int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
/**************************************
* Obsolete Functions
**************************************/
/* Deprecate Warnings */
/* Should these warnings messages be a problem,
it is generally possible to disable them,
with -Wno-deprecated-declarations for gcc
or _CRT_SECURE_NO_WARNINGS in Visual for example.
You can also define LZ4_DEPRECATE_WARNING_DEFBLOCK. */
#ifndef LZ4_DEPRECATE_WARNING_DEFBLOCK
# define LZ4_DEPRECATE_WARNING_DEFBLOCK
# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
# if (LZ4_GCC_VERSION >= 405) || defined(__clang__)
# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
# elif (LZ4_GCC_VERSION >= 301)
# define LZ4_DEPRECATED(message) __attribute__((deprecated))
# elif defined(_MSC_VER)
# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
# else
# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler")
# define LZ4_DEPRECATED(message)
# endif
#endif /* LZ4_DEPRECATE_WARNING_DEFBLOCK */
/* Obsolete compression functions */
/* These functions are planned to start generate warnings by r131 approximately */
int LZ4_compress (const char* source, char* dest, int sourceSize);
int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize);
int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
/* Obsolete decompression functions */
/* These function names are completely deprecated and must no longer be used.
They are only provided here for compatibility with older programs.
- LZ4_uncompress is the same as LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe
These function prototypes are now disabled; uncomment them only if you really need them.
It is highly recommended to stop using these prototypes and migrate to maintained ones */
/* int LZ4_uncompress (const char* source, char* dest, int outputSize); */
/* int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); */
/* Obsolete streaming functions; use new streaming interface whenever possible */
LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer);
LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void);
LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer);
LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state);
/* Obsolete streaming decoding functions */
LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
#if defined (__cplusplus)
}
#endif

View File

@@ -21,4 +21,3 @@ daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
taskrun = { path = "../taskrun" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_proc = { path = "../items_proc" }

View File

@@ -29,6 +29,6 @@ httpret = { path = "../httpret" }
httpclient = { path = "../httpclient" }
disk = { path = "../disk" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
streams = { path = "../streams" }
parse = { path = "../parse" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }

View File

@@ -22,5 +22,5 @@ chrono = "0.4.38"
regex = "1.10.4"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
parse = { path = "../parse" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }
taskrun = { path = "../taskrun" }

View File

@@ -35,10 +35,10 @@ taskrun = { path = "../taskrun" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../query" }
dbconn = { path = "../dbconn" }
parse = { path = "../parse" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
streams = { path = "../streams" }
streamio = { path = "../streamio" }
httpclient = { path = "../httpclient" }
bitshuffle = { path = "../bitshuffle" }
bitshuffle = { path = "../../../daqbuf-bitshuffle", package = "daqbuf-bitshuffle" }

View File

@@ -16,6 +16,6 @@ bytes = "1.7"
daqbuf-err = { path = "../../../daqbuf-err" }
taskrun = { path = "../taskrun" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
parse = { path = "../parse" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }
disk = { path = "../disk" }
streams = { path = "../streams" }

View File

@@ -21,7 +21,7 @@ bytes = "1.5.0"
async-channel = "1.9.0"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
parse = { path = "../parse" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }
streams = { path = "../streams" }
thiserror = "=0.0.1"

View File

@@ -34,8 +34,8 @@ query = { path = "../query" }
dbconn = { path = "../dbconn" }
disk = { path = "../disk" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
parse = { path = "../parse" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }
streams = { path = "../streams" }
streamio = { path = "../streamio" }
nodenet = { path = "../nodenet" }

View File

@@ -1,37 +0,0 @@
[package]
name = "items_2"
version = "0.0.2"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2021"
[lib]
path = "src/items_2.rs"
doctest = false
[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
ciborium = "0.2.1"
rmp-serde = "1.1.1"
postcard = { version = "1.0.0", features = ["use-std"] }
erased-serde = "0.4"
typetag = "0.2.14"
bytes = "1.2.1"
num-traits = "0.2.15"
chrono = { version = "0.4.19", features = ["serde"] }
crc32fast = "1.3.2"
futures-util = "0.3.24"
humantime-serde = "1.1.1"
thiserror = "0.0.1"
daqbuf-err = { path = "../../../daqbuf-err" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_proc = { path = "../items_proc" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
parse = { path = "../parse" }
bitshuffle = { path = "../bitshuffle" }
[patch.crates-io]
thiserror = { git = "https://github.com/dominikwerder/thiserror.git", branch = "cstm" }
[features]
heavy = []

View File

@@ -1,41 +0,0 @@
use items_0::Empty;
use items_0::Extendable;
use items_0::WithLen;
use serde::Deserialize;
use serde::Serialize;
use std::collections::VecDeque;
/// Columnar (struct-of-VecDeques) container of accounting events.
/// The three columns are parallel: entry `i` describes one event as
/// (timestamp, count, bytes).
#[derive(Debug, Serialize, Deserialize)]
pub struct AccountingEvents {
    // Event timestamps; presumably nanoseconds — TODO confirm against producers.
    pub tss: VecDeque<u64>,
    pub count: VecDeque<u64>,
    pub bytes: VecDeque<u64>,
}

impl Empty for AccountingEvents {
    /// Construct with all columns empty.
    fn empty() -> Self {
        Self {
            tss: VecDeque::new(),
            count: VecDeque::new(),
            bytes: VecDeque::new(),
        }
    }
}

impl WithLen for AccountingEvents {
    /// Number of entries; the columns are kept in lockstep, so `tss` is representative.
    fn len(&self) -> usize {
        self.tss.len()
    }
}
impl Extendable for AccountingEvents {
    /// Move all entries of `src` into `self`, leaving `src` empty.
    fn extend_from(&mut self, src: &mut Self) {
        self.tss.extend(src.tss.drain(..));
        self.count.extend(src.count.drain(..));
        self.bytes.extend(src.bytes.drain(..));
    }
}

View File

@@ -1,11 +0,0 @@
// Submodules of the binning machinery.
pub mod aggregator;
pub mod binnedvaluetype;
pub mod container_bins;
pub mod container_events;
pub mod timeweight;
pub mod valuetype;
#[cfg(test)]
mod test;
// Re-import this module under the alias `___` so that submodules can refer
// back to it tersely (e.g. `use super::___;`) — unusual; TODO confirm intent.
use super::binning as ___;

View File

@@ -1,211 +0,0 @@
use super::container_events::EventValueType;
use core::fmt;
use netpod::log::*;
use netpod::DtNano;
use netpod::EnumVariant;
use serde::Deserialize;
use serde::Serialize;
// Compile-time-disabled trace macros: the `if false` keeps the format
// arguments type-checked without any runtime cost.
#[allow(unused)]
macro_rules! trace_event { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_result { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }

/// Marker trait for types that can appear as the time-weighted average
/// output of an aggregation (see `AggregatorTimeWeight`).
pub trait AggTimeWeightOutputAvg: fmt::Debug + Clone + Send + Serialize + for<'a> Deserialize<'a> {}
impl AggTimeWeightOutputAvg for u8 {}
impl AggTimeWeightOutputAvg for u16 {}
impl AggTimeWeightOutputAvg for u32 {}
impl AggTimeWeightOutputAvg for u64 {}
impl AggTimeWeightOutputAvg for i8 {}
impl AggTimeWeightOutputAvg for i16 {}
impl AggTimeWeightOutputAvg for i32 {}
impl AggTimeWeightOutputAvg for i64 {}
impl AggTimeWeightOutputAvg for f32 {}
impl AggTimeWeightOutputAvg for f64 {}
impl AggTimeWeightOutputAvg for EnumVariant {}
impl AggTimeWeightOutputAvg for String {}
impl AggTimeWeightOutputAvg for bool {}
/// Time-weighted aggregator over events of value type `EVT`.
pub trait AggregatorTimeWeight<EVT>: fmt::Debug + Send
where
    EVT: EventValueType,
{
    fn new() -> Self;
    /// Ingest one event: `dt` is the duration the value was held, `bl` the bin length.
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EVT);
    fn reset_for_new_bin(&mut self);
    /// Emit the bin result and reset; `filled_width_fraction` rescales
    /// results for partially covered bins.
    fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> EVT::AggTimeWeightOutputAvg;
}

/// Accumulates the time-weighted sum for numeric event types.
#[derive(Debug)]
pub struct AggregatorNumeric {
    sum: f64,
}

// Helper trait: numeric event types whose average output type is pinned to `f64`.
trait AggWithF64: EventValueType<AggTimeWeightOutputAvg = f64> {
    fn as_f64(&self) -> f64;
}

impl AggWithF64 for f64 {
    fn as_f64(&self) -> f64 {
        *self
    }
}
// Blanket time-weight aggregation for any `AggWithF64` value type.
// The associated-type bound pins `EVT::AggTimeWeightOutputAvg` to `f64`.
impl<EVT> AggregatorTimeWeight<EVT> for AggregatorNumeric
where
    EVT: AggWithF64,
{
    fn new() -> Self {
        Self { sum: 0. }
    }
    // Add `val` weighted by the fraction of the bin it covers.
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EVT) {
        let f = dt.ns() as f64 / bl.ns() as f64;
        trace_event!("INGEST {} {:?}", f, val);
        self.sum += f * val.as_f64();
    }
    fn reset_for_new_bin(&mut self) {
        self.sum = 0.;
    }
    // Result rescaled by the filled fraction of the bin; resets the accumulator.
    fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> EVT::AggTimeWeightOutputAvg {
        // `sum` is f64 (Copy) — the former `.clone()` was redundant (clippy::clone_on_copy).
        let sum = self.sum;
        trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
        self.sum = 0.;
        sum / filled_width_fraction as f64
    }
}
// f32 specialization: accumulates in f64 for precision, returns f32.
impl AggregatorTimeWeight<f32> for AggregatorNumeric {
    fn new() -> Self {
        Self { sum: 0. }
    }
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: f32) {
        let f = dt.ns() as f64 / bl.ns() as f64;
        trace_event!("INGEST {} {}", f, val);
        self.sum += f * val as f64;
    }
    fn reset_for_new_bin(&mut self) {
        self.sum = 0.;
    }
    fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f32 {
        // f64 is Copy — the former `.clone()` was redundant (clippy::clone_on_copy).
        let sum = self.sum as f32;
        trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
        self.sum = 0.;
        sum / filled_width_fraction
    }
}
macro_rules! impl_agg_tw_for_agg_num {
($evt:ty) => {
impl AggregatorTimeWeight<$evt> for AggregatorNumeric {
fn new() -> Self {
Self { sum: 0. }
}
fn ingest(&mut self, dt: DtNano, bl: DtNano, val: $evt) {
let f = dt.ns() as f64 / bl.ns() as f64;
trace_event!("INGEST {} {}", f, val);
self.sum += f * val as f64;
}
fn reset_for_new_bin(&mut self) {
self.sum = 0.;
}
fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
let sum = self.sum.clone();
trace_result!(
"result_and_reset_for_new_bin sum {} {}",
sum,
filled_width_fraction
);
self.sum = 0.;
sum / filled_width_fraction as f64
}
}
};
}
impl_agg_tw_for_agg_num!(u8);
impl_agg_tw_for_agg_num!(u16);
impl_agg_tw_for_agg_num!(u32);
impl_agg_tw_for_agg_num!(i8);
impl_agg_tw_for_agg_num!(i16);
impl_agg_tw_for_agg_num!(i32);
impl_agg_tw_for_agg_num!(i64);
// NOTE(review): this impl duplicates the `impl_agg_tw_for_agg_num!` macro body
// exactly — consider folding `u64` into the macro invocation list.
impl AggregatorTimeWeight<u64> for AggregatorNumeric {
    fn new() -> Self {
        Self { sum: 0. }
    }
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: u64) {
        let f = dt.ns() as f64 / bl.ns() as f64;
        trace_event!("INGEST {} {}", f, val);
        self.sum += f * val as f64;
    }
    fn reset_for_new_bin(&mut self) {
        self.sum = 0.;
    }
    fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
        // f64 is Copy — the former `.clone()` was redundant (clippy::clone_on_copy).
        let sum = self.sum;
        trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
        self.sum = 0.;
        sum / filled_width_fraction as f64
    }
}
// Time-weighted averaging of boolean events: `true` contributes its full
// time-weight, `false` contributes a zero-weighted term.
impl AggregatorTimeWeight<bool> for AggregatorNumeric {
    fn new() -> Self {
        Self { sum: 0. }
    }
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: bool) {
        let weight = dt.ns() as f64 / bl.ns() as f64;
        trace_event!("INGEST {} {}", weight, val);
        // bool -> u8 -> f64 yields exactly 1.0 or 0.0.
        self.sum += weight * f64::from(u8::from(val));
    }
    fn reset_for_new_bin(&mut self) {
        self.sum = 0.;
    }
    fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
        let total = self.sum;
        trace_result!("result_and_reset_for_new_bin sum {} {}", total, filled_width_fraction);
        self.sum = 0.;
        total / f64::from(filled_width_fraction)
    }
}
// Time-weighted "average" over string events.
// NOTE(review): this averages the string LENGTH (`val.len()`), not any parsed
// numeric content — confirm this is the intended semantics for string channels.
impl AggregatorTimeWeight<String> for AggregatorNumeric {
    fn new() -> Self {
        Self { sum: 0. }
    }
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: String) {
        let f = dt.ns() as f64 / bl.ns() as f64;
        trace_event!("INGEST {} {}", f, val);
        self.sum += f * val.len() as f64;
    }
    fn reset_for_new_bin(&mut self) {
        self.sum = 0.;
    }
    fn result_and_reset_for_new_bin(&mut self, filled_width_fraction: f32) -> f64 {
        let sum = self.sum.clone();
        trace_result!("result_and_reset_for_new_bin sum {} {}", sum, filled_width_fraction);
        self.sum = 0.;
        sum / filled_width_fraction as f64
    }
}

View File

@@ -1,8 +0,0 @@
pub trait BinnedValueType {}
pub struct BinnedNumericValue<EVT> {
avg: f32,
_t: Option<EVT>,
}
impl<EVT> BinnedValueType for BinnedNumericValue<EVT> {}

View File

@@ -1,653 +0,0 @@
use super::aggregator::AggregatorNumeric;
use super::aggregator::AggregatorTimeWeight;
use super::container_events::EventValueType;
use super::___;
use crate::ts_offs_from_abs;
use crate::ts_offs_from_abs_with_anchor;
use core::fmt;
use daqbuf_err as err;
use err::thiserror;
use err::ThisError;
use items_0::collect_s::CollectableDyn;
use items_0::collect_s::CollectedDyn;
use items_0::collect_s::ToJsonResult;
use items_0::timebin::BinningggContainerBinsDyn;
use items_0::timebin::BinsBoxed;
use items_0::vecpreview::VecPreview;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::TypeName;
use items_0::WithLen;
use netpod::log::*;
use netpod::EnumVariant;
use netpod::TsNano;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::collections::VecDeque;
use std::mem;
// Init-time trace macro; unlike trace_event!/trace_result! this one is enabled.
#[allow(unused)]
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }

/// Errors reported by `ContainerBins` validation.
#[derive(Debug, ThisError)]
#[cstm(name = "ContainerBins")]
pub enum ContainerBinsError {
    /// Bin edges are not monotonically non-decreasing (see `verify`).
    Unordered,
}

/// Value types that can be carried in a bin (min/max/last fields).
/// The commented associated items sketch a planned extension — TODO confirm.
pub trait BinValueType: fmt::Debug + Clone + PartialOrd {
    // type Container: Container<Self>;
    // type AggregatorTimeWeight: AggregatorTimeWeight<Self>;
    // type AggTimeWeightOutputAvg;
    // fn identity_sum() -> Self;
    // fn add_weighted(&self, add: &Self, f: f32) -> Self;
}
/// One bin with owned values (shape used when popping bins out of a container).
#[derive(Debug, Clone)]
pub struct BinSingle<EVT> {
    // Lower and upper bin edge timestamps.
    pub ts1: TsNano,
    pub ts2: TsNano,
    // Number of events in the bin.
    pub cnt: u64,
    pub min: EVT,
    pub max: EVT,
    pub avg: f32,
    // Last value seen in the bin.
    pub lst: EVT,
    // Presumably "final" (bin will not change anymore) — TODO confirm.
    pub fnl: bool,
}

/// One bin as a borrowed view into a `ContainerBins` (yielded by `IterDebug`).
/// Note: `avg` is in the aggregator's output type here, unlike `BinSingle`'s f32.
#[derive(Debug, Clone)]
pub struct BinRef<'a, EVT>
where
    EVT: EventValueType,
{
    pub ts1: TsNano,
    pub ts2: TsNano,
    pub cnt: u64,
    pub min: &'a EVT,
    pub max: &'a EVT,
    pub avg: &'a EVT::AggTimeWeightOutputAvg,
    pub lst: &'a EVT,
    pub fnl: bool,
}

/// Iterator yielding up to `len` `BinRef` views over `bins`; used for debugging.
pub struct IterDebug<'a, EVT>
where
    EVT: EventValueType,
{
    bins: &'a ContainerBins<EVT>,
    // Current position and exclusive upper bound.
    ix: usize,
    len: usize,
}
impl<'a, EVT> Iterator for IterDebug<'a, EVT>
where
    EVT: EventValueType,
{
    type Item = BinRef<'a, EVT>;
    // Yield a borrowed view of the next bin, stopping at `len` or the
    // container end, whichever comes first.
    fn next(&mut self) -> Option<Self::Item> {
        let i = self.ix;
        if i >= self.len || i >= self.bins.len() {
            return None;
        }
        self.ix = i + 1;
        let b = self.bins;
        Some(BinRef {
            ts1: b.ts1s[i],
            ts2: b.ts2s[i],
            cnt: b.cnts[i],
            min: &b.mins[i],
            max: &b.maxs[i],
            avg: &b.avgs[i],
            lst: &b.lsts[i],
            fnl: b.fnls[i],
        })
    }
}
/// Columnar container of time bins: eight parallel VecDeques, where entry `i`
/// across all columns describes one bin.
#[derive(Clone, Serialize, Deserialize)]
pub struct ContainerBins<EVT>
where
    EVT: EventValueType,
{
    // Lower and upper bin edges.
    ts1s: VecDeque<TsNano>,
    ts2s: VecDeque<TsNano>,
    // Event counts per bin.
    cnts: VecDeque<u64>,
    mins: VecDeque<EVT>,
    maxs: VecDeque<EVT>,
    // Time-weighted averages in the aggregator output type.
    avgs: VecDeque<EVT::AggTimeWeightOutputAvg>,
    // Last value per bin.
    lsts: VecDeque<EVT>,
    // Per-bin flags; presumably "final" — TODO confirm.
    fnls: VecDeque<bool>,
}
impl<EVT> ContainerBins<EVT>
where
    EVT: EventValueType,
{
    /// Build from already-populated parallel columns.
    /// The caller is responsible for all columns having equal lengths.
    pub fn from_constituents(
        ts1s: VecDeque<TsNano>,
        ts2s: VecDeque<TsNano>,
        cnts: VecDeque<u64>,
        mins: VecDeque<EVT>,
        maxs: VecDeque<EVT>,
        avgs: VecDeque<EVT::AggTimeWeightOutputAvg>,
        lsts: VecDeque<EVT>,
        fnls: VecDeque<bool>,
    ) -> Self {
        Self {
            ts1s,
            ts2s,
            cnts,
            mins,
            maxs,
            avgs,
            lsts,
            fnls,
        }
    }
    /// Full generic type name, for diagnostics.
    pub fn type_name() -> &'static str {
        any::type_name::<Self>()
    }
    /// Empty container.
    pub fn new() -> Self {
        Self {
            ts1s: VecDeque::new(),
            ts2s: VecDeque::new(),
            cnts: VecDeque::new(),
            mins: VecDeque::new(),
            maxs: VecDeque::new(),
            avgs: VecDeque::new(),
            lsts: VecDeque::new(),
            fnls: VecDeque::new(),
        }
    }
    /// Number of bins; `ts1s` is representative since columns are parallel.
    pub fn len(&self) -> usize {
        self.ts1s.len()
    }
    /// Check that both edge columns are monotonically non-decreasing.
    pub fn verify(&self) -> Result<(), ContainerBinsError> {
        if self.ts1s.iter().zip(self.ts1s.iter().skip(1)).any(|(&a, &b)| a > b) {
            return Err(ContainerBinsError::Unordered);
        }
        if self.ts2s.iter().zip(self.ts2s.iter().skip(1)).any(|(&a, &b)| a > b) {
            return Err(ContainerBinsError::Unordered);
        }
        Ok(())
    }
    /// Lower edge of the first bin, if any.
    pub fn ts1_first(&self) -> Option<TsNano> {
        self.ts1s.front().map(|&x| x)
    }
    /// Upper edge of the last bin, if any.
    pub fn ts2_last(&self) -> Option<TsNano> {
        self.ts2s.back().map(|&x| x)
    }
    // Per-column borrowing iterators.
    pub fn ts1s_iter(&self) -> std::collections::vec_deque::Iter<TsNano> {
        self.ts1s.iter()
    }
    pub fn ts2s_iter(&self) -> std::collections::vec_deque::Iter<TsNano> {
        self.ts2s.iter()
    }
    pub fn cnts_iter(&self) -> std::collections::vec_deque::Iter<u64> {
        self.cnts.iter()
    }
    pub fn mins_iter(&self) -> std::collections::vec_deque::Iter<EVT> {
        self.mins.iter()
    }
    pub fn maxs_iter(&self) -> std::collections::vec_deque::Iter<EVT> {
        self.maxs.iter()
    }
    pub fn avgs_iter(&self) -> std::collections::vec_deque::Iter<EVT::AggTimeWeightOutputAvg> {
        self.avgs.iter()
    }
    pub fn fnls_iter(&self) -> std::collections::vec_deque::Iter<bool> {
        self.fnls.iter()
    }
    /// Iterate all columns in lockstep (except `lsts`), as nested zip tuples:
    /// ((((((ts1, ts2), cnt), min), max), avg), fnl).
    pub fn zip_iter(
        &self,
    ) -> std::iter::Zip<
        std::iter::Zip<
            std::iter::Zip<
                std::iter::Zip<
                    std::iter::Zip<
                        std::iter::Zip<
                            std::collections::vec_deque::Iter<TsNano>,
                            std::collections::vec_deque::Iter<TsNano>,
                        >,
                        std::collections::vec_deque::Iter<u64>,
                    >,
                    std::collections::vec_deque::Iter<EVT>,
                >,
                std::collections::vec_deque::Iter<EVT>,
            >,
            std::collections::vec_deque::Iter<EVT::AggTimeWeightOutputAvg>,
        >,
        std::collections::vec_deque::Iter<bool>,
    > {
        self.ts1s_iter()
            .zip(self.ts2s_iter())
            .zip(self.cnts_iter())
            .zip(self.mins_iter())
            .zip(self.maxs_iter())
            .zip(self.avgs_iter())
            .zip(self.fnls_iter())
    }
    /// Iterate (lower, upper) edge pairs.
    pub fn edges_iter(
        &self,
    ) -> std::iter::Zip<std::collections::vec_deque::Iter<TsNano>, std::collections::vec_deque::Iter<TsNano>> {
        self.ts1s.iter().zip(self.ts2s.iter())
    }
    /// Number of bins whose upper edge is <= `end`.
    /// Relies on `ts2s` being sorted (see `verify`).
    pub fn len_before(&self, end: TsNano) -> usize {
        let pp = self.ts2s.partition_point(|&x| x <= end);
        assert!(pp <= self.len(), "len_before pp {} len {}", pp, self.len());
        pp
    }
    /// NOTE(review): unimplemented — panics via `todo!`. Everything after the
    /// first `todo!` is unreachable dead code left over from a WIP sketch.
    pub fn pop_front(&mut self) -> Option<BinSingle<EVT>> {
        todo!("pop_front");
        let ts1 = if let Some(x) = self.ts1s.pop_front() {
            x
        } else {
            return None;
        };
        let ts2 = if let Some(x) = self.ts2s.pop_front() {
            x
        } else {
            return None;
        };
        todo!()
    }
    /// Append one bin; pushes onto every column to keep them parallel.
    pub fn push_back(
        &mut self,
        ts1: TsNano,
        ts2: TsNano,
        cnt: u64,
        min: EVT,
        max: EVT,
        avg: EVT::AggTimeWeightOutputAvg,
        lst: EVT,
        fnl: bool,
    ) {
        self.ts1s.push_back(ts1);
        self.ts2s.push_back(ts2);
        self.cnts.push_back(cnt);
        self.mins.push_back(min);
        self.maxs.push_back(max);
        self.avgs.push_back(avg);
        self.lsts.push_back(lst);
        self.fnls.push_back(fnl);
    }
    /// Debug iterator over all bins as `BinRef` views.
    pub fn iter_debug(&self) -> IterDebug<EVT> {
        IterDebug {
            bins: self,
            ix: 0,
            len: self.len(),
        }
    }
}
impl<EVT> fmt::Debug for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    // Compact debug view: length plus previews of selected columns
    // (mins/maxs/lsts are omitted to keep the output short).
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let self_name = any::type_name::<Self>();
        write!(
            fmt,
            "{self_name} {{ len: {:?}, ts1s: {:?}, ts2s: {:?}, cnts: {:?}, avgs {:?}, fnls {:?} }}",
            self.len(),
            VecPreview::new(&self.ts1s),
            VecPreview::new(&self.ts2s),
            VecPreview::new(&self.cnts),
            VecPreview::new(&self.avgs),
            VecPreview::new(&self.fnls),
        )
    }
}

impl<EVT> fmt::Display for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    // Display simply delegates to the Debug representation.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, fmt)
    }
}
// Downcast support (mutable).
impl<EVT> AsAnyMut for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    fn as_any_mut(&mut self) -> &mut dyn any::Any {
        self
    }
}

impl<EVT> WithLen for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    // Delegates to the inherent `len` (length of the parallel columns).
    fn len(&self) -> usize {
        Self::len(self)
    }
}

impl<EVT> TypeName for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    // Delegates to the dyn-trait type_name to keep both names consistent.
    fn type_name(&self) -> String {
        BinningggContainerBinsDyn::type_name(self).into()
    }
}

// Downcast support (shared).
impl<EVT> AsAnyRef for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    fn as_any_ref(&self) -> &dyn any::Any {
        self
    }
}
/// Final collected result handed out by `ContainerBinsCollector::result`.
#[derive(Debug)]
pub struct ContainerBinsCollectorOutput<EVT>
where
    EVT: EventValueType,
{
    bins: ContainerBins<EVT>,
}

impl<EVT> TypeName for ContainerBinsCollectorOutput<EVT>
where
    EVT: EventValueType,
{
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}

// Downcast support (shared).
impl<EVT> AsAnyRef for ContainerBinsCollectorOutput<EVT>
where
    EVT: EventValueType,
{
    fn as_any_ref(&self) -> &dyn any::Any {
        self
    }
}

// Downcast support (mutable).
impl<EVT> AsAnyMut for ContainerBinsCollectorOutput<EVT>
where
    EVT: EventValueType,
{
    fn as_any_mut(&mut self) -> &mut dyn any::Any {
        self
    }
}

impl<EVT> WithLen for ContainerBinsCollectorOutput<EVT>
where
    EVT: EventValueType,
{
    // Length is the number of collected bins.
    fn len(&self) -> usize {
        self.bins.len()
    }
}
/// User-facing JSON shape of the collected bins: a second-resolution anchor
/// plus millisecond/nanosecond offsets for both bin edges, and per-bin columns.
#[derive(Debug, Serialize)]
struct ContainerBinsCollectorOutputUser<EVT>
where
    EVT: EventValueType,
{
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "ts1Ms")]
    ts1_off_ms: VecDeque<u64>,
    #[serde(rename = "ts2Ms")]
    ts2_off_ms: VecDeque<u64>,
    #[serde(rename = "ts1Ns")]
    ts1_off_ns: VecDeque<u64>,
    #[serde(rename = "ts2Ns")]
    ts2_off_ns: VecDeque<u64>,
    #[serde(rename = "counts")]
    counts: VecDeque<u64>,
    #[serde(rename = "mins")]
    mins: VecDeque<EVT>,
    #[serde(rename = "maxs")]
    maxs: VecDeque<EVT>,
    #[serde(rename = "avgs")]
    avgs: VecDeque<EVT::AggTimeWeightOutputAvg>,
    // NOTE(review): the fields below are response metadata that is not wired up
    // yet (cf. the TODO in `CollectorDyn::result`) — kept as a sketch.
    // #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    // range_final: bool,
    // #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    // timed_out: bool,
    // #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")]
    // missing_bins: u32,
    // #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    // continue_at: Option<IsoDateTime>,
    // #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")]
    // finished_at: Option<IsoDateTime>,
}
impl<EVT> ToJsonResult for ContainerBinsCollectorOutput<EVT>
where
    EVT: EventValueType,
{
    // Render the collected bins in the user-facing JSON shape: absolute edge
    // timestamps are split into a common anchor plus ms/ns offsets.
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        let b = &self.bins;
        let edges1: Vec<_> = b.ts1s.iter().map(|x| x.ns()).collect();
        let edges2: Vec<_> = b.ts2s.iter().map(|x| x.ns()).collect();
        let (anchor_sec, ts1_ms, ts1_ns) = ts_offs_from_abs(&edges1);
        // Both edge columns share the same anchor.
        let (ts2_ms, ts2_ns) = ts_offs_from_abs_with_anchor(anchor_sec, &edges2);
        let user = ContainerBinsCollectorOutputUser::<EVT> {
            ts_anchor_sec: anchor_sec,
            ts1_off_ms: ts1_ms,
            ts2_off_ms: ts2_ms,
            ts1_off_ns: ts1_ns,
            ts2_off_ns: ts2_ns,
            counts: b.cnts.clone(),
            mins: b.mins.clone(),
            maxs: b.maxs.clone(),
            avgs: b.avgs.clone(),
        };
        serde_json::to_value(&user)
    }
}

// Marker: this type is a terminal collected value.
impl<EVT> CollectedDyn for ContainerBinsCollectorOutput<EVT> where EVT: EventValueType {}
/// Collector that accumulates `ContainerBins` batches until `result` is called.
#[derive(Debug)]
pub struct ContainerBinsCollector<EVT>
where
    EVT: EventValueType,
{
    bins: ContainerBins<EVT>,
    // Set via CollectorDyn::set_timed_out / set_range_complete; currently not
    // reflected in the emitted result (see the TODO in `result`).
    timed_out: bool,
    range_final: bool,
}

impl<EVT> ContainerBinsCollector<EVT> where EVT: EventValueType {}

impl<EVT> WithLen for ContainerBinsCollector<EVT>
where
    EVT: EventValueType,
{
    // Number of bins accumulated so far.
    fn len(&self) -> usize {
        self.bins.len()
    }
}

impl<EVT> items_0::container::ByteEstimate for ContainerBinsCollector<EVT>
where
    EVT: EventValueType,
{
    // Rough size estimate: flat 200 bytes per bin.
    fn byte_estimate(&self) -> u64 {
        // TODO need better estimate
        self.bins.len() as u64 * 200
    }
}
impl<EVT> items_0::collect_s::CollectorDyn for ContainerBinsCollector<EVT>
where
    EVT: EventValueType,
{
    // Drain all bins from `src` into the accumulator.
    // Panics if `src` is not a `ContainerBins<EVT>` of the same EVT.
    fn ingest(&mut self, src: &mut dyn CollectableDyn) {
        if let Some(src) = src.as_any_mut().downcast_mut::<ContainerBins<EVT>>() {
            src.drain_into(&mut self.bins, 0..src.len());
        } else {
            let srcn = src.type_name();
            panic!("wrong src type {srcn}");
        }
    }
    fn set_range_complete(&mut self) {
        self.range_final = true;
    }
    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }
    fn set_continue_at_here(&mut self) {
        debug!("TODO remember the continue at");
    }
    // Hand out the accumulated bins and reset the collector.
    // NOTE(review): `range`/`binrange` and the timed_out/range_final flags are
    // currently not reflected in the output.
    fn result(
        &mut self,
        range: Option<netpod::range::evrange::SeriesRange>,
        binrange: Option<netpod::BinnedRangeEnum>,
    ) -> Result<Box<dyn items_0::collect_s::CollectedDyn>, err::Error> {
        // TODO do we need to set timeout, continueAt or anything?
        let bins = mem::replace(&mut self.bins, ContainerBins::new());
        let ret = ContainerBinsCollectorOutput { bins };
        Ok(Box::new(ret))
    }
}
impl<EVT> CollectableDyn for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    // Create a fresh, empty collector matching this container's value type.
    fn new_collector(&self) -> Box<dyn items_0::collect_s::CollectorDyn> {
        let ret = ContainerBinsCollector::<EVT> {
            bins: ContainerBins::new(),
            timed_out: false,
            range_final: false,
        };
        Box::new(ret)
    }
}
impl<EVT> BinningggContainerBinsDyn for ContainerBins<EVT>
where
    EVT: EventValueType,
{
    fn type_name(&self) -> &'static str {
        any::type_name::<Self>()
    }
    // New empty container of the same concrete type, boxed as the dyn trait.
    fn empty(&self) -> BinsBoxed {
        Box::new(Self::new())
    }
    // Deep clone, boxed; disambiguated against the trait method of the same name.
    fn clone(&self) -> BinsBoxed {
        Box::new(<Self as Clone>::clone(self))
    }
    fn edges_iter(
        &self,
    ) -> std::iter::Zip<std::collections::vec_deque::Iter<TsNano>, std::collections::vec_deque::Iter<TsNano>> {
        self.ts1s.iter().zip(self.ts2s.iter())
    }
    // Move bins `range` from self into `dst`, draining all eight columns in
    // lockstep. Panics if `dst` is not the same concrete type.
    fn drain_into(&mut self, dst: &mut dyn BinningggContainerBinsDyn, range: std::ops::Range<usize>) {
        let obj = dst.as_any_mut();
        if let Some(dst) = obj.downcast_mut::<Self>() {
            dst.ts1s.extend(self.ts1s.drain(range.clone()));
            dst.ts2s.extend(self.ts2s.drain(range.clone()));
            dst.cnts.extend(self.cnts.drain(range.clone()));
            dst.mins.extend(self.mins.drain(range.clone()));
            dst.maxs.extend(self.maxs.drain(range.clone()));
            dst.avgs.extend(self.avgs.drain(range.clone()));
            dst.lsts.extend(self.lsts.drain(range.clone()));
            dst.fnls.extend(self.fnls.drain(range.clone()));
        } else {
            let styn = any::type_name::<EVT>();
            panic!("unexpected drain EVT {} dst {}", styn, Self::type_name());
        }
    }
    // NOTE(review): stub — the loop body is empty, so this is currently a no-op.
    fn fix_numerics(&mut self) {
        for ((min, max), avg) in self.mins.iter_mut().zip(self.maxs.iter_mut()).zip(self.avgs.iter_mut()) {}
    }
}
/// Bounded view over a `ContainerBins` that yields at most `len` bins.
pub struct ContainerBinsTakeUpTo<'a, EVT>
where
    EVT: EventValueType,
{
    evs: &'a mut ContainerBins<EVT>,
    // Remaining take-budget; clamped to the container length on construction.
    len: usize,
}

impl<'a, EVT> ContainerBinsTakeUpTo<'a, EVT>
where
    EVT: EventValueType,
{
    /// Wrap `evs`, limiting consumption to at most `len` bins.
    pub fn new(evs: &'a mut ContainerBins<EVT>, len: usize) -> Self {
        let len = len.min(evs.len());
        Self { evs, len }
    }
}
impl<'a, EVT> ContainerBinsTakeUpTo<'a, EVT>
where
    EVT: EventValueType,
{
    // First bin-begin timestamp of the underlying container, if any.
    pub fn ts1_first(&self) -> Option<TsNano> {
        self.evs.ts1_first()
    }
    // Last bin-end timestamp of the underlying container, if any.
    pub fn ts2_last(&self) -> Option<TsNano> {
        self.evs.ts2_last()
    }
    // Remaining number of bins this view will still yield.
    pub fn len(&self) -> usize {
        self.len
    }
    // Pops the next bin while the budget lasts; `None` once exhausted
    // or when the underlying container runs out.
    pub fn pop_front(&mut self) -> Option<BinSingle<EVT>> {
        if self.len == 0 {
            return None;
        }
        let bin = self.evs.pop_front()?;
        self.len -= 1;
        Some(bin)
    }
}

View File

@@ -1,276 +0,0 @@
use super::aggregator::AggTimeWeightOutputAvg;
use super::aggregator::AggregatorNumeric;
use super::aggregator::AggregatorTimeWeight;
use super::timeweight::timeweight_events_dyn::BinnedEventsTimeweightDynbox;
use core::fmt;
use daqbuf_err as err;
use err::thiserror;
use err::ThisError;
use items_0::timebin::BinningggContainerEventsDyn;
use items_0::vecpreview::PreviewRange;
use items_0::vecpreview::VecPreview;
use items_0::AsAnyRef;
use netpod::BinnedRange;
use netpod::TsNano;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::collections::VecDeque;
#[allow(unused)]
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
// Errors for value containers; no variants defined yet.
#[derive(Debug, ThisError)]
#[cstm(name = "ValueContainerError")]
pub enum ValueContainerError {}
// Minimal FIFO storage abstraction for event values of type `EVT`.
pub trait Container<EVT>: fmt::Debug + Send + Clone + PreviewRange + Serialize + for<'a> Deserialize<'a> {
    // Creates an empty container.
    fn new() -> Self;
    // fn verify(&self) -> Result<(), ValueContainerError>;
    // Appends a value at the back.
    fn push_back(&mut self, val: EVT);
    // Removes and returns the oldest value; `None` when empty.
    fn pop_front(&mut self) -> Option<EVT>;
}
// A scalar event value type: storable in a container and aggregatable
// with a time-weighted aggregator.
pub trait EventValueType: fmt::Debug + Clone + PartialOrd + Send + 'static + Serialize {
    // Backing storage used for values of this type.
    type Container: Container<Self>;
    // Time-weighted aggregator implementation for this type.
    type AggregatorTimeWeight: AggregatorTimeWeight<Self>;
    // Output type of the time-weighted average.
    type AggTimeWeightOutputAvg: AggTimeWeightOutputAvg;
    // fn identity_sum() -> Self;
    // fn add_weighted(&self, add: &Self, f: f32) -> Self;
}
impl<EVT> Container<EVT> for VecDeque<EVT>
where
    EVT: EventValueType + Serialize + for<'a> Deserialize<'a>,
{
    // An empty deque.
    fn new() -> Self {
        VecDeque::new()
    }
    // Delegates to the inherent `VecDeque` method (UFCS makes the
    // non-recursive dispatch explicit).
    fn push_back(&mut self, val: EVT) {
        VecDeque::push_back(self, val);
    }
    // Delegates to the inherent `VecDeque` method.
    fn pop_front(&mut self) -> Option<EVT> {
        VecDeque::pop_front(self)
    }
}
// Implements `EventValueType` for primitives that store in a `VecDeque`,
// aggregate numerically, and report averages as `f64`.
macro_rules! impl_event_value_type {
    ($evt:ty) => {
        impl EventValueType for $evt {
            type Container = VecDeque<Self>;
            type AggregatorTimeWeight = AggregatorNumeric;
            type AggTimeWeightOutputAvg = f64;
        }
    };
}
// All integer primitives share the numeric aggregator with f64 averages.
impl_event_value_type!(u8);
impl_event_value_type!(u16);
impl_event_value_type!(u32);
impl_event_value_type!(u64);
impl_event_value_type!(i8);
impl_event_value_type!(i16);
impl_event_value_type!(i32);
impl_event_value_type!(i64);
// Floats are implemented by hand below to keep their own average precision.
// impl_event_value_type!(f32);
// impl_event_value_type!(f64);
// Floats keep their own precision for the reported average
// (f32 -> f32, f64 -> f64), unlike the integer impls above.
impl EventValueType for f32 {
    type Container = VecDeque<Self>;
    type AggregatorTimeWeight = AggregatorNumeric;
    type AggTimeWeightOutputAvg = f32;
}
impl EventValueType for f64 {
    type Container = VecDeque<Self>;
    type AggregatorTimeWeight = AggregatorNumeric;
    type AggTimeWeightOutputAvg = f64;
}
// NOTE(review): bool and String reuse `AggregatorNumeric` with an f64
// average output — verify this is meaningful for non-numeric types.
impl EventValueType for bool {
    type Container = VecDeque<Self>;
    type AggregatorTimeWeight = AggregatorNumeric;
    type AggTimeWeightOutputAvg = f64;
}
impl EventValueType for String {
    type Container = VecDeque<Self>;
    type AggregatorTimeWeight = AggregatorNumeric;
    type AggTimeWeightOutputAvg = f64;
}
// A single event: timestamp plus value.
#[derive(Debug, Clone)]
pub struct EventSingle<EVT> {
    pub ts: TsNano,
    pub val: EVT,
}
// Errors from validating an events container.
#[derive(Debug, ThisError)]
#[cstm(name = "EventsContainerError")]
pub enum EventsContainerError {
    // Timestamps are not in non-decreasing order.
    Unordered,
}
// Column-oriented event storage: timestamps and values held in
// parallel containers of equal length.
#[derive(Clone, Serialize, Deserialize)]
pub struct ContainerEvents<EVT>
where
    EVT: EventValueType,
{
    tss: VecDeque<TsNano>,
    vals: <EVT as EventValueType>::Container,
}
impl<EVT> ContainerEvents<EVT>
where
    EVT: EventValueType,
{
    // Builds a container directly from its two parallel columns.
    pub fn from_constituents(tss: VecDeque<TsNano>, vals: <EVT as EventValueType>::Container) -> Self {
        Self { tss, vals }
    }
    // Fully qualified name of this concrete container type.
    pub fn type_name() -> &'static str {
        any::type_name::<Self>()
    }
    // An empty container.
    pub fn new() -> Self {
        Self {
            tss: VecDeque::new(),
            vals: Container::new(),
        }
    }
    // Number of stored events.
    pub fn len(&self) -> usize {
        self.tss.len()
    }
    // Ensures the timestamps are in non-decreasing order.
    pub fn verify(&self) -> Result<(), EventsContainerError> {
        let mut prev: Option<TsNano> = None;
        for &ts in self.tss.iter() {
            if let Some(p) = prev {
                if p > ts {
                    return Err(EventsContainerError::Unordered);
                }
            }
            prev = Some(ts);
        }
        Ok(())
    }
    // Timestamp of the first event, if any.
    pub fn ts_first(&self) -> Option<TsNano> {
        self.tss.front().copied()
    }
    // Timestamp of the last event, if any.
    pub fn ts_last(&self) -> Option<TsNano> {
        self.tss.back().copied()
    }
    // Number of events with timestamp strictly before `end`.
    // Relies on `tss` being ordered (see `verify`).
    pub fn len_before(&self, end: TsNano) -> usize {
        let pp = self.tss.partition_point(|&x| x < end);
        assert!(pp <= self.len(), "len_before pp {} len {}", pp, self.len());
        pp
    }
    // Removes and returns the oldest event; `None` when empty.
    pub fn pop_front(&mut self) -> Option<EventSingle<EVT>> {
        let ts = self.tss.pop_front();
        let val = self.vals.pop_front();
        match (ts, val) {
            (Some(ts), Some(val)) => Some(EventSingle { ts, val }),
            _ => None,
        }
    }
    // Appends an event at the end.
    pub fn push_back(&mut self, ts: TsNano, val: EVT) {
        self.tss.push_back(ts);
        self.vals.push_back(val);
    }
}
impl<EVT> fmt::Debug for ContainerEvents<EVT>
where
    EVT: EventValueType,
{
    // Compact debug output: length plus bounded previews of both columns.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let self_name = any::type_name::<Self>();
        write!(
            fmt,
            "{self_name} {{ len: {:?}, tss: {:?}, vals {:?} }}",
            self.len(),
            VecPreview::new(&self.tss),
            VecPreview::new(&self.vals),
        )
    }
}
impl<EVT> AsAnyRef for ContainerEvents<EVT>
where
    EVT: EventValueType,
{
    // Upcast to `&dyn Any` for dynamic downcasting.
    fn as_any_ref(&self) -> &dyn any::Any {
        self
    }
}
// Bounded front-view over a `ContainerEvents`: pops at most `len` events.
pub struct ContainerEventsTakeUpTo<'a, EVT>
where
    EVT: EventValueType,
{
    evs: &'a mut ContainerEvents<EVT>,
    // Remaining pop budget; always <= evs.len() at construction.
    len: usize,
}
impl<'a, EVT> ContainerEventsTakeUpTo<'a, EVT>
where
    EVT: EventValueType,
{
    // Builds a bounded front-view over `evs`; `len` is clamped to the
    // number of events actually present.
    pub fn new(evs: &'a mut ContainerEvents<EVT>, len: usize) -> Self {
        let avail = evs.len();
        Self {
            evs,
            len: usize::min(len, avail),
        }
    }
}
impl<'a, EVT> ContainerEventsTakeUpTo<'a, EVT>
where
    EVT: EventValueType,
{
    // First event timestamp of the underlying container, if any.
    pub fn ts_first(&self) -> Option<TsNano> {
        self.evs.ts_first()
    }
    // Last event timestamp of the underlying container, if any.
    pub fn ts_last(&self) -> Option<TsNano> {
        self.evs.ts_last()
    }
    // Remaining number of events this view will still yield.
    pub fn len(&self) -> usize {
        self.len
    }
    // Pops the next event while the budget lasts; `None` once exhausted
    // or when the underlying container runs out.
    pub fn pop_front(&mut self) -> Option<EventSingle<EVT>> {
        if self.len == 0 {
            return None;
        }
        let ev = self.evs.pop_front()?;
        self.len -= 1;
        Some(ev)
    }
}
impl<EVT> BinningggContainerEventsDyn for ContainerEvents<EVT>
where
EVT: EventValueType,
{
fn type_name(&self) -> &'static str {
std::any::type_name::<Self>()
}
fn binned_events_timeweight_traitobj(
&self,
range: BinnedRange<TsNano>,
) -> Box<dyn items_0::timebin::BinnedEventsTimeweightTrait> {
BinnedEventsTimeweightDynbox::<EVT>::new(range)
}
fn to_anybox(&mut self) -> Box<dyn std::any::Any> {
let ret = core::mem::replace(self, Self::new());
Box::new(ret)
}
}

View File

@@ -1,15 +0,0 @@
mod events00;
use super::container_events::ContainerEvents;
use super::___;
use netpod::log::*;
use std::any;
#[test]
fn test_use_serde() {
    // A fresh container can be viewed as `Any`, downcast to its own type
    // (but not others), and round-trips through serde JSON.
    let x = ContainerEvents::<f32>::new();
    let a: &dyn any::Any = &x;
    assert!(a.downcast_ref::<String>().is_none());
    assert!(a.downcast_ref::<ContainerEvents<f32>>().is_some());
    let s = serde_json::to_string(&x).unwrap();
    let _: ContainerEvents<f32> = serde_json::from_str(&s).unwrap();
}

View File

@@ -1,488 +0,0 @@
use crate::binning::container_bins::ContainerBins;
use crate::binning::container_events::ContainerEvents;
use crate::binning::timeweight::timeweight_events::BinnedEventsTimeweight;
use daqbuf_err as err;
use err::thiserror;
use err::ThisError;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::BinnedRange;
use netpod::DtMs;
use netpod::EnumVariant;
use netpod::TsNano;
use std::collections::VecDeque;
// Test-local error type: wraps binner errors and free-form assertion messages.
#[derive(Debug, ThisError)]
#[cstm(name = "Error")]
enum Error {
    // Propagated from the time-weight binner.
    Timeweight(#[from] crate::binning::timeweight::timeweight_events::Error),
    // Assertion failure description produced by the exp_* helpers.
    AssertMsg(String),
}
// fn prepare_data_with_cuts(beg_ms: u64, cuts: VecDeque<u64>) -> VecDeque<ContainerEvents<f32>> {
// let beg = TsNano::from_ms(beg_ms);
// let end = TsNano::from_ms(120);
// let mut cut_next = cuts.pop_front().unwrap_or(u64::MAX);
// let mut ret = VecDeque::new();
// let ivl = DtMs::from_ms_u64(x)
// }
// Shorthand for tests: push an event with a millisecond timestamp onto `c`.
fn pu(c: &mut ContainerEvents<f32>, ts_ms: u64, val: f32) {
    let ts = TsNano::from_ms(ts_ms);
    c.push_back(ts, val);
}
// Parses a whitespace-separated list of u64 literals (test helper).
trait IntoVecDequeU64 {
    fn into_vec_deque_u64(self) -> VecDeque<u64>;
}
impl IntoVecDequeU64 for &str {
    fn into_vec_deque_u64(self) -> VecDeque<u64> {
        let mut out = VecDeque::new();
        for tok in self.split_ascii_whitespace() {
            out.push_back(tok.parse().unwrap());
        }
        out
    }
}
// Parses a whitespace-separated list of f32 literals (test helper).
trait IntoVecDequeF32 {
    fn into_vec_deque_f32(self) -> VecDeque<f32>;
}
impl IntoVecDequeF32 for &str {
    fn into_vec_deque_f32(self) -> VecDeque<f32> {
        let mut out = VecDeque::new();
        for tok in self.split_ascii_whitespace() {
            out.push_back(tok.parse().unwrap());
        }
        out
    }
}
fn exp_u64<'a>(
vals: impl Iterator<Item = &'a u64>,
exps: impl Iterator<Item = &'a u64>,
tag: &str,
) -> Result<(), Error> {
let mut it_a = vals;
let mut it_b = exps;
let mut i = 0;
loop {
let a = it_a.next();
let b = it_b.next();
if a.is_none() && b.is_none() {
break;
}
if let (Some(&val), Some(&exp)) = (a, b) {
if val != exp {
return Err(Error::AssertMsg(format!("{tag} val {} exp {} i {}", val, exp, i)));
}
} else {
return Err(Error::AssertMsg(format!("{tag} len mismatch")));
}
i += 1;
}
Ok(())
}
fn exp_f32<'a>(
vals: impl Iterator<Item = &'a f32>,
exps: impl Iterator<Item = &'a f32>,
tag: &str,
) -> Result<(), Error> {
let mut it_a = vals;
let mut it_b = exps;
let mut i = 0;
loop {
let a = it_a.next();
let b = it_b.next();
if a.is_none() && b.is_none() {
break;
}
if let (Some(&val), Some(&exp)) = (a, b) {
if netpod::f32_close(val, exp) == false {
return Err(Error::AssertMsg(format!("{tag} val {} exp {} i {}", val, exp, i)));
}
} else {
return Err(Error::AssertMsg(format!("{tag} len mismatch")));
}
i += 1;
}
Ok(())
}
// Asserts the bin event counts against a whitespace-separated literal.
#[cfg(test)]
fn exp_cnts(bins: &ContainerBins<f32>, exps: impl IntoVecDequeU64) -> Result<(), Error> {
    exp_u64(bins.cnts_iter(), exps.into_vec_deque_u64().iter(), "exp_cnts")
}
// Asserts the bin minima against a whitespace-separated literal.
#[cfg(test)]
fn exp_mins(bins: &ContainerBins<f32>, exps: impl IntoVecDequeF32) -> Result<(), Error> {
    exp_f32(bins.mins_iter(), exps.into_vec_deque_f32().iter(), "exp_mins")
}
// Asserts the bin maxima against a whitespace-separated literal.
#[cfg(test)]
fn exp_maxs(bins: &ContainerBins<f32>, exps: impl IntoVecDequeF32) -> Result<(), Error> {
    exp_f32(bins.maxs_iter(), exps.into_vec_deque_f32().iter(), "exp_maxs")
}
// Asserts the bin averages against a whitespace-separated literal.
// Uses `iter_debug` because the average lives on the debug bin view.
// Fix: gated with #[cfg(test)] for consistency with exp_cnts/exp_mins/
// exp_maxs and to avoid a dead-code warning in non-test builds.
#[cfg(test)]
fn exp_avgs(bins: &ContainerBins<f32>, exps: impl IntoVecDequeF32) -> Result<(), Error> {
    let exps = exps.into_vec_deque_f32();
    let mut it_a = bins.iter_debug();
    let mut it_b = exps.iter();
    let mut i = 0;
    loop {
        let a = it_a.next();
        let b = it_b.next();
        if a.is_none() && b.is_none() {
            break;
        }
        if let (Some(a), Some(&exp)) = (a, b) {
            let val = *a.avg as f32;
            if netpod::f32_close(val, exp) == false {
                return Err(Error::AssertMsg(format!("exp_avgs val {} exp {} i {}", val, exp, i)));
            }
        } else {
            return Err(Error::AssertMsg(format!(
                "len mismatch {} vs {}",
                bins.len(),
                exps.len()
            )));
        }
        i += 1;
    }
    Ok(())
}
#[test]
fn test_bin_events_f32_simple_with_before_00() -> Result<(), Error> {
    // One event before the requested range: the single bin reports its
    // value with an event count of zero (time-weighted carry-over).
    let nano_range = NanoRange {
        beg: TsNano::from_ms(110).ns(),
        end: TsNano::from_ms(120).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(103), 2.0);
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    let bins = binner.output();
    exp_cnts(&bins, "0")?;
    exp_mins(&bins, "2.")?;
    exp_maxs(&bins, "2.")?;
    exp_avgs(&bins, "2.")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_simple_with_before_01_range_final() -> Result<(), Error> {
    // A single pre-range event carried across two bins; range is final.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(110).ns(),
        end: TsNano::from_ms(130).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(103), 2.0);
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    let bins = binner.output();
    exp_cnts(&bins, "0 0")?;
    exp_mins(&bins, "2. 2.")?;
    exp_maxs(&bins, "2. 2.")?;
    exp_avgs(&bins, "2. 2.")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_simple_00() -> Result<(), Error> {
    // Two containers covering two bins; range left open.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(100).ns(),
        end: TsNano::from_ms(120).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(100), 2.0);
    evs.push_back(TsNano::from_ms(104), 2.4);
    binner.ingest(evs)?;
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(111), 1.0);
    evs.push_back(TsNano::from_ms(112), 1.2);
    evs.push_back(TsNano::from_ms(113), 1.4);
    binner.ingest(evs)?;
    binner.input_done_range_open()?;
    let bins = binner.output();
    for b in bins.iter_debug() {
        trace!("{b:?}");
    }
    exp_cnts(&bins, "2 3")?;
    exp_mins(&bins, "2. 1.")?;
    exp_maxs(&bins, "2.4 2.4")?;
    exp_avgs(&bins, "2.24 1.5333")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_simple_01() -> Result<(), Error> {
    // Like simple_00, but the first event starts after the range begin.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(100).ns(),
        end: TsNano::from_ms(120).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(102), 2.0);
    evs.push_back(TsNano::from_ms(104), 2.4);
    binner.ingest(evs)?;
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(111), 1.0);
    evs.push_back(TsNano::from_ms(112), 1.2);
    evs.push_back(TsNano::from_ms(113), 1.4);
    binner.ingest(evs)?;
    binner.input_done_range_open()?;
    let bins = binner.output();
    for b in bins.iter_debug() {
        trace!("{b:?}");
    }
    assert_eq!(bins.len(), 2);
    exp_cnts(&bins, "2 3")?;
    exp_mins(&bins, "2. 1.")?;
    exp_maxs(&bins, "2.4 2.4")?;
    exp_avgs(&bins, "2.30 1.5333")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_small_range_final() -> Result<(), Error> {
    // Same data as simple_01, but the range is declared final: the last
    // bin's average covers the full bin width.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(100).ns(),
        end: TsNano::from_ms(120).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(102), 2.0);
    evs.push_back(TsNano::from_ms(104), 2.4);
    binner.ingest(evs)?;
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(111), 1.0);
    evs.push_back(TsNano::from_ms(112), 1.2);
    evs.push_back(TsNano::from_ms(113), 1.4);
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    let bins = binner.output();
    for b in bins.iter_debug() {
        trace!("{b:?}");
    }
    assert_eq!(bins.len(), 2);
    exp_cnts(&bins, "2 3")?;
    exp_mins(&bins, "2. 1.")?;
    exp_maxs(&bins, "2.4 2.4")?;
    exp_avgs(&bins, "2.30 1.44")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_small_intermittent_silence_range_open() -> Result<(), Error> {
    // Events with a long gap (no events in bins 3 and 4); range left open.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(100).ns(),
        end: TsNano::from_ms(150).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(102), 2.0);
    evs.push_back(TsNano::from_ms(104), 2.4);
    binner.ingest(evs)?;
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(111), 1.0);
    evs.push_back(TsNano::from_ms(112), 1.2);
    binner.ingest(evs)?;
    // TODO take bins already here and assert.
    // TODO combine all bins together for combined assert.
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(113), 1.4);
    evs.push_back(TsNano::from_ms(146), 1.3);
    evs.push_back(TsNano::from_ms(148), 1.2);
    binner.ingest(evs)?;
    binner.input_done_range_open()?;
    let bins = binner.output();
    for b in bins.iter_debug() {
        trace!("{b:?}");
    }
    assert_eq!(bins.len(), 5);
    exp_cnts(&bins, "2 3 0 0 2")?;
    exp_mins(&bins, "2.0 1.0 1.4 1.4 1.2")?;
    exp_maxs(&bins, "2.4 2.4 1.4 1.4 1.4")?;
    exp_avgs(&bins, "2.30 1.44 1.4 1.4 1.375")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_small_intermittent_silence_range_final() -> Result<(), Error> {
    // Same data as the range_open variant, but final: the last bin's
    // average changes because it covers the whole bin width.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(100).ns(),
        end: TsNano::from_ms(150).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(102), 2.0);
    evs.push_back(TsNano::from_ms(104), 2.4);
    binner.ingest(evs)?;
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(111), 1.0);
    evs.push_back(TsNano::from_ms(112), 1.2);
    binner.ingest(evs)?;
    // TODO take bins already here and assert.
    // TODO combine all bins together for combined assert.
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(113), 1.4);
    evs.push_back(TsNano::from_ms(146), 1.3);
    evs.push_back(TsNano::from_ms(148), 1.2);
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    let bins = binner.output();
    for b in bins.iter_debug() {
        trace!("{b:?}");
    }
    exp_cnts(&bins, "2 3 0 0 2")?;
    exp_mins(&bins, "2.0 1.0 1.4 1.4 1.2")?;
    exp_maxs(&bins, "2.4 2.4 1.4 1.4 1.4")?;
    exp_avgs(&bins, "2.30 1.44 1.4 1.4 1.34")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_small_intermittent_silence_minmax_no_edge_range_final() -> Result<(), Error> {
    // Pre-range event (109) plus one in-range event (111, not on the bin
    // edge): the carried value participates in max but not in cnt.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(110).ns(),
        end: TsNano::from_ms(120).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(109), 50.);
    binner.ingest(evs)?;
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(111), 40.);
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    let bins = binner.output();
    for b in bins.iter_debug() {
        trace!("{b:?}");
    }
    exp_cnts(&bins, "1")?;
    exp_mins(&bins, "40.")?;
    exp_maxs(&bins, "50.")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_f32_small_intermittent_silence_minmax_edge_range_final() -> Result<(), Error> {
    // Pre-range event (109) plus an event exactly on the bin edge (110):
    // the carried value does not contribute to the min/max in this case.
    let nano_range = NanoRange {
        beg: TsNano::from_ms(110).ns(),
        end: TsNano::from_ms(120).ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(109), 50.);
    binner.ingest(evs)?;
    let mut evs = ContainerEvents::<f32>::new();
    evs.push_back(TsNano::from_ms(110), 40.);
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    let bins = binner.output();
    for b in bins.iter_debug() {
        trace!("{b:?}");
    }
    exp_cnts(&bins, "1")?;
    exp_mins(&bins, "40.")?;
    exp_maxs(&bins, "40.")?;
    // A second output call yields nothing further.
    assert_eq!(binner.output().len(), 0);
    Ok(())
}
#[test]
fn test_bin_events_enum_simple_range_final() -> Result<(), Error> {
    // Smoke test: enum-variant events can be ingested and binned without error.
    let beg = TsNano::from_ms(100);
    let end = TsNano::from_ms(120);
    let nano_range = NanoRange {
        beg: beg.ns(),
        end: end.ns(),
    };
    let range = BinnedRange::from_nano_range(nano_range, DtMs::from_ms_u64(10));
    let mut binner = BinnedEventsTimeweight::new(range);
    let mut evs = ContainerEvents::new();
    evs.push_back(TsNano::from_ms(103), EnumVariant::new(1, "one"));
    evs.push_back(TsNano::from_ms(104), EnumVariant::new(2, "two"));
    binner.ingest(evs)?;
    binner.input_done_range_final()?;
    // Fix: the result was bound but never inspected (unused-variable
    // warning). TODO(review): add assertions on the produced bins.
    let _bins = binner.output();
    Ok(())
}

View File

@@ -1,16 +0,0 @@
pub mod timeweight_bins;
pub mod timeweight_bins_dyn;
pub mod timeweight_events;
pub mod timeweight_events_dyn;
use super::___;
use netpod::log::*;
// Tracing helpers for this module tree; the `if true` gate allows
// switching individual trace categories off at compile time.
#[allow(unused)]
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_detail { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }

View File

@@ -1,5 +0,0 @@
use super::___;
use netpod::log::*;
// Tracing helper; the `if true` gate allows compile-time disabling.
#[allow(unused)]
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }

View File

@@ -1,27 +0,0 @@
use futures_util::Stream;
use items_0::streamitem::Sitemty;
use items_0::timebin::BinningggContainerBinsDyn;
use netpod::BinnedRange;
use netpod::TsNano;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
// Placeholder stream that will time-bin incoming bin containers; not implemented yet.
pub struct BinnedBinsTimeweightStream {}
impl BinnedBinsTimeweightStream {
    // Not implemented yet (`todo!`); the parameters document the intended
    // interface: a target bin range and an input stream of bin containers.
    pub fn new(
        range: BinnedRange<TsNano>,
        inp: Pin<Box<dyn Stream<Item = Sitemty<Box<dyn BinningggContainerBinsDyn>>> + Send>>,
    ) -> Self {
        todo!()
    }
}
impl Stream for BinnedBinsTimeweightStream {
    type Item = Sitemty<Box<dyn BinningggContainerBinsDyn>>;
    // Not implemented yet (`todo!`).
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        todo!()
    }
}

View File

@@ -1,645 +0,0 @@
use super::super::container_events::EventValueType;
use crate::binning::aggregator::AggregatorTimeWeight;
use crate::binning::container_bins::ContainerBins;
use crate::binning::container_events::ContainerEvents;
use crate::binning::container_events::ContainerEventsTakeUpTo;
use crate::binning::container_events::EventSingle;
use core::fmt;
use daqbuf_err as err;
use err::thiserror;
use err::ThisError;
use netpod::log::*;
use netpod::BinnedRange;
use netpod::DtNano;
use netpod::TsNano;
use std::mem;
// Per-concern tracing switches for the binner; each category can be
// enabled individually by flipping its `if false` gate (all funnel
// through `trace_!`).
#[allow(unused)]
macro_rules! trace_ { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_init { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_cycle { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_event_next { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_init_lst { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_minmax { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_event { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_firsts { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_finish_bin { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_container { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_container_2 { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_fill_until { ($($arg:tt)*) => ( if false { trace_!($($arg)*); }) }
// Branch-hint helper for cold paths; currently unused.
#[cold]
#[inline]
#[allow(unused)]
fn cold() {}
// Enables the extra internal invariant panics in the ingest paths.
const DEBUG_CHECKS: bool = true;
// Errors of the time-weighted event binner. The `No…`/`With…` variants
// signal internal state-machine inconsistencies around the carried last
// event (`lst`) and the per-bin min/max.
#[derive(Debug, ThisError)]
#[cstm(name = "BinnedEventsTimeweight")]
pub enum Error {
    // Propagated from event-container validation.
    BadContainer(#[from] super::super::container_events::EventsContainerError),
    Unordered,
    EventAfterRange,
    NoLstAfterFirst,
    EmptyContainerInnerHandler,
    NoLstButMinMax,
    WithLstButEventBeforeRange,
    WithMinMaxButEventBeforeRange,
    NoMinMaxAfterInit,
    ExpectEventWithinRange,
}
// (min-event, max-event) pair tracked for the active bin.
type MinMax<EVT> = (EventSingle<EVT>, EventSingle<EVT>);
// Shared and exclusive borrows of the "last seen event" (lst) slot;
// newtypes keep the two roles distinct in signatures.
#[derive(Clone)]
struct LstRef<'a, EVT>(&'a EventSingle<EVT>);
struct LstMut<'a, EVT>(&'a mut EventSingle<EVT>);
// Per-bin accumulation state for the time-weighted binner.
#[derive(Debug)]
struct InnerB<EVT>
where
    EVT: EventValueType,
{
    // Number of events counted into the active bin.
    cnt: u64,
    // Edges of the currently active bin.
    active_beg: TsNano,
    active_end: TsNano,
    // Width of the active bin.
    active_len: DtNano,
    // Timestamp up to which the aggregator has been filled.
    filled_until: TsNano,
    // Total time width already ingested into the aggregator.
    filled_width: DtNano,
    // Time-weighted value aggregator for this value type.
    agg: <EVT as EventValueType>::AggregatorTimeWeight,
}
impl<EVT> InnerB<EVT>
where
    EVT: EventValueType,
{
    // Accumulates the time-weighted contribution of the previous value
    // (`lst`) up to the new event's timestamp, advancing the fill position.
    // Precondition (checked when DEBUG_CHECKS): active_beg < ev.ts < active_end.
    // NOTE that this is also used during bin-cycle.
    fn ingest_event_with_lst_gt_range_beg_agg(&mut self, ev: EventSingle<EVT>, lst: LstRef<EVT>) {
        let selfname = "ingest_event_with_lst_gt_range_beg_agg";
        trace_ingest_event!("{selfname} {:?}", ev);
        if DEBUG_CHECKS {
            if ev.ts <= self.active_beg {
                panic!("should never get here");
            }
            if ev.ts >= self.active_end {
                panic!("should never get here");
            }
        }
        let dt = ev.ts.delta(self.filled_until);
        trace_ingest_event!("{selfname} dt {:?} ev {:?}", dt, ev);
        // TODO can the caller already take the value and replace it afterwards with the current value?
        // This fn could swap the value in lst and directly use it.
        // This would require that any call path does not mess with lst.
        // NOTE that this fn is also used during bin-cycle.
        self.agg.ingest(dt, self.active_len, lst.0.val.clone());
        self.filled_width = self.filled_width.add(dt);
        self.filled_until = ev.ts;
    }
    // Aggregates the carried value up to `ev`, then makes `ev` the new lst.
    // Does NOT bump `cnt` — callers do that (see commented line below).
    fn ingest_event_with_lst_gt_range_beg_2(&mut self, ev: EventSingle<EVT>, lst: LstMut<EVT>) -> Result<(), Error> {
        let selfname = "ingest_event_with_lst_gt_range_beg_2";
        trace_ingest_event!("{selfname}");
        self.ingest_event_with_lst_gt_range_beg_agg(ev.clone(), LstRef(lst.0));
        InnerA::apply_lst_after_event_handled(ev, lst);
        // self.cnt += 1;
        Ok(())
    }
    // Single-event path for ev.ts strictly inside the active bin:
    // updates min/max, then aggregates and rotates lst.
    fn ingest_event_with_lst_gt_range_beg(
        &mut self,
        ev: EventSingle<EVT>,
        lst: LstMut<EVT>,
        minmax: &mut MinMax<EVT>,
    ) -> Result<(), Error> {
        let selfname = "ingest_event_with_lst_gt_range_beg";
        trace_ingest_event!("{selfname}");
        // TODO if the event is exactly on the current bin first edge, then there is no contribution to the avg yet
        // and I must initialize the min/max with the current event.
        InnerA::apply_min_max(&ev, minmax);
        self.ingest_event_with_lst_gt_range_beg_2(ev.clone(), lst)?;
        Ok(())
    }
    // Single-event path for ev.ts exactly on the bin begin: there is no
    // time span to aggregate yet, so only min/max and lst are updated.
    fn ingest_event_with_lst_eq_range_beg(
        &mut self,
        ev: EventSingle<EVT>,
        lst: LstMut<EVT>,
        minmax: &mut MinMax<EVT>,
    ) -> Result<(), Error> {
        let selfname = "ingest_event_with_lst_eq_range_beg";
        trace_ingest_event!("{selfname}");
        // TODO if the event is exactly on the current bin first edge, then there is no contribution to the avg yet
        // and I must initialize the min/max with the current event.
        InnerA::apply_min_max(&ev, minmax);
        InnerA::apply_lst_after_event_handled(ev, lst);
        Ok(())
    }
    // Drains `evs`, requiring every event to fall strictly inside the
    // active bin (panics otherwise); bumps `cnt` per event.
    fn ingest_with_lst_gt_range_beg(
        &mut self,
        mut evs: ContainerEventsTakeUpTo<EVT>,
        lst: LstMut<EVT>,
        minmax: &mut MinMax<EVT>,
    ) -> Result<(), Error> {
        let selfname = "ingest_with_lst_gt_range_beg";
        trace_ingest_event!("{selfname}");
        while let Some(ev) = evs.pop_front() {
            trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname);
            if ev.ts <= self.active_beg {
                panic!("should never get here");
            }
            if ev.ts >= self.active_end {
                panic!("should never get here");
            }
            self.ingest_event_with_lst_gt_range_beg(ev.clone(), LstMut(lst.0), minmax)?;
            self.cnt += 1;
        }
        Ok(())
    }
    // Drains `evs` allowing events on or after the bin begin: handles the
    // ts == active_beg case specially, then delegates the remainder to the
    // strictly-greater path.
    fn ingest_with_lst_ge_range_beg(
        &mut self,
        mut evs: ContainerEventsTakeUpTo<EVT>,
        lst: LstMut<EVT>,
        minmax: &mut MinMax<EVT>,
    ) -> Result<(), Error> {
        let selfname = "ingest_with_lst_ge_range_beg";
        trace_ingest_event!("{selfname}");
        while let Some(ev) = evs.pop_front() {
            trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname);
            if ev.ts < self.active_beg {
                panic!("should never get here");
            }
            if ev.ts >= self.active_end {
                panic!("should never get here");
            }
            if ev.ts == self.active_beg {
                self.ingest_event_with_lst_eq_range_beg(ev, LstMut(lst.0), minmax)?;
                self.cnt += 1;
            } else {
                self.ingest_event_with_lst_gt_range_beg(ev.clone(), LstMut(lst.0), minmax)?;
                self.cnt += 1;
                trace_ingest_event!("{selfname} now calling ingest_with_lst_gt_range_beg");
                return self.ingest_with_lst_gt_range_beg(evs, LstMut(lst.0), minmax);
            }
        }
        Ok(())
    }
    // Container entry point once lst and minmax both exist; events before
    // the active bin are a logic error.
    fn ingest_with_lst_minmax(
        &mut self,
        evs: ContainerEventsTakeUpTo<EVT>,
        lst: LstMut<EVT>,
        minmax: &mut MinMax<EVT>,
    ) -> Result<(), Error> {
        let selfname = "ingest_with_lst_minmax";
        trace_ingest_event!("{selfname}");
        // TODO how to handle the min max? I don't take event data yet out of the container.
        if let Some(ts0) = evs.ts_first() {
            trace_ingest_event!("EVENT POP FRONT {selfname}");
            trace_ingest_event!("EVENT TIMESTAMP FRONT {:?} {selfname}", ts0);
            if ts0 < self.active_beg {
                panic!("should never get here");
            } else {
                self.ingest_with_lst_ge_range_beg(evs, lst, minmax)
            }
        } else {
            Ok(())
        }
    }
    // Extends the carried value's time-weighted contribution up to `ts`
    // (e.g. a bin edge) without consuming an event.
    // PRECONDITION: filled_until < ts <= active_end
    fn fill_until(&mut self, ts: TsNano, lst: LstRef<EVT>) {
        let b = self;
        assert!(b.filled_until < ts);
        assert!(ts <= b.active_end);
        let dt = ts.delta(b.filled_until);
        trace_fill_until!("fill_until ts {:?} dt {:?} lst {:?}", ts, dt, lst.0);
        assert!(b.filled_until < ts);
        assert!(ts <= b.active_end);
        b.agg.ingest(dt, b.active_len, lst.0.val.clone());
        b.filled_width = b.filled_width.add(dt);
        b.filled_until = ts;
    }
}
// Wraps the per-bin accumulator with the bin's min/max tracking.
#[derive(Debug)]
struct InnerA<EVT>
where
    EVT: EventValueType,
{
    inner_b: InnerB<EVT>,
    // Min/max events observed for the active bin; `None` until initialized.
    minmax: Option<(EventSingle<EVT>, EventSingle<EVT>)>,
}
impl<EVT> InnerA<EVT>
where
    EVT: EventValueType,
{
    // Widens the (min, max) pair with `ev` if it undercuts/exceeds it.
    fn apply_min_max(ev: &EventSingle<EVT>, minmax: &mut MinMax<EVT>) {
        if ev.val < minmax.0.val {
            minmax.0 = ev.clone();
        }
        if ev.val > minmax.1.val {
            minmax.1 = ev.clone();
        }
    }
    // Stores `ev` as the new "last seen event".
    fn apply_lst_after_event_handled(ev: EventSingle<EVT>, lst: LstMut<EVT>) {
        *lst.0 = ev;
    }
    // Initializes min/max from `ev` alone (no carried value participates).
    fn init_minmax(&mut self, ev: &EventSingle<EVT>) {
        trace_ingest_minmax!("init_minmax {:?}", ev);
        self.minmax = Some((ev.clone(), ev.clone()));
    }
    // Initializes min/max from the carried value, then widens with `ev`.
    fn init_minmax_with_lst(&mut self, ev: &EventSingle<EVT>, lst: LstRef<EVT>) {
        trace_ingest_minmax!("init_minmax_with_lst {:?} {:?}", ev, lst.0);
        let minmax = self.minmax.insert((lst.0.clone(), lst.0.clone()));
        Self::apply_min_max(ev, minmax);
    }
    // Ingest path once a carried value exists. If min/max is not yet
    // initialized, the first event of `evs` initializes it (differently
    // depending on whether it lands exactly on the bin begin), then the
    // remainder is processed by the with-minmax path.
    fn ingest_with_lst(&mut self, mut evs: ContainerEventsTakeUpTo<EVT>, lst: LstMut<EVT>) -> Result<(), Error> {
        let selfname = "ingest_with_lst";
        trace_ingest_container!("{selfname} evs len {}", evs.len());
        let b = &mut self.inner_b;
        if let Some(minmax) = self.minmax.as_mut() {
            b.ingest_with_lst_minmax(evs, lst, minmax)
        } else {
            if let Some(ev) = evs.pop_front() {
                trace_event_next!("EVENT POP FRONT {:?} {selfname:30}", ev);
                let beg = b.active_beg;
                let end = b.active_end;
                if ev.ts < beg {
                    panic!("should never get here");
                } else if ev.ts >= end {
                    panic!("should never get here");
                } else {
                    if ev.ts == beg {
                        // Event exactly on the bin edge: min/max from the
                        // event itself; no time span to aggregate yet.
                        self.init_minmax(&ev);
                        InnerA::apply_lst_after_event_handled(ev, lst);
                        let b = &mut self.inner_b;
                        b.cnt += 1;
                        Ok(())
                    } else {
                        // Event inside the bin: the carried value seeds
                        // min/max and contributes time-weighted until ev.ts.
                        self.init_minmax_with_lst(&ev, LstRef(lst.0));
                        let b = &mut self.inner_b;
                        if let Some(minmax) = self.minmax.as_mut() {
                            if ev.ts == beg {
                                panic!("logic error, is handled before");
                            } else {
                                b.ingest_event_with_lst_gt_range_beg_2(ev, LstMut(lst.0))?;
                            }
                            b.cnt += 1;
                            b.ingest_with_lst_minmax(evs, lst, minmax)
                        } else {
                            Err(Error::NoMinMaxAfterInit)
                        }
                    }
                }
            } else {
                Ok(())
            }
        }
    }
    // Advances the accumulator to the next bin: the new bin begins at the
    // old bin's end (asserted via the divisibility check) and min/max is
    // re-seeded from the carried value.
    fn reset_01(&mut self, lst: LstRef<EVT>) {
        let selfname = "reset_01";
        let b = &mut self.inner_b;
        trace_cycle!(
            "{selfname} active_end {:?} filled_until {:?}",
            b.active_end,
            b.filled_until
        );
        let div = b.active_len.ns();
        let old_end = b.active_end;
        let ts1 = TsNano::from_ns(b.active_end.ns() / div * div);
        assert!(ts1 == old_end);
        b.active_beg = ts1;
        b.active_end = ts1.add_dt_nano(b.active_len);
        b.filled_until = ts1;
        b.filled_width = DtNano::from_ns(0);
        b.cnt = 0;
        self.minmax = Some((lst.0.clone(), lst.0.clone()));
    }
    // Emits the finished active bin into `out` (falling back to the carried
    // value for min/max if none was recorded), then resets for the next bin.
    fn push_out_and_reset(&mut self, lst: LstRef<EVT>, range_final: bool, out: &mut ContainerBins<EVT>) {
        let selfname = "push_out_and_reset";
        // TODO there is not always good enough input to produce a meaningful bin.
        // TODO can we always reset, and what exactly does reset mean here?
        // TODO what logic can I save here? To output a bin I need to have min, max, lst.
        let b = &mut self.inner_b;
        let minmax = self.minmax.get_or_insert_with(|| {
            trace_cycle!("{selfname} minmax not yet set");
            (lst.0.clone(), lst.0.clone())
        });
        {
            // The aggregator result is scaled by how much of the bin was
            // actually covered by input.
            let filled_width_fraction = b.filled_width.fraction_of(b.active_len);
            let res = b.agg.result_and_reset_for_new_bin(filled_width_fraction);
            out.push_back(
                b.active_beg,
                b.active_end,
                b.cnt,
                minmax.0.val.clone(),
                minmax.1.val.clone(),
                res,
                lst.0.val.clone(),
                range_final,
            );
        }
        self.reset_01(lst);
    }
}
// Time-weighted binner: consumes ordered event containers and produces
// `ContainerBins` aligned to `range`.
pub struct BinnedEventsTimeweight<EVT>
where
    EVT: EventValueType,
{
    // Last event seen so far; carried across bins for time weighting.
    lst: Option<EventSingle<EVT>>,
    range: BinnedRange<TsNano>,
    inner_a: InnerA<EVT>,
    // Finished bins awaiting `output()`.
    out: ContainerBins<EVT>,
    // Whether bins without any event are emitted.
    produce_cnt_zero: bool,
}
impl<EVT> fmt::Debug for BinnedEventsTimeweight<EVT>
where
    EVT: EventValueType,
{
    // NOTE(review): `produce_cnt_zero` is not included in the debug
    // output — presumably an oversight; confirm before relying on it.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("BinnedEventsTimeweight")
            .field("lst", &self.lst)
            .field("range", &self.range)
            .field("inner_a", &self.inner_a)
            .field("out", &self.out)
            .finish()
    }
}
impl<EVT> BinnedEventsTimeweight<EVT>
where
    EVT: EventValueType,
{
    /// Create a binner for `range`; the first active bin window starts at
    /// the beginning of the range and spans one bin length.
    pub fn new(range: BinnedRange<TsNano>) -> Self {
        let active_beg = range.nano_beg();
        let active_end = active_beg.add_dt_nano(range.bin_len.to_dt_nano());
        let active_len = active_end.delta(active_beg);
        Self {
            range,
            inner_a: InnerA::<EVT> {
                inner_b: InnerB {
                    cnt: 0,
                    active_beg,
                    active_end,
                    active_len,
                    filled_until: active_beg,
                    filled_width: DtNano::from_ns(0),
                    agg: <<EVT as EventValueType>::AggregatorTimeWeight as AggregatorTimeWeight<EVT>>::new(),
                },
                minmax: None,
            },
            lst: None,
            out: ContainerBins::new(),
            produce_cnt_zero: true,
        }
    }
    /// Builder-style switch: do not emit bins with an event count of zero.
    pub fn disable_cnt_zero(self) -> Self {
        let mut ret = self;
        ret.produce_cnt_zero = false;
        ret
    }
    /// Handle the very first event when no last-seen event exists yet:
    /// record it as `lst` and, if it falls into the active window, count it
    /// and initialize min/max.
    fn ingest_event_without_lst(&mut self, ev: EventSingle<EVT>) -> Result<(), Error> {
        let selfname = "ingest_event_without_lst";
        let b = &self.inner_a.inner_b;
        if ev.ts >= b.active_end {
            // Caller is expected to have cycled the bin window first.
            panic!("{selfname} should never get here");
        } else {
            trace_ingest_init_lst!("ingest_event_without_lst set lst {:?}", ev);
            self.lst = Some(ev.clone());
            if ev.ts >= b.active_beg {
                trace_ingest_minmax!("ingest_event_without_lst");
                self.inner_a.init_minmax(&ev);
                let b = &mut self.inner_a.inner_b;
                b.cnt += 1;
                b.filled_until = ev.ts;
            }
            Ok(())
        }
    }
    /// Ingest a container when no last-seen event exists yet: take the first
    /// event to establish `lst`, then continue with the regular path.
    fn ingest_without_lst(&mut self, mut evs: ContainerEventsTakeUpTo<EVT>) -> Result<(), Error> {
        let selfname = "ingest_without_lst";
        if let Some(ev) = evs.pop_front() {
            trace_event_next!("EVENT POP FRONT {:?} {:30}", ev, selfname);
            if ev.ts >= self.inner_a.inner_b.active_end {
                panic!("{selfname} should never get here");
            } else {
                self.ingest_event_without_lst(ev)?;
                if let Some(lst) = self.lst.as_mut() {
                    self.inner_a.ingest_with_lst(evs, LstMut(lst))
                } else {
                    Err(Error::NoLstAfterFirst)
                }
            }
        } else {
            Ok(())
        }
    }
    // Caller asserts that evs is ordered within the current container
    // and with respect to the last container, if any.
    fn ingest_ordered(&mut self, evs: ContainerEventsTakeUpTo<EVT>) -> Result<(), Error> {
        if let Some(lst) = self.lst.as_mut() {
            self.inner_a.ingest_with_lst(evs, LstMut(lst))
        } else {
            // Invariant: minmax can only exist once a lst was recorded.
            if self.inner_a.minmax.is_some() {
                Err(Error::NoLstButMinMax)
            } else {
                self.ingest_without_lst(evs)
            }
        }
    }
    /// Cycle the bin window forward until `ts` fits into the active bin,
    /// emitting finished (and, depending on configuration, empty) bins.
    fn cycle_01(&mut self, ts: TsNano) {
        let b = &self.inner_a.inner_b;
        trace_cycle!("cycle_01 {:?} {:?}", ts, b.active_end);
        assert!(b.active_beg < ts);
        assert!(b.active_beg <= b.filled_until);
        assert!(b.filled_until < ts);
        assert!(b.filled_until <= b.active_end);
        let div = b.active_len.ns();
        if let Some(lst) = self.lst.as_ref() {
            let lst = LstRef(lst);
            if self.produce_cnt_zero {
                // Emit every intermediate bin, even empty ones.
                let mut i = 0;
                loop {
                    i += 1;
                    assert!(i < 100000, "too many iterations");
                    let b = &self.inner_a.inner_b;
                    if ts > b.filled_until {
                        if ts >= b.active_end {
                            if b.filled_until < b.active_end {
                                self.inner_a.inner_b.fill_until(b.active_end, lst.clone());
                            }
                            self.inner_a.push_out_and_reset(lst.clone(), true, &mut self.out);
                        } else {
                            self.inner_a.inner_b.fill_until(ts, lst.clone());
                        }
                    } else {
                        break;
                    }
                }
            } else {
                // Empty bins are suppressed: emit at most the current bin,
                // then jump the window directly to the bin containing ts.
                let b = &self.inner_a.inner_b;
                if ts > b.filled_until {
                    if ts >= b.active_end {
                        if b.filled_until < b.active_end {
                            self.inner_a.inner_b.fill_until(b.active_end, lst.clone());
                        }
                        self.inner_a.push_out_and_reset(lst.clone(), true, &mut self.out);
                    } else {
                        // TODO should not hit this case. Prove it, assert it.
                        self.inner_a.inner_b.fill_until(ts, lst.clone());
                    }
                } else {
                    // TODO should never hit this case. Count.
                }
                // TODO jump to next bin
                // TODO merge with the other reset
                // Below uses the same code
                let ts1 = TsNano::from_ns(ts.ns() / div * div);
                let b = &mut self.inner_a.inner_b;
                b.active_beg = ts1;
                b.active_end = ts1.add_dt_nano(b.active_len);
                b.filled_until = ts1;
                b.filled_width = DtNano::from_ns(0);
                b.cnt = 0;
                b.agg.reset_for_new_bin();
                // assert!(self.inner_a.minmax.is_none());
                trace_cycle!("cycled direct to {:?} {:?}", b.active_beg, b.active_end);
            }
        } else {
            // No event seen yet: nothing to emit, just move the window.
            assert!(self.inner_a.minmax.is_none());
            // TODO merge with the other reset
            let ts1 = TsNano::from_ns(ts.ns() / div * div);
            let b = &mut self.inner_a.inner_b;
            b.active_beg = ts1;
            b.active_end = ts1.add_dt_nano(b.active_len);
            b.filled_until = ts1;
            b.filled_width = DtNano::from_ns(0);
            b.cnt = 0;
            b.agg.reset_for_new_bin();
            trace_cycle!("cycled direct to {:?} {:?}", b.active_beg, b.active_end);
        }
    }
    /// Flush the in-progress bin without marking the range as final
    /// (used when the input ends with an open range).
    fn cycle_02(&mut self) {
        let b = &self.inner_a.inner_b;
        trace_cycle!("cycle_02 {:?}", b.active_end);
        if let Some(lst) = self.lst.as_ref() {
            let lst = LstRef(lst);
            self.inner_a.push_out_and_reset(lst, false, &mut self.out);
        } else {
            // there is nothing we can produce
        }
    }
    /// Ingest a whole container of events, cycling the bin window as needed.
    /// Events must be ordered; an event at or past the range end is an error.
    pub fn ingest(&mut self, mut evs_all: ContainerEvents<EVT>) -> Result<(), Error> {
        // It is this type's task to find and store the one-before event.
        // We then pass it to the aggregation.
        // AggregatorTimeWeight needs a function for that.
        // What about counting the events that actually fall into the range?
        // Maybe that should be done in this type.
        // That way we can pass the values and weights to the aggregation, and count the in-range here.
        // This type must also "close" the current aggregation by passing the "last" and init the next.
        // ALSO: need to keep track of the "lst". Probably best done in this type as well?
        // TODO should rely on external stream adapter for verification to not duplicate things.
        evs_all.verify()?;
        loop {
            // `break <if-let>`: leaves the loop with () unless a `continue`
            // inside restarts it for the remaining events.
            break if let Some(ts) = evs_all.ts_first() {
                trace_ingest_event!("EVENT TIMESTAMP FRONT {:?} ingest", ts);
                let b = &mut self.inner_a.inner_b;
                if ts >= self.range.nano_end() {
                    return Err(Error::EventAfterRange);
                }
                if ts >= b.active_end {
                    assert!(b.filled_until < b.active_end, "{} < {}", b.filled_until, b.active_end);
                    self.cycle_01(ts);
                }
                let n1 = evs_all.len();
                // Only take events that belong to the current active bin.
                let len_before = evs_all.len_before(self.inner_a.inner_b.active_end);
                let evs = ContainerEventsTakeUpTo::new(&mut evs_all, len_before);
                if let Some(lst) = self.lst.as_ref() {
                    if ts < lst.ts {
                        return Err(Error::Unordered);
                    } else {
                        self.ingest_ordered(evs)?
                    }
                } else {
                    self.ingest_ordered(evs)?
                };
                trace_ingest_container_2!("ingest after still left len evs {}", evs_all.len());
                let n2 = evs_all.len();
                if n2 != 0 {
                    // Guard against infinite loops if nothing was consumed.
                    if n2 == n1 {
                        panic!("no progress");
                    }
                    continue;
                }
            } else {
                ()
            };
        }
        Ok(())
    }
    /// Input finished and the full range was covered: flush through the end
    /// of the range, marking bins as final.
    pub fn input_done_range_final(&mut self) -> Result<(), Error> {
        trace_cycle!("input_done_range_final");
        self.cycle_01(self.range.nano_end());
        Ok(())
    }
    /// Input finished but the range is still open: flush the current bin
    /// without marking it final.
    pub fn input_done_range_open(&mut self) -> Result<(), Error> {
        trace_cycle!("input_done_range_open");
        self.cycle_02();
        Ok(())
    }
    /// Number of finished bins waiting in the output buffer.
    pub fn output_len(&self) -> usize {
        self.out.len()
    }
    /// Drain and return the finished bins, leaving an empty buffer behind.
    pub fn output(&mut self) -> ContainerBins<EVT> {
        mem::replace(&mut self.out, ContainerBins::new())
    }
}

View File

@@ -1,276 +0,0 @@
use super::timeweight_events::BinnedEventsTimeweight;
use crate::binning::container_events::ContainerEvents;
use crate::binning::container_events::EventValueType;
use crate::channelevents::ChannelEvents;
use daqbuf_err as err;
use err::thiserror;
use err::ThisError;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::streamitem::LogItem;
use items_0::streamitem::Sitemty;
use items_0::timebin::BinnedEventsTimeweightTrait;
use items_0::timebin::BinningggContainerBinsDyn;
use items_0::timebin::BinningggError;
use items_0::timebin::BinsBoxed;
use items_0::timebin::EventsBoxed;
use netpod::log::*;
use netpod::BinnedRange;
use netpod::TsNano;
use std::ops::ControlFlow;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
// Compile-time-gated trace macros: flip `false` to `true` to enable.
macro_rules! trace_input_container { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
macro_rules! trace_emit { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
/// Errors of the dyn-boxed time-weighted binner.
#[derive(Debug, ThisError)]
#[cstm(name = "BinnedEventsTimeweightDyn")]
pub enum Error {
    // The lazily created inner dyn binner was expected but absent.
    InnerDynMissing,
}
/// Adapter that lets a concrete `BinnedEventsTimeweight<EVT>` be driven
/// through the type-erased `BinnedEventsTimeweightTrait`.
#[derive(Debug)]
pub struct BinnedEventsTimeweightDynbox<EVT>
where
    EVT: EventValueType,
{
    binner: BinnedEventsTimeweight<EVT>,
}
impl<EVT> BinnedEventsTimeweightDynbox<EVT>
where
    EVT: EventValueType + 'static,
{
    /// Build the concrete binner for `range` and return it behind the
    /// type-erased trait object used by the binning pipeline.
    pub fn new(range: BinnedRange<TsNano>) -> Box<dyn BinnedEventsTimeweightTrait> {
        Box::new(Self {
            binner: BinnedEventsTimeweight::new(range),
        })
    }
}
impl<EVT> BinnedEventsTimeweightTrait for BinnedEventsTimeweightDynbox<EVT>
where
    EVT: EventValueType,
{
    /// Downcast the type-erased events to the concrete
    /// `ContainerEvents<EVT>` and feed them to the inner binner.
    ///
    /// Returns `BinningggError::TypeMismatch` when the box holds a
    /// different event type.
    fn ingest(&mut self, mut evs: EventsBoxed) -> Result<(), BinningggError> {
        match evs.to_anybox().downcast::<ContainerEvents<EVT>>() {
            // Unbox directly; the previous version used a redundant
            // `{ let a = evs; *a }` block and carried commented-out
            // downcast experiments.
            Ok(evs) => Ok(self.binner.ingest(*evs)?),
            Err(_) => Err(BinningggError::TypeMismatch {
                have: evs.type_name().into(),
                expect: std::any::type_name::<ContainerEvents<EVT>>().into(),
            }),
        }
    }
    /// Forward end-of-input for a fully covered range.
    fn input_done_range_final(&mut self) -> Result<(), BinningggError> {
        Ok(self.binner.input_done_range_final()?)
    }
    /// Forward end-of-input for a still-open range.
    fn input_done_range_open(&mut self) -> Result<(), BinningggError> {
        Ok(self.binner.input_done_range_open()?)
    }
    /// Return finished bins as a boxed container, or `None` when empty.
    fn output(&mut self) -> Result<Option<BinsBoxed>, BinningggError> {
        if self.binner.output_len() == 0 {
            Ok(None)
        } else {
            let c = self.binner.output();
            Ok(Some(Box::new(c)))
        }
    }
}
/// Lazily instantiated binner: the concrete event type is only known once
/// the first events arrive, so the dyn binner is created on first ingest.
#[derive(Debug)]
pub struct BinnedEventsTimeweightLazy {
    range: BinnedRange<TsNano>,
    // `None` until the first `ingest` call supplies the event type.
    binned_events: Option<Box<dyn BinnedEventsTimeweightTrait>>,
}
impl BinnedEventsTimeweightLazy {
    /// Create the lazy wrapper; the concrete binner is instantiated on the
    /// first `ingest` call, when the event type becomes known.
    pub fn new(range: BinnedRange<TsNano>) -> Self {
        let binned_events = None;
        Self { binned_events, range }
    }
}
impl BinnedEventsTimeweightTrait for BinnedEventsTimeweightLazy {
    /// Create the concrete binner on first use (the events box knows its
    /// own element type), then forward the events to it.
    fn ingest(&mut self, evs_all: EventsBoxed) -> Result<(), BinningggError> {
        self.binned_events
            .get_or_insert_with(|| evs_all.binned_events_timeweight_traitobj(self.range.clone()))
            .ingest(evs_all)
    }
    /// Forward end-of-input (final range); a no-op when no events ever arrived.
    fn input_done_range_final(&mut self) -> Result<(), BinningggError> {
        self.binned_events
            .as_mut()
            .map(|x| x.input_done_range_final())
            .unwrap_or_else(|| {
                debug!("TODO something to do if we miss the binner here?");
                Ok(())
            })
    }
    /// Forward end-of-input (open range); a no-op when no events ever arrived.
    fn input_done_range_open(&mut self) -> Result<(), BinningggError> {
        self.binned_events
            .as_mut()
            .map(|x| x.input_done_range_open())
            .unwrap_or(Ok(()))
    }
    /// Forward the output request; `None` when the binner was never created.
    fn output(&mut self) -> Result<Option<BinsBoxed>, BinningggError> {
        self.binned_events.as_mut().map(|x| x.output()).unwrap_or(Ok(None))
    }
}
/// Lifecycle of `BinnedEventsTimeweightStream`.
enum StreamState {
    // Still polling the input stream.
    Reading,
    // Terminal item emitted; next poll returns `Ready(None)`.
    Done,
    // Polled after completion; polling again panics.
    Invalid,
}
/// Stream adapter: consumes channel events and yields time-weighted bins.
pub struct BinnedEventsTimeweightStream {
    state: StreamState,
    inp: Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>,
    binned_events: BinnedEventsTimeweightLazy,
    // Set when the input signalled RangeComplete; decides the flush mode.
    range_complete: bool,
}
impl BinnedEventsTimeweightStream {
    /// Wrap `inp`, binning its events into `range`.
    pub fn new(range: BinnedRange<TsNano>, inp: Pin<Box<dyn Stream<Item = Sitemty<ChannelEvents>> + Send>>) -> Self {
        Self {
            state: StreamState::Reading,
            inp,
            binned_events: BinnedEventsTimeweightLazy::new(range),
            range_complete: false,
        }
    }
    /// Process one input item. `Continue(())` means poll the input again;
    /// `Break(poll)` is the value to return from `poll_next`.
    fn handle_sitemty(
        mut self: Pin<&mut Self>,
        item: Sitemty<ChannelEvents>,
        _cx: &mut Context,
    ) -> ControlFlow<Poll<Option<<Self as Stream>::Item>>> {
        use items_0::streamitem::RangeCompletableItem::*;
        use items_0::streamitem::StreamItem::*;
        use ControlFlow::*;
        use Poll::*;
        match item {
            Ok(x) => match x {
                DataItem(x) => match x {
                    Data(x) => match x {
                        // Events: ingest, then emit any bins that became ready.
                        ChannelEvents::Events(evs) => match self.binned_events.ingest(evs.to_container_events()) {
                            Ok(()) => {
                                match self.binned_events.output() {
                                    Ok(Some(x)) => {
                                        if x.len() == 0 {
                                            Continue(())
                                        } else {
                                            Break(Ready(Some(Ok(DataItem(Data(x))))))
                                        }
                                    }
                                    Ok(None) => Continue(()),
                                    Err(e) => Break(Ready(Some(Err(err::Error::from_string(e))))),
                                }
                                // Continue(())
                            }
                            Err(e) => Break(Ready(Some(Err(err::Error::from_string(e))))),
                        },
                        ChannelEvents::Status(_) => {
                            // TODO use the status
                            Continue(())
                        }
                    },
                    // Remember range completion for the flush at end-of-stream.
                    RangeComplete => {
                        self.range_complete = true;
                        Continue(())
                    }
                },
                // Log and stats items pass straight through.
                Log(x) => Break(Ready(Some(Ok(Log(x))))),
                Stats(x) => Break(Ready(Some(Ok(Stats(x))))),
            },
            Err(e) => {
                self.state = StreamState::Done;
                Break(Ready(Some(Err(e))))
            }
        }
    }
    /// Input exhausted: flush the binner (final vs open range) and emit the
    /// remaining bins, or a log item when nothing is ready.
    fn handle_eos(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<<Self as Stream>::Item>> {
        trace_input_container!("handle_eos");
        use items_0::streamitem::RangeCompletableItem::*;
        use items_0::streamitem::StreamItem::*;
        use Poll::*;
        self.state = StreamState::Done;
        if self.range_complete {
            self.binned_events
                .input_done_range_final()
                .map_err(err::Error::from_string)?;
        } else {
            self.binned_events
                .input_done_range_open()
                .map_err(err::Error::from_string)?;
        }
        match self.binned_events.output().map_err(err::Error::from_string)? {
            Some(x) => {
                trace_emit!("seeing ready bins {:?}", x);
                Ready(Some(Ok(DataItem(Data(x)))))
            }
            None => {
                let item = LogItem::from_node(888, Level::INFO, format!("no bins ready on eos"));
                Ready(Some(Ok(Log(item))))
            }
        }
    }
    /// One step of the poll state machine; errors transition to `Done`.
    fn handle_main(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<<Self as Stream>::Item>>> {
        use ControlFlow::*;
        use Poll::*;
        let ret = match &self.state {
            StreamState::Reading => match self.as_mut().inp.poll_next_unpin(cx) {
                Ready(Some(x)) => self.as_mut().handle_sitemty(x, cx),
                Ready(None) => Break(self.as_mut().handle_eos(cx)),
                Pending => Break(Pending),
            },
            StreamState::Done => {
                self.state = StreamState::Invalid;
                Break(Ready(None))
            }
            StreamState::Invalid => {
                panic!("StreamState::Invalid")
            }
        };
        if let Break(Ready(Some(Err(_)))) = ret {
            self.state = StreamState::Done;
        }
        ret
    }
}
impl Stream for BinnedEventsTimeweightStream {
    type Item = Sitemty<Box<dyn BinningggContainerBinsDyn>>;
    /// Drive `handle_main` until it yields a poll result.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use ControlFlow::*;
        loop {
            break match self.as_mut().handle_main(cx) {
                Break(x) => x,
                Continue(()) => continue,
            };
        }
    }
}

View File

@@ -1,85 +0,0 @@
use super::aggregator::AggregatorTimeWeight;
use super::container_events::Container;
use super::container_events::EventValueType;
use core::fmt;
use items_0::vecpreview::PreviewRange;
use netpod::DtNano;
use netpod::EnumVariant;
use serde::Deserialize;
use serde::Serialize;
use std::collections::VecDeque;
/// Column store for enum-variant events: parallel deques of the numeric
/// index and the variant name, one entry per event.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnumVariantContainer {
    ixs: VecDeque<u16>,
    names: VecDeque<String>,
}
impl PreviewRange for EnumVariantContainer {
    /// Debug preview showing only the first and last index.
    fn preview<'a>(&'a self) -> Box<dyn fmt::Debug + 'a> {
        let ret = items_0::vecpreview::PreviewCell {
            a: self.ixs.front(),
            b: self.ixs.back(),
        };
        Box::new(ret)
    }
}
impl Container<EnumVariant> for EnumVariantContainer {
    /// Empty container.
    fn new() -> Self {
        Self {
            ixs: VecDeque::new(),
            names: VecDeque::new(),
        }
    }
    /// Split the variant into its index/name parts and append both columns.
    fn push_back(&mut self, val: EnumVariant) {
        let (ix, name) = val.into_parts();
        self.ixs.push_back(ix);
        self.names.push_back(name);
    }
    /// Reassemble the front element; `None` when either column is empty.
    fn pop_front(&mut self) -> Option<EnumVariant> {
        if let (Some(a), Some(b)) = (self.ixs.pop_front(), self.names.pop_front()) {
            Some(EnumVariant::new(a, b))
        } else {
            None
        }
    }
}
/// Time-weighted aggregator for enum events: accumulates the variant index
/// weighted by the fraction of the bin each value covers.
#[derive(Debug)]
pub struct EnumVariantAggregatorTimeWeight {
    sum: f32,
}
impl AggregatorTimeWeight<EnumVariant> for EnumVariantAggregatorTimeWeight {
    /// Fresh aggregator with an empty time-weighted sum.
    fn new() -> Self {
        Self { sum: 0. }
    }
    /// Accumulate `val`'s numeric index, weighted by the fraction of the
    /// bin length `bl` that the duration `dt` covers.
    fn ingest(&mut self, dt: DtNano, bl: DtNano, val: EnumVariant) {
        // NOTE: a leftover `eprintln!` debug print was removed here; it
        // wrote to stderr on every single ingested event.
        let f = dt.ns() as f32 / bl.ns() as f32;
        self.sum += f * val.ix() as f32;
    }
    fn reset_for_new_bin(&mut self) {
        self.sum = 0.;
    }
    /// Normalize the accumulated sum by the filled fraction of the bin and
    /// reset for the next bin.
    fn result_and_reset_for_new_bin(
        &mut self,
        filled_width_fraction: f32,
    ) -> <EnumVariant as EventValueType>::AggTimeWeightOutputAvg {
        // f32 is Copy; the previous explicit clone was redundant.
        let ret = self.sum;
        self.sum = 0.;
        ret / filled_width_fraction
    }
}
// Wires EnumVariant into the generic binning machinery: its column store,
// its time-weight aggregator, and the aggregate output type (f32 average).
impl EventValueType for EnumVariant {
    type Container = EnumVariantContainer;
    type AggregatorTimeWeight = EnumVariantAggregatorTimeWeight;
    type AggTimeWeightOutputAvg = f32;
}

View File

@@ -1,905 +0,0 @@
use crate::ts_offs_from_abs;
use crate::ts_offs_from_abs_with_anchor;
use crate::IsoDateTime;
use daqbuf_err as err;
use err::Error;
use items_0::collect_s::CollectableDyn;
use items_0::collect_s::CollectableType;
use items_0::collect_s::CollectedDyn;
use items_0::collect_s::CollectorTy;
use items_0::collect_s::ToJsonResult;
use items_0::container::ByteEstimate;
use items_0::overlap::HasTimestampDeque;
use items_0::scalar_ops::AsPrimF32;
use items_0::scalar_ops::ScalarOps;
use items_0::timebin::TimeBinnableTy;
use items_0::timebin::TimeBinnerTy;
use items_0::timebin::TimeBins;
use items_0::vecpreview::VecPreview;
use items_0::AppendAllFrom;
use items_0::AppendEmptyBin;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::Empty;
use items_0::HasNonemptyFirstBin;
use items_0::Resettable;
use items_0::TypeName;
use items_0::WithLen;
use netpod::is_false;
use netpod::log::*;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::SEC;
use netpod::BinnedRange;
use netpod::BinnedRangeEnum;
use netpod::CmpZero;
use netpod::Dim0Kind;
use netpod::TsNano;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use std::mem;
use std::ops::Range;
// NOTE(review): gated with `if true` (always on), unlike the sibling trace
// macros elsewhere which use `if false` — confirm this is intentional.
#[allow(unused)]
macro_rules! trace_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
// TODO make members private
/// Column store of finished scalar (dim0) bins: per bin the two edge
/// timestamps, the event count, min, max, time-weighted average, and the
/// last value seen in the bin.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct BinsDim0<NTY> {
    pub ts1s: VecDeque<u64>,
    pub ts2s: VecDeque<u64>,
    pub cnts: VecDeque<u64>,
    pub mins: VecDeque<NTY>,
    pub maxs: VecDeque<NTY>,
    pub avgs: VecDeque<f32>,
    pub lsts: VecDeque<NTY>,
    pub dim0kind: Option<Dim0Kind>,
}
impl<STY> TypeName for BinsDim0<STY> {
    /// Fully qualified type name, including the generic parameter.
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
impl<NTY> fmt::Debug for BinsDim0<NTY>
where
NTY: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let self_name = any::type_name::<Self>();
if true {
return fmt::Display::fmt(self, fmt);
}
if true {
write!(
fmt,
"{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
self.ts1s.len(),
self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
self.cnts,
self.mins,
self.maxs,
self.avgs,
)
} else {
write!(
fmt,
"{self_name} count {} edges {:?} .. {:?} counts {:?} .. {:?} avgs {:?} .. {:?}",
self.ts1s.len(),
self.ts1s.front().map(|k| k / SEC),
self.ts2s.back().map(|k| k / SEC),
self.cnts.front(),
self.cnts.back(),
self.avgs.front(),
self.avgs.back(),
)
}
}
}
impl<NTY> fmt::Display for BinsDim0<NTY>
where
    NTY: fmt::Debug,
{
    /// One-line summary: length plus a truncated preview of every column.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let self_name = any::type_name::<Self>();
        write!(
            fmt,
            "{self_name} {{ len: {:?}, ts1s: {:?}, ts2s {:?}, counts {:?}, mins {:?}, maxs {:?}, avgs {:?}, lsts {:?} }}",
            self.len(),
            VecPreview::new(&self.ts1s),
            VecPreview::new(&self.ts2s),
            VecPreview::new(&self.cnts),
            VecPreview::new(&self.mins),
            VecPreview::new(&self.maxs),
            VecPreview::new(&self.avgs),
            VecPreview::new(&self.lsts),
        )
    }
}
impl<NTY: ScalarOps> BinsDim0<NTY> {
    /// Append one bin; all columns grow by one entry.
    /// Logs (but does not reject) an average outside [min, max].
    pub fn push(&mut self, ts1: u64, ts2: u64, count: u64, min: NTY, max: NTY, avg: f32, lst: NTY) {
        if avg < min.as_prim_f32_b() || avg > max.as_prim_f32_b() {
            // TODO rounding issues?
            debug!("bad avg");
        }
        self.ts1s.push_back(ts1);
        self.ts2s.push_back(ts2);
        self.cnts.push_back(count);
        self.mins.push_back(min);
        self.maxs.push_back(max);
        self.avgs.push_back(avg);
        self.lsts.push_back(lst);
    }
    /// Approximate equality for tests: exact timestamps, slack-tolerant
    /// mins/maxs/avgs. Note: cnts and lsts are not compared here —
    /// presumably intentional for "slack" comparison; confirm if relied on.
    pub fn equal_slack(&self, other: &Self) -> bool {
        if self.len() != other.len() {
            return false;
        }
        for (&a, &b) in self.ts1s.iter().zip(other.ts1s.iter()) {
            if a != b {
                return false;
            }
        }
        for (&a, &b) in self.ts2s.iter().zip(other.ts2s.iter()) {
            if a != b {
                return false;
            }
        }
        for (a, b) in self.mins.iter().zip(other.mins.iter()) {
            if !a.equal_slack(b) {
                return false;
            }
        }
        for (a, b) in self.maxs.iter().zip(other.maxs.iter()) {
            if !a.equal_slack(b) {
                return false;
            }
        }
        for (a, b) in self.avgs.iter().zip(other.avgs.iter()) {
            if !a.equal_slack(b) {
                return false;
            }
        }
        true
    }
    // TODO make this part of a new bins trait, similar like Events trait.
    // TODO check for error?
    /// Move the bins at `range` out of `self` and append them to `dst`.
    pub fn drain_into(&mut self, dst: &mut Self, range: Range<usize>) -> () {
        dst.ts1s.extend(self.ts1s.drain(range.clone()));
        dst.ts2s.extend(self.ts2s.drain(range.clone()));
        dst.cnts.extend(self.cnts.drain(range.clone()));
        dst.mins.extend(self.mins.drain(range.clone()));
        dst.maxs.extend(self.maxs.drain(range.clone()));
        dst.avgs.extend(self.avgs.drain(range.clone()));
        dst.lsts.extend(self.lsts.drain(range.clone()));
    }
}
// Enables downcasting a shared reference via `dyn Any`.
impl<NTY> AsAnyRef for BinsDim0<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Enables downcasting a mutable reference via `dyn Any`.
impl<STY> AsAnyMut for BinsDim0<STY>
where
    STY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> Empty for BinsDim0<STY> {
    /// A container with every column empty and no dim0 kind assigned.
    fn empty() -> Self {
        Self {
            ts1s: VecDeque::default(),
            ts2s: VecDeque::default(),
            cnts: VecDeque::default(),
            mins: VecDeque::default(),
            maxs: VecDeque::default(),
            avgs: VecDeque::default(),
            lsts: VecDeque::default(),
            dim0kind: Option::default(),
        }
    }
}
impl<STY> WithLen for BinsDim0<STY> {
    /// Number of bins; ts1s is the reference column for length.
    fn len(&self) -> usize {
        self.ts1s.len()
    }
}
impl<STY: ScalarOps> ByteEstimate for BinsDim0<STY> {
    /// Rough payload size: per bin, fixed costs for ts1/ts2/cnt/avg plus a
    /// sampled estimate for min and max (lst is not included in the sum).
    fn byte_estimate(&self) -> u64 {
        // TODO
        // Should use a better estimate for waveform and string types,
        // or keep some aggregated byte count on push.
        let n = self.len();
        if n == 0 {
            0
        } else {
            // TODO use the actual size of one/some of the elements.
            // Sample an element at 2/3 of the container as representative.
            let i = n * 2 / 3;
            let w1 = self.mins[i].byte_estimate();
            let w2 = self.maxs[i].byte_estimate();
            (n as u64 * (8 + 8 + 8 + 4 + w1 + w2)) as u64
        }
    }
}
impl<STY> Resettable for BinsDim0<STY> {
    /// Clear all bin columns, keeping allocated capacity.
    /// NOTE(review): `dim0kind` is intentionally(?) not reset — confirm.
    fn reset(&mut self) {
        self.ts1s.clear();
        self.ts2s.clear();
        self.cnts.clear();
        self.mins.clear();
        self.maxs.clear();
        self.avgs.clear();
        self.lsts.clear();
    }
}
impl<STY: ScalarOps> HasNonemptyFirstBin for BinsDim0<STY> {
    /// True when a first bin exists and its event count is positive.
    fn has_nonempty_first_bin(&self) -> bool {
        match self.cnts.front() {
            Some(&cnt) => cnt > 0,
            None => false,
        }
    }
}
impl<STY: ScalarOps> HasTimestampDeque for BinsDim0<STY> {
    /// Begin edge of the first bin.
    fn timestamp_min(&self) -> Option<u64> {
        self.ts1s.front().map(|x| *x)
    }
    /// End edge of the last bin.
    fn timestamp_max(&self) -> Option<u64> {
        self.ts2s.back().map(|x| *x)
    }
    // Bins carry no pulse ids; these are unimplemented on purpose.
    fn pulse_min(&self) -> Option<u64> {
        todo!()
    }
    fn pulse_max(&self) -> Option<u64> {
        todo!()
    }
}
impl<NTY: ScalarOps> AppendEmptyBin for BinsDim0<NTY> {
    /// Append a zero-count placeholder bin for [ts1, ts2) with zeroed stats.
    fn append_empty_bin(&mut self, ts1: u64, ts2: u64) {
        debug!("AppendEmptyBin::append_empty_bin should not get used");
        self.ts1s.push_back(ts1);
        self.ts2s.push_back(ts2);
        self.cnts.push_back(0);
        self.mins.push_back(NTY::zero_b());
        self.maxs.push_back(NTY::zero_b());
        self.avgs.push_back(0.);
        self.lsts.push_back(NTY::zero_b());
    }
}
impl<NTY: ScalarOps> AppendAllFrom for BinsDim0<NTY> {
    /// Move every bin from `src` into `self`, leaving `src` empty.
    fn append_all_from(&mut self, src: &mut Self) {
        debug!("AppendAllFrom::append_all_from should not get used");
        // VecDeque::append moves all elements and empties the source,
        // identical in effect to extend(drain(..)).
        self.ts1s.append(&mut src.ts1s);
        self.ts2s.append(&mut src.ts2s);
        self.cnts.append(&mut src.cnts);
        self.mins.append(&mut src.mins);
        self.maxs.append(&mut src.maxs);
        self.avgs.append(&mut src.avgs);
        self.lsts.append(&mut src.lsts);
    }
}
impl<NTY: ScalarOps> TimeBins for BinsDim0<NTY> {
    /// Begin edge of the first bin, if any.
    fn ts_min(&self) -> Option<u64> {
        self.ts1s.front().copied()
    }
    /// End edge of the last bin, if any.
    fn ts_max(&self) -> Option<u64> {
        self.ts2s.back().copied()
    }
    /// Both extremes at once; `None` unless both edges exist.
    fn ts_min_max(&self) -> Option<(u64, u64)> {
        let min = self.ts1s.front().copied()?;
        let max = self.ts2s.back().copied()?;
        Some((min, max))
    }
}
/// Re-bins already-binned data onto a coarser grid defined by `binrange`.
#[derive(Debug)]
pub struct BinsDim0TimeBinnerTy<STY>
where
    STY: ScalarOps,
{
    // Edges of the bin currently being aggregated.
    ts1now: TsNano,
    ts2now: TsNano,
    binrange: BinnedRange<TsNano>,
    do_time_weight: bool,
    emit_empty_bins: bool,
    range_complete: bool,
    // Finished output bins.
    out: <Self as TimeBinnerTy>::Output,
    // Running aggregate of the current output bin.
    cnt: u64,
    min: STY,
    max: STY,
    avg: f64,
    lst: STY,
    // How far into the current bin input data has covered.
    filled_up_to: TsNano,
    last_seen_avg: f32,
}
impl<STY> BinsDim0TimeBinnerTy<STY>
where
    STY: ScalarOps,
{
    /// Fully qualified type name, used in log messages.
    pub fn type_name() -> &'static str {
        any::type_name::<Self>()
    }
    /// Create a binner whose first output bin starts at the begin of
    /// `binrange` and spans one bin length.
    pub fn new(binrange: BinnedRange<TsNano>, do_time_weight: bool, emit_empty_bins: bool) -> Self {
        // let ts1now = TsNano::from_ns(binrange.bin_off * binrange.bin_len.ns());
        // let ts2 = ts1.add_dt_nano(binrange.bin_len.to_dt_nano());
        let ts1now = TsNano::from_ns(binrange.nano_beg().ns());
        let ts2now = ts1now.add_dt_nano(binrange.bin_len.to_dt_nano());
        Self {
            ts1now,
            ts2now,
            binrange,
            do_time_weight,
            emit_empty_bins,
            range_complete: false,
            out: <Self as TimeBinnerTy>::Output::empty(),
            cnt: 0,
            min: STY::zero_b(),
            max: STY::zero_b(),
            avg: 0.,
            lst: STY::zero_b(),
            filled_up_to: ts1now,
            last_seen_avg: 0.,
        }
    }
    // used internally for the aggregation
    // NOTE(review): `lst` and `filled_up_to` are not reset here — confirm.
    fn reset_agg(&mut self) {
        self.cnt = 0;
        self.min = STY::zero_b();
        self.max = STY::zero_b();
        self.avg = 0.;
    }
}
impl<STY> TimeBinnerTy for BinsDim0TimeBinnerTy<STY>
where
    STY: ScalarOps,
{
    type Input = BinsDim0<STY>;
    type Output = BinsDim0<STY>;
    /// Fold finer input bins into the current output bin, cycling forward
    /// over bin boundaries as needed. Input bins must be grid-aligned and
    /// no longer than the output bin length.
    fn ingest(&mut self, item: &mut Self::Input) {
        trace_ingest!("<{} as TimeBinnerTy>::ingest {:?}", Self::type_name(), item);
        let mut count_before = 0;
        // Walk all columns of the input in lockstep.
        for ((((((&ts1, &ts2), &cnt), min), max), &avg), lst) in item
            .ts1s
            .iter()
            .zip(&item.ts2s)
            .zip(&item.cnts)
            .zip(&item.mins)
            .zip(&item.maxs)
            .zip(&item.avgs)
            .zip(&item.lsts)
        {
            if ts1 < self.ts1now.ns() {
                if ts2 > self.ts1now.ns() {
                    // Input bin straddles the current begin edge: grids disagree.
                    error!("{} bad input grid mismatch", Self::type_name());
                    continue;
                }
                // warn!("encountered bin from time before {} {}", ts1, self.ts1now.ns());
                trace_ingest!("{} input bin before {}", Self::type_name(), TsNano::from_ns(ts1));
                // Bin entirely before our window: only carry its stats forward.
                self.min = min.clone();
                self.max = max.clone();
                self.lst = lst.clone();
                count_before += 1;
                continue;
            } else {
                if ts2 > self.ts2now.ns() {
                    if ts2 - ts1 > self.ts2now.ns() - self.ts1now.ns() {
                        panic!("incoming bin len too large");
                    } else if ts1 < self.ts2now.ns() {
                        panic!("encountered unaligned input bin");
                    } else {
                        // Input is past the current bin: emit/skip forward.
                        let mut i = 0;
                        while ts1 >= self.ts2now.ns() {
                            self.cycle();
                            i += 1;
                            if i > 50000 {
                                panic!("cycle forward too many iterations");
                            }
                        }
                    }
                } else {
                    // ok, we're still inside the current bin
                }
            }
            if cnt == 0 {
                // ignore input bin, it does not contain any valid information.
            } else {
                if self.cnt == 0 {
                    // First contributing input bin: initialize the aggregate.
                    self.cnt = cnt;
                    self.min = min.clone();
                    self.max = max.clone();
                    if self.do_time_weight {
                        // Weight by the input bin's share of the output bin.
                        let f = (ts2 - ts1) as f64 / (self.ts2now.ns() - self.ts1now.ns()) as f64;
                        self.avg = avg as f64 * f;
                    } else {
                        panic!("TODO non-time-weighted binning to be impl");
                    }
                } else {
                    self.cnt += cnt;
                    if *min < self.min {
                        self.min = min.clone();
                    }
                    if *max > self.max {
                        self.max = max.clone();
                    }
                    if self.do_time_weight {
                        let f = (ts2 - ts1) as f64 / (self.ts2now.ns() - self.ts1now.ns()) as f64;
                        self.avg += avg as f64 * f;
                    } else {
                        panic!("TODO non-time-weighted binning to be impl");
                    }
                }
                self.filled_up_to = TsNano::from_ns(ts2);
                self.last_seen_avg = avg;
            }
        }
        if count_before != 0 {
            warn!(
                "----- seen {} / {} input bins from time before",
                count_before,
                item.len()
            );
        }
    }
    fn set_range_complete(&mut self) {
        self.range_complete = true;
    }
    fn bins_ready_count(&self) -> usize {
        self.out.len()
    }
    /// Take the finished bins, leaving an empty output buffer.
    fn bins_ready(&mut self) -> Option<Self::Output> {
        if self.out.len() != 0 {
            let ret = core::mem::replace(&mut self.out, BinsDim0::empty());
            Some(ret)
        } else {
            None
        }
    }
    /// Close the current bin: pad the unfilled tail with the last value
    /// (time-weighted), then either drop it (empty, `!push_empty`) or
    /// append it to the output.
    fn push_in_progress(&mut self, push_empty: bool) {
        if self.filled_up_to != self.ts2now {
            if self.cnt != 0 {
                info!("push_in_progress partially filled bin");
                if self.do_time_weight {
                    // Extend the last value over the remaining bin width.
                    let f = (self.ts2now.ns() - self.filled_up_to.ns()) as f64
                        / (self.ts2now.ns() - self.ts1now.ns()) as f64;
                    self.avg += self.lst.as_prim_f32_b() as f64 * f;
                    self.filled_up_to = self.ts2now;
                } else {
                    panic!("TODO non-time-weighted binning to be impl");
                }
            } else {
                if self.filled_up_to != self.ts1now {
                    error!("partially filled bin with cnt 0");
                }
            }
        }
        if self.cnt == 0 && !push_empty {
            self.reset_agg();
        } else {
            let min = self.min.clone();
            let max = self.max.clone();
            let avg = self.avg as f32;
            if avg < min.as_prim_f32_b() || avg > max.as_prim_f32_b() {
                // TODO rounding issues?
                debug!("bad avg");
            }
            self.out.ts1s.push_back(self.ts1now.ns());
            self.out.ts2s.push_back(self.ts2now.ns());
            self.out.cnts.push_back(self.cnt);
            self.out.mins.push_back(min);
            self.out.maxs.push_back(max);
            self.out.avgs.push_back(avg);
            self.out.lsts.push_back(self.lst.clone());
            self.reset_agg();
        }
    }
    /// Emit the current bin (even if empty) and advance one bin length.
    fn cycle(&mut self) {
        self.push_in_progress(true);
        self.ts1now = self.ts1now.add_dt_nano(self.binrange.bin_len.to_dt_nano());
        self.ts2now = self.ts2now.add_dt_nano(self.binrange.bin_len.to_dt_nano());
    }
    fn empty(&self) -> Option<Self::Output> {
        Some(<Self as TimeBinnerTy>::Output::empty())
    }
    /// Emit (possibly empty) bins until the full requested range is covered.
    fn append_empty_until_end(&mut self) {
        let mut i = 0;
        while self.ts2now.ns() < self.binrange.full_range().end() {
            self.cycle();
            i += 1;
            if i > 100000 {
                panic!("append_empty_until_end too many iterations");
            }
        }
    }
}
impl<STY: ScalarOps> TimeBinnableTy for BinsDim0<STY> {
    type TimeBinner = BinsDim0TimeBinnerTy<STY>;
    /// Create the matching re-binner; only time-based ranges are supported.
    fn time_binner_new(
        &self,
        binrange: BinnedRangeEnum,
        do_time_weight: bool,
        emit_empty_bins: bool,
    ) -> Self::TimeBinner {
        match binrange {
            BinnedRangeEnum::Time(binrange) => BinsDim0TimeBinnerTy::new(binrange, do_time_weight, emit_empty_bins),
            BinnedRangeEnum::Pulse(_) => todo!("TimeBinnableTy for BinsDim0 Pulse"),
        }
    }
}
// TODO rename to BinsDim0CollectorOutput
/// JSON-serializable result of collecting binned data: bin edges as
/// millisecond/nanosecond offsets from an anchor second, plus per-bin stats
/// and completion metadata.
#[derive(Debug, Serialize, Deserialize)]
pub struct BinsDim0CollectedResult<NTY> {
    // Anchor timestamp in whole seconds; all offsets below are relative to it.
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "ts1Ms")]
    ts1_off_ms: VecDeque<u64>,
    #[serde(rename = "ts2Ms")]
    ts2_off_ms: VecDeque<u64>,
    #[serde(rename = "ts1Ns")]
    ts1_off_ns: VecDeque<u64>,
    #[serde(rename = "ts2Ns")]
    ts2_off_ns: VecDeque<u64>,
    #[serde(rename = "counts")]
    counts: VecDeque<u64>,
    #[serde(rename = "mins")]
    mins: VecDeque<NTY>,
    #[serde(rename = "maxs")]
    maxs: VecDeque<NTY>,
    #[serde(rename = "avgs")]
    avgs: VecDeque<f32>,
    // True when the queried range was fully covered.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")]
    missing_bins: u32,
    // Where a follow-up query should resume, if incomplete.
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
    #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")]
    finished_at: Option<IsoDateTime>,
}
// TODO temporary fix for the enum output
impl<STY> BinsDim0CollectedResult<STY>
where
    STY: ScalarOps,
{
    /// Clone `self` into a freshly boxed `CollectedDyn`.
    ///
    /// Enum results are special-cased: min/max are converted from
    /// `EnumVariant` to the variant's numeric index so the output is a
    /// plain `u16` result. The previous implementation left placeholder
    /// constants in place (`|x| 6` / `|x| 7`) and read `self.mins` for
    /// both fields; it now maps the real indices from mins and maxs.
    pub fn boxed_collected_with_enum_fix(&self) -> Box<dyn CollectedDyn> {
        if let Some(bins) = self
            .as_any_ref()
            .downcast_ref::<BinsDim0CollectedResult<netpod::EnumVariant>>()
        {
            debug!("boxed_collected_with_enum_fix");
            let mins = bins.mins.iter().map(|x| x.ix()).collect();
            let maxs = bins.maxs.iter().map(|x| x.ix()).collect();
            let bins = BinsDim0CollectedResult::<u16> {
                ts_anchor_sec: self.ts_anchor_sec.clone(),
                ts1_off_ms: self.ts1_off_ms.clone(),
                ts2_off_ms: self.ts2_off_ms.clone(),
                ts1_off_ns: self.ts1_off_ns.clone(),
                ts2_off_ns: self.ts2_off_ns.clone(),
                counts: self.counts.clone(),
                mins,
                maxs,
                avgs: self.avgs.clone(),
                range_final: self.range_final.clone(),
                timed_out: self.timed_out.clone(),
                missing_bins: self.missing_bins.clone(),
                continue_at: self.continue_at.clone(),
                finished_at: self.finished_at.clone(),
            };
            Box::new(bins)
        } else {
            // Non-enum types are cloned field by field unchanged.
            let bins = Self {
                ts_anchor_sec: self.ts_anchor_sec.clone(),
                ts1_off_ms: self.ts1_off_ms.clone(),
                ts2_off_ms: self.ts2_off_ms.clone(),
                ts1_off_ns: self.ts1_off_ns.clone(),
                ts2_off_ns: self.ts2_off_ns.clone(),
                counts: self.counts.clone(),
                mins: self.mins.clone(),
                maxs: self.maxs.clone(),
                avgs: self.avgs.clone(),
                range_final: self.range_final.clone(),
                timed_out: self.timed_out.clone(),
                missing_bins: self.missing_bins.clone(),
                continue_at: self.continue_at.clone(),
                finished_at: self.finished_at.clone(),
            };
            Box::new(bins)
        }
    }
}
// Enables downcasting a shared reference via `dyn Any`.
impl<NTY> AsAnyRef for BinsDim0CollectedResult<NTY>
where
    NTY: 'static,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Enables downcasting a mutable reference via `dyn Any`.
impl<NTY> AsAnyMut for BinsDim0CollectedResult<NTY>
where
    NTY: 'static,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> TypeName for BinsDim0CollectedResult<STY> {
    /// Fully qualified type name, including the generic parameter.
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
impl<NTY: ScalarOps> WithLen for BinsDim0CollectedResult<NTY> {
    /// Number of bins; uses `mins` as the reference column (BinsDim0 uses
    /// ts1s — presumably equivalent, all columns grow together).
    fn len(&self) -> usize {
        self.mins.len()
    }
}
// Marker impl: the collected result participates in the dyn collection API.
impl<NTY: ScalarOps> CollectedDyn for BinsDim0CollectedResult<NTY> {}
// Read-only accessors over the collected result.
impl<NTY> BinsDim0CollectedResult<NTY> {
    /// Anchor timestamp in whole seconds.
    pub fn ts_anchor_sec(&self) -> u64 {
        self.ts_anchor_sec
    }
    /// Bin begin edges as ms offsets from the anchor.
    pub fn ts1_off_ms(&self) -> &VecDeque<u64> {
        &self.ts1_off_ms
    }
    /// Bin end edges as ms offsets from the anchor.
    pub fn ts2_off_ms(&self) -> &VecDeque<u64> {
        &self.ts2_off_ms
    }
    pub fn counts(&self) -> &VecDeque<u64> {
        &self.counts
    }
    pub fn range_final(&self) -> bool {
        self.range_final
    }
    pub fn timed_out(&self) -> bool {
        self.timed_out
    }
    pub fn missing_bins(&self) -> u32 {
        self.missing_bins
    }
    /// Resume point for a follow-up query, if the result is incomplete.
    pub fn continue_at(&self) -> Option<IsoDateTime> {
        self.continue_at.clone()
    }
    pub fn mins(&self) -> &VecDeque<NTY> {
        &self.mins
    }
    pub fn maxs(&self) -> &VecDeque<NTY> {
        &self.maxs
    }
    pub fn avgs(&self) -> &VecDeque<f32> {
        &self.avgs
    }
}
impl<NTY: ScalarOps> ToJsonResult for BinsDim0CollectedResult<NTY> {
    /// Serialize via the serde field renames declared on the struct.
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}
/// Accumulates `BinsDim0` batches until a final result is requested.
#[derive(Debug)]
pub struct BinsDim0Collector<NTY> {
    // Lazily initialized on the first `ingest`.
    vals: Option<BinsDim0<NTY>>,
    // Set via `set_timed_out`.
    timed_out: bool,
    // Set via `set_range_complete`.
    range_final: bool,
}
impl<NTY> BinsDim0Collector<NTY> {
    // Own type name, used in log messages.
    pub fn self_name() -> &'static str {
        any::type_name::<Self>()
    }
    pub fn new() -> Self {
        Self {
            timed_out: false,
            range_final: false,
            vals: None,
        }
    }
}
impl<NTY> WithLen for BinsDim0Collector<NTY> {
    /// Number of bins accumulated so far; zero before the first `ingest`.
    fn len(&self) -> usize {
        match &self.vals {
            Some(vals) => WithLen::len(vals),
            None => 0,
        }
    }
}
impl<STY: ScalarOps> ByteEstimate for BinsDim0Collector<STY> {
    /// Estimated byte size of the accumulated bins; zero when empty.
    fn byte_estimate(&self) -> u64 {
        match &self.vals {
            Some(vals) => ByteEstimate::byte_estimate(vals),
            None => 0,
        }
    }
}
impl<NTY: ScalarOps> CollectorTy for BinsDim0Collector<NTY> {
    type Input = BinsDim0<NTY>;
    type Output = BinsDim0CollectedResult<NTY>;
    // Moves all bins out of `src` into the accumulator (src is drained).
    fn ingest(&mut self, src: &mut Self::Input) {
        if self.vals.is_none() {
            self.vals = Some(Self::Input::empty());
        }
        let vals = self.vals.as_mut().unwrap();
        vals.ts1s.append(&mut src.ts1s);
        vals.ts2s.append(&mut src.ts2s);
        vals.cnts.append(&mut src.cnts);
        vals.mins.append(&mut src.mins);
        vals.maxs.append(&mut src.maxs);
        vals.avgs.append(&mut src.avgs);
        vals.lsts.append(&mut src.lsts);
    }
    fn set_range_complete(&mut self) {
        self.range_final = true;
    }
    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }
    fn set_continue_at_here(&mut self) {
        debug!("{}::set_continue_at_here", Self::self_name());
        // TODO for bins, do nothing: either we have all bins or not.
    }
    // Assembles the final output and resets the collector for reuse.
    // `binrange` (when given) provides the expected bin count used to derive
    // `missing_bins`, `continue_at` and `finished_at`. Errors when called
    // before any `ingest` happened.
    fn result(
        &mut self,
        _range: Option<SeriesRange>,
        binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        trace!("trying to make a result from {self:?}");
        let bin_count_exp = if let Some(r) = &binrange {
            r.bin_count() as u32
        } else {
            debug!("no binrange given");
            0
        };
        let mut vals = if let Some(x) = self.vals.take() {
            x
        } else {
            return Err(Error::with_msg_no_trace("BinsDim0Collector without vals"));
        };
        let bin_count = vals.ts1s.len() as u32;
        debug!(
            "result make missing bins bin_count_exp {} bin_count {}",
            bin_count_exp, bin_count
        );
        let (missing_bins, continue_at, finished_at) = if bin_count < bin_count_exp {
            match vals.ts2s.back() {
                Some(&k) => {
                    let missing_bins = bin_count_exp - bin_count;
                    let continue_at = IsoDateTime::from_ns_u64(k);
                    // Extrapolate the finish time using the width of the last bin.
                    let u = k + (k - vals.ts1s.back().unwrap()) * missing_bins as u64;
                    let finished_at = IsoDateTime::from_ns_u64(u);
                    (missing_bins, Some(continue_at), Some(finished_at))
                }
                None => {
                    warn!("can not determine continue-at parameters");
                    (0, None, None)
                }
            }
        } else {
            (0, None, None)
        };
        // `VecDeque::append` in `ingest` can leave the ring buffer split in
        // two slices; warn for visibility, then linearize before converting
        // the absolute timestamps into anchor + offsets.
        if vals.ts1s.as_slices().1.len() != 0 {
            warn!("ts1s non-contiguous");
        }
        if vals.ts2s.as_slices().1.len() != 0 {
            warn!("ts2s non-contiguous");
        }
        let ts1s = vals.ts1s.make_contiguous();
        let ts2s = vals.ts2s.make_contiguous();
        let (ts_anch, ts1ms, ts1ns) = ts_offs_from_abs(ts1s);
        let (ts2ms, ts2ns) = ts_offs_from_abs_with_anchor(ts_anch, ts2s);
        let counts = vals.cnts;
        let mins = vals.mins;
        let maxs = vals.maxs;
        let avgs = vals.avgs;
        let ret = BinsDim0CollectedResult::<NTY> {
            ts_anchor_sec: ts_anch,
            ts1_off_ms: ts1ms,
            ts1_off_ns: ts1ns,
            ts2_off_ms: ts2ms,
            ts2_off_ns: ts2ns,
            counts,
            mins,
            maxs,
            avgs,
            range_final: self.range_final,
            timed_out: self.timed_out,
            missing_bins,
            continue_at,
            finished_at,
        };
        // Reset all state (including flags) so the collector can be reused.
        *self = Self::new();
        Ok(ret)
    }
}
impl<NTY: ScalarOps> CollectableType for BinsDim0<NTY> {
    type Collector = BinsDim0Collector<NTY>;
    // Factory for the collector that gathers this container type.
    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}
/// Aggregates values falling into one series range into a single bin.
#[derive(Debug)]
pub struct BinsDim0Aggregator<NTY> {
    range: SeriesRange,
    cnt: u64,
    // Presumably (min, max, last) of the ingested values, judging by the
    // field name; None until the first value arrives. TODO confirm against
    // the ingest logic (not visible in this chunk).
    minmaxlst: Option<(NTY, NTY, NTY)>,
    // Running sum and sample count for the average.
    sumc: u64,
    sum: f32,
}
impl<NTY: ScalarOps> BinsDim0Aggregator<NTY> {
    /// Creates a fresh aggregator over `range`.
    /// The time-weighting flag is accepted but currently unused.
    pub fn new(range: SeriesRange, _do_time_weight: bool) -> Self {
        Self {
            cnt: 0,
            sumc: 0,
            sum: 0.,
            minmaxlst: None,
            range,
        }
    }
}

View File

@@ -1,523 +0,0 @@
use crate::ts_offs_from_abs;
use crate::ts_offs_from_abs_with_anchor;
use crate::IsoDateTime;
use daqbuf_err as err;
use err::Error;
use items_0::collect_s::CollectableDyn;
use items_0::collect_s::CollectableType;
use items_0::collect_s::CollectedDyn;
use items_0::collect_s::CollectorTy;
use items_0::collect_s::ToJsonResult;
use items_0::container::ByteEstimate;
use items_0::scalar_ops::AsPrimF32;
use items_0::scalar_ops::ScalarOps;
use items_0::timebin::TimeBins;
use items_0::AppendEmptyBin;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::Empty;
use items_0::Resettable;
use items_0::TypeName;
use items_0::WithLen;
use netpod::is_false;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::SEC;
use netpod::BinnedRangeEnum;
use netpod::CmpZero;
use netpod::Dim0Kind;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use std::mem;
use std::ops::Range;
// Compile-time trace toggle: macro_rules! always picks the FIRST matching
// arm, so with the no-op arm first this expands to nothing. Swap the arms
// to enable the eprintln!-based output during debugging.
#[allow(unused)]
macro_rules! trace4 {
    ($($arg:tt)*) => ();
    ($($arg:tt)*) => (eprintln!($($arg)*));
}
/// Dense per-bin statistics columns (count/min/max/avg) keyed by the
/// bin edge timestamps in `ts1s`/`ts2s` (nanoseconds).
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct BinsXbinDim0<NTY> {
    ts1s: VecDeque<u64>,
    ts2s: VecDeque<u64>,
    counts: VecDeque<u64>,
    mins: VecDeque<NTY>,
    maxs: VecDeque<NTY>,
    avgs: VecDeque<f32>,
    // TODO could consider more variables:
    // ts min/max, pulse min/max, avg of mins, avg of maxs, variances, etc...
    dim0kind: Option<Dim0Kind>,
}
impl<STY> TypeName for BinsXbinDim0<STY> {
    // Full Rust type name, mainly used in log messages.
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
impl<NTY> fmt::Debug for BinsXbinDim0<NTY>
where
    NTY: fmt::Debug,
{
    // Summarizes all columns; edge timestamps are printed in whole seconds
    // for readability.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let self_name = any::type_name::<Self>();
        write!(
            fmt,
            "{self_name} count {} ts1s {:?} ts2s {:?} counts {:?} mins {:?} maxs {:?} avgs {:?}",
            self.ts1s.len(),
            self.ts1s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
            self.ts2s.iter().map(|k| k / SEC).collect::<Vec<_>>(),
            self.counts,
            self.mins,
            self.maxs,
            self.avgs,
        )
    }
}
impl<NTY: ScalarOps> BinsXbinDim0<NTY> {
    /// Builds a container directly from its column deques.
    /// The columns are expected to have equal lengths.
    pub fn from_content(
        ts1s: VecDeque<u64>,
        ts2s: VecDeque<u64>,
        counts: VecDeque<u64>,
        mins: VecDeque<NTY>,
        maxs: VecDeque<NTY>,
        avgs: VecDeque<f32>,
    ) -> Self {
        Self {
            ts1s,
            ts2s,
            counts,
            mins,
            maxs,
            avgs,
            dim0kind: None,
        }
    }
    pub fn counts(&self) -> &VecDeque<u64> {
        &self.counts
    }
    /// Appends one bin with the given edges and statistics.
    pub fn push(&mut self, ts1: u64, ts2: u64, count: u64, min: NTY, max: NTY, avg: f32) {
        self.ts1s.push_back(ts1);
        self.ts2s.push_back(ts2);
        self.counts.push_back(count);
        self.mins.push_back(min);
        self.maxs.push_back(max);
        self.avgs.push_back(avg);
    }
    /// Appends one all-zero bin covering `beg..end`.
    pub fn append_zero(&mut self, beg: u64, end: u64) {
        self.ts1s.push_back(beg);
        self.ts2s.push_back(end);
        self.counts.push_back(0);
        self.mins.push_back(NTY::zero_b());
        self.maxs.push_back(NTY::zero_b());
        self.avgs.push_back(0.);
    }
    /// Moves all bins from `src` to the end of `self`, leaving `src` empty.
    pub fn append_all_from(&mut self, src: &mut Self) {
        self.ts1s.extend(src.ts1s.drain(..));
        self.ts2s.extend(src.ts2s.drain(..));
        self.counts.extend(src.counts.drain(..));
        self.mins.extend(src.mins.drain(..));
        self.maxs.extend(src.maxs.drain(..));
        self.avgs.extend(src.avgs.drain(..));
    }
    /// Approximate equality: exact on bin edges, slack (via `equal_slack`)
    /// on mins/maxs/avgs.
    /// NOTE(review): `counts` is not compared — presumably intentional;
    /// confirm before relying on this for strict comparisons.
    pub fn equal_slack(&self, other: &Self) -> bool {
        // BUGFIX: `zip` stops at the shorter sequence, so containers with a
        // different number of bins used to compare as equal. Require equal
        // column lengths first.
        if self.ts1s.len() != other.ts1s.len()
            || self.ts2s.len() != other.ts2s.len()
            || self.mins.len() != other.mins.len()
            || self.maxs.len() != other.maxs.len()
            || self.avgs.len() != other.avgs.len()
        {
            return false;
        }
        for (&a, &b) in self.ts1s.iter().zip(other.ts1s.iter()) {
            if a != b {
                return false;
            }
        }
        for (&a, &b) in self.ts2s.iter().zip(other.ts2s.iter()) {
            if a != b {
                return false;
            }
        }
        for (a, b) in self.mins.iter().zip(other.mins.iter()) {
            if !a.equal_slack(b) {
                return false;
            }
        }
        for (a, b) in self.maxs.iter().zip(other.maxs.iter()) {
            if !a.equal_slack(b) {
                return false;
            }
        }
        for (a, b) in self.avgs.iter().zip(other.avgs.iter()) {
            if !a.equal_slack(b) {
                return false;
            }
        }
        true
    }
}
// Allow downcasting the container behind a `dyn` trait object.
impl<NTY> AsAnyRef for BinsXbinDim0<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef` for dynamic downcasting.
impl<STY> AsAnyMut for BinsXbinDim0<STY>
where
    STY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> Empty for BinsXbinDim0<STY> {
fn empty() -> Self {
Self {
ts1s: VecDeque::new(),
ts2s: VecDeque::new(),
counts: VecDeque::new(),
mins: VecDeque::new(),
maxs: VecDeque::new(),
avgs: VecDeque::new(),
dim0kind: None,
}
}
}
impl<STY> WithLen for BinsXbinDim0<STY> {
    // Number of bins; `ts1s` stands in for the equally-sized columns.
    fn len(&self) -> usize {
        self.ts1s.len()
    }
}
impl<STY: ScalarOps> ByteEstimate for BinsXbinDim0<STY> {
    /// Rough size estimate of the contained data in bytes.
    fn byte_estimate(&self) -> u64 {
        // TODO
        // Should use a better estimate for waveform and string types,
        // or keep some aggregated byte count on push.
        let n = self.len();
        if n == 0 {
            return 0;
        }
        // TODO use the actual size of one/some of the elements.
        // Sample a single representative element for variable-width types.
        let i = n * 2 / 3;
        let w1 = self.mins[i].byte_estimate();
        let w2 = self.maxs[i].byte_estimate();
        // Per bin: two edge timestamps + count (8 each), avg (4), min, max.
        n as u64 * (8 + 8 + 8 + 4 + w1 + w2)
    }
}
impl<STY> Resettable for BinsXbinDim0<STY> {
    // Clears all bin columns. NOTE(review): `dim0kind` is left untouched —
    // confirm whether reset should also clear it.
    fn reset(&mut self) {
        self.ts1s.clear();
        self.ts2s.clear();
        self.counts.clear();
        self.mins.clear();
        self.maxs.clear();
        self.avgs.clear();
    }
}
impl<NTY: ScalarOps> AppendEmptyBin for BinsXbinDim0<NTY> {
    /// Appends one empty (all-zero) bin covering `ts1..ts2`.
    fn append_empty_bin(&mut self, ts1: u64, ts2: u64) {
        // Deduplicated: this body was a verbatim copy of `append_zero`.
        self.append_zero(ts1, ts2);
    }
}
impl<NTY: ScalarOps> TimeBins for BinsXbinDim0<NTY> {
    /// Beginning of the first bin, if any.
    fn ts_min(&self) -> Option<u64> {
        self.ts1s.front().copied()
    }
    /// End of the last bin, if any.
    fn ts_max(&self) -> Option<u64> {
        self.ts2s.back().copied()
    }
    /// Both edges at once; None unless both are present.
    fn ts_min_max(&self) -> Option<(u64, u64)> {
        let min = self.ts1s.front().copied()?;
        let max = self.ts2s.back().copied()?;
        Some((min, max))
    }
}
// TODO rename to BinsDim0CollectorOutput
/// JSON-facing result of bin collection. Timestamps are stored as a whole-
/// second anchor plus per-bin millisecond/nanosecond offsets.
#[derive(Debug, Serialize, Deserialize)]
pub struct BinsXbinDim0CollectedResult<NTY> {
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "ts1Ms")]
    ts1_off_ms: VecDeque<u64>,
    #[serde(rename = "ts2Ms")]
    ts2_off_ms: VecDeque<u64>,
    #[serde(rename = "ts1Ns")]
    ts1_off_ns: VecDeque<u64>,
    #[serde(rename = "ts2Ns")]
    ts2_off_ns: VecDeque<u64>,
    #[serde(rename = "counts")]
    counts: VecDeque<u64>,
    #[serde(rename = "mins")]
    mins: VecDeque<NTY>,
    #[serde(rename = "maxs")]
    maxs: VecDeque<NTY>,
    #[serde(rename = "avgs")]
    avgs: VecDeque<f32>,
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "missingBins", default, skip_serializing_if = "CmpZero::is_zero")]
    missing_bins: u32,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
    #[serde(rename = "finishedAt", default, skip_serializing_if = "Option::is_none")]
    finished_at: Option<IsoDateTime>,
}
// Allow downcasting the result behind a `dyn` trait object.
impl<NTY> AsAnyRef for BinsXbinDim0CollectedResult<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef` for dynamic downcasting.
impl<NTY> AsAnyMut for BinsXbinDim0CollectedResult<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> TypeName for BinsXbinDim0CollectedResult<STY> {
    // Full Rust type name, mainly used in log messages.
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
impl<NTY: ScalarOps> WithLen for BinsXbinDim0CollectedResult<NTY> {
    // Number of bins; `mins` stands in for the equally-sized columns.
    fn len(&self) -> usize {
        self.mins.len()
    }
}
// Marker impl: this result can be returned as a dynamic collected value.
impl<NTY: ScalarOps> CollectedDyn for BinsXbinDim0CollectedResult<NTY> {}
// Read-only accessors over the collected, serialization-ready columns.
impl<NTY> BinsXbinDim0CollectedResult<NTY> {
    pub fn ts_anchor_sec(&self) -> u64 {
        self.ts_anchor_sec
    }
    pub fn ts1_off_ms(&self) -> &VecDeque<u64> {
        &self.ts1_off_ms
    }
    pub fn ts2_off_ms(&self) -> &VecDeque<u64> {
        &self.ts2_off_ms
    }
    pub fn counts(&self) -> &VecDeque<u64> {
        &self.counts
    }
    pub fn range_final(&self) -> bool {
        self.range_final
    }
    pub fn missing_bins(&self) -> u32 {
        self.missing_bins
    }
    pub fn continue_at(&self) -> Option<IsoDateTime> {
        self.continue_at.clone()
    }
    pub fn mins(&self) -> &VecDeque<NTY> {
        &self.mins
    }
    pub fn maxs(&self) -> &VecDeque<NTY> {
        &self.maxs
    }
}
impl<NTY: ScalarOps> ToJsonResult for BinsXbinDim0CollectedResult<NTY> {
    // Serialization goes through the serde derive on the struct.
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}
/// Accumulates `BinsXbinDim0` batches until a final result is requested.
#[derive(Debug)]
pub struct BinsXbinDim0Collector<NTY> {
    // Accumulated bins (eagerly initialized, unlike BinsDim0Collector).
    vals: BinsXbinDim0<NTY>,
    // Set via `set_timed_out`.
    timed_out: bool,
    // Set via `set_range_complete`.
    range_final: bool,
}
impl<NTY> BinsXbinDim0Collector<NTY> {
    // Own type name, used in log messages.
    pub fn self_name() -> &'static str {
        any::type_name::<Self>()
    }
    pub fn new() -> Self {
        Self {
            vals: BinsXbinDim0::empty(),
            timed_out: false,
            range_final: false,
        }
    }
}
impl<NTY> WithLen for BinsXbinDim0Collector<NTY> {
    // Number of bins accumulated so far.
    fn len(&self) -> usize {
        self.vals.len()
    }
}
impl<STY: ScalarOps> ByteEstimate for BinsXbinDim0Collector<STY> {
    // Delegates to the estimate of the accumulated container.
    fn byte_estimate(&self) -> u64 {
        self.vals.byte_estimate()
    }
}
impl<NTY: ScalarOps> CollectorTy for BinsXbinDim0Collector<NTY> {
    type Input = BinsXbinDim0<NTY>;
    type Output = BinsXbinDim0CollectedResult<NTY>;
    /// Moves all bins out of `src` into the accumulator (src is drained).
    fn ingest(&mut self, src: &mut Self::Input) {
        trace!("\n\n----------- BinsXbinDim0Collector ingest\n{:?}\n\n", src);
        // TODO could be optimized by non-contiguous container.
        self.vals.ts1s.append(&mut src.ts1s);
        self.vals.ts2s.append(&mut src.ts2s);
        self.vals.counts.append(&mut src.counts);
        self.vals.mins.append(&mut src.mins);
        self.vals.maxs.append(&mut src.maxs);
        self.vals.avgs.append(&mut src.avgs);
    }
    fn set_range_complete(&mut self) {
        self.range_final = true;
    }
    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }
    fn set_continue_at_here(&mut self) {
        debug!("{}::set_continue_at_here", Self::self_name());
        // TODO for bins, do nothing: either we have all bins or not.
    }
    /// Assembles the final output. `binrange` (when given) provides the
    /// expected bin count used to derive `missing_bins`, `continue_at` and
    /// `finished_at`.
    fn result(
        &mut self,
        _range: Option<SeriesRange>,
        binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        let bin_count_exp = if let Some(r) = &binrange {
            r.bin_count() as u32
        } else {
            0
        };
        let bin_count = self.vals.ts1s.len() as u32;
        let (missing_bins, continue_at, finished_at) = if bin_count < bin_count_exp {
            match self.vals.ts2s.back() {
                Some(&k) => {
                    let missing_bins = bin_count_exp - bin_count;
                    let continue_at = IsoDateTime::from_ns_u64(k);
                    // Extrapolate the finish time using the width of the last bin.
                    let u = k + (k - self.vals.ts1s.back().unwrap()) * missing_bins as u64;
                    let finished_at = IsoDateTime::from_ns_u64(u);
                    (missing_bins, Some(continue_at), Some(finished_at))
                }
                None => {
                    warn!("can not determine continue-at parameters");
                    (0, None, None)
                }
            }
        } else {
            (0, None, None)
        };
        // BUGFIX: `VecDeque::append` in `ingest` can leave the ring buffer
        // split in two slices; this used to panic!() in that (normal) case.
        // Warn for visibility and linearize instead, mirroring
        // BinsDim0Collector::result.
        if self.vals.ts1s.as_slices().1.len() != 0 {
            warn!("ts1s non-contiguous");
        }
        if self.vals.ts2s.as_slices().1.len() != 0 {
            warn!("ts2s non-contiguous");
        }
        let ts1s = self.vals.ts1s.make_contiguous();
        let ts2s = self.vals.ts2s.make_contiguous();
        let tst1 = ts_offs_from_abs(ts1s);
        let tst2 = ts_offs_from_abs_with_anchor(tst1.0, ts2s);
        let counts = mem::replace(&mut self.vals.counts, VecDeque::new());
        let mins = mem::replace(&mut self.vals.mins, VecDeque::new());
        let maxs = mem::replace(&mut self.vals.maxs, VecDeque::new());
        let avgs = mem::replace(&mut self.vals.avgs, VecDeque::new());
        let ret = BinsXbinDim0CollectedResult::<NTY> {
            ts_anchor_sec: tst1.0,
            ts1_off_ms: tst1.1,
            ts1_off_ns: tst1.2,
            ts2_off_ms: tst2.0,
            ts2_off_ns: tst2.1,
            counts,
            mins,
            maxs,
            avgs,
            range_final: self.range_final,
            timed_out: self.timed_out,
            missing_bins,
            continue_at,
            finished_at,
        };
        Ok(ret)
    }
}
impl<NTY: ScalarOps> CollectableType for BinsXbinDim0<NTY> {
    type Collector = BinsXbinDim0Collector<NTY>;
    // Factory for the collector that gathers this container type.
    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}
/// Aggregates values falling into one series range into a single bin.
#[derive(Debug)]
pub struct BinsXbinDim0Aggregator<NTY> {
    range: SeriesRange,
    count: u64,
    min: NTY,
    max: NTY,
    // Carry over to next bin:
    avg: f32,
    // Running sum and sample count for the average.
    sumc: u64,
    sum: f32,
}
impl<NTY: ScalarOps> BinsXbinDim0Aggregator<NTY> {
    /// Creates a fresh aggregator over `range`.
    /// The time-weighting flag is accepted but currently unused.
    pub fn new(range: SeriesRange, _do_time_weight: bool) -> Self {
        Self {
            count: 0,
            sumc: 0,
            sum: 0.,
            avg: 0.,
            min: NTY::zero_b(),
            max: NTY::zero_b(),
            range,
        }
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,58 +0,0 @@
use crate::eventsdim0::EventsDim0;
use crate::eventsdim1::EventsDim1;
use crate::Error;
use daqbuf_err as err;
use items_0::Empty;
use items_0::Events;
use netpod::log::*;
use netpod::EnumVariant;
use netpod::ScalarType;
use netpod::Shape;
pub fn empty_events_dyn_ev(scalar_type: &ScalarType, shape: &Shape) -> Result<Box<dyn Events>, Error> {
let ret: Box<dyn Events> = match shape {
Shape::Scalar => {
use ScalarType::*;
type K<T> = EventsDim0<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
U16 => Box::new(K::<u16>::empty()),
U32 => Box::new(K::<u32>::empty()),
U64 => Box::new(K::<u64>::empty()),
I8 => Box::new(K::<i8>::empty()),
I16 => Box::new(K::<i16>::empty()),
I32 => Box::new(K::<i32>::empty()),
I64 => Box::new(K::<i64>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
BOOL => Box::new(K::<bool>::empty()),
STRING => Box::new(K::<String>::empty()),
Enum => Box::new(K::<EnumVariant>::empty()),
}
}
Shape::Wave(..) => {
use ScalarType::*;
type K<T> = EventsDim1<T>;
match scalar_type {
U8 => Box::new(K::<u8>::empty()),
U16 => Box::new(K::<u16>::empty()),
U32 => Box::new(K::<u32>::empty()),
U64 => Box::new(K::<u64>::empty()),
I8 => Box::new(K::<i8>::empty()),
I16 => Box::new(K::<i16>::empty()),
I32 => Box::new(K::<i32>::empty()),
I64 => Box::new(K::<i64>::empty()),
F32 => Box::new(K::<f32>::empty()),
F64 => Box::new(K::<f64>::empty()),
BOOL => Box::new(K::<bool>::empty()),
STRING => Box::new(K::<String>::empty()),
Enum => Box::new(K::<EnumVariant>::empty()),
}
}
Shape::Image(..) => {
error!("TODO empty_events_dyn_ev {scalar_type:?} {shape:?}");
err::todoval()
}
};
Ok(ret)
}

View File

@@ -1,431 +0,0 @@
use crate::framable::FrameType;
use crate::merger::Mergeable;
use bytes::BytesMut;
use daqbuf_err as err;
use err::thiserror;
use err::ThisError;
use items_0::container::ByteEstimate;
use items_0::framable::FrameTypeInnerStatic;
use items_0::streamitem::EVENT_FULL_FRAME_TYPE_ID;
use items_0::Empty;
use items_0::MergeError;
use items_0::WithLen;
#[allow(unused)]
use netpod::log::*;
use netpod::ScalarType;
use netpod::Shape;
use parse::channelconfig::CompressionMethod;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use serde::Serializer;
use std::borrow::Cow;
use std::collections::VecDeque;
use std::time::Instant;
// Compile-time trace toggle: macro_rules! always picks the FIRST matching
// arm, so with the no-op arm first this expands to nothing (and its
// arguments are never type-checked). Swap the arms to enable tracing.
#[allow(unused)]
macro_rules! trace2 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
/// Raw event records as read from disk: per-event timestamp, pulse id,
/// payload blob plus the metadata needed to interpret the blob.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventFull {
    pub tss: VecDeque<u64>,
    pub pulses: VecDeque<u64>,
    pub blobs: VecDeque<Vec<u8>>,
    //#[serde(with = "decomps_serde")]
    pub scalar_types: VecDeque<ScalarType>,
    pub be: VecDeque<bool>,
    pub shapes: VecDeque<Shape>,
    pub comps: VecDeque<Option<CompressionMethod>>,
    // Largest blob length seen so far; maintained by `push` and `drain_into`.
    pub entry_payload_max: u64,
}
#[allow(unused)]
mod decomps_serde {
    use super::*;
    /// Serializes optional byte buffers as plain `Vec<Option<Vec<u8>>>`.
    pub fn serialize<S>(t: &VecDeque<Option<BytesMut>>, s: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let plain: Vec<Option<Vec<u8>>> = t.iter().map(|k| k.as_ref().map(|j| j[..].to_vec())).collect();
        Serialize::serialize(&plain, s)
    }
    /// Inverse of `serialize`: rebuilds the `BytesMut` buffers.
    pub fn deserialize<'de, D>(d: D) -> Result<VecDeque<Option<BytesMut>>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let plain: Vec<Option<Vec<u8>>> = Deserialize::deserialize(d)?;
        let ret = plain
            .into_iter()
            .map(|k| {
                k.map(|j| {
                    let mut b = BytesMut::new();
                    b.extend_from_slice(&j);
                    b
                })
            })
            .collect();
        Ok(ret)
    }
}
impl EventFull {
    // Appends one event; keeps `entry_payload_max` up to date.
    pub fn push(
        &mut self,
        ts: u64,
        pulse: u64,
        blob: Vec<u8>,
        scalar_type: ScalarType,
        be: bool,
        shape: Shape,
        comp: Option<CompressionMethod>,
    ) {
        let m1 = blob.len();
        self.entry_payload_max = self.entry_payload_max.max(m1 as u64);
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.blobs.push_back(blob);
        self.scalar_types.push_back(scalar_type);
        self.be.push_back(be);
        self.shapes.push_back(shape);
        self.comps.push_back(comp);
    }
    // TODO possible to get rid of this?
    // Keeps only events with ts < end. Assumes `tss` is sorted ascending
    // (TODO confirm — the first ts >= end determines the cut point).
    // NOTE `entry_payload_max` is not recomputed; it stays an upper bound.
    pub fn truncate_ts(&mut self, end: u64) {
        let mut nkeep = usize::MAX;
        for (i, &ts) in self.tss.iter().enumerate() {
            if ts >= end {
                nkeep = i;
                break;
            }
        }
        self.tss.truncate(nkeep);
        self.pulses.truncate(nkeep);
        self.blobs.truncate(nkeep);
        self.scalar_types.truncate(nkeep);
        self.be.truncate(nkeep);
        self.shapes.truncate(nkeep);
        self.comps.truncate(nkeep);
    }
    // NOTE needed because the databuffer actually doesn't write the correct shape per event.
    pub fn overwrite_all_shapes(&mut self, shape: &Shape) {
        for u in &mut self.shapes {
            *u = shape.clone();
        }
    }
    // Removes the last event from every column.
    // NOTE `entry_payload_max` is not recomputed; it stays an upper bound.
    pub fn pop_back(&mut self) {
        self.tss.pop_back();
        self.pulses.pop_back();
        self.blobs.pop_back();
        self.scalar_types.pop_back();
        self.be.pop_back();
        self.shapes.pop_back();
        self.comps.pop_back();
    }
    // Keeps only the events whose position has `true` in `ixs`; positions
    // beyond `ixs.len()` are dropped (the iterator yields None -> false).
    pub fn keep_ixs(&mut self, ixs: &[bool]) {
        fn inner<T>(v: &mut VecDeque<T>, ixs: &[bool]) {
            let mut it = ixs.iter();
            v.retain_mut(move |_| it.next().map(Clone::clone).unwrap_or(false));
        }
        inner(&mut self.tss, ixs);
        inner(&mut self.pulses, ixs);
        inner(&mut self.blobs, ixs);
        inner(&mut self.scalar_types, ixs);
        inner(&mut self.be, ixs);
        inner(&mut self.shapes, ixs);
        inner(&mut self.comps, ixs);
    }
}
impl FrameTypeInnerStatic for EventFull {
    // Wire-format frame tag for full-event frames.
    const FRAME_TYPE_ID: u32 = EVENT_FULL_FRAME_TYPE_ID;
}
impl FrameType for EventFull {
    // Dynamic accessor delegating to the static frame id.
    fn frame_type_id(&self) -> u32 {
        <Self as FrameTypeInnerStatic>::FRAME_TYPE_ID
    }
}
impl Empty for EventFull {
fn empty() -> Self {
Self {
tss: VecDeque::new(),
pulses: VecDeque::new(),
blobs: VecDeque::new(),
scalar_types: VecDeque::new(),
be: VecDeque::new(),
shapes: VecDeque::new(),
comps: VecDeque::new(),
entry_payload_max: 0,
}
}
}
impl WithLen for EventFull {
    // Number of events; `tss` stands in for the equally-sized columns.
    fn len(&self) -> usize {
        self.tss.len()
    }
}
impl ByteEstimate for EventFull {
    // Rough upper bound: fixed per-event overhead plus the largest payload seen.
    fn byte_estimate(&self) -> u64 {
        self.len() as u64 * (64 + self.entry_payload_max)
    }
}
impl Mergeable for EventFull {
    /// Timestamp of the first event, if any.
    fn ts_min(&self) -> Option<u64> {
        self.tss.front().copied()
    }
    /// Timestamp of the last event, if any.
    fn ts_max(&self) -> Option<u64> {
        self.tss.back().copied()
    }
    fn new_empty(&self) -> Self {
        Empty::empty()
    }
    fn clear(&mut self) {
        self.tss.clear();
        self.pulses.clear();
        self.blobs.clear();
        self.scalar_types.clear();
        self.be.clear();
        self.shapes.clear();
        self.comps.clear();
        self.entry_payload_max = 0;
    }
    /// Moves events at positions `range.0..range.1` into `dst`.
    fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> {
        // TODO make it harder to forget new members when the struct may get modified in the future
        let (beg, end) = range;
        // Keep dst's payload-size watermark in sync with the moved blobs.
        dst.entry_payload_max = (beg..end)
            .map(|i| self.blobs[i].len() as u64)
            .fold(dst.entry_payload_max, u64::max);
        dst.tss.extend(self.tss.drain(beg..end));
        dst.pulses.extend(self.pulses.drain(beg..end));
        dst.blobs.extend(self.blobs.drain(beg..end));
        dst.scalar_types.extend(self.scalar_types.drain(beg..end));
        dst.be.extend(self.be.drain(beg..end));
        dst.shapes.extend(self.shapes.drain(beg..end));
        dst.comps.extend(self.comps.drain(beg..end));
        Ok(())
    }
    /// First position with timestamp strictly greater than `ts`.
    fn find_lowest_index_gt(&self, ts: u64) -> Option<usize> {
        self.tss.iter().position(|&m| m > ts)
    }
    /// First position with timestamp greater than or equal to `ts`.
    fn find_lowest_index_ge(&self, ts: u64) -> Option<usize> {
        self.tss.iter().position(|&m| m >= ts)
    }
    /// Last position with timestamp strictly less than `ts`.
    fn find_highest_index_lt(&self, ts: u64) -> Option<usize> {
        self.tss.iter().rposition(|&m| m < ts)
    }
    fn tss(&self) -> Vec<netpod::TsMs> {
        self.tss.iter().copied().map(netpod::TsMs::from_ns_u64).collect()
    }
}
/// Failure modes of `decompress` and `EventFull::shape_derived`.
#[derive(Debug, ThisError, Serialize, Deserialize)]
#[cstm(name = "Decompress")]
pub enum DecompError {
    TooLittleInput,
    // NOTE(review): variant name carries a typo ("Compresion"); renaming
    // would break code that matches on it, so it is kept as-is.
    BadCompresionBlockSize,
    UnusedBytes,
    BitshuffleError,
    ShapeMakesNoSense,
    UnexpectedCompressedScalarValue,
}
/// Decompresses a bitshuffle-lz4 event payload.
/// Header layout: 8 bytes big-endian uncompressed byte count, then 4 bytes
/// big-endian bitshuffle block size, then the compressed stream.
/// `type_size` is the byte width of one scalar element.
fn decompress(databuf: &[u8], type_size: u32) -> Result<Vec<u8>, DecompError> {
    // TODO collect decompression stats
    let ts1 = Instant::now();
    if databuf.len() < 12 {
        return Err(DecompError::TooLittleInput);
    }
    let value_bytes = u64::from_be_bytes(databuf[0..8].try_into().unwrap());
    let block_size = u32::from_be_bytes(databuf[8..12].try_into().unwrap());
    trace2!(
        "decompress len {} value_bytes {} block_size {}",
        databuf.len(),
        value_bytes,
        block_size
    );
    if block_size > 1024 * 32 {
        return Err(DecompError::BadCompresionBlockSize);
    }
    let ele_count = value_bytes / type_size as u64;
    // BUGFIX: this trace used to reference `ele_count_2` and `ele_count_exp`,
    // which do not exist; it only compiled because the active trace2! arm
    // discards its arguments.
    trace2!("ele_count {}", ele_count);
    // Zero-initialize the output buffer instead of `set_len` over
    // uninitialized memory (the old code was unsound if the decompressor
    // read the buffer before writing it).
    let mut decomp = vec![0u8; type_size as usize * ele_count as usize];
    match bitshuffle::bitshuffle_decompress(&databuf[12..], &mut decomp, ele_count as _, type_size as _, 0) {
        Ok(c1) => {
            // The decompressor must consume exactly the payload after the header.
            if 12 + c1 != databuf.len() {
                Err(DecompError::UnusedBytes)
            } else {
                let ts2 = Instant::now();
                let _dt = ts2.duration_since(ts1);
                // TODO analyze the histo
                //self.decomp_dt_histo.ingest(dt.as_secs() as u32 + dt.subsec_micros());
                Ok(decomp)
            }
        }
        Err(_) => Err(DecompError::BitshuffleError),
    }
}
impl EventFull {
    /// Tries to infer the actual shape of the event from what's on disk and what we expect.
    /// The event data on disk usually always indicate "scalar" even for waveforms.
    /// If the data is compressed via bslz4 then we can infer the number of elements
    /// but we still don't know whether that's an image or a waveform.
    /// Therefore, the function accepts the expected shape to at least make an assumption
    /// about whether this is an image or a waveform.
    pub fn shape_derived(
        &self,
        i: usize,
        scalar_type_exp: &ScalarType,
        shape_exp: &Shape,
    ) -> Result<Shape, DecompError> {
        match shape_exp {
            // Scalars are never compressed, except for strings.
            Shape::Scalar => match &self.comps[i] {
                Some(_) => match scalar_type_exp {
                    ScalarType::STRING => Ok(Shape::Scalar),
                    _ => Err(DecompError::UnexpectedCompressedScalarValue),
                },
                None => Ok(Shape::Scalar),
            },
            Shape::Wave(_) => match &self.shapes[i] {
                Shape::Scalar => match &self.comps[i] {
                    Some(comp) => match comp {
                        CompressionMethod::BitshuffleLZ4 => {
                            // Element count = uncompressed byte count (first 8
                            // header bytes, big-endian) / scalar byte width.
                            let type_size = self.scalar_types[i].bytes() as u32;
                            match self.blobs[i][0..8].try_into() {
                                Ok(a) => {
                                    let value_bytes = u64::from_be_bytes(a);
                                    let value_bytes = value_bytes as u32;
                                    if value_bytes % type_size != 0 {
                                        Err(DecompError::ShapeMakesNoSense)
                                    } else {
                                        let n = value_bytes / type_size;
                                        // Here we still can't know whether the disk contains a waveform or image
                                        // so we assume that the user input is correct:
                                        Ok(Shape::Wave(n))
                                    }
                                }
                                Err(_) => Err(DecompError::ShapeMakesNoSense),
                            }
                        }
                    },
                    None => Err(DecompError::ShapeMakesNoSense),
                },
                Shape::Wave(s) => Ok(Shape::Wave(s.clone())),
                Shape::Image(_, _) => Err(DecompError::ShapeMakesNoSense),
            },
            Shape::Image(a, b) => match &self.shapes[i] {
                Shape::Scalar => match &self.comps[i] {
                    Some(comp) => match comp {
                        CompressionMethod::BitshuffleLZ4 => {
                            let type_size = self.scalar_types[i].bytes() as u32;
                            match self.blobs[i][0..8].try_into() {
                                Ok(vb) => {
                                    let value_bytes = u64::from_be_bytes(vb);
                                    let value_bytes = value_bytes as u32;
                                    if value_bytes % type_size != 0 {
                                        Err(DecompError::ShapeMakesNoSense)
                                    } else {
                                        let n = value_bytes / type_size;
                                        // Here we still can't know whether the disk contains a waveform or image
                                        // so we assume that the user input is correct.
                                        // NOTE
                                        // We only know the number of pixels from the compressed blob but we can't
                                        // know the actual shape.
                                        // Can only rely on user input and check that total number of pixels agree.
                                        if *a * *b != n {
                                            Err(DecompError::ShapeMakesNoSense)
                                        } else {
                                            Ok(Shape::Image(*a, *b))
                                        }
                                    }
                                }
                                Err(_) => Err(DecompError::ShapeMakesNoSense),
                            }
                        }
                    },
                    None => Err(DecompError::ShapeMakesNoSense),
                },
                Shape::Wave(_) => Err(DecompError::ShapeMakesNoSense),
                Shape::Image(a, b) => Ok(Shape::Image(*a, *b)),
            },
        }
    }
    // Raw (possibly still compressed) payload bytes of event `i`.
    pub fn data_raw(&self, i: usize) -> &[u8] {
        &self.blobs[i]
    }
    // Payload of event `i`, decompressed if needed. Borrows when the blob is
    // stored uncompressed; owns a fresh buffer otherwise.
    pub fn data_decompressed(&self, i: usize) -> Result<Cow<[u8]>, DecompError> {
        if let Some(comp) = &self.comps[i] {
            match comp {
                CompressionMethod::BitshuffleLZ4 => {
                    // NOTE the event data on databuffer disk seems to contain the correct scalar type
                    // but the shape of the event record seems always "scalar" even for waveforms
                    // so we must derive the shape of the compressed data from the length of the
                    // uncompressed byte blob and the byte size of the scalar type.
                    let type_size = self.scalar_types[i].bytes() as u32;
                    let data = decompress(&self.blobs[i], type_size)?;
                    Ok(Cow::Owned(data))
                }
            }
        } else {
            let data = &self.blobs[i];
            Ok(Cow::Borrowed(data.as_slice()))
        }
    }
}

View File

@@ -1,869 +0,0 @@
use crate::IsoDateTime;
use daqbuf_err as err;
use err::Error;
use items_0::collect_s::CollectableDyn;
use items_0::collect_s::CollectedDyn;
use items_0::collect_s::CollectorTy;
use items_0::collect_s::ToJsonResult;
use items_0::container::ByteEstimate;
use items_0::overlap::HasTimestampDeque;
use items_0::scalar_ops::ScalarOps;
use items_0::Appendable;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::Empty;
use items_0::Events;
use items_0::EventsNonObj;
use items_0::MergeError;
use items_0::Resettable;
use items_0::TypeName;
use items_0::WithLen;
use netpod::is_false;
use netpod::log::*;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::MS;
use netpod::timeunits::SEC;
use netpod::BinnedRangeEnum;
use netpod::TsNano;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use std::mem;
// Per-topic trace toggles: the `if true` / `if false` constant inside each
// macro is flipped at compile time to enable or silence that log category.
#[allow(unused)]
macro_rules! trace_init { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_item { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_ingest_event { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace2 { ($($arg:tt)*) => ( if false { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! trace_binning { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
#[allow(unused)]
macro_rules! debug_ingest { ($($arg:tt)*) => ( if true { trace!($($arg)*); }) }
/// Scalar event batch without per-event pulse ids; convertible into
/// `EventsDim0` (pulse ids default to zero).
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim0NoPulse<STY> {
    pub tss: VecDeque<u64>,
    pub values: VecDeque<STY>,
}
impl<STY> From<EventsDim0NoPulse<STY>> for EventsDim0<STY> {
fn from(value: EventsDim0NoPulse<STY>) -> Self {
let pulses = vec![0; value.tss.len()].into();
Self {
tss: value.tss,
pulses,
values: value.values,
}
}
}
/// Scalar event batch: parallel deques of timestamp (ns), pulse id and value.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim0<STY> {
    pub tss: VecDeque<u64>,
    pub pulses: VecDeque<u64>,
    pub values: VecDeque<STY>,
}
impl<STY> EventsDim0<STY> {
    // Full Rust type name of this instantiation.
    pub fn type_name() -> &'static str {
        std::any::type_name::<Self>()
    }
    // Appends one event at the back of all columns.
    pub fn push_back(&mut self, ts: u64, pulse: u64, value: STY) {
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.values.push_back(value);
    }
    // Prepends one event at the front of all columns.
    pub fn push_front(&mut self, ts: u64, pulse: u64, value: STY) {
        self.tss.push_front(ts);
        self.pulses.push_front(pulse);
        self.values.push_front(value);
    }
    // Stable identifier used in serialized frames.
    pub fn serde_id() -> &'static str {
        "EventsDim0"
    }
    pub fn tss(&self) -> &VecDeque<u64> {
        &self.tss
    }
    // only for testing at the moment
    pub fn private_values_ref(&self) -> &VecDeque<STY> {
        &self.values
    }
    pub fn private_values_mut(&mut self) -> &mut VecDeque<STY> {
        &mut self.values
    }
}
// Allow downcasting the batch behind a `dyn` trait object.
impl<STY> AsAnyRef for EventsDim0<STY>
where
    STY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef` for dynamic downcasting.
impl<STY> AsAnyMut for EventsDim0<STY>
where
    STY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> Empty for EventsDim0<STY> {
fn empty() -> Self {
Self {
tss: VecDeque::new(),
pulses: VecDeque::new(),
values: VecDeque::new(),
}
}
}
impl<STY> fmt::Debug for EventsDim0<STY>
where
    STY: fmt::Debug,
{
    /// Compact summary: event count plus first/last timestamp and value.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Removed a permanently dead `if false` branch that dumped the full
        // timestamp and value lists; this is the previously live arm.
        write!(
            fmt,
            "{} {{ count {} ts {:?} .. {:?} vals {:?} .. {:?} }}",
            self.type_name(),
            self.tss.len(),
            self.tss.front().map(|&x| TsNano::from_ns(x)),
            self.tss.back().map(|&x| TsNano::from_ns(x)),
            self.values.front(),
            self.values.back(),
        )
    }
}
impl<STY> WithLen for EventsDim0<STY> {
    // Number of events; `tss` stands in for the equally-sized columns.
    fn len(&self) -> usize {
        self.tss.len()
    }
}
impl<STY: ScalarOps> ByteEstimate for EventsDim0<STY> {
    // Rough size estimate of the contained data in bytes.
    fn byte_estimate(&self) -> u64 {
        // TODO
        // Should use a better estimate for waveform and string types,
        // or keep some aggregated byte count on push.
        let n = self.len();
        if n == 0 {
            0
        } else {
            // TODO use the actual size of one/some of the elements.
            // Sample a single representative element for variable-width types.
            let i = n * 2 / 3;
            let sty_bytes = self.values[i].byte_estimate();
            // Per event: timestamp (8) + pulse (8) + sampled value width.
            (n as u64 * (8 + 8 + sty_bytes)) as u64
        }
    }
}
impl<STY> Resettable for EventsDim0<STY> {
    // Clears all event columns, keeping allocated capacity.
    fn reset(&mut self) {
        self.tss.clear();
        self.pulses.clear();
        self.values.clear();
    }
}
impl<STY: ScalarOps> HasTimestampDeque for EventsDim0<STY> {
    /// First/last timestamp and pulse id, if the batch is non-empty.
    fn timestamp_min(&self) -> Option<u64> {
        self.tss.front().copied()
    }
    fn timestamp_max(&self) -> Option<u64> {
        self.tss.back().copied()
    }
    fn pulse_min(&self) -> Option<u64> {
        self.pulses.front().copied()
    }
    fn pulse_max(&self) -> Option<u64> {
        self.pulses.back().copied()
    }
}
// Wire format for CBOR chunk output of scalar events (written by
// `to_cbor_vec_u8`).
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim0ChunkOutput<STY> {
    // Absolute timestamps in nanoseconds.
    tss: VecDeque<u64>,
    // Pulse ids, parallel to `tss`.
    pulses: VecDeque<u64>,
    // One value per event, parallel to `tss`.
    values: VecDeque<STY>,
    // Scalar type name so the output is self-describing.
    scalar_type: String,
}
impl<STY: ScalarOps> EventsDim0ChunkOutput<STY> {}
// Accumulates incoming `EventsDim0` batches until a result is requested.
#[derive(Debug)]
pub struct EventsDim0Collector<STY> {
    // Collected events so far.
    vals: EventsDim0<STY>,
    // True when the requested range was fully covered.
    range_final: bool,
    // True when collection was aborted due to a timeout.
    timed_out: bool,
    // True when the output should carry a continue-at hint.
    needs_continue_at: bool,
}
impl<STY> EventsDim0Collector<STY> {
    /// Type name of this collector, for log messages.
    pub fn self_name() -> &'static str {
        any::type_name::<Self>()
    }
    /// Fresh collector with empty storage and all flags cleared.
    pub fn new() -> Self {
        debug!("EventsDim0Collector NEW");
        Self {
            vals: EventsDim0::empty(),
            range_final: false,
            timed_out: false,
            needs_continue_at: false,
        }
    }
}
// Number of events collected so far.
impl<STY> WithLen for EventsDim0Collector<STY> {
    fn len(&self) -> usize {
        WithLen::len(&self.vals)
    }
}
// Delegates to the inner container's estimate.
impl<STY: ScalarOps> ByteEstimate for EventsDim0Collector<STY> {
    fn byte_estimate(&self) -> u64 {
        ByteEstimate::byte_estimate(&self.vals)
    }
}
// JSON-facing result of a scalar-events collection. Timestamps are split
// into an anchor second plus per-event ms/ns offsets to keep the JSON small.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim0CollectorOutput<STY> {
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    // Millisecond offsets relative to the anchor, one per event.
    #[serde(rename = "tsMs")]
    ts_off_ms: VecDeque<u64>,
    // Sub-millisecond remainder in nanoseconds, parallel to `ts_off_ms`.
    #[serde(rename = "tsNs")]
    ts_off_ns: VecDeque<u64>,
    #[serde(rename = "pulseAnchor")]
    pulse_anchor: u64,
    // Pulse ids as offsets from `pulse_anchor`.
    #[serde(rename = "pulseOff")]
    pulse_off: VecDeque<u64>,
    #[serde(rename = "values")]
    values: VecDeque<STY>,
    // Flags are omitted from JSON when false / None.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
}
impl<STY: ScalarOps> EventsDim0CollectorOutput<STY> {
    /// Anchor second for the timestamp offsets.
    pub fn ts_anchor_sec(&self) -> u64 {
        self.ts_anchor_sec
    }
    /// Millisecond offsets relative to the anchor.
    pub fn ts_off_ms(&self) -> &VecDeque<u64> {
        &self.ts_off_ms
    }
    /// Anchor value for the pulse offsets.
    pub fn pulse_anchor(&self) -> u64 {
        self.pulse_anchor
    }
    /// Pulse offsets relative to the anchor.
    pub fn pulse_off(&self) -> &VecDeque<u64> {
        &self.pulse_off
    }
    /// Note: only used for unit tests.
    pub fn values_to_f32(&self) -> VecDeque<f32> {
        self.values.iter().map(|v| v.as_prim_f32_b()).collect()
    }
    /// True when the requested range was fully covered.
    pub fn range_final(&self) -> bool {
        self.range_final
    }
    /// True when collection was cut short by a timeout.
    pub fn timed_out(&self) -> bool {
        self.timed_out
    }
    /// All per-event columns must have identical lengths.
    pub fn is_valid(&self) -> bool {
        let n = self.ts_off_ms.len();
        n == self.ts_off_ns.len() && n == self.pulse_off.len() && n == self.values.len()
    }
    /// One-line summary of the column lengths, for diagnostics.
    pub fn info_str(&self) -> String {
        format!(
            "ts_off_ms {} ts_off_ns {} pulse_off {} values {}",
            self.ts_off_ms.len(),
            self.ts_off_ns.len(),
            self.pulse_off.len(),
            self.values.len(),
        )
    }
}
// Upcast to `&dyn Any` so consumers can downcast to the concrete output.
impl<STY> AsAnyRef for EventsDim0CollectorOutput<STY>
where
    STY: 'static,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef`.
impl<STY> AsAnyMut for EventsDim0CollectorOutput<STY>
where
    STY: 'static,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> TypeName for EventsDim0CollectorOutput<STY> {
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
// Number of collected events.
impl<STY: ScalarOps> WithLen for EventsDim0CollectorOutput<STY> {
    fn len(&self) -> usize {
        self.values.len()
    }
}
// Serialize via serde; the struct's serde renames define the JSON schema.
impl<STY: ScalarOps> ToJsonResult for EventsDim0CollectorOutput<STY> {
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}
// Marker: this type is a finished collection result.
impl<STY: ScalarOps> CollectedDyn for EventsDim0CollectorOutput<STY> {}
impl<STY: ScalarOps> CollectorTy for EventsDim0Collector<STY> {
    type Input = EventsDim0<STY>;
    type Output = EventsDim0CollectorOutput<STY>;
    /// Move all events out of `src` into this collector (empties `src`).
    fn ingest(&mut self, src: &mut Self::Input) {
        self.vals.tss.append(&mut src.tss);
        self.vals.pulses.append(&mut src.pulses);
        self.vals.values.append(&mut src.values);
    }
    fn set_range_complete(&mut self) {
        self.range_final = true;
    }
    /// A timeout both marks the output and requests a continue-at hint.
    fn set_timed_out(&mut self) {
        self.timed_out = true;
        self.needs_continue_at = true;
    }
    fn set_continue_at_here(&mut self) {
        self.needs_continue_at = true;
    }
    /// Consume the collected events and produce the JSON-facing output.
    ///
    /// `range` is only used to derive a continue-at hint when no event was
    /// collected at all. Returns an error when the parallel columns end up
    /// with mismatching lengths.
    fn result(
        &mut self,
        range: Option<SeriesRange>,
        _binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        debug!(
            "{} result() needs_continue_at {}",
            Self::self_name(),
            self.needs_continue_at
        );
        // If we timed out, we want to hint the client from where to continue.
        // This is tricky: currently, client can not request a left-exclusive range.
        // We currently give the timestamp of the last event plus a small delta.
        // The amount of the delta must take into account what kind of timestamp precision the client
        // can parse and handle.
        let vals = &mut self.vals;
        let continue_at = if self.needs_continue_at {
            if let Some(ts) = vals.tss.back() {
                // Round the last timestamp up to the next full millisecond.
                let x = Some(IsoDateTime::from_ns_u64(*ts / MS * MS + MS));
                x
            } else {
                // No events: fall back to the start of the requested range.
                if let Some(range) = &range {
                    match range {
                        SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)),
                        SeriesRange::PulseRange(_) => {
                            error!("TODO emit create continueAt for pulse range");
                            Some(IsoDateTime::from_ns_u64(0))
                        }
                    }
                } else {
                    Some(IsoDateTime::from_ns_u64(0))
                }
            }
        } else {
            None
        };
        // The offset helpers need contiguous slices.
        let tss_sl = vals.tss.make_contiguous();
        let pulses_sl = vals.pulses.make_contiguous();
        let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
        let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl);
        let values = mem::replace(&mut vals.values, VecDeque::new());
        // Guard against inconsistent parallel columns before emitting.
        if ts_off_ms.len() != ts_off_ns.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        if ts_off_ms.len() != pulse_off.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        if ts_off_ms.len() != values.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        let ret = Self::Output {
            ts_anchor_sec,
            ts_off_ms,
            ts_off_ns,
            pulse_anchor,
            pulse_off,
            values,
            range_final: self.range_final,
            timed_out: self.timed_out,
            continue_at,
        };
        if !ret.is_valid() {
            error!("invalid:\n{}", ret.info_str());
        }
        Ok(ret)
    }
}
// Ties the container to its collector type for the generic collect machinery.
impl<STY: ScalarOps> items_0::collect_s::CollectableType for EventsDim0<STY> {
    type Collector = EventsDim0Collector<STY>;
    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}
// State for time-binned aggregation of scalar events over `range`.
#[derive(Debug)]
pub struct EventsDim0Aggregator<STY> {
    // Range being aggregated over.
    range: SeriesRange,
    // Number of events that contributed to the current bin.
    count: u64,
    // Running (min, max, last) values, None until the first event.
    minmaxlst: Option<(STY, STY, STY)>,
    // Count of samples in `sum`.
    sumc: u64,
    sum: f32,
    // assumes these track integration/last-seen timestamps for the
    // time-weighted average — TODO confirm against the aggregation impl.
    int_ts: u64,
    last_ts: u64,
    // Whether averages are weighted by time between events.
    do_time_weight: bool,
    // Events outside the range (or otherwise dropped).
    events_ignored_count: u64,
    items_seen: usize,
}
impl<STY> Drop for EventsDim0Aggregator<STY> {
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        trace!("count {} ignored {}", self.count, self.events_ignored_count);
    }
}
impl<STY> TypeName for EventsDim0<STY> {
    /// Full type name including the scalar type parameter, used in logs.
    fn type_name(&self) -> String {
        // `format!("{self_name}")` added nothing over a direct conversion;
        // `.into()` produces the identical String without the formatter.
        any::type_name::<Self>().into()
    }
}
impl<STY: ScalarOps> EventsNonObj for EventsDim0<STY> {
    /// Consume the container, keeping only timestamps and pulses
    /// (values are dropped).
    fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
        trace!(
            "{}::into_tss_pulses len {} len {}",
            Self::type_name(),
            self.tss.len(),
            self.pulses.len()
        );
        (self.tss, self.pulses)
    }
}
// Helper for `to_container_events`: if `$this` is an `EventsDim0<$sty>`,
// convert it into a `ContainerEvents<$sty>` and return early from the
// enclosing function.
// NOTE(review): the body reads `this.tss` but `evs.values` — both refer to
// the same object since the downcast is of `this` itself, so this is only a
// cosmetic inconsistency.
macro_rules! try_to_container_events {
    ($sty:ty, $this:expr) => {
        let this = $this;
        if let Some(evs) = this.as_any_ref().downcast_ref::<EventsDim0<$sty>>() {
            use crate::binning::container_events::ContainerEvents;
            let tss = this.tss.iter().map(|&x| TsNano::from_ns(x)).collect();
            let vals = evs.values.clone();
            let ret = ContainerEvents::<$sty>::from_constituents(tss, vals);
            return Box::new(ret);
        }
    };
}
impl<STY: ScalarOps> Events for EventsDim0<STY> {
    /// Check that timestamps are non-decreasing; logs the first violation.
    fn verify(&self) -> bool {
        let mut good = true;
        let n = self.tss.len();
        // Zip the series with itself shifted by one to compare neighbors.
        for (&ts1, &ts2) in self.tss.iter().zip(self.tss.range(n.min(1)..n)) {
            if ts1 > ts2 {
                good = false;
                error!("unordered event data ts1 {} ts2 {}", ts1, ts2);
                break;
            }
        }
        good
    }
    /// Human-readable summary: count plus first/last timestamp.
    fn output_info(&self) -> String {
        let n2 = self.tss.len().max(1) - 1;
        let min = if let Some(ts) = self.tss.get(0) {
            TsNano::from_ns(*ts).fmt().to_string()
        } else {
            String::from("None")
        };
        let max = if let Some(ts) = self.tss.get(n2) {
            TsNano::from_ns(*ts).fmt().to_string()
        } else {
            String::from("None")
        };
        format!(
            "EventsDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
            self.tss.len(),
            min,
            max,
        )
    }
    fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
        self
    }
    fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
        self
    }
    fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
        self
    }
    /// Remove and return all events with timestamp <= `ts_end`
    /// (relies on `tss` being sorted).
    fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
        // TODO improve the search
        let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
        let tss = self.tss.drain(..n1).collect();
        let pulses = self.pulses.drain(..n1).collect();
        let values = self.values.drain(..n1).collect();
        let ret = Self { tss, pulses, values };
        Box::new(ret)
    }
    fn new_empty_evs(&self) -> Box<dyn Events> {
        Box::new(Self::empty())
    }
    /// Move events in index range `[range.0, range.1)` into `dst`, which must
    /// be the same concrete type. Panics on a type mismatch.
    fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> {
        // TODO as_any and as_any_mut are declared on unrelated traits. Simplify.
        if let Some(dst) = dst.as_any_mut().downcast_mut::<Self>() {
            // TODO make it harder to forget new members when the struct may get modified in the future
            let r = range.0..range.1;
            dst.tss.extend(self.tss.drain(r.clone()));
            dst.pulses.extend(self.pulses.drain(r.clone()));
            dst.values.extend(self.values.drain(r.clone()));
            Ok(())
        } else {
            error!(
                "downcast to EventsDim0 FAILED\n\n{}\n\n{}\n\n",
                self.type_name(),
                dst.type_name()
            );
            panic!();
            // NOTE(review): unreachable — `panic!()` above diverges, so this
            // `Err` is dead code kept only to satisfy older revisions.
            Err(MergeError::NotCompatible)
        }
    }
    /// Index of the first event strictly after `ts` (linear scan).
    fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate() {
            if m > ts {
                return Some(i);
            }
        }
        None
    }
    /// Index of the first event at or after `ts` (linear scan).
    fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate() {
            if m >= ts {
                return Some(i);
            }
        }
        None
    }
    /// Index of the last event strictly before `ts` (reverse linear scan).
    fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate().rev() {
            if m < ts {
                return Some(i);
            }
        }
        None
    }
    fn ts_min(&self) -> Option<u64> {
        self.tss.front().map(|&x| x)
    }
    fn ts_max(&self) -> Option<u64> {
        self.tss.back().map(|&x| x)
    }
    /// Equality across `dyn Events`: false when the concrete types differ.
    fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
        if let Some(other) = other.as_any_ref().downcast_ref::<Self>() {
            self == other
        } else {
            false
        }
    }
    fn serde_id(&self) -> &'static str {
        Self::serde_id()
    }
    /// Numeric sub-type tag of the scalar type.
    fn nty_id(&self) -> u32 {
        STY::SUB
    }
    fn clone_dyn(&self) -> Box<dyn Events> {
        Box::new(self.clone())
    }
    fn tss(&self) -> &VecDeque<u64> {
        &self.tss
    }
    fn pulses(&self) -> &VecDeque<u64> {
        &self.pulses
    }
    fn frame_type_id(&self) -> u32 {
        error!("TODO frame_type_id should not be called");
        // TODO make more nice
        panic!()
    }
    /// Takes the content out of `self` (leaving it empty); for the scalar
    /// case min == max == avg, so the events are returned as-is.
    fn to_min_max_avg(&mut self) -> Box<dyn Events> {
        let dst = Self {
            tss: mem::replace(&mut self.tss, Default::default()),
            pulses: mem::replace(&mut self.pulses, Default::default()),
            values: mem::replace(&mut self.values, Default::default()),
        };
        Box::new(dst)
    }
    /// JSON in the same schema as `EventsDim0CollectorOutput`, with flags off.
    fn to_json_string(&self) -> String {
        // TODO redesign with mut access, rename to `into_` and take the values out.
        let mut tss = self.tss.clone();
        let mut pulses = self.pulses.clone();
        let mut values = self.values.clone();
        let tss_sl = tss.make_contiguous();
        let pulses_sl = pulses.make_contiguous();
        let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
        let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl);
        let values = mem::replace(&mut values, VecDeque::new());
        let ret = EventsDim0CollectorOutput {
            ts_anchor_sec,
            ts_off_ms,
            ts_off_ns,
            pulse_anchor,
            pulse_off,
            values,
            range_final: false,
            timed_out: false,
            continue_at: None,
        };
        serde_json::to_string(&ret).unwrap()
    }
    fn to_json_vec_u8(&self) -> Vec<u8> {
        self.to_json_string().into_bytes()
    }
    /// CBOR encoding of the raw columns (`EventsDim0ChunkOutput` schema).
    fn to_cbor_vec_u8(&self) -> Vec<u8> {
        // TODO redesign with mut access, rename to `into_` and take the values out.
        let ret = EventsDim0ChunkOutput {
            // TODO use &mut to swap the content
            tss: self.tss.clone(),
            pulses: self.pulses.clone(),
            values: self.values.clone(),
            scalar_type: STY::scalar_type_name().into(),
        };
        let mut buf = Vec::new();
        ciborium::into_writer(&ret, &mut buf).unwrap();
        buf
    }
    fn clear(&mut self) {
        self.tss.clear();
        self.pulses.clear();
        self.values.clear();
    }
    /// Copy into an f32 container for binning; pulses are zeroed.
    fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
        let mut ret = EventsDim0::empty();
        for (&ts, val) in self.tss.iter().zip(self.values.iter()) {
            ret.push(ts, 0, val.as_prim_f32_b());
        }
        Box::new(ret)
    }
    /// Convert to the binning container representation by trying each
    /// supported scalar type in turn; panics via `todo!` on an unknown type.
    fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
        try_to_container_events!(u8, self);
        try_to_container_events!(u16, self);
        try_to_container_events!(u32, self);
        try_to_container_events!(u64, self);
        try_to_container_events!(i8, self);
        try_to_container_events!(i16, self);
        try_to_container_events!(i32, self);
        try_to_container_events!(i64, self);
        try_to_container_events!(f32, self);
        try_to_container_events!(f64, self);
        try_to_container_events!(bool, self);
        try_to_container_events!(String, self);
        let this = self;
        // EnumVariant needs a dedicated container type, so it is handled
        // outside the macro.
        if let Some(evs) = self.as_any_ref().downcast_ref::<EventsDim0<netpod::EnumVariant>>() {
            use crate::binning::container_events::ContainerEvents;
            let tss = this.tss.iter().map(|&x| TsNano::from_ns(x)).collect();
            use crate::binning::container_events::Container;
            let mut vals = crate::binning::valuetype::EnumVariantContainer::new();
            for x in evs.values.iter() {
                vals.push_back(x.clone());
            }
            let ret = ContainerEvents::<netpod::EnumVariant>::from_constituents(tss, vals);
            return Box::new(ret);
        }
        let styn = any::type_name::<STY>();
        todo!("TODO to_container_events for {styn}")
    }
}
impl<STY> Appendable<STY> for EventsDim0<STY>
where
    STY: ScalarOps,
{
    /// Append one event; all three columns are pushed in lockstep.
    fn push(&mut self, ts: u64, pulse: u64, value: STY) {
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.values.push_back(value);
    }
}
#[cfg(test)]
mod test_frame {
    use super::*;
    use crate::channelevents::ChannelEvents;
    use crate::framable::Framable;
    use crate::framable::INMEM_FRAME_ENCID;
    use crate::frame::decode_frame;
    use crate::inmem::InMemoryFrame;
    use items_0::streamitem::RangeCompletableItem;
    use items_0::streamitem::Sitemty;
    use items_0::streamitem::StreamItem;
    // Round-trip: encode a single event into a frame, decode it, and
    // downcast back to the concrete container.
    #[test]
    fn events_serialize() {
        // taskrun::tracing_init_testing().unwrap();
        let mut events = EventsDim0::empty();
        events.push(123, 234, 55f32);
        let events = events;
        let events: Box<dyn Events> = Box::new(events);
        let item = ChannelEvents::Events(events);
        let item = Ok::<_, Error>(StreamItem::DataItem(RangeCompletableItem::Data(item)));
        let mut buf = item.make_frame_dyn().unwrap();
        // Payload sits between a 20-byte header and a 4-byte trailer.
        let s = String::from_utf8_lossy(&buf[20..buf.len() - 4]);
        eprintln!("[[{s}]]");
        let buflen = buf.len();
        let frame = InMemoryFrame {
            encid: INMEM_FRAME_ENCID,
            tyid: 0x2500,
            len: (buflen - 24) as _,
            buf: buf.split_off(20).split_to(buflen - 20 - 4).freeze(),
        };
        let item: Sitemty<ChannelEvents> = decode_frame(&frame).unwrap();
        // Unwrap the nested enums layer by layer; any mismatch fails the test.
        let item = if let Ok(x) = item { x } else { panic!() };
        let item = if let StreamItem::DataItem(x) = item {
            x
        } else {
            panic!()
        };
        let item = if let RangeCompletableItem::Data(x) = item {
            x
        } else {
            panic!()
        };
        let mut item = if let ChannelEvents::Events(x) = item {
            x
        } else {
            panic!()
        };
        let item = if let Some(item) = item.as_any_mut().downcast_mut::<EventsDim0<f32>>() {
            item
        } else {
            panic!()
        };
        assert_eq!(item.tss(), &[123]);
    }
}
#[cfg(test)]
mod test_serde_opt {
    use super::*;
    // Documents serde behavior relied on by the output structs: a plain
    // `Option` serializes as `null`, while `skip_serializing_if` omits the
    // field entirely.
    #[derive(Serialize)]
    struct A {
        a: Option<String>,
        #[serde(default)]
        b: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        c: Option<String>,
    }
    #[test]
    fn test_a() {
        let s = serde_json::to_string(&A {
            a: None,
            b: None,
            c: None,
        })
        .unwrap();
        assert_eq!(s, r#"{"a":null,"b":null}"#);
    }
}

View File

@@ -1,469 +0,0 @@
use daqbuf_err as err;
use err::Error;
use items_0::collect_s::CollectableDyn;
use items_0::collect_s::CollectedDyn;
use items_0::collect_s::CollectorDyn;
use items_0::collect_s::CollectorTy;
use items_0::collect_s::ToJsonBytes;
use items_0::collect_s::ToJsonResult;
use items_0::container::ByteEstimate;
use items_0::isodate::IsoDateTime;
use items_0::scalar_ops::ScalarOps;
use items_0::timebin::TimeBinnableTy;
use items_0::timebin::TimeBinnerTy;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::Events;
use items_0::EventsNonObj;
use items_0::TypeName;
use items_0::WithLen;
use netpod::log::*;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::MS;
use netpod::timeunits::SEC;
use netpod::BinnedRangeEnum;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::any::Any;
use std::collections::VecDeque;
use std::mem;
// Trace logging for collector results, compiled in but disabled
// (flip the `if false` to enable).
#[allow(unused)]
macro_rules! trace_collect_result {
    ($($arg:tt)*) => {
        if false {
            trace!($($arg)*);
        }
    };
}
// Collector for the special-cased enum event container.
#[derive(Debug)]
pub struct EventsDim0EnumCollector {
    // Collected events so far.
    vals: EventsDim0Enum,
    // True when the requested range was fully covered.
    range_final: bool,
    // True when collection was aborted due to a timeout.
    timed_out: bool,
    // True when the output should carry a continue-at hint.
    needs_continue_at: bool,
}
impl EventsDim0EnumCollector {
    /// Fresh collector with empty storage and all flags cleared.
    pub fn new() -> Self {
        Self {
            vals: EventsDim0Enum::new(),
            range_final: false,
            timed_out: false,
            needs_continue_at: false,
        }
    }
}
impl TypeName for EventsDim0EnumCollector {
    fn type_name(&self) -> String {
        "EventsDim0EnumCollector".into()
    }
}
// Number of events collected so far.
impl WithLen for EventsDim0EnumCollector {
    fn len(&self) -> usize {
        self.vals.tss.len()
    }
}
impl ByteEstimate for EventsDim0EnumCollector {
    // Flat per-event estimate; an enum event is a timestamp, an index and a
    // short string.
    fn byte_estimate(&self) -> u64 {
        // TODO does it need to be more accurate?
        30 * self.len() as u64
    }
}
// JSON-facing result of an enum-events collection; same anchored-offset
// timestamp scheme as `EventsDim0CollectorOutput`, plus the variant strings.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim0EnumCollectorOutput {
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    // Millisecond offsets relative to the anchor, one per event.
    #[serde(rename = "tsMs")]
    ts_off_ms: VecDeque<u64>,
    // Sub-millisecond remainder in nanoseconds, parallel to `ts_off_ms`.
    #[serde(rename = "tsNs")]
    ts_off_ns: VecDeque<u64>,
    // Numeric enum variant indices.
    #[serde(rename = "values")]
    vals: VecDeque<u16>,
    // Human-readable variant names, parallel to `vals`.
    #[serde(rename = "valuestrings")]
    valstrs: VecDeque<String>,
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "netpod::is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "netpod::is_false")]
    timed_out: bool,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
}
// Previously these three impls were `todo!()` stubs that panicked at
// runtime; they now follow the identical trivial pattern used by
// `EventsDim0CollectorOutput`.
impl WithLen for EventsDim0EnumCollectorOutput {
    /// Number of collected events (one entry in `vals` per event).
    fn len(&self) -> usize {
        self.vals.len()
    }
}
impl AsAnyRef for EventsDim0EnumCollectorOutput {
    /// Upcast to `&dyn Any` so consumers can downcast to the concrete type.
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
impl AsAnyMut for EventsDim0EnumCollectorOutput {
    /// Mutable counterpart of `as_any_ref`.
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl TypeName for EventsDim0EnumCollectorOutput {
    /// Full type name, used in logs.
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
impl ToJsonResult for EventsDim0EnumCollectorOutput {
    /// Serialize via serde; the struct derives `Serialize`, so the previous
    /// `todo!()` stub (a runtime panic) is replaced with the same one-liner
    /// used by `EventsDim0CollectorOutput`.
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}
// Marker: this type is a finished collection result.
impl CollectedDyn for EventsDim0EnumCollectorOutput {}
impl CollectorTy for EventsDim0EnumCollector {
type Input = EventsDim0Enum;
type Output = EventsDim0EnumCollectorOutput;
fn ingest(&mut self, src: &mut EventsDim0Enum) {
self.vals.tss.append(&mut src.tss);
self.vals.values.append(&mut src.values);
self.vals.valuestrs.append(&mut src.valuestrs);
}
fn set_range_complete(&mut self) {
self.range_final = true;
}
fn set_timed_out(&mut self) {
self.timed_out = true;
self.needs_continue_at = true;
}
fn set_continue_at_here(&mut self) {
self.needs_continue_at = true;
}
fn result(
&mut self,
range: Option<SeriesRange>,
binrange: Option<BinnedRangeEnum>,
) -> Result<EventsDim0EnumCollectorOutput, Error> {
trace_collect_result!(
"{} result() needs_continue_at {}",
self.type_name(),
self.needs_continue_at
);
// If we timed out, we want to hint the client from where to continue.
// This is tricky: currently, client can not request a left-exclusive range.
// We currently give the timestamp of the last event plus a small delta.
// The amount of the delta must take into account what kind of timestamp precision the client
// can parse and handle.
let vals = &mut self.vals;
let continue_at = if self.needs_continue_at {
if let Some(ts) = vals.tss.back() {
let x = Some(IsoDateTime::from_ns_u64(*ts / MS * MS + MS));
x
} else {
if let Some(range) = &range {
match range {
SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)),
SeriesRange::PulseRange(_) => {
error!("TODO emit create continueAt for pulse range");
Some(IsoDateTime::from_ns_u64(0))
}
}
} else {
Some(IsoDateTime::from_ns_u64(0))
}
}
} else {
None
};
let tss_sl = vals.tss.make_contiguous();
let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
let valixs = mem::replace(&mut vals.values, VecDeque::new());
let valstrs = mem::replace(&mut vals.valuestrs, VecDeque::new());
let vals = valixs;
if ts_off_ms.len() != ts_off_ns.len() {
return Err(Error::with_msg_no_trace("collected len mismatch"));
}
if ts_off_ms.len() != vals.len() {
return Err(Error::with_msg_no_trace("collected len mismatch"));
}
if ts_off_ms.len() != valstrs.len() {
return Err(Error::with_msg_no_trace("collected len mismatch"));
}
let ret = Self::Output {
ts_anchor_sec,
ts_off_ms,
ts_off_ns,
vals,
valstrs,
range_final: self.range_final,
timed_out: self.timed_out,
continue_at,
};
Ok(ret)
}
}
// Experiment with having this special case for enums
// Stores, per event, a timestamp, the numeric variant index and the variant
// name; the three deques are kept parallel.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim0Enum {
    pub tss: VecDeque<u64>,
    pub values: VecDeque<u16>,
    pub valuestrs: VecDeque<String>,
}
impl EventsDim0Enum {
    /// Empty container.
    pub fn new() -> Self {
        Self {
            tss: VecDeque::new(),
            values: VecDeque::new(),
            valuestrs: VecDeque::new(),
        }
    }
    /// Append one event; all three columns are pushed in lockstep.
    pub fn push_back(&mut self, ts: u64, value: u16, valuestr: String) {
        self.tss.push_back(ts);
        self.values.push_back(value);
        self.valuestrs.push_back(valuestr);
    }
}
impl TypeName for EventsDim0Enum {
    fn type_name(&self) -> String {
        "EventsDim0Enum".into()
    }
}
// Upcast to `&dyn Any` for downcasting by consumers.
impl AsAnyRef for EventsDim0Enum {
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef`.
impl AsAnyMut for EventsDim0Enum {
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
// Event count; `tss` is the authoritative length.
impl WithLen for EventsDim0Enum {
    fn len(&self) -> usize {
        self.tss.len()
    }
}
// Ties the container to its collector for dynamic collection.
impl CollectableDyn for EventsDim0Enum {
    fn new_collector(&self) -> Box<dyn CollectorDyn> {
        Box::new(EventsDim0EnumCollector::new())
    }
}
// impl Events
impl ByteEstimate for EventsDim0Enum {
    // NOTE(review): unimplemented — panics when called. A flat estimate like
    // the collector's `30 * len` would be a candidate; confirm before wiring.
    fn byte_estimate(&self) -> u64 {
        todo!()
    }
}
impl EventsNonObj for EventsDim0Enum {
    // NOTE(review): unimplemented — this container has no pulse column, so a
    // real implementation would need to synthesize pulses (cf.
    // `EventsDim1NoPulse`, which fills zeros).
    fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
        todo!()
    }
}
// NOTE just a dummy because currently we don't use this for time binning
// Every method panics via `todo!` if the binner is ever actually driven.
#[derive(Debug)]
pub struct EventsDim0EnumTimeBinner;
impl TimeBinnerTy for EventsDim0EnumTimeBinner {
    type Input = EventsDim0Enum;
    type Output = ();
    fn ingest(&mut self, item: &mut Self::Input) {
        todo!()
    }
    fn set_range_complete(&mut self) {
        todo!()
    }
    fn bins_ready_count(&self) -> usize {
        todo!()
    }
    fn bins_ready(&mut self) -> Option<Self::Output> {
        todo!()
    }
    fn push_in_progress(&mut self, push_empty: bool) {
        todo!()
    }
    fn cycle(&mut self) {
        todo!()
    }
    fn empty(&self) -> Option<Self::Output> {
        todo!()
    }
    fn append_empty_until_end(&mut self) {
        todo!()
    }
}
// NOTE just a dummy because currently we don't use this for time binning
impl TimeBinnableTy for EventsDim0Enum {
    type TimeBinner = EventsDim0EnumTimeBinner;
    // Panics via `todo!` if a binner is ever requested for enum events.
    fn time_binner_new(
        &self,
        binrange: BinnedRangeEnum,
        do_time_weight: bool,
        emit_empty_bins: bool,
    ) -> Self::TimeBinner {
        todo!()
    }
}
// NOTE just a dummy because currently we don't use this for time binning
// Wire format for CBOR chunk output of enum events (see `to_cbor_vec_u8`).
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim0EnumChunkOutput {
    // Absolute timestamps in nanoseconds.
    tss: VecDeque<u64>,
    // Numeric variant indices, parallel to `tss`.
    values: VecDeque<u16>,
    // Variant names, parallel to `tss`.
    valuestrings: VecDeque<String>,
    // Scalar type name so the output is self-describing.
    scalar_type: String,
}
// Mostly-stub `Events` implementation: only the serialization paths
// (`to_json_vec_u8` delegation and `to_cbor_vec_u8`) are functional; every
// other method panics via `todo!` when called.
impl Events for EventsDim0Enum {
    fn verify(&self) -> bool {
        todo!()
    }
    fn output_info(&self) -> String {
        todo!()
    }
    fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
        todo!()
    }
    fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
        todo!()
    }
    fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
        todo!()
    }
    fn ts_min(&self) -> Option<u64> {
        todo!()
    }
    fn ts_max(&self) -> Option<u64> {
        todo!()
    }
    fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
        todo!()
    }
    fn new_empty_evs(&self) -> Box<dyn Events> {
        todo!()
    }
    fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), items_0::MergeError> {
        todo!()
    }
    fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
        todo!()
    }
    fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
        todo!()
    }
    fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
        todo!()
    }
    fn clone_dyn(&self) -> Box<dyn Events> {
        todo!()
    }
    fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
        todo!()
    }
    fn serde_id(&self) -> &'static str {
        todo!()
    }
    fn nty_id(&self) -> u32 {
        todo!()
    }
    fn tss(&self) -> &VecDeque<u64> {
        todo!()
    }
    fn pulses(&self) -> &VecDeque<u64> {
        todo!()
    }
    fn frame_type_id(&self) -> u32 {
        todo!()
    }
    fn to_min_max_avg(&mut self) -> Box<dyn Events> {
        todo!()
    }
    fn to_json_string(&self) -> String {
        todo!()
    }
    // NOTE(review): delegates to the unimplemented `to_json_string`, so this
    // also panics when called.
    fn to_json_vec_u8(&self) -> Vec<u8> {
        self.to_json_string().into_bytes()
    }
    /// CBOR encoding of the raw columns (`EventsDim0EnumChunkOutput` schema).
    fn to_cbor_vec_u8(&self) -> Vec<u8> {
        // TODO redesign with mut access, rename to `into_` and take the values out.
        let ret = EventsDim0EnumChunkOutput {
            // TODO use &mut to swap the content
            tss: self.tss.clone(),
            values: self.values.clone(),
            valuestrings: self.valuestrs.clone(),
            scalar_type: netpod::EnumVariant::scalar_type_name().into(),
        };
        let mut buf = Vec::new();
        ciborium::into_writer(&ret, &mut buf).unwrap();
        buf
    }
    fn clear(&mut self) {
        todo!()
    }
    fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
        todo!("{}::to_dim0_f32_for_binning", self.type_name())
    }
    fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
        todo!("{}::to_container_events", self.type_name())
    }
}

View File

@@ -1,691 +0,0 @@
use crate::binsdim0::BinsDim0;
use crate::eventsxbindim0::EventsXbinDim0;
use crate::IsoDateTime;
use daqbuf_err as err;
use err::Error;
use items_0::collect_s::CollectableDyn;
use items_0::collect_s::CollectableType;
use items_0::collect_s::CollectedDyn;
use items_0::collect_s::CollectorTy;
use items_0::collect_s::ToJsonResult;
use items_0::container::ByteEstimate;
use items_0::overlap::HasTimestampDeque;
use items_0::scalar_ops::ScalarOps;
use items_0::Appendable;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::Empty;
use items_0::Events;
use items_0::EventsNonObj;
use items_0::MergeError;
use items_0::TypeName;
use items_0::WithLen;
use netpod::is_false;
use netpod::log::*;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::MS;
use netpod::timeunits::SEC;
use netpod::BinnedRangeEnum;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
// Trace logging with a mute switch: invocations prefixed with `EN` expand to
// nothing, all others forward to `trace!`.
#[allow(unused)]
macro_rules! trace2 {
    (EN$($arg:tt)*) => ();
    ($($arg:tt)*) => (trace!($($arg)*));
}
// Waveform events without a pulse column, e.g. from sources that don't
// provide pulse ids.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim1NoPulse<STY> {
    pub tss: VecDeque<u64>,
    pub values: VecDeque<Vec<STY>>,
}
// Converts to the full container by filling the pulse column with zeros.
impl<STY> From<EventsDim1NoPulse<STY>> for EventsDim1<STY> {
    fn from(value: EventsDim1NoPulse<STY>) -> Self {
        let pulses = vec![0; value.tss.len()].into();
        Self {
            tss: value.tss,
            pulses,
            values: value.values,
        }
    }
}
// Waveform (1-dimensional) events: per event a timestamp, a pulse id and a
// vector of samples. The three deques are kept parallel.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsDim1<STY> {
    pub tss: VecDeque<u64>,
    pub pulses: VecDeque<u64>,
    pub values: VecDeque<Vec<STY>>,
}
impl<STY> EventsDim1<STY> {
    /// Append one event at the back; columns are pushed in lockstep.
    #[inline(always)]
    pub fn push(&mut self, ts: u64, pulse: u64, value: Vec<STY>) {
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.values.push_back(value);
    }
    /// Prepend one event at the front; columns are pushed in lockstep.
    #[inline(always)]
    pub fn push_front(&mut self, ts: u64, pulse: u64, value: Vec<STY>) {
        self.tss.push_front(ts);
        self.pulses.push_front(pulse);
        self.values.push_front(value);
    }
    /// Stable identifier used to tag this container type in serialized frames.
    pub fn serde_id() -> &'static str {
        "EventsDim1"
    }
    /// Borrow the event timestamps (nanoseconds, event order).
    pub fn tss(&self) -> &VecDeque<u64> {
        &self.tss
    }
}
// Upcast to `&dyn Any` so consumers can downcast to the concrete container.
impl<STY> AsAnyRef for EventsDim1<STY>
where
    STY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef`.
impl<STY> AsAnyMut for EventsDim1<STY>
where
    STY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
// Construct a container with no events.
impl<STY> Empty for EventsDim1<STY> {
    fn empty() -> Self {
        Self {
            tss: VecDeque::new(),
            pulses: VecDeque::new(),
            values: VecDeque::new(),
        }
    }
}
impl<STY> fmt::Debug for EventsDim1<STY>
where
    STY: fmt::Debug,
{
    // Two alternative formats; the `if false` arm is a manually-toggled
    // verbose variant that dumps all timestamps instead of first/last.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        if false {
            write!(
                fmt,
                "EventsDim1 {{ count {} ts {:?} vals {:?} }}",
                self.tss.len(),
                self.tss.iter().map(|x| x / SEC).collect::<Vec<_>>(),
                self.values,
            )
        } else {
            write!(
                fmt,
                "EventsDim1 {{ count {} ts {:?} .. {:?} vals {:?} .. {:?} }}",
                self.tss.len(),
                self.tss.front().map(|x| x / SEC),
                self.tss.back().map(|x| x / SEC),
                self.values.front(),
                self.values.back(),
            )
        }
    }
}
// Event count; `tss` is the authoritative length.
impl<STY> WithLen for EventsDim1<STY> {
    fn len(&self) -> usize {
        self.tss.len()
    }
}
impl<STY> ByteEstimate for EventsDim1<STY> {
    // Assumes all waveforms have the length of the first one:
    // 8 bytes timestamp + 8 bytes pulse + n samples per event.
    fn byte_estimate(&self) -> u64 {
        let stylen = mem::size_of::<STY>();
        let n = self.values.front().map_or(0, Vec::len);
        (self.len() * (8 + 8 + n * stylen)) as u64
    }
}
impl<STY: ScalarOps> HasTimestampDeque for EventsDim1<STY> {
    /// Earliest timestamp, if any events are present.
    fn timestamp_min(&self) -> Option<u64> {
        self.tss.front().copied()
    }
    /// Latest timestamp, if any events are present.
    fn timestamp_max(&self) -> Option<u64> {
        self.tss.back().copied()
    }
    /// First pulse id, if any events are present.
    fn pulse_min(&self) -> Option<u64> {
        self.pulses.front().copied()
    }
    /// Last pulse id, if any events are present.
    fn pulse_max(&self) -> Option<u64> {
        self.pulses.back().copied()
    }
}
// Wire format for CBOR chunk output of waveform events.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim1ChunkOutput<STY> {
    // Absolute timestamps in nanoseconds.
    tss: VecDeque<u64>,
    // Pulse ids, parallel to `tss`.
    pulses: VecDeque<u64>,
    // One waveform per event, parallel to `tss`.
    values: VecDeque<Vec<STY>>,
    // Scalar type name so the output is self-describing.
    scalar_type: String,
}
impl<STY: ScalarOps> EventsDim1ChunkOutput<STY> {}
// Accumulates incoming `EventsDim1` batches until a result is requested.
#[derive(Debug)]
pub struct EventsDim1Collector<STY> {
    // Collected events so far.
    vals: EventsDim1<STY>,
    // True when the requested range was fully covered.
    range_final: bool,
    // True when collection was aborted due to a timeout.
    timed_out: bool,
    // True when the output should carry a continue-at hint.
    needs_continue_at: bool,
}
impl<STY> EventsDim1Collector<STY> {
    /// Type name of this collector, for log messages.
    pub fn self_name() -> &'static str {
        any::type_name::<Self>()
    }
    /// Fresh collector with empty storage and all flags cleared.
    pub fn new() -> Self {
        Self {
            vals: EventsDim1::empty(),
            range_final: false,
            timed_out: false,
            needs_continue_at: false,
        }
    }
}
// Number of events collected so far.
impl<STY> WithLen for EventsDim1Collector<STY> {
    fn len(&self) -> usize {
        WithLen::len(&self.vals)
    }
}
// Delegates to the inner container's estimate.
impl<STY> ByteEstimate for EventsDim1Collector<STY> {
    fn byte_estimate(&self) -> u64 {
        ByteEstimate::byte_estimate(&self.vals)
    }
}
// JSON-facing result of a waveform-events collection; same anchored-offset
// timestamp scheme as the scalar output, with per-event sample vectors.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsDim1CollectorOutput<STY> {
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    // Millisecond offsets relative to the anchor, one per event.
    #[serde(rename = "tsMs")]
    ts_off_ms: VecDeque<u64>,
    // Sub-millisecond remainder in nanoseconds, parallel to `ts_off_ms`.
    #[serde(rename = "tsNs")]
    ts_off_ns: VecDeque<u64>,
    #[serde(rename = "pulseAnchor")]
    pulse_anchor: u64,
    // Pulse ids as offsets from `pulse_anchor`.
    #[serde(rename = "pulseOff")]
    pulse_off: VecDeque<u64>,
    #[serde(rename = "values")]
    values: VecDeque<Vec<STY>>,
    // Flags are omitted from JSON when false / None.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
}
impl<STY: ScalarOps> EventsDim1CollectorOutput<STY> {
    /// Anchor second for the timestamp offsets.
    pub fn ts_anchor_sec(&self) -> u64 {
        self.ts_anchor_sec
    }
    /// Millisecond offsets relative to the anchor.
    pub fn ts_off_ms(&self) -> &VecDeque<u64> {
        &self.ts_off_ms
    }
    /// Anchor value for the pulse offsets.
    pub fn pulse_anchor(&self) -> u64 {
        self.pulse_anchor
    }
    /// Pulse offsets relative to the anchor.
    pub fn pulse_off(&self) -> &VecDeque<u64> {
        &self.pulse_off
    }
    /// Note: only used for unit tests.
    pub fn values_to_f32(&self) -> VecDeque<Vec<f32>> {
        self.values
            .iter()
            .map(|ev| ev.iter().map(|v| v.as_prim_f32_b()).collect())
            .collect()
    }
    /// True when the requested range was fully covered.
    pub fn range_final(&self) -> bool {
        self.range_final
    }
    /// True when collection was cut short by a timeout.
    pub fn timed_out(&self) -> bool {
        self.timed_out
    }
    /// All per-event columns must have identical lengths.
    pub fn is_valid(&self) -> bool {
        let n = self.ts_off_ms.len();
        n == self.ts_off_ns.len() && n == self.pulse_off.len() && n == self.values.len()
    }
    /// One-line summary of the column lengths, for diagnostics.
    pub fn info_str(&self) -> String {
        format!(
            "ts_off_ms {} ts_off_ns {} pulse_off {} values {}",
            self.ts_off_ms.len(),
            self.ts_off_ns.len(),
            self.pulse_off.len(),
            self.values.len(),
        )
    }
}
// Upcast to `&dyn Any` so consumers can downcast to the concrete output.
impl<STY> AsAnyRef for EventsDim1CollectorOutput<STY>
where
    STY: 'static,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef`.
impl<STY> AsAnyMut for EventsDim1CollectorOutput<STY>
where
    STY: 'static,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> TypeName for EventsDim1CollectorOutput<STY> {
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
// Number of collected events.
impl<STY: ScalarOps> WithLen for EventsDim1CollectorOutput<STY> {
    fn len(&self) -> usize {
        self.values.len()
    }
}
// Serialize via serde; the struct's serde renames define the JSON schema.
impl<STY: ScalarOps> ToJsonResult for EventsDim1CollectorOutput<STY> {
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}
impl<STY: ScalarOps> CollectorTy for EventsDim1Collector<STY> {
    type Input = EventsDim1<STY>;
    type Output = EventsDim1CollectorOutput<STY>;
    // Move all events out of `src` into the accumulation buffer.
    fn ingest(&mut self, src: &mut Self::Input) {
        self.vals.tss.append(&mut src.tss);
        self.vals.pulses.append(&mut src.pulses);
        self.vals.values.append(&mut src.values);
    }
    fn set_range_complete(&mut self) {
        self.range_final = true;
    }
    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }
    fn set_continue_at_here(&mut self) {
        debug!("{}::set_continue_at_here", Self::self_name());
        self.needs_continue_at = true;
    }
    // TODO unify with dim0 case
    // Finalize collection: convert absolute timestamps/pulses into anchored
    // offsets and emit the output struct, with a continue-at hint on timeout.
    fn result(
        &mut self,
        range: Option<SeriesRange>,
        _binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        // If we timed out, we want to hint the client from where to continue.
        // This is tricky: currently, client can not request a left-exclusive range.
        // We currently give the timestamp of the last event plus a small delta.
        // The amount of the delta must take into account what kind of timestamp precision the client
        // can parse and handle.
        let vals = &mut self.vals;
        let continue_at = if self.timed_out {
            if let Some(ts) = vals.tss.back() {
                // One millisecond past the last delivered event.
                Some(IsoDateTime::from_ns_u64(*ts + MS))
            } else {
                if let Some(range) = &range {
                    match range {
                        SeriesRange::TimeRange(x) => Some(IsoDateTime::from_ns_u64(x.beg + SEC)),
                        SeriesRange::PulseRange(_) => {
                            error!("TODO emit create continueAt for pulse range");
                            Some(IsoDateTime::from_ns_u64(0))
                        }
                    }
                } else {
                    warn!("can not determine continue-at parameters");
                    Some(IsoDateTime::from_ns_u64(0))
                }
            }
        } else {
            None
        };
        // make_contiguous so the offset helpers can work on plain slices.
        let tss_sl = vals.tss.make_contiguous();
        let pulses_sl = vals.pulses.make_contiguous();
        let (ts_anchor_sec, ts_off_ms, ts_off_ns) = crate::ts_offs_from_abs(tss_sl);
        let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(pulses_sl);
        let values = mem::replace(&mut vals.values, VecDeque::new());
        // All columns must agree in length; refuse to emit inconsistent output.
        if ts_off_ms.len() != ts_off_ns.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        if ts_off_ms.len() != pulse_off.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        if ts_off_ms.len() != values.len() {
            return Err(Error::with_msg_no_trace("collected len mismatch"));
        }
        let ret = Self::Output {
            ts_anchor_sec,
            ts_off_ms,
            ts_off_ns,
            pulse_anchor,
            pulse_off,
            values,
            range_final: self.range_final,
            timed_out: self.timed_out,
            continue_at,
        };
        if !ret.is_valid() {
            error!("invalid:\n{}", ret.info_str());
        }
        Ok(ret)
    }
}
// Wire the event container to its collector type.
impl<STY: ScalarOps> CollectableType for EventsDim1<STY> {
    type Collector = EventsDim1Collector<STY>;
    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}
// Aggregator stub for dim-1 events; construction currently panics (see `new`).
#[derive(Debug)]
pub struct EventsDim1Aggregator<STY> {
    _last_seen_val: Option<STY>,
    // Counters reported on drop for request statistics.
    events_taken_count: u64,
    events_ignored_count: u64,
}
impl<STY> Drop for EventsDim1Aggregator<STY> {
    fn drop(&mut self) {
        // TODO collect as stats for the request context:
        trace!(
            "taken {} ignored {}",
            self.events_taken_count,
            self.events_ignored_count
        );
    }
}
impl<STY: ScalarOps> EventsDim1Aggregator<STY> {
    // Construction is intentionally disabled; this type is slated for removal.
    pub fn new(_range: SeriesRange, _do_time_weight: bool) -> Self {
        panic!("TODO remove")
    }
}
impl<STY> items_0::TypeName for EventsDim1<STY> {
    // Human-readable type name including the scalar type parameter.
    fn type_name(&self) -> String {
        let sty = std::any::type_name::<STY>();
        format!("EventsDim1<{sty}>")
    }
}
impl<STY: ScalarOps> EventsNonObj for EventsDim1<STY> {
    // Intentionally unimplemented; this conversion is slated for removal.
    fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
        panic!("TODO remove")
    }
}
impl<STY: ScalarOps> Events for EventsDim1<STY> {
fn verify(&self) -> bool {
let mut good = true;
let mut ts_max = 0;
for ts in &self.tss {
let ts = *ts;
if ts < ts_max {
good = false;
error!("unordered event data ts {} ts_max {}", ts, ts_max);
}
ts_max = ts_max.max(ts);
}
good
}
fn output_info(&self) -> String {
let n2 = self.tss.len().max(1) - 1;
format!(
"EventsDim1OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
self.tss.len(),
self.tss.get(0).map_or(-1i64, |&x| x as i64),
self.tss.get(n2).map_or(-1i64, |&x| x as i64),
)
}
fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
self
}
fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
self
}
fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
self
}
fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
// TODO improve the search
let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
let tss = self.tss.drain(..n1).collect();
let pulses = self.pulses.drain(..n1).collect();
let values = self.values.drain(..n1).collect();
let ret = Self { tss, pulses, values };
Box::new(ret)
}
fn new_empty_evs(&self) -> Box<dyn Events> {
Box::new(Self::empty())
}
fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> {
// TODO as_any and as_any_mut are declared on unrelated traits. Simplify.
if let Some(dst) = dst.as_any_mut().downcast_mut::<Self>() {
// TODO make it harder to forget new members when the struct may get modified in the future
let r = range.0..range.1;
dst.tss.extend(self.tss.drain(r.clone()));
dst.pulses.extend(self.pulses.drain(r.clone()));
dst.values.extend(self.values.drain(r.clone()));
Ok(())
} else {
error!("downcast to EventsDim0 FAILED");
Err(MergeError::NotCompatible)
}
}
fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
for (i, &m) in self.tss.iter().enumerate() {
if m > ts {
return Some(i);
}
}
None
}
fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
for (i, &m) in self.tss.iter().enumerate() {
if m >= ts {
return Some(i);
}
}
None
}
fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
for (i, &m) in self.tss.iter().enumerate().rev() {
if m < ts {
return Some(i);
}
}
None
}
fn ts_min(&self) -> Option<u64> {
self.tss.front().map(|&x| x)
}
fn ts_max(&self) -> Option<u64> {
self.tss.back().map(|&x| x)
}
fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
if let Some(other) = other.as_any_ref().downcast_ref::<Self>() {
self == other
} else {
false
}
}
fn serde_id(&self) -> &'static str {
Self::serde_id()
}
fn nty_id(&self) -> u32 {
STY::SUB
}
fn clone_dyn(&self) -> Box<dyn Events> {
Box::new(self.clone())
}
fn tss(&self) -> &VecDeque<u64> {
&self.tss
}
fn pulses(&self) -> &VecDeque<u64> {
&self.pulses
}
fn frame_type_id(&self) -> u32 {
// TODO make more nice
panic!()
}
fn to_min_max_avg(&mut self) -> Box<dyn Events> {
let mins = self
.values
.iter()
.map(|x| STY::find_vec_min(x))
.map(|x| x.unwrap_or_else(|| STY::zero_b()))
.collect();
let maxs = self
.values
.iter()
.map(|x| STY::find_vec_max(x))
.map(|x| x.unwrap_or_else(|| STY::zero_b()))
.collect();
let avgs = self
.values
.iter()
.map(|x| STY::avg_vec(x))
.map(|x| x.unwrap_or_else(|| STY::zero_b()))
.map(|x| x.as_prim_f32_b())
.collect();
let item = EventsXbinDim0 {
tss: mem::replace(&mut self.tss, VecDeque::new()),
pulses: mem::replace(&mut self.pulses, VecDeque::new()),
mins,
maxs,
avgs,
};
Box::new(item)
}
fn to_json_string(&self) -> String {
let ret = EventsDim1ChunkOutput {
// TODO use &mut to swap the content
tss: self.tss.clone(),
pulses: self.pulses.clone(),
values: self.values.clone(),
scalar_type: STY::scalar_type_name().into(),
};
serde_json::to_string(&ret).unwrap()
}
fn to_json_vec_u8(&self) -> Vec<u8> {
self.to_json_string().into_bytes()
}
fn to_cbor_vec_u8(&self) -> Vec<u8> {
let ret = EventsDim1ChunkOutput {
// TODO use &mut to swap the content
tss: self.tss.clone(),
pulses: self.pulses.clone(),
values: self.values.clone(),
scalar_type: STY::scalar_type_name().into(),
};
let mut buf = Vec::new();
ciborium::into_writer(&ret, &mut buf).unwrap();
buf
}
fn clear(&mut self) {
self.tss.clear();
self.pulses.clear();
self.values.clear();
}
fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
todo!("{}::to_dim0_f32_for_binning", self.type_name())
}
fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
todo!("{}::to_container_events", self.type_name())
}
}
// Trait-level append: forwards to the inherent `push`.
impl<STY> Appendable<Vec<STY>> for EventsDim1<STY>
where
    STY: ScalarOps,
{
    fn push(&mut self, ts: u64, pulse: u64, value: Vec<STY>) {
        Self::push(self, ts, pulse, value)
    }
}

View File

@@ -1,779 +0,0 @@
use crate::binsxbindim0::BinsXbinDim0;
use crate::IsoDateTime;
use daqbuf_err as err;
use err::Error;
use items_0::collect_s::CollectableDyn;
use items_0::collect_s::CollectableType;
use items_0::collect_s::CollectedDyn;
use items_0::collect_s::CollectorTy;
use items_0::collect_s::ToJsonResult;
use items_0::container::ByteEstimate;
use items_0::overlap::HasTimestampDeque;
use items_0::scalar_ops::ScalarOps;
use items_0::timebin::TimeBinnerTy;
use items_0::AsAnyMut;
use items_0::AsAnyRef;
use items_0::Empty;
use items_0::Events;
use items_0::EventsNonObj;
use items_0::MergeError;
use items_0::TypeName;
use items_0::WithLen;
use netpod::is_false;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::SEC;
use netpod::BinnedRangeEnum;
use serde::Deserialize;
use serde::Serialize;
use std::any;
use std::any::Any;
use std::collections::VecDeque;
use std::fmt;
use std::mem;
// Compile-time log toggle: the first (empty) arm always matches, so the macro
// is a no-op; swap the arm order to re-enable the `trace!` output.
#[allow(unused)]
macro_rules! trace_ingest {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
// Same toggle pattern as `trace_ingest`, currently disabled.
#[allow(unused)]
macro_rules! trace2 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
// Events reduced over the non-time dimension ("x-binned"): per event only the
// min/max/avg of the original waveform value is kept, in parallel columns.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub struct EventsXbinDim0<NTY> {
    pub tss: VecDeque<u64>,
    pub pulses: VecDeque<u64>,
    pub mins: VecDeque<NTY>,
    pub maxs: VecDeque<NTY>,
    pub avgs: VecDeque<f32>,
    // TODO maybe add variance?
}
impl<NTY> EventsXbinDim0<NTY> {
    // Append one event at the back; all columns must stay in lockstep.
    #[inline(always)]
    pub fn push(&mut self, ts: u64, pulse: u64, min: NTY, max: NTY, avg: f32) {
        self.tss.push_back(ts);
        self.pulses.push_back(pulse);
        self.mins.push_back(min);
        self.maxs.push_back(max);
        self.avgs.push_back(avg);
    }
    // Prepend one event at the front; all columns must stay in lockstep.
    #[inline(always)]
    pub fn push_front(&mut self, ts: u64, pulse: u64, min: NTY, max: NTY, avg: f32) {
        self.tss.push_front(ts);
        self.pulses.push_front(pulse);
        self.mins.push_front(min);
        self.maxs.push_front(max);
        self.avgs.push_front(avg);
    }
    // Stable identifier used for frame (de)serialization dispatch.
    pub fn serde_id() -> &'static str {
        "EventsXbinDim0"
    }
}
impl<STY> TypeName for EventsXbinDim0<STY> {
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
impl<STY> fmt::Debug for EventsXbinDim0<STY>
where
    STY: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Dead `if false` branch kept as a developer toggle between a full
        // timestamp dump and the compact first/last summary below.
        if false {
            write!(
                fmt,
                "{} {{ count {} ts {:?} vals {:?} }}",
                self.type_name(),
                self.tss.len(),
                self.tss.iter().map(|x| x / SEC).collect::<Vec<_>>(),
                self.avgs,
            )
        } else {
            write!(
                fmt,
                "{} {{ count {} ts {:?} .. {:?} vals {:?} .. {:?} }}",
                self.type_name(),
                self.tss.len(),
                self.tss.front().map(|x| x / SEC),
                self.tss.back().map(|x| x / SEC),
                self.avgs.front(),
                self.avgs.back(),
            )
        }
    }
}
impl<STY> ByteEstimate for EventsXbinDim0<STY> {
    // Rough per-event size: ts + pulse (8 bytes each), min + max (scalar size), avg (4 bytes).
    fn byte_estimate(&self) -> u64 {
        let stylen = mem::size_of::<STY>();
        (self.len() * (8 + 8 + 2 * stylen + 4)) as u64
    }
}
impl<STY> Empty for EventsXbinDim0<STY> {
    fn empty() -> Self {
        Self {
            tss: VecDeque::new(),
            pulses: VecDeque::new(),
            mins: VecDeque::new(),
            maxs: VecDeque::new(),
            avgs: VecDeque::new(),
        }
    }
}
// Allow upcasting to `&dyn Any` for dynamic downcasts during merging.
impl<STY> AsAnyRef for EventsXbinDim0<STY>
where
    STY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
impl<STY> AsAnyMut for EventsXbinDim0<STY>
where
    STY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> WithLen for EventsXbinDim0<STY> {
    // All columns are kept in lockstep; tss is authoritative.
    fn len(&self) -> usize {
        self.tss.len()
    }
}
impl<STY: ScalarOps> HasTimestampDeque for EventsXbinDim0<STY> {
    /// First timestamp, if any. Uses `Option::copied` instead of `map(|x| *x)`.
    fn timestamp_min(&self) -> Option<u64> {
        self.tss.front().copied()
    }
    /// Last timestamp, if any.
    fn timestamp_max(&self) -> Option<u64> {
        self.tss.back().copied()
    }
    /// First pulse id, if any.
    fn pulse_min(&self) -> Option<u64> {
        self.pulses.front().copied()
    }
    /// Last pulse id, if any.
    fn pulse_max(&self) -> Option<u64> {
        self.pulses.back().copied()
    }
}
impl<STY: ScalarOps> EventsNonObj for EventsXbinDim0<STY> {
    // Discard the value columns and hand back only timestamps and pulses.
    fn into_tss_pulses(self: Box<Self>) -> (VecDeque<u64>, VecDeque<u64>) {
        info!(
            "EventsXbinDim0::into_tss_pulses len {} len {}",
            self.tss.len(),
            self.pulses.len()
        );
        (self.tss, self.pulses)
    }
}
impl<STY: ScalarOps> Events for EventsXbinDim0<STY> {
    // Check that timestamps are non-decreasing; logs and returns false otherwise.
    fn verify(&self) -> bool {
        let mut good = true;
        let mut ts_max = 0;
        for ts in &self.tss {
            let ts = *ts;
            if ts < ts_max {
                good = false;
                error!("unordered event data ts {} ts_max {}", ts, ts_max);
            }
            ts_max = ts_max.max(ts);
        }
        good
    }
    // Short summary string: length plus first/last timestamp (-1 when empty).
    fn output_info(&self) -> String {
        let n2 = self.tss.len().max(1) - 1;
        format!(
            "EventsXbinDim0OutputInfo {{ len {}, ts_min {}, ts_max {} }}",
            self.tss.len(),
            self.tss.get(0).map_or(-1i64, |&x| x as i64),
            self.tss.get(n2).map_or(-1i64, |&x| x as i64),
        )
    }
    fn as_collectable_mut(&mut self) -> &mut dyn CollectableDyn {
        self
    }
    fn as_collectable_with_default_ref(&self) -> &dyn CollectableDyn {
        self
    }
    fn as_collectable_with_default_mut(&mut self) -> &mut dyn CollectableDyn {
        self
    }
    // Split off and return all events with ts <= ts_end (relies on sorted tss).
    fn take_new_events_until_ts(&mut self, ts_end: u64) -> Box<dyn Events> {
        // TODO improve the search
        let n1 = self.tss.iter().take_while(|&&x| x <= ts_end).count();
        let tss = self.tss.drain(..n1).collect();
        let pulses = self.pulses.drain(..n1).collect();
        let mins = self.mins.drain(..n1).collect();
        let maxs = self.maxs.drain(..n1).collect();
        let avgs = self.avgs.drain(..n1).collect();
        let ret = Self {
            tss,
            pulses,
            mins,
            maxs,
            avgs,
        };
        Box::new(ret)
    }
    fn new_empty_evs(&self) -> Box<dyn Events> {
        Box::new(Self::empty())
    }
    // Move events [range.0, range.1) into `dst`, which must be the same concrete type.
    fn drain_into_evs(&mut self, dst: &mut dyn Events, range: (usize, usize)) -> Result<(), MergeError> {
        // TODO as_any and as_any_mut are declared on unrelated traits. Simplify.
        if let Some(dst) = dst.as_any_mut().downcast_mut::<Self>() {
            // TODO make it harder to forget new members when the struct may get modified in the future
            let r = range.0..range.1;
            dst.tss.extend(self.tss.drain(r.clone()));
            dst.pulses.extend(self.pulses.drain(r.clone()));
            dst.mins.extend(self.mins.drain(r.clone()));
            dst.maxs.extend(self.maxs.drain(r.clone()));
            dst.avgs.extend(self.avgs.drain(r.clone()));
            Ok(())
        } else {
            error!("downcast to {} FAILED", self.type_name());
            Err(MergeError::NotCompatible)
        }
    }
    // Linear scans below; tss is sorted, so a binary search would also work.
    fn find_lowest_index_gt_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate() {
            if m > ts {
                return Some(i);
            }
        }
        None
    }
    fn find_lowest_index_ge_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate() {
            if m >= ts {
                return Some(i);
            }
        }
        None
    }
    fn find_highest_index_lt_evs(&self, ts: u64) -> Option<usize> {
        for (i, &m) in self.tss.iter().enumerate().rev() {
            if m < ts {
                return Some(i);
            }
        }
        None
    }
    fn ts_min(&self) -> Option<u64> {
        self.tss.front().map(|&x| x)
    }
    fn ts_max(&self) -> Option<u64> {
        self.tss.back().map(|&x| x)
    }
    fn partial_eq_dyn(&self, other: &dyn Events) -> bool {
        if let Some(other) = other.as_any_ref().downcast_ref::<Self>() {
            self == other
        } else {
            false
        }
    }
    fn serde_id(&self) -> &'static str {
        Self::serde_id()
    }
    fn nty_id(&self) -> u32 {
        STY::SUB
    }
    fn clone_dyn(&self) -> Box<dyn Events> {
        Box::new(self.clone())
    }
    fn tss(&self) -> &VecDeque<u64> {
        &self.tss
    }
    fn pulses(&self) -> &VecDeque<u64> {
        &self.pulses
    }
    fn frame_type_id(&self) -> u32 {
        error!("TODO frame_type_id should not be called");
        // TODO make more nice
        panic!()
    }
    // Already x-binned: just move the columns into a fresh container.
    fn to_min_max_avg(&mut self) -> Box<dyn Events> {
        let dst = Self {
            tss: mem::replace(&mut self.tss, Default::default()),
            pulses: mem::replace(&mut self.pulses, Default::default()),
            mins: mem::replace(&mut self.mins, Default::default()),
            maxs: mem::replace(&mut self.maxs, Default::default()),
            avgs: mem::replace(&mut self.avgs, Default::default()),
        };
        Box::new(dst)
    }
    fn to_json_string(&self) -> String {
        todo!()
    }
    fn to_json_vec_u8(&self) -> Vec<u8> {
        todo!()
    }
    fn to_cbor_vec_u8(&self) -> Vec<u8> {
        todo!()
    }
    fn clear(&mut self) {
        self.tss.clear();
        self.pulses.clear();
        self.mins.clear();
        self.maxs.clear();
        self.avgs.clear();
    }
    fn to_dim0_f32_for_binning(&self) -> Box<dyn Events> {
        todo!("{}::to_dim0_f32_for_binning", self.type_name())
    }
    fn to_container_events(&self) -> Box<dyn ::items_0::timebin::BinningggContainerEventsDyn> {
        todo!("{}::to_container_events", self.type_name())
    }
}
// Aggregates x-binned events into a single time bin, optionally time-weighted.
#[derive(Debug)]
pub struct EventsXbinDim0Aggregator<STY>
where
    STY: ScalarOps,
{
    range: SeriesRange,
    /// Number of events which actually fall in this bin.
    count: u64,
    min: STY,
    max: STY,
    /// Number of times we accumulated to the sum of this bin.
    sumc: u64,
    sum: f32,
    // Timestamp up to which the time-weighted integral has been advanced.
    int_ts: u64,
    last_ts: u64,
    // Most recent (min, max, avg) seen; carried across bin boundaries.
    last_vals: Option<(STY, STY, f32)>,
    did_min_max: bool,
    do_time_weight: bool,
    events_ignored_count: u64,
}
impl<STY> EventsXbinDim0Aggregator<STY>
where
    STY: ScalarOps,
{
    pub fn type_name() -> &'static str {
        std::any::type_name::<Self>()
    }
    // Fresh aggregator; the integral starts at the bin's begin timestamp.
    pub fn new(range: SeriesRange, do_time_weight: bool) -> Self {
        let int_ts = range.beg_u64();
        Self {
            range,
            did_min_max: false,
            count: 0,
            min: STY::zero_b(),
            max: STY::zero_b(),
            sumc: 0,
            sum: 0f32,
            int_ts,
            last_ts: 0,
            last_vals: None,
            events_ignored_count: 0,
            do_time_weight,
        }
    }
    // Fold a (min, max) pair into the running extrema; the first application
    // initializes them (tracked via did_min_max, which must mirror sumc > 0).
    fn apply_min_max(&mut self, min: &STY, max: &STY) {
        if self.did_min_max != (self.sumc > 0) {
            panic!("logic error apply_min_max {} {}", self.did_min_max, self.sumc);
        }
        if self.sumc == 0 {
            self.did_min_max = true;
            self.min = min.clone();
            self.max = max.clone();
        } else {
            if *min < self.min {
                self.min = min.clone();
            }
            if *max > self.max {
                self.max = max.clone();
            }
        }
    }
    // Unweighted accumulation: each event contributes equally; NaN avgs are skipped.
    fn apply_event_unweight(&mut self, avg: f32, min: STY, max: STY) {
        //debug!("apply_event_unweight");
        self.apply_min_max(&min, &max);
        self.sumc += 1;
        let vf = avg;
        if vf.is_nan() {
        } else {
            self.sum += vf;
        }
    }
    // Only integrate, do not count because it is used even if the event does not fall into current bin.
    fn apply_event_time_weight(&mut self, px: u64) {
        trace_ingest!(
            "apply_event_time_weight px {} count {} sumc {} events_ignored_count {}",
            px,
            self.count,
            self.sumc,
            self.events_ignored_count
        );
        if let Some((min, max, avg)) = self.last_vals.as_ref() {
            let vf = *avg;
            {
                let min = min.clone();
                let max = max.clone();
                self.apply_min_max(&min, &max);
            }
            self.sumc += 1;
            // Weight is the elapsed time since the last integration point, in seconds.
            let w = (px - self.int_ts) as f32 * 1e-9;
            if vf.is_nan() {
            } else {
                self.sum += vf * w;
            }
            self.int_ts = px;
        } else {
            debug!("apply_event_time_weight NO VALUE");
        }
    }
    // Unweighted ingest is currently disabled; the old implementation is kept
    // commented out as reference.
    fn ingest_unweight(&mut self, item: &EventsXbinDim0<STY>) {
        /*for i1 in 0..item.tss.len() {
            let ts = item.tss[i1];
            let avg = item.avgs[i1];
            let min = item.mins[i1].clone();
            let max = item.maxs[i1].clone();
            if ts < self.range.beg {
            } else if ts >= self.range.end {
            } else {
                self.apply_event_unweight(avg, min, max);
            }
        }*/
        todo!()
    }
    // Time-weighted ingest: events before the bin only update last_vals; events
    // inside the bin integrate up to their timestamp; events after are ignored.
    fn ingest_time_weight(&mut self, item: &EventsXbinDim0<STY>) {
        trace!(
            "{} ingest_time_weight range {:?} last_ts {:?} int_ts {:?}",
            Self::type_name(),
            self.range,
            self.last_ts,
            self.int_ts
        );
        let range_beg = self.range.beg_u64();
        let range_end = self.range.end_u64();
        for (((&ts, min), max), avg) in item
            .tss
            .iter()
            .zip(item.mins.iter())
            .zip(item.maxs.iter())
            .zip(item.avgs.iter())
        {
            if ts >= range_end {
                self.events_ignored_count += 1;
                // TODO break early when tests pass.
                //break;
            } else if ts >= range_beg {
                self.apply_event_time_weight(ts);
                self.count += 1;
                self.last_ts = ts;
                self.last_vals = Some((min.clone(), max.clone(), avg.clone()));
            } else {
                self.events_ignored_count += 1;
                self.last_ts = ts;
                self.last_vals = Some((min.clone(), max.clone(), avg.clone()));
            }
        }
    }
    // Disabled counterpart of result_reset_time_weight; kept as reference.
    fn result_reset_unweight(&mut self, range: SeriesRange) -> BinsXbinDim0<STY> {
        /*let avg = if self.sumc == 0 {
            0f32
        } else {
            self.sum / self.sumc as f32
        };
        let ret = BinsXbinDim0::from_content(
            [self.range.beg].into(),
            [self.range.end].into(),
            [self.count].into(),
            [self.min.clone()].into(),
            [self.max.clone()].into(),
            [avg].into(),
        );
        self.int_ts = range.beg;
        self.range = range;
        self.sum = 0f32;
        self.sumc = 0;
        self.did_min_max = false;
        self.min = NTY::zero_b();
        self.max = NTY::zero_b();
        ret*/
        todo!()
    }
    // Close the current bin: integrate to the bin end, emit a single-bin
    // container, then reset the accumulator state for `range`.
    fn result_reset_time_weight(&mut self, range: SeriesRange) -> BinsXbinDim0<STY> {
        trace!("{} result_reset_time_weight", Self::type_name());
        // TODO check callsite for correct expand status.
        if self.range.is_time() {
            self.apply_event_time_weight(self.range.end_u64());
        } else {
            error!("TODO result_reset_time_weight");
            err::todoval()
        }
        let range_beg = self.range.beg_u64();
        let range_end = self.range.end_u64();
        let (min, max, avg) = if self.sumc > 0 {
            // Normalize the integral by the bin duration in seconds.
            let avg = self.sum / (self.range.delta_u64() as f32 * 1e-9);
            (self.min.clone(), self.max.clone(), avg)
        } else {
            let (min, max, avg) = match &self.last_vals {
                Some((min, max, avg)) => {
                    warn!("\n\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! SHOULD ALWAYS HAVE ACCUMULATED IN THIS CASE");
                    (min.clone(), max.clone(), avg.clone())
                }
                None => (STY::zero_b(), STY::zero_b(), 0.),
            };
            (min, max, avg)
        };
        let ret = BinsXbinDim0::from_content(
            [range_beg].into(),
            [range_end].into(),
            [self.count].into(),
            [min.clone()].into(),
            [max.clone()].into(),
            [avg].into(),
        );
        self.int_ts = range.beg_u64();
        self.range = range;
        self.count = 0;
        self.sumc = 0;
        self.sum = 0.;
        self.did_min_max = false;
        self.min = STY::zero_b();
        self.max = STY::zero_b();
        ret
    }
}
// JSON-facing collector output: anchored ts/pulse offsets plus min/max/avg columns.
#[derive(Debug, Serialize, Deserialize)]
pub struct EventsXbinDim0CollectorOutput<NTY> {
    #[serde(rename = "tsAnchor")]
    ts_anchor_sec: u64,
    #[serde(rename = "tsMs")]
    ts_off_ms: VecDeque<u64>,
    #[serde(rename = "tsNs")]
    ts_off_ns: VecDeque<u64>,
    #[serde(rename = "pulseAnchor")]
    pulse_anchor: u64,
    #[serde(rename = "pulseOff")]
    pulse_off: VecDeque<u64>,
    #[serde(rename = "mins")]
    mins: VecDeque<NTY>,
    #[serde(rename = "maxs")]
    maxs: VecDeque<NTY>,
    #[serde(rename = "avgs")]
    avgs: VecDeque<f32>,
    // Flags and continuation hint are omitted from JSON when unset.
    #[serde(rename = "rangeFinal", default, skip_serializing_if = "is_false")]
    range_final: bool,
    #[serde(rename = "timedOut", default, skip_serializing_if = "is_false")]
    timed_out: bool,
    #[serde(rename = "continueAt", default, skip_serializing_if = "Option::is_none")]
    continue_at: Option<IsoDateTime>,
}
// Allow upcasting to `&dyn Any` for dynamic downcasts at collection boundaries.
impl<NTY> AsAnyRef for EventsXbinDim0CollectorOutput<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}
// Mutable counterpart of `AsAnyRef`.
impl<NTY> AsAnyMut for EventsXbinDim0CollectorOutput<NTY>
where
    NTY: ScalarOps,
{
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
impl<STY> TypeName for EventsXbinDim0CollectorOutput<STY> {
    fn type_name(&self) -> String {
        any::type_name::<Self>().into()
    }
}
impl<NTY: ScalarOps> WithLen for EventsXbinDim0CollectorOutput<NTY> {
    // Length is defined by the number of collected min values.
    fn len(&self) -> usize {
        self.mins.len()
    }
}
impl<NTY> ToJsonResult for EventsXbinDim0CollectorOutput<NTY>
where
    NTY: ScalarOps,
{
    fn to_json_value(&self) -> Result<serde_json::Value, serde_json::Error> {
        serde_json::to_value(self)
    }
}
// Marker impl: this type is a finished collector output.
impl<NTY> CollectedDyn for EventsXbinDim0CollectorOutput<NTY> where NTY: ScalarOps {}
// Accumulates x-binned events until the stream ends or times out.
#[derive(Debug)]
pub struct EventsXbinDim0Collector<NTY> {
    vals: EventsXbinDim0<NTY>,
    range_final: bool,
    timed_out: bool,
    needs_continue_at: bool,
}
impl<NTY> EventsXbinDim0Collector<NTY> {
    pub fn self_name() -> &'static str {
        any::type_name::<Self>()
    }
    pub fn new() -> Self {
        Self {
            range_final: false,
            timed_out: false,
            vals: EventsXbinDim0::empty(),
            needs_continue_at: false,
        }
    }
}
impl<NTY> WithLen for EventsXbinDim0Collector<NTY> {
    // Delegate to the accumulated container.
    fn len(&self) -> usize {
        WithLen::len(&self.vals)
    }
}
impl<STY> ByteEstimate for EventsXbinDim0Collector<STY> {
    fn byte_estimate(&self) -> u64 {
        ByteEstimate::byte_estimate(&self.vals)
    }
}
impl<NTY> CollectorTy for EventsXbinDim0Collector<NTY>
where
    NTY: ScalarOps,
{
    type Input = EventsXbinDim0<NTY>;
    type Output = EventsXbinDim0CollectorOutput<NTY>;
    // Move all events out of `src` into the accumulation buffer.
    fn ingest(&mut self, src: &mut Self::Input) {
        self.vals.tss.append(&mut src.tss);
        self.vals.pulses.append(&mut src.pulses);
        self.vals.mins.append(&mut src.mins);
        self.vals.maxs.append(&mut src.maxs);
        self.vals.avgs.append(&mut src.avgs);
    }
    fn set_range_complete(&mut self) {
        self.range_final = true;
    }
    fn set_timed_out(&mut self) {
        self.timed_out = true;
    }
    fn set_continue_at_here(&mut self) {
        self.needs_continue_at = true;
    }
    // Finalization is currently unimplemented; the old implementation is kept
    // commented out as reference for the intended output shape.
    fn result(
        &mut self,
        range: Option<SeriesRange>,
        _binrange: Option<BinnedRangeEnum>,
    ) -> Result<Self::Output, Error> {
        /*use std::mem::replace;
        let continue_at = if self.timed_out {
            if let Some(ts) = self.vals.tss.back() {
                Some(IsoDateTime::from_u64(*ts + netpod::timeunits::MS))
            } else {
                if let Some(range) = &range {
                    Some(IsoDateTime::from_u64(range.beg + netpod::timeunits::SEC))
                } else {
                    warn!("can not determine continue-at parameters");
                    None
                }
            }
        } else {
            None
        };
        let mins = replace(&mut self.vals.mins, VecDeque::new());
        let maxs = replace(&mut self.vals.maxs, VecDeque::new());
        let avgs = replace(&mut self.vals.avgs, VecDeque::new());
        self.vals.tss.make_contiguous();
        self.vals.pulses.make_contiguous();
        let tst = crate::ts_offs_from_abs(self.vals.tss.as_slices().0);
        let (pulse_anchor, pulse_off) = crate::pulse_offs_from_abs(&self.vals.pulses.as_slices().0);
        let ret = Self::Output {
            ts_anchor_sec: tst.0,
            ts_off_ms: tst.1,
            ts_off_ns: tst.2,
            pulse_anchor,
            pulse_off,
            mins,
            maxs,
            avgs,
            range_final: self.range_final,
            timed_out: self.timed_out,
            continue_at,
        };
        Ok(ret)*/
        todo!()
    }
}
// Wire the event container to its collector type.
impl<NTY> CollectableType for EventsXbinDim0<NTY>
where
    NTY: ScalarOps,
{
    type Collector = EventsXbinDim0Collector<NTY>;
    fn new_collector() -> Self::Collector {
        Self::Collector::new()
    }
}

View File

@@ -1,221 +0,0 @@
use crate::frame::make_error_frame;
use crate::frame::make_frame_2;
use crate::frame::make_log_frame;
use crate::frame::make_range_complete_frame;
use crate::frame::make_stats_frame;
use bytes::BytesMut;
use daqbuf_err as err;
use items_0::framable::FrameTypeInnerDyn;
use items_0::framable::FrameTypeInnerStatic;
use items_0::streamitem::LogItem;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StatsItem;
use items_0::streamitem::StreamItem;
use items_0::streamitem::ERROR_FRAME_TYPE_ID;
use items_0::streamitem::EVENT_QUERY_JSON_STRING_FRAME;
use items_0::streamitem::SITEMTY_NONSPEC_FRAME_TYPE_ID;
use items_0::Events;
use netpod::log::*;
use serde::de::DeserializeOwned;
use serde::Deserialize;
use serde::Serialize;
// Wire-format constants for the in-memory frame: encoding id, header/footer
// sizes in bytes, and a magic number for stream integrity checks.
pub const INMEM_FRAME_ENCID: u32 = 0x12121212;
pub const INMEM_FRAME_HEAD: usize = 20;
pub const INMEM_FRAME_FOOT: usize = 4;
pub const INMEM_FRAME_MAGIC: u32 = 0xc6c3b73d;
// Error type for framing of stream items.
#[derive(Debug, thiserror::Error)]
#[cstm(name = "ItemFramable")]
pub enum Error {
    Msg(String),
    DummyError,
    Frame(#[from] crate::frame::Error),
}
// Adapter to convert any stringifiable error into `Error::Msg`.
struct ErrMsg<E>(E)
where
    E: ToString;
impl<E> From<ErrMsg<E>> for Error
where
    E: ToString,
{
    fn from(value: ErrMsg<E>) -> Self {
        Self::Msg(value.0.to_string())
    }
}
// Compile-time frame type id for statically-known item types.
pub trait FrameTypeStatic {
    const FRAME_TYPE_ID: u32;
}
impl<T> FrameTypeStatic for Sitemty<T>
where
    T: FrameTypeInnerStatic,
{
    const FRAME_TYPE_ID: u32 = <T as FrameTypeInnerStatic>::FRAME_TYPE_ID;
}
// Framable trait objects need some inspection to handle the supposed-to-be common Err ser format:
// Meant to be implemented by Sitemty.
pub trait FrameType {
    fn frame_type_id(&self) -> u32;
}
impl<T> FrameType for Box<T>
where
    T: FrameType,
{
    // Delegate to the boxed value.
    fn frame_type_id(&self) -> u32 {
        self.as_ref().frame_type_id()
    }
}
impl FrameType for Box<dyn Events> {
    fn frame_type_id(&self) -> u32 {
        self.as_ref().frame_type_id()
    }
}
// Object-safe entry point: serialize self into a length-prefixed frame buffer.
pub trait Framable {
    fn make_frame_dyn(&self) -> Result<BytesMut, Error>;
}
pub trait FramableInner: erased_serde::Serialize + FrameTypeInnerDyn + Send {
    fn _dummy(&self);
}
// Blanket impl: anything erased-serializable with a dyn frame type id qualifies.
impl<T: erased_serde::Serialize + FrameTypeInnerDyn + Send> FramableInner for T {
    fn _dummy(&self) {}
}
impl<T> Framable for Sitemty<T>
where
    T: Sized + serde::Serialize + FrameType,
{
    // Dispatch each stream-item variant to its dedicated frame encoder.
    fn make_frame_dyn(&self) -> Result<BytesMut, Error> {
        match self {
            Ok(StreamItem::DataItem(RangeCompletableItem::Data(k))) => {
                let frame_type_id = k.frame_type_id();
                make_frame_2(self, frame_type_id).map_err(Error::from)
            }
            Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)) => {
                make_range_complete_frame().map_err(Error::from)
            }
            Ok(StreamItem::Log(item)) => make_log_frame(item).map_err(Error::from),
            Ok(StreamItem::Stats(item)) => make_stats_frame(item).map_err(Error::from),
            Err(e) => {
                info!("calling make_error_frame for [[{e}]]");
                make_error_frame(e).map_err(Error::from)
            }
        }
    }
}
// Forward framing through a box (also covers unsized trait objects).
impl<T> Framable for Box<T>
where
    T: Framable + ?Sized,
{
    fn make_frame_dyn(&self) -> Result<BytesMut, Error> {
        self.as_ref().make_frame_dyn()
    }
}
// Decoding counterpart of `Framable`: construct a stream item from the
// non-data frame variants (error, log, stats, range-complete).
pub trait FrameDecodable: FrameTypeStatic + DeserializeOwned {
    fn from_error(e: err::Error) -> Self;
    fn from_log(item: LogItem) -> Self;
    fn from_stats(item: StatsItem) -> Self;
    fn from_range_complete() -> Self;
}
impl<T> FrameDecodable for Sitemty<T>
where
    T: FrameTypeInnerStatic + DeserializeOwned,
{
    fn from_error(e: err::Error) -> Self {
        Err(e)
    }
    fn from_log(item: LogItem) -> Self {
        Ok(StreamItem::Log(item))
    }
    fn from_stats(item: StatsItem) -> Self {
        Ok(StreamItem::Stats(item))
    }
    fn from_range_complete() -> Self {
        Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))
    }
}
// Frame payload carrying an event query as a raw JSON string.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EventQueryJsonStringFrame(pub String);
impl EventQueryJsonStringFrame {
    // Borrow the raw JSON text.
    pub fn str(&self) -> &str {
        &self.0
    }
}
impl FrameTypeInnerStatic for EventQueryJsonStringFrame {
    const FRAME_TYPE_ID: u32 = EVENT_QUERY_JSON_STRING_FRAME;
}
impl FrameType for EventQueryJsonStringFrame {
    fn frame_type_id(&self) -> u32 {
        EventQueryJsonStringFrame::FRAME_TYPE_ID
    }
}
impl<T> FrameType for Sitemty<T>
where
    T: FrameType,
{
    // Only data items carry a specific frame type id; range-complete, log and
    // stats share the nonspecific id, and errors have their own.
    fn frame_type_id(&self) -> u32 {
        match self {
            Ok(item) => match item {
                StreamItem::DataItem(item) => match item {
                    RangeCompletableItem::RangeComplete => SITEMTY_NONSPEC_FRAME_TYPE_ID,
                    RangeCompletableItem::Data(item) => item.frame_type_id(),
                },
                StreamItem::Log(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
                StreamItem::Stats(_) => SITEMTY_NONSPEC_FRAME_TYPE_ID,
            },
            Err(_) => ERROR_FRAME_TYPE_ID,
        }
    }
}
// Round-trip test: a log item framed via `Framable` must decode back intact.
#[test]
fn test_frame_log() {
    use crate::channelevents::ChannelEvents;
    use crate::frame::decode_from_slice;
    use netpod::log::Level;
    let item = LogItem {
        node_ix: 123,
        level: Level::TRACE,
        msg: format!("test-log-message"),
    };
    let item: Sitemty<ChannelEvents> = Ok(StreamItem::Log(item));
    let buf = Framable::make_frame_dyn(&item).unwrap();
    // Payload length lives at header bytes 12..16 (little endian).
    let len = u32::from_le_bytes(buf[12..16].try_into().unwrap());
    let item2: LogItem = decode_from_slice(&buf[20..20 + len as usize]).unwrap();
    // Fix: assert the decoded content; previously `item2` was never checked,
    // so the test only proved that decoding does not panic.
    assert_eq!(item2.node_ix, 123);
    assert_eq!(item2.msg, "test-log-message");
}
// Round-trip test: an error item must be framed with the error type id and
// its payload must decode as the shared error representation.
#[test]
fn test_frame_error() {
    use crate::channelevents::ChannelEvents;
    use crate::frame::json_from_slice;
    let item: Sitemty<ChannelEvents> = items_0::streamitem::sitem_err_from_string("dummyerror");
    let buf = Framable::make_frame_dyn(&item).unwrap();
    let len = u32::from_le_bytes(buf[12..16].try_into().unwrap());
    let tyid = u32::from_le_bytes(buf[8..12].try_into().unwrap());
    if tyid != ERROR_FRAME_TYPE_ID {
        panic!("bad tyid");
    }
    eprintln!("buf len {} len {}", buf.len(), len);
    // Decoding successfully is the assertion here; bind with an underscore to
    // silence the unused-variable lint (the value itself is not inspected).
    let _item2: items_0::streamitem::SitemErrTy = json_from_slice(&buf[20..20 + len as usize]).unwrap();
}

View File

@@ -1,433 +0,0 @@
use crate::framable::FrameDecodable;
use crate::framable::INMEM_FRAME_ENCID;
use crate::framable::INMEM_FRAME_FOOT;
use crate::framable::INMEM_FRAME_HEAD;
use crate::framable::INMEM_FRAME_MAGIC;
use crate::inmem::InMemoryFrame;
use bincode::config::FixintEncoding;
use bincode::config::LittleEndian;
use bincode::config::RejectTrailing;
use bincode::config::WithOtherEndian;
use bincode::config::WithOtherIntEncoding;
use bincode::config::WithOtherTrailing;
use bincode::DefaultOptions;
use bytes::BufMut;
use bytes::BytesMut;
use daqbuf_err as err;
use items_0::bincode;
use items_0::streamitem::LogItem;
use items_0::streamitem::StatsItem;
use items_0::streamitem::ERROR_FRAME_TYPE_ID;
use items_0::streamitem::LOG_FRAME_TYPE_ID;
use items_0::streamitem::RANGE_COMPLETE_FRAME_TYPE_ID;
use items_0::streamitem::STATS_FRAME_TYPE_ID;
use items_0::streamitem::TERM_FRAME_TYPE_ID;
use netpod::log::*;
use serde::Serialize;
use std::any;
use std::io;
// Error type covering all frame codecs used by this module.
#[derive(Debug, thiserror::Error)]
#[cstm(name = "ItemFrame")]
pub enum Error {
    TooLongPayload(usize),
    UnknownEncoder(u32),
    #[error("BufferMismatch({0}, {1}, {2})")]
    BufferMismatch(u32, usize, u32),
    #[error("TyIdMismatch({0}, {1})")]
    TyIdMismatch(u32, u32),
    Msg(String),
    // Variants below wrap the individual serializer backends.
    Bincode(#[from] Box<bincode::ErrorKind>),
    RmpEnc(#[from] rmp_serde::encode::Error),
    RmpDec(#[from] rmp_serde::decode::Error),
    ErasedSerde(#[from] erased_serde::Error),
    Postcard(#[from] postcard::Error),
    SerdeJson(#[from] serde_json::Error),
}
// Adapter to convert any stringifiable error into `Error::Msg`.
struct ErrMsg<E>(E)
where
    E: ToString;
impl<E> From<ErrMsg<E>> for Error
where
    E: ToString,
{
    fn from(value: ErrMsg<E>) -> Self {
        Self::Msg(value.0.to_string())
    }
}
/// Build a bincode serializer over `w` configured for the frame wire
/// format: little-endian byte order, fixed-width integers, and (on the
/// matching deserializer) rejection of trailing bytes.
pub fn bincode_ser<W>(
    w: W,
) -> bincode::Serializer<
    W,
    WithOtherTrailing<
        WithOtherIntEncoding<WithOtherEndian<DefaultOptions, LittleEndian>, FixintEncoding>,
        RejectTrailing,
    >,
>
where
    W: io::Write,
{
    use bincode::Options;
    bincode::Serializer::new(
        w,
        DefaultOptions::new()
            .with_little_endian()
            .with_fixint_encoding()
            .reject_trailing_bytes(),
    )
}
fn bincode_to_vec<S>(item: S) -> Result<Vec<u8>, Error>
where
S: Serialize,
{
let mut out = Vec::new();
let mut ser = bincode_ser(&mut out);
item.serialize(&mut ser)?;
Ok(out)
}
fn bincode_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
T: for<'de> serde::Deserialize<'de>,
{
use bincode::Options;
let opts = DefaultOptions::new()
.with_little_endian()
.with_fixint_encoding()
.reject_trailing_bytes();
let mut de = bincode::Deserializer::from_slice(buf, opts);
<T as serde::Deserialize>::deserialize(&mut de).map_err(Into::into)
}
/// Encode `item` as msgpack with named struct fields.
fn msgpack_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: Serialize,
{
    let enc = rmp_serde::to_vec_named(&item)?;
    Ok(enc)
}
/// Encode a type-erased `item` as msgpack (struct-map representation).
fn msgpack_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: erased_serde::Serialize,
{
    let mut buf = Vec::new();
    {
        // The concrete serializer must be dropped before `buf` is returned.
        let mut inner = rmp_serde::Serializer::new(&mut buf).with_struct_map();
        let mut erased = <dyn erased_serde::Serializer>::erase(&mut inner);
        item.erased_serialize(&mut erased)?;
    }
    Ok(buf)
}
/// Deserialize a `T` from msgpack-encoded `buf`.
fn msgpack_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    let item = rmp_serde::from_slice(buf)?;
    Ok(item)
}
/// Serialize `item` with postcard into a fresh byte vector.
fn postcard_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: Serialize,
{
    postcard::to_stdvec(&item).map_err(Error::from)
}
/// Serialize a type-erased `item` with postcard.
///
/// Uses the `Flavor` API directly: erased-serde needs a concrete
/// `Serializer` value to erase, and the `AllocVec` flavor collects the
/// output, which is extracted via `finalize` afterwards.
fn postcard_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: erased_serde::Serialize,
{
    use postcard::ser_flavors::Flavor;
    let mut ser1 = postcard::Serializer {
        output: postcard::ser_flavors::AllocVec::new(),
    };
    {
        let mut ser2 = <dyn erased_serde::Serializer>::erase(&mut ser1);
        item.erased_serialize(&mut ser2)
    }?;
    let ret = ser1.output.finalize()?;
    Ok(ret)
}
/// Deserialize a `T` from postcard-encoded `buf`.
pub fn postcard_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    Ok(postcard::from_bytes(buf)?)
}
fn json_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
T: Serialize,
{
Ok(serde_json::to_vec(&item)?)
}
pub fn json_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
T: for<'de> serde::Deserialize<'de>,
{
Ok(serde_json::from_slice(buf)?)
}
/// Serialize `item` with the codec currently selected for frame payloads.
///
/// The `if false` branches keep the alternative codecs (msgpack, bincode)
/// compiling and easy to switch back to; postcard is the active format.
pub fn encode_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: Serialize,
{
    if false {
        msgpack_to_vec(item)
    } else if false {
        bincode_to_vec(item)
    } else {
        postcard_to_vec(item)
    }
}
/// Like `encode_to_vec` but for type-erased serializable items.
pub fn encode_erased_to_vec<T>(item: T) -> Result<Vec<u8>, Error>
where
    T: erased_serde::Serialize,
{
    if false {
        msgpack_erased_to_vec(item)
    } else {
        postcard_erased_to_vec(item)
    }
}
/// Deserialize with the codec selected in `encode_to_vec`; the branch
/// selection here must stay in sync with the encoders above.
pub fn decode_from_slice<T>(buf: &[u8]) -> Result<T, Error>
where
    T: for<'de> serde::Deserialize<'de>,
{
    if false {
        msgpack_from_slice(buf)
    } else if false {
        bincode_from_slice(buf)
    } else {
        postcard_from_slice(buf)
    }
}
pub fn make_frame_2<T>(item: T, fty: u32) -> Result<BytesMut, Error>
where
T: erased_serde::Serialize,
{
let enc = encode_erased_to_vec(item)?;
if enc.len() > u32::MAX as usize {
return Err(Error::TooLongPayload(enc.len()));
}
let mut h = crc32fast::Hasher::new();
h.update(&enc);
let payload_crc = h.finalize();
// TODO reserve also for footer via constant
let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + enc.len());
buf.put_u32_le(INMEM_FRAME_MAGIC);
buf.put_u32_le(INMEM_FRAME_ENCID);
buf.put_u32_le(fty);
buf.put_u32_le(enc.len() as u32);
buf.put_u32_le(payload_crc);
// TODO add padding to align to 8 bytes.
buf.put(enc.as_ref());
let mut h = crc32fast::Hasher::new();
h.update(&buf);
let frame_crc = h.finalize();
buf.put_u32_le(frame_crc);
return Ok(buf);
}
// TODO remove duplication for these similar `make_*_frame` functions:

/// Frame `error` with type id ERROR_FRAME_TYPE_ID.
pub fn make_error_frame(error: &err::Error) -> Result<BytesMut, Error> {
    // error frames are always encoded as json
    let payload = json_to_vec(error)?;
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&payload);
    let payload_crc = hasher.finalize();
    let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + payload.len());
    buf.put_u32_le(INMEM_FRAME_MAGIC);
    buf.put_u32_le(INMEM_FRAME_ENCID);
    buf.put_u32_le(ERROR_FRAME_TYPE_ID);
    buf.put_u32_le(payload.len() as u32);
    buf.put_u32_le(payload_crc);
    buf.put(payload.as_ref());
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&buf);
    let frame_crc = hasher.finalize();
    buf.put_u32_le(frame_crc);
    Ok(buf)
}

/// Frame a log `item` with type id LOG_FRAME_TYPE_ID.
pub fn make_log_frame(item: &LogItem) -> Result<BytesMut, Error> {
    let payload = encode_to_vec(item)?;
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&payload);
    let payload_crc = hasher.finalize();
    let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + payload.len());
    buf.put_u32_le(INMEM_FRAME_MAGIC);
    buf.put_u32_le(INMEM_FRAME_ENCID);
    buf.put_u32_le(LOG_FRAME_TYPE_ID);
    buf.put_u32_le(payload.len() as u32);
    buf.put_u32_le(payload_crc);
    buf.put(payload.as_ref());
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&buf);
    let frame_crc = hasher.finalize();
    buf.put_u32_le(frame_crc);
    Ok(buf)
}

/// Frame a stats `item` with type id STATS_FRAME_TYPE_ID.
pub fn make_stats_frame(item: &StatsItem) -> Result<BytesMut, Error> {
    let payload = encode_to_vec(item)?;
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&payload);
    let payload_crc = hasher.finalize();
    let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + payload.len());
    buf.put_u32_le(INMEM_FRAME_MAGIC);
    buf.put_u32_le(INMEM_FRAME_ENCID);
    buf.put_u32_le(STATS_FRAME_TYPE_ID);
    buf.put_u32_le(payload.len() as u32);
    buf.put_u32_le(payload_crc);
    buf.put(payload.as_ref());
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&buf);
    let frame_crc = hasher.finalize();
    buf.put_u32_le(frame_crc);
    Ok(buf)
}

/// Frame an empty-payload RANGE_COMPLETE control frame.
pub fn make_range_complete_frame() -> Result<BytesMut, Error> {
    let payload: [u8; 0] = [];
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&payload);
    let payload_crc = hasher.finalize();
    let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + payload.len());
    buf.put_u32_le(INMEM_FRAME_MAGIC);
    buf.put_u32_le(INMEM_FRAME_ENCID);
    buf.put_u32_le(RANGE_COMPLETE_FRAME_TYPE_ID);
    buf.put_u32_le(payload.len() as u32);
    buf.put_u32_le(payload_crc);
    buf.put(payload.as_ref());
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&buf);
    let frame_crc = hasher.finalize();
    buf.put_u32_le(frame_crc);
    Ok(buf)
}

/// Frame an empty-payload TERM control frame.
pub fn make_term_frame() -> Result<BytesMut, Error> {
    let payload: [u8; 0] = [];
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&payload);
    let payload_crc = hasher.finalize();
    let mut buf = BytesMut::with_capacity(INMEM_FRAME_HEAD + INMEM_FRAME_FOOT + payload.len());
    buf.put_u32_le(INMEM_FRAME_MAGIC);
    buf.put_u32_le(INMEM_FRAME_ENCID);
    buf.put_u32_le(TERM_FRAME_TYPE_ID);
    buf.put_u32_le(payload.len() as u32);
    buf.put_u32_le(payload_crc);
    buf.put(payload.as_ref());
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&buf);
    let frame_crc = hasher.finalize();
    buf.put_u32_le(frame_crc);
    Ok(buf)
}
/// Decode one in-memory frame into `T`.
///
/// Control frames (error/log/stats/range-complete) are recognized by type
/// id and routed through the corresponding `FrameDecodable` constructor;
/// any other frame must carry `T::FRAME_TYPE_ID` and a payload encoded by
/// `encode_to_vec`.
pub fn decode_frame<T>(frame: &InMemoryFrame) -> Result<T, Error>
where
    T: FrameDecodable,
{
    // Reject frames produced by an unknown encoder.
    if frame.encid() != INMEM_FRAME_ENCID {
        return Err(Error::UnknownEncoder(frame.encid()));
    }
    // Header-declared payload length must match the actual buffer.
    if frame.len() as usize != frame.buf().len() {
        return Err(Error::BufferMismatch(frame.len(), frame.buf().len(), frame.tyid()));
    }
    if frame.tyid() == ERROR_FRAME_TYPE_ID {
        // error frames are always encoded as json
        let k: err::Error = match json_from_slice(frame.buf()) {
            Ok(item) => item,
            Err(e) => {
                // On decode failure, log a bounded prefix of the payload.
                error!("deserialize len {} ERROR_FRAME_TYPE_ID {}", frame.buf().len(), e);
                let n = frame.buf().len().min(256);
                let s = String::from_utf8_lossy(&frame.buf()[..n]);
                error!("frame.buf as string: {:?}", s);
                Err(e)?
            }
        };
        Ok(T::from_error(k))
    } else if frame.tyid() == LOG_FRAME_TYPE_ID {
        let k: LogItem = match decode_from_slice(frame.buf()) {
            Ok(item) => item,
            Err(e) => {
                error!("deserialize len {} LOG_FRAME_TYPE_ID {}", frame.buf().len(), e);
                let n = frame.buf().len().min(128);
                let s = String::from_utf8_lossy(&frame.buf()[..n]);
                error!("frame.buf as string: {:?}", s);
                Err(e)?
            }
        };
        Ok(T::from_log(k))
    } else if frame.tyid() == STATS_FRAME_TYPE_ID {
        let k: StatsItem = match decode_from_slice(frame.buf()) {
            Ok(item) => item,
            Err(e) => {
                error!("deserialize len {} STATS_FRAME_TYPE_ID {}", frame.buf().len(), e);
                let n = frame.buf().len().min(128);
                let s = String::from_utf8_lossy(&frame.buf()[..n]);
                error!("frame.buf as string: {:?}", s);
                Err(e)?
            }
        };
        Ok(T::from_stats(k))
    } else if frame.tyid() == RANGE_COMPLETE_FRAME_TYPE_ID {
        // There is currently no content in this variant.
        Ok(T::from_range_complete())
    } else {
        // Data frame: the type id must match the expected item type.
        let tyid = T::FRAME_TYPE_ID;
        if frame.tyid() != tyid {
            Err(Error::TyIdMismatch(tyid, frame.tyid()))
        } else {
            match decode_from_slice(frame.buf()) {
                Ok(item) => Ok(item),
                Err(e) => {
                    error!(
                        "decode_from_slice error len {} tyid {:04x} T {}",
                        frame.buf().len(),
                        frame.tyid(),
                        any::type_name::<T>()
                    );
                    // Dump a bounded prefix both as bytes and as lossy utf-8.
                    let n = frame.buf().len().min(64);
                    let s = String::from_utf8_lossy(&frame.buf()[..n]);
                    error!("decode_from_slice bad frame.buf as bytes: {:?}", &frame.buf()[..n]);
                    error!("decode_from_slice bad frame.buf as string: {:?}", s);
                    Err(e)?
                }
            }
        }
    }
}
/// Zero-padded 8-digit hex crc32 of the given bytes.
pub fn crchex<T>(t: T) -> String
where
    T: AsRef<[u8]>,
{
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(t.as_ref());
    format!("{:08x}", hasher.finalize())
}

View File

@@ -1,34 +0,0 @@
use bytes::Bytes;
use std::fmt;
/// A fully received wire frame held in memory: decoded header fields plus
/// the raw, still-encoded payload bytes.
pub struct InMemoryFrame {
    // Encoder id from the frame header.
    pub encid: u32,
    // Frame type id (item type or a control-frame id).
    pub tyid: u32,
    // Payload length in bytes as stated by the frame header.
    pub len: u32,
    // Payload bytes (header and footer already stripped).
    pub buf: Bytes,
}
impl InMemoryFrame {
    /// Encoder id from the frame header.
    pub fn encid(&self) -> u32 {
        self.encid
    }
    /// Frame type id.
    pub fn tyid(&self) -> u32 {
        self.tyid
    }
    /// Header-declared payload length.
    pub fn len(&self) -> u32 {
        self.len
    }
    /// The raw payload bytes.
    pub fn buf(&self) -> &Bytes {
        &self.buf
    }
}
// Debug prints only the header fields; the payload is intentionally omitted.
impl fmt::Debug for InMemoryFrame {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            fmt,
            "InMemoryFrame {{ encid: {:x} tyid: {:x} len {} }}",
            self.encid, self.tyid, self.len
        )
    }
}

View File

@@ -1,178 +0,0 @@
pub mod accounting;
pub mod binning;
pub mod binsdim0;
pub mod binsxbindim0;
pub mod channelevents;
pub mod empty;
pub mod eventfull;
pub mod eventsdim0;
pub mod eventsdim0enum;
pub mod eventsdim1;
pub mod eventsxbindim0;
pub mod framable;
pub mod frame;
pub mod inmem;
pub mod merger;
pub mod streams;
#[cfg(feature = "heavy")]
#[cfg(test)]
pub mod test;
pub mod testgen;
pub mod transform;
use channelevents::ChannelEvents;
use daqbuf_err as err;
use futures_util::Stream;
use items_0::isodate::IsoDateTime;
use items_0::streamitem::Sitemty;
use items_0::transform::EventTransform;
use items_0::Empty;
use items_0::Events;
use items_0::MergeError;
use merger::Mergeable;
use netpod::range::evrange::SeriesRange;
use netpod::timeunits::*;
use std::collections::VecDeque;
use std::fmt;
/// Split absolute ns timestamps into an anchor (whole seconds) plus
/// per-event millisecond and remaining-nanosecond offsets.
pub fn ts_offs_from_abs(tss: &[u64]) -> (u64, VecDeque<u64>, VecDeque<u64>) {
    // Anchor at the first timestamp, truncated to seconds (0 if empty).
    let ts_anchor_sec = tss.first().copied().unwrap_or(0) / SEC;
    let (ts_off_ms, ts_off_ns) = ts_offs_from_abs_with_anchor(ts_anchor_sec, tss);
    (ts_anchor_sec, ts_off_ms, ts_off_ns)
}
/// Same split as `ts_offs_from_abs`, but against a caller-provided anchor.
pub fn ts_offs_from_abs_with_anchor(ts_anchor_sec: u64, tss: &[u64]) -> (VecDeque<u64>, VecDeque<u64>) {
    let anchor_ns = ts_anchor_sec * SEC;
    let ts_off_ms: VecDeque<u64> = tss.iter().map(|&ts| (ts - anchor_ns) / MS).collect();
    // Nanosecond remainder after subtracting anchor and the ms offset.
    let ts_off_ns: VecDeque<u64> = tss
        .iter()
        .zip(&ts_off_ms)
        .map(|(&ts, &ms)| ts - anchor_ns - ms * MS)
        .collect();
    (ts_off_ms, ts_off_ns)
}
/// Compute a pulse-id anchor (first pulse rounded down to a multiple of
/// 10000) and the offsets of all pulse ids relative to it.
pub fn pulse_offs_from_abs(pulse: &[u64]) -> (u64, VecDeque<u64>) {
    let pulse_anchor = match pulse.first() {
        Some(&p) => p / 10000 * 10000,
        None => 0,
    };
    let pulse_off: VecDeque<u64> = pulse.iter().map(|&p| p - pulse_anchor).collect();
    (pulse_anchor, pulse_off)
}
/// Broad classification for this crate's `Error`.
#[derive(Debug, PartialEq)]
pub enum ErrorKind {
    General,
    #[allow(unused)]
    MismatchedType,
}
// TODO stack error better
/// Minimal error type for this crate: a kind plus an optional message.
#[derive(Debug, PartialEq)]
pub struct Error {
    #[allow(unused)]
    kind: ErrorKind,
    msg: Option<String>,
}
// Display reuses the Debug representation.
impl fmt::Display for Error {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{self:?}")
    }
}
impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Self {
        Self { kind, msg: None }
    }
}
// Any plain message becomes a General error.
impl From<String> for Error {
    fn from(msg: String) -> Self {
        Self {
            msg: Some(msg),
            kind: ErrorKind::General,
        }
    }
}
// TODO this discards structure
impl From<err::Error> for Error {
    fn from(e: err::Error) -> Self {
        Self {
            msg: Some(format!("{e}")),
            kind: ErrorKind::General,
        }
    }
}
// TODO this discards structure
impl From<Error> for err::Error {
    fn from(e: Error) -> Self {
        err::Error::with_msg_no_trace(format!("{e}"))
    }
}
impl std::error::Error for Error {}
// Lets serde deserializers report custom errors through this type.
impl serde::de::Error for Error {
    fn custom<T>(msg: T) -> Self
    where
        T: fmt::Display,
    {
        format!("{msg}").into()
    }
}
/// Convert absolute ns timestamps into ISO date-time values.
pub fn make_iso_ts(tss: &[u64]) -> Vec<IsoDateTime> {
    let mut ret = Vec::with_capacity(tss.len());
    for &ts in tss {
        ret.push(IsoDateTime::from_ns_u64(ts));
    }
    ret
}
/// Forward the merger's `Mergeable` contract to the boxed `Events` object,
/// using the `*_evs` object-safe counterparts where needed.
impl Mergeable for Box<dyn Events> {
    fn ts_min(&self) -> Option<u64> {
        self.as_ref().ts_min()
    }
    fn ts_max(&self) -> Option<u64> {
        self.as_ref().ts_max()
    }
    fn new_empty(&self) -> Self {
        self.as_ref().new_empty_evs()
    }
    fn clear(&mut self) {
        // Disambiguate: call the `Events::clear`, not `Mergeable::clear`.
        Events::clear(self.as_mut())
    }
    fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError> {
        self.as_mut().drain_into_evs(dst, range)
    }
    fn find_lowest_index_gt(&self, ts: u64) -> Option<usize> {
        self.as_ref().find_lowest_index_gt_evs(ts)
    }
    fn find_lowest_index_ge(&self, ts: u64) -> Option<usize> {
        self.as_ref().find_lowest_index_ge_evs(ts)
    }
    fn find_highest_index_lt(&self, ts: u64) -> Option<usize> {
        self.as_ref().find_highest_index_lt_evs(ts)
    }
    fn tss(&self) -> Vec<netpod::TsMs> {
        // Convert the ns timestamps of the events into TsMs values.
        Events::tss(self)
            .iter()
            .map(|x| netpod::TsMs::from_ns_u64(*x))
            .collect()
    }
}
/// Convenience bound for streams of channel events that can also transform.
pub trait ChannelEventsInput: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {}
impl<T> ChannelEventsInput for T where T: Stream<Item = Sitemty<ChannelEvents>> + EventTransform + Send {}

View File

@@ -1,491 +0,0 @@
use crate::Error;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::container::ByteEstimate;
use items_0::on_sitemty_data;
use items_0::streamitem::sitem_data;
use items_0::streamitem::LogItem;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::transform::EventTransform;
use items_0::transform::TransformProperties;
use items_0::transform::WithTransformProperties;
use items_0::Events;
use items_0::MergeError;
use items_0::WithLen;
use netpod::log::*;
use netpod::TsMs;
use std::collections::VecDeque;
use std::fmt;
use std::ops::ControlFlow;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
// Flush the output batch once its estimated encoded size reaches this limit.
const OUT_MAX_BYTES: u64 = 1024 * 200;
// Whether to log when merged inputs deliver non-monotonic timestamps.
const DO_DETECT_NON_MONO: bool = true;
// Verbosity toggles: `macro_rules!` uses the FIRST matching arm, so with
// the empty arm first these macros are no-ops; swap the arms to enable.
#[allow(unused)]
macro_rules! trace2 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
#[allow(unused)]
macro_rules! trace3 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
#[allow(unused)]
macro_rules! trace4 {
    ($($arg:tt)*) => {};
    ($($arg:tt)*) => { trace!($($arg)*) };
}
/// Capabilities the merger needs from a batch type: timestamp bounds,
/// timestamp-based index lookup, and draining index ranges into another batch.
pub trait Mergeable<Rhs = Self>: fmt::Debug + WithLen + ByteEstimate + Unpin {
    /// Smallest timestamp in the batch, if non-empty.
    fn ts_min(&self) -> Option<u64>;
    /// Largest timestamp in the batch, if non-empty.
    fn ts_max(&self) -> Option<u64>;
    /// A new empty batch of the same concrete shape.
    fn new_empty(&self) -> Self;
    fn clear(&mut self);
    // TODO when MergeError::Full gets returned, any guarantees about what has been modified or kept unchanged?
    /// Move events in `range` (start..end indices) into `dst`.
    fn drain_into(&mut self, dst: &mut Self, range: (usize, usize)) -> Result<(), MergeError>;
    fn find_lowest_index_gt(&self, ts: u64) -> Option<usize>;
    fn find_lowest_index_ge(&self, ts: u64) -> Option<usize>;
    fn find_highest_index_lt(&self, ts: u64) -> Option<usize>;
    // TODO only for testing:
    fn tss(&self) -> Vec<TsMs>;
}
/// Boxed input stream feeding a `Merger`.
type MergeInp<T> = Pin<Box<dyn Stream<Item = Sitemty<T>> + Send>>;
/// K-way merge of timestamp-ordered event streams.
pub struct Merger<T> {
    // One slot per input; `None` once that input stream has finished.
    inps: Vec<Option<MergeInp<T>>>,
    // Current buffered batch per input, refilled on demand.
    items: Vec<Option<T>>,
    // Output batch under construction.
    out: Option<T>,
    // Request to flush `out` even if it has not reached its size limits.
    do_clear_out: bool,
    // Maximum number of events per emitted batch.
    out_max_len: usize,
    // Per-input range-complete flags; all must be set before the merger
    // forwards a RangeComplete item downstream.
    range_complete: Vec<bool>,
    out_of_band_queue: VecDeque<Sitemty<T>>,
    log_queue: VecDeque<LogItem>,
    // Intended to track the highest candidate timestamp seen so far, for
    // non-monotonic input detection.
    dim0ix_max: u64,
    // Whether the initial empty marker item was already emitted.
    done_emit_first_empty: bool,
    // Shutdown state machine flags, advanced in order by `poll_next`.
    done_data: bool,
    done_buffered: bool,
    done_range_complete: bool,
    complete: bool,
    poll_count: usize,
}
// Debug summarizes the slots and flags; item payloads are not dumped.
impl<T> fmt::Debug for Merger<T>
where
    T: Mergeable,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let inps: Vec<_> = self.inps.iter().map(|x| x.is_some()).collect();
        fmt.debug_struct(std::any::type_name::<Self>())
            .field("inps", &inps)
            .field("items", &self.items)
            .field("out_max_len", &self.out_max_len)
            .field("range_complete", &self.range_complete)
            .field("out_of_band_queue", &self.out_of_band_queue.len())
            .field("done_data", &self.done_data)
            .field("done_buffered", &self.done_buffered)
            .field("done_range_complete", &self.done_range_complete)
            .finish()
    }
}
impl<T> Merger<T>
where
    T: Mergeable,
{
    /// Create a merger over `inps`. Emitted batches are limited to
    /// `out_max_len` events (default 1000) or `OUT_MAX_BYTES` estimated bytes.
    pub fn new(inps: Vec<MergeInp<T>>, out_max_len: Option<u32>) -> Self {
        let n = inps.len();
        Self {
            inps: inps.into_iter().map(|x| Some(x)).collect(),
            items: (0..n).into_iter().map(|_| None).collect(),
            out: None,
            do_clear_out: false,
            out_max_len: out_max_len.unwrap_or(1000) as usize,
            range_complete: vec![false; n],
            out_of_band_queue: VecDeque::new(),
            log_queue: VecDeque::new(),
            dim0ix_max: 0,
            done_emit_first_empty: false,
            done_data: false,
            done_buffered: false,
            done_range_complete: false,
            complete: false,
            poll_count: 0,
        }
    }

    /// Move events with timestamp <= `upto` from `src` into `dst`.
    fn drain_into_upto(src: &mut T, dst: &mut T, upto: u64) -> Result<(), MergeError> {
        match src.find_lowest_index_gt(upto) {
            Some(ilgt) => {
                src.drain_into(dst, (0, ilgt))?;
            }
            None => {
                // TODO should not be here.
                src.drain_into(dst, (0, src.len()))?;
            }
        }
        Ok(())
    }

    /// Append all of `src` to the output batch.
    fn take_into_output_all(&mut self, src: &mut T) -> Result<(), MergeError> {
        // TODO optimize the case when some large batch should be added to some existing small batch already in out.
        // TODO maybe use two output slots?
        self.take_into_output_upto(src, u64::MAX)
    }

    /// Append events with timestamp <= `upto` from `src` to the output
    /// batch, creating a fresh output batch if none exists yet.
    fn take_into_output_upto(&mut self, src: &mut T, upto: u64) -> Result<(), MergeError> {
        // TODO optimize the case when some large batch should be added to some existing small batch already in out.
        // TODO maybe use two output slots?
        if let Some(out) = self.out.as_mut() {
            Self::drain_into_upto(src, out, upto)?;
        } else {
            trace2!("move into fresh");
            let mut fresh = src.new_empty();
            Self::drain_into_upto(src, &mut fresh, upto)?;
            self.out = Some(fresh);
        }
        Ok(())
    }

    /// One merge step: pick the input batch with the lowest timestamp and
    /// move events from it into the output batch. `Continue` means the
    /// caller should re-run (an empty item was discarded); `Break` means an
    /// output-affecting step was taken.
    fn process(mut self: Pin<&mut Self>, _cx: &mut Context) -> Result<ControlFlow<()>, Error> {
        use ControlFlow::*;
        trace4!("process");
        let mut log_items = Vec::new();
        // Find the two inputs with the lowest minimum timestamps:
        // tslows[0] is the overall lowest, tslows[1] the runner-up.
        let mut tslows = [None, None];
        for (i1, itemopt) in self.items.iter_mut().enumerate() {
            if let Some(item) = itemopt {
                if let Some(t1) = item.ts_min() {
                    if let Some((_, a)) = tslows[0] {
                        if t1 < a {
                            tslows[1] = tslows[0];
                            tslows[0] = Some((i1, t1));
                        } else {
                            if let Some((_, b)) = tslows[1] {
                                if t1 < b {
                                    tslows[1] = Some((i1, t1));
                                } else {
                                    // nothing to do
                                }
                            } else {
                                tslows[1] = Some((i1, t1));
                            }
                        }
                    } else {
                        tslows[0] = Some((i1, t1));
                    }
                } else {
                    // the item seems empty.
                    // TODO count for stats.
                    trace2!("empty item, something to do here?");
                    *itemopt = None;
                    return Ok(Continue(()));
                }
            }
        }
        if DO_DETECT_NON_MONO {
            // `dim0ix_max` tracks the highest candidate timestamp seen so
            // far. A new lowest candidate strictly below it means an input
            // went backwards in time; log that instead of silently merging.
            // (Bug fix: previously the maximum was never advanced, so the
            // check could not fire, and the value was overwritten before
            // formatting, so the reported diff was always zero.)
            if let Some((i1, t1)) = tslows[0].as_ref() {
                if *t1 < self.dim0ix_max {
                    let item = LogItem {
                        node_ix: *i1 as _,
                        level: Level::INFO,
                        msg: format!(
                            "dim0ix_max {} vs {} diff {}",
                            self.dim0ix_max,
                            t1,
                            self.dim0ix_max - t1
                        ),
                    };
                    log_items.push(item);
                } else {
                    self.dim0ix_max = *t1;
                }
            }
        }
        // Hand collected diagnostics to the log queue so `poll_next` emits
        // them. (Bug fix: they were previously dropped on the floor.)
        self.log_queue.extend(log_items);
        trace4!("tslows {tslows:?}");
        if let Some((il0, _tl0)) = tslows[0] {
            if let Some((_il1, tl1)) = tslows[1] {
                // There is a second input, take only up to the second highest timestamp
                let item = self.items[il0].as_mut().unwrap();
                if let Some(th0) = item.ts_max() {
                    if th0 <= tl1 {
                        // Can take the whole item
                        // TODO gather stats about this case. Should be never for databuffer, and often for scylla.
                        let mut item = self.items[il0].take().unwrap();
                        trace3!("Take all from item {item:?}");
                        match self.take_into_output_all(&mut item) {
                            Ok(()) => Ok(Break(())),
                            Err(MergeError::Full) | Err(MergeError::NotCompatible) => {
                                // Output is full or incompatible: put the
                                // item back and request an output flush.
                                // TODO count for stats
                                trace3!("Put item back");
                                self.items[il0] = Some(item);
                                self.do_clear_out = true;
                                Ok(Break(()))
                            }
                        }
                    } else {
                        // Take only up to the lowest ts of the second-lowest input
                        let mut item = self.items[il0].take().unwrap();
                        trace3!("Take up to {tl1} from item {item:?}");
                        let res = self.take_into_output_upto(&mut item, tl1);
                        match res {
                            Ok(()) => {
                                if item.len() == 0 {
                                    // TODO should never be here because we should have taken the whole item
                                    Err(format!("Should have taken the whole item instead").into())
                                } else {
                                    self.items[il0] = Some(item);
                                    Ok(Break(()))
                                }
                            }
                            Err(MergeError::Full) | Err(MergeError::NotCompatible) => {
                                // TODO count for stats
                                info!("Put item back because {res:?}");
                                self.items[il0] = Some(item);
                                self.do_clear_out = true;
                                Ok(Break(()))
                            }
                        }
                    }
                } else {
                    // TODO should never be here because ts-max should always exist here.
                    Err(format!("selected input without max ts").into())
                }
            } else {
                // No other input, take the whole item
                let mut item = self.items[il0].take().unwrap();
                trace3!("Take all from item (no other input) {item:?}");
                match self.take_into_output_all(&mut item) {
                    Ok(()) => Ok(Break(())),
                    Err(_) => {
                        // TODO count for stats
                        trace3!("Put item back");
                        self.items[il0] = Some(item);
                        self.do_clear_out = true;
                        Ok(Break(()))
                    }
                }
            }
        } else {
            Err(format!("after low ts search nothing found").into())
        }
    }

    /// Poll every input that currently has no buffered batch; control items
    /// (logs, stats, range-complete) are queued out-of-band and polling of
    /// that input continues until a data item or Pending/end is seen.
    fn refill(mut self: Pin<&mut Self>, cx: &mut Context) -> Result<Poll<()>, Error> {
        trace4!("refill");
        use Poll::*;
        let mut has_pending = false;
        for i in 0..self.inps.len() {
            if self.items[i].is_none() {
                while let Some(inp) = self.inps[i].as_mut() {
                    match inp.poll_next_unpin(cx) {
                        Ready(Some(Ok(k))) => match k {
                            StreamItem::DataItem(k) => match k {
                                RangeCompletableItem::Data(k) => {
                                    if self.done_emit_first_empty == false {
                                        trace!("emit first empty marker item");
                                        self.done_emit_first_empty = true;
                                        let item = k.new_empty();
                                        let item = sitem_data(item);
                                        self.out_of_band_queue.push_back(item);
                                    }
                                    self.items[i] = Some(k);
                                    trace4!("refilled {}", i);
                                }
                                RangeCompletableItem::RangeComplete => {
                                    self.range_complete[i] = true;
                                    trace!("range_complete {:?}", self.range_complete);
                                    continue;
                                }
                            },
                            StreamItem::Log(item) => {
                                // TODO limit queue length
                                self.out_of_band_queue.push_back(Ok(StreamItem::Log(item)));
                                continue;
                            }
                            StreamItem::Stats(item) => {
                                // TODO limit queue length
                                self.out_of_band_queue.push_back(Ok(StreamItem::Stats(item)));
                                continue;
                            }
                        },
                        Ready(Some(Err(e))) => {
                            // Input error terminates that input and the merge.
                            self.inps[i] = None;
                            return Err(e.into());
                        }
                        Ready(None) => {
                            self.inps[i] = None;
                        }
                        Pending => {
                            has_pending = true;
                        }
                    }
                    break;
                }
            }
        }
        if has_pending {
            Ok(Pending)
        } else {
            Ok(Ready(()))
        }
    }

    /// Merge-and-maybe-emit step, run after a successful refill.
    fn poll3(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<Result<T, Error>>>> {
        use ControlFlow::*;
        use Poll::*;
        trace4!("poll3");
        #[allow(unused)]
        let ninps = self.inps.iter().filter(|a| a.is_some()).count();
        let nitems = self.items.iter().filter(|a| a.is_some()).count();
        // Inputs that are still alive but have no buffered item would mean
        // refill reported ready while work was missing.
        let nitemsmissing = self
            .inps
            .iter()
            .zip(self.items.iter())
            .filter(|(a, b)| a.is_some() && b.is_none())
            .count();
        trace3!("ninps {ninps} nitems {nitems} nitemsmissing {nitemsmissing}");
        if nitemsmissing != 0 {
            let e = Error::from(format!("missing but no pending"));
            return Break(Ready(Some(Err(e))));
        }
        let last_emit = nitems == 0;
        if nitems != 0 {
            match Self::process(Pin::new(&mut self), cx) {
                Ok(Break(())) => {}
                Ok(Continue(())) => {}
                Err(e) => return Break(Ready(Some(Err(e)))),
            }
        }
        if let Some(o) = self.out.as_ref() {
            // Emit once the batch is big enough, a flush was requested, or
            // this is the final drain.
            if o.len() >= self.out_max_len || o.byte_estimate() >= OUT_MAX_BYTES || self.do_clear_out || last_emit {
                if o.len() > self.out_max_len {
                    debug!("MERGER OVERWEIGHT ITEM {} vs {}", o.len(), self.out_max_len);
                }
                trace3!("decide to output");
                self.do_clear_out = false;
                //Break(Ready(Some(Ok(self.out.take().unwrap()))))
                let item = sitem_data(self.out.take().unwrap());
                self.out_of_band_queue.push_back(item);
                Continue(())
            } else {
                trace4!("not enough output yet");
                Continue(())
            }
        } else {
            trace!("no output candidate");
            if last_emit {
                Break(Ready(None))
            } else {
                Continue(())
            }
        }
    }

    /// Refill inputs, then run one merge step.
    fn poll2(mut self: Pin<&mut Self>, cx: &mut Context) -> ControlFlow<Poll<Option<Result<T, Error>>>> {
        use ControlFlow::*;
        use Poll::*;
        match Self::refill(Pin::new(&mut self), cx) {
            Ok(Ready(())) => Self::poll3(self, cx),
            Ok(Pending) => Break(Pending),
            Err(e) => Break(Ready(Some(Err(e)))),
        }
    }
}
impl<T> Stream for Merger<T>
where
    T: Mergeable,
{
    type Item = Sitemty<T>;
    /// Drive the merge state machine. Emission priority per wakeup:
    /// queued log items, then shutdown bookkeeping, then queued
    /// out-of-band items, then freshly merged data.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        self.poll_count += 1;
        let span1 = span!(Level::INFO, "Merger", pc = self.poll_count);
        let _spg = span1.enter();
        loop {
            trace3!("poll");
            break if let Some(item) = self.log_queue.pop_front() {
                Ready(Some(Ok(StreamItem::Log(item))))
            } else if self.poll_count == usize::MAX {
                // NOTE(review): effectively unreachable (requires usize::MAX
                // polls); presumably a debugging escape hatch — confirm.
                self.done_range_complete = true;
                continue;
            } else if self.complete {
                panic!("poll after complete");
            } else if self.done_range_complete {
                // Final transition: signal end-of-stream exactly once.
                self.complete = true;
                Ready(None)
            } else if self.done_buffered {
                self.done_range_complete = true;
                // Forward RangeComplete only if every input reported it.
                if self.range_complete.iter().all(|x| *x) {
                    trace!("emit RangeComplete");
                    Ready(Some(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete))))
                } else {
                    continue;
                }
            } else if self.done_data {
                trace!("done_data");
                self.done_buffered = true;
                // Flush the partially filled output batch, if any.
                if let Some(out) = self.out.take() {
                    trace!("done_data emit buffered len {}", out.len());
                    Ready(Some(sitem_data(out)))
                } else {
                    continue;
                }
            } else if let Some(item) = self.out_of_band_queue.pop_front() {
                let item = on_sitemty_data!(item, |k: T| {
                    trace3!("emit out-of-band data len {}", k.len());
                    sitem_data(k)
                });
                Ready(Some(item))
            } else {
                match Self::poll2(self.as_mut(), cx) {
                    ControlFlow::Continue(()) => continue,
                    ControlFlow::Break(k) => match k {
                        Ready(Some(Ok(out))) => {
                            // Legacy direct-emit path, intentionally disabled.
                            if true {
                                error!("THIS BRANCH SHOULD NO LONGER OCCUR, REFACTOR");
                                self.done_data = true;
                                let e = Error::from(format!("TODO refactor direct emit in merger"));
                                return Ready(Some(Err(e.into())));
                            }
                            trace!("emit buffered len {}", out.len());
                            Ready(Some(sitem_data(out)))
                        }
                        Ready(Some(Err(e))) => {
                            self.done_data = true;
                            Ready(Some(Err(e.into())))
                        }
                        Ready(None) => {
                            // All inputs drained: start the shutdown sequence.
                            self.done_data = true;
                            continue;
                        }
                        Pending => Pending,
                    },
                }
            };
        }
    }
}
impl<T> WithTransformProperties for Merger<T> {
    // Not implemented yet.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
impl<T> EventTransform for Merger<T>
where
    T: Send,
{
    // Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}

View File

@@ -1,290 +0,0 @@
use futures_util::Future;
use futures_util::FutureExt;
use futures_util::Stream;
use futures_util::StreamExt;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::transform::EventStreamTrait;
use items_0::transform::EventTransform;
use items_0::transform::TransformProperties;
use items_0::transform::WithTransformProperties;
use items_0::Events;
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
/// Stream adapter pairing each item with its running index,
/// analogous to `StreamExt::enumerate`.
pub struct Enumerate2<T> {
    inp: T,
    // Index of the next item to be yielded.
    cnt: usize,
}
impl<T> Enumerate2<T> {
    pub fn new(inp: T) -> Self
    where
        T: EventTransform,
    {
        Self { inp, cnt: 0 }
    }
}
impl<T> Stream for Enumerate2<T>
where
    T: Stream + Unpin,
{
    type Item = (usize, <T as Stream>::Item);
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        match self.inp.poll_next_unpin(cx) {
            Ready(Some(item)) => {
                // Attach the current index, then advance the counter.
                let i = self.cnt;
                self.cnt += 1;
                Ready(Some((i, item)))
            }
            Ready(None) => Ready(None),
            Pending => Pending,
        }
    }
}
impl<T> WithTransformProperties for Enumerate2<T>
where
    T: WithTransformProperties,
{
    // Transparent: forward the inner stream's transform properties.
    fn query_transform_properties(&self) -> TransformProperties {
        self.inp.query_transform_properties()
    }
}
impl<T> EventTransform for Enumerate2<T>
where
    T: WithTransformProperties + Send,
{
    // Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}
/// Stream adapter that maps each item through `f` and awaits the resulting
/// future before yielding, analogous to `StreamExt::then`.
pub struct Then2<T, F, Fut> {
    inp: Pin<Box<T>>,
    f: Pin<Box<F>>,
    // In-flight future created from the most recent input item.
    fut: Option<Pin<Box<Fut>>>,
}
impl<T, F, Fut> Then2<T, F, Fut>
where
    T: Stream,
    F: Fn(<T as Stream>::Item) -> Fut,
{
    pub fn new(inp: T, f: F) -> Self
    where
        T: EventTransform,
    {
        Self {
            inp: Box::pin(inp),
            f: Box::pin(f),
            fut: None,
        }
    }
    // Start processing `item` by creating and pinning its future.
    fn prepare_fut(&mut self, item: <T as Stream>::Item) {
        self.fut = Some(Box::pin((self.f)(item)));
    }
}
/*impl<T, F, Fut> Unpin for Then2<T, F, Fut>
where
    T: Unpin,
    F: Unpin,
    Fut: Unpin,
{
}*/
impl<T, F, Fut> Stream for Then2<T, F, Fut>
where
    T: Stream,
    F: Fn(<T as Stream>::Item) -> Fut,
    Fut: Future,
{
    type Item = <Fut as Future>::Output;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        loop {
            // Drive the pending future first; pull the next input item only
            // after the previous one has resolved.
            break if let Some(fut) = self.fut.as_mut() {
                match fut.poll_unpin(cx) {
                    Ready(item) => {
                        self.fut = None;
                        Ready(Some(item))
                    }
                    Pending => Pending,
                }
            } else {
                match self.inp.poll_next_unpin(cx) {
                    Ready(Some(item)) => {
                        self.prepare_fut(item);
                        continue;
                    }
                    Ready(None) => Ready(None),
                    Pending => Pending,
                }
            };
        }
    }
}
impl<T, F, Fut> WithTransformProperties for Then2<T, F, Fut>
where
    T: EventTransform,
{
    // Transparent: forward the inner stream's transform properties.
    fn query_transform_properties(&self) -> TransformProperties {
        self.inp.query_transform_properties()
    }
}
impl<T, F, Fut> EventTransform for Then2<T, F, Fut>
where
    T: EventTransform + Send,
    F: Send,
    Fut: Send,
{
    // Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}
/// Extension methods attaching the combinators above to any event-transform
/// stream, mirroring the `StreamExt` style.
pub trait TransformerExt {
    fn enumerate2(self) -> Enumerate2<Self>
    where
        Self: EventTransform + Sized;
    fn then2<F, Fut>(self, f: F) -> Then2<Self, F, Fut>
    where
        Self: EventTransform + Stream + Sized,
        F: Fn(<Self as Stream>::Item) -> Fut,
        Fut: Future;
}
// Blanket impl: the bounds are enforced per-method, not on the impl.
impl<T> TransformerExt for T {
    fn enumerate2(self) -> Enumerate2<Self>
    where
        Self: EventTransform + Sized,
    {
        Enumerate2::new(self)
    }
    fn then2<F, Fut>(self, f: F) -> Then2<Self, F, Fut>
    where
        Self: EventTransform + Stream + Sized,
        F: Fn(<Self as Stream>::Item) -> Fut,
        Fut: Future,
    {
        Then2::new(self, f)
    }
}
/// A stream that yields the elements of a queue front to back, then ends.
pub struct VecStream<T> {
    queue: VecDeque<T>,
}
impl<T> VecStream<T> {
    /// Wrap `inp`; items are yielded in queue order.
    pub fn new(inp: VecDeque<T>) -> Self {
        Self { queue: inp }
    }
}
impl<T> Stream for VecStream<T>
where
    T: Unpin,
{
    type Item = T;
    fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
        // Never pending: either the next queued element or end-of-stream.
        Poll::Ready(self.queue.pop_front())
    }
}
impl<T> WithTransformProperties for VecStream<T> {
    // Not implemented yet.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
impl<T> EventTransform for VecStream<T>
where
    T: Send,
{
    // Not implemented yet.
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        todo!()
    }
}
/// Wrap any event stream and provide transformation properties.
///
/// Boxes each concrete `Events` payload into `Box<dyn Events>` so that
/// heterogeneous streams share one item type.
pub struct PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>>,
{
    inp: Pin<Box<INP>>,
}
impl<INP, T> PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>>,
{
    pub fn new(inp: INP) -> Self {
        Self { inp: Box::pin(inp) }
    }
}
impl<INP, T> Stream for PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>>,
{
    type Item = Sitemty<Box<dyn Events>>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        use Poll::*;
        match self.inp.poll_next_unpin(cx) {
            // Pass control items through unchanged; box only data payloads.
            Ready(Some(item)) => Ready(Some(match item {
                Ok(item) => Ok(match item {
                    StreamItem::DataItem(item) => StreamItem::DataItem(match item {
                        RangeCompletableItem::RangeComplete => RangeCompletableItem::RangeComplete,
                        RangeCompletableItem::Data(item) => RangeCompletableItem::Data(Box::new(item)),
                    }),
                    StreamItem::Log(item) => StreamItem::Log(item),
                    StreamItem::Stats(item) => StreamItem::Stats(item),
                }),
                Err(e) => Err(e),
            })),
            Ready(None) => Ready(None),
            Pending => Pending,
        }
    }
}
impl<INP, T> WithTransformProperties for PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>>,
{
    // Not implemented yet.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
// Marker: a boxed-events stream with transform properties.
impl<INP, T> EventStreamTrait for PlainEventStream<INP, T>
where
    T: Events,
    INP: Stream<Item = Sitemty<T>> + Send,
{
}

View File

@@ -1,470 +0,0 @@
#[cfg(test)]
pub mod eventsdim0;
#[cfg(test)]
pub mod eventsdim1;
use crate::channelevents::ConnStatus;
use crate::channelevents::ConnStatusEvent;
use crate::eventsdim0::EventsDim0;
use crate::merger::Mergeable;
use crate::merger::Merger;
use crate::runfut;
use crate::streams::TransformerExt;
use crate::streams::VecStream;
use crate::testgen::make_some_boxed_d0_f32;
use crate::ChannelEvents;
use crate::Error;
use crate::Events;
use futures_util::stream;
use futures_util::StreamExt;
use items_0::streamitem::sitem_data;
use items_0::streamitem::RangeCompletableItem;
use items_0::streamitem::Sitemty;
use items_0::streamitem::StreamItem;
use items_0::Appendable;
use items_0::Empty;
use items_0::WithLen;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::timeunits::*;
use netpod::BinnedRangeEnum;
use std::time::Duration;
use std::time::Instant;
#[cfg(test)]
pub fn runfut<T, F>(fut: F) -> Result<T, err::Error>
where
    F: std::future::Future<Output = Result<T, Error>>,
{
    // Adapt the error type, then drive the future on the shared test runtime.
    use futures_util::TryFutureExt;
    taskrun::run(fut.map_err(Into::into))
}
#[test]
fn items_move_events() {
    // Draining events between containers must move every item and preserve
    // equality with the original container.
    let evs = make_some_boxed_d0_f32(10, SEC, SEC, 0, 1846713782);
    let v0 = ChannelEvents::Events(evs);
    let mut v1 = v0.clone();
    eprintln!("{v1:?}");
    eprintln!("{}", v1.len());
    let mut v2 = v1.new_empty();
    // First drain: everything before the lowest index with ts > 4.
    match v1.find_lowest_index_gt(4) {
        Some(ilgt) => {
            v1.drain_into(&mut v2, (0, ilgt)).unwrap();
        }
        None => {
            v1.drain_into(&mut v2, (0, v1.len())).unwrap();
        }
    }
    eprintln!("{}", v1.len());
    eprintln!("{}", v2.len());
    // Second drain: u64::MAX matches nothing, so the remainder is drained.
    match v1.find_lowest_index_gt(u64::MAX) {
        Some(ilgt) => {
            v1.drain_into(&mut v2, (0, ilgt)).unwrap();
        }
        None => {
            v1.drain_into(&mut v2, (0, v1.len())).unwrap();
        }
    }
    eprintln!("{}", v1.len());
    eprintln!("{}", v2.len());
    eprintln!("{v1:?}");
    eprintln!("{v2:?}");
    assert_eq!(v1.len(), 0);
    assert_eq!(v2.len(), 10);
    assert_eq!(v2, v0);
}
#[test]
fn items_merge_00() {
    // Smoke test: merge two interleaved event streams and drain the merger.
    let fut = async {
        use crate::merger::Merger;
        let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782);
        let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893);
        let v0 = ChannelEvents::Events(evs0);
        let v1 = ChannelEvents::Events(evs1);
        let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
        let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
        let mut merger = Merger::new(vec![stream0, stream1], Some(8));
        while let Some(item) = merger.next().await {
            eprintln!("{item:?}");
        }
        Ok(())
    };
    runfut(fut).unwrap();
}
#[test]
fn items_merge_01() {
    // Merge two event streams plus a connection-status stream and verify the
    // total item count: 10 + 10 events + 3 status events = 23.
    let fut = async {
        use crate::merger::Merger;
        let evs0 = make_some_boxed_d0_f32(10, SEC * 1, SEC * 2, 0, 1846713782);
        let evs1 = make_some_boxed_d0_f32(10, SEC * 2, SEC * 2, 0, 828764893);
        let v0 = ChannelEvents::Events(evs0);
        let v1 = ChannelEvents::Events(evs1);
        let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect)));
        let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect)));
        let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect)));
        let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
        let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
        let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
        let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8));
        let mut total_event_count = 0;
        while let Some(item) = merger.next().await {
            eprintln!("{item:?}");
            let item = item?;
            match item {
                StreamItem::DataItem(item) => match item {
                    RangeCompletableItem::RangeComplete => {}
                    RangeCompletableItem::Data(item) => {
                        total_event_count += item.len();
                    }
                },
                StreamItem::Log(_) => {}
                StreamItem::Stats(_) => {}
            }
        }
        assert_eq!(total_event_count, 23);
        Ok(())
    };
    runfut(fut).unwrap();
}
#[test]
fn items_merge_02() {
    // Same as items_merge_01 but with larger batches so the merger has to
    // split output: 100 + 100 events + 3 status events = 203.
    let fut = async {
        let evs0 = make_some_boxed_d0_f32(100, SEC * 1, SEC * 2, 0, 1846713782);
        let evs1 = make_some_boxed_d0_f32(100, SEC * 2, SEC * 2, 0, 828764893);
        let v0 = ChannelEvents::Events(evs0);
        let v1 = ChannelEvents::Events(evs1);
        let v2 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 100, ConnStatus::Connect)));
        let v3 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2300, ConnStatus::Disconnect)));
        let v4 = ChannelEvents::Status(Some(ConnStatusEvent::new(MS * 2800, ConnStatus::Connect)));
        let stream0 = Box::pin(stream::iter(vec![sitem_data(v0)]));
        let stream1 = Box::pin(stream::iter(vec![sitem_data(v1)]));
        let stream2 = Box::pin(stream::iter(vec![sitem_data(v2), sitem_data(v3), sitem_data(v4)]));
        let mut merger = Merger::new(vec![stream0, stream1, stream2], Some(8));
        let mut total_event_count = 0;
        while let Some(item) = merger.next().await {
            eprintln!("{item:?}");
            let item = item.unwrap();
            match item {
                StreamItem::DataItem(item) => match item {
                    RangeCompletableItem::RangeComplete => {}
                    RangeCompletableItem::Data(item) => {
                        total_event_count += item.len();
                    }
                },
                StreamItem::Log(_) => {}
                StreamItem::Stats(_) => {}
            }
        }
        assert_eq!(total_event_count, 203);
        Ok(())
    };
    runfut(fut).unwrap();
}
#[test]
fn merge_00() {
    // Merging one non-empty input with one empty input must yield the original
    // items unchanged, preceded by one empty seed item from the merger.
    let fut = async {
        let mut events_vec1: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let mut events_vec2: Vec<Sitemty<ChannelEvents>> = Vec::new();
        {
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            // Same payload twice: one copy to feed in, one to compare against.
            let cev = ChannelEvents::Events(Box::new(events.clone()));
            events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
            let cev = ChannelEvents::Events(Box::new(events.clone()));
            events_vec2.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
        }
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let inp2 = futures_util::stream::iter(inp2);
        let inp2 = Box::pin(inp2);
        let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(32));
        // Expect an empty first item.
        let item = merger.next().await;
        let item = match item {
            Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
            _ => panic!(),
        };
        assert_eq!(item.len(), 0);
        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), None);
        Ok(())
    };
    runfut(fut).unwrap();
}
#[test]
fn merge_01() {
    // With an out-buffer size of exactly 10, two 10-event batches must pass
    // through the merger without being re-chunked.
    let fut = async {
        let events_vec1 = {
            let mut vec = Vec::new();
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events.clone()));
            let mut events = EventsDim0::empty();
            for i in 10..20 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events.clone()));
            vec
        };
        let exp = events_vec1.clone();
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let inp2 = futures_util::stream::iter(inp2);
        let inp2 = Box::pin(inp2);
        let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10));
        // Expect an empty first item.
        let item = merger.next().await;
        let item = match item {
            Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
            _ => panic!(),
        };
        assert_eq!(item.len(), 0);
        let item = merger.next().await;
        assert_eq!(item.as_ref(), exp.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), exp.get(1));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), None);
        Ok(())
    };
    runfut(fut).unwrap();
}
// Wraps the boxed events as a successful data stream item and appends it.
fn push_evd0(vec: &mut Vec<Sitemty<ChannelEvents>>, events: Box<dyn Events>) {
    let item = StreamItem::DataItem(RangeCompletableItem::Data(ChannelEvents::Events(events)));
    vec.push(Ok(item));
}
#[test]
fn merge_02() {
    // A status event with ts 1199 must split the second 10-event batch:
    // expected output is chunks [0..10), [10..12), status, [12..20).
    let fut = async {
        let events_vec1 = {
            let mut vec = Vec::new();
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            let mut events = EventsDim0::empty();
            for i in 10..20 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            vec
        };
        // The same events re-chunked the way the merger is expected to emit them.
        let events_vec2 = {
            let mut vec = Vec::new();
            let mut events = EventsDim0::empty();
            for i in 0..10 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            let mut events = EventsDim0::empty();
            for i in 10..12 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            let mut events = EventsDim0::empty();
            for i in 12..20 {
                events.push(i * 100, i, i as f32 * 100.);
            }
            push_evd0(&mut vec, Box::new(events));
            vec
        };
        let inp2_events_a = {
            let ev = ConnStatusEvent {
                ts: 1199,
                datetime: std::time::SystemTime::UNIX_EPOCH,
                status: ConnStatus::Disconnect,
            };
            let item: Sitemty<ChannelEvents> = Ok(StreamItem::DataItem(RangeCompletableItem::Data(
                ChannelEvents::Status(Some(ev)),
            )));
            vec![item]
        };
        // Identical copy of the status item, used only for the comparison below.
        let inp2_events_b = {
            let ev = ConnStatusEvent {
                ts: 1199,
                datetime: std::time::SystemTime::UNIX_EPOCH,
                status: ConnStatus::Disconnect,
            };
            let item: Sitemty<ChannelEvents> = Ok(StreamItem::DataItem(RangeCompletableItem::Data(
                ChannelEvents::Status(Some(ev)),
            )));
            vec![item]
        };
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2: Vec<Sitemty<ChannelEvents>> = inp2_events_a;
        let inp2 = futures_util::stream::iter(inp2);
        let inp2 = Box::pin(inp2);
        let mut merger = crate::merger::Merger::new(vec![inp1, inp2], Some(10));
        // Expect an empty first item.
        let item = merger.next().await;
        let item = match item {
            Some(Ok(StreamItem::DataItem(RangeCompletableItem::Data(item)))) => item,
            _ => panic!(),
        };
        assert_eq!(item.len(), 0);
        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(1));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), inp2_events_b.get(0));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), events_vec2.get(2));
        let item = merger.next().await;
        assert_eq!(item.as_ref(), None);
        Ok(())
    };
    runfut(fut).unwrap();
}
#[test]
fn bin_01() {
    // Builds a synthetic event stream and feeds it through the merger.
    // The binning consumer (BinnedCollected) is currently commented out, so
    // bindings that only it would use are underscore-prefixed to avoid
    // unused-variable warnings.
    const TSBASE: u64 = SEC * 1600000000;
    // Synthetic signal: depends on the parity of the second and of the 100 ms slot.
    fn val(ts: u64) -> f32 {
        2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32
    }
    let fut = async {
        let mut events_vec1 = Vec::new();
        let mut t = TSBASE;
        for _ in 0..20 {
            let mut events = EventsDim0::empty();
            for _ in 0..10 {
                events.push(t, t, val(t));
                t += MS * 100;
            }
            let cev = ChannelEvents::Events(Box::new(events));
            events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
        }
        events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)));
        let inp1 = events_vec1;
        let inp1 = futures_util::stream::iter(inp1);
        let inp1 = Box::pin(inp1);
        let inp2 = Box::pin(futures_util::stream::empty()) as _;
        let _stream = crate::merger::Merger::new(vec![inp1, inp2], Some(32));
        // covering_range result is subject to adjustments, instead, manually choose bin edges
        let _range = NanoRange {
            beg: TSBASE + SEC * 1,
            end: TSBASE + SEC * 10,
        };
        // let binrange = BinnedRangeEnum::covering_range(range.into(), 9).map_err(|e| format!("{e}"))?;
        // let stream = Box::pin(stream);
        // let deadline = Instant::now() + Duration::from_millis(4000);
        // let do_time_weight = true;
        // let emit_empty_bins = false;
        // let res = BinnedCollected::new(
        //     binrange,
        //     ScalarType::F32,
        //     Shape::Scalar,
        //     do_time_weight,
        //     emit_empty_bins,
        //     deadline,
        //     Box::pin(stream),
        // )
        // .await?;
        // eprintln!("res {:?}", res);
        Ok::<_, Error>(())
    };
    runfut(fut).unwrap();
}
#[test]
fn binned_timeout_00() {
    // Disabled: the BinnedCollected consumer this exercises is not in use.
    if true {
        return;
    }
    // TODO items_2::binnedcollected::BinnedCollected is currently not used.
    trace!("binned_timeout_01 uses a delay");
    const TSBASE: u64 = SEC * 1600000000;
    // Synthetic signal: depends on the parity of the second and of the 100 ms slot.
    fn val(ts: u64) -> f32 {
        2f32 + ((ts / SEC) % 2) as f32 + 0.2 * ((ts / (MS * 100)) % 2) as f32
    }
    eprintln!("binned_timeout_01 ENTER");
    let fut = async {
        eprintln!("binned_timeout_01 IN FUT");
        let mut events_vec1: Vec<Sitemty<ChannelEvents>> = Vec::new();
        let mut t = TSBASE;
        for _ in 0..20 {
            let mut events = EventsDim0::empty();
            for _ in 0..10 {
                events.push(t, t, val(t));
                t += MS * 100;
            }
            let cev = ChannelEvents::Events(Box::new(events));
            events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::Data(cev))));
        }
        events_vec1.push(Ok(StreamItem::DataItem(RangeCompletableItem::RangeComplete)));
        let inp1 = VecStream::new(events_vec1.into_iter().collect());
        // Inject a long delay before the 6th item so the consumer's deadline
        // would trigger the timeout/continue-at path.
        let inp1 = inp1.enumerate2().then2(|(i, k)| async move {
            if i == 5 {
                let _ = tokio::time::sleep(Duration::from_millis(10000)).await;
            }
            k
        });
        let edges: Vec<_> = (0..10).into_iter().map(|x| TSBASE + SEC * (1 + x)).collect();
        let range = NanoRange {
            beg: TSBASE + SEC * 1,
            end: TSBASE + SEC * 10,
        };
        let binrange = BinnedRangeEnum::covering_range(range.into(), 9)?;
        eprintln!("edges1: {:?}", edges);
        //eprintln!("edges2: {:?}", binrange.edges());
        let timeout = Duration::from_millis(400);
        // let inp1 = Box::pin(inp1);
        // let deadline = Instant::now() + timeout;
        // let do_time_weight = true;
        // let emit_empty_bins = false;
        // TODO with new binning
        // let res = BinnedCollected::new(
        //     binrange,
        //     ScalarType::F32,
        //     Shape::Scalar,
        //     do_time_weight,
        //     emit_empty_bins,
        //     deadline,
        //     inp1,
        // )
        // .await?;
        // let r2: &BinsDim0CollectedResult<f32> = res.result.as_any_ref().downcast_ref().expect("res seems wrong type");
        // eprintln!("rs: {r2:?}");
        // assert_eq!(SEC * r2.ts_anchor_sec(), TSBASE + SEC);
        // assert_eq!(r2.counts(), &[10, 10, 10]);
        // assert_eq!(r2.mins(), &[3.0, 2.0, 3.0]);
        // assert_eq!(r2.maxs(), &[3.2, 2.2, 3.2]);
        // assert_eq!(r2.missing_bins(), 6);
        // assert_eq!(r2.continue_at(), Some(IsoDateTime::from_ns_u64(TSBASE + SEC * 4)));
        Ok::<_, Error>(())
    };
    runfut(fut).unwrap();
}

View File

@@ -1,24 +0,0 @@
use crate::eventsdim0::EventsDim0;
use items_0::Appendable;
use items_0::Empty;
use items_0::Events;
#[test]
fn collect_s_00() {
    // Collector obtained via the mutable collectable view ingests both events.
    let mut events = EventsDim0::empty();
    events.push(123, 4, 1.00f32);
    events.push(124, 5, 1.01);
    let mut collector = events.as_collectable_mut().new_collector();
    collector.ingest(&mut events);
    assert_eq!(collector.len(), 2);
}
#[test]
fn collect_c_00() {
    // Same as collect_s_00 but via the default-ref collectable view.
    let mut events = EventsDim0::empty();
    events.push(123, 4, 1.00f32);
    events.push(124, 5, 1.01);
    let mut collector = events.as_collectable_with_default_ref().new_collector();
    collector.ingest(&mut events);
    assert_eq!(collector.len(), 2);
}

View File

@@ -1,25 +0,0 @@
use crate::eventsdim0::EventsDim0;
use crate::Events;
use items_0::Appendable;
use items_0::Empty;
#[allow(unused)]
// One step of Marsaglia's 32-bit xorshift PRNG (shift triple 13/17/5).
fn xorshift32(state: u32) -> u32 {
    let a = state ^ (state << 13);
    let b = a ^ (a >> 17);
    b ^ (b << 5)
}
/// Builds `n` deterministic pseudo-random scalar f32 events, boxed as `dyn Events`.
/// Timestamps start at `t0`, advance by `tstep`, with jitter masked by `tmask`;
/// `seed` drives the xorshift sequence so results are reproducible.
pub fn make_some_boxed_d0_f32(n: usize, t0: u64, tstep: u64, tmask: u64, seed: u32) -> Box<dyn Events> {
    let mut rng = seed;
    let mut events = EventsDim0::empty();
    for i in 0..n {
        rng = xorshift32(rng);
        let jitter = rng as u64 & tmask;
        let ts = t0 + i as u64 * tstep + jitter;
        let value = i as f32 * 100. + rng as f32 / u32::MAX as f32 / 10.;
        events.push(ts, ts, value);
    }
    Box::new(events)
}

View File

@@ -1,84 +0,0 @@
//! Helper functions to create transforms which act locally on a batch of events.
//! Tailored to the usage pattern given by `TransformQuery`.
use crate::channelevents::ChannelEvents;
use crate::eventsdim0::EventsDim0;
use items_0::transform::EventTransform;
use items_0::transform::TransformEvent;
use items_0::transform::TransformProperties;
use items_0::transform::WithTransformProperties;
use items_0::Appendable;
use items_0::AsAnyMut;
use items_0::Empty;
use items_0::Events;
use items_0::EventsNonObj;
use netpod::log::*;
use std::mem;
// Pass-through transform: returns the input events unchanged.
struct TransformEventIdentity {}
impl WithTransformProperties for TransformEventIdentity {
    // TODO not yet implemented; panics when queried.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
impl EventTransform for TransformEventIdentity {
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        src
    }
}
/// Builds the identity transform (events pass through unchanged).
pub fn make_transform_identity() -> TransformEvent {
    TransformEvent(Box::new(TransformEventIdentity {}))
}
// Reduces each incoming batch via the events' own min/max/avg conversion.
struct TransformEventMinMaxAvg {}
impl WithTransformProperties for TransformEventMinMaxAvg {
    // TODO not yet implemented; panics when queried.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
impl EventTransform for TransformEventMinMaxAvg {
    fn transform(&mut self, mut src: Box<dyn Events>) -> Box<dyn Events> {
        src.to_min_max_avg()
    }
}
/// Builds the min/max/avg transform.
pub fn make_transform_min_max_avg() -> TransformEvent {
    TransformEvent(Box::new(TransformEventMinMaxAvg {}))
}
// Emits, for each event, the difference between its pulse id and the previous
// event's pulse id; the very first event yields 0.
struct TransformEventPulseIdDiff {
    // Pulse id of the last event seen, carried across batches.
    pulse_last: Option<u64>,
}
impl WithTransformProperties for TransformEventPulseIdDiff {
    // TODO not yet implemented; panics when queried.
    fn query_transform_properties(&self) -> TransformProperties {
        todo!()
    }
}
impl EventTransform for TransformEventPulseIdDiff {
    fn transform(&mut self, src: Box<dyn Events>) -> Box<dyn Events> {
        let (tss, pulses) = EventsNonObj::into_tss_pulses(src);
        let mut item = EventsDim0::empty();
        let pulse_last = &mut self.pulse_last;
        for (ts, pulse) in tss.into_iter().zip(pulses) {
            // Signed difference so a decreasing pulse id is representable.
            let value = if let Some(last) = pulse_last {
                pulse as i64 - *last as i64
            } else {
                0
            };
            item.push(ts, pulse, value);
            *pulse_last = Some(pulse);
        }
        Box::new(ChannelEvents::Events(Box::new(item)))
    }
}
/// Builds the pulse-id-difference transform.
pub fn make_transform_pulse_id_diff() -> TransformEvent {
    TransformEvent(Box::new(TransformEventPulseIdDiff { pulse_last: None }))
}

View File

@@ -1,12 +0,0 @@
[package]
name = "items_proc"
version = "0.0.2"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2021"
[lib]
path = "src/items_proc.rs"
proc-macro = true
[dependencies]
syn = "2"

View File

@@ -1,98 +0,0 @@
use proc_macro::{TokenStream, TokenTree};
// Primitive numeric type names as written in Rust source.
const TYS: [&str; 10] = ["u8", "u16", "u32", "u64", "i8", "i16", "i32", "i64", "f32", "f64"];
// Corresponding enum variant identifiers, index-aligned with `TYS`.
const IDS: [&str; 10] = ["U8", "U16", "U32", "U64", "I8", "I16", "I32", "I64", "F32", "F64"];
/// Minimal demo macro: expands to a function `answer()` returning 42.
#[proc_macro]
pub fn make_answer(_item: TokenStream) -> TokenStream {
    let src = "fn answer() -> u32 { 42 }";
    src.parse().unwrap()
}
#[proc_macro]
pub fn tycases1(ts: TokenStream) -> TokenStream {
    // Expands to a `match` with one arm per numeric type. Input tokens are read
    // by position: 0 = value to match on, 2 = enum path prefix, 4 = variant
    // suffix, 6 = parenthesized RHS template where `$id`/`$ty` get substituted.
    // NOTE(review): this loop has no effect — presumably leftover scaffolding.
    for tt in ts.clone() {
        match tt {
            TokenTree::Group(..) => (),
            TokenTree::Ident(..) => (),
            TokenTree::Punct(..) => (),
            TokenTree::Literal(..) => (),
        }
    }
    let tokens: Vec<_> = ts.clone().into_iter().collect();
    let match_val = if let TokenTree::Ident(x) = tokens[0].clone() {
        //panic!("GOT {}", x.to_string());
        x.to_string()
    } else {
        panic!("match_val")
    };
    let enum_1_pre = if let TokenTree::Ident(x) = tokens[2].clone() {
        //panic!("GOT {}", x.to_string());
        x.to_string()
    } else {
        panic!("enum_1_pre")
    };
    let enum_1_suff = tokens[4].to_string();
    let rhs = if let TokenTree::Group(x) = tokens[6].clone() {
        //panic!("GOT {}", x.to_string());
        x.to_string()
    } else {
        panic!("RHS mismatch {:?}", tokens[6])
    };
    //panic!("{:?}", tokens[0]);
    let tys = ["u8", "u16", "u32", "u64", "i8", "i16", "i32", "i64", "f32", "f64"];
    let ids = ["U8", "U16", "U32", "U64", "I8", "I16", "I32", "I64", "F32", "F64"];
    let mut arms = vec![];
    for (id, ty) in ids.iter().zip(&tys) {
        let rhs = rhs.replace("$id", id);
        let rhs = rhs.replace("$ty", ty);
        let s = format!("{}::{}{} => {},", enum_1_pre, id, enum_1_suff, rhs);
        arms.push(s);
    }
    // A String variant arm is always appended but left unimplemented.
    arms.push(format!("{}::{}{} => {}", enum_1_pre, "String", enum_1_suff, "todo!()"));
    let gen = format!("match {} {{\n{}\n}}", match_val, arms.join("\n"));
    //panic!("GENERATED: {}", gen);
    gen.parse().unwrap()
}
#[proc_macro]
pub fn enumvars(ts: TokenStream) -> TokenStream {
    // Generates `pub enum <name> { U8(<rhe><u8>), ... }` with one variant per
    // numeric type. Token 0 is the enum name, token 2 the wrapped generic type.
    let tokens: Vec<_> = ts.clone().into_iter().collect();
    let name = if let TokenTree::Ident(x) = tokens[0].clone() {
        x.to_string()
    } else {
        panic!("name")
    };
    let rhe = if let TokenTree::Ident(x) = tokens[2].clone() {
        x.to_string()
    } else {
        panic!("rhe")
    };
    let mut cases = vec![];
    for (id, ty) in IDS.iter().zip(&TYS) {
        let s = format!("{}({}<{}>),", id, rhe, ty);
        cases.push(s);
    }
    let gen = format!(
        "#[derive(Debug, Serialize, Deserialize)]\npub enum {} {{\n{}\n}}\n",
        name,
        cases.join("\n")
    );
    //panic!("GENERATED: {}", gen);
    gen.parse().unwrap()
}
#[proc_macro]
pub fn enumvariants(ts: TokenStream) -> TokenStream {
    // Currently only validates that the input parses as a syn::Item and emits
    // nothing; the commented lines are earlier experiments.
    //panic!("yoooo");
    //syn::parse_macro_input!(ts as syn::DeriveInput);
    //let tokens: Vec<_> = ts.into_iter().collect();
    //let parsed: syn::DeriveInput = syn::parse_macro_input!(ts as syn::DeriveInput);
    //let s = ts.to_string();
    let parsed = syn::parse::<syn::Item>(ts);
    //panic!("{:?}", parsed);
    match parsed {
        Ok(_ast) => {}
        Err(e) => panic!("Parse error {e:?}"),
    }
    TokenStream::new()
}

View File

@@ -22,7 +22,7 @@ netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../query" }
disk = { path = "../disk" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
dbconn = { path = "../dbconn" }
scyllaconn = { path = "../scyllaconn" }
taskrun = { path = "../taskrun" }

View File

@@ -1,17 +0,0 @@
[package]
name = "parse"
version = "0.0.2"
authors = ["Dominik Werder <dominik.werder@gmail.com>"]
edition = "2021"
[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
humantime-serde = "1.1"
chrono = { version = "0.4.26", features = ["serde"] }
bytes = "1.4"
byteorder = "1.4"
hex = "0.4.3"
nom = "7.1.3"
daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }

View File

@@ -1,507 +0,0 @@
use crate::channelconfig::CompressionMethod;
use crate::nom;
use daqbuf_err as err;
use netpod::log::*;
use netpod::ScalarType;
use netpod::Shape;
use nom::bytes::complete::take;
use nom::error::context;
use nom::error::ContextError;
use nom::error::ErrorKind;
use nom::error::ParseError;
use nom::multi::many0;
use nom::number::complete::be_u32;
use nom::number::complete::be_u64;
use nom::number::complete::be_u8;
use nom::Err;
use nom::IResult;
use nom::Needed;
use serde::Deserialize;
use serde::Serialize;
use std::fmt;
use std::num::NonZeroUsize;
type Nres<'a, O, E = nom::error::Error<&'a [u8]>> = Result<(&'a [u8], O), nom::Err<E>>;
// Byte order exactly as named in the API-1 JSON protocol.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Api1ByteOrder {
    #[serde(rename = "LITTLE_ENDIAN")]
    Little,
    #[serde(rename = "BIG_ENDIAN")]
    Big,
}
// Scalar data types with their API-1 wire names (lowercase via serde rename).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Api1ScalarType {
    #[serde(rename = "uint8")]
    U8,
    #[serde(rename = "uint16")]
    U16,
    #[serde(rename = "uint32")]
    U32,
    #[serde(rename = "uint64")]
    U64,
    #[serde(rename = "int8")]
    I8,
    #[serde(rename = "int16")]
    I16,
    #[serde(rename = "int32")]
    I32,
    #[serde(rename = "int64")]
    I64,
    #[serde(rename = "float32")]
    F32,
    #[serde(rename = "float64")]
    F64,
    #[serde(rename = "bool")]
    BOOL,
    #[serde(rename = "string")]
    STRING,
}
impl Api1ScalarType {
    /// Returns the API-1 wire name of this type (matches the serde renames above).
    pub fn to_str(&self) -> &'static str {
        use Api1ScalarType as A;
        match self {
            A::U8 => "uint8",
            A::U16 => "uint16",
            A::U32 => "uint32",
            A::U64 => "uint64",
            A::I8 => "int8",
            A::I16 => "int16",
            A::I32 => "int32",
            A::I64 => "int64",
            A::F32 => "float32",
            A::F64 => "float64",
            A::BOOL => "bool",
            A::STRING => "string",
        }
    }
}
// Debug keeps the Rust variant name while Display and serde use the wire name.
#[test]
fn test_custom_variant_name() {
    let val = Api1ScalarType::F32;
    assert_eq!(format!("{val:?}"), "F32");
    assert_eq!(format!("{val}"), "float32");
    let s = serde_json::to_string(&val).unwrap();
    assert_eq!(s, "\"float32\"");
}
impl fmt::Display for Api1ScalarType {
    /// Formats using the API-1 wire name (same as `to_str`).
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str(self.to_str())
    }
}
// Maps internal scalar types onto API-1 types; API-1 has no enum type, so
// Enum is exposed as its numeric representation.
impl From<&ScalarType> for Api1ScalarType {
    fn from(k: &ScalarType) -> Self {
        use Api1ScalarType as B;
        use ScalarType as A;
        match k {
            A::U8 => B::U8,
            A::U16 => B::U16,
            A::U32 => B::U32,
            A::U64 => B::U64,
            A::I8 => B::I8,
            A::I16 => B::I16,
            A::I32 => B::I32,
            A::I64 => B::I64,
            A::F32 => B::F32,
            A::F64 => B::F64,
            A::BOOL => B::BOOL,
            A::STRING => B::STRING,
            // TODO treat enum as number only
            A::Enum => B::U16,
        }
    }
}
impl From<ScalarType> for Api1ScalarType {
    /// Delegates to the by-reference conversion.
    fn from(x: ScalarType) -> Self {
        Self::from(&x)
    }
}
/// JSON channel header carried in an API-1 mtype-0 frame.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Api1ChannelHeader {
    name: String,
    #[serde(rename = "type")]
    ty: Api1ScalarType,
    #[serde(rename = "byteOrder")]
    byte_order: Api1ByteOrder,
    // Empty vec means a scalar channel.
    #[serde(default)]
    shape: Vec<u32>,
    // Encoded on the wire as an optional integer index.
    #[serde(default, skip_serializing_if = "Option::is_none", with = "serde_compression_method")]
    compression: Option<CompressionMethod>,
}
impl Api1ChannelHeader {
    /// Creates a header; `shape` is flattened to the API-1 `Vec<u32>` form.
    pub fn new(
        name: String,
        ty: Api1ScalarType,
        byte_order: Api1ByteOrder,
        shape: Shape,
        compression: Option<CompressionMethod>,
    ) -> Self {
        Self {
            name,
            ty,
            byte_order,
            shape: shape.to_u32_vec(),
            compression,
        }
    }

    /// Channel name.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Scalar type of the channel data.
    pub fn ty(&self) -> Api1ScalarType {
        self.ty.clone()
    }

    /// Byte order of the binary event payload.
    pub fn byte_order(&self) -> &Api1ByteOrder {
        &self.byte_order
    }

    /// Shape dimensions; empty for scalar channels.
    pub fn shape(&self) -> &[u32] {
        &self.shape
    }

    /// Compression applied to the payload, if any.
    pub fn compression(&self) -> Option<&CompressionMethod> {
        self.compression.as_ref()
    }
}
mod serde_compression_method {
    //! (De)serializes `Option<CompressionMethod>` as an optional integer index:
    //! absent/null/0 => None, 1 => BitshuffleLZ4.
    use super::CompressionMethod;
    use serde::de;
    use serde::de::Visitor;
    use serde::Deserializer;
    use serde::Serializer;
    use std::fmt;

    /// Serializes `None` as a JSON null and `BitshuffleLZ4` as the integer 1.
    pub fn serialize<S>(v: &Option<CompressionMethod>, ser: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match v {
            Some(v) => {
                let n = match v {
                    CompressionMethod::BitshuffleLZ4 => 1,
                };
                ser.serialize_some(&n)
            }
            None => ser.serialize_none(),
        }
    }

    // Visitor for the inner (non-optional) integer index.
    struct VisC;

    impl<'de> Visitor<'de> for VisC {
        type Value = Option<CompressionMethod>;

        fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
            write!(fmt, "compression method index")
        }

        fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            match v {
                0 => Ok(None),
                1 => Ok(Some(CompressionMethod::BitshuffleLZ4)),
                // Report the offending value with the expecting() description.
                // (The previous `unknown_variant(..., &["0"])` listed only "0"
                // as valid even though 1 is accepted.)
                _ => Err(de::Error::invalid_value(de::Unexpected::Unsigned(v), &self)),
            }
        }
    }

    // Visitor for the outer Option layer: null/absent => None.
    struct Vis;

    impl<'de> Visitor<'de> for Vis {
        type Value = Option<CompressionMethod>;

        fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
            write!(fmt, "optional compression method index")
        }

        fn visit_some<D>(self, de: D) -> Result<Self::Value, D::Error>
        where
            D: Deserializer<'de>,
        {
            de.deserialize_u64(VisC)
        }

        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(None)
        }
    }

    /// Deserializes the optional compression index; see the module docs.
    pub fn deserialize<'de, D>(de: D) -> Result<Option<CompressionMethod>, D::Error>
    where
        D: Deserializer<'de>,
    {
        de.deserialize_option(Vis)
    }
}
// With compression == None, the "compression" key is omitted entirely.
#[test]
fn basic_header_ser_00() {
    let h = Api1ChannelHeader {
        name: "Name".into(),
        ty: Api1ScalarType::F32,
        byte_order: Api1ByteOrder::Big,
        shape: Vec::new(),
        compression: None,
    };
    let js = serde_json::to_string(&h).unwrap();
    let vals = serde_json::from_str::<serde_json::Value>(&js).unwrap();
    let x = vals.as_object().unwrap().get("compression");
    assert_eq!(x, None)
}
// BitshuffleLZ4 serializes as the integer index 1.
#[test]
fn basic_header_ser_01() {
    let h = Api1ChannelHeader {
        name: "Name".into(),
        ty: Api1ScalarType::F32,
        byte_order: Api1ByteOrder::Big,
        shape: Vec::new(),
        compression: Some(CompressionMethod::BitshuffleLZ4),
    };
    let js = serde_json::to_string(&h).unwrap();
    let vals = serde_json::from_str::<serde_json::Value>(&js).unwrap();
    let x = vals.as_object().unwrap().get("compression").unwrap().as_i64();
    assert_eq!(x, Some(1))
}
// Absent "compression" deserializes to None.
#[test]
fn basic_header_deser_00() {
    let js = r#"{ "name": "ch1", "type": "float64", "byteOrder": "LITTLE_ENDIAN" }"#;
    let h: Api1ChannelHeader = serde_json::from_str(js).unwrap();
    assert!(h.compression.is_none());
}
// Explicit null deserializes to None.
#[test]
fn basic_header_deser_01() {
    let js = r#"{ "name": "ch1", "type": "float64", "byteOrder": "LITTLE_ENDIAN", "compression": null }"#;
    let h: Api1ChannelHeader = serde_json::from_str(js).unwrap();
    assert!(h.compression.is_none());
}
// Index 0 also maps to None.
#[test]
fn basic_header_deser_02() {
    let js = r#"{ "name": "ch1", "type": "float64", "byteOrder": "LITTLE_ENDIAN", "compression": 0 }"#;
    let h: Api1ChannelHeader = serde_json::from_str(js).unwrap();
    assert!(h.compression.is_none());
}
// Index 1 maps to BitshuffleLZ4.
#[test]
fn basic_header_deser_03() {
    let js = r#"{ "name": "ch1", "type": "float64", "byteOrder": "LITTLE_ENDIAN", "compression": 1 }"#;
    let h: Api1ChannelHeader = serde_json::from_str(js).unwrap();
    assert!(h.compression.is_some());
    assert_eq!(h.compression, Some(CompressionMethod::BitshuffleLZ4));
}
// Any other index is rejected.
#[test]
fn basic_header_deser_04() {
    let js = r#"{ "name": "ch1", "type": "float64", "byteOrder": "LITTLE_ENDIAN", "compression": 2 }"#;
    let res = serde_json::from_str::<Api1ChannelHeader>(js);
    assert!(res.is_err());
}
// u32be length_1.
// there is exactly length_1 more bytes in this message.
// u8 mtype: 0: channel-header, 1: data
// for mtype == 0:
// The rest is a JSON with the channel header.
// for mtype == 1:
// u64be timestamp
// u64be pulse
// After that comes exactly (length_1 - 17) bytes of data.
/// Parsed API-1 header frame wrapping the JSON channel header.
#[derive(Debug)]
pub struct Header {
    header: Api1ChannelHeader,
}
impl Header {
    /// The decoded channel header.
    pub fn header(&self) -> &Api1ChannelHeader {
        &self.header
    }
}
/// Parsed API-1 data frame: timestamp, pulse id, and the raw event payload.
#[derive(Debug)]
pub struct Data {
    ts: u64,
    pulse: u64,
    data: Vec<u8>,
}
impl Data {
    /// Event timestamp as transmitted (u64, big endian on the wire).
    pub fn ts(&self) -> u64 {
        self.ts
    }
    /// Pulse id of the event.
    pub fn pulse(&self) -> u64 {
        self.pulse
    }
    /// Raw, still-encoded event payload bytes.
    pub fn data(&self) -> &[u8] {
        &self.data
    }
}
/// One decoded API-1 frame: a channel header or an event data message.
#[derive(Debug)]
pub enum Api1Frame {
    Header(Header),
    Data(Data),
}
// Always produces a non-recoverable (`Err::Failure`) parse error at `inp`;
// used as the terminal combinator behind `context(...)` calls.
fn fail_on_input<'a, T, E>(inp: &'a [u8]) -> Nres<T, E>
where
    E: ParseError<&'a [u8]>,
{
    let e = nom::error::ParseError::from_error_kind(inp, ErrorKind::Fail);
    IResult::Err(Err::Failure(e))
}
fn header<'a, E>(inp: &'a [u8]) -> Nres<Header, E>
where
E: ParseError<&'a [u8]> + ContextError<&'a [u8]>,
{
match serde_json::from_slice(inp) {
Ok(k) => {
let k: Api1ChannelHeader = k;
eprintln!("Parsed header OK: {k:?}");
IResult::Ok((&inp[inp.len()..], Header { header: k }))
}
Err(e) => {
let s = String::from_utf8_lossy(inp);
error!("can not parse json: {e}\n{s:?}");
context("json parse", fail_on_input)(inp)
}
}
}
// Parses an mtype-1 payload: u64be timestamp, u64be pulse, remainder is the
// raw event data.
fn data<'a, E>(inp: &'a [u8]) -> Nres<Data, E>
where
    E: ParseError<&'a [u8]>,
{
    if inp.len() < 16 {
        // Need at least ts + pulse (2 * 8 bytes) before the variable-size data.
        IResult::Err(Err::Incomplete(Needed::Size(NonZeroUsize::new(16).unwrap())))
    } else {
        let (inp, ts) = be_u64(inp)?;
        let (inp, pulse) = be_u64(inp)?;
        // All remaining payload bytes are the event data.
        let (inp, data) = take(inp.len())(inp)?;
        let data = data.into();
        let res = Data { ts, pulse, data };
        IResult::Ok((inp, res))
    }
}
// Parses the interior of one frame: a u8 message type followed by either a
// JSON channel header (mtype 0) or a binary data message (mtype 1). The
// sub-parser must consume the whole payload.
fn api1_frame_complete<'a, E>(inp: &'a [u8]) -> Nres<Api1Frame, E>
where
    E: ParseError<&'a [u8]> + ContextError<&'a [u8]>,
{
    let (inp, mtype) = be_u8(inp)?;
    if mtype == 0 {
        let (inp, val) = header(inp)?;
        if inp.len() != 0 {
            context("header did not consume all bytes", fail_on_input)(inp)
        } else {
            let res = Api1Frame::Header(val);
            IResult::Ok((inp, res))
        }
    } else if mtype == 1 {
        let (inp, val) = data(inp)?;
        if inp.len() != 0 {
            context("data did not consume all bytes", fail_on_input)(inp)
        } else {
            let res = Api1Frame::Data(val);
            IResult::Ok((inp, res))
        }
    } else {
        // NOTE(review): an unknown mtype is reported as Incomplete rather
        // than a hard failure — confirm this is intentional.
        let e = Err::Incomplete(Needed::Size(NonZeroUsize::new(1).unwrap()));
        IResult::Err(e)
    }
}
// Parses one length-delimited frame: u32be length, `length` payload bytes,
// then the same length repeated as u32be (checked for integrity).
fn api1_frame<'a, E>(inp: &'a [u8]) -> Nres<Api1Frame, E>
where
    E: ParseError<&'a [u8]> + ContextError<&'a [u8]>,
{
    let inp_orig = inp;
    let (inp, len) = be_u32(inp)?;
    if len < 1 {
        IResult::Err(Err::Failure(ParseError::from_error_kind(inp, ErrorKind::Fail)))
    } else {
        // Need the payload plus the 4-byte trailing length copy.
        if inp.len() < len as usize + 4 {
            let e = Err::Incomplete(Needed::Size(NonZeroUsize::new(len as _).unwrap()));
            IResult::Err(e)
        } else {
            let (inp, payload) = nom::bytes::complete::take(len)(inp)?;
            let (inp, len2) = be_u32(inp)?;
            if len != len2 {
                // Leading and trailing lengths disagree: corrupt frame.
                IResult::Err(Err::Failure(ParseError::from_error_kind(inp_orig, ErrorKind::Fail)))
            } else {
                let (left, res) = api1_frame_complete(payload)?;
                if left.len() != 0 {
                    context("frame did not consume all bytes", fail_on_input)(inp_orig)
                } else {
                    IResult::Ok((inp, res))
                }
            }
        }
    }
}
/// Parses as many complete API-1 frames as possible from `inp` (zero or more).
pub fn api1_frames<'a, E>(inp: &'a [u8]) -> Nres<Vec<Api1Frame>, E>
where
    E: ParseError<&'a [u8]> + ContextError<&'a [u8]>,
{
    many0(api1_frame)(inp)
}
/// Demonstrates constructing a non-recoverable nom failure by hand.
/// (Dropped the unused `VerboseError` import the original carried.)
#[allow(unused)]
fn verbose_err(inp: &[u8]) -> Nres<u32> {
    use nom::error::ErrorKind;
    use nom::error::ParseError;
    use nom::Err;
    let e = ParseError::from_error_kind(inp, ErrorKind::Fail);
    IResult::Err(Err::Failure(e))
}
// Sanity check: be_u32 with the default nom error type parses 4 bytes.
#[test]
fn combinator_default_err() {
    be_u32::<_, nom::error::Error<_>>([1, 2, 3, 4].as_slice()).unwrap();
}
#[test]
fn test_basic_frames() -> Result<(), err::Error> {
    // Builds one header frame plus two data frames by hand and expects the
    // parser to return exactly three frames.
    use std::io::Write;
    let mut buf = Vec::new();
    let js = r#"{"name": "ch1", "type": "float64", "byteOrder": "LITTLE_ENDIAN"}"#;
    // Use write_all: plain `write` may report a short write, which would
    // silently corrupt the test input.
    buf.write_all(&(1 + js.as_bytes().len() as u32).to_be_bytes())?;
    buf.write_all(&[0])?;
    buf.write_all(js.as_bytes())?;
    // Data frame: 1 byte mtype + 8 ts + 8 pulse + 8 payload = 25 bytes.
    buf.write_all(&25u32.to_be_bytes())?;
    buf.write_all(&[1])?;
    buf.write_all(&20u64.to_be_bytes())?;
    buf.write_all(&21u64.to_be_bytes())?;
    buf.write_all(&5.123f64.to_be_bytes())?;
    buf.write_all(&25u32.to_be_bytes())?;
    buf.write_all(&[1])?;
    buf.write_all(&22u64.to_be_bytes())?;
    buf.write_all(&23u64.to_be_bytes())?;
    buf.write_all(&7.88f64.to_be_bytes())?;
    match api1_frames::<nom::error::Error<_>>(&buf) {
        Ok((_, frames)) => {
            assert_eq!(frames.len(), 3);
        }
        Err(e) => {
            panic!("can not parse result: {e}")
        }
    };
    Ok(())
}

View File

@@ -1,457 +0,0 @@
use daqbuf_err as err;
use err::*;
use netpod::log::*;
use netpod::range::evrange::NanoRange;
use netpod::timeunits::DAY;
use netpod::timeunits::MS;
use netpod::ByteOrder;
use netpod::DtNano;
use netpod::NodeConfigCached;
use netpod::ScalarType;
use netpod::SfDbChannel;
use netpod::Shape;
use netpod::TsNano;
use nom::bytes::complete::take;
use nom::number::complete::be_i16;
use nom::number::complete::be_i32;
use nom::number::complete::be_i64;
use nom::number::complete::be_i8;
use nom::number::complete::be_u8;
use nom::Needed;
use serde::Deserialize;
use serde::Serialize;
use std::fmt;
use std::time::Duration;
use std::time::SystemTime;
/// Errors produced while locating and parsing a channel configuration file.
#[derive(Debug, ThisError)]
#[cstm(name = "ConfigParse")]
pub enum ConfigParseError {
    // NOTE(review): presumably "this node does not host the channel" — confirm at call sites.
    NotSupportedOnNode,
    FileNotFound,
    PermissionDenied,
    IO,
    // Carries a human-readable description of the parse failure.
    ParseError(String),
    NotSupported,
}
impl<T: fmt::Debug> From<nom::Err<T>> for ConfigParseError {
    // Any nom error collapses into the string-carrying variant,
    // preserving its Debug rendering for diagnostics.
    fn from(e: nom::Err<T>) -> Self {
        Self::ParseError(format!("nom::Err<T> {:?}", e))
    }
}
impl<I> nom::error::ParseError<I> for ConfigParseError {
    // Record only the error kind; the input position is dropped.
    fn from_error_kind(_input: I, kind: nom::error::ErrorKind) -> Self {
        Self::ParseError(format!("ParseError kind {:?}", kind))
    }
    // Chain an additional kind onto an already-collected error.
    fn append(_input: I, kind: nom::error::ErrorKind, other: Self) -> Self {
        Self::ParseError(format!("ParseError kind {:?} other {:?}", kind, other))
    }
}
type NRes<'a, O> = nom::IResult<&'a [u8], O, ConfigParseError>;
fn mkerr<'a, S, O>(msg: S) -> NRes<'a, O>
where
S: Into<String>,
{
let e = ConfigParseError::ParseError(msg.into());
Err(nom::Err::Error(e))
}
/// Compression scheme recorded in a config entry's data-type flags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CompressionMethod {
    // The only method recognized by `parse_entry` (compression byte 0).
    BitshuffleLZ4,
}
/// One entry of a channel configuration file, describing how the channel's
/// data is stored from `ts` onwards (weighted against the next entry's `ts`
/// in `extract_matching_config_entry`).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ConfigEntry {
    // Timestamp from which this entry applies (nanoseconds).
    pub ts: TsNano,
    // Same timestamp as `ts`, serialized human-readable.
    #[serde(with = "humantime_serde")]
    pub ts_human: SystemTime,
    pub pulse: i64,
    // NOTE(review): presumably the keyspace — confirm against the writer.
    pub ks: i32,
    // Bin size; the on-disk value is in milliseconds (see `parse_entry`).
    pub bs: DtNano,
    pub split_count: i32,
    pub status: i32,
    pub bb: i8,
    pub modulo: i32,
    pub offset: i32,
    /*
    Precision:
    0 'default' whatever that is
    -7 f32
    -16 f64
    */
    pub precision: i16,
    pub scalar_type: ScalarType,
    // The following flags are decoded from the data-type mask byte.
    pub is_compressed: bool,
    pub is_shaped: bool,
    pub is_array: bool,
    pub byte_order: ByteOrder,
    // Present only when `is_compressed` is set.
    pub compression_method: Option<CompressionMethod>,
    // Present only when `is_shaped` is set; at most 4 dimensions.
    pub shape: Option<Vec<u32>>,
    pub source_name: Option<String>,
    pub unit: Option<String>,
    pub description: Option<String>,
    pub optional_fields: Option<String>,
    pub value_converter: Option<String>,
}
impl ConfigEntry {
pub fn to_shape(&self) -> Result<Shape, Error> {
let ret = match &self.shape {
Some(lens) => {
if lens.len() == 1 {
Shape::Wave(lens[0])
} else if lens.len() == 2 {
Shape::Image(lens[0], lens[1])
} else {
// TODO
// Need a new Shape variant for images.
return Err(Error::with_msg(format!("Channel config unsupported shape {:?}", self)))?;
}
}
None => Shape::Scalar,
};
Ok(ret)
}
}
/// A fully parsed channel configuration file: header plus all entries.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChannelConfigs {
    // Format version from the file header (first big-endian i16).
    pub format_version: i16,
    pub channel_name: String,
    // Entries in the order found on disk (deliberately not sorted).
    pub entries: Vec<ConfigEntry>,
}
fn parse_short_string(inp: &[u8]) -> NRes<Option<String>> {
let (inp, len1) = be_i32(inp)?;
if len1 == -1 {
return Ok((inp, None));
}
if len1 < 4 {
return mkerr(format!("bad string len {}", len1));
}
if len1 > 500 {
return mkerr(format!("large string len {}", len1));
}
let (inp, snb) = take((len1 - 4) as usize)(inp)?;
match String::from_utf8(snb.to_vec()) {
Ok(s1) => Ok((inp, Some(s1))),
Err(e) => mkerr(format!("{:?}", e)),
}
}
/// Parse a single length-delimited config entry.
///
/// Returns `Ok((rest, None))` when only a trailing length word remains
/// (end-of-file marker), otherwise the decoded `ConfigEntry`.
///
/// Fixes over the original:
/// - guard `len1 >= 8` before the `len1 - 8` / `len1 - 4` arithmetic,
///   which could underflow and panic on corrupt input;
/// - replace the `assert_eq!` consistency check with a parse error so a
///   malformed file can no longer panic the process.
pub fn parse_entry(inp: &[u8]) -> NRes<Option<ConfigEntry>> {
    let (inp, len1) = be_i32(inp)?;
    if len1 < 0 || len1 > 4000 {
        return mkerr(format!("ConfigEntry bad len1 {}", len1));
    }
    if inp.len() == 0 {
        // A bare trailing length word marks the end of the entry list.
        return Ok((inp, None));
    }
    // An entry is framed by its length at both ends, so anything shorter
    // than two length words is invalid. Checked here to keep the
    // subtractions below panic-free.
    if len1 < 8 {
        return mkerr(format!("ConfigEntry bad len1 {}", len1));
    }
    if inp.len() < len1 as usize - 4 {
        return Err(nom::Err::Incomplete(Needed::new(len1 as usize - 4)));
    }
    // `inp_e` points at the trailing length copy (last 4 bytes of the entry).
    let inp_e = &inp[(len1 - 8) as usize..];
    let (inp, ts) = be_i64(inp)?;
    let (inp, pulse) = be_i64(inp)?;
    let (inp, ks) = be_i32(inp)?;
    let (inp, bs) = be_i64(inp)?;
    // Bin size is stored in milliseconds on disk; convert to nanoseconds.
    let bs = DtNano::from_ns(bs as u64 * MS);
    let (inp, split_count) = be_i32(inp)?;
    let (inp, status) = be_i32(inp)?;
    let (inp, bb) = be_i8(inp)?;
    let (inp, modulo) = be_i32(inp)?;
    let (inp, offset) = be_i32(inp)?;
    let (inp, precision) = be_i16(inp)?;
    let (inp, dtlen) = be_i32(inp)?;
    if dtlen > 100 {
        return mkerr(format!("unexpected data type len {}", dtlen));
    }
    // Data-type mask: bit 7 = compressed, bit 6 = array, bit 4 = shaped;
    // byte order is derived from the same flag byte.
    let (inp, dtmask) = be_u8(inp)?;
    let is_compressed = dtmask & 0x80 != 0;
    let is_array = dtmask & 0x40 != 0;
    let byte_order = ByteOrder::from_dtype_flags(dtmask);
    let is_shaped = dtmask & 0x10 != 0;
    let (inp, dtype) = be_u8(inp)?;
    if dtype > 13 {
        return mkerr(format!("unexpected data type {}", dtype));
    }
    let scalar_type = match ScalarType::from_dtype_index(dtype) {
        Ok(k) => k,
        Err(e) => {
            return mkerr(format!("Can not convert {} to DType {:?}", dtype, e));
        }
    };
    let (inp, compression_method) = match is_compressed {
        false => (inp, None),
        true => {
            let (inp, cm) = be_u8(inp)?;
            match cm {
                0 => (inp, Some(CompressionMethod::BitshuffleLZ4)),
                _ => return mkerr("unknown compression"),
            }
        }
    };
    let (inp, shape) = match is_shaped {
        false => (inp, None),
        true => {
            let (mut inp, dim) = be_u8(inp)?;
            if dim > 4 {
                return mkerr(format!("unexpected number of dimensions: {}", dim));
            }
            let mut shape = vec![];
            for _ in 0..dim {
                let t1 = be_i32(inp)?;
                inp = t1.0;
                shape.push(t1.1 as u32);
            }
            (inp, Some(shape))
        }
    };
    let (inp, source_name) = parse_short_string(inp)?;
    let (inp, unit) = parse_short_string(inp)?;
    let (inp, description) = parse_short_string(inp)?;
    let (inp, optional_fields) = parse_short_string(inp)?;
    let (inp, value_converter) = parse_short_string(inp)?;
    // The fields must consume the body exactly up to the trailing length.
    // Return a parse error instead of panicking on malformed files.
    if inp.len() != inp_e.len() {
        return mkerr(format!(
            "parse_entry expected {} remaining bytes but got {}",
            inp_e.len(),
            inp.len()
        ));
    }
    let (inp_e, len2) = be_i32(inp_e)?;
    if len1 != len2 {
        return mkerr(format!("mismatch len1 {} len2 {}", len1, len2));
    }
    Ok((
        inp_e,
        Some(ConfigEntry {
            ts: TsNano::from_ns(ts as u64),
            ts_human: SystemTime::UNIX_EPOCH + Duration::from_nanos(ts as u64),
            pulse,
            ks,
            bs,
            split_count,
            status,
            bb,
            modulo,
            offset,
            precision,
            scalar_type,
            is_compressed,
            is_array,
            is_shaped,
            byte_order,
            compression_method,
            shape,
            source_name,
            unit,
            description,
            optional_fields,
            value_converter,
        }),
    ))
}
/// Parse a complete configuration file from given in-memory input buffer.
fn parse_config_inner(inp: &[u8]) -> NRes<ChannelConfigs> {
let (inp, ver) = be_i16(inp)?;
let (inp, len1) = be_i32(inp)?;
if len1 <= 8 || len1 > 500 {
return mkerr(format!("no channel name. len1 {}", len1));
}
let (inp, chn) = take((len1 - 8) as usize)(inp)?;
let channel_name = match String::from_utf8(chn.to_vec()) {
Ok(k) => k,
Err(e) => {
return mkerr(format!("channelName utf8 error {:?}", e));
}
};
let (inp, len2) = be_i32(inp)?;
if len1 != len2 {
return mkerr(format!("Mismatch len1 {} len2 {}", len1, len2));
}
let mut entries = Vec::new();
let mut inp_a = inp;
while inp_a.len() > 0 {
let inp = inp_a;
let (inp, e) = parse_entry(inp)?;
if let Some(e) = e {
entries.push(e);
}
inp_a = inp;
}
// Do not sort the parsed config entries.
// We want to deliver the actual order which is found on disk.
// Important for troubleshooting.
let ret = ChannelConfigs {
format_version: ver,
channel_name,
entries,
};
Ok((inp, ret))
}
/// Parse a complete channel configuration file held in memory,
/// discarding any unconsumed remainder.
pub fn parse_config(inp: &[u8]) -> Result<ChannelConfigs, ConfigParseError> {
    match parse_config_inner(inp) {
        Ok((_remain, configs)) => Ok(configs),
        Err(e) => Err(ConfigParseError::ParseError(e.to_string())),
    }
}
/// Outcome of matching config entries against a query time range.
#[derive(Clone)]
pub enum MatchingConfigEntry<'a> {
    // No entry available for the range.
    None,
    Single(&'a ConfigEntry),
    // In this case, we only return the entry which best matches to the time range
    Multiple(&'a ConfigEntry),
}
impl<'a> MatchingConfigEntry<'a> {
    /// The best-matching entry, if one exists.
    pub fn best(&self) -> Option<&ConfigEntry> {
        match self {
            MatchingConfigEntry::None => None,
            MatchingConfigEntry::Single(e) | MatchingConfigEntry::Multiple(e) => Some(e),
        }
    }
}
/// Select the entry from `channel_config` whose validity interval overlaps
/// `range` the longest.
///
/// An entry is treated as valid from its own `ts` until the `ts` of the
/// next entry in ascending time order; the last entry is open-ended.
/// Returns `MatchingConfigEntry::None` only when there are no entries at
/// all (the `Multiple` variant is not produced here).
pub fn extract_matching_config_entry<'a>(
    range: &NanoRange,
    channel_config: &'a ChannelConfigs,
) -> Result<MatchingConfigEntry<'a>, ConfigParseError> {
    const DO_DEBUG: bool = false;
    if DO_DEBUG {
        debug!("extract_matching_config_entry range {range:?}");
    }
    // Pair each entry with its original index so the winner can be
    // returned by reference into `channel_config.entries`, then sort the
    // pairs (not the stored entries) by timestamp.
    let mut a: Vec<_> = channel_config.entries.iter().enumerate().map(|(i, x)| (i, x)).collect();
    a.sort_unstable_by_key(|(_, x)| x.ts.ns());
    let a = a;
    if DO_DEBUG {
        debug!("------------------------------------------------------------------");
        for x in &a {
            debug!("SORTED {:3} {:?}", x.0, x.1.ks);
        }
    }
    // Walk the sorted list in reverse, carrying the previously seen `ts`:
    // in reverse order that is the timestamp of each entry's successor
    // (None for the last entry).
    let b: Vec<_> = a
        .into_iter()
        .rev()
        .map({
            let mut last = None;
            move |(i, x)| {
                let k = last.clone();
                last = Some(x.ts.clone());
                (i, x, k)
            }
        })
        .collect();
    if DO_DEBUG {
        debug!("------------------------------------------------------------------");
        for x in &b {
            debug!("NEIGHB {:3} {:?} {:?}", x.0, x.1.ks, x.2);
        }
    }
    // Weight each entry by the overlap of its validity interval
    // [entry.ts, successor.ts) with `range` (both ends clamped to the
    // range); entries starting at or after `range.end()` get weight 0.
    let c: Vec<_> = b
        .into_iter()
        .rev()
        .map(|(i, e, tsn)| {
            if let Some(ts2) = tsn.clone() {
                if e.ts.ns() < range.end() {
                    let p = if e.ts.ns() < range.beg() {
                        range.beg()
                    } else {
                        e.ts.ns()
                    };
                    let q = if ts2.ns() < range.beg() {
                        range.beg()
                    } else {
                        if ts2.ns() < range.end() {
                            ts2.ns()
                        } else {
                            range.end()
                        }
                    };
                    (i, DtNano::from_ns(q - p), e)
                } else {
                    (i, DtNano::from_ns(0), e)
                }
            } else {
                // Open-ended last entry: overlap runs to the end of the range.
                if e.ts.ns() < range.end() {
                    if e.ts.ns() < range.beg() {
                        (i, DtNano::from_ns(range.delta()), e)
                    } else {
                        (i, DtNano::from_ns(range.end() - e.ts.ns()), e)
                    }
                } else {
                    (i, DtNano::from_ns(0), e)
                }
            }
        })
        .collect();
    if DO_DEBUG {
        debug!("------------------------------------------------------------------");
        for (i, dt, e) in &c {
            debug!("WEIGHT {:3} {:?} {:?} {:?}", i, dt, e.ks, e.ts);
        }
    }
    // Sort by descending overlap; `u64::MAX - dt` inverts the key order.
    // NOTE(review): sort is unstable, so ties between equal weights may
    // resolve to either entry — confirm that is acceptable.
    let mut c = c;
    c.sort_unstable_by_key(|(_, dt, _)| u64::MAX - dt.ns());
    let c = c;
    if DO_DEBUG {
        debug!("------------------------------------------------------------------");
        for (i, dt, e) in &c {
            debug!("WEISOR {:3} {:?} {:?} {:?}", i, dt, e.ks, e.ts);
        }
    }
    if let Some(&(i, _, _)) = c.first() {
        Ok(MatchingConfigEntry::Single(&channel_config.entries[i]))
    } else {
        Ok(MatchingConfigEntry::None)
    }
}
#[cfg(test)]
mod test {
    use super::parse_config;
    /// Load the binary channel-config fixture from the resources directory.
    fn read_data() -> Vec<u8> {
        use std::io::Read;
        //let path = "ks/config/S10CB01-RLOD100-PUP10:SIG-AMPLT/latest/00000_Config";
        let cwd = std::env::current_dir();
        netpod::log::info!("CWD: {:?}", cwd);
        let path = "../resources/sf-daqbuf-33-S10CB01-RLOD100-PUP10:SIG-AMPLT-latest-00000_Config";
        //let path = "../resources/sf-daqbuf-21-S10CB01-RLOD100-PUP10:SIG-AMPLT-latest-00000_Config";
        let mut f1 = std::fs::File::open(path).unwrap();
        let mut buf = Vec::new();
        f1.read_to_end(&mut buf).unwrap();
        buf
    }
    // Minimal hand-built input: version 0, name length 11, "abc",
    // repeated length word, then a 4-byte terminator.
    #[test]
    fn parse_dummy() {
        let config = parse_config(&[0, 0, 0, 0, 0, 11, 0x61, 0x62, 0x63, 0, 0, 0, 11, 0, 0, 0, 1]).unwrap();
        assert_eq!(0, config.format_version);
        assert_eq!("abc", config.channel_name);
    }
    // Parse a real on-disk config and sanity-check entry count,
    // timestamp bounds, and presence of shapes.
    #[test]
    fn open_file() {
        let config = parse_config(&read_data()).unwrap();
        assert_eq!(config.format_version, 0);
        assert_eq!(config.entries.len(), 18);
        for e in &config.entries {
            assert!(e.ts.ns() >= 631152000000000000);
            assert!(e.ts.ns() <= 1613640673424172164);
            assert!(e.shape.is_some());
        }
    }
}

View File

@@ -1,32 +0,0 @@
// Document serde_json's trailing-input behavior: `from_str` parses the
// whole input and rejects trailing bytes, while a streaming
// `Deserializer::from_reader` stops after the first complete value and
// leaves the rest untouched.
#[test]
fn test_json_trailing() {
    use serde::Deserialize;
    use serde_json::Value as JsonValue;
    use std::io::Cursor;
    assert!(
        serde_json::from_str::<JsonValue>(r#"{}."#).is_err(),
        "Should fail because of trailing character"
    );
    let mut de = serde_json::Deserializer::from_reader(Cursor::new(r#"{}..."#));
    assert!(
        JsonValue::deserialize(&mut de).is_ok(),
        "Should allow trailing characters"
    );
    // A scalar value followed by junk parses to the scalar.
    let mut de = serde_json::Deserializer::from_reader(Cursor::new(r#"nullA"#));
    match JsonValue::deserialize(&mut de) {
        Ok(val) => {
            if val != serde_json::json!(null) {
                panic!("Bad parse")
            }
        }
        Err(_) => panic!("Should allow trailing characters"),
    }
    // Leading whitespace is skipped; trailing junk is still ignored.
    let mut de = serde_json::Deserializer::from_reader(Cursor::new(r#" {}AA"#));
    match JsonValue::deserialize(&mut de) {
        Ok(val) => {
            if val != serde_json::json!({}) {
                panic!("Bad parse")
            }
        }
        Err(_) => panic!("Should allow trailing characters"),
    }
}

View File

@@ -1,5 +0,0 @@
pub mod api1_parse;
pub mod channelconfig;
pub use nom;
mod jsonconf;

View File

@@ -15,7 +15,7 @@ humantime-serde = "1.1.1"
thiserror = "0.0.1"
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
[patch.crates-io]
thiserror = { git = "https://github.com/dominikwerder/thiserror.git", branch = "cstm" }

View File

@@ -16,7 +16,7 @@ daqbuf-err = { path = "../../../daqbuf-err" }
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../query" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
streams = { path = "../streams" }
daqbuf-series = { path = "../../../daqbuf-series" }
taskrun = { path = "../taskrun" }

View File

@@ -26,8 +26,8 @@ wasmer = { version = "4.1.0", default-features = false, features = ["sys", "cran
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../query" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
parse = { path = "../parse" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }
streams = { path = "../streams" }
http = "1"
http-body = "1"

View File

@@ -24,8 +24,8 @@ wasmer = { version = "4.1.0", default-features = false, features = ["sys", "cran
netpod = { path = "../../../daqbuf-netpod", package = "daqbuf-netpod" }
query = { path = "../query" }
items_0 = { path = "../../../daqbuf-items-0", package = "daqbuf-items-0" }
items_2 = { path = "../items_2" }
parse = { path = "../parse" }
items_2 = { path = "../../../daqbuf-items-2", package = "daqbuf-items-2" }
parse = { path = "../../../daqbuf-parse", package = "daqbuf-parse" }
http = "1"
http-body = "1"
http-body-util = "0.1.0"