mirror of
https://github.com/paulscherrerinstitute/sf_daq_buffer.git
synced 2026-04-27 07:20:48 +02:00
Merged to 1 project
This commit is contained in:
@@ -1,98 +0,0 @@
|
||||
#include "LiveH5Reader.hpp"
|
||||
#include "BufferUtils.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace core_buffer;
|
||||
|
||||
// Reader for the "live" SWMR HDF5 buffer file of one device/channel.
//
// device       Root folder of the device on disk.
// channel_name Module channel (e.g. M00..M31); combined with device to form
//              the path of the CURRENT pointer file.
// source_id    Numeric id of this source; stored for later use.
//
// Pre-allocates a pulse-id buffer for one FILE_MOD worth of entries and a
// data buffer for a single module frame (MODULE_N_PIXELS of uint16_t).
//
// NOTE(review): the actual file-opening logic below is still commented out,
// so this constructor currently only builds the filename and the buffers —
// confirm whether opening is deliberately deferred to a later call.
LiveH5Reader::LiveH5Reader(
        const std::string& device,
        const std::string& channel_name,
        const uint16_t source_id):
            current_filename_(device + "/" + channel_name + "/CURRENT"),
            source_id_(source_id),
            pulse_id_buffer_(make_unique<uint64_t[]>(FILE_MOD)),
            data_buffer_(make_unique<uint16_t[]>(MODULE_N_PIXELS))
{
    // auto filename = BufferUtils::get_latest_file(current_filename_);
    // file_ = H5::H5File(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ);
    //
    // uint64_t base_pulse_id = start_pulse_id / core_buffer::FILE_MOD;
    // base_pulse_id *= core_buffer::FILE_MOD;
    //
    // current_file_max_pulse_id_ =
    //
    // image_dataset_ = input_file.openDataSet("image");
    // pulse_id_dataset_ = input_file.openDataSet("pulse_id");
    // frame_index_dataset_ = input_file.openDataSet("frame_id");
    // daq_rec_dataset_ = input_file.openDataSet("daq_rec");
    // n_received_packets_dataset_ = input_file.openDataSet("received_packets");
}
|
||||
|
||||
// Release the underlying file handle on destruction (RAII): close_file()
// is responsible for any open HDF5 resources.
LiveH5Reader::~LiveH5Reader() {
    close_file();
}
|
||||
|
||||
|
||||
|
||||
//void load_data_from_file (
|
||||
// FileBufferMetadata* metadata_buffer,
|
||||
// char* image_buffer,
|
||||
// const string &filename,
|
||||
// const size_t start_index)
|
||||
//{
|
||||
//
|
||||
// hsize_t b_image_dim[3] = {REPLAY_READ_BLOCK_SIZE, 512, 1024};
|
||||
// H5::DataSpace b_i_space (3, b_image_dim);
|
||||
// hsize_t b_i_count[] = {REPLAY_READ_BLOCK_SIZE, 512, 1024};
|
||||
// hsize_t b_i_start[] = {0, 0, 0};
|
||||
// b_i_space.selectHyperslab(H5S_SELECT_SET, b_i_count, b_i_start);
|
||||
//
|
||||
// hsize_t f_image_dim[3] = {FILE_MOD, 512, 1024};
|
||||
// H5::DataSpace f_i_space (3, f_image_dim);
|
||||
// hsize_t f_i_count[] = {REPLAY_READ_BLOCK_SIZE, 512, 1024};
|
||||
// hsize_t f_i_start[] = {start_index, 0, 0};
|
||||
// f_i_space.selectHyperslab(H5S_SELECT_SET, f_i_count, f_i_start);
|
||||
//
|
||||
// hsize_t b_metadata_dim[2] = {REPLAY_READ_BLOCK_SIZE, 1};
|
||||
// H5::DataSpace b_m_space (2, b_metadata_dim);
|
||||
// hsize_t b_m_count[] = {REPLAY_READ_BLOCK_SIZE, 1};
|
||||
// hsize_t b_m_start[] = {0, 0};
|
||||
// b_m_space.selectHyperslab(H5S_SELECT_SET, b_m_count, b_m_start);
|
||||
//
|
||||
// hsize_t f_metadata_dim[2] = {FILE_MOD, 1};
|
||||
// H5::DataSpace f_m_space (2, f_metadata_dim);
|
||||
// hsize_t f_m_count[] = {REPLAY_READ_BLOCK_SIZE, 1};
|
||||
// hsize_t f_m_start[] = {start_index, 0};
|
||||
// f_m_space.selectHyperslab(H5S_SELECT_SET, f_m_count, f_m_start);
|
||||
//
|
||||
// H5::H5File input_file(filename, H5F_ACC_RDONLY);
|
||||
//
|
||||
// auto image_dataset = input_file.openDataSet("image");
|
||||
// image_dataset.read(
|
||||
// image_buffer, H5::PredType::NATIVE_UINT16,
|
||||
// b_i_space, f_i_space);
|
||||
//
|
||||
// auto pulse_id_dataset = input_file.openDataSet("pulse_id");
|
||||
// pulse_id_dataset.read(
|
||||
// metadata_buffer->pulse_id, H5::PredType::NATIVE_UINT64,
|
||||
// b_m_space, f_m_space);
|
||||
//
|
||||
// auto frame_id_dataset = input_file.openDataSet("frame_id");
|
||||
// frame_id_dataset.read(
|
||||
// metadata_buffer->frame_index, H5::PredType::NATIVE_UINT64,
|
||||
// b_m_space, f_m_space);
|
||||
//
|
||||
// auto daq_rec_dataset = input_file.openDataSet("daq_rec");
|
||||
// daq_rec_dataset.read(
|
||||
// metadata_buffer->daq_rec, H5::PredType::NATIVE_UINT32,
|
||||
// b_m_space, f_m_space);
|
||||
//
|
||||
// auto received_packets_dataset =
|
||||
// input_file.openDataSet("received_packets");
|
||||
// received_packets_dataset.read(
|
||||
// metadata_buffer->n_received_packets, H5::PredType::NATIVE_UINT16,
|
||||
// b_m_space, f_m_space);
|
||||
//
|
||||
// input_file.close();
|
||||
//}
|
||||
@@ -1,255 +0,0 @@
|
||||
#include <stdexcept>
|
||||
#include <iostream>
|
||||
#include <cstddef>
|
||||
|
||||
#include "RingBuffer.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
||||
// Construct a ring buffer with n_slots slots. Only the per-slot occupancy
// flags are allocated here; the actual frame data memory is allocated
// lazily in initialize(), once the slot size is known.
template <class T>
RingBuffer<T>::RingBuffer(size_t n_slots) :
        n_slots_(n_slots),
        ringbuffer_slots_(n_slots, 0)
{
}
|
||||
|
||||
template <class T>
|
||||
RingBuffer<T>::~RingBuffer()
|
||||
{
|
||||
if (frame_data_buffer_ != NULL) {
|
||||
free(frame_data_buffer_);
|
||||
frame_data_buffer_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate the ring buffer memory for slots of requested_slot_size bytes.
//
// Idempotent: a second call with a slot size <= the existing one is a
// no-op; a second call with a LARGER slot size throws runtime_error,
// because the already-allocated slots could not hold such frames.
//
// Thread-safety: double-checked pattern — a fast, lock-free check via
// is_initialized() first, then a re-check of initialized_ under the
// slots mutex before allocating.
template <class T>
void RingBuffer<T>::initialize(const size_t requested_slot_size)
{
    // Fast path: already initialized, nothing to do.
    if (is_initialized()) {
        return;
    }

    lock_guard<mutex> lock(ringbuffer_slots_mutex_);

    // Re-check under the lock: another thread may have won the race.
    if (initialized_) {

        if (requested_slot_size > slot_size_) {
            stringstream err_msg;

            using namespace date;
            using namespace chrono;
            err_msg << "[" << system_clock::now() << "]";
            err_msg << "[RingBuffer::initialize]";
            err_msg << " Already initialized with smaller slot_size ";
            err_msg << slot_size_ << " than requested_slot_size ";
            err_msg << requested_slot_size << endl;

            throw runtime_error(err_msg.str());
        }

        return;
    }

    // First (winning) initialization: allocate one contiguous region of
    // n_slots_ * slot_size_ bytes and reset the bookkeeping.
    write_index_ = 0;
    slot_size_ = requested_slot_size;
    buffer_size_ = slot_size_ * n_slots_;
    frame_data_buffer_ = new char[buffer_size_];
    buffer_used_slots_ = 0;

    // Published last, so readers that observe initialized_ == true see a
    // fully constructed buffer.
    initialized_ = true;
}
|
||||
|
||||
// Reserve the next write slot for the frame described by frame_metadata.
//
// Returns the address of the reserved slot, or nullptr if the buffer is
// full (the next slot is still occupied). On success, records the slot
// index into frame_metadata->buffer_slot_index and advances write_index_.
//
// Throws runtime_error if the buffer was never initialized, or if the
// frame is larger than one slot.
template <class T>
char* RingBuffer<T>::reserve(shared_ptr<T> frame_metadata)
{
    if (!is_initialized()) {
        stringstream err_msg;

        using namespace date;
        using namespace chrono;
        err_msg << "[" << system_clock::now() << "]";
        err_msg << "[RingBuffer::reserve]";
        err_msg << " Ringbuffer not initialized.";

        throw runtime_error(err_msg.str());
    }

    // A frame that does not fit into one slot can never be stored.
    if (frame_metadata->frame_bytes_size > slot_size_) {
        stringstream err_msg;

        using namespace date;
        using namespace chrono;
        err_msg << "[" << system_clock::now() << "]";
        err_msg << "[RingBuffer::reserve]";
        err_msg << " Received frame index " << frame_metadata->frame_index;
        err_msg << " that is too large for ring buffer slot.";
        err_msg << " Slot size " << slot_size_ << ", but frame bytes size ";
        err_msg << frame_metadata->frame_bytes_size << endl;

        throw runtime_error(err_msg.str());
    }

    // Check and reserve slot in the buffer.
    {
        lock_guard<mutex> lock(ringbuffer_slots_mutex_);

        if (!ringbuffer_slots_[write_index_]) {
            ringbuffer_slots_[write_index_] = true;

            frame_metadata->buffer_slot_index = write_index_;

            write_index_ = (write_index_ + 1) % n_slots_;
            buffer_used_slots_++;

        } else {
            // Buffer full: the oldest slot has not been released yet.
            return nullptr;
        }
    }

    // Address computed outside the lock; the slot is now owned by the caller.
    return get_buffer_slot_address(frame_metadata->buffer_slot_index);
}
|
||||
|
||||
template <class T>
|
||||
void RingBuffer<T>::commit(shared_ptr<T> frame_metadata)
|
||||
{
|
||||
lock_guard<mutex> lock(frame_metadata_queue_mutex_);
|
||||
|
||||
frame_metadata_queue_.push_back(frame_metadata);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
char* RingBuffer<T>::get_buffer_slot_address(size_t buffer_slot_index)
|
||||
{
|
||||
char* slot_memory_address =
|
||||
frame_data_buffer_ + (buffer_slot_index * slot_size_);
|
||||
|
||||
// Check if the memory address is valid.
|
||||
if (slot_memory_address > frame_data_buffer_ + buffer_size_) {
|
||||
stringstream err_msg;
|
||||
|
||||
using namespace date;
|
||||
using namespace chrono;
|
||||
err_msg << "[" << system_clock::now() << "]";
|
||||
err_msg << "[RingBuffer::get_buffer_slot_address]";
|
||||
err_msg << " Ring buffer address out of range." << endl;
|
||||
|
||||
throw runtime_error(err_msg.str());
|
||||
}
|
||||
|
||||
return slot_memory_address;
|
||||
}
|
||||
|
||||
// Pop the oldest committed frame from the queue.
//
// Returns {metadata, slot_address} on success, or {NULL, NULL} when no
// frame is waiting. The slot stays occupied until release() is called
// with the returned buffer_slot_index.
//
// The two mutexes are taken in separate, non-overlapping scopes: first
// the queue mutex to pop the metadata, then the slots mutex to validate
// that the referenced slot is actually marked occupied.
template <class T>
pair<shared_ptr<T>, char*> RingBuffer<T>::read()
{
    shared_ptr<T> frame_metadata;

    {
        lock_guard<mutex> lock(frame_metadata_queue_mutex_);

        // A NULL char* means no waiting data in the ring buffer.
        if (frame_metadata_queue_.empty()) {
            return {NULL, NULL};
        }

        frame_metadata = frame_metadata_queue_.front();
        frame_metadata_queue_.pop_front();
    }

    // Check if the references ring buffer slot is valid.
    {
        lock_guard<mutex> lock(ringbuffer_slots_mutex_);

        // An empty slot here means writer/reader bookkeeping diverged —
        // treated as a fatal internal inconsistency.
        if (!ringbuffer_slots_[frame_metadata->buffer_slot_index]) {
            stringstream err_msg;

            using namespace date;
            using namespace chrono;
            err_msg << "[" << system_clock::now() << "]";
            err_msg << "[RingBuffer::read]";
            err_msg << " Ring buffer slot";
            err_msg << " referenced in message header ";
            err_msg << frame_metadata->buffer_slot_index << " is empty.";
            err_msg << endl;

            throw runtime_error(err_msg.str());
        }
    }

    return {frame_metadata,
            get_buffer_slot_address(frame_metadata->buffer_slot_index)};
}
|
||||
|
||||
// Mark a previously read slot as free so reserve() can reuse it.
//
// Throws runtime_error when the index is out of range or the slot is
// already empty (double release, or release of a never-reserved slot) —
// both indicate a caller bug.
template <class T>
void RingBuffer<T>::release(size_t buffer_slot_index)
{
    // Range check done outside the lock; n_slots_ is immutable.
    if (buffer_slot_index >= n_slots_) {
        stringstream err_msg;

        using namespace date;
        using namespace chrono;
        err_msg << "[" << system_clock::now() << "]";
        err_msg << "[RingBuffer::release]";
        err_msg << " Slot index " << buffer_slot_index << " is out of range.";
        err_msg << " Ring buffer n_slots = " << n_slots_ << endl;

        throw runtime_error(err_msg.str());
    }

    {
        lock_guard<mutex> lock(ringbuffer_slots_mutex_);

        if (ringbuffer_slots_[buffer_slot_index]) {
            ringbuffer_slots_[buffer_slot_index] = false;

            buffer_used_slots_--;

        } else {
            stringstream err_msg;

            using namespace date;
            using namespace chrono;
            err_msg << "[" << system_clock::now() << "]";
            err_msg << "[RingBuffer::release]";
            err_msg << " Cannot release empty ring buffer slot ";
            err_msg << buffer_slot_index << endl;

            throw runtime_error(err_msg.str());
        }
    }
}
|
||||
|
||||
template <class T>
|
||||
bool RingBuffer<T>::is_empty()
|
||||
{
|
||||
lock_guard<mutex> lock(ringbuffer_slots_mutex_);
|
||||
|
||||
return buffer_used_slots_ == 0;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool RingBuffer<T>::is_initialized()
|
||||
{
|
||||
return initialized_.load(memory_order_relaxed);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void RingBuffer<T>::clear()
|
||||
{
|
||||
lock_guard<mutex> lock_slots(ringbuffer_slots_mutex_);
|
||||
lock_guard<mutex> lock_metadata(frame_metadata_queue_mutex_);
|
||||
|
||||
write_index_ = 0;
|
||||
buffer_used_slots_ = 0;
|
||||
ringbuffer_slots_ = vector<bool>(n_slots_, 0);
|
||||
frame_metadata_queue_.clear();
|
||||
}
|
||||
|
||||
// Size in bytes of one ring buffer slot, as set by initialize().
// NOTE(review): read without the mutex — safe only after initialization;
// confirm callers never race this against initialize().
template <class T>
size_t RingBuffer<T>::get_slot_size()
{
    return slot_size_;
}
|
||||
|
||||
template class RingBuffer<FrameMetadata>;
|
||||
template class RingBuffer<UdpFrameMetadata>;
|
||||
@@ -1,115 +0,0 @@
|
||||
#include "UdpRecvModule.hpp"
|
||||
#include "jungfrau.hpp"
|
||||
#include <iostream>
|
||||
#include <UdpReceiver.hpp>
|
||||
|
||||
using namespace std;
|
||||
|
||||
// Start the UDP receiving module.
//
// queue    Destination queue for assembled module frames (shared with
//          the consumer; must outlive this object).
// udp_port Port the receive thread binds to.
//
// Spawns receive_thread immediately; is_receiving_ is the cooperative
// stop flag checked by that thread.
UdpRecvModule::UdpRecvModule(
        FastQueue<ModuleFrame>& queue,
        const uint16_t udp_port) :
            queue_(queue),
            is_receiving_(true)
{
    receiving_thread_ = thread(
            &UdpRecvModule::receive_thread, this,
            udp_port);
}
|
||||
|
||||
// Signal the receive thread to stop and wait for it to exit before the
// members it uses (queue_, is_receiving_) are destroyed.
UdpRecvModule::~UdpRecvModule()
{
    is_receiving_ = false;
    receiving_thread_.join();
}
|
||||
|
||||
// Seed the frame metadata from the first packet of a new frame: the
// packet header carries the identifiers shared by the whole frame.
inline void UdpRecvModule::init_frame (
        ModuleFrame* frame_metadata,
        jungfrau_packet& packet_buffer)
{
    frame_metadata->daq_rec = (uint64_t)packet_buffer.debug;
    frame_metadata->frame_index = packet_buffer.framenum;
    frame_metadata->pulse_id = packet_buffer.bunchid;
}
|
||||
|
||||
// Reserve the next queue slot and hand back its metadata and data buffers
// (via the out-reference parameters).
//
// pulse_id is zeroed so the receive loop can detect "no packet seen yet
// for this frame"; the data buffer is zero-filled so missing packets
// leave zeroed pixels. Throws runtime_error when the queue is full —
// the receive loop cannot proceed without a slot.
inline void UdpRecvModule::reserve_next_frame_buffers(
        ModuleFrame*& frame_metadata,
        char*& frame_buffer)
{
    int slot_id;
    if ((slot_id = queue_.reserve()) == -1)
        throw runtime_error("Queue is full.");

    frame_metadata = queue_.get_metadata_buffer(slot_id);
    frame_metadata->pulse_id = 0;
    frame_metadata->n_received_packets = 0;

    frame_buffer = queue_.get_data_buffer(slot_id);
    memset(frame_buffer, 0, JUNGFRAU_DATA_BYTES_PER_FRAME);
}
|
||||
|
||||
// Body of receiving_thread_: assemble Jungfrau UDP packets into frames.
//
// Loop invariant: frame_metadata/frame_buffer always point at a reserved,
// partially-filled queue slot. A frame is committed either when its last
// packet arrives, or retroactively when a packet from a NEWER frame shows
// up (meaning the previous frame's tail packets were lost).
//
// On any exception the loop stops, logs, and rethrows — the thread dies
// and the destructor's join() will return.
void UdpRecvModule::receive_thread(const uint16_t udp_port)
{
    try {

        UdpReceiver udp_receiver;
        udp_receiver.bind(udp_port);

        ModuleFrame* frame_metadata;
        char* frame_buffer;
        reserve_next_frame_buffers(frame_metadata, frame_buffer);

        jungfrau_packet packet_buffer;

        while (is_receiving_.load(memory_order_relaxed)) {

            // Timeout/empty read: just poll the stop flag again.
            if (!udp_receiver.receive(
                    &packet_buffer,
                    JUNGFRAU_BYTES_PER_PACKET)) {
                continue;
            }

            // First packet for this frame.
            // (pulse_id == 0 is the "fresh slot" sentinel set in
            //  reserve_next_frame_buffers.)
            if (frame_metadata->pulse_id == 0) {
                init_frame(frame_metadata, packet_buffer);

            // Happens if the last packet from the previous frame gets lost.
            } else if (frame_metadata->pulse_id != packet_buffer.bunchid) {
                queue_.commit();
                reserve_next_frame_buffers(frame_metadata, frame_buffer);

                init_frame(frame_metadata, packet_buffer);
            }

            // Packets carry their index; place the payload directly at
            // its final offset so out-of-order arrival is harmless.
            size_t frame_buffer_offset =
                    JUNGFRAU_DATA_BYTES_PER_PACKET * packet_buffer.packetnum;

            memcpy(
                    (void*) (frame_buffer + frame_buffer_offset),
                    packet_buffer.data,
                    JUNGFRAU_DATA_BYTES_PER_PACKET);

            frame_metadata->n_received_packets++;

            // Last frame packet received. Frame finished.
            if (packet_buffer.packetnum == JUNGFRAU_N_PACKETS_PER_FRAME-1)
            {
                queue_.commit();
                reserve_next_frame_buffers(frame_metadata, frame_buffer);
                this_thread::yield();
            }
        }

    } catch (const std::exception& e) {
        is_receiving_ = false;

        using namespace date;
        using namespace chrono;

        cout << "[" << system_clock::now() << "]";
        cout << "[UdpRecvModule::receive_thread]";
        cout << " Stopped because of exception: " << endl;
        cout << e.what() << endl;

        throw;
    }
}
|
||||
@@ -0,0 +1,150 @@
|
||||
#include <iostream>
|
||||
#include <stdexcept>
|
||||
#include <RingBuffer.hpp>
|
||||
#include <BufferH5Writer.hpp>
|
||||
#include "zmq.h"
|
||||
#include "buffer_config.hpp"
|
||||
#include "jungfrau.hpp"
|
||||
#include "BufferUdpReceiver.hpp"
|
||||
|
||||
#include <sys/resource.h>
|
||||
#include <syscall.h>
|
||||
|
||||
|
||||
|
||||
using namespace std;
|
||||
using namespace core_buffer;
|
||||
|
||||
// sf_buffer: receive one Jungfrau module's UDP stream, write every frame
// to the HDF5 buffer on disk, and republish it on a ZMQ PUB socket for
// live consumers. Runs forever; prints aggregate timing/loss statistics
// every STATS_MODULO frames.
int main (int argc, char *argv[]) {
    if (argc != 5) {
        cout << endl;
        cout << "Usage: sf_buffer [device_name] [udp_port] [root_folder]";
        cout << "[source_id]";
        cout << endl;
        cout << "\tdevice_name: Name to write to disk.";
        cout << "\tudp_port: UDP port to connect to." << endl;
        cout << "\troot_folder: FS root folder." << endl;
        cout << "\tsource_id: ID of the source for live stream." << endl;
        cout << endl;

        exit(-1);
    }

    string device_name = string(argv[1]);
    int udp_port = atoi(argv[2]);
    string root_folder = string(argv[3]);
    int source_id = atoi(argv[4]);

    // Live-stream endpoint: IPC URL suffixed with the source id.
    stringstream ipc_stream;
    ipc_stream << BUFFER_LIVE_IPC_URL << source_id;
    const auto ipc_address = ipc_stream.str();

    auto ctx = zmq_ctx_new();
    auto socket = zmq_socket(ctx, ZMQ_PUB);

    // Bounded send queue; PUB drops messages beyond the high-water mark.
    const int sndhwm = BUFFER_ZMQ_SNDHWM;
    if (zmq_setsockopt(socket, ZMQ_SNDHWM, &sndhwm, sizeof(sndhwm)) != 0)
        throw runtime_error(strerror (errno));

    // linger 0: do not block process exit on unsent messages.
    const int linger_ms = 0;
    if (zmq_setsockopt(socket, ZMQ_LINGER, &linger_ms, sizeof(linger_ms)) != 0)
        throw runtime_error(strerror (errno));

    if (zmq_bind(socket, ipc_address.c_str()) != 0)
        throw runtime_error(strerror (errno));

    // Rolling statistics counters, reset every STATS_MODULO frames.
    uint64_t stats_counter(0);
    uint64_t n_missed_packets = 0;
    uint64_t n_missed_frames = 0;
    uint64_t n_corrupted_frames = 0;
    uint64_t last_pulse_id = 0;

    BufferH5Writer writer(root_folder, device_name);
    BufferUdpReceiver receiver(udp_port, source_id);

    // Raise this thread's scheduling priority (nice 0 for this tid).
    pid_t tid;
    tid = syscall(SYS_gettid);
    int ret = setpriority(PRIO_PROCESS, tid, 0);
    if (ret == -1) throw runtime_error("cannot set nice");

    ModuleFrame metadata;
    // NOTE(review): buffer is sized for all modules, but only
    // MODULE_N_BYTES are written/sent below — confirm single-module intent.
    auto frame_buffer = new char[MODULE_N_BYTES * JUNGFRAU_N_MODULES];

    size_t write_total_us = 0;
    size_t write_max_us = 0;
    size_t send_total_us = 0;
    size_t send_max_us = 0;

    while (true) {

        // Blocks until a complete frame is assembled from UDP.
        auto pulse_id = receiver.get_frame_from_udp(metadata, frame_buffer);

        auto start_time = chrono::steady_clock::now();

        writer.set_pulse_id(pulse_id);
        writer.write(&metadata, frame_buffer);

        auto write_end_time = chrono::steady_clock::now();
        auto write_us_duration = chrono::duration_cast<chrono::microseconds>(
                write_end_time-start_time).count();

        start_time = chrono::steady_clock::now();

        // Two-part message: metadata struct, then raw pixel data.
        zmq_send(socket, &metadata, sizeof(ModuleFrame), ZMQ_SNDMORE);
        zmq_send(socket, frame_buffer, MODULE_N_BYTES, 0);

        auto send_end_time = chrono::steady_clock::now();
        auto send_us_duration = chrono::duration_cast<chrono::microseconds>(
                send_end_time-start_time).count();

        // TODO: Make real statistics, please.
        stats_counter++;
        write_total_us += write_us_duration;
        send_total_us += send_us_duration;

        if (write_us_duration > write_max_us) {
            write_max_us = write_us_duration;
        }

        if (send_us_duration > send_max_us) {
            send_max_us = send_us_duration;
        }

        // Incomplete frame: count the missing packets.
        if (metadata.n_received_packets < JUNGFRAU_N_PACKETS_PER_FRAME) {
            n_missed_packets +=
                    JUNGFRAU_N_PACKETS_PER_FRAME - metadata.n_received_packets;
            n_corrupted_frames++;
        }

        // Gap in pulse ids == entirely missed frames (skip first iteration).
        if (last_pulse_id>0) {
            n_missed_frames += (pulse_id - last_pulse_id) - 1;
        }
        last_pulse_id = pulse_id;

        if (stats_counter == STATS_MODULO) {
            cout << "sf_buffer:device_name " << device_name;
            cout << " sf_buffer:pulse_id " << pulse_id;
            cout << " sf_buffer:n_missed_frames " << n_missed_frames;
            cout << " sf_buffer:n_missed_packets " << n_missed_packets;
            cout << " sf_buffer:n_corrupted_frames " << n_corrupted_frames;

            cout << " sf_buffer:write_total_us " << write_total_us/STATS_MODULO;
            cout << " sf_buffer:write_max_us " << write_max_us;
            cout << " sf_buffer:send_total_us " << send_total_us/STATS_MODULO;
            cout << " sf_buffer:send_max_us " << send_max_us;
            cout << endl;

            stats_counter = 0;
            n_missed_packets = 0;
            n_corrupted_frames = 0;
            n_missed_frames = 0;

            write_total_us = 0;
            write_max_us = 0;
            send_total_us = 0;
            send_max_us = 0;
        }
    }

    // Unreachable in practice (loop above never exits normally).
    delete[] frame_buffer;
}
|
||||
@@ -0,0 +1,140 @@
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include "jungfrau.hpp"
|
||||
|
||||
#include "zmq.h"
|
||||
#include "buffer_config.hpp"
|
||||
|
||||
#include <cstring>
|
||||
#include <ReplayH5Reader.hpp>
|
||||
#include "date.h"
|
||||
#include "bitshuffle/bitshuffle.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace core_buffer;
|
||||
|
||||
// Replay one module's buffered frames over a ZMQ socket.
//
// socket          Bound ZMQ socket (caller owns it).
// device          Detector/device name on disk.
// channel_name    Module channel to read.
// start_pulse_id  First pulse id to send.
// stop_pulse_id   Last pulse id to send (inclusive).
//
// For every pulse id in range, reads the frame from the buffer files and
// sends a two-part message: StreamModuleFrame metadata, then the pixel
// data. Frames absent from the buffer are still sent, flagged via
// is_frame_present. Prints timing statistics every STATS_MODULO frames.
void sf_replay (
        void* socket,
        const string& device,
        const string& channel_name,
        const uint64_t start_pulse_id,
        const uint64_t stop_pulse_id)
{
    StreamModuleFrame metadata_buffer;
    auto frame_buffer = make_unique<uint16_t[]>(MODULE_N_PIXELS);

    ReplayH5Reader file_reader(device, channel_name);

    //TODO: Add statstics.
    uint64_t stats_counter = 0;

    uint64_t total_read_us = 0;
    uint64_t max_read_us = 0;
    uint64_t total_send_us = 0;
    uint64_t max_send_us = 0;

    // "<= stop_pulse_id" because we include the stop_pulse_id in the file.
    for (
            uint64_t curr_pulse_id = start_pulse_id;
            curr_pulse_id <= stop_pulse_id;
            curr_pulse_id++) {

        auto start_time = chrono::steady_clock::now();

        // get_frame returns whether the frame exists in the buffer.
        metadata_buffer.is_frame_present = file_reader.get_frame(
                curr_pulse_id,
                &(metadata_buffer.metadata),
                (char*)(frame_buffer.get()));

        metadata_buffer.data_n_bytes = MODULE_N_BYTES;

        auto end_time = chrono::steady_clock::now();
        auto read_us_duration = chrono::duration_cast<chrono::microseconds>(
                end_time-start_time).count();

        start_time = chrono::steady_clock::now();

        // Two-part message: metadata first (more flag), then pixel data.
        zmq_send(socket,
                 &metadata_buffer,
                 sizeof(StreamModuleFrame),
                 ZMQ_SNDMORE);
        zmq_send(socket,
                 (char*)(frame_buffer.get()),
                 metadata_buffer.data_n_bytes,
                 0);

        end_time = chrono::steady_clock::now();
        auto send_us_duration = chrono::duration_cast<chrono::microseconds>(
                end_time-start_time).count();

        // TODO: Make proper stastistics.
        stats_counter++;
        total_read_us += read_us_duration;
        max_read_us = max(max_read_us, (uint64_t)read_us_duration);

        total_send_us += send_us_duration;
        max_send_us = max(max_send_us, (uint64_t)send_us_duration);

        if (stats_counter == STATS_MODULO) {
            cout << "sf_replay:avg_read_us " << total_read_us/STATS_MODULO;
            cout << " sf_replay:max_read_us " << max_read_us;
            cout << " sf_replay:avg_send_us " << total_send_us/STATS_MODULO;
            cout << " sf_replay:max_send_us " << max_send_us;

            cout << endl;

            stats_counter = 0;
            total_read_us = 0;
            max_read_us = 0;
            total_send_us = 0;
            max_send_us = 0;
        }
    }
}
|
||||
|
||||
// sf_replay entry point: parse CLI arguments, bind a PUSH socket on the
// per-source IPC address, and stream the requested pulse id range via
// sf_replay(). Blocks until the full range has been handed to ZMQ.
int main (int argc, char *argv[]) {

    if (argc != 6) {
        cout << endl;
        cout << "Usage: sf_replay [device]";
        cout << " [channel_name] [source_id] [start_pulse_id] [stop_pulse_id]";
        cout << endl;
        cout << "\tdevice: Name of detector." << endl;
        cout << "\tchannel_name: M00-M31 for JF16M." << endl;
        cout << "\tsource_id: Module index" << endl;
        cout << "\tstart_pulse_id: Start pulse_id of retrieval." << endl;
        cout << "\tstop_pulse_id: Stop pulse_id of retrieval." << endl;
        cout << endl;

        exit(-1);
    }

    const string device = string(argv[1]);
    const string channel_name = string(argv[2]);
    const auto source_id = (uint16_t) atoi(argv[3]);
    const auto start_pulse_id = (uint64_t) atoll(argv[4]);
    const auto stop_pulse_id = (uint64_t) atoll(argv[5]);

    // Per-module IPC endpoint; cast so the id prints as a number.
    stringstream ipc_stream;
    ipc_stream << REPLAY_STREAM_IPC_URL << (int)source_id;
    const auto ipc_address = ipc_stream.str();

    auto ctx = zmq_ctx_new();
    auto socket = zmq_socket(ctx, ZMQ_PUSH);

    const int sndhwm = REPLAY_SNDHWM;
    if (zmq_setsockopt(socket, ZMQ_SNDHWM, &sndhwm, sizeof(sndhwm)) != 0)
        throw runtime_error(strerror (errno));

    // linger -1: block shutdown until all queued frames are delivered,
    // so the consumer receives the complete range.
    const int linger_ms = -1;
    if (zmq_setsockopt(socket, ZMQ_LINGER, &linger_ms, sizeof(linger_ms)) != 0)
        throw runtime_error(strerror (errno));

    if (zmq_bind(socket, ipc_address.c_str()) != 0)
        throw runtime_error(strerror (errno));

    sf_replay(socket, device, channel_name, start_pulse_id, stop_pulse_id);

    zmq_close(socket);
    zmq_ctx_destroy(ctx);
}
|
||||
@@ -0,0 +1,221 @@
|
||||
#include <iostream>
|
||||
#include <stdexcept>
|
||||
#include "buffer_config.hpp"
|
||||
|
||||
#include <string>
|
||||
#include <jungfrau.hpp>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
#include "WriterH5Writer.hpp"
|
||||
#include <FastQueue.hpp>
|
||||
#include <cstring>
|
||||
#include <zmq.h>
|
||||
#include <LiveRecvModule.hpp>
|
||||
#include "date.h"
|
||||
#include <jsoncpp/json/json.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace core_buffer;
|
||||
|
||||
// sf_stream: assemble per-module live streams into full detector images
// (via LiveRecvModule + FastQueue) and forward them to streamvis and a
// live-analysis service. Each consumer gets every frame's JSON header;
// the full pixel payload is sent only for 1-out-of-N frames (reduction
// factor), otherwise a tiny 2x2 placeholder is sent.
int main (int argc, char *argv[])
{
    if (argc != 5) {
        cout << endl;
        cout << "Usage: sf_stream ";
        cout << " [streamvis_address] [reduction_factor_streamvis]";
        cout << " [live_analysis_address] [reduction_factor_live_analysis]";
        cout << endl;
        cout << "\tstreamvis_address: address to streamvis, example tcp://129.129.241.42:9007" << endl;
        cout << "\treduction_factor_streamvis: 1 out of N (example 10) images to send to streamvis. For remaining send metadata." << endl;
        cout << "\tlive_analysis_address: address to live_analysis, example tcp://129.129.241.42:9107" << endl;
        cout << "\treduction_factor_live_analysis: 1 out of N (example 10) images to send to live analysis. For remaining send metadata. N<=1 - send every image" << endl;
        cout << endl;

        exit(-1);
    }

    string streamvis_address = string(argv[1]);
    int reduction_factor_streamvis = (int) atoll(argv[2]);
    string live_analysis_address = string(argv[3]);
    // NOTE(review): cast to uint64_t but stored in an int — harmless for
    // sane values, but the types are inconsistent; confirm intent.
    int reduction_factor_live_analysis = (uint64_t) atoll(argv[4]);

    size_t n_modules = 32;
    FastQueue<ModuleFrameBuffer> queue(
            n_modules * MODULE_N_BYTES,
            STREAM_FASTQUEUE_SLOTS);

    auto ctx = zmq_ctx_new();
    zmq_ctx_set (ctx, ZMQ_IO_THREADS, STREAM_ZMQ_IO_THREADS);

    // Background receiver fills `queue` from the per-module live streams.
    LiveRecvModule recv_module(queue, n_modules, ctx, BUFFER_LIVE_IPC_URL);

    // 0mq sockets to streamvis and live analysis
    void *socket_streamvis = zmq_socket(ctx, ZMQ_PUB);
    if (zmq_bind(socket_streamvis, streamvis_address.c_str()) != 0) {
        throw runtime_error(strerror(errno));
    }
    void *socket_live = zmq_socket(ctx, ZMQ_PUB);
    if (zmq_bind(socket_live, live_analysis_address.c_str()) != 0) {
        throw runtime_error(strerror(errno));
    }

    // Placeholder payload (2x2 uint16 zeros) for reduced-out frames.
    uint16_t data_empty [] = { 0, 0, 0, 0};
    Json::Value header;
    Json::StreamWriterBuilder builder;

    // TODO: Remove stats trash.
    int stats_counter = 0;

    size_t read_total_us = 0;
    size_t read_max_us = 0;

    while (true) {

        auto start_time = chrono::steady_clock::now();

        auto slot_id = queue.read();

        // Queue empty: back off briefly and retry.
        if(slot_id == -1) {
            this_thread::sleep_for(chrono::milliseconds(
                    core_buffer::RB_READ_RETRY_INTERVAL_MS));
            continue;
        }

        auto metadata = queue.get_metadata_buffer(slot_id);
        auto data = queue.get_data_buffer(slot_id);

        auto read_end_time = chrono::steady_clock::now();
        auto read_us_duration = chrono::duration_cast<chrono::microseconds>(
                read_end_time-start_time).count();

        uint64_t pulse_id = 0;
        uint64_t frame_index = 0;
        uint64_t daq_rec = 0;
        bool is_good_frame = true;

        // Consistency check across all modules: module 0 defines the
        // expected ids; any mismatch or incomplete module (128 packets
        // expected) marks the whole frame bad.
        for (size_t i_module = 0; i_module < n_modules; i_module++) {
            // TODO: Place this tests in the appropriate spot.
            auto& module_metadata = metadata->module[i_module];
            if (i_module == 0) {
                pulse_id = module_metadata.pulse_id;
                frame_index = module_metadata.frame_index;
                daq_rec = module_metadata.daq_rec;

                if ( module_metadata.n_received_packets != 128 ) is_good_frame = false;
            } else {
                if (module_metadata.pulse_id != pulse_id) is_good_frame = false;

                if (module_metadata.frame_index != frame_index) is_good_frame = false;

                if (module_metadata.daq_rec != daq_rec) is_good_frame = false;

                if (module_metadata.n_received_packets != 128 ) is_good_frame = false;
            }
        }

        //Here we need to send to streamvis and live analysis metadata(probably need to operate still on them) and data(not every frame)

        header["frame"] = (Json::Value::UInt64)frame_index;
        header["is_good_frame"] = is_good_frame;
        header["daq_rec"] = (Json::Value::UInt64)daq_rec;
        header["pulse_id"] = (Json::Value::UInt64)pulse_id;

        //this needs to be re-read from external source
        header["pedestal_file"] = "/sf/bernina/data/p17534/res/JF_pedestals/pedestal_20200423_1018.JF07T32V01.res.h5";
        header["gain_file"] = "/sf/bernina/config/jungfrau/gainMaps/JF07T32V01/gains.h5";

        header["number_frames_expected"] = 10000;
        header["run_name"] = to_string(uint64_t(pulse_id/10000)*10000);

        // detector name should come as parameter to sf_stream
        header["detector_name"] = "JF07T32V01";

        header["htype"] = "array-1.0";
        header["type"] = "uint16";

        // Random 1-out-of-N reduction: send the full image only when the
        // draw is 0, else the placeholder (and a 2x2 shape in the header).
        int send_streamvis = 0;
        if ( reduction_factor_streamvis > 1 ) {
            send_streamvis = rand() % reduction_factor_streamvis;
        }
        if ( send_streamvis == 0 ) {
            header["shape"][0] = 16384;
            header["shape"][1] = 1024;
        } else{
            header["shape"][0] = 2;
            header["shape"][1] = 2;
        }

        string text_header = Json::writeString(builder, header);

        zmq_send(socket_streamvis,
                 text_header.c_str(),
                 text_header.size(),
                 ZMQ_SNDMORE);

        if ( send_streamvis == 0 ) {
            zmq_send(socket_streamvis,
                     (char*)data,
                     core_buffer::MODULE_N_BYTES*n_modules,
                     0);
        } else {
            zmq_send(socket_streamvis,
                     (char*)data_empty,
                     8,
                     0);
        }

        //same for live analysis
        int send_live_analysis = 0;
        if ( reduction_factor_live_analysis > 1 ) {
            send_live_analysis = rand() % reduction_factor_live_analysis;
        }
        if ( send_live_analysis == 0 ) {
            header["shape"][0] = 16384;
            header["shape"][1] = 1024;
        } else{
            header["shape"][0] = 2;
            header["shape"][1] = 2;
        }

        text_header = Json::writeString(builder, header);

        zmq_send(socket_live,
                 text_header.c_str(),
                 text_header.size(),
                 ZMQ_SNDMORE);

        if ( send_live_analysis == 0 ) {
            zmq_send(socket_live,
                     (char*)data,
                     core_buffer::MODULE_N_BYTES*n_modules,
                     0);
        } else {
            zmq_send(socket_live,
                     (char*)data_empty,
                     8,
                     0);
        }

        // Slot can be reused only after both sends copied/queued the data.
        queue.release();

        // TODO: Some poor statistics.
        stats_counter++;
        read_total_us += read_us_duration;

        if (read_us_duration > read_max_us) {
            read_max_us = read_us_duration;
        }

        if (stats_counter == STATS_MODULO) {
            cout << "sf_stream:read_us " << read_total_us / STATS_MODULO;
            cout << " sf_stream:read_max_us " << read_max_us;
            cout << endl;

            stats_counter = 0;
            read_total_us = 0;
            read_max_us = 0;
        }
    }

    // Unreachable: the loop above never exits normally.
    return 0;
}
|
||||
@@ -0,0 +1,179 @@
|
||||
#include <iostream>
|
||||
#include <stdexcept>
|
||||
#include "buffer_config.hpp"
|
||||
#include "zmq.h"
|
||||
#include <string>
|
||||
#include <jungfrau.hpp>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
#include "WriterH5Writer.hpp"
|
||||
#include <FastQueue.hpp>
|
||||
#include <cstring>
|
||||
#include <BufferedFastQueue.hpp>
|
||||
#include "date.h"
|
||||
#include "bitshuffle/bitshuffle.h"
|
||||
#include "WriterZmqReceiver.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace core_buffer;
|
||||
|
||||
void receive_replay(
|
||||
void* ctx,
|
||||
const string ipc_prefix,
|
||||
const size_t n_modules,
|
||||
FastQueue<ImageMetadataBuffer>& queue,
|
||||
const uint64_t start_pulse_id,
|
||||
const uint64_t stop_pulse_id)
|
||||
{
|
||||
try {
|
||||
WriterZmqReceiver receiver(ctx, ipc_prefix, n_modules);
|
||||
BufferedFastQueue buffered_queue(
|
||||
queue, WRITER_DATA_CACHE_N_IMAGES, n_modules);
|
||||
|
||||
uint64_t current_pulse_id=start_pulse_id;
|
||||
|
||||
// "<= stop_pulse_id" because we include the last pulse_id.
|
||||
while(current_pulse_id<=stop_pulse_id) {
|
||||
|
||||
auto image_metadata = buffered_queue.get_metadata_buffer();
|
||||
auto image_buffer = buffered_queue.get_data_buffer();
|
||||
|
||||
receiver.get_next_image(
|
||||
current_pulse_id, image_metadata, image_buffer);
|
||||
|
||||
if (image_metadata->pulse_id != current_pulse_id) {
|
||||
throw runtime_error("Wrong pulse id from zmq receiver.");
|
||||
}
|
||||
|
||||
buffered_queue.commit();
|
||||
current_pulse_id++;
|
||||
}
|
||||
|
||||
buffered_queue.finalize();
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
using namespace date;
|
||||
using namespace chrono;
|
||||
|
||||
cout << "[" << system_clock::now() << "]";
|
||||
cout << "[sf_writer::receive_replay]";
|
||||
cout << " Stopped because of exception: " << endl;
|
||||
cout << e.what() << endl;
|
||||
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
int main (int argc, char *argv[])
|
||||
{
|
||||
if (argc != 4) {
|
||||
cout << endl;
|
||||
cout << "Usage: sf_writer ";
|
||||
cout << " [output_file] [start_pulse_id] [stop_pulse_id]";
|
||||
cout << endl;
|
||||
cout << "\toutput_file: Complete path to the output file." << endl;
|
||||
cout << "\tstart_pulse_id: Start pulse_id of retrieval." << endl;
|
||||
cout << "\tstop_pulse_id: Stop pulse_id of retrieval." << endl;
|
||||
cout << endl;
|
||||
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
string output_file = string(argv[1]);
|
||||
uint64_t start_pulse_id = (uint64_t) atoll(argv[2]);
|
||||
uint64_t stop_pulse_id = (uint64_t) atoll(argv[3]);
|
||||
|
||||
size_t n_modules = 32;
|
||||
|
||||
FastQueue<ImageMetadataBuffer> queue(
|
||||
MODULE_N_BYTES * n_modules * WRITER_DATA_CACHE_N_IMAGES,
|
||||
WRITER_FASTQUEUE_N_SLOTS);
|
||||
|
||||
auto ctx = zmq_ctx_new();
|
||||
zmq_ctx_set (ctx, ZMQ_IO_THREADS, WRITER_ZMQ_IO_THREADS);
|
||||
|
||||
thread replay_receive_thread(receive_replay,
|
||||
ctx, REPLAY_STREAM_IPC_URL, n_modules,
|
||||
ref(queue), start_pulse_id, stop_pulse_id);
|
||||
|
||||
size_t n_frames = stop_pulse_id - start_pulse_id + 1;
|
||||
WriterH5Writer writer(output_file, n_frames, n_modules);
|
||||
|
||||
// TODO: Remove stats trash.
|
||||
int stats_counter = 0;
|
||||
size_t read_total_us = 0;
|
||||
size_t write_total_us = 0;
|
||||
size_t read_max_us = 0;
|
||||
size_t write_max_us = 0;
|
||||
|
||||
auto start_time = chrono::steady_clock::now();
|
||||
|
||||
auto current_pulse_id = start_pulse_id;
|
||||
// "<= stop_pulse_id" because we include the last pulse_id.
|
||||
while (current_pulse_id <= stop_pulse_id) {
|
||||
|
||||
int slot_id; ;
|
||||
while((slot_id = queue.read()) == -1) {
|
||||
this_thread::sleep_for(chrono::milliseconds(
|
||||
RB_READ_RETRY_INTERVAL_MS));
|
||||
}
|
||||
|
||||
auto metadata = queue.get_metadata_buffer(slot_id);
|
||||
auto data = queue.get_data_buffer(slot_id);
|
||||
|
||||
auto read_end_time = chrono::steady_clock::now();
|
||||
auto read_us_duration = chrono::duration_cast<chrono::microseconds>(
|
||||
read_end_time-start_time).count();
|
||||
|
||||
// Verify that all pulse_ids are correct.
|
||||
for (int i=0; i<metadata->n_pulses_in_buffer; i++) {
|
||||
if (metadata->pulse_id[i] != current_pulse_id) {
|
||||
throw runtime_error("Wrong pulse id from receiver thread.");
|
||||
}
|
||||
|
||||
current_pulse_id++;
|
||||
}
|
||||
|
||||
start_time = chrono::steady_clock::now();
|
||||
|
||||
writer.write(metadata, data);
|
||||
|
||||
auto write_end_time = chrono::steady_clock::now();
|
||||
auto write_us_duration = chrono::duration_cast<chrono::microseconds>(
|
||||
write_end_time-start_time).count();
|
||||
|
||||
queue.release();
|
||||
|
||||
// TODO: Some poor statistics.
|
||||
stats_counter++;
|
||||
|
||||
read_total_us += read_us_duration;
|
||||
read_max_us = max(read_max_us, (uint64_t)read_us_duration);
|
||||
|
||||
write_total_us += write_us_duration;
|
||||
write_max_us = max(write_max_us, (uint64_t)write_us_duration);
|
||||
|
||||
// if (stats_counter == STATS_MODULO) {
|
||||
cout << "sf_writer:read_us " << read_total_us / STATS_MODULO;
|
||||
cout << " sf_writer:read_max_us " << read_max_us;
|
||||
cout << " sf_writer:write_us " << write_total_us / STATS_MODULO;
|
||||
cout << " sf_writer:write_max_us " << write_max_us;
|
||||
|
||||
cout << endl;
|
||||
|
||||
stats_counter = 0;
|
||||
read_total_us = 0;
|
||||
read_max_us = 0;
|
||||
write_total_us = 0;
|
||||
write_max_us = 0;
|
||||
// }
|
||||
|
||||
start_time = chrono::steady_clock::now();
|
||||
}
|
||||
|
||||
writer.close_file();
|
||||
|
||||
//wait till receive thread is finished
|
||||
replay_receive_thread.join();
|
||||
return 0;
|
||||
}
|
||||
Reference in New Issue
Block a user