Merge remote-tracking branch 'origin/master' into compression

This commit is contained in:
2020-05-01 12:41:55 +02:00
11 changed files with 346 additions and 12 deletions
+1
View File
@@ -1,6 +1,7 @@
# Build directories
bin/
obj/
build/
cmake-build-*/
+1
View File
@@ -12,6 +12,7 @@ struct DetectorFrame
uint64_t frame_index[core_buffer::WRITER_N_FRAMES_BUFFER];
uint32_t daq_rec[core_buffer::WRITER_N_FRAMES_BUFFER];
uint16_t n_received_packets[core_buffer::WRITER_N_FRAMES_BUFFER];
bool is_good_frame[core_buffer::WRITER_N_FRAMES_BUFFER];
};
class SFWriter {
+2 -2
View File
@@ -19,5 +19,5 @@ port=$((${initialUDPport}+10#${M}))
DETECTOR=JF07T32V01
echo ${port}
#taskset -c ${coreAssociatedBuffer[10#${M}]} /home/dbe/test/sf_buffer M${M} ${port} /gpfs/photonics/swissfel/buffer/${DETECTOR} > /gpfs/photonics/swissfel/buffer/${port}.log
taskset -c ${coreAssociatedBuffer[10#${M}]} /home/dbe/test/sf_buffer M${M} ${port} /gpfs/photonics/swissfel/buffer/${DETECTOR}
#taskset -c ${coreAssociatedBuffer[10#${M}]} /usr/bin/sf_buffer M${M} ${port} /gpfs/photonics/swissfel/buffer/${DETECTOR} > /gpfs/photonics/swissfel/buffer/${port}.log
taskset -c ${coreAssociatedBuffer[10#${M}]} /usr/bin/sf_buffer M${M} ${port} /gpfs/photonics/swissfel/buffer/${DETECTOR}
+1 -1
View File
@@ -8,7 +8,7 @@ BindsTo=JF07-buffer.service
PermissionsStartOnly=true
Type=idle
User=root
ExecStart=/usr/bin/sh /home/dbe/test/systemd/JF07-buffer-worker.sh %i
ExecStart=/usr/bin/sh /home/writer/git/sf_daq_buffer/scripts/JF07-buffer-worker.sh %i
TimeoutStartSec=10
RestartSec=10
+1 -1
View File
@@ -3,7 +3,7 @@ Description=All UDP-buffer instances of JF07
[Service]
Type=oneshot
ExecStart=/usr/bin/sh /home/dbe/test/systemd/JF07-buffer-worker.sh
ExecStart=/usr/bin/sh /home/writer/git/sf_daq_buffer/scripts/JF07-buffer-worker.sh
RemainAfterExit=yes
[Install]
+4 -3
View File
@@ -2,7 +2,7 @@
if [ $# != 1 ]
then
echo "need parameter, exit"
systemctl start JF07-replay-worker@{00..32}
exit
fi
@@ -20,7 +20,8 @@ echo "First/last pulse_id : ${first_pulse_id} ${last_pulse_id}"
if [ ${M} == 32 ]
then
taskset -c ${coreAssociated[10#${M}]} /home/dbe/test/sf_writer /gpfs/photonics/swissfel/buffer/test.${first_pulse_id}-${last_pulse_id}.h5 ${first_pulse_id} ${last_pulse_id}
# taskset -c ${coreAssociated[10#${M}]} /usr/bin/sf_writer /gpfs/photonics/swissfel/buffer/test.${first_pulse_id}-${last_pulse_id}.h5 ${first_pulse_id} ${last_pulse_id}
taskset -c ${coreAssociated[10#${M}]} /usr/bin/sf_stream tcp://129.129.241.42:9007 30 tcp://129.129.241.42:9107 30
else
taskset -c ${coreAssociated[10#${M}]} /home/dbe/test/sf_replay /gpfs/photonics/swissfel/buffer/JF07T32V01 M${M} ${M} ${first_pulse_id} ${last_pulse_id}
taskset -c ${coreAssociated[10#${M}]} /usr/bin/sf_replay /gpfs/photonics/swissfel/buffer/JF07T32V01 M${M} ${M} ${first_pulse_id} ${last_pulse_id}
fi
+1 -1
View File
@@ -8,7 +8,7 @@ BindsTo=JF07-replay.service
PermissionsStartOnly=true
Type=idle
User=root
ExecStart=/usr/bin/sh /home/dbe/test/systemd/JF07-replay-worker.sh %i
ExecStart=/usr/bin/sh /home/writer/git/sf_daq_buffer/scripts/JF07-replay-worker.sh %i
TimeoutStartSec=10
RestartSec=10
+1 -1
View File
@@ -3,7 +3,7 @@ Description=All replay instances of JF07
[Service]
Type=oneshot
ExecStart=/usr/bin/sh /home/dbe/test/systemd/JF07-replay.sh
ExecStart=/usr/bin/sh /home/writer/git/sf_daq_buffer/scripts/JF07-replay-worker.sh
RemainAfterExit=yes
[Install]
-2
View File
@@ -1,2 +0,0 @@
#!/bin/bash
systemctl start JF07-replay-worker@{00..32}
+10 -1
View File
@@ -53,5 +53,14 @@ target_link_libraries(sf-writer
boost_system
pthread)
add_executable(sf-stream src/sf_stream.cpp)
set_target_properties(sf-stream PROPERTIES OUTPUT_NAME sf_stream)
target_link_libraries(sf-stream
core-buffer
zmq
jsoncpp
boost_system
pthread)
enable_testing()
add_subdirectory(test/)
add_subdirectory(test/)
+324
View File
@@ -0,0 +1,324 @@
#include <chrono>
#include <cstring>
#include <iostream>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <thread>

#include <jsoncpp/json/json.h>
#include <jungfrau.hpp>
#include "zmq.h"

#include "buffer_config.hpp"
#include "date.h"
#include <FastQueue.hpp>
#include "SFWriter.hpp"
using namespace std;
using namespace core_buffer;
void receive_replay(
const string ipc_prefix,
const size_t n_modules,
FastQueue<DetectorFrame>& queue,
void* ctx)
{
try {
void *sockets[n_modules];
for (size_t i = 0; i < n_modules; i++) {
sockets[i] = zmq_socket(ctx, ZMQ_PULL);
int rcvhwm = REPLAY_READ_BLOCK_SIZE;
if (zmq_setsockopt(sockets[i], ZMQ_RCVHWM, &rcvhwm,
sizeof(rcvhwm)) != 0) {
throw runtime_error(strerror(errno));
}
int linger = 0;
if (zmq_setsockopt(sockets[i], ZMQ_LINGER, &linger,
sizeof(linger)) != 0) {
throw runtime_error(strerror(errno));
}
stringstream ipc_addr;
ipc_addr << ipc_prefix << i;
const auto ipc = ipc_addr.str();
if (zmq_bind(sockets[i], ipc.c_str()) != 0) {
throw runtime_error(strerror(errno));
}
}
auto module_meta_buffer = make_unique<ModuleFrame>();
while (true) {
auto slot_id = queue.reserve();
if (slot_id == -1){
this_thread::sleep_for(chrono::milliseconds(5));
continue;
}
auto frame_meta_buffer = queue.get_metadata_buffer(slot_id);
auto frame_buffer = queue.get_data_buffer(slot_id);
for (
size_t i_buffer=0;
i_buffer<WRITER_N_FRAMES_BUFFER;
i_buffer++)
{
frame_meta_buffer->is_good_frame[i_buffer] = true;
for (size_t i_module = 0; i_module < n_modules; i_module++) {
auto n_bytes_metadata = zmq_recv(
sockets[i_module],
module_meta_buffer.get(),
sizeof(ModuleFrame),
0);
if (n_bytes_metadata != sizeof(ModuleFrame)) {
// TODO: Make nicer expcetion.
frame_meta_buffer->is_good_frame[i_buffer] = false;
throw runtime_error(strerror(errno));
}
// Initialize buffers in first iteration for each pulse_id.
if (i_module == 0) {
frame_meta_buffer->pulse_id[i_buffer] =
module_meta_buffer->pulse_id;
frame_meta_buffer->frame_index[i_buffer] =
module_meta_buffer->frame_index;
frame_meta_buffer->daq_rec[i_buffer] =
module_meta_buffer->daq_rec;
frame_meta_buffer->n_received_packets[i_buffer] =
module_meta_buffer->n_received_packets;
}
if (frame_meta_buffer->pulse_id[i_buffer] !=
module_meta_buffer->pulse_id) {
frame_meta_buffer->is_good_frame[i_buffer] = false;
throw runtime_error("Unexpected pulse_id received.");
}
if (frame_meta_buffer->frame_index[i_buffer] !=
module_meta_buffer->frame_index) {
frame_meta_buffer->is_good_frame[i_buffer] = false;
}
if (frame_meta_buffer->daq_rec[i_buffer] !=
module_meta_buffer->daq_rec) {
frame_meta_buffer->is_good_frame[i_buffer] = false;
}
if ( module_meta_buffer->n_received_packets != 128 ) {
frame_meta_buffer->is_good_frame[i_buffer] = false;
}
// Offset due to frame in buffer.
size_t offset = MODULE_N_BYTES * n_modules * i_buffer;
// offset due to module in frame.
offset += MODULE_N_BYTES * i_module;
auto n_bytes_image = zmq_recv(
sockets[i_module],
(frame_buffer + offset),
MODULE_N_BYTES,
0);
if (n_bytes_image != MODULE_N_BYTES) {
// TODO: Make nicer expcetion.
throw runtime_error("Unexpected number of bytes.");
}
}
}
queue.commit();
}
for (size_t i = 0; i < n_modules; i++) {
zmq_close(sockets[i]);
}
zmq_ctx_destroy(ctx);
} catch (const std::exception& e) {
using namespace date;
using namespace chrono;
cout << "[" << system_clock::now() << "]";
cout << "[sf_stream::receive_replay]";
cout << " Stopped because of exception: " << endl;
cout << e.what() << endl;
throw;
}
}
int main (int argc, char *argv[])
{
if (argc != 5) {
cout << endl;
cout << "Usage: sf_stream ";
cout << " [streamvis_address] [reduction_factor_streamvis]";
cout << " [live_analysis_address] [reduction_factor_live_analysis]";
cout << endl;
cout << "\tstreamvis_address: address to streamvis, example tcp://129.129.241.42:9007" << endl;
cout << "\treduction_factor_streamvis: 1 out of N (example 10) images to send to streamvis. For remaining send metadata." << endl;
cout << "\tlive_analysis_address: address to live_analysis, example tcp://129.129.241.42:9107" << endl;
cout << "\treduction_factor_live_analysis: 1 out of N (example 10) images to send to live analysis. For remaining send metadata. N<=1 - send every image" << endl;
cout << endl;
exit(-1);
}
string streamvis_address = string(argv[1]);
int reduction_factor_streamvis = (int) atoll(argv[2]);
string live_analysis_address = string(argv[3]);
int reduction_factor_live_analysis = (uint64_t) atoll(argv[4]);
size_t n_modules = 32;
FastQueue<DetectorFrame> queue(
n_modules * MODULE_N_BYTES * WRITER_N_FRAMES_BUFFER,
WRITER_RB_BUFFER_SLOTS);
string ipc_prefix = "ipc://sf-replay-";
auto ctx = zmq_ctx_new();
zmq_ctx_set (ctx, ZMQ_IO_THREADS, WRITER_ZMQ_IO_THREADS);
thread replay_receive_thread(
receive_replay,
ipc_prefix,
n_modules,
ref(queue),
ctx);
// 0mq sockets to streamvis and live analysis
void *socket_streamvis = zmq_socket(ctx, ZMQ_PUB);
if (zmq_bind(socket_streamvis, streamvis_address.c_str()) != 0) {
throw runtime_error(strerror(errno));
}
void *socket_live = zmq_socket(ctx, ZMQ_PUB);
if (zmq_bind(socket_live, live_analysis_address.c_str()) != 0) {
throw runtime_error(strerror(errno));
}
uint16_t data_empty [] = { 0, 0, 0, 0};
// TODO: Remove stats trash.
int stats_counter = 0;
size_t read_total_us = 0;
size_t write_total_us = 0;
size_t read_max_us = 0;
size_t write_max_us = 0;
auto start_time = chrono::steady_clock::now();
Json::Value header;
while (true) {
auto slot_id = queue.read();
if(slot_id == -1) {
this_thread::sleep_for(chrono::milliseconds(
core_buffer::RB_READ_RETRY_INTERVAL_MS));
continue;
}
auto metadata = queue.get_metadata_buffer(slot_id);
auto data = queue.get_data_buffer(slot_id);
auto read_end_time = chrono::steady_clock::now();
auto read_us_duration = chrono::duration_cast<chrono::microseconds>(
read_end_time-start_time).count();
start_time = chrono::steady_clock::now();
//Here we need to send to streamvis and live analysis metadata(probably need to operate still on them) and data(not every frame)
for ( size_t i_buffer=0; i_buffer<WRITER_N_FRAMES_BUFFER; i_buffer++) {
//for (size_t i_module = 0; i_module < n_modules; i_module++) {
// cout << metadata->pulse_id[i_buffer*n_modules+i_module] << " ";
//}
//cout << endl;
//cout << metadata->is_good_frame[i_buffer] << " " << metadata->pulse_id[i_buffer] << " " << metadata->frame_index[i_buffer] << " " << metadata->daq_rec[i_buffer] << " " << metadata->n_received_packets[i_buffer] << " " << endl;
header["frame"] = (Json::Value::UInt64)metadata->frame_index[i_buffer];
header["is_good_frame"] = metadata->is_good_frame[i_buffer];
header["daq_rec"] = metadata->daq_rec[i_buffer];
header["pulse_id"] = (Json::Value::UInt64)metadata->pulse_id[i_buffer];
string text_header = Json::FastWriter().write(header);
zmq_send(socket_streamvis,
text_header,
text_header.size(),
ZMQ_SNDMORE);
zmq_send(socket_streamvis,
(char*)data_empty,
8,
0);
}
//int send_streamvis = 0;
//if ( reduction_factor_streamvis > 1 ) {
// send_streamvis = rand() % reduction_factor_streamvis;
//}
//send_streamvis = 0;
//zmq_send(socket_streamvis,
// &metadata,
// sizeof(DetectorFrame),
// ZMQ_SNDMORE);
//if ( send_streamvis == 0 ) {
// zmq_send(socket_streamvis,
// (char*)data,
// MODULE_N_BYTES*n_modules,
// 0);
//} else {
// zmq_send(socket_streamvis,
// (char*)data_empty,
// 8,
// 0);
//}
queue.release();
// TODO: Some poor statistics.
stats_counter += WRITER_N_FRAMES_BUFFER;
auto write_end_time = chrono::steady_clock::now();
auto write_us_duration = chrono::duration_cast<chrono::microseconds>(
write_end_time-start_time).count();
read_total_us += read_us_duration;
write_total_us += write_us_duration;
if (read_us_duration > read_max_us) {
read_max_us = read_us_duration;
}
if (write_us_duration > write_max_us) {
write_max_us = write_us_duration;
}
if (stats_counter == STATS_MODULO) {
cout << "sf_stream:read_us " << read_total_us / STATS_MODULO;
cout << " sf_stream:read_max_us " << read_max_us;
cout << " sf_stream:write_us " << write_total_us / STATS_MODULO;
cout << " sf_stream:write_max_us " << write_max_us;
cout << endl;
stats_counter = 0;
read_total_us = 0;
read_max_us = 0;
write_total_us = 0;
write_max_us = 0;
}
start_time = chrono::steady_clock::now();
}
return 0;
}