f7a9e4eab1
Build Packages / build:rpm (rocky8_nocuda) (push) Successful in 12m17s
Build Packages / build:rpm (rocky9_nocuda) (push) Successful in 15m22s
Build Packages / build:rpm (ubuntu2404_nocuda) (push) Successful in 16m11s
Build Packages / build:rpm (ubuntu2204_nocuda) (push) Successful in 16m41s
Build Packages / build:rpm (rocky8_sls9) (push) Successful in 17m3s
Build Packages / build:rpm (rocky8) (push) Successful in 18m10s
Build Packages / build:rpm (rocky9_sls9) (push) Successful in 19m15s
Build Packages / build:rpm (rocky9) (push) Successful in 19m17s
Build Packages / Generate python client (push) Successful in 1m46s
Build Packages / build:rpm (ubuntu2204) (push) Successful in 9m9s
Build Packages / Create release (push) Has been skipped
Build Packages / Build documentation (push) Successful in 2m0s
Build Packages / DIALS test (push) Failing after 9m1s
Build Packages / build:rpm (ubuntu2404) (push) Successful in 10m43s
Build Packages / XDS test (JFJoch plugin) (push) Failing after 9m9s
Build Packages / XDS test (durin plugin) (push) Failing after 9m43s
Build Packages / XDS test (neggia plugin) (push) Failing after 8m41s
Build Packages / Unit tests (push) Failing after 56m55s
248 lines
8.3 KiB
C++
248 lines
8.3 KiB
C++
// SPDX-FileCopyrightText: 2024 Filip Leonarski, Paul Scherrer Institute <filip.leonarski@psi.ch>
|
|
// SPDX-License-Identifier: GPL-3.0-only
|
|
|
|
#include <sys/stat.h>
|
|
#include <filesystem>
|
|
#include <iostream>
|
|
|
|
#include "HDF5DataFile.h"
|
|
#include "../compression/JFJochCompressor.h"
|
|
|
|
#include "HDF5DataFilePluginAzInt.h"
|
|
#include "HDF5DataFilePluginMX.h"
|
|
#include "HDF5DataFilePluginXFEL.h"
|
|
#include "HDF5DataFilePluginDetector.h"
|
|
#include "HDF5DataFilePluginROI.h"
|
|
#include "HDF5DataFilePluginPerformance.h"
|
|
#include "HDF5DataFilePluginImageStats.h"
|
|
#include "HDF5DataFilePluginReflection.h"
|
|
#include "../include/spdlog/fmt/fmt.h"
|
|
#include "HDF5NXmx.h"
|
|
#include "../common/time_utc.h"
|
|
|
|
// Constructs the writer for one data file of an acquisition.
// @param msg            acquisition start message (file naming, per-file image
//                       count, overwrite policy, arm date)
// @param in_file_number zero-based index of this data file within the series
// No I/O happens here: the HDF5 file itself is created lazily on first Write().
HDF5DataFile::HDF5DataFile(const StartMessage &msg, uint64_t in_file_number) {
    file_number = in_file_number;

    if (msg.overwrite.has_value())
        overwrite = msg.overwrite.value();

    xpixel = 0;
    ypixel = 0;
    max_image_number = 0;
    nimages = 0;
    filename = HDF5Metadata::DataFileName(msg, file_number);
    image_low = file_number * msg.images_per_file;
    images_per_file = msg.images_per_file;

    timestamp.reserve(images_per_file);
    exptime.reserve(images_per_file);
    number.reserve(images_per_file);

    // Suffix for the temporary file name: the acquisition arm time when it
    // parses, otherwise the current wall clock. Initializing with the wall
    // clock up front fixes a bug where tmp_suffix was read uninitialized
    // (UB) when msg.arm_date was empty — the old try body only assigned on
    // non-empty input and the catch never ran in that case.
    uint64_t tmp_suffix = std::chrono::system_clock::now().time_since_epoch().count();
    try {
        if (!msg.arm_date.empty())
            tmp_suffix = parse_UTC_to_ms(msg.arm_date);
    } catch (...) {
        // Malformed arm_date: keep the wall-clock fallback.
    }
    tmp_filename = fmt::format("{}.{:08x}.tmp", filename, tmp_suffix);

    // Plugin order is preserved from the original implementation; plugins are
    // invoked in this order on OpenFile/Write/WriteFinal.
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginROI>());
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginDetector>(msg));
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginAzInt>(msg));
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginXFEL>());
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginMX>(msg));
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginImageStats>());
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginReflection>());
    plugins.emplace_back(std::make_unique<HDF5DataFilePluginPerformance>());
}
|
|
|
|
std::optional<HDF5DataFileStatistics> HDF5DataFile::Close() {
|
|
if (!data_file)
|
|
return {};
|
|
|
|
// If a prior write already failed, do not call ANY further HDF5 routines on
|
|
// this file (per HDF Forum guidance: behavior after an I/O error is undefined,
|
|
// and a subsequent H5Fclose can segfault). Just drop the handles and unlink
|
|
// the tmp file. Do NOT rename to the final name.
|
|
if (broken) {
|
|
if (data_set) data_set.reset();
|
|
if (data_set_image_number) data_set_image_number.reset();
|
|
data_file.reset();
|
|
if (manage_file) {
|
|
std::error_code ec;
|
|
std::filesystem::remove(tmp_filename, ec);
|
|
}
|
|
closed = true;
|
|
return {};
|
|
}
|
|
|
|
try {
|
|
HDF5Group group_exp(*data_file, "/entry/detector");
|
|
group_exp.NXClass("NXcollection");
|
|
|
|
group_exp.SaveVector("timestamp", timestamp);
|
|
group_exp.SaveVector("exptime", exptime);
|
|
group_exp.SaveVector("number", number);
|
|
|
|
for (auto &p: plugins)
|
|
p->WriteFinal(*data_file);
|
|
|
|
if (data_set) {
|
|
data_set->SetExtent({max_image_number + 1, ypixel, xpixel});
|
|
data_set
|
|
->Attr("image_nr_low", (int32_t) (image_low + 1))
|
|
.Attr("image_nr_high", (int32_t) (image_low + 1 + max_image_number));
|
|
data_set->Close();
|
|
data_set.reset();
|
|
}
|
|
} catch (...) {
|
|
// Anything during finalize failed (most likely ENOSPC). Mark broken,
|
|
// drop handles without further HDF5 calls, remove tmp, propagate.
|
|
broken = true;
|
|
if (data_set) data_set.reset();
|
|
data_file.reset();
|
|
if (manage_file) {
|
|
std::error_code ec;
|
|
std::filesystem::remove(tmp_filename, ec);
|
|
}
|
|
closed = true;
|
|
throw;
|
|
}
|
|
|
|
if (manage_file) {
|
|
try {
|
|
data_file->Close();
|
|
} catch (...) {
|
|
broken = true;
|
|
data_file.reset();
|
|
std::error_code ec;
|
|
std::filesystem::remove(tmp_filename, ec);
|
|
closed = true;
|
|
throw;
|
|
}
|
|
data_file.reset();
|
|
|
|
if (std::filesystem::exists(filename) && !overwrite)
|
|
throw JFJochException(JFJochExceptionCategory::FileWriteError, "File already exists");
|
|
std::error_code ec;
|
|
std::filesystem::rename(tmp_filename, filename, ec);
|
|
if (ec)
|
|
throw JFJochException(JFJochExceptionCategory::FileWriteError,
|
|
"Cannot rename temporary HDF5 file " + tmp_filename +
|
|
" to " + filename + ": " + ec.message());
|
|
} else {
|
|
data_file.reset();
|
|
}
|
|
|
|
closed = true;
|
|
|
|
HDF5DataFileStatistics ret;
|
|
ret.max_image_number = max_image_number;
|
|
ret.total_images = nimages;
|
|
ret.filename = filename;
|
|
ret.file_number = file_number + 1;
|
|
return ret;
|
|
}
|
|
|
|
// Destructor for the abnormal path: Close() was never (successfully) called.
// Releases dataset handles before the file handle and discards the partially
// written temporary file when this object created it. Never throws.
HDF5DataFile::~HDF5DataFile() {
    if (!data_file)
        return;

    try {
        // Same teardown order as the failure paths in Close(): datasets first,
        // then the file, then unlink the tmp file we own.
        data_set.reset();
        data_set_image_number.reset();
        data_file.reset();
        if (manage_file) {
            std::error_code ec;
            std::filesystem::remove(tmp_filename, ec);
        }
    } catch (...) {
        // Never throw from destructor; HDF5 may already be in a bad state
    }
}
|
|
|
|
void HDF5DataFile::CreateFile(const DataMessage& msg, std::shared_ptr<HDF5File> in_data_file, bool integrated) {
|
|
HDF5Dcpl dcpl;
|
|
|
|
HDF5DataType data_type(msg.image.GetMode());
|
|
|
|
xpixel = msg.image.GetWidth();
|
|
ypixel = msg.image.GetHeight();
|
|
|
|
dcpl.SetCompression(msg.image.GetCompressionAlgorithm(), JFJochBitShuffleCompressor::DefaultBlockSize);
|
|
dcpl.SetChunking( {1, ypixel, xpixel});
|
|
|
|
H5Pset_fill_time(dcpl.GetID(), H5D_FILL_TIME_NEVER);
|
|
H5Pset_alloc_time(dcpl.GetID(), H5D_ALLOC_TIME_INCR);
|
|
|
|
switch (msg.image.GetMode()) {
|
|
case CompressedImageMode::Int8:
|
|
dcpl.SetFillValue8(INT8_MIN);
|
|
break;
|
|
case CompressedImageMode::Int16:
|
|
dcpl.SetFillValue16(INT16_MIN);
|
|
break;
|
|
case CompressedImageMode::Int32:
|
|
dcpl.SetFillValue32(INT32_MIN);
|
|
break;
|
|
default:
|
|
break;
|
|
}
|
|
|
|
data_file = in_data_file;
|
|
|
|
HDF5Group(*data_file, "/entry").NXClass("NXentry");
|
|
HDF5Group(*data_file, "/entry/data").NXClass("NXdata");
|
|
|
|
HDF5DataSpace data_space({1, ypixel, xpixel}, {H5S_UNLIMITED, ypixel, xpixel});
|
|
data_set = std::make_unique<HDF5DataSet>(*data_file, "/entry/data/data", data_type, data_space, dcpl);
|
|
data_set->SetExtent({images_per_file, ypixel, xpixel});
|
|
for (auto &p: plugins)
|
|
p->OpenFile(*data_file, msg, images_per_file);
|
|
}
|
|
|
|
// Writes one pre-compressed image (and its per-image metadata) into the slot
// image_number of this file. On the first write the HDF5 file is created
// lazily, so empty files are never produced.
// @throws JFJochException if the file is already closed, previously failed
//         ("broken"), or image_number is out of range; any HDF5 error during
//         the write is rethrown after marking the file broken.
void HDF5DataFile::Write(const DataMessage &msg, uint64_t image_number) {
    if (closed)
        throw JFJochException(JFJochExceptionCategory::FileWriteError,
                              "Trying to write to already closed file");
    // Sticky-failure guard: once an HDF5 call failed for this file, no
    // further HDF5 routines may be invoked on it (see Close()).
    if (broken)
        throw JFJochException(JFJochExceptionCategory::FileWriteError,
                              "Trying to write to file that previously failed");
    if (image_number >= images_per_file)
        throw JFJochException(JFJochExceptionCategory::FileWriteError,
                              "Image number out of bounds");

    // Lazy file creation: manage_file records that this object owns the tmp
    // file and is responsible for rename/cleanup in Close()/destructor.
    if (!data_file) {
        manage_file = true;
        CreateFile(msg, std::make_shared<HDF5File>(tmp_filename));
    }

    try {
        // Track the highest slot written so far; metadata vectors are grown
        // to match, so the direct indexing below is always in range.
        if (new_file || (static_cast<int64_t>(image_number) > max_image_number)) {
            max_image_number = image_number;
            timestamp.resize(max_image_number + 1);
            exptime.resize(max_image_number + 1);
            number.resize(max_image_number + 1);
            new_file = false;
        }

        nimages++;
        // Chunk is already compressed; write it directly, bypassing the HDF5
        // filter pipeline.
        data_set->WriteDirectChunk(msg.image.GetCompressed(), msg.image.GetCompressedSize(),
                                   {image_number, 0, 0});

        for (auto &p: plugins)
            p->Write(msg, image_number);

        timestamp[image_number] = msg.timestamp;
        exptime[image_number] = msg.exptime;
        // Prefer the original (pre-renumbering) image number when provided.
        number[image_number] = (msg.original_number) ? msg.original_number.value() : msg.number;
    } catch (...) {
        // Sticky failure: do not call into HDF5 again for this file.
        broken = true;
        throw;
    }
}
|
|
|
|
size_t HDF5DataFile::GetNumImages() const {
|
|
return nimages;
|
|
}
|