Files
Jungfraujoch/compression/JFJochDecompress.h
T
leonarski_f 4a852b4d6b
Build Packages / build:rpm (rocky9_nocuda) (push) Successful in 12m57s
Build Packages / build:rpm (rocky8_nocuda) (push) Successful in 13m4s
Build Packages / build:rpm (ubuntu2404_nocuda) (push) Successful in 11m18s
Build Packages / build:rpm (ubuntu2204_nocuda) (push) Successful in 13m12s
Build Packages / build:rpm (rocky8_sls9) (push) Successful in 13m51s
Build Packages / build:rpm (rocky9_sls9) (push) Successful in 13m59s
Build Packages / build:rpm (ubuntu2204) (push) Successful in 10m45s
Build Packages / build:rpm (rocky8) (push) Successful in 12m29s
Build Packages / build:rpm (ubuntu2404) (push) Successful in 12m2s
Build Packages / Generate python client (push) Successful in 24s
Build Packages / XDS test (durin plugin) (push) Successful in 9m50s
Build Packages / Create release (push) Has been skipped
Build Packages / build:rpm (rocky9) (push) Successful in 14m15s
Build Packages / Build documentation (push) Successful in 1m6s
Build Packages / DIALS test (push) Successful in 13m10s
Build Packages / XDS test (JFJoch plugin) (push) Successful in 6m45s
Build Packages / XDS test (neggia plugin) (push) Successful in 5m58s
Build Packages / Unit tests (push) Successful in 1h20m42s
v1.0.0-rc.134 (#43)
This is an UNSTABLE release. The release contains significant modifications and bug fixes; if things go wrong, it is better to revert to 1.0.0-rc.132.

* jfjoch_broker: Add better locking for the detector object - should help when detector initialization takes too long
* jfjoch_writer: Enable writing single, integrated HDF5 file with both data and metadata
* XDS plugin: Add generation of Jungfraujoch plugin for XDS
* CI: Add tests with XDS and DIALS (`xia2.ssx`)

Reviewed-on: #43
2026-04-09 13:30:47 +02:00

173 lines
7.6 KiB
C++

// SPDX-FileCopyrightText: 2024 Filip Leonarski, Paul Scherrer Institute <filip.leonarski@psi.ch>
// SPDX-License-Identifier: GPL-3.0-only
#ifndef JUNGFRAUJOCH_JFJOCHDECOMPRESS_H
#define JUNGFRAUJOCH_JFJOCHDECOMPRESS_H
#include <vector>
#include <cstring>
#include <bitshuffle/bitshuffle.h>
#include <bitshuffle/bitshuffle_internals.h>
#include <bitshuffle_hperf/bitshuffle.h>
#include <lz4/lz4.h>
#include <zstd.h>
#include "../compression/CompressionAlgorithmEnum.h"
#include "../common/JFJochException.h"
#include "../common/CompressedImage.h"
extern "C" {
uint64_t bshuf_read_uint64_BE(const void* buf);
};
/// @brief Decompress a bitshuffle-compressed stream using the high-performance
///        (hperf) bitshuffle decoder.
///
/// The input stream (after the 12-byte bshuf file header, which the caller must
/// strip) is a sequence of blocks, each prefixed by a 4-byte big-endian
/// compressed size, followed by a raw (uncompressed, unshuffled) tail for the
/// elements that do not fill a full BSHUF_BLOCKED_MULT multiple.
///
/// @param output      Destination buffer; must hold nelements * elem_size bytes.
/// @param algorithm   Must be BSHUF_LZ4, BSHUF_ZSTD or BSHUF_ZSTD_RLE.
/// @param source      Compressed stream (without the 12-byte bshuf header).
/// @param source_size Size of the compressed stream in bytes; used to guard
///                    against reading past the end of a truncated buffer.
/// @param nelements   Number of elements to decompress.
/// @param elem_size   Size of a single element in bytes.
/// @param block_size  Bitshuffle block size in elements; must be a non-zero
///                    multiple of BSHUF_BLOCKED_MULT.
/// @return Number of source bytes consumed.
/// @throws JFJochException on unsupported algorithm, invalid block size,
///         truncated input or decompression failure.
inline size_t JFJochDecompressHperfPtr(uint8_t *output,
                                       CompressionAlgorithm algorithm,
                                       const uint8_t *source,
                                       size_t source_size,
                                       size_t nelements,
                                       size_t elem_size,
                                       size_t block_size) {
    if ((algorithm != CompressionAlgorithm::BSHUF_LZ4) &&
        (algorithm != CompressionAlgorithm::BSHUF_ZSTD) &&
        (algorithm != CompressionAlgorithm::BSHUF_ZSTD_RLE))
        throw JFJochException(JFJochExceptionCategory::Compression, "Algorithm not supported by hperf decompressor");

    // block_size == 0 would pass the modulo test but divide by zero below
    if ((block_size == 0) || ((block_size % BSHUF_BLOCKED_MULT) != 0))
        throw JFJochException(JFJochExceptionCategory::Compression, "Invalid block size");

    // Scratch buffers sized for one full block
    std::vector<char> decompressed_block(block_size * elem_size);
    std::vector<char> scratch(block_size * elem_size);

    const uint8_t *src_ptr = source;
    const uint8_t *const src_end = source + source_size;
    uint8_t *dst_ptr = output;

    const size_t num_full_blocks = nelements / block_size;
    const size_t remainder_size = nelements - num_full_blocks * block_size;
    // The remainder is itself split: a bitshuffled part (multiple of
    // BSHUF_BLOCKED_MULT) and a raw-copied tail of leftover elements
    const size_t last_block_size = remainder_size - remainder_size % BSHUF_BLOCKED_MULT;

    auto decode_block = [&](size_t current_nelements) {
        // Each block starts with a 4-byte big-endian compressed size
        if (static_cast<size_t>(src_end - src_ptr) < 4)
            throw JFJochException(JFJochExceptionCategory::Compression, "Compressed buffer truncated");
        const auto compressed_size = static_cast<size_t>(bshuf_read_uint32_BE(src_ptr));
        src_ptr += 4;
        if (static_cast<size_t>(src_end - src_ptr) < compressed_size)
            throw JFJochException(JFJochExceptionCategory::Compression, "Compressed buffer truncated");

        const size_t expected_size = current_nelements * elem_size;
        size_t decompressed_size = 0;
        switch (algorithm) {
            case CompressionAlgorithm::BSHUF_LZ4: {
                const int ret = LZ4_decompress_safe(reinterpret_cast<const char *>(src_ptr),
                                                    decompressed_block.data(),
                                                    static_cast<int>(compressed_size),
                                                    static_cast<int>(expected_size));
                if (ret < 0 || static_cast<size_t>(ret) != expected_size)
                    throw JFJochException(JFJochExceptionCategory::Compression, "LZ4 decompression error");
                decompressed_size = static_cast<size_t>(ret);
                break;
            }
            case CompressionAlgorithm::BSHUF_ZSTD:
            case CompressionAlgorithm::BSHUF_ZSTD_RLE: {
                const size_t ret = ZSTD_decompress(decompressed_block.data(),
                                                   expected_size,
                                                   src_ptr,
                                                   compressed_size);
                if (ZSTD_isError(ret) || ret != expected_size)
                    throw JFJochException(JFJochExceptionCategory::Compression, "ZSTD decompression error");
                decompressed_size = ret;
                break;
            }
            default:
                throw JFJochException(JFJochExceptionCategory::Compression, "Algorithm not supported");
        }

        // Undo the bit-transpose performed at compression time
        if (bitshuf_decode_block(reinterpret_cast<char *>(dst_ptr),
                                 decompressed_block.data(),
                                 scratch.data(),
                                 current_nelements,
                                 elem_size) < 0)
            throw JFJochException(JFJochExceptionCategory::Compression, "bitshuffle_hperf decode error");

        src_ptr += compressed_size;
        dst_ptr += decompressed_size;
    };

    for (size_t i = 0; i < num_full_blocks; ++i)
        decode_block(block_size);

    if (last_block_size > 0)
        decode_block(last_block_size);

    // Elements that do not fill a BSHUF_BLOCKED_MULT multiple are stored raw
    const size_t leftover_bytes = (remainder_size % BSHUF_BLOCKED_MULT) * elem_size;
    if (leftover_bytes > 0) {
        if (static_cast<size_t>(src_end - src_ptr) < leftover_bytes)
            throw JFJochException(JFJochExceptionCategory::Compression, "Compressed buffer truncated");
        memcpy(dst_ptr, src_ptr, leftover_bytes);
        src_ptr += leftover_bytes;
    }

    return static_cast<size_t>(src_ptr - source);
}
/// @brief Decompress a buffer into pre-allocated output memory.
///
/// For bitshuffle-based algorithms the source is expected to carry the
/// standard 12-byte bshuf header: 8-byte big-endian total uncompressed size
/// followed by a 4-byte big-endian block size in bytes.
///
/// @param output      Destination buffer; must hold nelements * elem_size bytes.
/// @param algorithm   Compression algorithm used to produce the source buffer.
/// @param source      Compressed buffer (including bshuf header, if any).
/// @param source_size Size of the source buffer in bytes.
/// @param nelements   Number of elements expected after decompression.
/// @param elem_size   Size of a single element in bytes.
/// @param use_hperf   Use the high-performance decoder instead of the
///                    reference bitshuffle implementation.
/// @throws JFJochException on size mismatch, truncated input or
///         decompression failure.
inline void JFJochDecompressPtr(uint8_t *output,
                                CompressionAlgorithm algorithm,
                                const uint8_t *source,
                                size_t source_size,
                                size_t nelements,
                                size_t elem_size,
                                bool use_hperf = true) {
    size_t block_size = 0;
    if (algorithm != CompressionAlgorithm::NO_COMPRESSION) {
        // Ensure the 12-byte bshuf header is actually present before reading it
        if (source_size < 12)
            throw JFJochException(JFJochExceptionCategory::Compression, "Mismatch in size");
        // bshuf_read_uint64_BE takes const void*, so no const_cast is needed
        if (bshuf_read_uint64_BE(source) != nelements * elem_size)
            throw JFJochException(JFJochExceptionCategory::Compression, "Mismatch in size");
        // Header stores block size in bytes; convert to elements
        block_size = bshuf_read_uint32_BE(source + 8) / elem_size;
    }

    switch (algorithm) {
        case CompressionAlgorithm::NO_COMPRESSION:
            if (source_size != nelements * elem_size)
                throw JFJochException(JFJochExceptionCategory::Compression, "Mismatch in size");
            memcpy(output, source, source_size);
            break;
        case CompressionAlgorithm::BSHUF_LZ4:
            if (use_hperf) {
                // The decompressor must consume exactly the payload after the header
                if (JFJochDecompressHperfPtr(output, algorithm, source + 12, source_size - 12,
                                             nelements, elem_size, block_size) != source_size - 12)
                    throw JFJochException(JFJochExceptionCategory::Compression, "Decompression error");
            } else {
                if (bshuf_decompress_lz4(source + 12, output, nelements,
                                         elem_size, block_size) != source_size - 12)
                    throw JFJochException(JFJochExceptionCategory::Compression, "Decompression error");
            }
            break;
        case CompressionAlgorithm::BSHUF_ZSTD_RLE:
        case CompressionAlgorithm::BSHUF_ZSTD:
            if (use_hperf) {
                if (JFJochDecompressHperfPtr(output, algorithm, source + 12, source_size - 12,
                                             nelements, elem_size, block_size) != source_size - 12)
                    throw JFJochException(JFJochExceptionCategory::Compression, "Decompression error");
            } else {
                if (bshuf_decompress_zstd(source + 12, output, nelements,
                                          elem_size, block_size) != source_size - 12)
                    throw JFJochException(JFJochExceptionCategory::Compression, "Decompression error");
            }
            break;
        default:
            throw JFJochException(JFJochExceptionCategory::Compression, "Not implemented algorithm");
    }
}
/// @brief Decompress a raw buffer into a typed std::vector.
///
/// Resizes @p output to @p nelements and decompresses in place.
///
/// @tparam Td      Element type of the output vector (determines elem_size).
/// @tparam Ts      Element type of the source buffer (treated as raw bytes).
/// @param output      Destination vector; resized to nelements.
/// @param algorithm   Compression algorithm of the source buffer.
/// @param source_v    Pointer to the compressed data.
/// @param source_size Size of the compressed data in bytes.
/// @param nelements   Number of Td elements expected after decompression.
/// @param use_hperf   Use the high-performance decoder.
/// @throws JFJochException on decompression failure or size mismatch.
template <class Td, class Ts>
void JFJochDecompress(std::vector<Td> &output, CompressionAlgorithm algorithm, const Ts *source_v, size_t source_size,
                      size_t nelements, bool use_hperf = true) {
    output.resize(nelements);
    // Named casts instead of C-style casts; the source stays const
    JFJochDecompressPtr(reinterpret_cast<uint8_t *>(output.data()), algorithm,
                        reinterpret_cast<const uint8_t *>(source_v), source_size,
                        nelements, sizeof(Td), use_hperf);
}
/// @brief Decompress a std::vector source into a typed std::vector.
///
/// Convenience overload forwarding to the pointer-based variant.
///
/// @param output     Destination vector; resized to nelements.
/// @param algorithm  Compression algorithm of the source buffer.
/// @param source_v   Compressed data. Passed by const reference: the previous
///                   by-value signature copied the whole compressed buffer on
///                   every call.
/// @param nelements  Number of Td elements expected after decompression.
/// @param use_hperf  Use the high-performance decoder.
/// @throws JFJochException on decompression failure or size mismatch.
template <class Td, class Ts>
void JFJochDecompress(std::vector<Td> &output, CompressionAlgorithm algorithm, const std::vector<Ts> &source_v,
                      size_t nelements, bool use_hperf = true) {
    JFJochDecompress(output, algorithm, source_v.data(), source_v.size() * sizeof(Ts), nelements, use_hperf);
}
#endif //JUNGFRAUJOCH_JFJOCHDECOMPRESS_H