Compare commits

..

5 Commits

Author SHA1 Message Date
0d92ea47d0 jfjoch_process: reorder sp-search routine with timer-output
All checks were successful
Build Packages / build:rpm (rocky8_nocuda) (push) Successful in 11m34s
Build Packages / build:rpm (rocky9_nocuda) (push) Successful in 12m55s
Build Packages / build:rpm (ubuntu2204_nocuda) (push) Successful in 9m3s
Build Packages / build:rpm (ubuntu2404_nocuda) (push) Successful in 8m37s
Build Packages / Generate python client (push) Successful in 34s
Build Packages / Build documentation (push) Successful in 49s
Build Packages / build:rpm (rocky8_sls9) (push) Successful in 11m23s
Build Packages / Create release (push) Has been skipped
Build Packages / build:rpm (rocky8) (push) Successful in 12m33s
Build Packages / build:rpm (rocky9_sls9) (push) Successful in 13m42s
Build Packages / build:rpm (rocky9) (push) Successful in 13m48s
Build Packages / build:rpm (ubuntu2404) (push) Successful in 11m5s
Build Packages / build:rpm (ubuntu2204) (push) Successful in 12m49s
Build Packages / Unit tests (push) Successful in 1h14m7s
2026-03-28 16:12:33 +01:00
1a993f4ecb jfjoch_process: reflection selection for scaling
Some checks failed
Build Packages / build:rpm (ubuntu2404_nocuda) (push) Successful in 13m6s
Build Packages / build:rpm (rocky8_nocuda) (push) Successful in 13m50s
Build Packages / build:rpm (rocky9_nocuda) (push) Successful in 14m53s
Build Packages / build:rpm (ubuntu2204_nocuda) (push) Failing after 14m58s
Build Packages / Generate python client (push) Successful in 50s
Build Packages / Build documentation (push) Successful in 44s
Build Packages / Create release (push) Has been skipped
Build Packages / build:rpm (rocky9) (push) Successful in 17m17s
Build Packages / build:rpm (rocky8_sls9) (push) Successful in 18m19s
Build Packages / build:rpm (rocky8) (push) Successful in 18m20s
Build Packages / build:rpm (rocky9_sls9) (push) Successful in 19m23s
Build Packages / build:rpm (ubuntu2204) (push) Failing after 9m18s
Build Packages / build:rpm (ubuntu2404) (push) Successful in 9m10s
Build Packages / Unit tests (push) Successful in 59m11s
2026-03-27 16:32:07 +01:00
fa69ac89f3 jfjoch_process: logging more information for optimization
Some checks failed
Build Packages / build:rpm (ubuntu2404_nocuda) (push) Successful in 14m42s
Build Packages / build:rpm (rocky8_nocuda) (push) Successful in 16m0s
Build Packages / build:rpm (rocky9_nocuda) (push) Successful in 16m18s
Build Packages / build:rpm (ubuntu2204_nocuda) (push) Successful in 16m49s
Build Packages / Build documentation (push) Successful in 42s
Build Packages / Generate python client (push) Successful in 1m17s
Build Packages / Create release (push) Has been skipped
Build Packages / build:rpm (rocky8) (push) Successful in 18m31s
Build Packages / build:rpm (rocky8_sls9) (push) Successful in 18m48s
Build Packages / build:rpm (rocky9) (push) Successful in 19m45s
Build Packages / build:rpm (rocky9_sls9) (push) Successful in 19m49s
Build Packages / build:rpm (ubuntu2404) (push) Successful in 8m49s
Build Packages / build:rpm (ubuntu2204) (push) Successful in 10m11s
Build Packages / Unit tests (push) Has been cancelled
2026-03-27 15:31:59 +01:00
33410e1544 option to interrupt scaling/merging for poorly indexed case 2026-03-27 15:17:41 +01:00
fe87291c1c spot-finding option with I/sigma
Some checks failed
Build Packages / build:rpm (ubuntu2404_nocuda) (push) Successful in 11m42s
Build Packages / build:rpm (rocky8_nocuda) (push) Successful in 12m34s
Build Packages / build:rpm (rocky9_nocuda) (push) Successful in 13m27s
Build Packages / Generate python client (push) Successful in 52s
Build Packages / Build documentation (push) Successful in 24s
Build Packages / build:rpm (ubuntu2204_nocuda) (push) Successful in 16m5s
Build Packages / Create release (push) Has been skipped
Build Packages / build:rpm (rocky8) (push) Successful in 18m0s
Build Packages / build:rpm (rocky8_sls9) (push) Successful in 18m8s
Build Packages / build:rpm (rocky9) (push) Successful in 18m58s
Build Packages / build:rpm (rocky9_sls9) (push) Successful in 19m24s
Build Packages / build:rpm (ubuntu2404) (push) Successful in 9m29s
Build Packages / build:rpm (ubuntu2204) (push) Successful in 10m53s
Build Packages / Unit tests (push) Has been cancelled
2026-03-27 15:06:03 +01:00
10 changed files with 305 additions and 175 deletions

View File

@@ -76,16 +76,27 @@ jobs:
upload_url: https://gitea.psi.ch/api/packages/mx/debian/pool/noble/nocuda/upload
steps:
- uses: actions/checkout@v4
- name: Setup build (cmake)
- name: Install CMake 3.26 on Ubuntu 22.04
if: matrix.distro == 'ubuntu2204' || matrix.distro == 'ubuntu2204_nocuda'
shell: bash
run: |
set -euo pipefail
apt-get update
apt-get install -y wget gpg ca-certificates
wget -qO- https://apt.kitware.com/keys/kitware-archive-latest.asc \
| gpg --dearmor \
| tee /usr/share/keyrings/kitware-archive-keyring.gpg > /dev/null
echo "deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main" \
| tee /etc/apt/sources.list.d/kitware.list > /dev/null
apt-get update
apt-get install -y cmake=3.26.* cmake-data=3.26.* kitware-archive-keyring
cmake --version
- name: Build packages
shell: bash
run: |
mkdir -p build
cd build
cmake -G Ninja -DJFJOCH_INSTALL_DRIVER_SOURCE=ON -DJFJOCH_VIEWER_BUILD=ON -DCMAKE_BUILD_TYPE=Release ${{ matrix.cmake_flags }} ..
- name: Build packages
shell: bash
run: |
cd build
ninja frontend
ninja -j16 package
- name: Upload packages
@@ -122,32 +133,6 @@ jobs:
python3 gitea_upload_file.py "$file"
done
fi
dials-test:
name: DIALS processing test
runs-on: jfjoch_rocky9
steps:
- uses: actions/checkout@v4
- name: Build processing test
shell: bash
run: |
mkdir -p build
cd build
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release ..
ninja -j16 jfjoch_hdf5_test
- name: Generate test data (with virtual data set and 4 linked image files)
shell: bash
run: |
set -euo pipefail
mkdir -p dials_test
cd dials_test
../build/tools/jfjoch_hdf5_test ../tests/test_data/compression_benchmark.h5 -n100 -f25 -V
- name: Run DIALS processing
shell: bash
run: |
source /opt/dials-v3-27-0/dials_env.sh
set -euo pipefail
cd dials_test
xia2.ssx image=writing_test_master.h5 space_group=P43212 unit_cell=78.551,78.551,36.914,90.000,90.000,90.000
python-client:
name: Generate python client
runs-on: jfjoch_rocky8

View File

@@ -175,7 +175,7 @@ ENDIF()
IF (NOT JFJOCH_WRITER_ONLY)
ADD_CUSTOM_COMMAND(OUTPUT frontend/dist/index.html
COMMAND npm ci
COMMAND npm install
COMMAND npm run build
COMMAND npm run redocly
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/frontend)

View File

@@ -1 +1 @@
1.0.0-rc.133
1.0.0-rc.134

View File

@@ -8,9 +8,6 @@ ARG EIGEN_TAG=3.4.0
ARG LIBJPEG_TURBO_VERSION=3.1.2
ARG LIBTIFF_VERSION=v4.7.1
ARG HDF5_TAG="hdf5_1.14.6"
ARG DIALS_VERSION=3.27.0
ARG DIALS_TARBALL_URL=https://github.com/dials/dials/releases/download/v3.27.0/dials-v3-27-0-linux-x86_64.tar.xz
ARG DIALS_PREFIX=/opt
# Update base, enable CRB (RHEL/Rocky 9), and install toolchain + Qt deps
RUN dnf -y update && \
@@ -68,8 +65,6 @@ RUN dnf -y update && \
libdrm-devel \
libglvnd-core-devel \
libglvnd-devel \
glibc-langpack-en \
glibc-locale-source \
freetype-devel && \
dnf clean all && rm -rf /var/cache/dnf
@@ -190,23 +185,9 @@ RUN set -eux; \
cmake --install .; \
cd /; rm -rf /tmp/qt-everywhere-src-${QT_VERSION} /tmp/qt-everywhere-src-${QT_VERSION}.tar.xz
# Install DIALS using the official binary tarball workflow
RUN set -eux; \
cd /tmp; \
curl -fL -o dials.tar.xz "${DIALS_TARBALL_URL}"; \
tar -xJf dials.tar.xz; \
cd dials-installer; \
./install --prefix="${DIALS_PREFIX}"; \
cd /; \
rm -rf /tmp/dials.tar.xz /tmp/dials-*-linux-x86_64
# Make Qt and Eigen discoverable by CMake
ENV CMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}:/opt/hdf5-${HDF5_TAG}-static:/opt/qt-${QT_VERSION}-static
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8
# Set workdir for your project
WORKDIR /workspace

View File

@@ -22,11 +22,11 @@ RUN set -eux; \
ca-certificates \
curl \
wget \
gpg \
git \
tar \
xz-utils \
build-essential \
cmake \
ninja-build \
python3 \
python3-requests \
@@ -78,25 +78,10 @@ RUN set -eux; \
libassimp-dev \
libglvnd-dev \
libfreetype6-dev; \
wget -qO- https://apt.kitware.com/keys/kitware-archive-latest.asc \
| gpg --dearmor \
| tee /usr/share/keyrings/kitware-archive-keyring.gpg > /dev/null; \
echo "deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main" \
| tee /etc/apt/sources.list.d/kitware.list > /dev/null; \
apt-get update; \
apt-get install -y --no-install-recommends \
cmake=3.26.* \
cmake-data=3.26.* \
kitware-archive-keyring; \
apt-get -y install gcc-12 g++-12; \
apt-get clean; \
rm -rf /var/lib/apt/lists/*
# Use GCC/G++ 12 for builds
ENV CC=/usr/bin/gcc-12
ENV CXX=/usr/bin/g++-12
ENV PATH=/usr/bin:${PATH}
# Build a static OpenSSL
RUN set -eux; \
cd /tmp; \
@@ -181,6 +166,11 @@ RUN set -eux; \
ENV CMAKE_PREFIX_PATH=/opt/libtiff-${LIBTIFF_VERSION}-static:/opt/libjpeg-turbo-${LIBJPEG_TURBO_VERSION}-static
ENV PKG_CONFIG_PATH=/opt/hdf5-${HDF5_TAG}-static/lib/pkgconfig:/opt/libjpeg-turbo-${LIBJPEG_TURBO_VERSION}-static/lib/pkgconfig:/opt/libtiff-${LIBTIFF_VERSION}-static/lib/pkgconfig:${OPENSSL_ROOT_DIR}/lib/pkgconfig:${OPENSSL_ROOT_DIR}/lib64/pkgconfig
# Use GCC/G++ 12 for builds
ENV CC=/usr/bin/gcc-12
ENV CXX=/usr/bin/g++-12
ENV PATH=/usr/bin:${PATH}
# Build and install static Qt 6.9 with Core, Gui, Widgets, Charts, DBus
ARG QT_PREFIX=/opt/qt-${QT_VERSION}-static
RUN set -eux; \

View File

@@ -7,6 +7,7 @@
#include <thread>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <cmath>
#include <limits>
@@ -235,6 +236,7 @@ namespace {
int img_id = 0;
int hkl_slot = -1;
double sigma = 0.0;
mutable bool selected = true;
};
struct CorrectedObs {
@@ -243,6 +245,108 @@ namespace {
double sigma_corr;
};
// Thins the observation set before scaling so the ceres solver sees a
// bounded, resolution-balanced subset of reflections.
//
// Two selection mechanisms are combined:
//  * per-observation: observations with I/sigma below Isigma_cutoff are
//    flagged via the mutable ObsRef::selected member;
//  * per-unique-HKL: entries of hkl_selected are cleared for HKL slots that
//    fall into an over-populated resolution shell (quasi-random capping) or
//    into a shell with too few reflections to scale reliably.
//
// Parameters:
//   obs               all observations; ObsRef::selected is mutated in place.
//   opt               scaling options; opt.space_group must be set and
//                     opt.d_min_limit_A (if > 0) excludes high-resolution data
//                     from the shell statistics.
//   hkl_selected      in/out: one flag per unique HKL slot; entries are only
//                     ever cleared, never set, by this routine.
//   Isigma_cutoff     minimum I/sigma for an observation to stay selected.
//   min_per_bin       target minimum symmetry-expanded reflections per shell.
//   max_per_bin       hard cap on total observations per shell.
//   n_resolution_bins number of resolution shells used for balancing.
//
// Returns early (no per-HKL deselection) when the surviving observation count
// is below min_per_bin * n_resolution_bins.
//
// NOTE(review): o.r->sigma is divided without a zero check — assumes sigma > 0
// is guaranteed upstream; confirm at the call site.
void select_reflections_by_quasi_random(const std::vector<ObsRef> &obs,
const ScaleMergeOptions &opt,
std::vector<bool> &hkl_selected,
double Isigma_cutoff = 1.0,
int min_per_bin = 2500, // needs optimization
int max_per_bin = 80000,
int n_resolution_bins = 20) {
// Observed resolution range (in d-spacing) accumulated below.
float stat_d_min = std::numeric_limits<float>::max();
float stat_d_max = 0.0f;
const gemmi::SpaceGroup &sg = *opt.space_group;
const gemmi::GroupOps gops = sg.operations();
// Symmetry-operator count: converts unique-HKL counts into (approximate)
// symmetry-expanded observation counts for the per-bin thresholds.
int n_operator = gops.order();
// Per-unique-HKL tally: observation count, last-seen d-spacing, shell index.
struct HKLStats {
int n = 0;
float d = std::numeric_limits<float>::max();
int shell_id = 0;
};
const int nhkl = static_cast<int>(hkl_selected.size());
std::vector<HKLStats> per_hkl(nhkl);
int reflection_above_cutoff = 0;
// Pass 1: apply the I/sigma cutoff per observation and collect per-HKL /
// resolution-range statistics for the shell setup below.
for (const auto &o: obs) {
if (o.r->I / o.r->sigma < Isigma_cutoff) {
o.selected = false;
continue;
}
const auto d = o.r->d;
// NOTE(review): counted before the d_min_limit_A filter below, so the
// total also includes reflections beyond the resolution limit.
reflection_above_cutoff += 1;
if (std::isfinite(d) && d > 0.0f) {
// Reflections past the high-resolution limit do not enter the shell
// statistics (but remain o.selected == true here).
if (opt.d_min_limit_A > 0.0 && d < static_cast<float>(opt.d_min_limit_A))
continue;
stat_d_min = std::min(stat_d_min, d);
stat_d_max = std::max(stat_d_max, d);
auto &hs = per_hkl[o.hkl_slot];
hs.n += 1;
hs.d = d;
}
}
std::cout << "Reflections of I/sigma > " << Isigma_cutoff << " : " << reflection_above_cutoff << std::endl;
// Too few observations overall: skip the per-HKL thinning entirely.
if (reflection_above_cutoff < min_per_bin * n_resolution_bins) {
std::cout << "No additional selection applied before scaling" << std::endl;
return;
}
// Only proceed when a non-degenerate resolution range was observed.
if (stat_d_min < stat_d_max && stat_d_min > 0.0f) {
// Slightly padded bounds so min/max d-values land inside the shells.
const float d_min_pad = stat_d_min * 0.999f;
const float d_max_pad = stat_d_max * 1.001f;
ResolutionShells scaling_shells(d_min_pad, d_max_pad, n_resolution_bins);
// Assign each observed HKL to a resolution shell. Unassigned entries
// (never observed, or GetShell() returns nullopt) keep shell_id == 0.
for (int h = 0; h < nhkl; ++h) {
const auto d = per_hkl[h].d;
if (std::isfinite(d) && d > 0.0f) {
if (opt.d_min_limit_A > 0.0 && d < static_cast<float>(opt.d_min_limit_A))
continue;
auto s = scaling_shells.GetShell(d);
if (s.has_value())
per_hkl[h].shell_id = s.value();
}
}
// Accumulators per shell
struct ShellAccum {
int obs_unique = 0;
int obs_total = 0;
bool selected = true;
};
std::vector<ShellAccum> shell_acc(n_resolution_bins);
// Pass 2: quasi-random capping. HKLs are visited in slot order; once a
// shell exceeds ~1.2x min_per_bin (symmetry-expanded) unique reflections
// or max_per_bin total observations, further HKLs in that shell are
// deselected. Note the cap is checked BEFORE incrementing, and obs_total
// grows even for deselected HKLs — the statement order is load-bearing.
for (int h = 0; h < nhkl; ++h) {
auto &sa = shell_acc[per_hkl[h].shell_id];
if (sa.obs_unique * n_operator > min_per_bin * 1.2 || sa.obs_total > max_per_bin)
hkl_selected[h] = false;
else
sa.obs_unique += 1;
sa.obs_total += per_hkl[h].n;
}
const auto shell_min_res = scaling_shells.GetShellMinRes();
// Report per-shell counts and whether the shell survives the minimum-size
// check; shells with too few reflections are dropped wholesale below.
std::cout << "| d-mean | n_refl_uni | n_refl_tot | selection |" << std::endl;
for (int n=0; n < n_resolution_bins; ++n) {
if (shell_acc[n].obs_unique * n_operator < min_per_bin)
shell_acc[n].selected = false;
if (shell_min_res[n] <= 0.0f) continue;
std::cout << std::setw(8) << std::fixed << std::setprecision(3) << shell_min_res[n]
<< std::setw(12) << std::fixed << std::setprecision(0) << shell_acc[n].obs_unique
<< std::setw(12) << shell_acc[n].obs_total
<< " " << shell_acc[n].selected << std::endl;
}
// Pass 3: clear every HKL belonging to a rejected (under-populated) shell.
for (int h = 0; h < nhkl; ++h) {
auto &sa = shell_acc[per_hkl[h].shell_id];
if (!sa.selected)
hkl_selected[h] = false;
}
}
}
void scale(const ScaleMergeOptions &opt,
std::vector<double> &g,
std::vector<double> &mosaicity,
@@ -250,10 +354,12 @@ namespace {
const std::vector<uint8_t> &image_slot_used,
bool rotation_crystallography,
size_t nhkl,
const std::vector<ObsRef> &obs) {
const std::vector<ObsRef> &obs,
bool selection) {
ceres::Problem problem;
std::vector<double> Itrue(nhkl, 0.0);
std::vector<bool> hkl_selected(nhkl, true);
// Initialize Itrue from per-HKL median of observed intensities
{
@@ -277,8 +383,12 @@ namespace {
double wedge = opt.wedge_deg.value_or(0.0);
select_reflections_by_quasi_random(obs, opt, hkl_selected);
std::vector<bool> is_valid_hkl_slot(nhkl, false);
for (const auto &o: obs) {
if (!o.selected) continue;
if (!hkl_selected[o.hkl_slot]) continue;
switch (opt.partiality_model) {
case ScaleMergeOptions::PartialityModel::Rotation: {
auto *cost = new ceres::AutoDiffCostFunction<IntensityRotResidual, 1, 1, 1, 1, 1>(
@@ -398,6 +508,7 @@ namespace {
options.function_tolerance = 1e-4;
ceres::Solver::Summary summary;
std::cout << "Now start the ceres-solver with residual blocks: " << problem.NumResidualBlocks() << std::endl;
ceres::Solve(options, &problem, &summary);
std::cout << summary.FullReport() << std::endl;
}
@@ -694,6 +805,7 @@ namespace {
o.img_id = img_id;
o.hkl_slot = hkl_slot;
o.sigma = sigma;
o.selected = true;
obs.push_back(o);
}
}
@@ -707,6 +819,7 @@ ScaleMergeResult ScaleAndMergeReflectionsCeres(const std::vector<std::vector<Ref
throw std::invalid_argument("image_cluster must be positive");
const bool rotation_crystallography = opt.wedge_deg.has_value();
const bool selection = opt.selection;
size_t nrefl = 0;
for (const auto &i: observations)
@@ -744,7 +857,8 @@ ScaleMergeResult ScaleAndMergeReflectionsCeres(const std::vector<std::vector<Ref
}
}
scale(opt, g, mosaicity, R_sq, image_slot_used, rotation_crystallography, nhkl, obs);
std::cout << "Now scale the reflections: " << nrefl << std::endl;
scale(opt, g, mosaicity, R_sq, image_slot_used, rotation_crystallography, nhkl, obs, selection);
ScaleMergeResult out;

View File

@@ -50,6 +50,8 @@ struct ScaleMergeOptions {
bool refine_wedge = false;
bool selection = true;
enum class PartialityModel {Fixed, Rotation, Unity, Still} partiality_model = PartialityModel::Fixed;
};

View File

@@ -1,8 +1,6 @@
#!/bin/bash
set -euo pipefail
python3.11 -m venv tmp_venv/
python3.12 -m venv tmp_venv/
source tmp_venv/bin/activate
pip install -r docs/requirements.txt

View File

@@ -18,48 +18,26 @@ int main(int argc, char **argv) {
RegisterHDF5Filter();
int64_t nimages_out = 100;
std::string prefix = "writing_test";
FileWriterFormat format = FileWriterFormat::NXmxLegacy;
std::optional<int64_t> images_per_file;
std::optional<float> rotation;
int opt;
while ((opt = getopt(argc, argv, "o:n:Vf:R:")) != -1) {
switch (opt) {
case 'o':
prefix = optarg;
break;
case 'n':
nimages_out = atoll(optarg);
break;
case 'V':
format = FileWriterFormat::NXmxVDS;
break;
case 'R':
rotation = atof(optarg);
break;
case 'f':
images_per_file = atoll(optarg);
if (images_per_file.value() <= 0) {
std::cerr << "Invalid number of images per file: " << optarg << std::endl;
exit(EXIT_FAILURE);
}
images_per_file = atoll(optarg);
break;
default:
std::cout << "Usage: ./jfjoch_hdf5_test <JF4M hdf5 file> [-o <prefix>] [-n <num images>] [-V] [-f <num images per file>] [-R<rotation angle>]" << std::endl;
exit(EXIT_FAILURE);
}
}
if (optind >= argc) {
std::cout << "Usage: ./jfjoch_hdf5_test <JF4M hdf5 file> [-o <prefix>] [-n <num images>] [-V] [-f <num images per file>] [-R<rotation angle>]" << std::endl;
if ((argc < 2) || (argc > 4)) {
std::cout << "Usage: ./jfjoch_hdf5_test <JF4M hdf5 file> {{<#images>} <rate in Hz>}" << std::endl;
std::cout << std::endl;
std::cout << "Env. variables:" << std::endl;
std::cout << "HDF5DATASET_WRITE_TEST_PREFIX" << std::endl;
std::cout << "HDF5MASTER_NEW_FORMAT" << std::endl;
exit(EXIT_FAILURE);
}
HDF5ReadOnlyFile data(argv[optind]);
int64_t nimages_out = 100;
double rate = 2200;
if (argc >= 3)
nimages_out = atoi(argv[2]);
if (argc >= 4)
rate = atof(argv[3]);
std::chrono::microseconds period_us((rate == 0) ? 0 : (int64_t) (1.0e6 / rate));
HDF5ReadOnlyFile data(argv[1]);
HDF5DataSet dataset(data, "/entry/data/data");
HDF5DataSpace file_space(dataset);
@@ -75,8 +53,6 @@ int main(int argc, char **argv) {
x.BeamX_pxl(1090).BeamY_pxl(1136).DetectorDistance_mm(75).IncidentEnergy_keV(WVL_1A_IN_KEV);
x.MaskModuleEdges(true);
x.MaskChipEdges(true);
if (rotation && rotation.value() != 0.0)
x.Goniometer(GoniometerAxis("omega", 0, rotation.value(), Coord(-1,0,0), std::nullopt));
if ((file_space.GetDimensions()[1] == 2164) && (file_space.GetDimensions()[2] == 2068)) {
std::cout << "JF4M with gaps detected (2068 x 2164)" << std::endl;
@@ -88,12 +64,16 @@ int main(int argc, char **argv) {
logger.Info("Number of images in the original dataset: " + std::to_string(nimages));
// Set file name
x.FilePrefix(prefix);
x.SetFileWriterFormat(format);
x.OverwriteExistingFiles(true);
if (std::getenv("HDF5DATASET_WRITE_TEST_PREFIX") == nullptr)
x.FilePrefix("writing_test");
else
x.FilePrefix(std::getenv("HDF5DATASET_WRITE_TEST_PREFIX"));
if (images_per_file.has_value())
x.ImagesPerFile(images_per_file.value());
if (std::getenv("HDF5MASTER_NEW_FORMAT") != nullptr) {
std::cout << "Using new format for HDF5 master file" << std::endl;
x.SetFileWriterFormat(FileWriterFormat::NXmxVDS);
} else
x.SetFileWriterFormat(FileWriterFormat::NXmxLegacy);
x.ImagesPerTrigger(nimages);
@@ -147,6 +127,8 @@ int main(int argc, char **argv) {
size_t total_image_size = 0;
for (int i = 0; i < nimages_out; i++) {
std::this_thread::sleep_until(start_time + i * period_us);
DataMessage message{};
message.image = CompressedImage(output[i % nimages], x.GetXPixelsNum(), x.GetYPixelsNum(),
x.GetImageMode(), x.GetCompressionAlgorithm());

View File

@@ -2,6 +2,7 @@
// SPDX-License-Identifier: GPL-3.0-only
#include <iostream>
#include <iomanip>
#include <vector>
#include <string>
#include <unistd.h>
@@ -44,12 +45,15 @@ void print_usage(Logger &logger) {
logger.Info(" -d<num> High resolution limit for spot finding (default: 1.5)");
logger.Info(" -D<num> High resolution limit for scaling/merging (default: 0.0; no limit)");
logger.Info(" -S<num> Space group number");
logger.Info(" -M Scale and merge (refine mosaicity) and write scaled.hkl + image.dat");
logger.Info(" -M[num] Scale and merge (refine mosaicity) and write scaled.hkl + image.dat, unless indexing rate is below threshold (default: 0.5)");
logger.Info(" -P<txt> Partiality refinement fixed|rot|unity (default: fixed)");
logger.Info(" -A Anomalous mode (don't merge Friedel pairs)");
logger.Info(" -C<cell> Fix reference unit cell: -C\"a,b,c,alpha,beta,gamma\" (comma-separated, no spaces; quotes optional)");
logger.Info(" -c<num> Max spot count (default: 250)");
logger.Info(" -W HDF5 file with analysis results is written");
logger.Info(" -W<txt> HDF5 file with analysis results is written. 'l' or 'light' deactivates image-output");
logger.Info(" -T<num> Noise sigma level for spot finding (default: 3.0)");
logger.Info(" -a Use all reflection for scaling without filtering (default: false)");
logger.Info(" -w Reflection wedge in scaling (default: false)");
}
void trim_in_place(std::string& t) {
@@ -60,6 +64,36 @@ void trim_in_place(std::string& t) {
t = t.substr(b, e - b);
};
// Print a merging-statistics table: one row per resolution shell followed by
// a separated "Overall" summary row, all emitted through the given logger.
// Shells with zero unique reflections are omitted; a non-positive
// completeness is shown as "N/A".
void print_statistics(Logger &logger, const MergeStatistics &stats) {
// Format completeness as a percentage, or the N/A placeholder.
const auto completeness_text = [](double c) -> std::string {
return (c > 0.0) ? fmt::format("{:8.1f}%", c * 100.0) : " N/A";
};
// Horizontal rule matching the table's column widths.
const auto rule = [&logger]() {
logger.Info(" {:->8s} {:->8s} {:->8s} {:->8s} {:->8s} {:->10s}",
"", "", "", "", "", "");
};
logger.Info("");
logger.Info(" {:>8s} {:>8s} {:>8s} {:>8s} {:>8s} {:>10s}",
"d_min", "N_obs", "N_uniq", "Rmeas", "<I/sig>", "Complete");
rule();
// Per-shell rows.
for (const auto &shell: stats.shells) {
if (shell.unique_reflections == 0)
continue;
logger.Info(" {:8.2f} {:8d} {:8d} {:8.3f}% {:8.1f} {:>10s}",
shell.d_min, shell.total_observations, shell.unique_reflections,
shell.rmeas * 100, shell.mean_i_over_sigma,
completeness_text(shell.completeness));
}
// Overall summary row, set off by a second rule.
rule();
const auto &overall = stats.overall;
logger.Info(" {:>8s} {:8d} {:8d} {:8.3f}% {:8.1f} {:>10s}",
"Overall", overall.total_observations, overall.unique_reflections,
overall.rmeas * 100, overall.mean_i_over_sigma,
completeness_text(overall.completeness));
logger.Info("");
}
std::optional<UnitCell> parse_unit_cell_arg(const char* arg) {
if (!arg)
return std::nullopt;
@@ -132,8 +166,13 @@ int main(int argc, char **argv) {
std::optional<int> space_group_number;
std::optional<UnitCell> fixed_reference_unit_cell;
bool write_output = false;
bool write_output_noimage = false;
bool filtering = true;
std::optional<int64_t> max_spot_count_override;
float sigma_spot_finding = 3.0;
std::optional<float> merging_threshold;
IndexingAlgorithmEnum indexing_algorithm = IndexingAlgorithmEnum::Auto;
bool refine_wedge = false;
ScaleMergeOptions::PartialityModel partiality_model = ScaleMergeOptions::PartialityModel::Fixed;
@@ -146,7 +185,7 @@ int main(int argc, char **argv) {
}
int opt;
while ((opt = getopt(argc, argv, "o:N:s:e:vc:R::FX:xd:S:MP:AD:C:W")) != -1) {
while ((opt = getopt(argc, argv, "o:N:s:e:vc:R::FX:xd:S:M::P:AD:C:T:W:aw")) != -1) {
switch (opt) {
case 'o':
output_prefix = optarg;
@@ -162,6 +201,10 @@ int main(int argc, char **argv) {
break;
case 'W':
write_output = true;
if (strcmp(optarg, "light") == 0 || strcmp(optarg, "l") == 0) {
write_output_noimage = true;
logger.Warning("Image data will not be saved.");
}
break;
case 'v':
verbose = true;
@@ -204,6 +247,12 @@ int main(int argc, char **argv) {
case 'x':
refine_beam_center = false;
break;
case 'a':
filtering = false;
break;
case 'w':
refine_wedge = true;
break;
case 'D':
d_min_scale_merge = atof(optarg);
logger.Info("High resolution limit for scaling/merging set to {:.2f} A", d_min_spot_finding);
@@ -213,10 +262,15 @@ int main(int argc, char **argv) {
break;
case 'M':
run_scaling = true;
if (optarg) merging_threshold = atof(optarg);
break;
case 'A':
anomalous_mode = true;
break;
case 'T':
sigma_spot_finding = atof(optarg);
logger.Info("Noise threshold level for spot finding set to {:.2f} sigma", sigma_spot_finding);
break;
case 'C': {
auto uc = parse_unit_cell_arg(optarg);
if (!uc.has_value()) {
@@ -296,8 +350,11 @@ int main(int argc, char **argv) {
experiment.OverwriteExistingFiles(true);
experiment.PolarizationFactor(0.99);
if (fixed_reference_unit_cell.has_value())
if (fixed_reference_unit_cell.has_value()) {
experiment.SetUnitCell(*fixed_reference_unit_cell);
} else {
experiment.SetUnitCell({});
}
if (max_spot_count_override.has_value()) {
experiment.MaxSpotCount(max_spot_count_override.value());
@@ -308,6 +365,8 @@ int main(int argc, char **argv) {
IndexingSettings indexing_settings;
indexing_settings.Algorithm(indexing_algorithm);
indexing_settings.RotationIndexing(rotation_indexing);
if (rotation_indexing)
logger.Info("Rotation indexing is activated.");
if (rotation_indexing_range.has_value())
indexing_settings.RotationIndexingMinAngularRange_deg(rotation_indexing_range.value());
@@ -317,10 +376,27 @@ int main(int argc, char **argv) {
indexing_settings.GeomRefinementAlgorithm(GeomRefinementAlgorithmEnum::None);
experiment.ImportIndexingSettings(indexing_settings);
switch (experiment.GetIndexingAlgorithm()) {
case IndexingAlgorithmEnum::FFBIDX:
logger.Info("Indexer used: FFBIDX");
break;
case IndexingAlgorithmEnum::FFTW:
logger.Info("Indexer used: FFTW");
break;
case IndexingAlgorithmEnum::FFT:
logger.Info("Indexer used: FFT (CUDA)");
break;
case IndexingAlgorithmEnum::None:
logger.Warning("Indexer not defined!");
return 0;
default: ;
}
SpotFindingSettings spot_settings;
spot_settings.enable = true;
spot_settings.indexing = true;
spot_settings.high_resolution_limit = d_min_spot_finding;
spot_settings.signal_to_noise_threshold = sigma_spot_finding;
if (d_min_scale_merge > 0)
spot_settings.high_resolution_limit = d_min_spot_finding;
@@ -396,6 +472,10 @@ int main(int argc, char **argv) {
compressed_buffer.resize(MaxCompressedSize(experiment.GetCompressionAlgorithm(),
experiment.GetPixelsNum(),
experiment.GetByteDepthImage()));
auto size = compressor.Compress(compressed_buffer.data(),
compressed_buffer.data(),
experiment.GetPixelsNum(),
sizeof(uint8_t));
// Thread-local analysis resources
MXAnalysisWithoutFPGA analysis(experiment, mapping, pixel_mask, indexer);
@@ -446,10 +526,11 @@ int main(int argc, char **argv) {
auto image_end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<float> image_duration = image_end_time - image_start_time;
auto size = compressor.Compress(compressed_buffer.data(),
img->Image().data(),
experiment.GetPixelsNum(),
sizeof(int32_t));
if (!write_output_noimage)
size = compressor.Compress(compressed_buffer.data(),
img->Image().data(),
experiment.GetPixelsNum(),
sizeof(int32_t));
msg.image = CompressedImage(compressed_buffer.data(),
size, experiment.GetXPixelsNum(),
@@ -482,7 +563,7 @@ int main(int argc, char **argv) {
}
// Progress log
if (current_idx_offset > 0 && current_idx_offset % 100 == 0) {
if ((current_idx_offset > 0 && (current_idx_offset+1) % 100 == 0) || image_idx == end_image - 1) {
std::optional<float> indexing_rate;
{
std::lock_guard<std::mutex> lock(plots_mutex);
@@ -491,11 +572,21 @@ int main(int argc, char **argv) {
if (indexing_rate.has_value()) {
logger.Info("Processed {} / {} images (indexing rate {:.1f}%)",
current_idx_offset, images_to_process,
current_idx_offset+1, images_to_process,
indexing_rate.value() * 100.0f);
} else {
logger.Info("Processed {} / {} images (indexing rate N/A)",
current_idx_offset, images_to_process);
current_idx_offset+1, images_to_process);
}
if (image_idx == end_image - 1) {
if (!merging_threshold.has_value()) merging_threshold = 0.5f;
if (!indexing_rate.has_value()) {
run_scaling = false;
} else if (indexing_rate.value() < merging_threshold) {
run_scaling = false;
logger.Warning("Not proceed to scale and merge with lower indexing rate: {:.1f}%",
indexing_rate.value()*100.0f);
}
}
}
}
@@ -547,7 +638,6 @@ int main(int argc, char **argv) {
logger.Info("Rotation Indexing found lattice");
}
// --- Optional: run scaling (mosaicity refinement) on accumulated reflections ---
// --- Optional: run scaling (mosaicity refinement) on accumulated reflections ---
if (run_scaling) {
logger.Info("Running scaling (mosaicity refinement) ...");
@@ -558,6 +648,8 @@ int main(int argc, char **argv) {
scale_opts.max_solver_time_s = 240.0; // generous cutoff for now
scale_opts.merge_friedel = !anomalous_mode;
scale_opts.d_min_limit_A = d_min_scale_merge.value_or(0.0);
scale_opts.selection = filtering;
scale_opts.refine_wedge = refine_wedge;
const bool fixed_space_group = space_group || experiment.GetGemmiSpaceGroup().has_value();
@@ -565,12 +657,16 @@ int main(int argc, char **argv) {
scale_opts.space_group = *space_group;
else
scale_opts.space_group = experiment.GetGemmiSpaceGroup();
if (scale_opts.space_group->number == 0) scale_opts.space_group = *gemmi::find_spacegroup_by_number(1);
logger.Info("Starting SG-no.: {}", scale_opts.space_group->number);
auto scale_start = std::chrono::steady_clock::now();
auto scale_result = indexer.ScaleRotationData(scale_opts);
auto scale_end = std::chrono::steady_clock::now();
double scale_time = std::chrono::duration<double>(scale_end - scale_start).count();
if (scale_result) print_statistics(logger, scale_result->statistics);
if (scale_result && !fixed_space_group) {
logger.Info("Searching for space group from P1-merged reflections ...");
@@ -579,13 +675,16 @@ int main(int argc, char **argv) {
sg_opts.centering = '\0';
sg_opts.merge_friedel = !anomalous_mode;
sg_opts.d_min_limit_A = d_min_scale_merge.value_or(0.0);
sg_opts.min_i_over_sigma = 0.0;
sg_opts.min_i_over_sigma = 4.0; // 0.0; follows the DIALS's default
sg_opts.min_operator_cc = 0.80;
sg_opts.min_pairs_per_operator = 20;
sg_opts.min_total_compared = 100;
sg_opts.test_systematic_absences = true;
auto sg_search_start = std::chrono::steady_clock::now();
const auto sg_search = SearchSpaceGroup(scale_result->merged, sg_opts);
auto sg_search_end = std::chrono::steady_clock::now();
double sg_search_time = std::chrono::duration<double>(sg_search_end - sg_search_start).count();
logger.Info("");
{
@@ -599,17 +698,24 @@ int main(int argc, char **argv) {
logger.Info("");
if (sg_search.best_space_group.has_value()) {
logger.Info("Re-running scaling in detected space group {}", sg_search.best_space_group->short_name());
if (sg_search.best_space_group->number != 0) {
if (sg_search.best_space_group->number != scale_opts.space_group->number) {
logger.Info("Re-running scaling in detected space group {}", sg_search.best_space_group->short_name());
logger.Info("SG-search wall-clock time: {:.2f} s", sg_search_time);
scale_opts.space_group = *sg_search.best_space_group;
scale_opts.space_group = *sg_search.best_space_group;
auto rescale_start = std::chrono::steady_clock::now();
auto refined_scale_result = indexer.ScaleRotationData(scale_opts);
auto rescale_end = std::chrono::steady_clock::now();
auto rescale_start = std::chrono::steady_clock::now();
auto refined_scale_result = indexer.ScaleRotationData(scale_opts);
auto rescale_end = std::chrono::steady_clock::now();
if (refined_scale_result) {
scale_result = std::move(refined_scale_result);
scale_time += std::chrono::duration<double>(rescale_end - rescale_start).count();
if (refined_scale_result) {
scale_result = std::move(refined_scale_result);
scale_time += std::chrono::duration<double>(rescale_end - rescale_start).count();
}
} else {
logger.Info("No space group update indicated.");
}
}
} else {
logger.Warning("No space group accepted; keeping P1-merged result");
@@ -623,36 +729,7 @@ int main(int argc, char **argv) {
scale_time, scale_result->merged.size());
// Print resolution-shell statistics table
{
const auto &stats = scale_result->statistics;
logger.Info("");
logger.Info(" {:>8s} {:>8s} {:>8s} {:>8s} {:>8s} {:>10s}",
"d_min", "N_obs", "N_uniq", "Rmeas", "<I/sig>", "Complete");
logger.Info(" {:->8s} {:->8s} {:->8s} {:->8s} {:->8s} {:->10s}",
"", "", "", "", "", "");
for (const auto &sh: stats.shells) {
if (sh.unique_reflections == 0)
continue;
std::string compl_str = (sh.completeness > 0.0)
? fmt::format("{:8.1f}%", sh.completeness * 100.0)
: " N/A";
logger.Info(" {:8.2f} {:8d} {:8d} {:8.3f}% {:8.1f} {:>10s}",
sh.d_min, sh.total_observations, sh.unique_reflections,
sh.rmeas * 100, sh.mean_i_over_sigma, compl_str);
}
{
const auto &ov = stats.overall;
logger.Info(" {:->8s} {:->8s} {:->8s} {:->8s} {:->8s} {:->10s}",
"", "", "", "", "", "");
std::string compl_str = (ov.completeness > 0.0)
? fmt::format("{:8.1f}%", ov.completeness * 100.0)
: " N/A";
logger.Info(" {:>8s} {:8d} {:8d} {:8.3f}% {:8.1f} {:>10s}",
"Overall", ov.total_observations, ov.unique_reflections,
ov.rmeas * 100, ov.mean_i_over_sigma, compl_str);
}
logger.Info("");
}
print_statistics(logger, scale_result->statistics);
{
const std::string img_path = output_prefix + "_image.dat";
@@ -698,6 +775,8 @@ int main(int argc, char **argv) {
cif_meta.unit_cell = rotation_indexer_ret->lattice.GetUnitCell();
} else if (experiment.GetUnitCell().has_value()) {
cif_meta.unit_cell = experiment.GetUnitCell().value();
} else {
logger.Warning("No UnitCell output");
}
if (scale_opts.space_group.has_value()) {
@@ -745,12 +824,11 @@ int main(int argc, char **argv) {
double frame_rate = static_cast<double>(images_to_process) / processing_time;
logger.Info("");
logger.Info("Processing time: {:.2f} s", processing_time);
logger.Info("Frame rate: {:.2f} Hz", frame_rate);
logger.Info("Total throughput:{:.2f} MB/s", throughput_MBs);
logger.Info("Processing time (excl. scaling): {:8.2f} s", processing_time);
logger.Info("Frame rate: {:8.2f} Hz", frame_rate);
logger.Info("Total throughput: {:8.2f} MB/s", throughput_MBs);
// Print extended stats similar to Receiver
if (!end_msg.indexing_rate.has_value()) {
if (end_msg.indexing_rate.has_value()) {
logger.Info("Indexing rate: {:.2f}%", end_msg.indexing_rate.value() * 100.0);
}