ScaleAndMerge: Add "just" merge function for now

2026-05-10 10:42:56 +02:00
parent e392a3ae1b
commit 3e4e6019c4
2 changed files with 103 additions and 7 deletions
+97 -6
@@ -390,11 +390,9 @@ namespace {
}
void merge(size_t nhkl, ScaleMergeResult &out, const std::vector<ObsRef> &obs) {
- // ---- Classical crystallographic merging: inverse-variance weighted mean ----
- // For each observation: I_corr = I_obs / correction
- // sigma_corr = sigma_obs / correction
- // w = 1 / sigma_corr^2 = correction^2 / sigma_obs^2
// Merging
+ // For weighting, we are extra multiplying weight by total correction value to down-weight reflections
+ // which come from very weak images and/or low partiality
struct HKLAccum {
double sum_wI = 0.0; // sum of w * I_corr
double sum_w = 0.0; // sum of w
@@ -406,7 +404,7 @@ namespace {
continue;
const double I_corr = static_cast<double>(o.r->I) * o.correction;
const double sigma_corr = o.sigma * o.correction;
- const double w = 1.0 / (sigma_corr * sigma_corr);
+ const double w = o.correction / (sigma_corr * sigma_corr);
auto &a = accum[o.hkl_slot];
a.sum_wI += w * I_corr;
a.sum_w += w;
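
With correction stored as 1 / (G * partiality * LP), the weight used above works out to w = correction / sigma_corr^2 = (G * partiality * LP) / sigma_obs^2, i.e. the raw inverse-variance weight scaled down by the total correction factor, which is what down-weights weak and low-partiality observations. Below is a minimal standalone sketch of that accumulation for sanity-checking the algebra; the Obs struct and weighted_merge helper are illustrative stand-ins, not the project's ObsRef or merge(), and the final weighted mean is only the presumed use of sum_wI / sum_w (the rest of merge() is not part of this diff).

#include <cstdio>
#include <vector>

// Illustrative stand-in for one observation of a reflection (not the project's ObsRef).
struct Obs {
    double I;           // raw integrated intensity
    double sigma;       // raw sigma(I)
    double correction;  // 1 / (G * partiality * LP), with G = 1
};

// Mirrors the accumulation in the hunk above: I_corr = I * correction,
// sigma_corr = sigma * correction, w = correction / sigma_corr^2,
// then the weighted mean sum(w * I_corr) / sum(w).
double weighted_merge(const std::vector<Obs> &obs) {
    double sum_wI = 0.0, sum_w = 0.0;
    for (const auto &o : obs) {
        const double I_corr = o.I * o.correction;
        const double sigma_corr = o.sigma * o.correction;
        const double w = o.correction / (sigma_corr * sigma_corr);
        sum_wI += w * I_corr;
        sum_w += w;
    }
    return sum_w > 0.0 ? sum_wI / sum_w : 0.0;
}

int main() {
    // A full observation and a weak partial one (partiality * LP = 0.25, so correction = 4).
    // The partial enters with 1/4 of the raw 1/sigma^2 weight instead of equal weight.
    std::vector<Obs> obs = {{100.0, 10.0, 1.0}, {30.0, 10.0, 4.0}};
    std::printf("merged I = %.2f\n", weighted_merge(obs));
    return 0;
}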
@@ -769,3 +767,96 @@ ScaleMergeResult ScaleAndMergeReflectionsCeres(const std::vector<std::vector<Ref
return out;
}
ScaleMergeResult MergeReflections(const std::vector<std::vector<Reflection>> &observations,
const ScaleMergeOptions &opt) {
size_t nrefl = 0;
for (const auto &i: observations)
nrefl += i.size();
std::vector<ObsRef> obs;
obs.reserve(nrefl);
std::unordered_map<HKLKey, int, HKLKeyHash> hklToSlot;
hklToSlot.reserve(nrefl);
for (int i = 0; i < (int)observations.size(); i++) {
for (const auto &r: observations[i]) {
if (!std::isfinite(r.I) || !std::isfinite(r.d) || r.d <= 0.0f)
continue;
if (opt.d_min_limit_A > 0.0 && r.d < opt.d_min_limit_A)
continue;
if (!std::isfinite(r.rlp) || r.rlp == 0.0f)
continue;
if (r.partiality <= opt.min_partiality_for_merge)
continue;
// correction stored as 1 / (G * partiality * LP), with G = 1
const double lp = SafeInv(static_cast<double>(r.rlp), 1.0);
const double correction = SafeInv(r.partiality * lp, 0.0);
if (correction <= 0.0)
continue;
int hkl_slot;
try {
const HKLKey key = CanonicalizeHKLKey(r, opt);
auto it = hklToSlot.find(key);
if (it == hklToSlot.end()) {
hkl_slot = static_cast<int>(hklToSlot.size());
hklToSlot.emplace(key, hkl_slot);
} else {
hkl_slot = it->second;
}
} catch (...) {
continue;
}
ObsRef o;
o.r = &r;
o.img_id = i;
o.hkl_slot = hkl_slot;
o.sigma = SafeSigma(r.sigma, opt.min_sigma);
o.correction = correction;
obs.push_back(o);
}
}
const int nhkl = static_cast<int>(hklToSlot.size());
std::vector<HKLKey> slotToHKL(nhkl);
for (const auto &kv: hklToSlot)
slotToHKL[kv.second] = kv.first;
ScaleMergeResult out;
out.merged.resize(nhkl);
for (int h = 0; h < nhkl; ++h) {
out.merged[h].h = slotToHKL[h].h;
out.merged[h].k = slotToHKL[h].k;
out.merged[h].l = slotToHKL[h].l;
out.merged[h].I = 0.0;
out.merged[h].sigma = 0.0;
out.merged[h].d = 0.0;
}
// Populate d from median of observations per HKL
{
std::vector<std::vector<double>> per_hkl_d(nhkl);
for (const auto &o: obs) {
const double d_val = static_cast<double>(o.r->d);
if (std::isfinite(d_val) && d_val > 0.0)
per_hkl_d[o.hkl_slot].push_back(d_val);
}
for (int h = 0; h < nhkl; ++h) {
auto &v = per_hkl_d[h];
if (!v.empty()) {
std::nth_element(v.begin(), v.begin() + (long)(v.size() / 2), v.end());
out.merged[h].d = v[v.size() / 2];
}
}
}
merge(nhkl, out, obs);
stats(opt, nhkl, out, obs);
return out;
}
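
The hkl_slot bookkeeping above is the usual interning pattern: each distinct canonical HKL gets a dense integer index the first time it is seen, so the per-HKL accumulators and out.merged can live in flat vectors indexed by slot rather than hash maps. A reduced sketch of just that pattern, with a hypothetical Key/KeyHash pair standing in for the project's HKLKey/HKLKeyHash:

#include <cstddef>
#include <functional>
#include <unordered_map>

// Hypothetical canonical Miller-index key; the real code uses HKLKey and HKLKeyHash.
struct Key {
    int h, k, l;
    bool operator==(const Key &o) const { return h == o.h && k == o.k && l == o.l; }
};
struct KeyHash {
    std::size_t operator()(const Key &k) const {
        // Simple hash combine over the three indices.
        std::size_t seed = std::hash<int>{}(k.h);
        seed ^= std::hash<int>{}(k.k) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
        seed ^= std::hash<int>{}(k.l) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
        return seed;
    }
};

// Returns the dense slot for key, assigning the next free index on first sight.
int slot_for(std::unordered_map<Key, int, KeyHash> &keyToSlot, const Key &key) {
    auto it = keyToSlot.find(key);
    if (it != keyToSlot.end())
        return it->second;
    const int slot = static_cast<int>(keyToSlot.size());
    keyToSlot.emplace(key, slot);
    return slot;
}

One note on the d-spacing block: std::nth_element with index size()/2 selects the upper of the two middle values for even-sized sets, so the stored d is a representative rather than interpolated median, which should be adequate as a per-HKL resolution.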
+6 -1
@@ -91,4 +91,9 @@ struct ScaleMergeResult {
};
ScaleMergeResult ScaleAndMergeReflectionsCeres(const std::vector<std::vector<Reflection>>& observations,
const ScaleMergeOptions& opt = {});
/// Merge reflections without any scaling (G = 1, partiality and LP taken as-is from the Reflection).
/// Uses the same HKL canonicalization and statistics as ScaleAndMergeReflectionsCeres.
ScaleMergeResult MergeReflections(const std::vector<std::vector<Reflection>>& observations,
const ScaleMergeOptions& opt = {});
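
A minimal usage sketch of the new entry point. The wrapper name merge_only is illustrative and the fragment is not compilable on its own, since the header that declares MergeReflections, ScaleMergeOptions and ScaleMergeResult is not named in this diff:

#include <vector>
// #include the project's scaling/merging header here; its path is not shown in this diff.

// observations[i] holds the reflections integrated from image i.
ScaleMergeResult merge_only(const std::vector<std::vector<Reflection>> &observations) {
    ScaleMergeOptions opt{};  // defaults; d_min_limit_A and min_partiality_for_merge can be tightened here
    return MergeReflections(observations, opt);  // no scaling: G = 1, partiality and LP taken as-is
}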