diff --git a/image_analysis/scale_merge/ScaleAndMerge.cpp b/image_analysis/scale_merge/ScaleAndMerge.cpp index 6b60439b..d2027c8c 100644 --- a/image_analysis/scale_merge/ScaleAndMerge.cpp +++ b/image_analysis/scale_merge/ScaleAndMerge.cpp @@ -390,11 +390,9 @@ namespace { } void merge(size_t nhkl, ScaleMergeResult &out, const std::vector<ObsRef> &obs) { - // ---- Classical crystallographic merging: inverse-variance weighted mean ---- - // For each observation: I_corr = I_obs / correction - // sigma_corr = sigma_obs / correction - // w = 1 / sigma_corr^2 = correction^2 / sigma_obs^2 - + // Merging + // For weighting, we additionally multiply the weight by the total correction value to down-weight reflections + // which come from very weak images and/or low partiality struct HKLAccum { double sum_wI = 0.0; // sum of w * I_corr double sum_w = 0.0; // sum of w @@ -406,7 +404,7 @@ namespace { continue; const double I_corr = static_cast<double>(o.r->I) * o.correction; const double sigma_corr = o.sigma * o.correction; - const double w = 1.0 / (sigma_corr * sigma_corr); + const double w = o.correction / (sigma_corr * sigma_corr); auto &a = accum[o.hkl_slot]; a.sum_wI += w * I_corr; a.sum_w += w; @@ -769,3 +767,96 @@ ScaleMergeResult ScaleAndMergeReflectionsCeres(const std::vector<std::vector<Reflection>> &observations, +ScaleMergeResult MergeReflections(const std::vector<std::vector<Reflection>> &observations, + const ScaleMergeOptions &opt) { + size_t nrefl = 0; + for (const auto &i: observations) + nrefl += i.size(); + + std::vector<ObsRef> obs; + obs.reserve(nrefl); + + std::unordered_map<HKLKey, int> hklToSlot; + hklToSlot.reserve(nrefl); + + for (int i = 0; i < (int)observations.size(); i++) { + for (const auto &r: observations[i]) { + if (!std::isfinite(r.I) || !std::isfinite(r.d) || r.d <= 0.0f) + continue; + if (opt.d_min_limit_A > 0.0 && r.d < opt.d_min_limit_A) + continue; + if (!std::isfinite(r.rlp) || r.rlp == 0.0f) + continue; + if (r.partiality <= opt.min_partiality_for_merge) + continue; + + // correction stored as 1 / (G * partiality * LP), with G = 1 + const double lp = SafeInv(static_cast<double>(r.rlp), 1.0); + const 
double correction = SafeInv(r.partiality * lp, 0.0); + if (correction <= 0.0) + continue; + + int hkl_slot; + try { + const HKLKey key = CanonicalizeHKLKey(r, opt); + auto it = hklToSlot.find(key); + if (it == hklToSlot.end()) { + hkl_slot = static_cast<int>(hklToSlot.size()); + hklToSlot.emplace(key, hkl_slot); + } else { + hkl_slot = it->second; + } + } catch (...) { + continue; + } + + ObsRef o; + o.r = &r; + o.img_id = i; + o.hkl_slot = hkl_slot; + o.sigma = SafeSigma(r.sigma, opt.min_sigma); + o.correction = correction; + obs.push_back(o); + } + } + + const int nhkl = static_cast<int>(hklToSlot.size()); + + std::vector<HKLKey> slotToHKL(nhkl); + for (const auto &kv: hklToSlot) + slotToHKL[kv.second] = kv.first; + + ScaleMergeResult out; + out.merged.resize(nhkl); + for (int h = 0; h < nhkl; ++h) { + out.merged[h].h = slotToHKL[h].h; + out.merged[h].k = slotToHKL[h].k; + out.merged[h].l = slotToHKL[h].l; + out.merged[h].I = 0.0; + out.merged[h].sigma = 0.0; + out.merged[h].d = 0.0; + } + + // Populate d from median of observations per HKL + { + std::vector<std::vector<double>> per_hkl_d(nhkl); + for (const auto &o: obs) { + const double d_val = static_cast<double>(o.r->d); + if (std::isfinite(d_val) && d_val > 0.0) + per_hkl_d[o.hkl_slot].push_back(d_val); + } + for (int h = 0; h < nhkl; ++h) { + auto &v = per_hkl_d[h]; + if (!v.empty()) { + std::nth_element(v.begin(), v.begin() + (long)(v.size() / 2), v.end()); + out.merged[h].d = v[v.size() / 2]; + } + } + } + + merge(nhkl, out, obs); + stats(opt, nhkl, out, obs); + + return out; +} diff --git a/image_analysis/scale_merge/ScaleAndMerge.h b/image_analysis/scale_merge/ScaleAndMerge.h index d154e941..e92b7e69 100644 --- a/image_analysis/scale_merge/ScaleAndMerge.h +++ b/image_analysis/scale_merge/ScaleAndMerge.h @@ -91,4 +91,9 @@ struct ScaleMergeResult { }; ScaleMergeResult ScaleAndMergeReflectionsCeres(const std::vector<std::vector<Reflection>>& observations, - const ScaleMergeOptions& opt = {}); \ No newline at end of file + const ScaleMergeOptions& opt = {}); + +/// Merge 
reflections without any scaling (G = 1, partiality and LP taken as-is from the Reflection). +/// Uses the same HKL canonicalization and statistics as ScaleAndMergeReflectionsCeres. +ScaleMergeResult MergeReflections(const std::vector<std::vector<Reflection>>& observations, + const ScaleMergeOptions& opt = {}); \ No newline at end of file