From 1dedd3de33dcc00160bb6460da4b97e30041458b Mon Sep 17 00:00:00 2001
From: Filip Leonarski
Date: Thu, 12 Feb 2026 17:56:32 +0100
Subject: [PATCH] ScaleAndMerge: Add AI generated sigmas

---
 image_analysis/scale_merge/ScaleAndMerge.cpp | 74 ++++++++++++++++++++
 1 file changed, 74 insertions(+)

diff --git a/image_analysis/scale_merge/ScaleAndMerge.cpp b/image_analysis/scale_merge/ScaleAndMerge.cpp
index 9f5c7bd0..1d6a83e3 100644
--- a/image_analysis/scale_merge/ScaleAndMerge.cpp
+++ b/image_analysis/scale_merge/ScaleAndMerge.cpp
@@ -442,5 +442,79 @@ ScaleMergeResult ScaleAndMergeReflectionsCeres(const std::vector& ob
     std::cout << summary.FullReport() << std::endl;
 
+    // --- Compute sigma(I_true) from scatter of corrected observations ---
+    {
+        // Accumulate per-HKL weighted residuals
+        // I_corrected_i = I_obs_i / (G_i * partiality_i * lp_i)
+        // sigma(I_true) = 1 / sqrt(sum_i w_i) where w_i = 1/sigma_corrected_i^2
+        // OR from sample variance of I_corrected around I_true
+
+        struct HKLAccum {
+            double sum_w = 0.0;        // sum of weights
+            double sum_w_delta2 = 0.0; // sum of w * (I_corr - I_true)^2
+            int count = 0;
+        };
+        std::vector<HKLAccum> accum(nhkl);
+
+        const double half_wedge = opt.wedge_deg / 2.0;
+
+        for (const auto& o : obs) {
+            const Reflection& r = *o.r;
+            const double lp = SafeInv(static_cast<double>(r.rlp), 1.0);
+            const double G_i = g[o.img_slot];
+
+            // Compute partiality with refined mosaicity
+            double partiality;
+            size_t mos_slot = opt.per_image_mosaicity ? o.img_slot : 0;
+            if (refine_partiality && mosaicity[mos_slot] > 0.0) {
+                double c1 = r.zeta / std::sqrt(2.0);
+                double arg_plus = (r.delta_phi_deg + half_wedge) * c1 / mosaicity[mos_slot];
+                double arg_minus = (r.delta_phi_deg - half_wedge) * c1 / mosaicity[mos_slot];
+                partiality = (std::erf(arg_plus) - std::erf(arg_minus)) / 2.0;
+            } else {
+                partiality = r.partiality;
+            }
+            if (partiality < 1e-6) partiality = 1e-6;
+
+            const double correction = G_i * partiality * lp;
+            if (correction <= 0.0) continue;
+
+            const double I_corr = static_cast<double>(r.I) / correction;
+            const double sigma_corr = o.sigma / correction;
+            const double w = 1.0 / (sigma_corr * sigma_corr);
+
+            const double delta = I_corr - Itrue[o.hkl_slot];
+            accum[o.hkl_slot].sum_w += w;
+            accum[o.hkl_slot].sum_w_delta2 += w * delta * delta;
+            accum[o.hkl_slot].count++;
+        }
+
+        // GoF^2 (global reduced chi-squared) for rescaling
+        double global_sum_w_delta2 = 0.0;
+        double global_sum_w = 0.0;
+        int global_n = 0;
+        for (int h = 0; h < nhkl; ++h) {
+            global_sum_w_delta2 += accum[h].sum_w_delta2;
+            global_sum_w += accum[h].sum_w;
+            global_n += accum[h].count;
+        }
+        out.gof2 = (global_n > nhkl)
+                       ? global_sum_w_delta2 / static_cast<double>(global_n - nhkl)
+                       : 1.0;
+
+        for (int h = 0; h < nhkl; ++h) {
+            if (accum[h].count >= 2 && accum[h].sum_w > 0.0) {
+                // Internal consistency: sigma^2 = (1/sum_w) * chi^2_h
+                // where chi^2_h = sum_w_delta2 / (n-1)
+                double chi2_h = accum[h].sum_w_delta2 / static_cast<double>(accum[h].count - 1);
+                out.merged[h].sigma = std::sqrt(chi2_h / accum[h].sum_w);
+            } else if (accum[h].sum_w > 0.0) {
+                // Single observation: use propagated sigma
+                out.merged[h].sigma = std::sqrt(1.0 / accum[h].sum_w);
+            }
+            // else: sigma stays 0.0 (no usable data)
+        }
+    }
+
     return out;
 }
 