From 9fb35dfda47e0fdc679b1a1083f5f39379e12215 Mon Sep 17 00:00:00 2001
From: Jakob Lass
Date: Wed, 28 May 2025 10:29:12 +0200
Subject: [PATCH] Update convergence criterion for denoise

---
 src/AMBER/background.py | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/src/AMBER/background.py b/src/AMBER/background.py
index 951fdc7..a1d0677 100644
--- a/src/AMBER/background.py
+++ b/src/AMBER/background.py
@@ -612,11 +612,12 @@ class background():
         Me = self.set_e_design_matrix(mu_)
 
         # Loss function
-        loss = np.zeros(n_epochs, dtype=self.dtype)
-        loss[1] = 1.0
-        k = 1
+        loss = []#np.zeros(n_epochs, dtype=self.dtype)
+        old_loss = 2000000
+        new_loss = 1000000
+        k = 0
 
-        while (np.abs(loss[k] - loss[k-1]) > 1e-3) and (k < n_epochs-1):
+        while (np.abs(old_loss - new_loss) > 1e-3) and (k < n_epochs):
 
             # Compute A = Y - B by filling the nans with 0s
             A = np.where(np.isnan(Y_r - b_tmp) == True, 0.0, Y_r - b_tmp)
@@ -640,17 +641,18 @@ class background():
                 b_tmp = self.R_operator(self.b)
 
             # ######################### Compute loss function ##################
-            loss[k] = 0.5 * np.nansum((Y_r - self.X - b_tmp) ** 2) + lambda_ * np.nansum(np.abs(self.X))
+            loss.append(0.5 * np.nansum((Y_r - self.X - b_tmp) ** 2) + lambda_ * np.nansum(np.abs(self.X)))
 
             for e in range(self.E_size):
-                loss[k] += (beta_/2) * np.matmul(self.b[e, :], np.matmul(Lb_lst[e], self.b[e, :].T))
+                loss[-1] += (beta_/2) * np.matmul(self.b[e, :], np.matmul(Lb_lst[e], self.b[e, :].T))
 
-            loss[k] += (mu_ / 2) * np.trace(np.matmul(self.X.T, np.matmul(Le, self.X)))
+            loss[-1] += (mu_ / 2) * np.trace(np.matmul(self.X.T, np.matmul(Le, self.X)))
 
             if verbose:
-                print(" Iteration ", str(k))
-                print(" Loss function: ", loss[k].item())
-
+                print(" Iteration ", str(k+1))
+                print(" Loss function: ", loss[-1].item())
+            old_loss = new_loss
+            new_loss = loss[-1]
             k += 1
 
         # Compute the propagated background
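
Notes on the change (not part of the patch itself): the old stopping test read loss[k]
before that entry had been computed, so it compared a zero-initialised slot against the
last computed loss and effectively tested the loss magnitude rather than its change.
The new rule keeps a rolling pair old_loss/new_loss and stops once two successive values
of the objective

    L = 0.5 * nansum((Y_r - X - B)^2) + lambda_ * nansum(|X|)
        + sum_e (beta_/2) * b[e] @ Lb_lst[e] @ b[e].T
        + (mu_/2) * tr(X.T @ Le @ X)

differ by less than 1e-3, or after n_epochs iterations. A minimal standalone sketch of
just the new criterion; compute_loss is a hypothetical stand-in for one pass of the
AMBER update step, not code from the patch:

    import numpy as np

    def run_until_converged(compute_loss, n_epochs, tol=1e-3):
        # Rolling pair of losses, seeded far apart so the first test passes,
        # mirroring the 2000000 / 1000000 initial values in the patch.
        old_loss, new_loss = 2e6, 1e6
        loss = []  # full loss history, one entry per iteration
        k = 0
        while (np.abs(old_loss - new_loss) > tol) and (k < n_epochs):
            loss.append(compute_loss(k))  # hypothetical solver iteration
            old_loss = new_loss
            new_loss = loss[-1]
            k += 1
        return loss

    # Example: a loss shrinking like 1/(k+1) stops once successive
    # differences drop below tol, well before the epoch cap.
    history = run_until_converged(lambda k: 1.0 / (k + 1), n_epochs=100)

Compared with the old fixed-size array indexed by k, the list plus rolling pair never
reads an unwritten entry and no longer ties the history length to n_epochs.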