Update convergence criterion for denoise
Some checks failed
Gitea Actions Demo / build_test (3.11.12) (push) Failing after 8s
Gitea Actions Demo / build_test (3.10.17) (push) Successful in 11s
Gitea Actions Demo / build_test (3.13.3) (push) Failing after 9s
Gitea Actions Demo / build_test (3.12.10) (push) Successful in 11s
Gitea Actions Demo / build_test (3.9.22) (push) Successful in 10s

This commit is contained in:
Jakob Lass 2025-05-28 10:29:12 +02:00
parent 00ad81aba6
commit 9fb35dfda4


@@ -612,11 +612,12 @@ class background():
         Me = self.set_e_design_matrix(mu_)
         # Loss function
-        loss = np.zeros(n_epochs, dtype=self.dtype)
-        loss[1] = 1.0
-        k = 1
+        loss = []#np.zeros(n_epochs, dtype=self.dtype)
+        old_loss = 2000000
+        new_loss = 1000000
+        k = 0
-        while (np.abs(loss[k] - loss[k-1]) > 1e-3) and (k < n_epochs-1):
+        while (np.abs(old_loss - new_loss) > 1e-3) and (k < n_epochs):
             # Compute A = Y - B by filling the nans with 0s
             A = np.where(np.isnan(Y_r - b_tmp) == True, 0.0, Y_r - b_tmp)
@@ -640,17 +641,18 @@ class background():
             b_tmp = self.R_operator(self.b)
             # ######################### Compute loss function ##################
-            loss[k] = 0.5 * np.nansum((Y_r - self.X - b_tmp) ** 2) + lambda_ * np.nansum(np.abs(self.X))
+            loss.append(0.5 * np.nansum((Y_r - self.X - b_tmp) ** 2) + lambda_ * np.nansum(np.abs(self.X)))
             for e in range(self.E_size):
-                loss[k] += (beta_/2) * np.matmul(self.b[e, :], np.matmul(Lb_lst[e], self.b[e, :].T))
+                loss[-1] += (beta_/2) * np.matmul(self.b[e, :], np.matmul(Lb_lst[e], self.b[e, :].T))
-            loss[k] += (mu_ / 2) * np.trace(np.matmul(self.X.T, np.matmul(Le, self.X)))
+            loss[-1] += (mu_ / 2) * np.trace(np.matmul(self.X.T, np.matmul(Le, self.X)))
             if verbose:
-                print(" Iteration ", str(k))
-                print(" Loss function: ", loss[k].item())
+                print(" Iteration ", str(k+1))
+                print(" Loss function: ", loss[-1].item())
+            old_loss = new_loss
+            new_loss = loss[-1]
             k += 1
         # Compute the propagated background
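For reference, below is a minimal, self-contained sketch of the stopping rule introduced by this commit: the loop now tracks losses in a growing list and exits once the absolute change between consecutive loss values drops below 1e-3, or once n_epochs iterations have run. The names run_until_converged, compute_loss, and tol are illustrative stand-ins and not part of the actual denoise method.

import numpy as np

def run_until_converged(compute_loss, n_epochs, tol=1e-3, verbose=False):
    # Track per-iteration losses in a growing list instead of a fixed-size array.
    loss = []
    # Sentinel values guarantee the convergence test passes on the first pass.
    old_loss = 2000000
    new_loss = 1000000
    k = 0
    # Stop when consecutive losses differ by less than tol, or after n_epochs.
    while (np.abs(old_loss - new_loss) > tol) and (k < n_epochs):
        loss.append(compute_loss(k))   # one optimisation step and its loss
        if verbose:
            print(" Iteration ", str(k + 1))
            print(" Loss function: ", loss[-1])
        old_loss = new_loss
        new_loss = loss[-1]            # next pass compares the two latest losses
        k += 1
    return loss

# Usage example: a geometrically shrinking loss converges after a few iterations.
losses = run_until_converged(lambda k: 0.5 ** k, n_epochs=100, verbose=True)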