I'm trying to calibrate the denoiser like this:
import numpy as np
from skimage.restoration import (
    calibrate_denoiser,
    denoise_tv_chambolle,
    denoise_invariant,
)

# Synthetic test image: uniform random noise scaled into [0, 100).
noisy = np.random.random([1200, 1000]) * 100
noise_std = np.std(noisy)

# Candidate TV weights: 50 evenly spaced values from noise_std/10 up to
# 3 * noise_std.
n_weights = 50
weight_range = (noise_std / 10, noise_std * 3)
weights = np.linspace(*weight_range, n_weights)
parameter_ranges_tv = {'weight': weights}

# Self-supervised (J-invariant) calibration: try every weight and record
# the loss for each; the best denoiser itself is discarded here (`_`).
_, (parameters_tested_tv, losses_tv) = calibrate_denoiser(
    noisy,
    denoise_tv_chambolle,
    denoise_parameters=parameter_ranges_tv,
    extra_output=True,
)
This takes about 25 seconds. Is there any way to speed it up? I have tried multiprocessing with 20 cores, but the running time is about the same.
# Parallel evaluation of the candidate weights.
import multiprocessing as mp  # BUG in original: `mp` was used but never imported


def evaluate_weight(weight):
    """Denoise the module-level `noisy` image with a given TV weight.

    Returns the ``(weight, loss)`` pair where loss is the mean squared
    error between the denoised result and the noisy input.

    NOTE(review): MSE against the *noisy* input is not a valid calibration
    loss — it is minimized by doing no denoising at all (the smallest
    weight always wins). `calibrate_denoiser` instead uses a J-invariant
    self-supervised loss; reproduce that loss if the goal is to match its
    selected weight.
    """
    denoised = denoise_tv_chambolle(noisy, weight=weight)
    loss = np.mean((denoised - noisy) ** 2)
    return weight, loss


# The __main__ guard is required: under the 'spawn' start method (the
# default on Windows and macOS) each worker re-imports this module, and an
# unguarded Pool would recursively spawn workers. NOTE(review): under
# 'spawn' the workers also re-execute the top-level script, so `noisy`
# would be regenerated with different random values in each worker —
# seed the RNG or use the 'fork' start method to keep data consistent.
if __name__ == "__main__":
    with mp.Pool(processes=mp.cpu_count()) as pool:
        results = pool.map(evaluate_weight, weights)

    # Unzip the (weight, loss) pairs and pick the weight with minimum loss.
    parameters_tested_tv, losses_tv = zip(*results)
    best_weight = parameters_tested_tv[np.argmin(losses_tv)]
    print(f"Best weight: {best_weight}")