diff --git a/utils/GaussianBlur.py b/utils/GaussianBlur.py
new file mode 100644
index 0000000..a5a847e
--- /dev/null
+++ b/utils/GaussianBlur.py
@@ -0,0 +1,49 @@
+"""
+## CycleISP: Real Image Restoration Via Improved Data Synthesis
+## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao
+## CVPR 2020
+## https://arxiv.org/abs/2003.07761
+"""
+
+import torch
+import torch.nn as nn
+import math
+import numpy as np
+
+def get_gaussian_kernel(kernel_size=21, sigma=5, channels=3):
+    #if not kernel_size: kernel_size = int(2*np.ceil(2*sigma)+1)
+    #print("Kernel is: ",kernel_size)
+    #print("Sigma is: ",sigma)
+    padding = kernel_size//2
+    # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2)
+    x_coord = torch.arange(kernel_size)
+    x_grid = x_coord.repeat(kernel_size).view(kernel_size, kernel_size)
+    y_grid = x_grid.t()
+    xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()
+
+    mean = (kernel_size - 1)/2.
+    variance = sigma**2.
+
+    # Calculate the 2-dimensional gaussian kernel which is
+    # the product of two gaussian distributions for two different
+    # variables (in this case called x and y)
+    gaussian_kernel = (1./(2.*math.pi*variance)) *\
+                      torch.exp(
+                          -torch.sum((xy_grid - mean)**2., dim=-1) /\
+                          (2*variance)
+                      )
+
+    # Make sure sum of values in gaussian kernel equals 1.
+    gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
+
+    # Reshape to 2d depthwise convolutional weight
+    gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
+    gaussian_kernel = gaussian_kernel.repeat(channels, 1, 1, 1)
+
+    gaussian_filter = nn.Conv2d(in_channels=channels, out_channels=channels,
+                                kernel_size=kernel_size, groups=channels, bias=False)
+
+    gaussian_filter.weight.data = gaussian_kernel
+    gaussian_filter.weight.requires_grad = False
+
+    return gaussian_filter, padding
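# ---- Usage sketch (reviewer note, not part of the diff) ---------------------
# A minimal example of how the depthwise filter returned by get_gaussian_kernel
# might be applied; the dummy tensor and the reflection padding mode are
# illustrative assumptions, not code from the repository.
import torch
import torch.nn.functional as F
from utils.GaussianBlur import get_gaussian_kernel

blur, pad = get_gaussian_kernel(kernel_size=21, sigma=5, channels=3)
x = torch.rand(1, 3, 128, 128)                          # dummy RGB batch
x_pad = F.pad(x, (pad, pad, pad, pad), mode='reflect')  # the conv itself has padding=0
y = blur(x_pad)                                         # blurred output, same size as x
assert y.shape == x.shape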
diff --git a/utils/bundle_submissions.py b/utils/bundle_submissions.py
new file mode 100644
index 0000000..fc6a242
--- /dev/null
+++ b/utils/bundle_submissions.py
@@ -0,0 +1,108 @@
+ # Author: Tobias Plötz, TU Darmstadt (tobias.ploetz@visinf.tu-darmstadt.de)
+
+ # This file is part of the implementation as described in the CVPR 2017 paper:
+ # Tobias Plötz and Stefan Roth, Benchmarking Denoising Algorithms with Real Photographs.
+ # Please see the file LICENSE.txt for the license governing this code.
+
+
+import numpy as np
+import scipy.io as sio
+import os
+import h5py
+
+def bundle_submissions_raw(submission_folder,session):
+    '''
+    Bundles submission data for raw denoising
+
+    submission_folder Folder where denoised images reside
+
+    Output is written to <submission_folder>/bundled/. Please submit
+    the content of this folder.
+    '''
+
+    out_folder = os.path.join(submission_folder, session)
+    # out_folder = os.path.join(submission_folder, "bundled/")
+    try:
+        os.mkdir(out_folder)
+    except:pass
+
+    israw = True
+    eval_version="1.0"
+
+    for i in range(50):
+        Idenoised = np.zeros((20,), dtype=np.object)
+        for bb in range(20):
+            filename = '%04d_%02d.mat'%(i+1,bb+1)
+            s = sio.loadmat(os.path.join(submission_folder,filename))
+            Idenoised_crop = s["Idenoised_crop"]
+            Idenoised[bb] = Idenoised_crop
+        filename = '%04d.mat'%(i+1)
+        sio.savemat(os.path.join(out_folder, filename),
+                    {"Idenoised": Idenoised,
+                     "israw": israw,
+                     "eval_version": eval_version},
+                    )
+
+def bundle_submissions_srgb(submission_folder,session):
+    '''
+    Bundles submission data for sRGB denoising
+
+    submission_folder Folder where denoised images reside
+
+    Output is written to <submission_folder>/bundled/. Please submit
+    the content of this folder.
+    '''
+    out_folder = os.path.join(submission_folder, session)
+    # out_folder = os.path.join(submission_folder, "bundled/")
+    try:
+        os.mkdir(out_folder)
+    except:pass
+    israw = False
+    eval_version="1.0"
+
+    for i in range(50):
+        Idenoised = np.zeros((20,), dtype=np.object)
+        for bb in range(20):
+            filename = '%04d_%02d.mat'%(i+1,bb+1)
+            s = sio.loadmat(os.path.join(submission_folder,filename))
+            Idenoised_crop = s["Idenoised_crop"]
+            Idenoised[bb] = Idenoised_crop
+        filename = '%04d.mat'%(i+1)
+        sio.savemat(os.path.join(out_folder, filename),
+                    {"Idenoised": Idenoised,
+                     "israw": israw,
+                     "eval_version": eval_version},
+                    )
+
+
+
+def bundle_submissions_srgb_v1(submission_folder,session):
+    '''
+    Bundles submission data for sRGB denoising
+
+    submission_folder Folder where denoised images reside
+
+    Output is written to <submission_folder>/bundled/. Please submit
+    the content of this folder.
+    '''
+    out_folder = os.path.join(submission_folder, session)
+    # out_folder = os.path.join(submission_folder, "bundled/")
+    try:
+        os.mkdir(out_folder)
+    except:pass
+    israw = False
+    eval_version="1.0"
+
+    for i in range(50):
+        Idenoised = np.zeros((20,), dtype=np.object)
+        for bb in range(20):
+            filename = '%04d_%d.mat'%(i+1,bb+1)
+            s = sio.loadmat(os.path.join(submission_folder,filename))
+            Idenoised_crop = s["Idenoised_crop"]
+            Idenoised[bb] = Idenoised_crop
+        filename = '%04d.mat'%(i+1)
+        sio.savemat(os.path.join(out_folder, filename),
+                    {"Idenoised": Idenoised,
+                     "israw": israw,
+                     "eval_version": eval_version},
+                    )
\ No newline at end of file
diff --git a/utils/dir_utils.py b/utils/dir_utils.py
new file mode 100644
index 0000000..3be7063
--- /dev/null
+++ b/utils/dir_utils.py
@@ -0,0 +1,18 @@
+import os
+from natsort import natsorted
+from glob import glob
+
+def mkdirs(paths):
+    if isinstance(paths, list) and not isinstance(paths, str):
+        for path in paths:
+            mkdir(path)
+    else:
+        mkdir(paths)
+
+def mkdir(path):
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+def get_last_path(path, session):
+    x = natsorted(glob(os.path.join(path,'*%s'%session)))[-1]
+    return x
\ No newline at end of file
diff --git a/utils/image_utils.py b/utils/image_utils.py
new file mode 100644
index 0000000..6c0e1fa
--- /dev/null
+++ b/utils/image_utils.py
@@ -0,0 +1,94 @@
+import torch
+import numpy as np
+from skimage.measure.simple_metrics import compare_psnr
+import pickle
+import lycon
+from skimage.measure import compare_ssim
+
+
+def is_numpy_file(filename):
+    return any(filename.endswith(extension) for extension in [".npy"])
+
+def is_image_file(filename):
+    return any(filename.endswith(extension) for extension in [".jpg"])
+
+def is_png_file(filename):
+    return any(filename.endswith(extension) for extension in [".png"])
+
+def is_pkl_file(filename):
+    return any(filename.endswith(extension) for extension in [".pkl"])
+
+
+def load_pkl(filename_):
+    with open(filename_, 'rb') as f:
+        ret_dict = pickle.load(f)
+    return ret_dict
+
+def save_dict(dict_, filename_):
+    with open(filename_, 'wb') as f:
+        pickle.dump(dict_, f)
+
+def load_npy(filepath):
+    img = np.load(filepath)
+    return img
+
+def load_img(filepath):
+    img = lycon.load(filepath)
+    img = img.astype(np.float32)
+    img = img/255.
+    return img
+
+def batch_PSNR(img, imclean, data_range):
+    Img = img.data.cpu().numpy().astype(np.float32)
+    Iclean = imclean.data.cpu().numpy().astype(np.float32)
+    PSNR = []
+    for i in range(Img.shape[0]):
+        psnr = compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range)
+        if np.isinf(psnr):
+            continue
+        PSNR.append(psnr)
+    return sum(PSNR)/len(PSNR)
+
+
+def batch_SSIM(img, imclean):
+    Img = img.data.cpu().numpy().astype(np.float32)
+    Iclean = imclean.data.cpu().numpy().astype(np.float32)
+    SSIM = []
+    for i in range(Img.shape[0]):
+        ssim = compare_ssim(Iclean[i,:,:,:], Img[i,:,:,:], gaussian_weights=True, use_sample_covariance=False, multichannel=True)
+        SSIM.append(ssim)
+    return sum(SSIM)/len(SSIM)
+
+
+def unpack_raw(im):
+    bs,chan,h,w = im.shape
+    H, W = h*2, w*2
+    img2 = torch.zeros((bs,H,W))
+    img2[:,0:H:2,0:W:2]=im[:,0,:,:]
+    img2[:,0:H:2,1:W:2]=im[:,1,:,:]
+    img2[:,1:H:2,0:W:2]=im[:,2,:,:]
+    img2[:,1:H:2,1:W:2]=im[:,3,:,:]
+    img2 = img2.unsqueeze(1)
+    return img2
+
+def pack_raw(im):
+    img_shape = im.shape
+    H = img_shape[0]
+    W = img_shape[1]
+    ## R G G B
+    out = np.concatenate((im[0:H:2,0:W:2,:],
+                          im[0:H:2,1:W:2,:],
+                          im[1:H:2,0:W:2,:],
+                          im[1:H:2,1:W:2,:]), axis=2)
+    return out
+
+def pack_raw_torch(im):
+    img_shape = im.shape
+    H = img_shape[0]
+    W = img_shape[1]
+    ## R G G B
+    out = torch.cat((im[0:H:2,0:W:2,:],
+                     im[0:H:2,1:W:2,:],
+                     im[1:H:2,0:W:2,:],
+                     im[1:H:2,1:W:2,:]), dim=2)
+    return out
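# ---- Usage sketch (reviewer note, not part of the diff) ---------------------
# Shape round-trip for the RAW packing helpers above: pack_raw turns an HxWx1
# Bayer mosaic into a half-resolution 4-channel (RGGB) array, and unpack_raw
# reverses the operation on a batched torch tensor. The random mosaic is an
# illustrative placeholder.
import numpy as np
import torch
from utils.image_utils import pack_raw, unpack_raw

bayer = np.random.rand(256, 256, 1).astype(np.float32)             # H x W x 1 mosaic
packed = pack_raw(bayer)                                            # (128, 128, 4) RGGB planes
packed_t = torch.from_numpy(packed).permute(2, 0, 1).unsqueeze(0)   # (1, 4, 128, 128)
mosaic_t = unpack_raw(packed_t)                                     # (1, 1, 256, 256)
assert torch.allclose(mosaic_t[0, 0], torch.from_numpy(bayer[:, :, 0]))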
diff --git a/utils/model_utils.py b/utils/model_utils.py
new file mode 100644
index 0000000..e3a7dc6
--- /dev/null
+++ b/utils/model_utils.py
@@ -0,0 +1,53 @@
+import torch
+import os
+from collections import OrderedDict
+
+def freeze(model):
+    for p in model.parameters():
+        p.requires_grad=False
+
+def unfreeze(model):
+    for p in model.parameters():
+        p.requires_grad=True
+
+def is_frozen(model):
+    x = [p.requires_grad for p in model.parameters()]
+    return not all(x)
+
+def save_checkpoint(model_dir, state, session):
+    epoch = state['epoch']
+    model_out_path = os.path.join(model_dir,"model_epoch_{}_{}.pth".format(epoch,session))
+    torch.save(state, model_out_path)
+
+def load_checkpoint(model, weights):
+    checkpoint = torch.load(weights)
+    try:
+        model.load_state_dict(checkpoint["state_dict"])
+    except:
+        state_dict = checkpoint["state_dict"]
+        new_state_dict = OrderedDict()
+        for k, v in state_dict.items():
+            name = k[7:] # remove `module.`
+            new_state_dict[name] = v
+        model.load_state_dict(new_state_dict)
+
+
+def load_checkpoint_multigpu(model, weights):
+    checkpoint = torch.load(weights)
+    state_dict = checkpoint["state_dict"]
+    new_state_dict = OrderedDict()
+    for k, v in state_dict.items():
+        name = k[7:] # remove `module.`
+        new_state_dict[name] = v
+    model.load_state_dict(new_state_dict)
+
+def load_start_epoch(weights):
+    checkpoint = torch.load(weights)
+    epoch = checkpoint["epoch"]
+    return epoch
+
+def load_optim(optimizer, weights):
+    checkpoint = torch.load(weights)
+    optimizer.load_state_dict(checkpoint['optimizer'])
+    for p in optimizer.param_groups: lr = p['lr']
+    return lr
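# ---- Usage sketch (reviewer note, not part of the diff) ---------------------
# Saving and resuming training state with the helpers above. The model,
# optimizer, directory and session names are illustrative placeholders.
import os
import torch
import torch.nn as nn
from utils.model_utils import save_checkpoint, load_checkpoint, load_start_epoch, load_optim

model = nn.Conv2d(3, 3, 3)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

os.makedirs('./checkpoints', exist_ok=True)
save_checkpoint('./checkpoints', {'epoch': 5,
                                  'state_dict': model.state_dict(),
                                  'optimizer': optimizer.state_dict()}, 'demo')

weights = './checkpoints/model_epoch_5_demo.pth'
load_checkpoint(model, weights)       # also handles `module.`-prefixed keys
start_epoch = load_start_epoch(weights) + 1
lr = load_optim(optimizer, weights)   # restores optimizer state, returns its lr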
diff --git a/utils/noise_sampling.py b/utils/noise_sampling.py
new file mode 100644
index 0000000..45255d3
--- /dev/null
+++ b/utils/noise_sampling.py
@@ -0,0 +1,60 @@
+"""
+## CycleISP: Real Image Restoration Via Improved Data Synthesis
+## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao
+## CVPR 2020
+## https://arxiv.org/abs/2003.07761
+"""
+
+## We adopt the same noise sampling procedure as in "Unprocessing Images for Learned Raw Denoising" by Brooks et al. CVPR2019
+
+import torch
+import torch.distributions as dist
+import numpy as np
+
+
+
+
+################ If the target dataset is DND, use this function #####################
+def random_noise_levels_dnd():
+    """Generates random noise levels from a log-log linear distribution."""
+    log_min_shot_noise = torch.log10(torch.Tensor([0.0001]))
+    log_max_shot_noise = torch.log10(torch.Tensor([0.012]))
+    distribution = dist.uniform.Uniform(log_min_shot_noise, log_max_shot_noise)
+
+    log_shot_noise = distribution.sample()
+    shot_noise = torch.pow(10,log_shot_noise)
+    distribution = dist.normal.Normal(torch.Tensor([0.0]), torch.Tensor([0.26]))
+    read_noise = distribution.sample()
+    line = lambda x: 2.18 * x + 1.20
+    log_read_noise = line(log_shot_noise) + read_noise
+    read_noise = torch.pow(10,log_read_noise)
+    return shot_noise, read_noise
+
+################ If the target dataset is SIDD, use this function #####################
+def random_noise_levels_sidd():
+    """ Where read_noise in SIDD is not 0 """
+    log_min_shot_noise = torch.log10(torch.Tensor([0.00068674]))
+    log_max_shot_noise = torch.log10(torch.Tensor([0.02194856]))
+    distribution = dist.uniform.Uniform(log_min_shot_noise, log_max_shot_noise)
+
+    log_shot_noise = distribution.sample()
+    shot_noise = torch.pow(10,log_shot_noise)
+
+    distribution = dist.normal.Normal(torch.Tensor([0.0]), torch.Tensor([0.20]))
+    read_noise = distribution.sample()
+    line = lambda x: 1.85 * x + 0.30 ### Line SIDD test set
+    log_read_noise = line(log_shot_noise) + read_noise
+    read_noise = torch.pow(10,log_read_noise)
+    return shot_noise, read_noise
+
+
+def add_noise(image, shot_noise=0.01, read_noise=0.0005, use_cuda=False):
+    """Adds random shot (proportional to image) and read (independent) noise."""
+    variance = image * shot_noise + read_noise
+    mean = torch.Tensor([0.0])
+    if use_cuda:
+        mean = mean.cuda()
+    distribution = dist.normal.Normal(mean, torch.sqrt(variance))
+    noise = distribution.sample()
+    return image + noise
+
diff --git a/utils/plot_utils.py b/utils/plot_utils.py
new file mode 100644
index 0000000..6b32ea1
--- /dev/null
+++ b/utils/plot_utils.py
@@ -0,0 +1,37 @@
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+
+def plot_psnr(apath, epoch,psnr_vec, mode=None):
+    axis = np.linspace(1, len(psnr_vec), len(psnr_vec))
+    fig = plt.figure()
+    plt.plot(axis, psnr_vec)
+    plt.xlabel('Epochs')
+    plt.ylabel('PSNR')
+    plt.grid(True)
+    plt.savefig('{}psnr_{}.pdf'.format(apath,mode))
+    plt.close(fig)
+
+
+def plot_loss(apath, epoch,loss_vec):
+    if len(loss_vec) == epoch:
+        axis = np.linspace(1, epoch, epoch)
+        fig = plt.figure()
+        plt.plot(axis, loss_vec)
+        #plt.legend()
+        plt.xlabel('Epochs')
+        plt.ylabel('Loss')
+        plt.grid(True)
+        plt.savefig('{}loss.pdf'.format(apath))
+        plt.close(fig)
+    else:
+        axis = np.linspace(1, len(loss_vec), len(loss_vec))
+        fig = plt.figure()
+        plt.plot(axis, loss_vec)
+        #plt.legend()
+        plt.xlabel('Epochs')
+        plt.ylabel('Loss')
+        plt.grid(True)
+        plt.savefig('{}loss_resume.pdf'.format(apath))
+        plt.close(fig)
\ No newline at end of file
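# ---- Usage sketch (reviewer note, not part of the diff) ---------------------
# End-to-end noise synthesis with the entry points added in
# utils/noise_sampling.py; the clean RGGB patch is a random placeholder.
import torch
from utils.noise_sampling import random_noise_levels_sidd, add_noise

clean = torch.rand(1, 4, 64, 64)                     # packed RGGB patch in [0, 1]
shot_noise, read_noise = random_noise_levels_sidd()  # sampled once per training example
noisy = add_noise(clean, shot_noise, read_noise)     # shot + read (heteroscedastic Gaussian) noise
assert noisy.shape == clean.shape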