-
Notifications
You must be signed in to change notification settings - Fork 9
/
interpolator_tools.py
133 lines (95 loc) · 4.54 KB
/
interpolator_tools.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import math
import numpy as np
import scipy.ndimage.filters as ft
import torch
import torch.nn as nn
def interp23tap(img, ratio):
    """
    Polynomial (with 23 coefficients) interpolator Function.

    Upsamples ``img`` by ``ratio`` through repeated 2x stages: each stage
    zero-interleaves the current samples onto a grid twice as dense, then
    applies a separable 23-tap interpolation filter along both spatial axes
    (circular boundary handling).

    For more details please refers to:
    [1] B. Aiazzi, L. Alparone, S. Baronti, and A. Garzelli - Context-driven fusion of high spatial and spectral
        resolution images based on oversampled multiresolution analysis
    [2] B. Aiazzi, S. Baronti, M. Selva, and L. Alparone - Bi-cubic interpolation for shift-free pan-sharpening
    [3] G. Vivone, M. Dalla Mura, A. Garzelli, R. Restaino, G. Scarpa, M. Orn Ulfarsson, L. Alparone, J. Chanussot -
        A new benchmark based on recent advances in multispectral pansharpening: Revisiting pansharpening with
        classical and emerging pansharpening methods

    Parameters
    ----------
    img : Numpy Array
        Image to be scaled. Dimension: H, W, B
    ratio : int
        The desired scale. It must be a factor power of 2.

    Return
    ------
    img : Numpy array
        the interpolated img. Dimension: ratio*H, ratio*W, B
    """
    # `scipy.ndimage.filters` (the module-level `ft` alias) was deprecated and
    # removed in SciPy 1.10; import the canonical location locally so this
    # function keeps working on current SciPy.
    from scipy.ndimage import convolve

    assert ((2 ** (round(math.log(ratio, 2)))) == ratio), 'Error: Only resize factors power of 2'

    r, c, b = img.shape

    # Right half (center first) of the symmetric 23-tap kernel. Doubling the
    # coefficients restores unit DC gain after zero-interleaving halves the
    # sample density at each 2x stage.
    CDF23 = 2 * np.asarray(
        [0.5, 0.305334091185, 0, -0.072698593239, 0, 0.021809577942, 0,
         -0.005192756653, 0, 0.000807762146, 0, -0.000060081482])
    # Mirror to the full 23-tap kernel; shape (23, 1) => column filter.
    BaseCoeff = np.expand_dims(np.concatenate([np.flip(CDF23[1:]), CDF23]), axis=-1)

    for z in range(int(ratio / 2)):
        I1LRU = np.zeros(((2 ** (z + 1)) * r, (2 ** (z + 1)) * c, b))
        if z == 0:
            # First stage places samples at odd positions (shift-free grid).
            I1LRU[1::2, 1::2, :] = img
        else:
            I1LRU[::2, ::2, :] = img

        # Separable filtering per band: filter one axis, transpose, filter
        # the other axis; 'wrap' gives circular boundary conditions.
        for i in range(b):
            temp = convolve(np.transpose(I1LRU[:, :, i]), BaseCoeff, mode='wrap')
            I1LRU[:, :, i] = convolve(np.transpose(temp), BaseCoeff, mode='wrap')

        img = I1LRU

    return img
def interp23tap_torch(img, ratio, device):
"""
A PyTorch implementation of the Polynomial interpolator Function.
For more details please refers to:
[1] B. Aiazzi, L. Alparone, S. Baronti, and A. Garzelli - Context-driven fusion of high spatial and spectral
resolution images based on oversampled multiresolution analysis
[2] B. Aiazzi, S. Baronti, M. Selva, and L. Alparone - Bi-cubic interpolation for shift-free pan-sharpening
[3] G. Vivone, M. Dalla Mura, A. Garzelli, R. Restaino, G. Scarpa, M. Orn Ulfarsson, L. Alparone, J. Chanussot -
A new benchmark based on recent advances in multispectral pansharpening: Revisiting pansharpening with
classical and emerging pansharpening methods
Parameters
----------
img : Numpy Array
Image to be scaled. The conversion in Torch Tensor is made within the function. Dimension: H, W, B
ratio : int
The desired scale. It must be a factor power of 2.
device : Torch Device
The device on which perform the operation.
Return
------
img : Numpy array
The interpolated img.
"""
assert ((2 ** (round(math.log(ratio, 2)))) == ratio), 'Error: Only resize factors power of 2'
r, c, b = img.shape
CDF23 = np.asarray(
[0.5, 0.305334091185, 0, -0.072698593239, 0, 0.021809577942, 0, -0.005192756653, 0, 0.000807762146, 0,
-0.000060081482])
CDF23 = [element * 2 for element in CDF23]
BaseCoeff = np.expand_dims(np.concatenate([np.flip(CDF23[1:]), CDF23]), axis=-1)
BaseCoeff = np.expand_dims(BaseCoeff, axis=(0, 1))
BaseCoeff = np.concatenate([BaseCoeff] * b, axis=0)
BaseCoeff = torch.from_numpy(BaseCoeff).to(device)
img = img.astype(np.float32)
img = np.moveaxis(img, -1, 0)
for z in range(int(ratio / 2)):
I1LRU = np.zeros((b, (2 ** (z + 1)) * r, (2 ** (z + 1)) * c))
if z == 0:
I1LRU[:, 1::2, 1::2] = img
else:
I1LRU[:, ::2, ::2] = img
I1LRU = np.expand_dims(I1LRU, axis=0)
conv = nn.Conv2d(in_channels=b, out_channels=b, padding=(11, 0),
kernel_size=BaseCoeff.shape, groups=b, bias=False, padding_mode='circular')
conv.weight.data = BaseCoeff
conv.weight.requires_grad = False
t = conv(torch.transpose(torch.from_numpy(I1LRU).to(device), 2, 3))
img = conv(torch.transpose(t, 2, 3)).cpu().detach().numpy()
img = np.squeeze(img)
img = np.moveaxis(img, 0, -1)
return img