#!/usr/bin/env python
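"""Black-box adversarial attack on ImageNet classifiers with AdvFlow.

The script maps each clean image into the latent space of a pre-trained
flow-based model and searches that space with an NES-style evolution
strategy (Algorithm 3 in the comments below) until the black-box target
classifier is fooled or the iteration budget c.n_iter is exhausted.
"""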
import sys
import torch
import torch.nn
import torch.optim
from torch import autograd
from torch.nn.functional import avg_pool2d, interpolate, softmax
from torch.nn import UpsamplingBilinear2d, Upsample
from torch.autograd import Variable
import numpy as np
import tqdm
import matplotlib.pyplot as plt
import config as c
import opts
import time
opts.parse(sys.argv)
config_str = ""
config_str += "==="*30 + "\n"
config_str += "Config options:\n\n"
upsampler = Upsample(size=(c.org_size, c.org_size), align_corners=True, mode='bilinear')
for v in dir(c):
    if v[0] == '_':
        continue
    s = eval('c.%s' % (v))
    config_str += "  {:25}\t{}\n".format(v, s)
config_str += "==="*30 + "\n"
print(config_str)
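
# model and data read the config at import time, so they are imported
# only after opts.parse(sys.argv) has run.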
import model
import data
# Loading the flow-based model, as well as the target classifier
if c.load_file:
    model.load(c.load_file)
black_box_target = model.load_target_model(type=c.dataset)
model.model.eval()
with torch.no_grad():
    for i_epoch in range(c.n_epochs):

        # Initialization
        total_imgs = 0
        succ_imgs = 0
        fail_list = []
        succ_list = []
        print_list = []

        data_iter = iter(data.test_loader)
        L2_mean = 0

        for i_batch, data_tuple in tqdm.tqdm(enumerate(data_iter),
                                             total=len(data.test_loader),
                                             leave=False,
                                             mininterval=1.,
                                             disable=True,
                                             ncols=83):

            success = False
            print('\nEvaluating {:d}'.format(i_batch), flush=True)

            # Getting the data and moving it to the GPU
            x, y = data_tuple
            x = x.cuda()
            y = y.cuda()

            # Down-sampling the clean image (line 2 of Algorithm 3)
            x_low = interpolate(x, size=c.img_dims[1], mode='bilinear')

            # Mapping the clean image to the latent space of the flow-based model (line 3 of Algorithm 3)
            z = model.model(x_low)

            # Initializing the shift vector \mu (line 1 of Algorithm 3)
            mu = 0.001 * torch.randn([1, c.output_dim]).cuda()

            # Getting the target classifier's prediction for the current image
            logits = black_box_target((x - data.means)/data.stds)
            probs = softmax(logits, dim=1)

            # Checking whether the classifier predicts the label correctly;
            # otherwise, the sample is skipped as it is already misclassified.
            if torch.argmax(probs[0]) != y:
                print('\nSkipping misclassified example ', i_batch)
                continue

            # Counting this image toward the total number of attacked images
            total_imgs += 1

            # Main adversarial example generation loop
            for run_step in range(c.n_iter):

                # Generating a population of candidate points in the latent space from
                # a normal distribution with mean \mu and variance \sigma^2 (line 5 of Algorithm 3)
                z_sample = torch.randn([c.n_pop, c.output_dim]).cuda()
                modify_try = mu.repeat(c.n_pop, 1) + c.sigma * z_sample

                # Mapping the latent points back to the image space (lines 6 and 7 of Algorithm 3, the f(z_k) part)
                x_hat_s = torch.clamp(model.model(z + modify_try, rev=True), 0., 1.)

                # Periodically checking whether the classifier is already fooled
                if run_step % 10 == 0:

                    # Mapping the current candidate (z shifted by \mu) back to the image space
                    real_input_img = torch.clamp(model.model(z + mu, rev=True), 0., 1.)

                    # Computing the distance between the adversarial and clean images
                    real_dist = real_input_img - x_low

                    # Making sure that the perturbation lies within the defined boundary.
                    # Here, we use \ell_\infty, hence the torch.clamp function.
                    # One can easily extend this to other norms such as \ell_2.
                    real_clip_dist = torch.clamp(upsampler(real_dist), -c.epsi, c.epsi)

                    # Adding the perturbation to the clean image
                    real_clip_input = real_clip_dist + x
                    real_clip_input = torch.clamp(real_clip_input, 0., 1.)

                    # Querying the target classifier
                    outputs_real = black_box_target((real_clip_input - data.means)/data.stds)
                    outputs_real = softmax(outputs_real, dim=1)

                    # Checking whether the classifier is fooled and the perturbation is within the defined boundary
                    if (torch.argmax(outputs_real) != y) and (torch.abs(real_clip_dist).max() <= c.epsi):

                        # Adding the current image to the list of successfully attacked ones
                        succ_imgs += 1
                        success = True
                        print('\nSuccess! attacked images: ' + str(succ_imgs) + ', total images: ' + str(total_imgs))
                        succ_list.append(i_batch)
                        print_list.append(run_step)
                        L2_mean += torch.sqrt(torch.mean(real_clip_dist ** 2))

                        # Appending the successfully attacked images to a tensor for logging purposes
                        if succ_imgs == 1:
                            clean_data_tot = x.clone().data.cpu()
                            adv_data_tot = real_clip_input.clone().cpu()
                            label_tot = y.clone().data.cpu()
                        else:
                            clean_data_tot = torch.cat((clean_data_tot, x.clone().data.cpu()), 0)
                            adv_data_tot = torch.cat((adv_data_tot, real_clip_input.clone().cpu()), 0)
                            label_tot = torch.cat((label_tot, y.clone().data.cpu()), 0)

                        break

                # Computing the perturbation (line 7 of Algorithm 3)
                dist = x_hat_s - x_low

                # Ensuring that the perturbation lies within the defined boundary (line 8 of Algorithm 3, the proj. function part)
                clip_dist = torch.clamp(upsampler(dist), -c.epsi, c.epsi)

                # Adding the clipped perturbation to the original image (line 8 of Algorithm 3, the proj. function part)
                clip_input = (clip_dist + x).view(c.n_pop, 3, c.org_size, c.org_size)
                clip_input = torch.clamp(clip_input, 0., 1.)

                # Initializing the one-hot code for the correct label
                target_onehot = torch.zeros((1, c.num_classes)).cuda()
                target_onehot[0][y] = 1.
                target_onehot = target_onehot.repeat(c.n_pop, 1)

                # Querying the classifier with the batch of adversarial candidates
                clip_input = clip_input.squeeze()
                outputs = black_box_target((clip_input - data.means)/data.stds)
                outputs = softmax(outputs, dim=1)
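
                # C&W-style margin: "real" is the log-probability of the true class,
                # and "other" is the log-probability of the best competing class (the
                # true class is masked out with -10000). The margin real - other
                # remains positive as long as the classifier still predicts the true label.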
# Computing the C&W loss for all candidate images (line 8 of Algorithm 3)
real = torch.log((target_onehot * outputs).sum(1) + 1e-10)
other = torch.log(((1. - target_onehot) * outputs - target_onehot * 10000.).max(1)[0] + 1e-10)
loss1 = torch.clamp(real - other, 0., 1000.)
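
                # The NES step below standardizes the rewards and correlates them
                # with the sampled latent directions z_k, yielding the gradient
                # estimate  g ~ (1 / (n_pop * sigma)) * sum_k A_k z_k,
                # which is then applied as an ascent step on \mu.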
                # Updating the shift vector \mu (lines 9, 10, and 11 of Algorithm 3)
                Reward = -0.5 * loss1
                A = (Reward - torch.mean(Reward)) / (torch.std(Reward) + 1e-10)  # (line 9 of Algorithm 3)
                mu += (c.lr / (c.n_pop * c.sigma)) * (torch.matmul(z_sample.view(c.n_pop, -1).t(), A.view(-1, 1))).view(1, -1)  # (lines 10 and 11 of Algorithm 3)

            # Logging
            if not success:
                fail_list.append(i_batch)
                print('\nFailed!', flush=True)
            else:
                print('\nSucceeded!', flush=True)

        print(fail_list)
        success_rate = succ_imgs / float(total_imgs)

        # Note that print_list holds the number of NES steps per image;
        # to get the number of queries, multiply each entry by c.n_pop.
        print('\nRun steps: ', print_list, flush=True)
        np.savez('runstep', print_list)

        print('\nAttack success rate: ', success_rate, flush=True)
        print('\nAvg L2 norm: ', L2_mean / float(succ_imgs), flush=True)
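
# Saving the clean images, adversarial examples, and corresponding labels
# to the adv_output directory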
torch.save(clean_data_tot,'%s/clean_data_%s_%s_%s.pth' % ('adv_output', c.model, c.dataset, 'AdvFlow'))
torch.save(adv_data_tot, '%s/adv_data_%s_%s_%s.pth' % ('adv_output', c.model, c.dataset, 'AdvFlow'))
torch.save(label_tot, '%s/label_%s_%s_%s.pth' % ('adv_output', c.model, c.dataset, 'AdvFlow'))
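
# A minimal sketch of how the saved tensors can be reloaded for inspection
# (assuming the same config values c.model and c.dataset):
#   adv = torch.load('adv_output/adv_data_%s_%s_AdvFlow.pth' % (c.model, c.dataset))
#   labels = torch.load('adv_output/label_%s_%s_AdvFlow.pth' % (c.model, c.dataset))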