eql.py
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss

from mmcls.registry import MODELS
from mmengine.dist import all_reduce as allreduce


@MODELS.register_module(force=True)
class SoftmaxEQLLoss(_Loss):
    """Softmax equalization loss.

    Rescales the non-target logits of each sample using running per-class
    gradient statistics, so that rare classes are not suppressed by the
    negative gradients of frequent ones.
    """

    def __init__(self, num_classes, indicator='pos', loss_weight=1.0, tau=1.0, eps=1e-4):
        super(SoftmaxEQLLoss, self).__init__()
        self.loss_weight = loss_weight
        self.num_classes = num_classes
        self.tau = tau
        self.eps = eps

        assert indicator in ['pos', 'neg', 'pos_and_neg'], 'Wrong indicator type!'
        self.indicator = indicator

        # Running per-class gradient statistics, updated by `collect_grad`.
        self.register_buffer('pos_grad', torch.zeros(num_classes))
        self.register_buffer('neg_grad', torch.zeros(num_classes))
        self.register_buffer('pos_neg', torch.ones(num_classes))

    def forward(self, input, label, weight=None, avg_factor=None,
                reduction_override=None, **kwargs):
        # Choose the per-class statistic used to build the rebalancing factor.
        if self.indicator == 'pos':
            indicator = self.pos_grad.detach()
        elif self.indicator == 'neg':
            indicator = self.neg_grad.detach()
        elif self.indicator == 'pos_and_neg':
            indicator = self.pos_neg.detach() + self.neg_grad.detach()
        else:
            raise NotImplementedError

        one_hot = F.one_hot(label, self.num_classes)
        self.targets = one_hot.detach()  # cached for `collect_grad`

        # Pairwise ratio matrix: matrix[i, j] = indicator[j] / indicator[i],
        # clamped for numerical stability; each sample picks the row of its
        # ground-truth class and raises it to the power tau.
        matrix = indicator[None, :].clamp(min=self.eps) / indicator[:, None].clamp(min=self.eps)
        factor = matrix[label.long(), :].pow(self.tau)

        # Shift only the non-target logits by log(factor); the target logit
        # is left unchanged because the one-hot mask zeroes it out.
        cls_score = input + (factor.log() * (1 - one_hot.detach()))
        loss = F.cross_entropy(cls_score, label)
        return loss * self.loss_weight

    def collect_grad(self, grad):
        grad = torch.abs(grad)

        # Split the logit gradient into its positive (target) and negative
        # (non-target) parts and accumulate per class across the batch.
        pos_grad = torch.sum(grad * self.targets, dim=0)
        neg_grad = torch.sum(grad * (1 - self.targets), dim=0)

        # Aggregate across processes in distributed training.
        allreduce(pos_grad)
        allreduce(neg_grad)

        self.pos_grad += pos_grad
        self.neg_grad += neg_grad
        self.pos_neg = self.pos_grad / (self.neg_grad + 1e-10)
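

# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original file): the
# running statistics are only meaningful if `collect_grad` receives the
# gradient of the logits on every backward pass, and the file itself never
# wires that up. One way to do so is a tensor hook on the logits. The batch
# size, class count, and random inputs below are assumptions for
# demonstration; `allreduce` is a no-op outside distributed training.
if __name__ == '__main__':
    criterion = SoftmaxEQLLoss(num_classes=10, indicator='pos')

    logits = torch.randn(8, 10, requires_grad=True)  # hypothetical classifier logits
    labels = torch.randint(0, 10, (8,))

    # Route the logit gradient into the loss's per-class statistics.
    logits.register_hook(criterion.collect_grad)

    loss = criterion(logits, labels)
    loss.backward()

    print(loss.item(), criterion.pos_neg)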