-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapproximate.py
More file actions
64 lines (47 loc) · 1.72 KB
/
approximate.py
File metadata and controls
64 lines (47 loc) · 1.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
class Quantize(Function):
    """Signed fixed-point quantization with a straight-through gradient.

    Values are quantized to 1 integer bit (IL = 1) plus ``FL`` fractional
    bits: the magnitude is scaled by 2**FL, rounded half-up to an integer
    number of steps, saturated at 2**(FL+1) - 1 steps, and rescaled.
    Magnitudes smaller than half an LSB flush to zero; the sign is
    re-applied at the end.

    Use via the ``quant = Quantize.apply`` alias: ``quant(tensor, FL)``.
    """

    @staticmethod
    def forward(cxt, input, FL):  # FL = fractional bit length; IL is fixed to 1
        """Quantize ``input`` to 1 integer bit + ``FL`` fractional bits.

        Args:
            cxt: autograd context (unused — nothing needs saving for backward).
            input: tensor of any shape, on any device (CPU or CUDA).
            FL (int): number of fractional bits.

        Returns:
            Tensor of the same shape/device with quantized values.
        """
        # Plain Python scalars broadcast onto any device/dtype, unlike the
        # original hard-coded torch.cuda.FloatTensor (which crashed on CPU).
        scale = 2.0 ** FL               # steps per unit; one LSB == 1/scale
        max_steps = 2.0 ** (FL + 1) - 1  # largest representable magnitude in steps

        sign_bits = torch.sign(input)   # sign(0) == 0, so exact zeros stay zero
        y = torch.abs(input) * scale
        # Round half up: floor(y), plus one step whenever the fractional part >= 0.5.
        y = torch.floor(y) + ((y - torch.floor(y)) >= 0.5).float()
        # Saturate on overflow. The original used `>= max_steps`, which maps the
        # threshold to itself — identical to clamping at max_steps.
        y = torch.clamp(y, max=max_steps)
        # Underflow: y is integral here, so y < 1 means y == 0; kept for fidelity
        # with the original's explicit underflow-to-zero step.
        y[y < 1] = 0
        return sign_bits * (y / scale)

    @staticmethod
    def backward(cxt, grad_output):
        """Straight-through estimator: pass the gradient through unchanged.

        Returns one gradient per forward input: d/d(input) = grad_output,
        and None for the non-tensor ``FL`` argument.
        """
        return grad_output, None
# aliases
# Public entry point for the custom autograd op: call as ``quant(tensor, FL)``.
# (Custom Functions must be invoked through .apply, never by calling forward directly.)
quant = Quantize.apply