- import tensorflow as tf
  from qkeras.quantizers import get_quantizer

- from hls4ml.converters.keras.core import BinaryQuantizer
- from hls4ml.model.types import ExponentPrecisionType, FixedPrecisionType, IntegerPrecisionType, Quantizer, XnorPrecisionType
-
-
- class QKerasQuantizer(Quantizer):
-     def __init__(self, config):
-         self.quantizer_fn = get_quantizer(config)
-         self.alpha = config['config'].get('alpha', None)
-         if config['class_name'] == 'quantized_bits':
-             self.bits = config['config']['bits']
-             self.hls_type = get_type(config)
-         # ! includes stochastic_ternary
-         elif 'ternary' in config['class_name']:
-             self.bits = 2
-             self.hls_type = IntegerPrecisionType(width=2, signed=True)
-         # ! includes stochastic_binary
-         elif 'binary' in config['class_name']:
-             self.bits = 1
-             self.hls_type = XnorPrecisionType()
-         else:
-             print("Unsupported quantizer: " + config['class_name'])
-             self.bits = 16
-             self.hls_type = FixedPrecisionType(width=16, integer=6, signed=True)
-
-     def __call__(self, data):
-         tf_data = tf.convert_to_tensor(data)
-         return self.quantizer_fn(tf_data).numpy()
-         # return self.quantizer_fn(data)
-
-
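For orientation, a minimal sketch of how this class is exercised (assuming qkeras and numpy are installed; the dict mirrors the serialized QKeras form that `__init__` reads above):

    import numpy as np

    config = {'class_name': 'quantized_bits',
              'config': {'bits': 8, 'integer': 0, 'alpha': None}}
    q = QKerasQuantizer(config)
    q.bits                    # 8
    q.hls_type                # FixedPrecisionType(width=8, integer=1), via get_type() below
    q(np.array([0.7, -0.3]))  # weights snapped to the 8-bit quantization grid
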
- class QKerasBinaryQuantizer:
-     def __init__(self, config, xnor=False):
-         self.bits = 1 if xnor else 2
-         self.hls_type = XnorPrecisionType() if xnor else IntegerPrecisionType(width=2, signed=True)
-         self.alpha = config['config']['alpha']
-         # Use the QKeras quantizer to handle any stochastic / alpha stuff
-         self.quantizer_fn = get_quantizer(config)
-         # Then we use our BinaryQuantizer to convert to '0,1' format
-         self.binary_quantizer = BinaryQuantizer(1) if xnor else BinaryQuantizer(2)
-
-     def __call__(self, data):
-         x = tf.convert_to_tensor(data)
-         y = self.quantizer_fn(x).numpy()
-         return self.binary_quantizer(y)
-
-
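The two-stage conversion in action, as a sketch (the config shape is assumed from QKeras's serialization of a `binary` quantizer): the QKeras quantizer first maps weights to {-1, +1}, handling any alpha/stochastic options, and BinaryQuantizer then re-encodes the result, to {0, 1} in the xnor case:

    import numpy as np

    config = {'class_name': 'binary', 'config': {'alpha': 1.0}}
    bq = QKerasBinaryQuantizer(config, xnor=True)
    bq(np.array([0.8, -0.4, 0.1]))  # -> [1, 0, 1], i.e. sign bits in '0,1' format
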
- class QKerasPO2Quantizer:
-     def __init__(self, config):
-         self.bits = config['config']['bits']
-         self.quantizer_fn = get_quantizer(config)
-         self.hls_type = ExponentPrecisionType(width=self.bits, signed=True)
-
-     def __call__(self, data):
-         '''
-         Weights are quantized to the nearest power of two.
-         '''
-         x = tf.convert_to_tensor(data)
-         y = self.quantizer_fn(x)
-         if hasattr(y, 'numpy'):
-             y = y.numpy()
-         return y
-
-
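A sketch of the effect (config keys as read in `__init__` above): each weight snaps to the nearest power of two, so only the exponent has to be stored, hence the ExponentPrecisionType:

    import numpy as np

    config = {'class_name': 'quantized_po2', 'config': {'bits': 4}}
    pq = QKerasPO2Quantizer(config)
    pq(np.array([0.4, 3.0, -1.7]))  # -> [0.5, 4.0, -2.0]
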
- def get_type(quantizer_config):
-     width = quantizer_config['config']['bits']
-     integer = quantizer_config['config'].get('integer', 0)
-     if quantizer_config['class_name'] == 'quantized_po2':
-         return ExponentPrecisionType(width=width, signed=True)
-     if width == integer:
-         if width == 1:
-             return XnorPrecisionType()
-         else:
-             return IntegerPrecisionType(width=width, signed=True)
-     else:
-         return FixedPrecisionType(width=width, integer=integer + 1, signed=True)
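Spelled out on a few concrete configs, the mapping this helper implements (the `integer + 1` accounts for the fixed-point type counting the sign bit as part of the integer field, which QKeras does not):

    get_type({'class_name': 'quantized_bits', 'config': {'bits': 8, 'integer': 0}})
    # -> FixedPrecisionType(width=8, integer=1)
    get_type({'class_name': 'quantized_bits', 'config': {'bits': 6, 'integer': 6}})
    # -> IntegerPrecisionType(width=6)
    get_type({'class_name': 'quantized_bits', 'config': {'bits': 1, 'integer': 1}})
    # -> XnorPrecisionType()
    get_type({'class_name': 'quantized_po2', 'config': {'bits': 4}})
    # -> ExponentPrecisionType(width=4)
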
+ from hls4ml.converters.keras.convolution import parse_conv1d_layer, parse_conv2d_layer
+ from hls4ml.converters.keras.core import parse_batchnorm_layer, parse_dense_layer
+ from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
+ from hls4ml.model.types import FixedPrecisionType, QKerasBinaryQuantizer, QKerasPO2Quantizer, QKerasQuantizer


  def get_quantizer_from_config(keras_layer, quantizer_var):
@@ -88,3 +16,130 @@ def get_quantizer_from_config(keras_layer, quantizer_var):
          return QKerasPO2Quantizer(quantizer_config)
      else:
          return QKerasQuantizer(quantizer_config)
+
+
+ @keras_handler('QDense')
+ def parse_qdense_layer(keras_layer, input_names, input_shapes, data_reader):
+
+     layer, output_shape = parse_dense_layer(keras_layer, input_names, input_shapes, data_reader)
+
+     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
+     if keras_layer['config']['bias_quantizer'] is not None:
+         layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
+     else:
+         layer['bias_quantizer'] = None
+
+     return layer, output_shape
+
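For reference, the slice of a serialized QDense layer this handler reads might look as follows (a sketch; key names as accessed above, a real Keras config carries many more fields):

    keras_layer = {
        'class_name': 'QDense',
        'config': {
            'name': 'fc1',
            'units': 64,
            'kernel_quantizer': {'class_name': 'quantized_bits',
                                 'config': {'bits': 8, 'integer': 0, 'alpha': None}},
            'bias_quantizer': None,  # the handler then stores None for the bias
        },
    }
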
+
+ @keras_handler('QConv1D', 'QConv2D')
+ def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
+     assert 'QConv' in keras_layer['class_name']
+
+     if '1D' in keras_layer['class_name']:
+         layer, output_shape = parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader)
+     elif '2D' in keras_layer['class_name']:
+         layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
+
+     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
+     if keras_layer['config']['bias_quantizer'] is not None:
+         layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
+     else:
+         layer['bias_quantizer'] = None
+
+     return layer, output_shape
+
+
+ @keras_handler('QActivation')
+ def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader):
+     assert keras_layer['class_name'] == 'QActivation'
+     supported_activations = [
+         'quantized_relu',
+         'quantized_tanh',
+         'binary_tanh',
+         'ternary_tanh',
+         'quantized_sigmoid',
+         'quantized_bits',
+         'binary',
+         'ternary',
+     ]
+
+     layer = parse_default_keras_layer(keras_layer, input_names)
+
+     activation_config = keras_layer['config']['activation']
+     quantizer_obj = get_quantizer(activation_config)
+     activation_config = {}
+     # some activation quantizers are classes
+     if hasattr(quantizer_obj, 'get_config'):
+         activation_config['class_name'] = quantizer_obj.__class__.__name__
+         if activation_config['class_name'] == 'ternary' or activation_config['class_name'] == 'binary':
+             activation_config['class_name'] += '_tanh'
+         activation_config['config'] = quantizer_obj.get_config()
+     # some activation quantizers are just functions with no config
+     else:
+         activation_config['config'] = {}
+         if 'binary' in quantizer_obj.__name__:
+             activation_config['class_name'] = 'binary_tanh'
+             activation_config['config']['bits'] = 1
+             activation_config['config']['integer'] = 1
+         elif 'ternary' in quantizer_obj.__name__:
+             activation_config['class_name'] = 'ternary_tanh'
+             activation_config['config']['bits'] = 2
+             activation_config['config']['integer'] = 2
+         else:
+             activation_config['class_name'] = 'unknown'
+
+     if activation_config['class_name'] not in supported_activations:
+         raise Exception('Unsupported QKeras activation: {}'.format(activation_config['class_name']))
+
+     if activation_config['class_name'] == 'quantized_bits':
+         activation_config['class_name'] = 'linear'
+
+     if activation_config['class_name'] == 'ternary_tanh':
+         layer['class_name'] = 'TernaryTanh'
+         layer['threshold'] = activation_config.get('config', {}).get('threshold', 0.33)
+         if layer['threshold'] is None:
+             layer['threshold'] = 0.33  # the default ternary tanh threshold for QKeras
+         layer['activation'] = 'ternary_tanh'
+     elif (
+         activation_config['class_name'] == 'quantized_sigmoid'
+         and not activation_config['config'].get('use_real_sigmoid', False)
+     ) or (
+         activation_config['class_name'] == 'quantized_tanh' and not activation_config['config'].get('use_real_tanh', False)
+     ):
+         layer['class_name'] = 'HardActivation'
+         layer['slope'] = 0.5  # the default values in QKeras
+         layer['shift'] = 0.5
+         # Quartus seems to have trouble if the width is 1.
+         layer['slope_prec'] = FixedPrecisionType(width=2, integer=0, signed=False)
+         layer['shift_prec'] = FixedPrecisionType(width=2, integer=0, signed=False)
+         layer['activation'] = activation_config['class_name'].replace('quantized_', 'hard_')
+     else:
+         layer['class_name'] = 'Activation'
+         layer['activation'] = activation_config['class_name'].replace('quantized_', '')
+
+     layer['activation_quantizer'] = activation_config
+     return layer, [shape for shape in input_shapes[0]]
+
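Summarizing the translation above for representative activations (defaults as noted in the code; QKeras quantizer on the left, resulting hls4ml layer on the right):

    # quantized_relu(4, 0)  -> 'Activation', activation 'relu'
    # quantized_bits(8, 0)  -> 'Activation', activation 'linear' (passthrough)
    # ternary()             -> 'TernaryTanh', threshold defaulting to 0.33
    # quantized_sigmoid(8)  -> 'HardActivation' (hard_sigmoid, slope = shift = 0.5)
    # quantized_tanh(8)     -> 'HardActivation' (hard_tanh), unless use_real_tanh=True
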
+
+ @keras_handler('QBatchNormalization')
+ def parse_qbatchnorm_layer(keras_layer, input_names, input_shapes, data_reader):
+
+     layer, output_shape = parse_batchnorm_layer(keras_layer, input_names, input_shapes, data_reader)
+
+     layer['mean_quantizer'] = get_quantizer_from_config(keras_layer, 'mean')
+     layer['variance_quantizer'] = get_quantizer_from_config(keras_layer, 'variance')
+     layer['beta_quantizer'] = get_quantizer_from_config(keras_layer, 'beta')
+     layer['gamma_quantizer'] = get_quantizer_from_config(keras_layer, 'gamma')
+
+     return layer, output_shape
+
+
+ @keras_handler('QConv2DBatchnorm')
+ def parse_qconv2dbatchnorm_layer(keras_layer, input_names, input_shapes, data_reader):
+     intermediate_shape = list()
+     conv_layer, shape_qconv = parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader)
+     intermediate_shape.append(shape_qconv)
+     temp_shape = intermediate_shape
+     batch_layer, out_shape = parse_batchnorm_layer(keras_layer, input_names, temp_shape, data_reader)
+     return {**conv_layer, **batch_layer}, out_shape
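With these handlers registered through `@keras_handler`, a trained QKeras model goes through hls4ml's ordinary conversion entry points; a sketch, assuming a QKeras `model` object is in scope (`config_from_keras_model` and `convert_from_keras_model` are hls4ml's public API):

    import hls4ml

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config)
    hls_model.compile()  # build the C++ emulation library for bit-accurate prediction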