 
 from hls4ml.converters.pytorch_to_hls import pytorch_handler
 
+# TODO: propagate use_bias info properly
+# https://github.com/fastmachinelearning/hls4ml/issues/409
 @pytorch_handler('Linear')
 def parse_linear_layer(pytorch_layer, layer_name, input_shapes, data_reader, config):
     assert('Linear' in pytorch_layer.__class__.__name__)
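For context, `pytorch_handler` is a registration decorator: the converter keeps a map from PyTorch class names to handler functions and dispatches on `layer.__class__.__name__`. A minimal sketch of that pattern, assuming a plain dict registry (the name `layer_handlers` is illustrative, not necessarily hls4ml's internal name):

# Sketch of the registration pattern, assuming a dict-based registry;
# names here are illustrative, not a copy of hls4ml internals.
layer_handlers = {}

def pytorch_handler(*names):
    def decorator(function):
        for name in names:
            # One handler can serve several layer class names,
            # as parse_activation_layer does below.
            layer_handlers[name] = function
        return function
    return decorator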
@@ -15,6 +17,7 @@ def parse_linear_layer(pytorch_layer, layer_name, input_shapes, data_reader, con
     layer['n_out'] = pytorch_layer.out_features
 
     #Handling whether bias is used or not
+    assert pytorch_layer.bias is not None, "PyTorch Linear with bias=False not yet supported"
     if pytorch_layer.bias is None:
         layer['use_bias'] = False
     else:
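With the new guard, a bias-free Linear layer fails fast at conversion time instead of silently mapping to a dense layer with an unset bias. A quick check of the condition it tests, using plain PyTorch:

import torch.nn as nn

# bias=True (the default) gives a Parameter; bias=False gives None,
# which is exactly what the new assert rejects.
assert nn.Linear(16, 8).bias is not None
assert nn.Linear(16, 8, bias=False).bias is None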
@@ -24,8 +27,10 @@ def parse_linear_layer(pytorch_layer, layer_name, input_shapes, data_reader, con
 
     return layer, output_shape
 
-
-activation_layers = ['LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'ReLU']
+# TODO: propagate parametrized activation parameters
+# https://github.com/fastmachinelearning/hls4ml/issues/409
+# activation_layers = ['LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'ReLU']
+activation_layers = ['Softmax', 'ReLU']
 @pytorch_handler(*activation_layers)
 def parse_activation_layer(pytorch_layer, layer_name, input_shapes, data_reader, config):
 
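The commented-out list marks the remaining work: LeakyReLU, ELU, and PReLU carry parameters (negative_slope, alpha, a learned weight tensor) that the handler would have to forward before they can be re-enabled. A hypothetical sketch of that propagation, where the helper name and the 'activ_param' key are illustrative assumptions rather than confirmed hls4ml API:

# Hypothetical sketch for issue #409; 'activ_param' is an assumed key,
# not confirmed hls4ml API.
def propagate_activation_param(pytorch_layer, layer):
    name = pytorch_layer.__class__.__name__
    if name == 'LeakyReLU':
        # torch.nn.LeakyReLU exposes its slope as .negative_slope
        layer['activ_param'] = pytorch_layer.negative_slope
    elif name == 'ELU':
        # torch.nn.ELU exposes its scale as .alpha
        layer['activ_param'] = pytorch_layer.alpha
    # PReLU's slope is a learned tensor (.weight) and would need the
    # data_reader path rather than a scalar parameter.
    return layer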