Skip to content

Commit af587b0

Browse files
committed
Cleanup leftovers in keras_to_hls.py
1 parent c652251 commit af587b0

File tree

2 files changed

+62
-89
lines changed

2 files changed

+62
-89
lines changed

hls4ml/converters/keras_to_hls.py

Lines changed: 8 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
import json
2-
import math
32

43
import h5py
54

@@ -72,24 +71,6 @@ def get_weights_shape(self, layer_name, var_name):
7271
return None
7372

7473

75-
def get_qkeras_quantization(layer, keras_layer):
76-
if not layer['class_name'].startswith('Q'): # Not a QKeras layer, nothing to do
77-
return
78-
kernel_quantizer = keras_layer['config']['kernel_quantizer']['class_name']
79-
bias_quantizer = keras_layer['config']['bias_quantizer']['class_name']
80-
81-
if kernel_quantizer != bias_quantizer:
82-
raise Exception('Mixing quantizers within QKeras layers is not supported')
83-
if kernel_quantizer == 'binary':
84-
layer['quantize'] = 2
85-
elif kernel_quantizer == 'ternary':
86-
layer['quantize'] = 3
87-
else:
88-
raise Exception(
89-
'Unsupported quantizer {} in {} layer {}'.format(kernel_quantizer, layer['class_name'], layer['name'])
90-
)
91-
92-
9374
layer_handlers = {}
9475

9576

@@ -113,6 +94,14 @@ def register_keras_layer_handler(layer_cname, handler_func):
11394

11495

11596
def get_supported_keras_layers():
97+
"""Returns the list of Keras layers that the converter can parse.
98+
99+
The returned list contains all Keras layers that can be parsed into the hls4ml internal representation. Support for
100+
computation of these layers may vary across hls4ml backends and conversion configuration.
101+
102+
Returns:
103+
list: The names of supported Keras layers.
104+
"""
116105
return list(layer_handlers.keys())
117106

118107

@@ -145,76 +134,6 @@ def parse_default_keras_layer(keras_layer, input_names):
145134
return layer
146135

147136

148-
def parse_data_format(input_shape, data_format='channels_last'):
149-
# Ignore batch size
150-
input_shape = input_shape[1:]
151-
152-
if data_format.lower() == 'channels_last':
153-
if len(input_shape) == 2: # 1D, (n_in, n_filt)
154-
return (input_shape[0], input_shape[1])
155-
elif len(input_shape) == 3: # 2D, (in_height, in_width, n_filt)
156-
return (input_shape[0], input_shape[1], input_shape[2])
157-
158-
elif data_format.lower() == 'channels_first':
159-
if len(input_shape) == 2: # 1D, (n_filt, n_in)
160-
return (input_shape[1], input_shape[0])
161-
elif len(input_shape) == 3: # 2D, (n_filt, in_height, in_width)
162-
return (input_shape[1], input_shape[2], input_shape[0])
163-
else:
164-
raise Exception(f'Unknown data format: {data_format}')
165-
166-
167-
def compute_padding_1d(pad_type, in_size, stride, filt_size):
168-
if pad_type.lower() == 'same':
169-
n_out = int(math.ceil(float(in_size) / float(stride)))
170-
if in_size % stride == 0:
171-
pad_along_size = max(filt_size - stride, 0)
172-
else:
173-
pad_along_size = max(filt_size - (in_size % stride), 0)
174-
pad_left = pad_along_size // 2
175-
pad_right = pad_along_size - pad_left
176-
elif pad_type.lower() == 'valid':
177-
n_out = int(math.ceil(float(in_size - filt_size + 1) / float(stride)))
178-
pad_left = 0
179-
pad_right = 0
180-
else:
181-
raise Exception(f'Unknown padding type: {pad_type}')
182-
183-
return (n_out, pad_left, pad_right)
184-
185-
186-
def compute_padding_2d(pad_type, in_height, in_width, stride_height, stride_width, filt_height, filt_width):
187-
if pad_type.lower() == 'same':
188-
# Height
189-
out_height = int(math.ceil(float(in_height) / float(stride_height)))
190-
if in_height % stride_height == 0:
191-
pad_along_height = max(filt_height - stride_height, 0)
192-
else:
193-
pad_along_height = max(filt_height - (in_height % stride_height), 0)
194-
pad_top = pad_along_height // 2
195-
pad_bottom = pad_along_height - pad_top
196-
# Width
197-
out_width = int(math.ceil(float(in_width) / float(stride_width)))
198-
if in_width % stride_width == 0:
199-
pad_along_width = max(filt_width - stride_width, 0)
200-
else:
201-
pad_along_width = max(filt_width - (in_width % stride_width), 0)
202-
pad_left = pad_along_width // 2
203-
pad_right = pad_along_width - pad_left
204-
elif pad_type.lower() == 'valid':
205-
out_height = int(math.ceil(float(in_height - filt_height + 1) / float(stride_height)))
206-
out_width = int(math.ceil(float(in_width - filt_width + 1) / float(stride_width)))
207-
208-
pad_top = 0
209-
pad_bottom = 0
210-
pad_left = 0
211-
pad_right = 0
212-
else:
213-
raise Exception(f'Unknown padding type: {pad_type}')
214-
215-
return (out_height, out_width, pad_top, pad_bottom, pad_left, pad_right)
216-
217-
218137
def get_model_arch(config):
219138
if 'KerasModel' in config:
220139
# Model instance passed in config from API

hls4ml/converters/utils.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,23 @@
22

33

44
def parse_data_format(input_shape, data_format='channels_last'):
5+
"""Parses the given input shape according to the specified data format.
6+
7+
This function can be used to ensure the shapes of convolutional and pooling layers are correctly parsed. If the first
8+
element of the given ``input_shape`` is ``None`` it is interpreted as a batch dimension and discarded. The returned tuple
9+
will have the channels dimension last.
10+
11+
Args:
12+
input_shape (list or tuple): Input shape of 2D or 3D tensor with optional batch dimension of ``None``.
13+
data_format (str, optional): Data format type, one of ``channels_first`` or ``channels_last``. (case insensitive).
14+
Defaults to 'channels_last'.
15+
16+
Raises:
17+
Exception: Raised if the data format type is unknown.
18+
19+
Returns:
20+
tuple: The input shape (without the batch dimension) in ``channels_last`` format.
21+
"""
522
if input_shape[0] is None:
623
# Ignore batch size
724
input_shape = input_shape[1:]
@@ -22,6 +39,23 @@ def parse_data_format(input_shape, data_format='channels_last'):
2239

2340

2441
def compute_padding_1d(pad_type, in_size, stride, filt_size):
42+
"""Computes the amount of padding required on each side of the 1D input tensor.
43+
44+
In case of ``same`` padding, this routine tries to pad evenly left and right, but if the amount of columns to be added
45+
is odd, it will add the extra column to the right.
46+
47+
Args:
48+
pad_type (str): Padding type, one of ``same``, ``valid`` or ``causal`` (case insensitive).
49+
in_size (int): Input size.
50+
stride (int): Stride length.
51+
filt_size (int): Length of the kernel window.
52+
53+
Raises:
54+
Exception: Raised if the padding type is unknown.
55+
56+
Returns:
57+
tuple: Tuple containing the padded input size, left and right padding values.
58+
"""
2559
if pad_type.lower() == 'same':
2660
n_out = int(math.ceil(float(in_size) / float(stride)))
2761
if in_size % stride == 0:
@@ -49,6 +83,26 @@ def compute_padding_1d(pad_type, in_size, stride, filt_size):
4983

5084

5185
def compute_padding_2d(pad_type, in_height, in_width, stride_height, stride_width, filt_height, filt_width):
86+
"""Computes the amount of padding required on each side of the 2D input tensor.
87+
88+
In case of ``same`` padding, this routine tries to pad evenly left and right (top and bottom), but if the amount of
89+
columns to be added is odd, it will add the extra column to the right/bottom.
90+
91+
Args:
92+
pad_type (str): Padding type, one of ``same`` or ``valid`` (case insensitive).
93+
in_height (int): The height of the input tensor.
94+
in_width (int): The width of the input tensor.
95+
stride_height (int): Stride height.
96+
stride_width (int): Stride width.
97+
filt_height (int): Height of the kernel window.
98+
filt_width (int): Width of the kernel window.
99+
100+
Raises:
101+
Exception: Raised if the padding type is unknown.
102+
103+
Returns:
104+
tuple: Tuple containing the padded input height, width, and top, bottom, left and right padding values.
105+
"""
52106
if pad_type.lower() == 'same':
53107
# Height
54108
out_height = int(math.ceil(float(in_height) / float(stride_height)))

0 commit comments

Comments
 (0)