@@ -5,7 +5,7 @@
 
 from hls4ml.backends import FPGABackend
 from hls4ml.backends.fpga.fpga_types import APTypeConverter, HLSTypeConverter, VivadoArrayVariableConverter
-from hls4ml.model.attributes import ChoiceAttribute, ConfigurableAttribute
+from hls4ml.model.attributes import ChoiceAttribute, ConfigurableAttribute, TypeAttribute
 from hls4ml.model.flow import register_flow
 from hls4ml.model.layers import (
     GRU,
@@ -51,6 +51,8 @@ def _register_layer_attributes(self):
             attrs = self.attribute_map.get(layer, [])
             attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
             attrs.append(ConfigurableAttribute('static', value_type=bool, default=True))
+            attrs.append(ConfigurableAttribute('table_size', default=1024))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
             self.attribute_map[layer] = attrs
 
         # Add ParallelizationFactor to Conv1D/2D
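The two added lines register the lookup-table budget and precision as first-class attributes on every RNN layer at backend registration time, instead of each layer optimizer patching them in later (see the deletions in init_lstm/init_gru below). A rough sketch of how these would be read back off a layer instance, assuming TypeAttribute('table') exposes the attribute under the usual '_t' suffix as 'table_t' and that the config flow has already resolved defaults:

    # Sketch only, not part of this diff.
    table_size = layer.get_attr('table_size')  # 1024 unless overridden in the user config
    table_t = layer.get_attr('table_t')        # LUT precision; FixedPrecisionType(18, 8) maps to ap_fixed<18,8>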
@@ -393,46 +395,30 @@ def init_lstm(self, layer):
         reuse_factor = layer.model.config.get_reuse_factor(layer)
         layer.set_attr('recurrent_reuse_factor', reuse_factor)
 
-        index_t = IntegerPrecisionType(width=1, signed=False)
-
-        if 'table_t' not in layer.attributes:
-            layer.set_attr('table_t', FixedPrecisionType(width=18, integer=8))
-        if 'table_size' not in layer.attributes:
-            layer.set_attr('table_size', 1024)
         if layer.model.config.is_resource_strategy(layer):
             n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer)
             self.set_closest_reuse_factor(layer, n_in, n_out)
             self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor')
-            layer.weights['weight'].data = np.transpose(layer.weights['weight'].data)
-            layer.weights['recurrent_weight'].data = np.transpose(layer.weights['recurrent_weight'].data)
             layer.set_attr('strategy', 'resource')
         else:
             layer.set_attr('strategy', 'latency')
 
-        layer.set_attr('index_t', index_t)
+        layer.set_attr('index_t', NamedType(f'layer{layer.index}_index', IntegerPrecisionType(width=1, signed=False)))
 
     @layer_optimizer(GRU)
     def init_gru(self, layer):
         reuse_factor = layer.model.config.get_reuse_factor(layer)
         layer.set_attr('recurrent_reuse_factor', reuse_factor)
 
-        index_t = IntegerPrecisionType(width=1, signed=False)
-
-        if 'table_t' not in layer.attributes:
-            layer.set_attr('table_t', FixedPrecisionType(width=18, integer=8))
-        if 'table_size' not in layer.attributes:
-            layer.set_attr('table_size', 1024)
         if layer.model.config.is_resource_strategy(layer):
             n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer)
             self.set_closest_reuse_factor(layer, n_in, n_out)
             self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor')
-            layer.weights['weight'].data = np.transpose(layer.weights['weight'].data)
-            layer.weights['recurrent_weight'].data = np.transpose(layer.weights['recurrent_weight'].data)
             layer.set_attr('strategy', 'resource')
         else:
             layer.set_attr('strategy', 'latency')
 
-        layer.set_attr('index_t', index_t)
+        layer.set_attr('index_t', NamedType(f'layer{layer.index}_index', IntegerPrecisionType(width=1, signed=False)))
 
     @layer_optimizer(GarNet)
     def init_garnet(self, layer):
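Both init_lstm and init_gru now wrap the 1-bit index type in a NamedType keyed to the layer index, so the emitted HLS gets a per-layer typedef rather than an anonymous precision. A minimal sketch of what that produces, with the generated C++ guessed from hls4ml's usual unsigned-integer mapping:

    # Sketch only, not part of this diff. NamedType pairs a typedef name with a
    # precision; the layer index makes the name unique per layer (e.g. 'layer4_index').
    index_t = NamedType(f'layer{layer.index}_index', IntegerPrecisionType(width=1, signed=False))
    # The Vivado writer would then emit something like:
    #     typedef ap_uint<1> layer4_index;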