from pathlib import Path

import numpy as np
import pytest
from qkeras import QActivation, QBatchNormalization, QConv2D, QDense
from tensorflow.keras.layers import Flatten, Input, MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

import hls4ml

test_root_path = Path(__file__).parent


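# Build a small binary-quantized CNN (QKeras binary kernels and activations) and convert it
# with hls4ml, sweeping the synthesis backend and the io_type of the generated project.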
@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
def test_model2(backend, io_type):
    x_in = Input(shape=(28, 28, 1))

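    # Three conv blocks: binary-kernel QConv2D (no bias) followed by batch norm and a
    # binary activation; the last two blocks downsample with 2x2 max pooling.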
    x = QConv2D(4, (3, 3), kernel_quantizer="binary", name="conv2d_1", kernel_regularizer=l2(0.0001), use_bias=False)(x_in)
    x = QBatchNormalization()(x)
    x = QActivation("binary", name="act1")(x)

    x = QConv2D(8, (3, 3), kernel_quantizer="binary", name="conv2d_2", kernel_regularizer=l2(0.0001), use_bias=False)(x)
    x = QBatchNormalization()(x)
    x = QActivation("binary", name="act2")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = QConv2D(8, (3, 3), kernel_quantizer="binary", name="conv2d_3", kernel_regularizer=l2(0.0001), use_bias=False)(x)
    x = QBatchNormalization()(x)
    x = QActivation("binary", name="act3")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Flatten()(x)

    x = QDense(10, kernel_quantizer="binary", name="q_dense_6", use_bias=False)(x)
    x = QBatchNormalization()(x)
    x = QActivation("binary_tanh", name="act4")(x)

    x = QDense(10, kernel_quantizer="binary", activation="softmax", name="q_dense_7", use_bias=False)(x)

    model2 = Model(inputs=x_in, outputs=x)

    model2.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

    model2.summary()

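    # Generate a per-layer ('name' granularity) hls4ml config so individual layers can be
    # tuned, with the model-level default strategy set to Resource.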
    hls_config = hls4ml.utils.config_from_keras_model(model2, granularity="name")
    hls_config["Model"]["Strategy"] = "Resource"

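    # Override the model-level default: force the Latency strategy on every named layer.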
    print(f"{hls_config['LayerName'].keys()=}")
    for layer in hls_config['LayerName'].keys():
        hls_config['LayerName'][layer]['Strategy'] = "Latency"

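    # Per-layer reuse factors; these values appear to match the number of multiplications
    # in each layer (e.g. 3*3*1*4 = 36 for conv2d_1), i.e. a fully serialized implementation.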
    hls_config["LayerName"]["conv2d_1"]["ReuseFactor"] = 36
    hls_config["LayerName"]["conv2d_2"]["ReuseFactor"] = 288
    hls_config["LayerName"]["conv2d_3"]["ReuseFactor"] = 576
    hls_config["LayerName"]["q_dense_6"]["ReuseFactor"] = 2000
    hls_config["LayerName"]["q_dense_7"]["ReuseFactor"] = 100

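    # Convert the Keras model into an HLS project for the requested backend and io_type,
    # writing the project next to this test file.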
    output_dir = str(test_root_path / f"hls4mlprj_binary_cnn_{backend}_{io_type}")
    hls_model = hls4ml.converters.convert_from_keras_model(
        model2,
        hls_config=hls_config,
        output_dir=output_dir,
        backend=backend,
        io_type=io_type,
    )

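    # Compile the generated project for bit-accurate CPU emulation and run a random input
    # through both the Keras and hls4ml models; the numerical comparison is still disabled (see TODO).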
    X = np.random.rand(1, 28, 28, 1)

    hls_model.compile()
    y = model2.predict(X)  # noqa: F841
    y_hls = hls_model.predict(X)  # noqa: F841

    # TODO: enable the comparison after fixing the remaining issues
    # np.testing.assert_allclose(np.squeeze(y_hls), np.squeeze(y), rtol=1e-2, atol=0.01)