
Commit 28c9e09

vixadd, dkroell, bfineran, and markurtz authored

No need to toss conversion because of branching inputs. (#393)

Co-authored-by: dkroell <david.kroell@exptechinc.com>
Co-authored-by: Benjamin Fineran <bfineran@users.noreply.github.com>
Co-authored-by: Mark Kurtz <mark@neuralmagic.com>

1 parent 004ab81 · commit 28c9e09

File tree

1 file changed: +4 −4 lines changed


src/sparseml/onnx/optim/quantization/quantize.py

Lines changed: 4 additions & 4 deletions
@@ -9,9 +9,6 @@
 # neuralmagic: no copyright
 # flake8: noqa
 
-import os
-import struct
-
 import numpy as np
 import onnx
 import onnx.numpy_helper
@@ -1086,6 +1083,10 @@ def _quantize_bias(self, node, new_node_list):
         bias_data = self.find_weight_data(bias_initializer)
         quantized_bias_name = bias_name + "_quantized"
 
+        if bias_name in self.quantized_value_map:
+            print(f"WARNING: {bias_name} already has a quantization value.")
+            return quantized_bias_name
+
         # input scale is not provided and this input is dynamically quantized so it is not pre-computed at this point
         # so resort to dynamic quantization for bias
         if (
@@ -1152,7 +1153,6 @@ def _quantize_bias(self, node, new_node_list):
         )
         self._quantized_weights.append(quantized_bias_entry)
 
-        assert bias_name not in self.quantized_value_map
         quantized_value = QuantizedValue(
             bias_name,
             quantized_bias_name,
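
For context: the deleted `assert` aborted the whole conversion whenever a bias initializer was encountered a second time, which happens when a graph branches and two nodes share the same bias. The added early return instead warns and reuses the already-quantized tensor. A minimal standalone sketch of that caching pattern (hypothetical names, not code from this repository):

quantized_value_map = {}  # maps a bias name -> name of its quantized tensor

def quantize_bias(bias_name):
    quantized_bias_name = bias_name + "_quantized"
    # With branching inputs the same bias can be visited more than once;
    # rather than asserting (and tossing the conversion), warn and reuse
    # the cached result.
    if bias_name in quantized_value_map:
        print(f"WARNING: {bias_name} already has a quantization value.")
        return quantized_bias_name
    # ... compute scale/zero point and quantize the bias data here ...
    quantized_value_map[bias_name] = quantized_bias_name
    return quantized_bias_name

# A bias shared by two branches is quantized once, then reused:
assert quantize_bias("conv1.bias") == quantize_bias("conv1.bias")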

0 commit comments