Skip to content

Commit ae98316

Browse files
committed
Address some comments
Signed-off-by: shanjiaz <zsjwpianpian@gmail.com>
1 parent 84c9a50 commit ae98316

File tree

2 files changed

+2
-2
lines changed

2 files changed

+2
-2
lines changed

src/compressed_tensors/compressors/quantized_compressors/base.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,8 @@ def compress(
131131
if name in compressed_param_names:
132132
continue
133133

134-
# omit saving zero points for symmetric quantization
134+
# For symmetric quantization, omit the zero_point manually
135+
# because it wasn't handled in compress_weight
135136
if name.endswith("weight_zero_point"):
136137
module_path = name.rsplit(".", 1)[0]
137138
if (

src/compressed_tensors/compressors/quantized_compressors/fp4_quantized.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,6 @@ def compress_weight(
118118
"NVFP4 quantization requires global_scale (TENSOR_GROUP strategy). "
119119
"Use TENSOR_GROUP strategy instead of GROUP for FP4 quantization."
120120
)
121-
compressed_dict["weight_global_scale"] = global_scale
122121

123122
return compressed_dict
124123

0 commit comments

Comments
 (0)