From 01a73969d23006dba09fb97ae2425654755d3349 Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Wed, 14 Jan 2026 10:00:38 +0800 Subject: [PATCH 1/8] =?UTF-8?q?=E6=94=AF=E6=8C=81nv=20w8=201batch=201tp?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- csrc/layers/fused_linear.cpp | 125 ++++++++++++++++-- csrc/layers/fused_linear.hpp | 82 +++++++++++- csrc/models/llama/llama_attention.cpp | 28 ++-- csrc/models/llama/llama_config.hpp | 10 +- csrc/models/llama/llama_mlp.cpp | 23 +++- examples/jiuge.py | 6 +- python/infinilm/modeling_utils.py | 6 +- .../models/llama/configuration_llama.py | 12 +- python/infinilm/models/quant_config.py | 110 +++++++++++++++ 9 files changed, 360 insertions(+), 42 deletions(-) create mode 100644 python/infinilm/models/quant_config.py diff --git a/csrc/layers/fused_linear.cpp b/csrc/layers/fused_linear.cpp index 9b2c813d..7f5ec364 100644 --- a/csrc/layers/fused_linear.cpp +++ b/csrc/layers/fused_linear.cpp @@ -6,6 +6,57 @@ namespace infinilm::layers { // --------------------------------------------------------- // QKV Parallel Linear // --------------------------------------------------------- +// QKVParallelLinear::QKVParallelLinear(size_t hidden_size, +// size_t head_dim, +// size_t num_q_head, +// size_t num_kv_head, +// bool bias, +// const infinicore::DataType &dtype, +// const infinicore::Device &device, +// engine::distributed::RankInfo rank_info) +// : QKVParallelLinear(hidden_size, +// head_dim, head_dim, head_dim, +// num_q_head, num_kv_head, num_kv_head, +// bias, bias, bias, +// dtype, device, rank_info) {} + +// QKVParallelLinear::QKVParallelLinear(size_t hidden_size, +// size_t q_dim, size_t k_dim, size_t v_dim, +// size_t num_q_head, size_t num_k_head, size_t num_v_head, +// bool q_bias, bool k_bias, bool v_bias, +// const infinicore::DataType &dtype, +// const infinicore::Device &device, +// engine::distributed::RankInfo rank_info) +// : infinicore::nn::ColumnParallelLinear( +// hidden_size, +// num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, +// (q_bias || k_bias || v_bias), +// dtype, +// device, +// rank_info.tp_rank, +// rank_info.tp_size), +// q_dim_(q_dim), +// k_dim_(k_dim), +// v_dim_(v_dim), +// num_q_head_(num_q_head), +// num_k_head_(num_k_head), +// num_v_head_(num_v_head), +// q_bias_(q_bias), +// k_bias_(k_bias), +// v_bias_(v_bias) { +// if (num_q_head % tp_size_ != 0 || num_k_head % tp_size_ != 0 || num_v_head % tp_size_ != 0) { +// throw std::runtime_error("QKVParallelLinear: num_[q|k|v]_head must be divisible by tp_size"); +// } + +// if ((q_bias_ != k_bias_) || (k_bias_ != v_bias_)) { +// throw std::runtime_error("q_bias, k_bias, v_bias must all match"); +// } + +// q_out_size_ = num_q_head_ * q_dim_ / tp_size_; +// k_out_size_ = num_k_head_ * k_dim_ / tp_size_; +// v_out_size_ = num_v_head_ * v_dim_ / tp_size_; +// } + QKVParallelLinear::QKVParallelLinear(size_t hidden_size, size_t head_dim, size_t num_q_head, @@ -13,12 +64,14 @@ QKVParallelLinear::QKVParallelLinear(size_t hidden_size, bool bias, const infinicore::DataType &dtype, const infinicore::Device &device, - engine::distributed::RankInfo rank_info) + engine::distributed::RankInfo rank_info, + std::optional quant_config) : QKVParallelLinear(hidden_size, head_dim, head_dim, head_dim, num_q_head, num_kv_head, num_kv_head, bias, bias, bias, - dtype, device, rank_info) {} + dtype, device, rank_info, + quant_config) {} QKVParallelLinear::QKVParallelLinear(size_t hidden_size, size_t q_dim, size_t k_dim, size_t v_dim, @@ 
-26,15 +79,17 @@ QKVParallelLinear::QKVParallelLinear(size_t hidden_size, bool q_bias, bool k_bias, bool v_bias, const infinicore::DataType &dtype, const infinicore::Device &device, - engine::distributed::RankInfo rank_info) + engine::distributed::RankInfo rank_info, + std::optional quant_config) : infinicore::nn::ColumnParallelLinear( - hidden_size, - num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, - (q_bias || k_bias || v_bias), - dtype, - device, - rank_info.tp_rank, - rank_info.tp_size), + hidden_size, + num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, + (q_bias || k_bias || v_bias), + dtype, + device, + rank_info.tp_rank, + rank_info.tp_size, + quant_config), q_dim_(q_dim), k_dim_(k_dim), v_dim_(v_dim), @@ -86,6 +141,23 @@ infinicore::nn::Parameter QKVParallelLinear::get_v_weight() const { 0, tp_rank_, tp_size_); } +infinicore::nn::Parameter QKVParallelLinear::get_q_weight_scale() const { + return infinicore::nn::Parameter( + weight_scale_->narrow({{0, 0, q_out_size_}}), 0, tp_rank_, tp_size_); +} + +infinicore::nn::Parameter QKVParallelLinear::get_k_weight_scale() const { + return infinicore::nn::Parameter( + weight_scale_->narrow({{0, q_out_size_, k_out_size_}}), + 0, tp_rank_, tp_size_); +} + +infinicore::nn::Parameter QKVParallelLinear::get_v_weight_scale() const { + return infinicore::nn::Parameter( + weight_scale_->narrow({{0, q_out_size_ + k_out_size_, v_out_size_}}), + 0, tp_rank_, tp_size_); +} + infinicore::nn::Parameter QKVParallelLinear::get_q_bias() const { if (!q_bias_) { return infinicore::nn::Parameter(); @@ -120,16 +192,33 @@ bool QKVParallelLinear::has_v_bias() const { return v_bias_; } // --------------------------------------------------------- // Gate-Up Parallel Linear // --------------------------------------------------------- +// GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias, +// const infinicore::DataType &dtype, const infinicore::Device &device, +// engine::distributed::RankInfo rank_info) +// : GateUpParallelLinear(hidden_size, intermediate_size, bias, bias, dtype, device, rank_info) { +// } + +// GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, +// const infinicore::DataType &dtype, const infinicore::Device &device, +// engine::distributed::RankInfo rank_info) +// : infinicore::nn::ColumnParallelLinear(hidden_size, intermediate_size * 2, gate_bias || up_bias, dtype, device, rank_info.tp_rank, rank_info.tp_size), gate_bias_(gate_bias), up_bias_(up_bias) { +// if (gate_bias_ != up_bias_) { +// throw std::runtime_error("Not supported yet: gate_bias and up_bias should be given at the same time"); +// } +// } + GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias, const infinicore::DataType &dtype, const infinicore::Device &device, - engine::distributed::RankInfo rank_info) - : GateUpParallelLinear(hidden_size, intermediate_size, bias, bias, dtype, device, rank_info) { + engine::distributed::RankInfo rank_info, + std::optional quant_config) + : GateUpParallelLinear(hidden_size, intermediate_size, bias, bias, dtype, device, rank_info, quant_config) { } GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, const infinicore::DataType &dtype, const infinicore::Device &device, - engine::distributed::RankInfo rank_info) - : infinicore::nn::ColumnParallelLinear(hidden_size, intermediate_size * 2, gate_bias || 
up_bias, dtype, device, rank_info.tp_rank, rank_info.tp_size), gate_bias_(gate_bias), up_bias_(up_bias) { + engine::distributed::RankInfo rank_info, + std::optional quant_config) + : infinicore::nn::ColumnParallelLinear(hidden_size, intermediate_size * 2, gate_bias || up_bias, dtype, device, rank_info.tp_rank, rank_info.tp_size, quant_config), gate_bias_(gate_bias), up_bias_(up_bias) { if (gate_bias_ != up_bias_) { throw std::runtime_error("Not supported yet: gate_bias and up_bias should be given at the same time"); } @@ -168,6 +257,14 @@ infinicore::nn::Parameter GateUpParallelLinear::get_up_bias() const { } } +infinicore::nn::Parameter GateUpParallelLinear::get_gate_weight_scale() const { + return infinicore::nn::Parameter(weight_scale_->narrow({{0, 0, weight_scale_->size(0) / 2}}), 0, tp_rank_, tp_size_); +} + +infinicore::nn::Parameter GateUpParallelLinear::get_up_weight_scale() const { + return infinicore::nn::Parameter(weight_scale_->narrow({{0, weight_scale_->size(0) / 2, weight_scale_->size(0) / 2}}), 0, tp_rank_, tp_size_); +} + bool GateUpParallelLinear::has_gate_bias() const { return gate_bias_; } diff --git a/csrc/layers/fused_linear.hpp b/csrc/layers/fused_linear.hpp index 1e32ce50..8bde20d8 100644 --- a/csrc/layers/fused_linear.hpp +++ b/csrc/layers/fused_linear.hpp @@ -1,18 +1,37 @@ #pragma once #include "infinicore/nn/linear.hpp" +#include "infinicore/nn/quantization.hpp" #include "../engine/distributed/communication_group.hpp" namespace infinilm::layers { class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { public: + // explicit QKVParallelLinear(size_t hidden_size, + // size_t q_dim, size_t k_dim, size_t v_dim, + // size_t num_q_head, size_t num_k_head, size_t num_v_head, + // bool q_bias, bool k_bias, bool v_bias, + // const infinicore::DataType &dtype = infinicore::DataType::F32, + // const infinicore::Device &device = infinicore::Device(), + // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + + // // A more common case where all heads have the same dimension + // explicit QKVParallelLinear(size_t hidden_size, + // size_t head_dim, + // size_t num_q_head, size_t num_kv_head, + // bool bias = false, + // const infinicore::DataType &dtype = infinicore::DataType::F32, + // const infinicore::Device &device = infinicore::Device(), + // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + explicit QKVParallelLinear(size_t hidden_size, size_t q_dim, size_t k_dim, size_t v_dim, size_t num_q_head, size_t num_k_head, size_t num_v_head, bool q_bias, bool k_bias, bool v_bias, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::optional quant_config = std::nullopt); // A more common case where all heads have the same dimension explicit QKVParallelLinear(size_t hidden_size, @@ -21,7 +40,8 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { bool bias = false, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::optional quant_config = std::nullopt); std::tuple forward_split(infinicore::Tensor &input); @@ -30,6 +50,10 @@ class 
QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { infinicore::nn::Parameter get_k_weight() const; infinicore::nn::Parameter get_v_weight() const; + infinicore::nn::Parameter get_q_weight_scale() const; + infinicore::nn::Parameter get_k_weight_scale() const; + infinicore::nn::Parameter get_v_weight_scale() const; + infinicore::nn::Parameter get_q_bias() const; infinicore::nn::Parameter get_k_bias() const; infinicore::nn::Parameter get_v_bias() const; @@ -55,22 +79,37 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { public: + // GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias = false, + // const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), + // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + + // GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, + // const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), + // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + + // Overload for quantization, old ones need tobe purged GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias = false, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::optional quant_config = std::nullopt); GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::optional quant_config = std::nullopt); std::tuple forward_split(infinicore::Tensor &input); infinicore::nn::Parameter get_gate_weight() const; + infinicore::nn::Parameter get_gate_weight_scale() const; + infinicore::nn::Parameter get_gate_bias() const; infinicore::nn::Parameter get_up_weight() const; + infinicore::nn::Parameter get_up_weight_scale() const; + infinicore::nn::Parameter get_up_bias() const; bool has_gate_bias() const; @@ -103,4 +142,39 @@ class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { if (name##_->has_up_bias()) \ this->register_parameter(std::string(up_name) + ".bias", name##_->get_up_bias()); +// ========================= QKV 量化 ================================== +#define INFINILM_QKV_LINEAR_W8A8_INIT(name, q_name, k_name, v_name, ...) 
\ + name##_ = std::make_shared(__VA_ARGS__); \ + /* 注册 Q 权重 */ \ + this->register_parameter(std::string(q_name) + ".weight", name##_->get_q_weight()); \ + this->register_parameter(std::string(q_name) + ".weight_scale", name##_->get_q_weight_scale()); \ + /* 注册 K 权重 */ \ + this->register_parameter(std::string(k_name) + ".weight", name##_->get_k_weight()); \ + this->register_parameter(std::string(k_name) + ".weight_scale", name##_->get_k_weight_scale()); \ + /* 注册 V 权重 */ \ + this->register_parameter(std::string(v_name) + ".weight", name##_->get_v_weight()); \ + this->register_parameter(std::string(v_name) + ".weight_scale", name##_->get_v_weight_scale()); \ + /* bias 保持原样 */ \ + if (name##_->has_q_bias()) \ + this->register_parameter(std::string(q_name) + ".bias", name##_->get_q_bias()); \ + if (name##_->has_k_bias()) \ + this->register_parameter(std::string(k_name) + ".bias", name##_->get_k_bias()); \ + if (name##_->has_v_bias()) \ + this->register_parameter(std::string(v_name) + ".bias", name##_->get_v_bias()); + +// ========================= Gate-Up 量化 ============================== +#define INFINILM_GATE_UP_LINEAR_W8A8_INIT(name, gate_name, up_name, ...) \ + name##_ = std::make_shared(__VA_ARGS__); \ + /* 注册 Gate 权重 */ \ + this->register_parameter(std::string(gate_name) + ".weight", name##_->get_gate_weight()); \ + this->register_parameter(std::string(gate_name) + ".weight_scale", name##_->get_gate_weight_scale()); \ + /* 注册 Up 权重 */ \ + this->register_parameter(std::string(up_name) + ".weight", name##_->get_up_weight()); \ + this->register_parameter(std::string(up_name) + ".weight_scale", name##_->get_up_weight_scale()); \ + /* bias 保持原样 */ \ + if (name##_->has_gate_bias()) \ + this->register_parameter(std::string(gate_name) + ".bias", name##_->get_gate_bias()); \ + if (name##_->has_up_bias()) \ + this->register_parameter(std::string(up_name) + ".bias", name##_->get_up_bias()); + } // namespace infinilm::layers diff --git a/csrc/models/llama/llama_attention.cpp b/csrc/models/llama/llama_attention.cpp index c78040e2..9707db7c 100644 --- a/csrc/models/llama/llama_attention.cpp +++ b/csrc/models/llama/llama_attention.cpp @@ -48,16 +48,26 @@ LlamaAttention::LlamaAttention(const LlamaConfig &config, scaling_ = 1.0f / std::sqrt(static_cast(head_dim_)); // Initialize projection layers - INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, - dtype, device, rank_info); - // Output projection uses attention_output_bias (can be different from qkv) - INFINICORE_NN_MODULE_INIT(o_proj, num_attention_heads * head_dim_, hidden_size_, use_output_bias_, - dtype, device, tp_rank, tp_size, rank_info.comm); + if (!config.quant_config.has_value()) { + INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, + dtype, device, rank_info); + // Output projection uses attention_output_bias (can be different from qkv) + INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm); - // Initialize qk RMSNorm - if (use_qk_norm_) { - INFINICORE_NN_MODULE_INIT(q_norm, head_dim_, config.rms_norm_eps, dtype, device); - INFINICORE_NN_MODULE_INIT(k_norm, head_dim_, config.rms_norm_eps, dtype, device); + } else { + switch (config.quant_config.value().get_quant_type()) { + case infinicore::nn::QuantType::COMPRESSED_TENSOR: { + 
INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, + dtype, device, rank_info, config.quant_config.value()); + + INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm, config.quant_config.value()); + break; + } + default: { + } + } } } diff --git a/csrc/models/llama/llama_config.hpp b/csrc/models/llama/llama_config.hpp index 59108546..b0fb892e 100644 --- a/csrc/models/llama/llama_config.hpp +++ b/csrc/models/llama/llama_config.hpp @@ -6,6 +6,7 @@ #include #include "../infinilm_model.hpp" +#include "infinicore/nn/quantization.hpp" #include @@ -36,8 +37,6 @@ struct LlamaConfig : public InfinilmModel::Config { size_t max_position_embeddings = 2048; // Maximum sequence length double rope_theta = 10000.0; // RoPE base frequency - std::shared_ptr rope_scaling = nullptr; // RoPE scaling type - // Normalization double rms_norm_eps = 1e-6; // RMSNorm epsilon @@ -66,11 +65,16 @@ struct LlamaConfig : public InfinilmModel::Config { std::vector bos_token_id = {1}; // Beginning of sequence token ID(s) std::vector eos_token_id = {2}; // End of sequence token ID(s) + // Quant Config + // std::optional quant_config = std::nullopt; + std::optional quant_config = infinicore::nn::QuantConfig(infinicore::nn::QuantType::COMPRESSED_TENSOR); + /** * @brief Compute key-value dimension for Grouped Query Attention (GQA) * @return The dimension for key/value projections */ - size_t kv_dim() const { + size_t + kv_dim() const { return hidden_size * num_key_value_heads / num_attention_heads; } diff --git a/csrc/models/llama/llama_mlp.cpp b/csrc/models/llama/llama_mlp.cpp index fc7abd69..2cc48ccb 100644 --- a/csrc/models/llama/llama_mlp.cpp +++ b/csrc/models/llama/llama_mlp.cpp @@ -1,6 +1,7 @@ #include "llama_mlp.hpp" #include "infinicore/nn/linear.hpp" #include "infinicore/ops.hpp" +#include namespace infinilm::models::llama { @@ -16,10 +17,24 @@ LlamaMLP::LlamaMLP(const LlamaConfig &config, int tp_size = rank_info.tp_size; // Initialize projection layers - INFINILM_GATE_UP_LINEAR_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, use_bias_, - dtype, device, rank_info_); - INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, - dtype, device, tp_rank, tp_size, rank_info.comm); + if (!config.quant_config.has_value()) { + INFINILM_GATE_UP_LINEAR_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, use_bias_, + dtype, device, rank_info_); + INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm); + } else { + switch (config.quant_config.value().get_quant_type()) { + case infinicore::nn::QuantType::COMPRESSED_TENSOR: { + INFINILM_GATE_UP_LINEAR_W8A8_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, use_bias_, + dtype, device, rank_info_, config.quant_config.value()); + INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm, config.quant_config.value()); + break; + } + default: { + } + } + } } infinicore::Tensor LlamaMLP::forward(const infinicore::Tensor &hidden_states) const { diff --git a/examples/jiuge.py b/examples/jiuge.py index c1ad567e..8043ebbb 100644 --- a/examples/jiuge.py +++ b/examples/jiuge.py @@ -56,7 +56,7 @@ def get_args(): parser.add_argument( "--max_new_tokens", 
type=int, - default=100, + default=1000, help="max_new_tokens", ) parser.add_argument( @@ -95,7 +95,7 @@ def get_args(): def test( prompts: str | list[str], model_path, - max_new_tokens=100, + max_new_tokens=5000, infini_device=infinicore.device("cpu", 0), tp=1, enable_paged_attn=False, @@ -109,7 +109,6 @@ def test( device=infini_device, distributed_config=DistConfig(tp), ) - # ---------------------------------------------------------------------------- # # Load Weights # ---------------------------------------------------------------------------- # @@ -119,7 +118,6 @@ def test( # create tokenizer # ---------------------------------------------------------------------------- # tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) - if "llama" == model.config.model_type: backend = getattr(tokenizer, "backend_tokenizer", None) target = getattr(backend, "_tokenizer", backend) diff --git a/python/infinilm/modeling_utils.py b/python/infinilm/modeling_utils.py index 792aa503..a8d987ca 100644 --- a/python/infinilm/modeling_utils.py +++ b/python/infinilm/modeling_utils.py @@ -75,7 +75,8 @@ def load_state_dict( ) for k in f.keys(): - state_dict[k] = f.get_tensor(k).to(device=device, dtype=dtype) + # state_dict[k] = f.get_tensor(k).to(device=device, dtype=dtype) + state_dict[k] = f.get_tensor(k).to(device=device) return state_dict @@ -147,6 +148,7 @@ def load_model_state_dict_by_file( model_param = load_state_dict( file_path, device=torch_device, dtype=torch_dtype ) + already_loaded_keys.extend(model_param.keys()) # --------------------------------------------------------- # @@ -155,7 +157,6 @@ def load_model_state_dict_by_file( model_param_infini = {} for key in model_param.keys(): model_param_infini[key] = infinicore.from_torch(model_param[key]) - model.load_state_dict(model_param_infini, strict=False) infinicore.sync_device() @@ -168,7 +169,6 @@ def load_model_state_dict_by_file( model_param_infini[key] = infinicore.from_torch( model_params[key].to(dtype=torch_dtype) ) - already_loaded_keys.append(key) model.load_state_dict(model_param_infini, strict=True) diff --git a/python/infinilm/models/llama/configuration_llama.py b/python/infinilm/models/llama/configuration_llama.py index 15776c84..f893c5cf 100644 --- a/python/infinilm/models/llama/configuration_llama.py +++ b/python/infinilm/models/llama/configuration_llama.py @@ -15,12 +15,13 @@ """LLaMA model configuration""" +from typing import Optional import infinicore from infinilm.lib import _infinilm from ...configuration_utils import PretrainedConfig - +from ..quant_config import parse_quant_config, QuantizationConfig class LlamaConfig(PretrainedConfig, _infinilm.LlamaConfig): r""" @@ -182,6 +183,7 @@ def __init__( mlp_bias=False, head_dim=None, torch_dtype=None, + quantization_config=None, **kwargs, ): _infinilm.LlamaConfig.__init__(self) @@ -245,3 +247,11 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + if isinstance(quantization_config, dict): + self.quantization_config: Optional[QuantizationConfig] = parse_quant_config(quantization_config) + self.quantization_config_dict = quantization_config + else: + self.quantization_config = None + self.quantization_config_dict = None + diff --git a/python/infinilm/models/quant_config.py b/python/infinilm/models/quant_config.py new file mode 100644 index 00000000..9e8ea0bf --- /dev/null +++ b/python/infinilm/models/quant_config.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# Copyright (c) 2025, InfiniCore +# BSD 3-Clause License + +from abc import ABC, 
abstractmethod +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Any, Type + +# ---------------- 抽象层 ---------------- +class QuantizationConfig(ABC): + """InfiniCore 量化统一入口,C++ 或 Python 侧都只认这四个接口。""" + @abstractmethod + def get_name(self) -> str: ... + @abstractmethod + def get_min_capability(self) -> int: ... + @abstractmethod + def get_scaled_act_names(self) -> List[str]: ... + @abstractmethod + def get_quant_method(self) -> str: + """返回算法名,供 C++ dispatcher 用。""" + ... + +# ---------------- 数据类 ---------------- +@dataclass +class CompressedTensorsConfig(QuantizationConfig): + """对应 HF compressed-tensors 导出格式。""" + quant_method: str = "compressed-tensors" + format: str = "int-quantized" + quantization_status: str = "compressed" + version: str = "0.11.0" + global_compression_ratio: Optional[float] = None + ignore: List[str] = field(default_factory=lambda: ["lm_head"]) + kv_cache_scheme: Optional[Dict[str, Any]] = None + sparsity_config: Dict[str, Any] = field(default_factory=dict) + transform_config: Dict[str, Any] = field(default_factory=dict) + config_groups: Dict[str, "Group"] = field(default_factory=dict) + + @dataclass + class TensorConfig: + num_bits: int + type: str + symmetric: bool + dynamic: bool + strategy: str + observer: Optional[str] = None + observer_kwargs: Dict[str, Any] = field(default_factory=dict) + group_size: Optional[int] = None + block_structure: Optional[str] = None + actorder: Optional[Any] = None + + @dataclass + class Group: + targets: List[str] + weights: "CompressedTensorsConfig.TensorConfig" + input_activations: Optional["CompressedTensorsConfig.TensorConfig"] = None + output_activations: Optional["CompressedTensorsConfig.TensorConfig"] = None + format: str = "int-quantized" + + @staticmethod + def from_dict(cfg: Dict[str, Any]) -> "CompressedTensorsConfig": + def _build_tensor(obj: Optional[Dict[str, Any]]) -> Optional["CompressedTensorsConfig.TensorConfig"]: + return None if obj is None else CompressedTensorsConfig.TensorConfig(**obj) + + groups = {} + for gname, gcfg in cfg.get("config_groups", {}).items(): + groups[gname] = CompressedTensorsConfig.Group( + targets=gcfg["targets"], + weights=_build_tensor(gcfg["weights"]), + input_activations=_build_tensor(gcfg.get("input_activations")), + output_activations=_build_tensor(gcfg.get("output_activations")), + format=gcfg.get("format", "int-quantized"), + ) + return CompressedTensorsConfig( + quant_method=cfg["quant_method"], + format=cfg["format"], + quantization_status=cfg["quantization_status"], + version=cfg["version"], + global_compression_ratio=cfg.get("global_compression_ratio"), + ignore=cfg.get("ignore", ["lm_head"]), + kv_cache_scheme=cfg.get("kv_cache_scheme"), + sparsity_config=cfg.get("sparsity_config", {}), + transform_config=cfg.get("transform_config", {}), + config_groups=groups, + ) + + def get_name(self) -> str: + return self.quant_method + + def get_min_capability(self) -> int: + return 75 + + def get_scaled_act_names(self) -> List[str]: + return [] + + def get_quant_method(self) -> str: + return self.quant_method + + +_QUANT_METHOD_MAP: Dict[str, Type[QuantizationConfig]] = { + "compressed-tensors": CompressedTensorsConfig, +} + +def parse_quant_config(quant_cfg: Dict[str, Any]) -> Optional[QuantizationConfig]: + """统一解析入口,供 LlamaConfig 调用。""" + method = quant_cfg.get("quant_method") + cls = _QUANT_METHOD_MAP.get(method) + if cls is None: + return None + + return cls.from_dict(quant_cfg) \ No newline at end of file From 
9e27583242a63b821332e1962d207fd6ebf48e86 Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Fri, 16 Jan 2026 18:33:16 +0800 Subject: [PATCH 2/8] =?UTF-8?q?=E5=A2=9E=E5=8A=A0json=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitmodules | 3 +++ csrc/config/global_config.hpp | 31 ++++++++++++++++++++++++++++++ csrc/config/quant_config.hpp | 17 ++++++++++++++++ csrc/engine/infer_engine.cpp | 9 +++++++-- csrc/engine/infer_engine.hpp | 5 ++++- csrc/engine/rank_worker.cpp | 4 +++- csrc/engine/rank_worker.hpp | 5 ++++- csrc/models/infinilm_model.hpp | 6 +++--- csrc/models/llama/llama.hpp | 7 ++++--- csrc/models/llama/llama_config.hpp | 5 +++++ csrc/pybind11/engine/engine.hpp | 25 ++++++++++-------------- python/infinilm/infer_engine.py | 1 + third_party/json | 1 + xmake.lua | 1 + 14 files changed, 94 insertions(+), 26 deletions(-) create mode 100644 csrc/config/global_config.hpp create mode 100644 csrc/config/quant_config.hpp create mode 160000 third_party/json diff --git a/.gitmodules b/.gitmodules index eab6041a..ade5ff58 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "third_party/spdlog"] path = third_party/spdlog url = https://github.com/gabime/spdlog.git +[submodule "third_party/json"] + path = third_party/json + url = https://github.com/nlohmann/json.git diff --git a/csrc/config/global_config.hpp b/csrc/config/global_config.hpp new file mode 100644 index 00000000..d04c1c94 --- /dev/null +++ b/csrc/config/global_config.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "quant_config.hpp" +#include +#include +#include + +namespace infinilm::config::global_config { +struct GlobalConfig { + // Quantization configuration +public: + infinilm::config::quantization::QuantConfig get_quant_config_json() const { + return infinilm::config::quantization::QuantConfig(config_json.value("quantization_config", nlohmann::json::object())).to_json(); + } + + GlobalConfig() = default; + GlobalConfig(const nlohmann::json &json) : config_json(json) {}; + GlobalConfig(const std::string &path) { + std::ifstream file(path); + if (file.is_open()) { + file >> config_json; + file.close(); + } else { + throw std::runtime_error("Could not open config file: " + path); + } + } + +private: + nlohmann::json config_json; +}; +} // namespace infinilm::config::global_config \ No newline at end of file diff --git a/csrc/config/quant_config.hpp b/csrc/config/quant_config.hpp new file mode 100644 index 00000000..fa9e01f4 --- /dev/null +++ b/csrc/config/quant_config.hpp @@ -0,0 +1,17 @@ +#pragma once + +#include "nlohmann/json.hpp" + +namespace infinilm::config::quantization { + +struct QuantConfig { + nlohmann::json quantization_config; + + QuantConfig() = default; + QuantConfig(const nlohmann::json &json) : quantization_config(json) {}; + nlohmann::json to_json() const { + return quantization_config; + } +}; + +} // namespace infinilm::config::quantization \ No newline at end of file diff --git a/csrc/engine/infer_engine.cpp b/csrc/engine/infer_engine.cpp index 482117c0..f631fff1 100644 --- a/csrc/engine/infer_engine.cpp +++ b/csrc/engine/infer_engine.cpp @@ -10,13 +10,17 @@ InferEngine::InferEngine( const InfinilmModel::Config &config, const distributed::DistConfig &distributed_config, infinicore::Device::Type device_type, - const cache::CacheConfig *cache_config) // Changed parameter + const cache::CacheConfig *cache_config, + const std::string &model_path) // Changed parameter : communication_group_(distributed_config, device_type), 
model_config_(config) { if (cache_config != nullptr) { cache_config_ = cache_config->unique_copy(); } + if (!model_path.empty()) { + global_config_ = infinilm::config::global_config::GlobalConfig(model_path + "/config.json"); + } // Create one RankWorker per rank int world_size = communication_group_.get_world_size(); workers_.reserve(world_size); @@ -24,7 +28,8 @@ InferEngine::InferEngine( workers_.emplace_back(std::make_unique( model_config_, communication_group_.get_rank_info(r), - cache_config_ != nullptr ? cache_config_.get() : nullptr)); + cache_config_ != nullptr ? cache_config_.get() : nullptr, + global_config_)); } } diff --git a/csrc/engine/infer_engine.hpp b/csrc/engine/infer_engine.hpp index 315e1c7c..1188af28 100644 --- a/csrc/engine/infer_engine.hpp +++ b/csrc/engine/infer_engine.hpp @@ -1,5 +1,6 @@ #pragma once +#include "../config/global_config.hpp" #include "../models/infinilm_model.hpp" #include "../models/llama/llama_config.hpp" #include "distributed/distributed.hpp" @@ -22,7 +23,8 @@ class InferEngine { const InfinilmModel::Config &config, const distributed::DistConfig &distributed_config = distributed::DistConfig(), infinicore::Device::Type device_type = infinicore::context::getDevice().getType(), - const cache::CacheConfig *cache_config = nullptr); + const cache::CacheConfig *cache_config = nullptr, + const std::string &modle_path = ""); // Load a parameter to all workers (each can extract its shard inside RankWorker) void load_param(const std::string &name, const infinicore::Tensor ¶m); @@ -47,6 +49,7 @@ class InferEngine { distributed::CommunicationGroup communication_group_; const InfinilmModel::Config &model_config_; std::unique_ptr cache_config_; + infinilm::config::global_config::GlobalConfig global_config_; }; } // namespace infinilm::engine diff --git a/csrc/engine/rank_worker.cpp b/csrc/engine/rank_worker.cpp index 003fb265..9dbbeed3 100644 --- a/csrc/engine/rank_worker.cpp +++ b/csrc/engine/rank_worker.cpp @@ -12,12 +12,14 @@ namespace infinilm::engine { RankWorker::RankWorker(const InfinilmModel::Config &model_config, const distributed::RankInfo &rank_info, - const cache::CacheConfig *cache_config) + const cache::CacheConfig *cache_config, + const infinilm::config::global_config::GlobalConfig &global_config) : model_config_(model_config), rank_info_(rank_info), job_cmd_(Command::INIT), has_job_(false), job_done_(false), + global_config_(global_config), should_exit_(false), init_done_(false) { if (cache_config != nullptr) { diff --git a/csrc/engine/rank_worker.hpp b/csrc/engine/rank_worker.hpp index 98bb4b87..27ed6efa 100644 --- a/csrc/engine/rank_worker.hpp +++ b/csrc/engine/rank_worker.hpp @@ -1,6 +1,7 @@ #pragma once #include "../cache/cache.hpp" +#include "../config/global_config.hpp" #include "../models/model_factory.hpp" #include "distributed/distributed.hpp" @@ -56,7 +57,8 @@ class RankWorker { RankWorker(const InfinilmModel::Config &model_config, const distributed::RankInfo &rank_info, - const cache::CacheConfig *cache_config); + const cache::CacheConfig *cache_config, + const infinilm::config::global_config::GlobalConfig &global_config); // Submit a parameter load job and wait until the load completes on the worker thread. 
void load_param(const std::string &name, @@ -91,6 +93,7 @@ class RankWorker { distributed::RankInfo rank_info_; std::shared_ptr model_; std::shared_ptr cache_; + const infinilm::config::global_config::GlobalConfig &global_config_; // Command for the pending job (protected by mutex_) Command job_cmd_; diff --git a/csrc/models/infinilm_model.hpp b/csrc/models/infinilm_model.hpp index 4cad3b6c..9752d949 100644 --- a/csrc/models/infinilm_model.hpp +++ b/csrc/models/infinilm_model.hpp @@ -1,8 +1,8 @@ #pragma once -#include "infinicore/nn/module.hpp" - #include "../cache/cache.hpp" +#include "infinicore/nn/module.hpp" +#include "nlohmann/json.hpp" #include @@ -13,7 +13,7 @@ class InfinilmModel : public infinicore::nn::Module { public: struct Config { std::string model_type; - + nlohmann::json model_config; virtual ~Config() = default; }; diff --git a/csrc/models/llama/llama.hpp b/csrc/models/llama/llama.hpp index fe554c32..eebac92b 100644 --- a/csrc/models/llama/llama.hpp +++ b/csrc/models/llama/llama.hpp @@ -16,9 +16,10 @@ * - LlamaForCausalLM: Complete model with language modeling head */ -#include "llama_config.hpp" +#include "../../config/global_config.hpp" #include "llama_attention.hpp" -#include "llama_mlp.hpp" +#include "llama_config.hpp" #include "llama_decoder_layer.hpp" -#include "llama_model.hpp" #include "llama_for_causal_lm.hpp" +#include "llama_mlp.hpp" +#include "llama_model.hpp" diff --git a/csrc/models/llama/llama_config.hpp b/csrc/models/llama/llama_config.hpp index b0fb892e..e3e56a7f 100644 --- a/csrc/models/llama/llama_config.hpp +++ b/csrc/models/llama/llama_config.hpp @@ -7,6 +7,7 @@ #include "../infinilm_model.hpp" #include "infinicore/nn/quantization.hpp" +#include "nlohmann/json.hpp" #include @@ -37,6 +38,8 @@ struct LlamaConfig : public InfinilmModel::Config { size_t max_position_embeddings = 2048; // Maximum sequence length double rope_theta = 10000.0; // RoPE base frequency + std::shared_ptr rope_scaling = nullptr; // RoPE scaling type + // Normalization double rms_norm_eps = 1e-6; // RMSNorm epsilon @@ -94,6 +97,8 @@ struct LlamaConfig : public InfinilmModel::Config { } return true; } + + nlohmann::json config_json; }; } // namespace infinilm::models::llama diff --git a/csrc/pybind11/engine/engine.hpp b/csrc/pybind11/engine/engine.hpp index 5ac38d70..535ddf8a 100644 --- a/csrc/pybind11/engine/engine.hpp +++ b/csrc/pybind11/engine/engine.hpp @@ -35,17 +35,20 @@ inline void bind_infer_engine(py::module &m) { const InfinilmModel::Config &cfg, const distributed::DistConfig &dist, infinicore::Device::Type dev, - std::shared_ptr cache_cfg) { + std::shared_ptr cache_cfg, + const std::string &modle_path) { return std::make_shared( cfg, dist, dev, - cache_cfg ? cache_cfg.get() : nullptr); + cache_cfg ? 
cache_cfg.get() : nullptr, + modle_path); }), py::arg("config"), py::arg("distributed_config") = distributed::DistConfig(), py::arg("device_type") = infinicore::context::getDevice().getType(), - py::arg("cache_config") = py::none()) + py::arg("cache_config") = py::none(), + py::arg("model_path") = "") .def("load_param", &InferEngine::load_param, py::arg("name"), py::arg("param"), "Load a parameter tensor into all workers (each worker picks its shard)") @@ -60,20 +63,12 @@ inline void bind_infer_engine(py::module &m) { } return state_dict_tp_all; }) - .def( - "forward", [](InferEngine &self, const InferEngine::Input &input) -> InferEngine::Output { return self.forward(input); }, "Run inference on all ranks with arbitrary arguments") - .def( - "reset_cache", [](InferEngine &self, std::shared_ptr cfg) { - self.reset_cache(cfg ? cfg.get() : nullptr); - }, - py::arg("cache_config") = py::none()) + .def("forward", [](InferEngine &self, const InferEngine::Input &input) -> InferEngine::Output { return self.forward(input); }, "Run inference on all ranks with arbitrary arguments") + .def("reset_cache", [](InferEngine &self, std::shared_ptr cfg) { self.reset_cache(cfg ? cfg.get() : nullptr); }, py::arg("cache_config") = py::none()) .def("get_cache_config", [](const InferEngine &self) { auto cfg = self.get_cache_config(); - return std::shared_ptr(std::move(cfg->unique_copy())); - }) - .def("__repr__", [](const InferEngine &self) { - return ""; - }); + return std::shared_ptr(std::move(cfg->unique_copy())); }) + .def("__repr__", [](const InferEngine &self) { return ""; }); py::class_(infer_engine, "Input") .def( diff --git a/python/infinilm/infer_engine.py b/python/infinilm/infer_engine.py index 8d5ea985..716b0afb 100644 --- a/python/infinilm/infer_engine.py +++ b/python/infinilm/infer_engine.py @@ -39,6 +39,7 @@ def __init__( distributed_config._underlying, device._underlying.type, cache_config, + model_path, ) self.use_cache = False diff --git a/third_party/json b/third_party/json new file mode 160000 index 00000000..5ed07097 --- /dev/null +++ b/third_party/json @@ -0,0 +1 @@ +Subproject commit 5ed07097faa6c50199c4a3b66e5ed37d4fbfccc2 diff --git a/xmake.lua b/xmake.lua index ad636197..aab1a0c7 100644 --- a/xmake.lua +++ b/xmake.lua @@ -6,6 +6,7 @@ set_toolchains("gcc") -- Add spdlog from third_party directory add_includedirs("third_party/spdlog/include") +add_includedirs("third_party/json/single_include/") target("infinicore_infer") set_kind("shared") From bc07f0453f6aee2835be7c572b520a18b6e6f7f8 Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Tue, 20 Jan 2026 14:01:22 +0800 Subject: [PATCH 3/8] =?UTF-8?q?InfiniLM=20=E5=A2=9E=E5=8A=A0=E9=87=8F?= =?UTF-8?q?=E5=8C=96=E5=B1=82=E5=92=8Cglobal=20config?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- csrc/engine/rank_worker.cpp | 2 +- csrc/models/llama/llama_attention.cpp | 9 +++++---- csrc/models/llama/llama_attention.hpp | 4 +++- csrc/models/llama/llama_config.hpp | 2 +- csrc/models/llama/llama_mlp.cpp | 4 ++-- csrc/models/model_factory.cpp | 3 ++- csrc/models/model_factory.hpp | 4 +++- csrc/quantization/compressed_tensors.cpp | 6 ++++++ csrc/quantization/compressed_tensors.hpp | 19 +++++++++++++++++++ csrc/quantization/quantization.hpp | 19 +++++++++++++++++++ csrc/quantization/utils.hpp | 2 ++ 11 files changed, 63 insertions(+), 11 deletions(-) create mode 100644 csrc/quantization/compressed_tensors.cpp create mode 100644 csrc/quantization/compressed_tensors.hpp create mode 100644 
csrc/quantization/quantization.hpp create mode 100644 csrc/quantization/utils.hpp diff --git a/csrc/engine/rank_worker.cpp b/csrc/engine/rank_worker.cpp index 9dbbeed3..e3654a5a 100644 --- a/csrc/engine/rank_worker.cpp +++ b/csrc/engine/rank_worker.cpp @@ -177,7 +177,7 @@ void RankWorker::thread_loop() { infinicore::context::setDevice(rank_info_.device); // Create model using factory (may be expensive) - model_ = InfinilmModelFactory::createModel(model_config_, rank_info_, pending_cache_config_ != nullptr ? pending_cache_config_.get() : nullptr); + model_ = InfinilmModelFactory::createModel(model_config_, rank_info_, pending_cache_config_ != nullptr ? pending_cache_config_.get() : nullptr, global_config_); if (!model_) { throw std::runtime_error("Failed to create model"); } diff --git a/csrc/models/llama/llama_attention.cpp b/csrc/models/llama/llama_attention.cpp index 9707db7c..465adc32 100644 --- a/csrc/models/llama/llama_attention.cpp +++ b/csrc/models/llama/llama_attention.cpp @@ -20,7 +20,8 @@ namespace infinilm::models::llama { LlamaAttention::LlamaAttention(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, - engine::distributed::RankInfo rank_info) + engine::distributed::RankInfo rank_info, + const infinilm::config::global_config::GlobalConfig &global_config) : layer_idx_(layer_idx), hidden_size_(config.hidden_size), num_attention_heads_(config.num_attention_heads), @@ -48,16 +49,16 @@ LlamaAttention::LlamaAttention(const LlamaConfig &config, scaling_ = 1.0f / std::sqrt(static_cast(head_dim_)); // Initialize projection layers + // if (global_config.get_global_config_json().is_null()) { if (!config.quant_config.has_value()) { INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, dtype, device, rank_info); // Output projection uses attention_output_bias (can be different from qkv) INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, dtype, device, tp_rank, tp_size, rank_info.comm); - } else { - switch (config.quant_config.value().get_quant_type()) { - case infinicore::nn::QuantType::COMPRESSED_TENSOR: { + switch (config.quant_config.value().get_quant_scheme()) { + case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: { INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, dtype, device, rank_info, config.quant_config.value()); diff --git a/csrc/models/llama/llama_attention.hpp b/csrc/models/llama/llama_attention.hpp index 9d464bcf..d35a7e6a 100644 --- a/csrc/models/llama/llama_attention.hpp +++ b/csrc/models/llama/llama_attention.hpp @@ -1,6 +1,7 @@ #pragma once #include "../../cache/kv_cache.hpp" +#include "../../config/global_config.hpp" #include "../../engine/distributed/distributed.hpp" #include "../../layers/fused_linear.hpp" #include "llama_config.hpp" @@ -39,7 +40,8 @@ class LlamaAttention : public infinicore::nn::Module { LlamaAttention(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + const infinilm::config::global_config::GlobalConfig &global_config = infinilm::config::global_config::GlobalConfig()); /** * @brief Forward pass: compute attention diff --git a/csrc/models/llama/llama_config.hpp 
b/csrc/models/llama/llama_config.hpp index e3e56a7f..ca02507d 100644 --- a/csrc/models/llama/llama_config.hpp +++ b/csrc/models/llama/llama_config.hpp @@ -70,7 +70,7 @@ struct LlamaConfig : public InfinilmModel::Config { // Quant Config // std::optional quant_config = std::nullopt; - std::optional quant_config = infinicore::nn::QuantConfig(infinicore::nn::QuantType::COMPRESSED_TENSOR); + std::optional quant_config = infinicore::nn::QuantConfig(infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8); /** * @brief Compute key-value dimension for Grouped Query Attention (GQA) diff --git a/csrc/models/llama/llama_mlp.cpp b/csrc/models/llama/llama_mlp.cpp index 2cc48ccb..1ac3474b 100644 --- a/csrc/models/llama/llama_mlp.cpp +++ b/csrc/models/llama/llama_mlp.cpp @@ -23,8 +23,8 @@ LlamaMLP::LlamaMLP(const LlamaConfig &config, INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, dtype, device, tp_rank, tp_size, rank_info.comm); } else { - switch (config.quant_config.value().get_quant_type()) { - case infinicore::nn::QuantType::COMPRESSED_TENSOR: { + switch (config.quant_config.value().get_quant_scheme()) { + case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: { INFINILM_GATE_UP_LINEAR_W8A8_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, use_bias_, dtype, device, rank_info_, config.quant_config.value()); INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, diff --git a/csrc/models/model_factory.cpp b/csrc/models/model_factory.cpp index 999bb364..f84d4905 100644 --- a/csrc/models/model_factory.cpp +++ b/csrc/models/model_factory.cpp @@ -5,7 +5,8 @@ namespace infinilm { std::shared_ptr InfinilmModelFactory::createModel( const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info, - const cache::CacheConfig *cache) { + const cache::CacheConfig *cache, + const config::global_config::GlobalConfig &global_config) { std::shared_ptr model; if (const auto llama_config_ptr = dynamic_cast(&config)) { diff --git a/csrc/models/model_factory.hpp b/csrc/models/model_factory.hpp index a73f432c..3d14ced3 100644 --- a/csrc/models/model_factory.hpp +++ b/csrc/models/model_factory.hpp @@ -1,5 +1,6 @@ #pragma once +#include "../config/global_config.hpp" #include "infinilm_model.hpp" #include "../engine/distributed/distributed.hpp" @@ -10,6 +11,7 @@ class InfinilmModelFactory { static std::shared_ptr createModel( const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), - const cache::CacheConfig *cache = nullptr); + const cache::CacheConfig *cache = nullptr, + const config::global_config::GlobalConfig &global_config = config::global_config::GlobalConfig()); }; } // namespace infinilm diff --git a/csrc/quantization/compressed_tensors.cpp b/csrc/quantization/compressed_tensors.cpp new file mode 100644 index 00000000..f5b71bcc --- /dev/null +++ b/csrc/quantization/compressed_tensors.cpp @@ -0,0 +1,6 @@ +// #include "compressed_tensors.hpp" + +// infinicore::nn::QuantScheme CompressedTensors::get_quant_scheme() { +// // need to add more schemes later +// return infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8; +// } \ No newline at end of file diff --git a/csrc/quantization/compressed_tensors.hpp b/csrc/quantization/compressed_tensors.hpp new file mode 100644 index 00000000..c7d6eead --- /dev/null +++ b/csrc/quantization/compressed_tensors.hpp @@ -0,0 +1,19 @@ +#pragma once +#include "quantization.hpp" +// #include "utils.hpp" +namespace 
infinilm::quantization { + +class CompressedTensors : public BaseQuantization { +public: + CompressedTensors(const infinilm::config::global_config::GlobalConfig &global_config) + : BaseQuantization(global_config) { + quant_config_ = global_config.get_quant_config_json(); + } + + infinicore::nn::QuantScheme + get_quant_scheme() const override { + return infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8; + } +}; + +} // namespace infinilm::quantization \ No newline at end of file diff --git a/csrc/quantization/quantization.hpp b/csrc/quantization/quantization.hpp new file mode 100644 index 00000000..8bca2651 --- /dev/null +++ b/csrc/quantization/quantization.hpp @@ -0,0 +1,19 @@ +#pragma once +#include "compressed_tensors.hpp" + +// #include "../config/quant_config.hpp" +#include "../config/global_config.hpp" +#include "infinicore/nn/quantization.hpp" + +namespace infinilm::quantization { +class BaseQuantization { +public: + explicit BaseQuantization(const infinilm::config::global_config::GlobalConfig &global_config) {}; + virtual ~BaseQuantization() = default; + + virtual infinicore::nn::QuantScheme get_quant_scheme() const = 0; + +protected: + infinilm::config::quantization::QuantConfig quant_config_; +} +} // namespace infinilm::quantization \ No newline at end of file diff --git a/csrc/quantization/utils.hpp b/csrc/quantization/utils.hpp new file mode 100644 index 00000000..1ae21db2 --- /dev/null +++ b/csrc/quantization/utils.hpp @@ -0,0 +1,2 @@ +#include "../config/global_config.hpp" +#include "infinicore/nn/quantization.hpp" \ No newline at end of file From 1d8f51b4e56f82e2996f1bf67a5b9ac8006f60ee Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Wed, 21 Jan 2026 16:01:39 +0800 Subject: [PATCH 4/8] =?UTF-8?q?=E4=BB=A5=E4=B8=80=E7=A7=8D=E6=AF=94?= =?UTF-8?q?=E8=BE=83=E4=BC=98=E9=9B=85=E7=9A=84=E6=96=B9=E5=BC=8F=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E4=BA=86quant=20config=E7=9A=84=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- csrc/config/global_config.cpp | 14 +++++++++ csrc/config/global_config.hpp | 17 +++++------ csrc/config/quant_config.cpp | 22 ++++++++++++++ csrc/config/quant_config.hpp | 24 +++++++++++---- csrc/engine/infer_engine.cpp | 7 +++-- csrc/engine/infer_engine.hpp | 2 +- csrc/engine/rank_worker.cpp | 3 +- csrc/engine/rank_worker.hpp | 4 +-- csrc/models/infinilm_model.hpp | 1 - csrc/models/llama/llama_attention.cpp | 37 +++++++++++------------ csrc/models/llama/llama_attention.hpp | 3 +- csrc/models/llama/llama_config.hpp | 4 --- csrc/models/llama/llama_decoder_layer.cpp | 7 +++-- csrc/models/llama/llama_decoder_layer.hpp | 4 ++- csrc/models/llama/llama_for_causal_lm.cpp | 5 +-- csrc/models/llama/llama_for_causal_lm.hpp | 3 +- csrc/models/llama/llama_mlp.cpp | 30 +++++++++--------- csrc/models/llama/llama_mlp.hpp | 6 +++- csrc/models/llama/llama_model.cpp | 8 ++--- csrc/models/llama/llama_model.hpp | 5 +-- csrc/models/model_factory.cpp | 4 +-- csrc/models/model_factory.hpp | 2 +- csrc/quantization/compressed_tensors.hpp | 12 +++++--- csrc/quantization/quantization.hpp | 13 ++++---- 24 files changed, 143 insertions(+), 94 deletions(-) create mode 100644 csrc/config/global_config.cpp create mode 100644 csrc/config/quant_config.cpp diff --git a/csrc/config/global_config.cpp b/csrc/config/global_config.cpp new file mode 100644 index 00000000..63d03fb0 --- /dev/null +++ b/csrc/config/global_config.cpp @@ -0,0 +1,14 @@ +#include "global_config.hpp" +#include +namespace infinilm::config::global_config { 
+GlobalConfig::GlobalConfig(const std::string &path) { + std::ifstream file(path); + if (file.is_open()) { + file >> config_json; + file.close(); + } else { + throw std::runtime_error("Could not open config file: " + path); + } + this->quant_config = quantization::QuantConfig(config_json["quantization_config"]); +} +} // namespace infinilm::config::global_config \ No newline at end of file diff --git a/csrc/config/global_config.hpp b/csrc/config/global_config.hpp index d04c1c94..6f9c32b6 100644 --- a/csrc/config/global_config.hpp +++ b/csrc/config/global_config.hpp @@ -9,23 +9,20 @@ namespace infinilm::config::global_config { struct GlobalConfig { // Quantization configuration public: - infinilm::config::quantization::QuantConfig get_quant_config_json() const { - return infinilm::config::quantization::QuantConfig(config_json.value("quantization_config", nlohmann::json::object())).to_json(); - } - GlobalConfig() = default; GlobalConfig(const nlohmann::json &json) : config_json(json) {}; - GlobalConfig(const std::string &path) { - std::ifstream file(path); - if (file.is_open()) { - file >> config_json; - file.close(); + GlobalConfig(const std::string &path); + + infinicore::nn::QuantScheme get_quant_scheme() const { + if (quant_config.get_quant_scheme() != infinicore::nn::QuantScheme::NONE) { + return quant_config.get_quant_scheme(); } else { - throw std::runtime_error("Could not open config file: " + path); + return infinicore::nn::QuantScheme::NONE; } } private: nlohmann::json config_json; + quantization::QuantConfig quant_config; }; } // namespace infinilm::config::global_config \ No newline at end of file diff --git a/csrc/config/quant_config.cpp b/csrc/config/quant_config.cpp new file mode 100644 index 00000000..0ee47682 --- /dev/null +++ b/csrc/config/quant_config.cpp @@ -0,0 +1,22 @@ +#include "quant_config.hpp" +#include +namespace infinilm::config::quantization { +QuantConfig::QuantConfig(const nlohmann::json &json) : quantization_config(json) { + this->quantization_method = get_quantization_method(); +} + +std::shared_ptr +QuantConfig::get_quantization_method() const { + if (quantization_config.is_null()) { + return nullptr; + } + + // Determine the quantization scheme from the JSON config + if (quantization_config["quant_method"] == "compressed-tensors") { + return std::make_shared(quantization_config); + } + // Add other schemes as needed + + return nullptr; // Default case if no matching scheme +} +} // namespace infinilm::config::quantization \ No newline at end of file diff --git a/csrc/config/quant_config.hpp b/csrc/config/quant_config.hpp index fa9e01f4..46400eff 100644 --- a/csrc/config/quant_config.hpp +++ b/csrc/config/quant_config.hpp @@ -1,17 +1,29 @@ #pragma once +#include "../quantization/compressed_tensors.hpp" +#include "../quantization/quantization.hpp" #include "nlohmann/json.hpp" +#include namespace infinilm::config::quantization { -struct QuantConfig { - nlohmann::json quantization_config; - +class QuantConfig { +public: QuantConfig() = default; - QuantConfig(const nlohmann::json &json) : quantization_config(json) {}; - nlohmann::json to_json() const { - return quantization_config; + QuantConfig(const nlohmann::json &json); + + infinicore::nn::QuantScheme get_quant_scheme() const { + if (quantization_method != nullptr) { + return quantization_method->get_quant_scheme(); + } else { + return infinicore::nn::QuantScheme::NONE; + } } + +private: + nlohmann::json quantization_config; + std::shared_ptr get_quantization_method() const; + std::shared_ptr 
quantization_method; }; } // namespace infinilm::config::quantization \ No newline at end of file diff --git a/csrc/engine/infer_engine.cpp b/csrc/engine/infer_engine.cpp index f631fff1..9c8fedc8 100644 --- a/csrc/engine/infer_engine.cpp +++ b/csrc/engine/infer_engine.cpp @@ -1,5 +1,6 @@ #include "infer_engine.hpp" #include "spdlog/spdlog.h" +#include namespace infinilm::engine { @@ -18,9 +19,9 @@ InferEngine::InferEngine( if (cache_config != nullptr) { cache_config_ = cache_config->unique_copy(); } - if (!model_path.empty()) { - global_config_ = infinilm::config::global_config::GlobalConfig(model_path + "/config.json"); - } + // if (!model_path.empty()) { + this->global_config_ = std::make_shared(model_path + "/config.json"); + // Create one RankWorker per rank int world_size = communication_group_.get_world_size(); workers_.reserve(world_size); diff --git a/csrc/engine/infer_engine.hpp b/csrc/engine/infer_engine.hpp index 1188af28..1d628598 100644 --- a/csrc/engine/infer_engine.hpp +++ b/csrc/engine/infer_engine.hpp @@ -49,7 +49,7 @@ class InferEngine { distributed::CommunicationGroup communication_group_; const InfinilmModel::Config &model_config_; std::unique_ptr cache_config_; - infinilm::config::global_config::GlobalConfig global_config_; + std::shared_ptr global_config_; }; } // namespace infinilm::engine diff --git a/csrc/engine/rank_worker.cpp b/csrc/engine/rank_worker.cpp index e3654a5a..0d6cc9ab 100644 --- a/csrc/engine/rank_worker.cpp +++ b/csrc/engine/rank_worker.cpp @@ -13,7 +13,7 @@ namespace infinilm::engine { RankWorker::RankWorker(const InfinilmModel::Config &model_config, const distributed::RankInfo &rank_info, const cache::CacheConfig *cache_config, - const infinilm::config::global_config::GlobalConfig &global_config) + std::shared_ptr global_config) : model_config_(model_config), rank_info_(rank_info), job_cmd_(Command::INIT), @@ -27,7 +27,6 @@ RankWorker::RankWorker(const InfinilmModel::Config &model_config, } // start the thread thread_ = std::thread(&RankWorker::thread_loop, this); - // Wait until the worker thread finishes initialization (model created) std::unique_lock lk(mutex_); cv_.wait(lk, [&] { return init_done_; }); diff --git a/csrc/engine/rank_worker.hpp b/csrc/engine/rank_worker.hpp index 27ed6efa..34c4b0b6 100644 --- a/csrc/engine/rank_worker.hpp +++ b/csrc/engine/rank_worker.hpp @@ -58,7 +58,7 @@ class RankWorker { RankWorker(const InfinilmModel::Config &model_config, const distributed::RankInfo &rank_info, const cache::CacheConfig *cache_config, - const infinilm::config::global_config::GlobalConfig &global_config); + std::shared_ptr global_config); // Submit a parameter load job and wait until the load completes on the worker thread. 
void load_param(const std::string &name, @@ -93,7 +93,7 @@ class RankWorker { distributed::RankInfo rank_info_; std::shared_ptr model_; std::shared_ptr cache_; - const infinilm::config::global_config::GlobalConfig &global_config_; + std::shared_ptr global_config_; // Command for the pending job (protected by mutex_) Command job_cmd_; diff --git a/csrc/models/infinilm_model.hpp b/csrc/models/infinilm_model.hpp index 9752d949..fcad67fc 100644 --- a/csrc/models/infinilm_model.hpp +++ b/csrc/models/infinilm_model.hpp @@ -13,7 +13,6 @@ class InfinilmModel : public infinicore::nn::Module { public: struct Config { std::string model_type; - nlohmann::json model_config; virtual ~Config() = default; }; diff --git a/csrc/models/llama/llama_attention.cpp b/csrc/models/llama/llama_attention.cpp index 465adc32..dd8fe38d 100644 --- a/csrc/models/llama/llama_attention.cpp +++ b/csrc/models/llama/llama_attention.cpp @@ -21,7 +21,7 @@ LlamaAttention::LlamaAttention(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info, - const infinilm::config::global_config::GlobalConfig &global_config) + std::shared_ptr global_config) : layer_idx_(layer_idx), hidden_size_(config.hidden_size), num_attention_heads_(config.num_attention_heads), @@ -30,8 +30,9 @@ LlamaAttention::LlamaAttention(const LlamaConfig &config, kv_dim_(config.kv_dim()), use_bias_(config.attention_bias), use_output_bias_(config.attention_output_bias), - use_qk_norm_(config.qk_norm), - max_position_embeddings_(config.max_position_embeddings), rank_info_(rank_info) { + max_position_embeddings_(config.max_position_embeddings), + rank_info_(rank_info), + global_config_(global_config) { const auto &dtype{config.dtype}; int tp_rank = rank_info.tp_rank; @@ -48,27 +49,23 @@ LlamaAttention::LlamaAttention(const LlamaConfig &config, } scaling_ = 1.0f / std::sqrt(static_cast(head_dim_)); - // Initialize projection layers - // if (global_config.get_global_config_json().is_null()) { - if (!config.quant_config.has_value()) { + auto quant_scheme = this->global_config_->get_quant_scheme(); + switch (quant_scheme) { + case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: + INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, + dtype, device, rank_info, quant_scheme); + + INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm, quant_scheme); + break; + + default: INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, dtype, device, rank_info); - // Output projection uses attention_output_bias (can be different from qkv) + INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, dtype, device, tp_rank, tp_size, rank_info.comm); - } else { - switch (config.quant_config.value().get_quant_scheme()) { - case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: { - INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, - dtype, device, rank_info, config.quant_config.value()); - - INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, - dtype, device, tp_rank, tp_size, rank_info.comm, config.quant_config.value()); - break; - } - default: { - } - } + break; } } diff --git 
a/csrc/models/llama/llama_attention.hpp b/csrc/models/llama/llama_attention.hpp index d35a7e6a..ca9abe32 100644 --- a/csrc/models/llama/llama_attention.hpp +++ b/csrc/models/llama/llama_attention.hpp @@ -41,7 +41,7 @@ class LlamaAttention : public infinicore::nn::Module { const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), - const infinilm::config::global_config::GlobalConfig &global_config = infinilm::config::global_config::GlobalConfig()); + std::shared_ptr global_config = nullptr); /** * @brief Forward pass: compute attention @@ -115,6 +115,7 @@ class LlamaAttention : public infinicore::nn::Module { size_t max_position_embeddings_; // For cache initialization (deprecated, kept for compatibility) float scaling_; + std::shared_ptr global_config_; }; } // namespace infinilm::models::llama diff --git a/csrc/models/llama/llama_config.hpp b/csrc/models/llama/llama_config.hpp index ca02507d..0db2bcc8 100644 --- a/csrc/models/llama/llama_config.hpp +++ b/csrc/models/llama/llama_config.hpp @@ -68,10 +68,6 @@ struct LlamaConfig : public InfinilmModel::Config { std::vector bos_token_id = {1}; // Beginning of sequence token ID(s) std::vector eos_token_id = {2}; // End of sequence token ID(s) - // Quant Config - // std::optional quant_config = std::nullopt; - std::optional quant_config = infinicore::nn::QuantConfig(infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8); - /** * @brief Compute key-value dimension for Grouped Query Attention (GQA) * @return The dimension for key/value projections diff --git a/csrc/models/llama/llama_decoder_layer.cpp b/csrc/models/llama/llama_decoder_layer.cpp index 35a1acab..1842ae2d 100644 --- a/csrc/models/llama/llama_decoder_layer.cpp +++ b/csrc/models/llama/llama_decoder_layer.cpp @@ -9,7 +9,8 @@ namespace infinilm::models::llama { LlamaDecoderLayer::LlamaDecoderLayer(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, - engine::distributed::RankInfo rank_info) : layer_idx_(layer_idx), rank_info_(rank_info) { + engine::distributed::RankInfo rank_info, + std::shared_ptr global_config) : layer_idx_(layer_idx), rank_info_(rank_info), global_config_(global_config) { const auto &dtype{config.dtype}; // Initialize layer normalization layers @@ -19,8 +20,8 @@ LlamaDecoderLayer::LlamaDecoderLayer(const LlamaConfig &config, dtype, device); // Initialize attention and MLP modules - INFINICORE_NN_MODULE_INIT(self_attn, config, device, layer_idx, rank_info_); - INFINICORE_NN_MODULE_INIT(mlp, config, device, rank_info_); + INFINICORE_NN_MODULE_INIT(self_attn, config, device, layer_idx, rank_info_, global_config); + INFINICORE_NN_MODULE_INIT(mlp, config, device, rank_info_, global_config); } infinicore::Tensor LlamaDecoderLayer::forward(const infinicore::Tensor &hidden_states, diff --git a/csrc/models/llama/llama_decoder_layer.hpp b/csrc/models/llama/llama_decoder_layer.hpp index 4ded50a7..d37a2994 100644 --- a/csrc/models/llama/llama_decoder_layer.hpp +++ b/csrc/models/llama/llama_decoder_layer.hpp @@ -36,7 +36,8 @@ class LlamaDecoderLayer : public infinicore::nn::Module { LlamaDecoderLayer(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::shared_ptr global_config = nullptr); /** * @brief Forward pass: process one decoder layer @@ -75,6 +76,7 @@ class LlamaDecoderLayer : public 
infinicore::nn::Module { INFINICORE_NN_MODULE(LlamaAttention, self_attn); INFINICORE_NN_MODULE(LlamaMLP, mlp); engine::distributed::RankInfo rank_info_; + std::shared_ptr global_config_; private: size_t layer_idx_; // Layer index for cache management and debugging diff --git a/csrc/models/llama/llama_for_causal_lm.cpp b/csrc/models/llama/llama_for_causal_lm.cpp index 6ce1fd98..1963cee2 100644 --- a/csrc/models/llama/llama_for_causal_lm.cpp +++ b/csrc/models/llama/llama_for_causal_lm.cpp @@ -8,7 +8,8 @@ namespace infinilm::models::llama { LlamaForCausalLM::LlamaForCausalLM(const LlamaConfig &config, const infinicore::Device &device, - engine::distributed::RankInfo rank_info) { + engine::distributed::RankInfo rank_info, + std::shared_ptr global_config) { // Initialize module's device_ member device_ = device; @@ -16,7 +17,7 @@ LlamaForCausalLM::LlamaForCausalLM(const LlamaConfig &config, const auto &dtype{config.dtype}; // Initialize base model - INFINICORE_NN_MODULE_INIT(model, config, device, rank_info); + INFINICORE_NN_MODULE_INIT(model, config, device, rank_info, global_config); // Initialize language modeling head // Note: If tie_word_embeddings is true, we would share weights with embed_tokens diff --git a/csrc/models/llama/llama_for_causal_lm.hpp b/csrc/models/llama/llama_for_causal_lm.hpp index dd6f90fa..2595c027 100644 --- a/csrc/models/llama/llama_for_causal_lm.hpp +++ b/csrc/models/llama/llama_for_causal_lm.hpp @@ -30,7 +30,8 @@ class LlamaForCausalLM : public InfinilmModel { */ LlamaForCausalLM(const LlamaConfig &config, const infinicore::Device &device, - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::shared_ptr global_config = nullptr); /** * @brief Forward pass: compute language modeling logits diff --git a/csrc/models/llama/llama_mlp.cpp b/csrc/models/llama/llama_mlp.cpp index 1ac3474b..3f457d82 100644 --- a/csrc/models/llama/llama_mlp.cpp +++ b/csrc/models/llama/llama_mlp.cpp @@ -7,33 +7,33 @@ namespace infinilm::models::llama { LlamaMLP::LlamaMLP(const LlamaConfig &config, const infinicore::Device &device, - engine::distributed::RankInfo rank_info) + engine::distributed::RankInfo rank_info, + std::shared_ptr global_config) : hidden_size_(config.hidden_size), intermediate_size_(config.intermediate_size), - use_bias_(config.mlp_bias), rank_info_(rank_info) { + use_bias_(config.mlp_bias), rank_info_(rank_info), global_config_(global_config) { const auto &dtype{config.dtype}; int tp_rank = rank_info.tp_rank; int tp_size = rank_info.tp_size; // Initialize projection layers - if (!config.quant_config.has_value()) { + auto quant_scheme = this->global_config_->get_quant_scheme(); + // std::cout << "LlamaMLP quant_scheme: " << static_cast(quant_scheme) << std::endl; + switch (quant_scheme) { + case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: + INFINILM_GATE_UP_LINEAR_W8A8_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, use_bias_, + dtype, device, rank_info_, quant_scheme); + INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm, quant_scheme); + break; + + default: INFINILM_GATE_UP_LINEAR_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, use_bias_, dtype, device, rank_info_); INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, dtype, device, tp_rank, tp_size, rank_info.comm); - } else { - switch 
(config.quant_config.value().get_quant_scheme()) { - case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: { - INFINILM_GATE_UP_LINEAR_W8A8_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, use_bias_, - dtype, device, rank_info_, config.quant_config.value()); - INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, use_bias_, - dtype, device, tp_rank, tp_size, rank_info.comm, config.quant_config.value()); - break; - } - default: { - } - } + break; } } diff --git a/csrc/models/llama/llama_mlp.hpp b/csrc/models/llama/llama_mlp.hpp index 665dac70..42eacc1e 100644 --- a/csrc/models/llama/llama_mlp.hpp +++ b/csrc/models/llama/llama_mlp.hpp @@ -3,6 +3,7 @@ #include "../../layers/fused_linear.hpp" #include "llama_config.hpp" +#include "../../config/global_config.hpp" #include "infinicore/device.hpp" #include "infinicore/nn/linear.hpp" #include "infinicore/nn/module.hpp" @@ -35,7 +36,8 @@ class LlamaMLP : public infinicore::nn::Module { */ LlamaMLP(const LlamaConfig &config, const infinicore::Device &device, - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::shared_ptr global_config = nullptr); /** * @brief Forward pass: compute MLP output @@ -57,6 +59,8 @@ class LlamaMLP : public infinicore::nn::Module { size_t hidden_size_; size_t intermediate_size_; bool use_bias_; + + std::shared_ptr global_config_; }; } // namespace infinilm::models::llama diff --git a/csrc/models/llama/llama_model.cpp b/csrc/models/llama/llama_model.cpp index 34c3c0b2..df7aece1 100644 --- a/csrc/models/llama/llama_model.cpp +++ b/csrc/models/llama/llama_model.cpp @@ -9,13 +9,13 @@ namespace infinilm::models::llama { LlamaModel::LlamaModel(const LlamaConfig &config, const infinicore::Device &device, - engine::distributed::RankInfo rank_info) - : config_(config), rank_info_(rank_info) { + engine::distributed::RankInfo rank_info, + std::shared_ptr global_config) + : config_(config), rank_info_(rank_info), global_config_(global_config) { const auto &dtype{config.dtype}; // Initialize token embeddings INFINICORE_NN_MODULE_INIT(embed_tokens, config.vocab_size, config.hidden_size, std::nullopt, dtype, device); - // Initialize decoder layers with layer indices // TODO: Update INFINICORE_NN_MODULE_VEC_INIT macro to support per-layer constructor arguments // (e.g., via a factory function or lambda that receives the layer index) @@ -23,7 +23,7 @@ LlamaModel::LlamaModel(const LlamaConfig &config, layers_.reserve(config.num_hidden_layers); for (size_t i = 0; i < config.num_hidden_layers; ++i) { layers_.push_back(this->register_module( - "layers." + std::to_string(i), config, device, i, rank_info)); + "layers." 
+ std::to_string(i), config, device, i, rank_info, global_config_)); } // Initialize final layer normalization diff --git a/csrc/models/llama/llama_model.hpp b/csrc/models/llama/llama_model.hpp index 5a008b0f..b43fa542 100644 --- a/csrc/models/llama/llama_model.hpp +++ b/csrc/models/llama/llama_model.hpp @@ -1,7 +1,6 @@ #pragma once #include "../../cache/kv_cache.hpp" -#include "llama_config.hpp" #include "llama_decoder_layer.hpp" #include "infinicore/nn/embedding.hpp" @@ -40,7 +39,8 @@ class LlamaModel : public infinicore::nn::Module { */ LlamaModel(const LlamaConfig &config, const infinicore::Device &device, - engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + std::shared_ptr global_config = nullptr); /** * @brief Forward pass: process input through the model @@ -86,6 +86,7 @@ class LlamaModel : public infinicore::nn::Module { private: LlamaConfig config_; + std::shared_ptr global_config_; }; } // namespace infinilm::models::llama diff --git a/csrc/models/model_factory.cpp b/csrc/models/model_factory.cpp index f84d4905..cf783fb8 100644 --- a/csrc/models/model_factory.cpp +++ b/csrc/models/model_factory.cpp @@ -6,13 +6,13 @@ std::shared_ptr InfinilmModelFactory::createModel( const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info, const cache::CacheConfig *cache, - const config::global_config::GlobalConfig &global_config) { + std::shared_ptr global_config) { std::shared_ptr model; if (const auto llama_config_ptr = dynamic_cast(&config)) { const auto &llama_config = *llama_config_ptr; model = std::make_shared( - llama_config, rank_info.device, rank_info); + llama_config, rank_info.device, rank_info, global_config); } else { throw std::invalid_argument("InfinilmModelFactory::createModel: Unsupported model config type"); } diff --git a/csrc/models/model_factory.hpp b/csrc/models/model_factory.hpp index 3d14ced3..fcf60708 100644 --- a/csrc/models/model_factory.hpp +++ b/csrc/models/model_factory.hpp @@ -12,6 +12,6 @@ class InfinilmModelFactory { const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), const cache::CacheConfig *cache = nullptr, - const config::global_config::GlobalConfig &global_config = config::global_config::GlobalConfig()); + std::shared_ptr global_config = nullptr); }; } // namespace infinilm diff --git a/csrc/quantization/compressed_tensors.hpp b/csrc/quantization/compressed_tensors.hpp index c7d6eead..b290f9ac 100644 --- a/csrc/quantization/compressed_tensors.hpp +++ b/csrc/quantization/compressed_tensors.hpp @@ -1,19 +1,21 @@ #pragma once +// #include "../config/global_config.hpp" +#include "../config/quant_config.hpp" #include "quantization.hpp" // #include "utils.hpp" namespace infinilm::quantization { class CompressedTensors : public BaseQuantization { public: - CompressedTensors(const infinilm::config::global_config::GlobalConfig &global_config) - : BaseQuantization(global_config) { - quant_config_ = global_config.get_quant_config_json(); - } + explicit CompressedTensors(const nlohmann::json &quant_config) + : BaseQuantization(quant_config) { + // quant_config_ = global_config.get_quant_config_json(); + }; infinicore::nn::QuantScheme get_quant_scheme() const override { return infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8; - } + }; }; } // namespace infinilm::quantization \ No newline at end of file diff --git a/csrc/quantization/quantization.hpp b/csrc/quantization/quantization.hpp 
index 8bca2651..65f34aa1 100644 --- a/csrc/quantization/quantization.hpp +++ b/csrc/quantization/quantization.hpp @@ -1,19 +1,18 @@ #pragma once -#include "compressed_tensors.hpp" - -// #include "../config/quant_config.hpp" -#include "../config/global_config.hpp" +#include "../config/quant_config.hpp" #include "infinicore/nn/quantization.hpp" +#include "nlohmann/json.hpp" namespace infinilm::quantization { class BaseQuantization { public: - explicit BaseQuantization(const infinilm::config::global_config::GlobalConfig &global_config) {}; + explicit BaseQuantization(const nlohmann::json &quant_config) : quant_config_(quant_config) {}; virtual ~BaseQuantization() = default; virtual infinicore::nn::QuantScheme get_quant_scheme() const = 0; protected: - infinilm::config::quantization::QuantConfig quant_config_; -} + // infinilm::config::quantization::QuantConfig quant_config_; + nlohmann::json quant_config_; +}; } // namespace infinilm::quantization \ No newline at end of file From 6898b483ffb664bf6b320f7e215f2bbdfe62c85d Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Wed, 21 Jan 2026 17:26:04 +0800 Subject: [PATCH 5/8] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E9=83=A8=E5=88=86?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=E7=BB=93=E6=9E=84=EF=BC=8C=E5=88=A0=E9=99=A4?= =?UTF-8?q?=E6=97=A0=E7=94=A8=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- csrc/config/global_config.cpp | 13 ++- csrc/config/global_config.hpp | 16 +-- csrc/config/quant_config.cpp | 4 +- csrc/config/quant_config.hpp | 7 +- csrc/engine/infer_engine.cpp | 3 +- csrc/layers/fused_linear.cpp | 66 ----------- csrc/layers/fused_linear.hpp | 30 +---- csrc/quantization/base_quantization.hpp | 18 +++ csrc/quantization/compressed_tensors.cpp | 6 - csrc/quantization/compressed_tensors.hpp | 14 +-- csrc/quantization/quantization.hpp | 19 +-- csrc/quantization/utils.hpp | 2 - examples/jiuge.py | 4 +- python/infinilm/modeling_utils.py | 2 - .../models/llama/configuration_llama.py | 13 +-- python/infinilm/models/quant_config.py | 110 ------------------ 16 files changed, 56 insertions(+), 271 deletions(-) create mode 100644 csrc/quantization/base_quantization.hpp delete mode 100644 csrc/quantization/compressed_tensors.cpp delete mode 100644 csrc/quantization/utils.hpp delete mode 100644 python/infinilm/models/quant_config.py diff --git a/csrc/config/global_config.cpp b/csrc/config/global_config.cpp index 63d03fb0..93cd45be 100644 --- a/csrc/config/global_config.cpp +++ b/csrc/config/global_config.cpp @@ -1,5 +1,5 @@ #include "global_config.hpp" -#include + namespace infinilm::config::global_config { GlobalConfig::GlobalConfig(const std::string &path) { std::ifstream file(path); @@ -11,4 +11,13 @@ GlobalConfig::GlobalConfig(const std::string &path) { } this->quant_config = quantization::QuantConfig(config_json["quantization_config"]); } -} // namespace infinilm::config::global_config \ No newline at end of file + +infinicore::nn::QuantScheme +GlobalConfig::get_quant_scheme() const { + if (quant_config.get_quant_scheme() != infinicore::nn::QuantScheme::NONE) { + return quant_config.get_quant_scheme(); + } else { + return infinicore::nn::QuantScheme::NONE; + } +} +} // namespace infinilm::config::global_config diff --git a/csrc/config/global_config.hpp b/csrc/config/global_config.hpp index 6f9c32b6..1621142e 100644 --- a/csrc/config/global_config.hpp +++ b/csrc/config/global_config.hpp @@ -1,28 +1,22 @@ #pragma once - #include "quant_config.hpp" #include -#include #include namespace 
infinilm::config::global_config { struct GlobalConfig { - // Quantization configuration + // Global config is implemented using nlohmann/json and is primarily used for advanced configuration + // beyond the standard model config. It is initialized via GlobalConfig(const std::string& path) + // and passed through the InferEngine during inference. public: GlobalConfig() = default; GlobalConfig(const nlohmann::json &json) : config_json(json) {}; GlobalConfig(const std::string &path); - infinicore::nn::QuantScheme get_quant_scheme() const { - if (quant_config.get_quant_scheme() != infinicore::nn::QuantScheme::NONE) { - return quant_config.get_quant_scheme(); - } else { - return infinicore::nn::QuantScheme::NONE; - } - } + infinicore::nn::QuantScheme get_quant_scheme() const; private: nlohmann::json config_json; quantization::QuantConfig quant_config; }; -} // namespace infinilm::config::global_config \ No newline at end of file +} // namespace infinilm::config::global_config diff --git a/csrc/config/quant_config.cpp b/csrc/config/quant_config.cpp index 0ee47682..8984661f 100644 --- a/csrc/config/quant_config.cpp +++ b/csrc/config/quant_config.cpp @@ -1,5 +1,5 @@ #include "quant_config.hpp" -#include + namespace infinilm::config::quantization { QuantConfig::QuantConfig(const nlohmann::json &json) : quantization_config(json) { this->quantization_method = get_quantization_method(); @@ -19,4 +19,4 @@ QuantConfig::get_quantization_method() const { return nullptr; // Default case if no matching scheme } -} // namespace infinilm::config::quantization \ No newline at end of file +} // namespace infinilm::config::quantization diff --git a/csrc/config/quant_config.hpp b/csrc/config/quant_config.hpp index 46400eff..dec3750e 100644 --- a/csrc/config/quant_config.hpp +++ b/csrc/config/quant_config.hpp @@ -1,13 +1,12 @@ #pragma once - -#include "../quantization/compressed_tensors.hpp" #include "../quantization/quantization.hpp" #include "nlohmann/json.hpp" -#include namespace infinilm::config::quantization { class QuantConfig { + // QuantConfig is used to store and parse the "quantization" field from config.json. + // This is currently a basic version and will be extended in the future. 
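For context, the block that QuantConfig receives is the "quantization_config" object of config.json (GlobalConfig passes config_json["quantization_config"] into it). A minimal example of that block, written as the nlohmann::json literal the parser would see; the field names follow the HuggingFace compressed-tensors export format, only "quant_method" is actually inspected by this patch, and the concrete values are illustrative:

    // Illustrative only: what config_json["quantization_config"] might contain
    // for a compressed-tensors W8A8 checkpoint. Only "quant_method" is read here;
    // the remaining keys are kept for future, more detailed parsing.
    nlohmann::json quantization_config = {
        {"quant_method", "compressed-tensors"},
        {"format", "int-quantized"},
        {"quantization_status", "compressed"},
        {"ignore", nlohmann::json::array({"lm_head"})}
    };
    // QuantConfig(quantization_config).get_quant_scheme()
    //   -> infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8
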
public: QuantConfig() = default; QuantConfig(const nlohmann::json &json); @@ -26,4 +25,4 @@ class QuantConfig { std::shared_ptr quantization_method; }; -} // namespace infinilm::config::quantization \ No newline at end of file +} // namespace infinilm::config::quantization diff --git a/csrc/engine/infer_engine.cpp b/csrc/engine/infer_engine.cpp index 9c8fedc8..caabecf7 100644 --- a/csrc/engine/infer_engine.cpp +++ b/csrc/engine/infer_engine.cpp @@ -19,7 +19,8 @@ InferEngine::InferEngine( if (cache_config != nullptr) { cache_config_ = cache_config->unique_copy(); } - // if (!model_path.empty()) { + + // Load global config if model_path is provided, model_path must be valid, and config.json exists this->global_config_ = std::make_shared(model_path + "/config.json"); // Create one RankWorker per rank diff --git a/csrc/layers/fused_linear.cpp b/csrc/layers/fused_linear.cpp index 7f5ec364..700e8fde 100644 --- a/csrc/layers/fused_linear.cpp +++ b/csrc/layers/fused_linear.cpp @@ -6,57 +6,6 @@ namespace infinilm::layers { // --------------------------------------------------------- // QKV Parallel Linear // --------------------------------------------------------- -// QKVParallelLinear::QKVParallelLinear(size_t hidden_size, -// size_t head_dim, -// size_t num_q_head, -// size_t num_kv_head, -// bool bias, -// const infinicore::DataType &dtype, -// const infinicore::Device &device, -// engine::distributed::RankInfo rank_info) -// : QKVParallelLinear(hidden_size, -// head_dim, head_dim, head_dim, -// num_q_head, num_kv_head, num_kv_head, -// bias, bias, bias, -// dtype, device, rank_info) {} - -// QKVParallelLinear::QKVParallelLinear(size_t hidden_size, -// size_t q_dim, size_t k_dim, size_t v_dim, -// size_t num_q_head, size_t num_k_head, size_t num_v_head, -// bool q_bias, bool k_bias, bool v_bias, -// const infinicore::DataType &dtype, -// const infinicore::Device &device, -// engine::distributed::RankInfo rank_info) -// : infinicore::nn::ColumnParallelLinear( -// hidden_size, -// num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, -// (q_bias || k_bias || v_bias), -// dtype, -// device, -// rank_info.tp_rank, -// rank_info.tp_size), -// q_dim_(q_dim), -// k_dim_(k_dim), -// v_dim_(v_dim), -// num_q_head_(num_q_head), -// num_k_head_(num_k_head), -// num_v_head_(num_v_head), -// q_bias_(q_bias), -// k_bias_(k_bias), -// v_bias_(v_bias) { -// if (num_q_head % tp_size_ != 0 || num_k_head % tp_size_ != 0 || num_v_head % tp_size_ != 0) { -// throw std::runtime_error("QKVParallelLinear: num_[q|k|v]_head must be divisible by tp_size"); -// } - -// if ((q_bias_ != k_bias_) || (k_bias_ != v_bias_)) { -// throw std::runtime_error("q_bias, k_bias, v_bias must all match"); -// } - -// q_out_size_ = num_q_head_ * q_dim_ / tp_size_; -// k_out_size_ = num_k_head_ * k_dim_ / tp_size_; -// v_out_size_ = num_v_head_ * v_dim_ / tp_size_; -// } - QKVParallelLinear::QKVParallelLinear(size_t hidden_size, size_t head_dim, size_t num_q_head, @@ -192,21 +141,6 @@ bool QKVParallelLinear::has_v_bias() const { return v_bias_; } // --------------------------------------------------------- // Gate-Up Parallel Linear // --------------------------------------------------------- -// GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias, -// const infinicore::DataType &dtype, const infinicore::Device &device, -// engine::distributed::RankInfo rank_info) -// : GateUpParallelLinear(hidden_size, intermediate_size, bias, bias, dtype, device, rank_info) { -// } - -// 
GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, -// const infinicore::DataType &dtype, const infinicore::Device &device, -// engine::distributed::RankInfo rank_info) -// : infinicore::nn::ColumnParallelLinear(hidden_size, intermediate_size * 2, gate_bias || up_bias, dtype, device, rank_info.tp_rank, rank_info.tp_size), gate_bias_(gate_bias), up_bias_(up_bias) { -// if (gate_bias_ != up_bias_) { -// throw std::runtime_error("Not supported yet: gate_bias and up_bias should be given at the same time"); -// } -// } - GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias, const infinicore::DataType &dtype, const infinicore::Device &device, engine::distributed::RankInfo rank_info, diff --git a/csrc/layers/fused_linear.hpp b/csrc/layers/fused_linear.hpp index 8bde20d8..f4220fce 100644 --- a/csrc/layers/fused_linear.hpp +++ b/csrc/layers/fused_linear.hpp @@ -7,23 +7,6 @@ namespace infinilm::layers { class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { public: - // explicit QKVParallelLinear(size_t hidden_size, - // size_t q_dim, size_t k_dim, size_t v_dim, - // size_t num_q_head, size_t num_k_head, size_t num_v_head, - // bool q_bias, bool k_bias, bool v_bias, - // const infinicore::DataType &dtype = infinicore::DataType::F32, - // const infinicore::Device &device = infinicore::Device(), - // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); - - // // A more common case where all heads have the same dimension - // explicit QKVParallelLinear(size_t hidden_size, - // size_t head_dim, - // size_t num_q_head, size_t num_kv_head, - // bool bias = false, - // const infinicore::DataType &dtype = infinicore::DataType::F32, - // const infinicore::Device &device = infinicore::Device(), - // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); - explicit QKVParallelLinear(size_t hidden_size, size_t q_dim, size_t k_dim, size_t v_dim, size_t num_q_head, size_t num_k_head, size_t num_v_head, @@ -79,15 +62,6 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { public: - // GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias = false, - // const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), - // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); - - // GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, - // const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), - // engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); - - // Overload for quantization, old ones need tobe purged GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias = false, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), @@ -142,7 +116,7 @@ class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { if (name##_->has_up_bias()) \ this->register_parameter(std::string(up_name) + ".bias", name##_->get_up_bias()); -// ========================= QKV 量化 ================================== +// ========================= QKV Quantization 
================================== #define INFINILM_QKV_LINEAR_W8A8_INIT(name, q_name, k_name, v_name, ...) \ name##_ = std::make_shared(__VA_ARGS__); \ /* 注册 Q 权重 */ \ @@ -162,7 +136,7 @@ class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { if (name##_->has_v_bias()) \ this->register_parameter(std::string(v_name) + ".bias", name##_->get_v_bias()); -// ========================= Gate-Up 量化 ============================== +// ========================= Gate-Up Quantization ============================== #define INFINILM_GATE_UP_LINEAR_W8A8_INIT(name, gate_name, up_name, ...) \ name##_ = std::make_shared(__VA_ARGS__); \ /* 注册 Gate 权重 */ \ diff --git a/csrc/quantization/base_quantization.hpp b/csrc/quantization/base_quantization.hpp new file mode 100644 index 00000000..0d1f52ce --- /dev/null +++ b/csrc/quantization/base_quantization.hpp @@ -0,0 +1,18 @@ +#pragma once +#include "../config/quant_config.hpp" +#include "infinicore/nn/quantization.hpp" +#include "nlohmann/json.hpp" + +namespace infinilm::quantization { +class BaseQuantization { + // Base class for quantization schemes. Intended to be extended to support various quantization methods. +public: + explicit BaseQuantization(const nlohmann::json &quant_config) : quant_config_(quant_config) {}; + virtual ~BaseQuantization() = default; + + virtual infinicore::nn::QuantScheme get_quant_scheme() const = 0; + +protected: + nlohmann::json quant_config_; +}; +} // namespace infinilm::quantization diff --git a/csrc/quantization/compressed_tensors.cpp b/csrc/quantization/compressed_tensors.cpp deleted file mode 100644 index f5b71bcc..00000000 --- a/csrc/quantization/compressed_tensors.cpp +++ /dev/null @@ -1,6 +0,0 @@ -// #include "compressed_tensors.hpp" - -// infinicore::nn::QuantScheme CompressedTensors::get_quant_scheme() { -// // need to add more schemes later -// return infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8; -// } \ No newline at end of file diff --git a/csrc/quantization/compressed_tensors.hpp b/csrc/quantization/compressed_tensors.hpp index b290f9ac..f502f398 100644 --- a/csrc/quantization/compressed_tensors.hpp +++ b/csrc/quantization/compressed_tensors.hpp @@ -1,16 +1,16 @@ #pragma once -// #include "../config/global_config.hpp" + #include "../config/quant_config.hpp" -#include "quantization.hpp" -// #include "utils.hpp" +#include "base_quantization.hpp" namespace infinilm::quantization { class CompressedTensors : public BaseQuantization { + // This is a temporary class that currently only returns COMPRESSED_TENSOR_W8A8I8. + // Future enhancements should parse quant_config to extract detailed quantization + // information and support multiple quantization schemes. 
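The comment above marks the extension point: a further scheme would follow the same pattern as CompressedTensors, that is, subclass BaseQuantization, report its QuantScheme, and add a branch in QuantConfig::get_quantization_method(). A rough sketch under those assumptions; AwqQuantization and QuantScheme::AWQ_W4A16 are made-up placeholders, not part of infinicore or this patch:

    // Hypothetical awq_quantization.hpp (not part of this patch):
    #pragma once
    #include "base_quantization.hpp"

    namespace infinilm::quantization {
    class AwqQuantization : public BaseQuantization {
    public:
        explicit AwqQuantization(const nlohmann::json &quant_config)
            : BaseQuantization(quant_config) {}

        infinicore::nn::QuantScheme get_quant_scheme() const override {
            // Placeholder value; a real implementation needs a matching
            // enum entry in infinicore::nn::QuantScheme.
            return infinicore::nn::QuantScheme::AWQ_W4A16;
        }
    };
    } // namespace infinilm::quantization

    // ...plus one more branch in QuantConfig::get_quantization_method():
    //   if (quantization_config["quant_method"] == "awq") {
    //       return std::make_shared<infinilm::quantization::AwqQuantization>(quantization_config);
    //   }
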
public: explicit CompressedTensors(const nlohmann::json &quant_config) - : BaseQuantization(quant_config) { - // quant_config_ = global_config.get_quant_config_json(); - }; + : BaseQuantization(quant_config) {}; infinicore::nn::QuantScheme get_quant_scheme() const override { @@ -18,4 +18,4 @@ class CompressedTensors : public BaseQuantization { }; }; -} // namespace infinilm::quantization \ No newline at end of file +} // namespace infinilm::quantization diff --git a/csrc/quantization/quantization.hpp b/csrc/quantization/quantization.hpp index 65f34aa1..48b7646e 100644 --- a/csrc/quantization/quantization.hpp +++ b/csrc/quantization/quantization.hpp @@ -1,18 +1,5 @@ #pragma once -#include "../config/quant_config.hpp" -#include "infinicore/nn/quantization.hpp" -#include "nlohmann/json.hpp" - -namespace infinilm::quantization { -class BaseQuantization { -public: - explicit BaseQuantization(const nlohmann::json &quant_config) : quant_config_(quant_config) {}; - virtual ~BaseQuantization() = default; - virtual infinicore::nn::QuantScheme get_quant_scheme() const = 0; - -protected: - // infinilm::config::quantization::QuantConfig quant_config_; - nlohmann::json quant_config_; -}; -} // namespace infinilm::quantization \ No newline at end of file +#include "base_quantization.hpp" +#include "compressed_tensors.hpp" +#include "infinicore/nn/quantization.hpp" diff --git a/csrc/quantization/utils.hpp b/csrc/quantization/utils.hpp deleted file mode 100644 index 1ae21db2..00000000 --- a/csrc/quantization/utils.hpp +++ /dev/null @@ -1,2 +0,0 @@ -#include "../config/global_config.hpp" -#include "infinicore/nn/quantization.hpp" \ No newline at end of file diff --git a/examples/jiuge.py b/examples/jiuge.py index 8043ebbb..9ea1019a 100644 --- a/examples/jiuge.py +++ b/examples/jiuge.py @@ -56,7 +56,7 @@ def get_args(): parser.add_argument( "--max_new_tokens", type=int, - default=1000, + default=100, help="max_new_tokens", ) parser.add_argument( @@ -95,7 +95,7 @@ def get_args(): def test( prompts: str | list[str], model_path, - max_new_tokens=5000, + max_new_tokens=100, infini_device=infinicore.device("cpu", 0), tp=1, enable_paged_attn=False, diff --git a/python/infinilm/modeling_utils.py b/python/infinilm/modeling_utils.py index a8d987ca..d1b26dd9 100644 --- a/python/infinilm/modeling_utils.py +++ b/python/infinilm/modeling_utils.py @@ -75,7 +75,6 @@ def load_state_dict( ) for k in f.keys(): - # state_dict[k] = f.get_tensor(k).to(device=device, dtype=dtype) state_dict[k] = f.get_tensor(k).to(device=device) return state_dict @@ -148,7 +147,6 @@ def load_model_state_dict_by_file( model_param = load_state_dict( file_path, device=torch_device, dtype=torch_dtype ) - already_loaded_keys.extend(model_param.keys()) # --------------------------------------------------------- # diff --git a/python/infinilm/models/llama/configuration_llama.py b/python/infinilm/models/llama/configuration_llama.py index f893c5cf..8d07a657 100644 --- a/python/infinilm/models/llama/configuration_llama.py +++ b/python/infinilm/models/llama/configuration_llama.py @@ -15,13 +15,11 @@ """LLaMA model configuration""" -from typing import Optional import infinicore from infinilm.lib import _infinilm from ...configuration_utils import PretrainedConfig -from ..quant_config import parse_quant_config, QuantizationConfig class LlamaConfig(PretrainedConfig, _infinilm.LlamaConfig): r""" @@ -183,7 +181,6 @@ def __init__( mlp_bias=False, head_dim=None, torch_dtype=None, - quantization_config=None, **kwargs, ): _infinilm.LlamaConfig.__init__(self) @@ 
-246,12 +243,4 @@ def __init__( eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, - ) - - if isinstance(quantization_config, dict): - self.quantization_config: Optional[QuantizationConfig] = parse_quant_config(quantization_config) - self.quantization_config_dict = quantization_config - else: - self.quantization_config = None - self.quantization_config_dict = None - + ) \ No newline at end of file diff --git a/python/infinilm/models/quant_config.py b/python/infinilm/models/quant_config.py deleted file mode 100644 index 9e8ea0bf..00000000 --- a/python/infinilm/models/quant_config.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2025, InfiniCore -# BSD 3-Clause License - -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Any, Type - -# ---------------- 抽象层 ---------------- -class QuantizationConfig(ABC): - """InfiniCore 量化统一入口,C++ 或 Python 侧都只认这四个接口。""" - @abstractmethod - def get_name(self) -> str: ... - @abstractmethod - def get_min_capability(self) -> int: ... - @abstractmethod - def get_scaled_act_names(self) -> List[str]: ... - @abstractmethod - def get_quant_method(self) -> str: - """返回算法名,供 C++ dispatcher 用。""" - ... - -# ---------------- 数据类 ---------------- -@dataclass -class CompressedTensorsConfig(QuantizationConfig): - """对应 HF compressed-tensors 导出格式。""" - quant_method: str = "compressed-tensors" - format: str = "int-quantized" - quantization_status: str = "compressed" - version: str = "0.11.0" - global_compression_ratio: Optional[float] = None - ignore: List[str] = field(default_factory=lambda: ["lm_head"]) - kv_cache_scheme: Optional[Dict[str, Any]] = None - sparsity_config: Dict[str, Any] = field(default_factory=dict) - transform_config: Dict[str, Any] = field(default_factory=dict) - config_groups: Dict[str, "Group"] = field(default_factory=dict) - - @dataclass - class TensorConfig: - num_bits: int - type: str - symmetric: bool - dynamic: bool - strategy: str - observer: Optional[str] = None - observer_kwargs: Dict[str, Any] = field(default_factory=dict) - group_size: Optional[int] = None - block_structure: Optional[str] = None - actorder: Optional[Any] = None - - @dataclass - class Group: - targets: List[str] - weights: "CompressedTensorsConfig.TensorConfig" - input_activations: Optional["CompressedTensorsConfig.TensorConfig"] = None - output_activations: Optional["CompressedTensorsConfig.TensorConfig"] = None - format: str = "int-quantized" - - @staticmethod - def from_dict(cfg: Dict[str, Any]) -> "CompressedTensorsConfig": - def _build_tensor(obj: Optional[Dict[str, Any]]) -> Optional["CompressedTensorsConfig.TensorConfig"]: - return None if obj is None else CompressedTensorsConfig.TensorConfig(**obj) - - groups = {} - for gname, gcfg in cfg.get("config_groups", {}).items(): - groups[gname] = CompressedTensorsConfig.Group( - targets=gcfg["targets"], - weights=_build_tensor(gcfg["weights"]), - input_activations=_build_tensor(gcfg.get("input_activations")), - output_activations=_build_tensor(gcfg.get("output_activations")), - format=gcfg.get("format", "int-quantized"), - ) - return CompressedTensorsConfig( - quant_method=cfg["quant_method"], - format=cfg["format"], - quantization_status=cfg["quantization_status"], - version=cfg["version"], - global_compression_ratio=cfg.get("global_compression_ratio"), - ignore=cfg.get("ignore", ["lm_head"]), - kv_cache_scheme=cfg.get("kv_cache_scheme"), - sparsity_config=cfg.get("sparsity_config", {}), - 
transform_config=cfg.get("transform_config", {}), - config_groups=groups, - ) - - def get_name(self) -> str: - return self.quant_method - - def get_min_capability(self) -> int: - return 75 - - def get_scaled_act_names(self) -> List[str]: - return [] - - def get_quant_method(self) -> str: - return self.quant_method - - -_QUANT_METHOD_MAP: Dict[str, Type[QuantizationConfig]] = { - "compressed-tensors": CompressedTensorsConfig, -} - -def parse_quant_config(quant_cfg: Dict[str, Any]) -> Optional[QuantizationConfig]: - """统一解析入口,供 LlamaConfig 调用。""" - method = quant_cfg.get("quant_method") - cls = _QUANT_METHOD_MAP.get(method) - if cls is None: - return None - - return cls.from_dict(quant_cfg) \ No newline at end of file From 23c2ae3c27a72d1671f3bece7e08801b124b8177 Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Thu, 22 Jan 2026 11:41:04 +0800 Subject: [PATCH 6/8] =?UTF-8?q?=E8=B7=9F=E9=9A=8Finifnicore=E4=BF=AE?= =?UTF-8?q?=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- csrc/layers/fused_linear.cpp | 16 ++++++++-------- csrc/layers/fused_linear.hpp | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/csrc/layers/fused_linear.cpp b/csrc/layers/fused_linear.cpp index 700e8fde..e108b275 100644 --- a/csrc/layers/fused_linear.cpp +++ b/csrc/layers/fused_linear.cpp @@ -14,13 +14,13 @@ QKVParallelLinear::QKVParallelLinear(size_t hidden_size, const infinicore::DataType &dtype, const infinicore::Device &device, engine::distributed::RankInfo rank_info, - std::optional quant_config) + std::optional quant_scheme) : QKVParallelLinear(hidden_size, head_dim, head_dim, head_dim, num_q_head, num_kv_head, num_kv_head, bias, bias, bias, dtype, device, rank_info, - quant_config) {} + quant_scheme) {} QKVParallelLinear::QKVParallelLinear(size_t hidden_size, size_t q_dim, size_t k_dim, size_t v_dim, @@ -29,7 +29,7 @@ QKVParallelLinear::QKVParallelLinear(size_t hidden_size, const infinicore::DataType &dtype, const infinicore::Device &device, engine::distributed::RankInfo rank_info, - std::optional quant_config) + std::optional quant_scheme) : infinicore::nn::ColumnParallelLinear( hidden_size, num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, @@ -38,7 +38,7 @@ QKVParallelLinear::QKVParallelLinear(size_t hidden_size, device, rank_info.tp_rank, rank_info.tp_size, - quant_config), + quant_scheme), q_dim_(q_dim), k_dim_(k_dim), v_dim_(v_dim), @@ -144,15 +144,15 @@ bool QKVParallelLinear::has_v_bias() const { return v_bias_; } GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias, const infinicore::DataType &dtype, const infinicore::Device &device, engine::distributed::RankInfo rank_info, - std::optional quant_config) - : GateUpParallelLinear(hidden_size, intermediate_size, bias, bias, dtype, device, rank_info, quant_config) { + std::optional quant_scheme) + : GateUpParallelLinear(hidden_size, intermediate_size, bias, bias, dtype, device, rank_info, quant_scheme) { } GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, const infinicore::DataType &dtype, const infinicore::Device &device, engine::distributed::RankInfo rank_info, - std::optional quant_config) - : infinicore::nn::ColumnParallelLinear(hidden_size, intermediate_size * 2, gate_bias || up_bias, dtype, device, rank_info.tp_rank, rank_info.tp_size, quant_config), gate_bias_(gate_bias), up_bias_(up_bias) { + std::optional quant_scheme) + : 
infinicore::nn::ColumnParallelLinear(hidden_size, intermediate_size * 2, gate_bias || up_bias, dtype, device, rank_info.tp_rank, rank_info.tp_size, quant_scheme), gate_bias_(gate_bias), up_bias_(up_bias) { if (gate_bias_ != up_bias_) { throw std::runtime_error("Not supported yet: gate_bias and up_bias should be given at the same time"); } diff --git a/csrc/layers/fused_linear.hpp b/csrc/layers/fused_linear.hpp index f4220fce..f3d95bae 100644 --- a/csrc/layers/fused_linear.hpp +++ b/csrc/layers/fused_linear.hpp @@ -14,7 +14,7 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), - std::optional quant_config = std::nullopt); + std::optional quant_scheme = std::nullopt); // A more common case where all heads have the same dimension explicit QKVParallelLinear(size_t hidden_size, @@ -24,7 +24,7 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), - std::optional quant_config = std::nullopt); + std::optional quant_scheme = std::nullopt); std::tuple forward_split(infinicore::Tensor &input); @@ -65,12 +65,12 @@ class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias = false, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), - std::optional quant_config = std::nullopt); + std::optional quant_scheme = std::nullopt); GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), - std::optional quant_config = std::nullopt); + std::optional quant_scheme = std::nullopt); std::tuple forward_split(infinicore::Tensor &input); From ca87660661de54516033fcc937eada2e1eeb4f80 Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Fri, 23 Jan 2026 10:08:34 +0800 Subject: [PATCH 7/8] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=89=80=E6=9C=89?= =?UTF-8?q?=E7=9A=84model=5Fconfig=EF=BC=8C=E7=BB=9F=E4=B8=80=E4=BD=BF?= =?UTF-8?q?=E7=94=A8global=5Fconfig?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- csrc/config/global_config.cpp | 65 +++++++++++++++++++++++ csrc/config/global_config.hpp | 38 +++++++++++++ csrc/engine/infer_engine.cpp | 6 +-- csrc/engine/infer_engine.hpp | 3 +- csrc/engine/rank_worker.cpp | 13 +++-- csrc/engine/rank_worker.hpp | 5 +- csrc/models/llama/llama_attention.cpp | 29 +++++----- csrc/models/llama/llama_attention.hpp | 3 +- csrc/models/llama/llama_config.hpp | 2 - csrc/models/llama/llama_decoder_layer.cpp | 13 +++-- csrc/models/llama/llama_decoder_layer.hpp | 3 +- csrc/models/llama/llama_for_causal_lm.cpp | 10 ++-- csrc/models/llama/llama_for_causal_lm.hpp | 5 +- csrc/models/llama/llama_mlp.cpp | 11 ++-- csrc/models/llama/llama_mlp.hpp | 3 +- csrc/models/llama/llama_model.cpp | 52 +++++++++--------- csrc/models/llama/llama_model.hpp | 7 +-- csrc/models/model_factory.cpp | 8 +-- 
csrc/models/model_factory.hpp | 2 +- csrc/pybind11/engine/engine.hpp | 6 +-- python/infinilm/infer_engine.py | 2 +- 21 files changed, 186 insertions(+), 100 deletions(-) diff --git a/csrc/config/global_config.cpp b/csrc/config/global_config.cpp index 93cd45be..2f3ce308 100644 --- a/csrc/config/global_config.cpp +++ b/csrc/config/global_config.cpp @@ -20,4 +20,69 @@ GlobalConfig::get_quant_scheme() const { return infinicore::nn::QuantScheme::NONE; } } + +std::shared_ptr +GlobalConfig::get_rope_scaling() const { + if (!config_json.contains("rope_scaling") || config_json["rope_scaling"].is_null()) { + return nullptr; + } + + const auto &rope_scaling = config_json["rope_scaling"]; + if (!rope_scaling.is_object()) { + throw std::runtime_error("rope_scaling must be an object"); + } + + if (!rope_scaling.contains("type")) { + throw std::runtime_error("rope_scaling must contain 'type' field"); + } + + std::string type_str = rope_scaling["type"].get(); + if (type_str == "longrope") { + // Required fields for LongRopeConfig + if (!rope_scaling.contains("short_factor") || !rope_scaling.contains("long_factor") || !rope_scaling.contains("original_max_position_embeddings")) { + throw std::runtime_error( + "LongRopeConfig requires 'short_factor', 'long_factor', and 'original_max_position_embeddings'"); + } + + auto short_factor = rope_scaling["short_factor"].get>(); + auto long_factor = rope_scaling["long_factor"].get>(); + size_t original_max_position_embeddings = rope_scaling["original_max_position_embeddings"].get(); + + float factor = 1.0f; + if (rope_scaling.contains("factor")) { + factor = rope_scaling["factor"].get(); + } + + return std::make_shared( + std::move(short_factor), + std::move(long_factor), + original_max_position_embeddings, + factor); + } else if (type_str == "default" || type_str == "none") { + // Default scaling, no scaling applied + return nullptr; + } else { + throw std::runtime_error("Unsupported rope_scaling type: " + type_str); + } +} + +infinicore::DataType +GlobalConfig::get_dtype() const { + try { + std::string dtype_str = this->get("torch_dtype"); + if (dtype_str == "float32") { + return infinicore::DataType::F32; + } else if (dtype_str == "float16") { + return infinicore::DataType::F16; + } else if (dtype_str == "bfloat16") { + return infinicore::DataType::BF16; + } else if (dtype_str == "int8") { + return infinicore::DataType::I8; + } else { + throw std::runtime_error("Unsupported dtype string: " + dtype_str); + } + } catch (const std::exception &e) { + throw std::runtime_error("Error getting dtype from config: " + std::string(e.what())); + } +} } // namespace infinilm::config::global_config diff --git a/csrc/config/global_config.hpp b/csrc/config/global_config.hpp index 1621142e..dac30565 100644 --- a/csrc/config/global_config.hpp +++ b/csrc/config/global_config.hpp @@ -1,4 +1,8 @@ #pragma once + +// #include "infinicore/nn/quantization.hpp" +#include "infinicore/nn/rope.hpp" +#include "infinicore/ops.hpp" #include "quant_config.hpp" #include #include @@ -13,7 +17,41 @@ struct GlobalConfig { GlobalConfig(const nlohmann::json &json) : config_json(json) {}; GlobalConfig(const std::string &path); + // Template Function to get a value by key with type safety + template + T get(const std::string &key) const { + if (!config_json.contains(key)) { + throw std::out_of_range("Key '" + key + "' not found in config."); + } + try { + return config_json.at(key).get(); + } catch (const nlohmann::json::type_error &e) { + throw std::runtime_error("Type conversion failed for key '" + key 
+ "': " + std::string(e.what())); + } + } + + template + T get_or(const std::string &key, const T &default_value) const { + if (!config_json.contains(key) || config_json.at(key).is_null()) { + return default_value; + } + try { + return config_json.at(key).get(); + } catch (const nlohmann::json::type_error &) { + // If type conversion fails, return default value + return default_value; + } + } + size_t get_kv_dim() const { + return get("hidden_size") * get("num_key_value_heads") / get("num_attention_heads"); + } + size_t get_head_dim() const { + return get("hidden_size") / get("num_attention_heads"); + } + + infinicore::DataType get_dtype() const; infinicore::nn::QuantScheme get_quant_scheme() const; + std::shared_ptr get_rope_scaling() const; private: nlohmann::json config_json; diff --git a/csrc/engine/infer_engine.cpp b/csrc/engine/infer_engine.cpp index caabecf7..c737ffc1 100644 --- a/csrc/engine/infer_engine.cpp +++ b/csrc/engine/infer_engine.cpp @@ -8,13 +8,11 @@ namespace infinilm::engine { // Constructor //------------------------------------------------------ InferEngine::InferEngine( - const InfinilmModel::Config &config, const distributed::DistConfig &distributed_config, infinicore::Device::Type device_type, const cache::CacheConfig *cache_config, const std::string &model_path) // Changed parameter - : communication_group_(distributed_config, device_type), - model_config_(config) { + : communication_group_(distributed_config, device_type) { if (cache_config != nullptr) { cache_config_ = cache_config->unique_copy(); @@ -28,7 +26,7 @@ InferEngine::InferEngine( workers_.reserve(world_size); for (int r = 0; r < world_size; ++r) { workers_.emplace_back(std::make_unique( - model_config_, + // model_config_, communication_group_.get_rank_info(r), cache_config_ != nullptr ? 
cache_config_.get() : nullptr, global_config_)); diff --git a/csrc/engine/infer_engine.hpp b/csrc/engine/infer_engine.hpp index 1d628598..f8a2d95c 100644 --- a/csrc/engine/infer_engine.hpp +++ b/csrc/engine/infer_engine.hpp @@ -20,7 +20,6 @@ class InferEngine { // Updated constructor: accept CacheConfig instead of CacheType InferEngine( - const InfinilmModel::Config &config, const distributed::DistConfig &distributed_config = distributed::DistConfig(), infinicore::Device::Type device_type = infinicore::context::getDevice().getType(), const cache::CacheConfig *cache_config = nullptr, @@ -47,7 +46,7 @@ class InferEngine { protected: std::vector> workers_; distributed::CommunicationGroup communication_group_; - const InfinilmModel::Config &model_config_; + // const InfinilmModel::Config &model_config_; std::unique_ptr cache_config_; std::shared_ptr global_config_; }; diff --git a/csrc/engine/rank_worker.cpp b/csrc/engine/rank_worker.cpp index 0d6cc9ab..be287bb9 100644 --- a/csrc/engine/rank_worker.cpp +++ b/csrc/engine/rank_worker.cpp @@ -10,12 +10,11 @@ namespace infinilm::engine { -RankWorker::RankWorker(const InfinilmModel::Config &model_config, - const distributed::RankInfo &rank_info, - const cache::CacheConfig *cache_config, - std::shared_ptr global_config) - : model_config_(model_config), - rank_info_(rank_info), +RankWorker::RankWorker( + const distributed::RankInfo &rank_info, + const cache::CacheConfig *cache_config, + std::shared_ptr global_config) + : rank_info_(rank_info), job_cmd_(Command::INIT), has_job_(false), job_done_(false), @@ -176,7 +175,7 @@ void RankWorker::thread_loop() { infinicore::context::setDevice(rank_info_.device); // Create model using factory (may be expensive) - model_ = InfinilmModelFactory::createModel(model_config_, rank_info_, pending_cache_config_ != nullptr ? pending_cache_config_.get() : nullptr, global_config_); + model_ = InfinilmModelFactory::createModel(rank_info_, pending_cache_config_ != nullptr ? 
pending_cache_config_.get() : nullptr, global_config_); if (!model_) { throw std::runtime_error("Failed to create model"); } diff --git a/csrc/engine/rank_worker.hpp b/csrc/engine/rank_worker.hpp index 34c4b0b6..b939b3c1 100644 --- a/csrc/engine/rank_worker.hpp +++ b/csrc/engine/rank_worker.hpp @@ -55,8 +55,7 @@ class RankWorker { infinicore::Tensor output_ids; }; - RankWorker(const InfinilmModel::Config &model_config, - const distributed::RankInfo &rank_info, + RankWorker(const distributed::RankInfo &rank_info, const cache::CacheConfig *cache_config, std::shared_ptr global_config); @@ -89,7 +88,7 @@ class RankWorker { private: // Worker properties - const InfinilmModel::Config &model_config_; + // const InfinilmModel::Config &model_config_; distributed::RankInfo rank_info_; std::shared_ptr model_; std::shared_ptr cache_; diff --git a/csrc/models/llama/llama_attention.cpp b/csrc/models/llama/llama_attention.cpp index dd8fe38d..4524dae1 100644 --- a/csrc/models/llama/llama_attention.cpp +++ b/csrc/models/llama/llama_attention.cpp @@ -17,29 +17,28 @@ namespace infinilm::models::llama { -LlamaAttention::LlamaAttention(const LlamaConfig &config, - const infinicore::Device &device, +LlamaAttention::LlamaAttention(const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info, std::shared_ptr global_config) : layer_idx_(layer_idx), - hidden_size_(config.hidden_size), - num_attention_heads_(config.num_attention_heads), - num_key_value_heads_(config.num_key_value_heads), - head_dim_(config.head_dim), - kv_dim_(config.kv_dim()), - use_bias_(config.attention_bias), - use_output_bias_(config.attention_output_bias), - max_position_embeddings_(config.max_position_embeddings), + hidden_size_(global_config->get("hidden_size")), + num_attention_heads_(global_config->get("num_attention_heads")), + num_key_value_heads_(global_config->get("num_key_value_heads")), + head_dim_(global_config->get_head_dim()), + kv_dim_(global_config->get_kv_dim()), + use_bias_(global_config->get_or("attention_bias", true)), + use_output_bias_(global_config->get_or("attention_output_bias", false)), + max_position_embeddings_(global_config->get("max_position_embeddings")), rank_info_(rank_info), global_config_(global_config) { - const auto &dtype{config.dtype}; + const auto &dtype{global_config_->get_dtype()}; int tp_rank = rank_info.tp_rank; int tp_size = rank_info.tp_size; - int num_attention_heads = config.num_attention_heads; - int num_key_value_heads = config.num_key_value_heads; + int num_attention_heads = global_config_->get("num_attention_heads"); + int num_key_value_heads = global_config_->get("num_key_value_heads"); if ((num_key_value_heads >= tp_size) && (0 == (num_key_value_heads % tp_size))) { this->num_attention_heads_ = num_attention_heads / tp_size; @@ -52,7 +51,7 @@ LlamaAttention::LlamaAttention(const LlamaConfig &config, auto quant_scheme = this->global_config_->get_quant_scheme(); switch (quant_scheme) { case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: - INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, + INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, global_config_->get("num_attention_heads"), global_config_->get("num_key_value_heads"), use_bias_, dtype, device, rank_info, quant_scheme); INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, @@ -60,7 +59,7 @@ 
LlamaAttention::LlamaAttention(const LlamaConfig &config, break; default: - INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, config.num_attention_heads, config.num_key_value_heads, use_bias_, + INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, global_config_->get("num_attention_heads"), global_config_->get("num_key_value_heads"), use_bias_, dtype, device, rank_info); INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, diff --git a/csrc/models/llama/llama_attention.hpp b/csrc/models/llama/llama_attention.hpp index ca9abe32..231169b0 100644 --- a/csrc/models/llama/llama_attention.hpp +++ b/csrc/models/llama/llama_attention.hpp @@ -37,8 +37,7 @@ class LlamaAttention : public infinicore::nn::Module { * @param layer_idx Layer index for cache access * @param dtype Optional data type for model parameters (defaults to F32) */ - LlamaAttention(const LlamaConfig &config, - const infinicore::Device &device, + LlamaAttention(const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), std::shared_ptr global_config = nullptr); diff --git a/csrc/models/llama/llama_config.hpp b/csrc/models/llama/llama_config.hpp index 0db2bcc8..fe5ba7e9 100644 --- a/csrc/models/llama/llama_config.hpp +++ b/csrc/models/llama/llama_config.hpp @@ -93,8 +93,6 @@ struct LlamaConfig : public InfinilmModel::Config { } return true; } - - nlohmann::json config_json; }; } // namespace infinilm::models::llama diff --git a/csrc/models/llama/llama_decoder_layer.cpp b/csrc/models/llama/llama_decoder_layer.cpp index 1842ae2d..332095c6 100644 --- a/csrc/models/llama/llama_decoder_layer.cpp +++ b/csrc/models/llama/llama_decoder_layer.cpp @@ -6,22 +6,21 @@ namespace infinilm::models::llama { -LlamaDecoderLayer::LlamaDecoderLayer(const LlamaConfig &config, - const infinicore::Device &device, +LlamaDecoderLayer::LlamaDecoderLayer(const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info, std::shared_ptr global_config) : layer_idx_(layer_idx), rank_info_(rank_info), global_config_(global_config) { - const auto &dtype{config.dtype}; + const auto &dtype{global_config_->get_dtype()}; // Initialize layer normalization layers - INFINICORE_NN_MODULE_INIT(input_layernorm, config.hidden_size, config.rms_norm_eps, + INFINICORE_NN_MODULE_INIT(input_layernorm, global_config_->get("hidden_size"), global_config_->get("rms_norm_eps"), dtype, device); - INFINICORE_NN_MODULE_INIT(post_attention_layernorm, config.hidden_size, config.rms_norm_eps, + INFINICORE_NN_MODULE_INIT(post_attention_layernorm, global_config_->get("hidden_size"), global_config_->get("rms_norm_eps"), dtype, device); // Initialize attention and MLP modules - INFINICORE_NN_MODULE_INIT(self_attn, config, device, layer_idx, rank_info_, global_config); - INFINICORE_NN_MODULE_INIT(mlp, config, device, rank_info_, global_config); + INFINICORE_NN_MODULE_INIT(self_attn, device, layer_idx, rank_info_, global_config); + INFINICORE_NN_MODULE_INIT(mlp, device, rank_info_, global_config); } infinicore::Tensor LlamaDecoderLayer::forward(const infinicore::Tensor &hidden_states, diff --git a/csrc/models/llama/llama_decoder_layer.hpp b/csrc/models/llama/llama_decoder_layer.hpp index d37a2994..e377d2ae 100644 --- a/csrc/models/llama/llama_decoder_layer.hpp +++ b/csrc/models/llama/llama_decoder_layer.hpp @@ -33,8 +33,7 @@ class LlamaDecoderLayer : public infinicore::nn::Module { * @param layer_idx 
Layer index for cache management and debugging * @param dtype Optional data type for model parameters (defaults to F32) */ - LlamaDecoderLayer(const LlamaConfig &config, - const infinicore::Device &device, + LlamaDecoderLayer(const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), std::shared_ptr global_config = nullptr); diff --git a/csrc/models/llama/llama_for_causal_lm.cpp b/csrc/models/llama/llama_for_causal_lm.cpp index 1963cee2..e63a9b22 100644 --- a/csrc/models/llama/llama_for_causal_lm.cpp +++ b/csrc/models/llama/llama_for_causal_lm.cpp @@ -6,23 +6,23 @@ namespace infinilm::models::llama { -LlamaForCausalLM::LlamaForCausalLM(const LlamaConfig &config, - const infinicore::Device &device, +LlamaForCausalLM::LlamaForCausalLM(const infinicore::Device &device, engine::distributed::RankInfo rank_info, std::shared_ptr global_config) { // Initialize module's device_ member device_ = device; - const auto &dtype{config.dtype}; + const auto &dtype{global_config->get_dtype()}; // Initialize base model - INFINICORE_NN_MODULE_INIT(model, config, device, rank_info, global_config); + INFINICORE_NN_MODULE_INIT(model, device, rank_info, global_config); // Initialize language modeling head // Note: If tie_word_embeddings is true, we would share weights with embed_tokens // For now, we create a separate linear layer - INFINICORE_NN_MODULE_INIT(lm_head, config.hidden_size, config.vocab_size, false, + + INFINICORE_NN_MODULE_INIT(lm_head, global_config->get("hidden_size"), global_config->get("vocab_size"), false, dtype, device); } diff --git a/csrc/models/llama/llama_for_causal_lm.hpp b/csrc/models/llama/llama_for_causal_lm.hpp index 2595c027..b7eef806 100644 --- a/csrc/models/llama/llama_for_causal_lm.hpp +++ b/csrc/models/llama/llama_for_causal_lm.hpp @@ -28,8 +28,7 @@ class LlamaForCausalLM : public InfinilmModel { * @param config Model configuration * @param device Device to create tensors on */ - LlamaForCausalLM(const LlamaConfig &config, - const infinicore::Device &device, + LlamaForCausalLM(const infinicore::Device &device, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), std::shared_ptr global_config = nullptr); @@ -44,7 +43,7 @@ class LlamaForCausalLM : public InfinilmModel { void reset_cache(const cache::CacheConfig *cache_config) override; // Module information - const LlamaConfig &config() const { return model_->config(); } + // const LlamaConfig &config() const { return model_->config(); } LlamaModel &model() { return *model_; } const LlamaModel &model() const { return *model_; } diff --git a/csrc/models/llama/llama_mlp.cpp b/csrc/models/llama/llama_mlp.cpp index 3f457d82..1f4ee436 100644 --- a/csrc/models/llama/llama_mlp.cpp +++ b/csrc/models/llama/llama_mlp.cpp @@ -5,14 +5,13 @@ namespace infinilm::models::llama { -LlamaMLP::LlamaMLP(const LlamaConfig &config, - const infinicore::Device &device, +LlamaMLP::LlamaMLP(const infinicore::Device &device, engine::distributed::RankInfo rank_info, std::shared_ptr global_config) - : hidden_size_(config.hidden_size), - intermediate_size_(config.intermediate_size), - use_bias_(config.mlp_bias), rank_info_(rank_info), global_config_(global_config) { - const auto &dtype{config.dtype}; + : hidden_size_(global_config->get("hidden_size")), + intermediate_size_(global_config->get("intermediate_size")), + use_bias_(global_config->get_or("mlp_bias", false)), rank_info_(rank_info), global_config_(global_config) { + const auto &dtype{global_config_->get_dtype()}; 
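The constructors in the hunks above stop taking a LlamaConfig struct and instead read every hyperparameter from the shared global_config through typed get / get_or lookups. Below is a minimal sketch of that accessor pattern, assuming an nlohmann::json-backed store similar to the config_json member exposed by csrc/config/global_config.hpp later in this series; the name JsonConfigSketch and its exact signatures are illustrative assumptions, not the project's real GlobalConfig API.

    #include <nlohmann/json.hpp>
    #include <cstddef>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for a JSON-backed model config.
    struct JsonConfigSketch {
        nlohmann::json config_json;

        // Required hyperparameter: throws if the key is missing.
        template <typename T>
        T get(const std::string &key) const {
            return config_json.at(key).get<T>();
        }

        // Optional flag: falls back to a default when the key is absent.
        template <typename T>
        T get_or(const std::string &key, T fallback) const {
            return config_json.contains(key) ? config_json.at(key).get<T>() : fallback;
        }
    };

    int main() {
        JsonConfigSketch cfg{nlohmann::json::parse(R"({
            "hidden_size": 4096,
            "intermediate_size": 11008,
            "num_attention_heads": 32
        })")};

        // Mirrors calls such as get("hidden_size") and get_or("mlp_bias", false) above.
        std::cout << cfg.get<std::size_t>("hidden_size") << "\n";          // 4096
        std::cout << std::boolalpha << cfg.get_or("mlp_bias", false) << "\n"; // false
    }

Missing required keys fail loudly in get, while get_or is the natural fit for optional flags such as mlp_bias, attention_bias, and attention_output_bias.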
int tp_rank = rank_info.tp_rank; int tp_size = rank_info.tp_size; diff --git a/csrc/models/llama/llama_mlp.hpp b/csrc/models/llama/llama_mlp.hpp index 42eacc1e..38249cb3 100644 --- a/csrc/models/llama/llama_mlp.hpp +++ b/csrc/models/llama/llama_mlp.hpp @@ -34,8 +34,7 @@ class LlamaMLP : public infinicore::nn::Module { * @param device Device to create tensors on * @param dtype Optional data type for model parameters (defaults to F32) */ - LlamaMLP(const LlamaConfig &config, - const infinicore::Device &device, + LlamaMLP(const infinicore::Device &device, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), std::shared_ptr global_config = nullptr); diff --git a/csrc/models/llama/llama_model.cpp b/csrc/models/llama/llama_model.cpp index df7aece1..bf7c123a 100644 --- a/csrc/models/llama/llama_model.cpp +++ b/csrc/models/llama/llama_model.cpp @@ -7,34 +7,33 @@ namespace infinilm::models::llama { -LlamaModel::LlamaModel(const LlamaConfig &config, - const infinicore::Device &device, +LlamaModel::LlamaModel(const infinicore::Device &device, engine::distributed::RankInfo rank_info, std::shared_ptr global_config) - : config_(config), rank_info_(rank_info), global_config_(global_config) { - const auto &dtype{config.dtype}; + : rank_info_(rank_info), global_config_(global_config) { + const auto &dtype{global_config_->get_dtype()}; // Initialize token embeddings - INFINICORE_NN_MODULE_INIT(embed_tokens, config.vocab_size, config.hidden_size, + INFINICORE_NN_MODULE_INIT(embed_tokens, global_config_->get("vocab_size"), global_config_->get("hidden_size"), std::nullopt, dtype, device); // Initialize decoder layers with layer indices // TODO: Update INFINICORE_NN_MODULE_VEC_INIT macro to support per-layer constructor arguments // (e.g., via a factory function or lambda that receives the layer index) // Currently, we can't use the macro because each layer needs a different layer_idx - layers_.reserve(config.num_hidden_layers); - for (size_t i = 0; i < config.num_hidden_layers; ++i) { + layers_.reserve(global_config_->get("num_hidden_layers")); + for (size_t i = 0; i < global_config_->get("num_hidden_layers"); ++i) { layers_.push_back(this->register_module( - "layers." + std::to_string(i), config, device, i, rank_info, global_config_)); + "layers." 
+ std::to_string(i), device, i, rank_info, global_config_)); } // Initialize final layer normalization - INFINICORE_NN_MODULE_INIT(norm, config.hidden_size, config.rms_norm_eps, + INFINICORE_NN_MODULE_INIT(norm, global_config_->get("hidden_size"), global_config_->get("rms_norm_eps"), dtype, device); // Initialize Rotary Position Embeddings (shared across all layers) // Use GPT-J-style inverse frequencies (default) and GPT_NEOX rotation pairing - INFINICORE_NN_MODULE_INIT(rotary_emb, config.head_dim, config.max_position_embeddings, - config.rope_theta, infinicore::nn::RoPE::Algo::GPT_NEOX, - dtype, device, config.rope_scaling); + INFINICORE_NN_MODULE_INIT(rotary_emb, global_config_->get_head_dim(), global_config_->get("max_position_embeddings"), + global_config_->get("rope_theta"), infinicore::nn::RoPE::Algo::GPT_NEOX, + dtype, device, global_config_->get_rope_scaling()); for (auto &layer : layers_) { if (layer) { @@ -69,24 +68,25 @@ void LlamaModel::reset_cache(const cache::CacheConfig *cache_config) { } if (auto kv_cache_config = dynamic_cast(cache_config)) { kv_cache_ = std::make_shared( - config_.head_dim, - config_.head_dim, - config_.num_key_value_heads, - config_.num_key_value_heads, - config_.num_hidden_layers, - config_.max_position_embeddings, - config_.dtype, + global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), + global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), + global_config_->get("num_key_value_heads"), + global_config_->get("num_key_value_heads"), + global_config_->get("num_hidden_layers"), + global_config_->get("max_position_embeddings"), + // config_.dtype, + global_config_->get_dtype(), *kv_cache_config, rank_info_); - } else if (auto paged_kv_cache_config = dynamic_cast(cache_config)) { kv_cache_ = std::make_shared( - config_.head_dim, - config_.head_dim, - config_.num_key_value_heads, - config_.num_key_value_heads, - config_.num_hidden_layers, - config_.dtype, + global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), + global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), + global_config_->get("num_key_value_heads"), + global_config_->get("num_key_value_heads"), + global_config_->get("num_hidden_layers"), + // config_.dtype, + global_config_->get_dtype(), *paged_kv_cache_config, rank_info_); } else { diff --git a/csrc/models/llama/llama_model.hpp b/csrc/models/llama/llama_model.hpp index b43fa542..422c1bd6 100644 --- a/csrc/models/llama/llama_model.hpp +++ b/csrc/models/llama/llama_model.hpp @@ -37,8 +37,7 @@ class LlamaModel : public infinicore::nn::Module { * @param device Device to create tensors on * @param dtype Optional data type for model parameters (defaults to F32) */ - LlamaModel(const LlamaConfig &config, - const infinicore::Device &device, + LlamaModel(const infinicore::Device &device, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), std::shared_ptr global_config = nullptr); @@ -64,8 +63,7 @@ class LlamaModel : public infinicore::nn::Module { void reset_cache(const cache::CacheConfig *cache_config); // Module information - const LlamaConfig &config() const { return config_; } - size_t num_layers() const { return config_.num_hidden_layers; } + size_t num_layers() const { return global_config_->get("num_hidden_layers"); } protected: // Token embeddings @@ -85,7 +83,6 @@ class LlamaModel : public infinicore::nn::Module { std::shared_ptr kv_cache_; private: - LlamaConfig config_; std::shared_ptr global_config_; }; diff --git 
a/csrc/models/model_factory.cpp b/csrc/models/model_factory.cpp index cf783fb8..b4fd634a 100644 --- a/csrc/models/model_factory.cpp +++ b/csrc/models/model_factory.cpp @@ -3,16 +3,16 @@ namespace infinilm { std::shared_ptr InfinilmModelFactory::createModel( - const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info, const cache::CacheConfig *cache, std::shared_ptr global_config) { std::shared_ptr model; - if (const auto llama_config_ptr = dynamic_cast(&config)) { - const auto &llama_config = *llama_config_ptr; + //****************************NEED TO BE FIXED */ + if (true) { + // const auto &llama_config = *llama_config_ptr; model = std::make_shared( - llama_config, rank_info.device, rank_info, global_config); + rank_info.device, rank_info, global_config); } else { throw std::invalid_argument("InfinilmModelFactory::createModel: Unsupported model config type"); } diff --git a/csrc/models/model_factory.hpp b/csrc/models/model_factory.hpp index fcf60708..c020f6a5 100644 --- a/csrc/models/model_factory.hpp +++ b/csrc/models/model_factory.hpp @@ -9,7 +9,7 @@ namespace infinilm { class InfinilmModelFactory { public: static std::shared_ptr createModel( - const InfinilmModel::Config &config, + // const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), const cache::CacheConfig *cache = nullptr, std::shared_ptr global_config = nullptr); diff --git a/csrc/pybind11/engine/engine.hpp b/csrc/pybind11/engine/engine.hpp index 535ddf8a..8d610c61 100644 --- a/csrc/pybind11/engine/engine.hpp +++ b/csrc/pybind11/engine/engine.hpp @@ -32,19 +32,19 @@ inline void bind_infer_engine(py::module &m) { py::class_> infer_engine(m, "InferEngine"); infer_engine .def(py::init([]( - const InfinilmModel::Config &cfg, + // const InfinilmModel::Config &cfg, const distributed::DistConfig &dist, infinicore::Device::Type dev, std::shared_ptr cache_cfg, const std::string &modle_path) { return std::make_shared( - cfg, + // cfg, dist, dev, cache_cfg ? 
cache_cfg.get() : nullptr, modle_path); }), - py::arg("config"), + // py::arg("config"), py::arg("distributed_config") = distributed::DistConfig(), py::arg("device_type") = infinicore::context::getDevice().getType(), py::arg("cache_config") = py::none(), diff --git a/python/infinilm/infer_engine.py b/python/infinilm/infer_engine.py index 716b0afb..94f02e30 100644 --- a/python/infinilm/infer_engine.py +++ b/python/infinilm/infer_engine.py @@ -35,7 +35,7 @@ def __init__( device = infinicore.device() super().__init__( - self.config, + # self.config, distributed_config._underlying, device._underlying.type, cache_config, From 6615743dd86e4f98ff010095610084805677d652 Mon Sep 17 00:00:00 2001 From: qinyiqun Date: Fri, 23 Jan 2026 11:20:41 +0800 Subject: [PATCH 8/8] Follow the latest InfiniLM code changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- csrc/config/global_config.hpp | 3 +++ csrc/models/llama/llama_attention.cpp | 14 ++++++++++---- csrc/models/llama/llama_attention.hpp | 1 - csrc/models/llama/llama_model.cpp | 10 ++++------ 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/csrc/config/global_config.hpp b/csrc/config/global_config.hpp index dac30565..e8be1ec2 100644 --- a/csrc/config/global_config.hpp +++ b/csrc/config/global_config.hpp @@ -46,6 +46,9 @@ struct GlobalConfig { return get("hidden_size") * get("num_key_value_heads") / get("num_attention_heads"); } size_t get_head_dim() const { + if (config_json.contains("head_dim")) { + return get("head_dim"); + } return get("hidden_size") / get("num_attention_heads"); } diff --git a/csrc/models/llama/llama_attention.cpp b/csrc/models/llama/llama_attention.cpp index 4524dae1..6ca77034 100644 --- a/csrc/models/llama/llama_attention.cpp +++ b/csrc/models/llama/llama_attention.cpp @@ -54,7 +54,9 @@ LlamaAttention::LlamaAttention(const infinicore::Device &device, INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, global_config_->get("num_attention_heads"), global_config_->get("num_key_value_heads"), use_bias_, dtype, device, rank_info, quant_scheme); - INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, + // INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, + // dtype, device, tp_rank, tp_size, rank_info.comm, quant_scheme); + INFINICORE_NN_MODULE_INIT(o_proj, global_config_->get("num_attention_heads") * head_dim_, hidden_size_, use_output_bias_, dtype, device, tp_rank, tp_size, rank_info.comm, quant_scheme); break; @@ -62,10 +64,14 @@ LlamaAttention::LlamaAttention(const infinicore::Device &device, INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, global_config_->get("num_attention_heads"), global_config_->get("num_key_value_heads"), use_bias_, dtype, device, rank_info); - INFINICORE_NN_MODULE_INIT(o_proj, hidden_size_, hidden_size_, use_output_bias_, + INFINICORE_NN_MODULE_INIT(o_proj, global_config_->get("num_attention_heads") * head_dim_, hidden_size_, use_output_bias_, dtype, device, tp_rank, tp_size, rank_info.comm); break; } + if (global_config_->get("model_type") == "qwen3") { + INFINICORE_NN_MODULE_INIT(q_norm, head_dim_, global_config_->get("rms_norm_eps"), dtype, device); + INFINICORE_NN_MODULE_INIT(k_norm, head_dim_, global_config_->get("rms_norm_eps"), dtype, device); + } } infinicore::Tensor LlamaAttention::forward_(const infinicore::Tensor &hidden_states, @@
-82,7 +88,7 @@ infinicore::Tensor LlamaAttention::forward_(const infinicore::Tensor &hidden_sta // 1. Project Q, K, V auto [q, k, v] = qkv_proj_->forward_split(hidden_states_mutable); - if (use_qk_norm_) { + if (global_config_->get("model_type") == "qwen3") { q = q_norm_->forward(q->view({batch_size * seq_len, num_attention_heads_, head_dim_})); k = k_norm_->forward(k->view({batch_size * seq_len, num_key_value_heads_, head_dim_})); } @@ -191,7 +197,7 @@ infinicore::Tensor LlamaAttention::forward_paged_(const infinicore::Tensor &hidd auto k_reshaped = k->view({seq_len, num_key_value_heads_, head_dim_}); auto v_reshaped = v->view({seq_len, num_key_value_heads_, head_dim_}); - if (use_qk_norm_) { + if (global_config_->get("model_type") == "qwen3") { q_reshaped = q_norm_->forward(q_reshaped); k_reshaped = k_norm_->forward(k_reshaped); } diff --git a/csrc/models/llama/llama_attention.hpp b/csrc/models/llama/llama_attention.hpp index 231169b0..17f6f95e 100644 --- a/csrc/models/llama/llama_attention.hpp +++ b/csrc/models/llama/llama_attention.hpp @@ -110,7 +110,6 @@ class LlamaAttention : public infinicore::nn::Module { size_t kv_dim_; bool use_bias_; // Bias for Q/K/V projections bool use_output_bias_; // Bias for output projection (o_proj) - bool use_qk_norm_; // Whether to use QK RMSNorm size_t max_position_embeddings_; // For cache initialization (deprecated, kept for compatibility) float scaling_; diff --git a/csrc/models/llama/llama_model.cpp b/csrc/models/llama/llama_model.cpp index bf7c123a..9baeb454 100644 --- a/csrc/models/llama/llama_model.cpp +++ b/csrc/models/llama/llama_model.cpp @@ -68,24 +68,22 @@ void LlamaModel::reset_cache(const cache::CacheConfig *cache_config) { } if (auto kv_cache_config = dynamic_cast(cache_config)) { kv_cache_ = std::make_shared( - global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), - global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), + global_config_->get_head_dim(), + global_config_->get_head_dim(), global_config_->get("num_key_value_heads"), global_config_->get("num_key_value_heads"), global_config_->get("num_hidden_layers"), global_config_->get("max_position_embeddings"), - // config_.dtype, global_config_->get_dtype(), *kv_cache_config, rank_info_); } else if (auto paged_kv_cache_config = dynamic_cast(cache_config)) { kv_cache_ = std::make_shared( - global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), - global_config_->get("hidden_size") / global_config_->get("num_attention_heads"), + global_config_->get_head_dim(), + global_config_->get_head_dim(), global_config_->get("num_key_value_heads"), global_config_->get("num_key_value_heads"), global_config_->get("num_hidden_layers"), - // config_.dtype, global_config_->get_dtype(), *paged_kv_cache_config, rank_info_);
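The get_head_dim() added in this last patch prefers an explicit "head_dim" entry and only falls back to hidden_size / num_attention_heads when the key is absent, and the KV-cache construction above now sizes itself through the same helper; this is also why o_proj's input width changed from hidden_size_ to num_attention_heads * head_dim_, since the two are no longer assumed equal. Below is a minimal sketch of that fallback under the same nlohmann::json assumption; the free function head_dim_of and the numbers in the example config are made up for illustration.

    #include <nlohmann/json.hpp>
    #include <cstddef>
    #include <iostream>

    // Same fallback rule in spirit as GlobalConfig::get_head_dim();
    // the surrounding names are assumptions, not the project's API.
    static std::size_t head_dim_of(const nlohmann::json &config_json) {
        if (config_json.contains("head_dim")) {
            return config_json.at("head_dim").get<std::size_t>();   // explicit value wins
        }
        return config_json.at("hidden_size").get<std::size_t>() /    // legacy derivation
               config_json.at("num_attention_heads").get<std::size_t>();
    }

    int main() {
        // Hypothetical config where the derived value (2560 / 32 = 80) would be wrong:
        // an explicit head_dim of 128 must take precedence.
        auto cfg = nlohmann::json::parse(R"({
            "hidden_size": 2560,
            "num_attention_heads": 32,
            "head_dim": 128
        })");
        std::cout << head_dim_of(cfg) << "\n";  // 128

        cfg.erase("head_dim");
        std::cout << head_dim_of(cfg) << "\n";  // falls back to 2560 / 32 = 80
    }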