diff --git a/.gitmodules b/.gitmodules
index eab6041a..ade5ff58 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
 [submodule "third_party/spdlog"]
 	path = third_party/spdlog
 	url = https://github.com/gabime/spdlog.git
+[submodule "third_party/json"]
+	path = third_party/json
+	url = https://github.com/nlohmann/json.git
diff --git a/=0.34.0, b/=0.34.0,
new file mode 100644
index 00000000..e69de29b
diff --git a/csrc/config/model_config.cpp b/csrc/config/model_config.cpp
new file mode 100644
index 00000000..ec15967a
--- /dev/null
+++ b/csrc/config/model_config.cpp
@@ -0,0 +1,88 @@
+#include "model_config.hpp"
+
+namespace infinilm::config {
+ModelConfig::ModelConfig(const std::string &path) {
+    std::ifstream file(path);
+    if (file.is_open()) {
+        file >> config_json;
+        file.close();
+    } else {
+        throw std::runtime_error("Could not open config file: " + path);
+    }
+    this->quant_config = QuantConfig(config_json["quantization_config"]);
+}
+
+infinicore::nn::QuantScheme
+ModelConfig::get_quant_scheme() const {
+    if (quant_config.get_quant_scheme() != infinicore::nn::QuantScheme::NONE) {
+        return quant_config.get_quant_scheme();
+    } else {
+        return infinicore::nn::QuantScheme::NONE;
+    }
+}
+
+std::shared_ptr
+ModelConfig::get_rope_scaling() const {
+    if (!config_json.contains("rope_scaling") || config_json["rope_scaling"].is_null()) {
+        return nullptr;
+    }
+
+    const auto &rope_scaling = config_json["rope_scaling"];
+    if (!rope_scaling.is_object()) {
+        throw std::runtime_error("rope_scaling must be an object");
+    }
+
+    if (!rope_scaling.contains("type")) {
+        throw std::runtime_error("rope_scaling must contain 'type' field");
+    }
+
+    std::string type_str = rope_scaling["type"].get<std::string>();
+    if (type_str == "longrope") {
+        // Required fields for LongRopeConfig
+        if (!rope_scaling.contains("short_factor") || !rope_scaling.contains("long_factor") || !rope_scaling.contains("original_max_position_embeddings")) {
+            throw std::runtime_error(
+                "LongRopeConfig requires 'short_factor', 'long_factor', and 'original_max_position_embeddings'");
+        }
+
+        auto short_factor = rope_scaling["short_factor"].get<std::vector<float>>();
+        auto long_factor = rope_scaling["long_factor"].get<std::vector<float>>();
+        size_t original_max_position_embeddings = rope_scaling["original_max_position_embeddings"].get<size_t>();
+
+        float factor = 1.0f;
+        if (rope_scaling.contains("factor")) {
+            factor = rope_scaling["factor"].get<float>();
+        }
+
+        return std::make_shared(
+            std::move(short_factor),
+            std::move(long_factor),
+            original_max_position_embeddings,
+            factor);
+    } else if (type_str == "default" || type_str == "none") {
+        // Default scaling, no scaling applied
+        return nullptr;
+    } else {
+        throw std::runtime_error("Unsupported rope_scaling type: " + type_str);
+    }
+}
+
+infinicore::DataType
+ModelConfig::get_dtype() const {
+    try {
+        std::string dtype_str = this->get<std::string>("torch_dtype");
+        if (dtype_str == "float32") {
+            return infinicore::DataType::F32;
+        } else if (dtype_str == "float16") {
+            return infinicore::DataType::F16;
+        } else if (dtype_str == "bfloat16") {
+            return infinicore::DataType::BF16;
+        } else if (dtype_str == "int8") {
+            return infinicore::DataType::I8;
+        } else {
+            throw std::runtime_error("Unsupported dtype string: " + dtype_str);
+        }
+    } catch (const std::exception &e) {
+        throw std::runtime_error("Error getting dtype from config: " + std::string(e.what()));
+    }
+}
+} // namespace infinilm::config
diff --git a/csrc/config/model_config.hpp b/csrc/config/model_config.hpp
new file mode 100644
index 00000000..2682c6d2
--- /dev/null
+++ b/csrc/config/model_config.hpp
@@ -0,0 +1,63 @@
+#pragma once
+
+#include "infinicore/nn/rope.hpp"
+#include "infinicore/ops.hpp"
+#include "quant_config.hpp"
+#include
+#include
+
+namespace infinilm::config {
+class ModelConfig {
+    // ModelConfig is implemented using nlohmann/json and is primarily used for advanced configuration
+    // beyond the standard model config. It is initialized via ModelConfig(const std::string& path)
+    // and passed through the InferEngine during inference.
+public:
+    ModelConfig() = default;
+    // Not implemented
+    // ModelConfig(const nlohmann::json &json) : config_json(json) {};
+    ModelConfig(const std::string &path);
+
+    // Template function to get a value by key with type safety
+    template <typename T>
+    T get(const std::string &key) const {
+        if (!config_json.contains(key)) {
+            throw std::out_of_range("Key '" + key + "' not found in config.");
+        }
+        try {
+            return config_json.at(key).get<T>();
+        } catch (const nlohmann::json::type_error &e) {
+            throw std::runtime_error("Type conversion failed for key '" + key + "': " + std::string(e.what()));
+        }
+    }
+
+    template <typename T>
+    T get_or(const std::string &key, const T &default_value) const {
+        if (!config_json.contains(key) || config_json.at(key).is_null()) {
+            return default_value;
+        }
+        try {
+            return config_json.at(key).get<T>();
+        } catch (const nlohmann::json::type_error &) {
+            // If type conversion fails, return the default value
+            return default_value;
+        }
+    }
+    size_t get_kv_dim() const {
+        return get<size_t>("hidden_size") * get<size_t>("num_key_value_heads") / get<size_t>("num_attention_heads");
+    }
+    size_t get_head_dim() const {
+        if (config_json.contains("head_dim")) {
+            return get<size_t>("head_dim");
+        }
+        return get<size_t>("hidden_size") / get<size_t>("num_attention_heads");
+    }
+
+    infinicore::DataType get_dtype() const;
+    infinicore::nn::QuantScheme get_quant_scheme() const;
+    std::shared_ptr get_rope_scaling() const;
+
+private:
+    nlohmann::json config_json;
+    QuantConfig quant_config;
+};
+} // namespace infinilm::config
diff --git a/csrc/config/quant_config.cpp b/csrc/config/quant_config.cpp
new file mode 100644
index 00000000..0f154407
--- /dev/null
+++ b/csrc/config/quant_config.cpp
@@ -0,0 +1,22 @@
+#include "quant_config.hpp"
+
+namespace infinilm::config {
+QuantConfig::QuantConfig(const nlohmann::json &json) : quantization_config(json) {
+    this->quantization_method = get_quantization_method();
+}
+
+std::shared_ptr<quantization::BaseQuantization>
+QuantConfig::get_quantization_method() const {
+    if (quantization_config.is_null()) {
+        return nullptr;
+    }
+
+    // Determine the quantization scheme from the JSON config
+    if (quantization_config["quant_method"] == "compressed-tensors") {
+        return std::make_shared<quantization::CompressedTensors>(quantization_config);
+    }
+    // Add other schemes as needed
+
+    return nullptr; // Default case if no matching scheme
+}
+} // namespace infinilm::config
diff --git a/csrc/config/quant_config.hpp b/csrc/config/quant_config.hpp
new file mode 100644
index 00000000..9fda4224
--- /dev/null
+++ b/csrc/config/quant_config.hpp
@@ -0,0 +1,28 @@
+#pragma once
+#include "../quantization/quantization.hpp"
+#include "nlohmann/json.hpp"
+
+namespace infinilm::config {
+
+class QuantConfig {
+    // QuantConfig is used to store and parse the "quantization" field from config.json.
+    // This is currently a basic version and will be extended in the future.
+public: + QuantConfig() = default; + QuantConfig(const nlohmann::json &json); + + infinicore::nn::QuantScheme get_quant_scheme() const { + if (quantization_method != nullptr) { + return quantization_method->get_quant_scheme(); + } else { + return infinicore::nn::QuantScheme::NONE; + } + } + +private: + nlohmann::json quantization_config; + std::shared_ptr get_quantization_method() const; + std::shared_ptr quantization_method; +}; + +} // namespace infinilm::config diff --git a/csrc/engine/infer_engine.cpp b/csrc/engine/infer_engine.cpp index f49a9108..c86b6bf1 100644 --- a/csrc/engine/infer_engine.cpp +++ b/csrc/engine/infer_engine.cpp @@ -6,6 +6,18 @@ namespace infinilm::engine { //------------------------------------------------------ // Constructor //------------------------------------------------------ +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ InferEngine::InferEngine( const InfinilmModel::Config &config, const distributed::DistConfig &distributed_config, @@ -13,7 +25,7 @@ InferEngine::InferEngine( const cache::CacheConfig *cache_config, bool enable_graph_compiling) // Changed parameter : communication_group_(distributed_config, device_type), - model_config_(config) { + legacy_model_config_(config) { if (cache_config != nullptr) { cache_config_ = cache_config->unique_copy(); @@ -24,7 +36,7 @@ InferEngine::InferEngine( workers_.reserve(world_size); for (int r = 0; r < world_size; ++r) { workers_.emplace_back(std::make_unique( - model_config_, + legacy_model_config_, communication_group_.get_rank_info(r), cache_config_ != nullptr ? cache_config_.get() : nullptr, barrier_.get(), @@ -35,6 +47,35 @@ InferEngine::InferEngine( this->compile(); } +InferEngine::InferEngine( + const std::string &model_path, + const distributed::DistConfig &distributed_config, + infinicore::Device::Type device_type, + const cache::CacheConfig *cache_config, + bool enable_graph_compiling) // Changed parameter + : communication_group_(distributed_config, device_type) { + if (cache_config != nullptr) { + cache_config_ = cache_config->unique_copy(); + } + + // Load model config if model_path is provided, model_path must be valid, and config.json exists + this->model_config_ = std::make_shared(model_path + "/config.json"); + // Create one RankWorker per rank + int world_size = communication_group_.get_world_size(); + barrier_ = std::make_unique((size_t)world_size); + workers_.reserve(world_size); + for (int r = 0; r < world_size; ++r) { + workers_.emplace_back(std::make_unique( + model_config_, + communication_group_.get_rank_info(r), + cache_config_ != nullptr ? 
cache_config_.get() : nullptr, + barrier_.get(), + enable_graph_compiling)); + } + // Compile the model on all workers + this->compile(); +} + //------------------------------------------------------ // load_param //------------------------------------------------------ diff --git a/csrc/engine/infer_engine.hpp b/csrc/engine/infer_engine.hpp index ce834c6a..22e428ec 100644 --- a/csrc/engine/infer_engine.hpp +++ b/csrc/engine/infer_engine.hpp @@ -1,5 +1,6 @@ #pragma once +#include "../config/model_config.hpp" #include "../models/infinilm_model.hpp" #include "../models/llama/llama_config.hpp" #include "distributed/distributed.hpp" @@ -19,6 +20,18 @@ class InferEngine { using Output = RankWorker::Output; // Updated constructor: accept CacheConfig instead of CacheType + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ InferEngine( const InfinilmModel::Config &config, const distributed::DistConfig &distributed_config = distributed::DistConfig(), @@ -26,6 +39,13 @@ class InferEngine { const cache::CacheConfig *cache_config = nullptr, bool enable_graph_compiling = false); + InferEngine( + const std::string &model_path = "", + const distributed::DistConfig &distributed_config = distributed::DistConfig(), + infinicore::Device::Type device_type = infinicore::context::getDevice().getType(), + const cache::CacheConfig *cache_config = nullptr, + bool enable_graph_compiling = false); + // Load a parameter to all workers (each can extract its shard inside RankWorker) void load_param(const std::string &name, const infinicore::Tensor ¶m); @@ -50,8 +70,9 @@ class InferEngine { std::vector> workers_; std::unique_ptr barrier_; distributed::CommunicationGroup communication_group_; - const InfinilmModel::Config &model_config_; std::unique_ptr cache_config_; + const InfinilmModel::Config &legacy_model_config_ = InfinilmModel::Config(); + std::shared_ptr model_config_; }; } // namespace infinilm::engine diff --git a/csrc/engine/rank_worker.cpp b/csrc/engine/rank_worker.cpp index 8149b69b..02b8a907 100644 --- a/csrc/engine/rank_worker.cpp +++ b/csrc/engine/rank_worker.cpp @@ -4,17 +4,54 @@ #include "infinicore/ops.hpp" -#include #include #include namespace infinilm::engine { +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. 
+ * Removal target: v0.2.0 (Q2 2026) + */ RankWorker::RankWorker(const InfinilmModel::Config &model_config, const distributed::RankInfo &rank_info, const cache::CacheConfig *cache_config, RankBarrier *barrier, bool enable_graph_compiling) + : legacy_model_config_(model_config), + rank_info_(rank_info), + enable_graph_compiling_(enable_graph_compiling), + job_cmd_(Command::INIT), + has_job_(false), + job_done_(false), + should_exit_(false), + init_done_(false), + barrier_(barrier) { + if (cache_config != nullptr) { + pending_cache_config_ = cache_config->unique_copy(); + } + // start the thread + thread_ = std::thread(&RankWorker::thread_loop, this); + + // Wait until the worker thread finishes initialization (model created) + std::unique_lock lk(mutex_); + cv_.wait(lk, [&] { return init_done_; }); +} + +RankWorker::RankWorker( + std::shared_ptr model_config, + const distributed::RankInfo &rank_info, + const cache::CacheConfig *cache_config, + RankBarrier *barrier, + bool enable_graph_compiling) : model_config_(model_config), rank_info_(rank_info), enable_graph_compiling_(enable_graph_compiling), @@ -30,7 +67,6 @@ RankWorker::RankWorker(const InfinilmModel::Config &model_config, } // start the thread thread_ = std::thread(&RankWorker::thread_loop, this); - // Wait until the worker thread finishes initialization (model created) std::unique_lock lk(mutex_); cv_.wait(lk, [&] { return init_done_; }); diff --git a/csrc/engine/rank_worker.hpp b/csrc/engine/rank_worker.hpp index 480dc767..f738ec1f 100644 --- a/csrc/engine/rank_worker.hpp +++ b/csrc/engine/rank_worker.hpp @@ -1,6 +1,7 @@ #pragma once #include "../cache/cache.hpp" +#include "../config/model_config.hpp" #include "../models/model_factory.hpp" #include "compiler/general_compiler.hpp" #include "distributed/distributed.hpp" @@ -62,6 +63,12 @@ class RankWorker { RankBarrier *barrier, bool enable_graph_compiling); + RankWorker(std::shared_ptr model_config, + const distributed::RankInfo &rank_info, + const cache::CacheConfig *cache_config, + RankBarrier *barrier, + bool enable_graph_compiling); + // Submit a parameter load job and wait until the load completes on the worker thread. void load_param(const std::string &name, const infinicore::Tensor ¶m); @@ -94,7 +101,8 @@ class RankWorker { private: // Worker properties - const InfinilmModel::Config &model_config_; + const InfinilmModel::Config &legacy_model_config_ = InfinilmModel::Config(); + std::shared_ptr model_config_; distributed::RankInfo rank_info_; std::shared_ptr model_; std::shared_ptr cache_; diff --git a/csrc/layers/fused_linear.cpp b/csrc/layers/fused_linear.cpp index 9b2c813d..41d5bb21 100644 --- a/csrc/layers/fused_linear.cpp +++ b/csrc/layers/fused_linear.cpp @@ -6,6 +6,18 @@ namespace infinilm::layers { // --------------------------------------------------------- // QKV Parallel Linear // --------------------------------------------------------- +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. 
+ * Removal target: v0.2.0 (Q2 2026) + */ QKVParallelLinear::QKVParallelLinear(size_t hidden_size, size_t head_dim, size_t num_q_head, @@ -28,13 +40,68 @@ QKVParallelLinear::QKVParallelLinear(size_t hidden_size, const infinicore::Device &device, engine::distributed::RankInfo rank_info) : infinicore::nn::ColumnParallelLinear( - hidden_size, - num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, - (q_bias || k_bias || v_bias), - dtype, - device, - rank_info.tp_rank, - rank_info.tp_size), + hidden_size, + num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, + (q_bias || k_bias || v_bias), + dtype, + device, + rank_info.tp_rank, + rank_info.tp_size), + q_dim_(q_dim), + k_dim_(k_dim), + v_dim_(v_dim), + num_q_head_(num_q_head), + num_k_head_(num_k_head), + num_v_head_(num_v_head), + q_bias_(q_bias), + k_bias_(k_bias), + v_bias_(v_bias) { + if (num_q_head % tp_size_ != 0 || num_k_head % tp_size_ != 0 || num_v_head % tp_size_ != 0) { + throw std::runtime_error("QKVParallelLinear: num_[q|k|v]_head must be divisible by tp_size"); + } + + if ((q_bias_ != k_bias_) || (k_bias_ != v_bias_)) { + throw std::runtime_error("q_bias, k_bias, v_bias must all match"); + } + + q_out_size_ = num_q_head_ * q_dim_ / tp_size_; + k_out_size_ = num_k_head_ * k_dim_ / tp_size_; + v_out_size_ = num_v_head_ * v_dim_ / tp_size_; +} + +QKVParallelLinear::QKVParallelLinear(size_t hidden_size, + size_t head_dim, + size_t num_q_head, + size_t num_kv_head, + infinicore::nn::QuantScheme quant_scheme, + bool bias, + const infinicore::DataType &dtype, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info) + : QKVParallelLinear(hidden_size, + head_dim, head_dim, head_dim, + num_q_head, num_kv_head, num_kv_head, + bias, bias, bias, + quant_scheme, + dtype, device, rank_info) {} + +QKVParallelLinear::QKVParallelLinear(size_t hidden_size, + size_t q_dim, size_t k_dim, size_t v_dim, + size_t num_q_head, size_t num_k_head, size_t num_v_head, + bool q_bias, bool k_bias, bool v_bias, + infinicore::nn::QuantScheme quant_scheme, + const infinicore::DataType &dtype, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info) + : infinicore::nn::ColumnParallelLinear( + hidden_size, + num_q_head * q_dim + num_k_head * k_dim + num_v_head * v_dim, + quant_scheme, + (q_bias || k_bias || v_bias), + dtype, + device, + rank_info.tp_rank, + rank_info.tp_size), q_dim_(q_dim), k_dim_(k_dim), v_dim_(v_dim), @@ -86,6 +153,23 @@ infinicore::nn::Parameter QKVParallelLinear::get_v_weight() const { 0, tp_rank_, tp_size_); } +infinicore::nn::Parameter QKVParallelLinear::get_q_weight_scale() const { + return infinicore::nn::Parameter( + weight_scale_->narrow({{0, 0, q_out_size_}}), 0, tp_rank_, tp_size_); +} + +infinicore::nn::Parameter QKVParallelLinear::get_k_weight_scale() const { + return infinicore::nn::Parameter( + weight_scale_->narrow({{0, q_out_size_, k_out_size_}}), + 0, tp_rank_, tp_size_); +} + +infinicore::nn::Parameter QKVParallelLinear::get_v_weight_scale() const { + return infinicore::nn::Parameter( + weight_scale_->narrow({{0, q_out_size_ + k_out_size_, v_out_size_}}), + 0, tp_rank_, tp_size_); +} + infinicore::nn::Parameter QKVParallelLinear::get_q_bias() const { if (!q_bias_) { return infinicore::nn::Parameter(); @@ -120,6 +204,18 @@ bool QKVParallelLinear::has_v_bias() const { return v_bias_; } // --------------------------------------------------------- // Gate-Up Parallel Linear // --------------------------------------------------------- +/** + * @deprecated This function is 
deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias, const infinicore::DataType &dtype, const infinicore::Device &device, engine::distributed::RankInfo rank_info) @@ -135,6 +231,22 @@ GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermedia } } +GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, infinicore::nn::QuantScheme quant_scheme, bool bias, + const infinicore::DataType &dtype, const infinicore::Device &device, + engine::distributed::RankInfo rank_info) + : GateUpParallelLinear(hidden_size, intermediate_size, bias, bias, quant_scheme, dtype, device, rank_info) { +} + +GateUpParallelLinear::GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, + infinicore::nn::QuantScheme quant_scheme, + const infinicore::DataType &dtype, const infinicore::Device &device, + engine::distributed::RankInfo rank_info) + : infinicore::nn::ColumnParallelLinear(hidden_size, intermediate_size * 2, quant_scheme, gate_bias || up_bias, dtype, device, rank_info.tp_rank, rank_info.tp_size), gate_bias_(gate_bias), up_bias_(up_bias) { + if (gate_bias_ != up_bias_) { + throw std::runtime_error("Not supported yet: gate_bias and up_bias should be given at the same time"); + } +} + std::tuple GateUpParallelLinear::forward_split(infinicore::Tensor &input) { auto output = this->forward(input); auto cols = output->shape()[2]; @@ -168,6 +280,14 @@ infinicore::nn::Parameter GateUpParallelLinear::get_up_bias() const { } } +infinicore::nn::Parameter GateUpParallelLinear::get_gate_weight_scale() const { + return infinicore::nn::Parameter(weight_scale_->narrow({{0, 0, weight_scale_->size(0) / 2}}), 0, tp_rank_, tp_size_); +} + +infinicore::nn::Parameter GateUpParallelLinear::get_up_weight_scale() const { + return infinicore::nn::Parameter(weight_scale_->narrow({{0, weight_scale_->size(0) / 2, weight_scale_->size(0) / 2}}), 0, tp_rank_, tp_size_); +} + bool GateUpParallelLinear::has_gate_bias() const { return gate_bias_; } diff --git a/csrc/layers/fused_linear.hpp b/csrc/layers/fused_linear.hpp index 1e32ce50..9656e6ca 100644 --- a/csrc/layers/fused_linear.hpp +++ b/csrc/layers/fused_linear.hpp @@ -1,5 +1,6 @@ #pragma once #include "infinicore/nn/linear.hpp" +#include "infinicore/nn/quantization.hpp" #include "../engine/distributed/communication_group.hpp" @@ -23,6 +24,25 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + explicit QKVParallelLinear(size_t hidden_size, + size_t q_dim, size_t k_dim, size_t v_dim, + size_t num_q_head, size_t num_k_head, size_t num_v_head, + bool q_bias, bool k_bias, bool v_bias, + infinicore::nn::QuantScheme quant_scheme, + const infinicore::DataType &dtype = infinicore::DataType::F32, + const infinicore::Device &device = infinicore::Device(), + engine::distributed::RankInfo 
rank_info = engine::distributed::RankInfo()); + + // A more common case where all heads have the same dimension + explicit QKVParallelLinear(size_t hidden_size, + size_t head_dim, + size_t num_q_head, size_t num_kv_head, + infinicore::nn::QuantScheme quant_scheme, + bool bias = false, + const infinicore::DataType &dtype = infinicore::DataType::F32, + const infinicore::Device &device = infinicore::Device(), + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + std::tuple forward_split(infinicore::Tensor &input); @@ -30,6 +50,10 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { infinicore::nn::Parameter get_k_weight() const; infinicore::nn::Parameter get_v_weight() const; + infinicore::nn::Parameter get_q_weight_scale() const; + infinicore::nn::Parameter get_k_weight_scale() const; + infinicore::nn::Parameter get_v_weight_scale() const; + infinicore::nn::Parameter get_q_bias() const; infinicore::nn::Parameter get_k_bias() const; infinicore::nn::Parameter get_v_bias() const; @@ -55,6 +79,18 @@ class QKVParallelLinear : public infinicore::nn::ColumnParallelLinear { class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { public: + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool bias = false, const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); @@ -63,14 +99,29 @@ class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, infinicore::nn::QuantScheme quant_scheme, + bool bias = false, + const infinicore::DataType &dtype = infinicore::DataType::F32, + const infinicore::Device &device = infinicore::Device(), + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + + GateUpParallelLinear(size_t hidden_size, size_t intermediate_size, bool gate_bias, bool up_bias, + infinicore::nn::QuantScheme quant_scheme, + const infinicore::DataType &dtype = infinicore::DataType::F32, const infinicore::Device &device = infinicore::Device(), + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + std::tuple forward_split(infinicore::Tensor &input); infinicore::nn::Parameter get_gate_weight() const; + infinicore::nn::Parameter get_gate_weight_scale() const; + infinicore::nn::Parameter get_gate_bias() const; infinicore::nn::Parameter get_up_weight() const; + infinicore::nn::Parameter get_up_weight_scale() const; + infinicore::nn::Parameter get_up_bias() const; bool has_gate_bias() const; @@ -103,4 +154,35 @@ class GateUpParallelLinear : public infinicore::nn::ColumnParallelLinear { if (name##_->has_up_bias()) 
\ this->register_parameter(std::string(up_name) + ".bias", name##_->get_up_bias()); +// ========================= QKV Quantization ================================== +#define INFINILM_QKV_LINEAR_W8A8_INIT(name, q_name, k_name, v_name, ...) \ + name##_ = std::make_shared<QKVParallelLinear>(__VA_ARGS__); \ + this->register_parameter(std::string(q_name) + ".weight", name##_->get_q_weight()); \ + this->register_parameter(std::string(q_name) + ".weight_scale", name##_->get_q_weight_scale()); \ + this->register_parameter(std::string(k_name) + ".weight", name##_->get_k_weight()); \ + this->register_parameter(std::string(k_name) + ".weight_scale", name##_->get_k_weight_scale()); \ + this->register_parameter(std::string(v_name) + ".weight", name##_->get_v_weight()); \ + this->register_parameter(std::string(v_name) + ".weight_scale", name##_->get_v_weight_scale()); \ + if (name##_->has_q_bias()) \ + this->register_parameter(std::string(q_name) + ".bias", name##_->get_q_bias()); \ + if (name##_->has_k_bias()) \ + this->register_parameter(std::string(k_name) + ".bias", name##_->get_k_bias()); \ + if (name##_->has_v_bias()) \ + this->register_parameter(std::string(v_name) + ".bias", name##_->get_v_bias()); + +// ========================= Gate-Up Quantization ============================== +#define INFINILM_GATE_UP_LINEAR_W8A8_INIT(name, gate_name, up_name, ...) \ + name##_ = std::make_shared<GateUpParallelLinear>(__VA_ARGS__); \ + /* Register the gate weights */ \ + this->register_parameter(std::string(gate_name) + ".weight", name##_->get_gate_weight()); \ + this->register_parameter(std::string(gate_name) + ".weight_scale", name##_->get_gate_weight_scale()); \ + /* Register the up weights */ \ + this->register_parameter(std::string(up_name) + ".weight", name##_->get_up_weight()); \ + this->register_parameter(std::string(up_name) + ".weight_scale", name##_->get_up_weight_scale()); \ + /* Biases are kept as-is */ \ + if (name##_->has_gate_bias()) \ + this->register_parameter(std::string(gate_name) + ".bias", name##_->get_gate_bias()); \ + if (name##_->has_up_bias()) \ + this->register_parameter(std::string(up_name) + ".bias", name##_->get_up_bias()); + } // namespace infinilm::layers diff --git a/csrc/models/infinilm_model.hpp b/csrc/models/infinilm_model.hpp index 3537bc75..be7ebd0d 100644 --- a/csrc/models/infinilm_model.hpp +++ b/csrc/models/infinilm_model.hpp @@ -1,8 +1,8 @@ #pragma once -#include "infinicore/nn/module.hpp" - #include "../cache/cache.hpp" +#include "infinicore/nn/module.hpp" +#include "nlohmann/json.hpp" + +#include @@ -13,7 +13,6 @@ class InfinilmModel : public infinicore::nn::Module { public: struct Config { std::string model_type; - virtual ~Config() = default; }; diff --git a/csrc/models/llama/llama.hpp b/csrc/models/llama/llama.hpp index fe554c32..8402a1ab 100644 --- a/csrc/models/llama/llama.hpp +++ b/csrc/models/llama/llama.hpp @@ -16,9 +16,9 @@ * - LlamaForCausalLM: Complete model with language modeling head */ -#include "llama_config.hpp" +#include "../../config/model_config.hpp" #include "llama_attention.hpp" -#include "llama_mlp.hpp" #include "llama_decoder_layer.hpp" -#include "llama_model.hpp" #include "llama_for_causal_lm.hpp" +#include "llama_mlp.hpp" +#include "llama_model.hpp" diff --git a/csrc/models/llama/llama_attention.cpp b/csrc/models/llama/llama_attention.cpp index ad42efb1..a4e82811 100644 --- a/csrc/models/llama/llama_attention.cpp +++ b/csrc/models/llama/llama_attention.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -17,6 +16,18 @@ namespace infinilm::models::llama { +/** + * @deprecated This function 
is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ LlamaAttention::LlamaAttention(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, @@ -61,6 +72,61 @@ LlamaAttention::LlamaAttention(const LlamaConfig &config, } } +LlamaAttention::LlamaAttention(std::shared_ptr model_config, + const infinicore::Device &device, + size_t layer_idx, + engine::distributed::RankInfo rank_info) + : model_config_(model_config), + layer_idx_(layer_idx), + hidden_size_(model_config->get("hidden_size")), + num_attention_heads_(model_config->get("num_attention_heads")), + num_key_value_heads_(model_config->get("num_key_value_heads")), + head_dim_(model_config->get_head_dim()), + kv_dim_(model_config->get_kv_dim()), + use_bias_(model_config->get_or("attention_bias", true)), + use_output_bias_(model_config->get_or("attention_output_bias", false)), + max_position_embeddings_(model_config->get("max_position_embeddings")), + rank_info_(rank_info) { + const auto &dtype{model_config_->get_dtype()}; + + int tp_rank = rank_info.tp_rank; + int tp_size = rank_info.tp_size; + + int num_attention_heads = model_config_->get("num_attention_heads"); + int num_key_value_heads = model_config_->get("num_key_value_heads"); + + if ((num_key_value_heads >= tp_size) && (0 == (num_key_value_heads % tp_size))) { + this->num_attention_heads_ = num_attention_heads / tp_size; + this->num_key_value_heads_ = num_key_value_heads / tp_size; + } else { + throw std::runtime_error("num_attention_heads / tp_size error."); + } + scaling_ = 1.0f / std::sqrt(static_cast(head_dim_)); + + auto quant_scheme = this->model_config_->get_quant_scheme(); + switch (quant_scheme) { + case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: + INFINILM_QKV_LINEAR_W8A8_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, model_config_->get("num_attention_heads"), model_config_->get("num_key_value_heads"), quant_scheme, use_bias_, + dtype, device, rank_info); + + INFINICORE_NN_MODULE_INIT(o_proj, model_config_->get("num_attention_heads") * head_dim_, hidden_size_, quant_scheme, use_output_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm); + break; + + default: + INFINILM_QKV_LINEAR_INIT(qkv_proj, "q_proj", "k_proj", "v_proj", hidden_size_, head_dim_, model_config_->get("num_attention_heads"), model_config_->get("num_key_value_heads"), quant_scheme, use_bias_, + dtype, device, rank_info); + + INFINICORE_NN_MODULE_INIT(o_proj, model_config_->get("num_attention_heads") * head_dim_, hidden_size_, quant_scheme, use_output_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm); + break; + } + if (model_config_->get("model_type") == "qwen3") { + INFINICORE_NN_MODULE_INIT(q_norm, head_dim_, model_config_->get("rms_norm_eps"), dtype, device); + INFINICORE_NN_MODULE_INIT(k_norm, head_dim_, model_config_->get("rms_norm_eps"), dtype, device); + } +} + infinicore::Tensor LlamaAttention::forward_(const infinicore::Tensor &hidden_states, const infinicore::Tensor &position_ids, std::shared_ptr kv_cache, @@ -75,7 +141,7 @@ infinicore::Tensor 
LlamaAttention::forward_(const infinicore::Tensor &hidden_sta // 1. Project Q, K, V auto [q, k, v] = qkv_proj_->forward_split(hidden_states_mutable); - if (use_qk_norm_) { + if (model_config_->get("model_type") == "qwen3") { q = q_norm_->forward(q->view({batch_size * seq_len, num_attention_heads_, head_dim_})); k = k_norm_->forward(k->view({batch_size * seq_len, num_key_value_heads_, head_dim_})); } @@ -199,7 +265,7 @@ infinicore::Tensor LlamaAttention::forward_paged_(const infinicore::Tensor &hidd auto k_reshaped = k->view({seq_len, num_key_value_heads_, head_dim_}); auto v_reshaped = v->view({seq_len, num_key_value_heads_, head_dim_}); - if (use_qk_norm_) { + if (model_config_->get("model_type") == "qwen3") { q_reshaped = q_norm_->forward(q_reshaped); k_reshaped = k_norm_->forward(k_reshaped); } diff --git a/csrc/models/llama/llama_attention.hpp b/csrc/models/llama/llama_attention.hpp index 9d464bcf..45bca14a 100644 --- a/csrc/models/llama/llama_attention.hpp +++ b/csrc/models/llama/llama_attention.hpp @@ -1,6 +1,7 @@ #pragma once #include "../../cache/kv_cache.hpp" +#include "../../config/model_config.hpp" #include "../../engine/distributed/distributed.hpp" #include "../../layers/fused_linear.hpp" #include "llama_config.hpp" @@ -36,11 +37,28 @@ class LlamaAttention : public infinicore::nn::Module { * @param layer_idx Layer index for cache access * @param dtype Optional data type for model parameters (defaults to F32) */ + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ LlamaAttention(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + LlamaAttention(std::shared_ptr model_config, + const infinicore::Device &device, + size_t layer_idx, + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + /** * @brief Forward pass: compute attention * @@ -101,6 +119,7 @@ class LlamaAttention : public infinicore::nn::Module { std::shared_ptr rotary_emb_; private: + std::shared_ptr model_config_; size_t layer_idx_; // Layer index for cache access size_t hidden_size_; size_t num_attention_heads_; diff --git a/csrc/models/llama/llama_config.hpp b/csrc/models/llama/llama_config.hpp index 59108546..f2df38e5 100644 --- a/csrc/models/llama/llama_config.hpp +++ b/csrc/models/llama/llama_config.hpp @@ -92,4 +92,4 @@ struct LlamaConfig : public InfinilmModel::Config { } }; -} // namespace infinilm::models::llama +} // namespace infinilm::models::llama \ No newline at end of file diff --git a/csrc/models/llama/llama_decoder_layer.cpp b/csrc/models/llama/llama_decoder_layer.cpp index c99dad6f..208771d2 100644 --- a/csrc/models/llama/llama_decoder_layer.cpp +++ b/csrc/models/llama/llama_decoder_layer.cpp @@ -1,11 +1,21 @@ #include "llama_decoder_layer.hpp" #include "infinicore/nn/rmsnorm.hpp" #include "infinicore/ops.hpp" - #include namespace infinilm::models::llama { - +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). 
+ * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ LlamaDecoderLayer::LlamaDecoderLayer(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, @@ -23,6 +33,22 @@ LlamaDecoderLayer::LlamaDecoderLayer(const LlamaConfig &config, INFINICORE_NN_MODULE_INIT(mlp, config, device, rank_info_); } +LlamaDecoderLayer::LlamaDecoderLayer(std::shared_ptr model_config, + const infinicore::Device &device, + size_t layer_idx, + engine::distributed::RankInfo rank_info) : model_config_(model_config), layer_idx_(layer_idx), rank_info_(rank_info) { + const auto &dtype{model_config_->get_dtype()}; + // Initialize layer normalization layers + INFINICORE_NN_MODULE_INIT(input_layernorm, model_config_->get("hidden_size"), model_config_->get("rms_norm_eps"), + dtype, device); + INFINICORE_NN_MODULE_INIT(post_attention_layernorm, model_config_->get("hidden_size"), model_config_->get("rms_norm_eps"), + dtype, device); + + // Initialize attention and MLP modules + INFINICORE_NN_MODULE_INIT(self_attn, model_config_, device, layer_idx, rank_info_); + INFINICORE_NN_MODULE_INIT(mlp, model_config_, device, rank_info_); +} + std::tuple LlamaDecoderLayer::forward(infinicore::Tensor &hidden_states, infinicore::Tensor &residual, diff --git a/csrc/models/llama/llama_decoder_layer.hpp b/csrc/models/llama/llama_decoder_layer.hpp index 839d6d37..a56aec03 100644 --- a/csrc/models/llama/llama_decoder_layer.hpp +++ b/csrc/models/llama/llama_decoder_layer.hpp @@ -33,11 +33,28 @@ class LlamaDecoderLayer : public infinicore::nn::Module { * @param layer_idx Layer index for cache management and debugging * @param dtype Optional data type for model parameters (defaults to F32) */ + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. 
+ * Removal target: v0.2.0 (Q2 2026) + */ LlamaDecoderLayer(const LlamaConfig &config, const infinicore::Device &device, size_t layer_idx, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + LlamaDecoderLayer(std::shared_ptr model_config, + const infinicore::Device &device, + size_t layer_idx, + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + /** * @brief Forward pass: process one decoder layer * @@ -79,6 +96,7 @@ class LlamaDecoderLayer : public infinicore::nn::Module { INFINICORE_NN_MODULE(LlamaAttention, self_attn); INFINICORE_NN_MODULE(LlamaMLP, mlp); engine::distributed::RankInfo rank_info_; + std::shared_ptr model_config_; private: size_t layer_idx_; // Layer index for cache management and debugging diff --git a/csrc/models/llama/llama_for_causal_lm.cpp b/csrc/models/llama/llama_for_causal_lm.cpp index c7f8728e..cb386814 100644 --- a/csrc/models/llama/llama_for_causal_lm.cpp +++ b/csrc/models/llama/llama_for_causal_lm.cpp @@ -2,10 +2,20 @@ #include "infinicore/context/context.hpp" #include "infinicore/nn/linear.hpp" #include "infinicore/ops.hpp" -#include namespace infinilm::models::llama { - +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ LlamaForCausalLM::LlamaForCausalLM(const LlamaConfig &config, const infinicore::Device &device, engine::distributed::RankInfo rank_info) { @@ -25,6 +35,25 @@ LlamaForCausalLM::LlamaForCausalLM(const LlamaConfig &config, dtype, device); } +LlamaForCausalLM::LlamaForCausalLM(std::shared_ptr model_config, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info) { + + // Initialize module's device_ member + device_ = device; + + const auto &dtype{model_config->get_dtype()}; + + // Initialize base model + INFINICORE_NN_MODULE_INIT(model, model_config, device, rank_info); + // Initialize language modeling head + // Note: If tie_word_embeddings is true, we would share weights with embed_tokens + // For now, we create a separate linear layer + + INFINICORE_NN_MODULE_INIT(lm_head, model_config->get("hidden_size"), model_config->get("vocab_size"), false, + dtype, device); +} + LlamaForCausalLM::Output LlamaForCausalLM::forward(const Input &input) const { auto input_ids = input.input_ids.value(); auto position_ids = input.position_ids.value(); diff --git a/csrc/models/llama/llama_for_causal_lm.hpp b/csrc/models/llama/llama_for_causal_lm.hpp index 4b7275cd..a6e078e7 100644 --- a/csrc/models/llama/llama_for_causal_lm.hpp +++ b/csrc/models/llama/llama_for_causal_lm.hpp @@ -28,10 +28,26 @@ class LlamaForCausalLM : public InfinilmModel { * @param config Model configuration * @param device Device to create tensors on */ + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). 
+ * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ LlamaForCausalLM(const LlamaConfig &config, const infinicore::Device &device, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + LlamaForCausalLM(std::shared_ptr model_config, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + /** * @brief Forward pass: compute language modeling logits * @@ -45,7 +61,6 @@ class LlamaForCausalLM : public InfinilmModel { const cache::CacheConfig *get_cache_config() const override; // Module information - const LlamaConfig &config() const { return model_->config(); } LlamaModel &model() { return *model_; } const LlamaModel &model() const { return *model_; } diff --git a/csrc/models/llama/llama_mlp.cpp b/csrc/models/llama/llama_mlp.cpp index fc7abd69..89866a16 100644 --- a/csrc/models/llama/llama_mlp.cpp +++ b/csrc/models/llama/llama_mlp.cpp @@ -3,7 +3,18 @@ #include "infinicore/ops.hpp" namespace infinilm::models::llama { - +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. 
+ * Removal target: v0.2.0 (Q2 2026) + */ LlamaMLP::LlamaMLP(const LlamaConfig &config, const infinicore::Device &device, engine::distributed::RankInfo rank_info) @@ -22,6 +33,37 @@ LlamaMLP::LlamaMLP(const LlamaConfig &config, dtype, device, tp_rank, tp_size, rank_info.comm); } +LlamaMLP::LlamaMLP(std::shared_ptr model_config, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info) + : model_config_(model_config), hidden_size_(model_config->get("hidden_size")), + intermediate_size_(model_config->get("intermediate_size")), + use_bias_(model_config->get_or("mlp_bias", false)), rank_info_(rank_info) { + + const auto &dtype{model_config_->get_dtype()}; + + int tp_rank = rank_info.tp_rank; + int tp_size = rank_info.tp_size; + + // Initialize projection layers + auto quant_scheme = this->model_config_->get_quant_scheme(); + switch (quant_scheme) { + case infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8: + INFINILM_GATE_UP_LINEAR_W8A8_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, quant_scheme, use_bias_, + dtype, device, rank_info_); + INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, quant_scheme, use_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm); + break; + + default: + INFINILM_GATE_UP_LINEAR_INIT(gate_up_proj, "gate_proj", "up_proj", hidden_size_, intermediate_size_, quant_scheme, use_bias_, + dtype, device, rank_info_); + INFINICORE_NN_MODULE_INIT(down_proj, intermediate_size_, hidden_size_, quant_scheme, use_bias_, + dtype, device, tp_rank, tp_size, rank_info.comm); + break; + } +} + infinicore::Tensor LlamaMLP::forward(const infinicore::Tensor &hidden_states) const { // 1. Project to gate and up auto hidden_states_mutable = hidden_states; diff --git a/csrc/models/llama/llama_mlp.hpp b/csrc/models/llama/llama_mlp.hpp index 665dac70..179ea217 100644 --- a/csrc/models/llama/llama_mlp.hpp +++ b/csrc/models/llama/llama_mlp.hpp @@ -3,6 +3,7 @@ #include "../../layers/fused_linear.hpp" #include "llama_config.hpp" +#include "../../config/model_config.hpp" #include "infinicore/device.hpp" #include "infinicore/nn/linear.hpp" #include "infinicore/nn/module.hpp" @@ -33,10 +34,26 @@ class LlamaMLP : public infinicore::nn::Module { * @param device Device to create tensors on * @param dtype Optional data type for model parameters (defaults to F32) */ + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. 
+ * Removal target: v0.2.0 (Q2 2026) + */ LlamaMLP(const LlamaConfig &config, const infinicore::Device &device, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + LlamaMLP(std::shared_ptr model_config, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + /** * @brief Forward pass: compute MLP output * @@ -57,6 +74,8 @@ class LlamaMLP : public infinicore::nn::Module { size_t hidden_size_; size_t intermediate_size_; bool use_bias_; + + std::shared_ptr model_config_; }; } // namespace infinilm::models::llama diff --git a/csrc/models/llama/llama_model.cpp b/csrc/models/llama/llama_model.cpp index f1de0618..e8360a87 100644 --- a/csrc/models/llama/llama_model.cpp +++ b/csrc/models/llama/llama_model.cpp @@ -6,7 +6,18 @@ #include namespace infinilm::models::llama { - +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ LlamaModel::LlamaModel(const LlamaConfig &config, const infinicore::Device &device, engine::distributed::RankInfo rank_info) @@ -43,6 +54,41 @@ LlamaModel::LlamaModel(const LlamaConfig &config, } } +LlamaModel::LlamaModel(std::shared_ptr model_config, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info) + : model_config_(model_config), rank_info_(rank_info) { + const auto &dtype{model_config_->get_dtype()}; + // Initialize token embeddings + INFINICORE_NN_MODULE_INIT(embed_tokens, model_config_->get("vocab_size"), model_config_->get("hidden_size"), + std::nullopt, dtype, device); + // Initialize decoder layers with layer indices + // TODO: Update INFINICORE_NN_MODULE_VEC_INIT macro to support per-layer constructor arguments + // (e.g., via a factory function or lambda that receives the layer index) + // Currently, we can't use the macro because each layer needs a different layer_idx + layers_.reserve(model_config_->get("num_hidden_layers")); + for (size_t i = 0; i < model_config_->get("num_hidden_layers"); ++i) { + layers_.push_back(this->register_module( + "layers." 
+ std::to_string(i), model_config_, device, i, rank_info)); + } + + // Initialize final layer normalization + INFINICORE_NN_MODULE_INIT(norm, model_config_->get("hidden_size"), model_config_->get("rms_norm_eps"), + dtype, device); + + // Initialize Rotary Position Embeddings (shared across all layers) + // Use GPT-J-style inverse frequencies (default) and GPT_NEOX rotation pairing + INFINICORE_NN_MODULE_INIT(rotary_emb, model_config_->get_head_dim(), model_config_->get("max_position_embeddings"), + model_config_->get("rope_theta"), infinicore::nn::RoPE::Algo::GPT_NEOX, + dtype, device, model_config_->get_rope_scaling()); + + for (auto &layer : layers_) { + if (layer) { + layer->set_rotary_emb(rotary_emb_); + } + } +} + infinicore::Tensor LlamaModel::forward(const infinicore::Tensor &input_ids, const infinicore::Tensor &position_ids, std::optional past_sequence_lengths, @@ -81,24 +127,23 @@ void LlamaModel::reset_cache(const cache::CacheConfig *cache_config) { } if (auto kv_cache_config = dynamic_cast(cache_config)) { kv_cache_ = std::make_shared( - config_.head_dim, - config_.head_dim, - config_.num_key_value_heads, - config_.num_key_value_heads, - config_.num_hidden_layers, - config_.max_position_embeddings, - config_.dtype, + model_config_->get_head_dim(), + model_config_->get_head_dim(), + model_config_->get("num_key_value_heads"), + model_config_->get("num_key_value_heads"), + model_config_->get("num_hidden_layers"), + model_config_->get("max_position_embeddings"), + model_config_->get_dtype(), *kv_cache_config, rank_info_); - } else if (auto paged_kv_cache_config = dynamic_cast(cache_config)) { kv_cache_ = std::make_shared( - config_.head_dim, - config_.head_dim, - config_.num_key_value_heads, - config_.num_key_value_heads, - config_.num_hidden_layers, - config_.dtype, + model_config_->get_head_dim(), + model_config_->get_head_dim(), + model_config_->get("num_key_value_heads"), + model_config_->get("num_key_value_heads"), + model_config_->get("num_hidden_layers"), + model_config_->get_dtype(), *paged_kv_cache_config, rank_info_); } else { diff --git a/csrc/models/llama/llama_model.hpp b/csrc/models/llama/llama_model.hpp index 5a008b0f..f293a97a 100644 --- a/csrc/models/llama/llama_model.hpp +++ b/csrc/models/llama/llama_model.hpp @@ -1,7 +1,6 @@ #pragma once #include "../../cache/kv_cache.hpp" -#include "llama_config.hpp" #include "llama_decoder_layer.hpp" #include "infinicore/nn/embedding.hpp" @@ -38,10 +37,26 @@ class LlamaModel : public infinicore::nn::Module { * @param device Device to create tensors on * @param dtype Optional data type for model parameters (defaults to F32) */ + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. 
+ * Removal target: v0.2.0 (Q2 2026) + */ LlamaModel(const LlamaConfig &config, const infinicore::Device &device, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + LlamaModel(std::shared_ptr model_config, + const infinicore::Device &device, + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo()); + /** * @brief Forward pass: process input through the model * @@ -64,8 +79,7 @@ class LlamaModel : public infinicore::nn::Module { void reset_cache(const cache::CacheConfig *cache_config); // Module information - const LlamaConfig &config() const { return config_; } - size_t num_layers() const { return config_.num_hidden_layers; } + size_t num_layers() const { return model_config_->get("num_hidden_layers"); } protected: // Token embeddings @@ -86,6 +100,8 @@ class LlamaModel : public infinicore::nn::Module { private: LlamaConfig config_; + + std::shared_ptr model_config_; }; } // namespace infinilm::models::llama diff --git a/csrc/models/model_factory.cpp b/csrc/models/model_factory.cpp index 999bb364..fa117227 100644 --- a/csrc/models/model_factory.cpp +++ b/csrc/models/model_factory.cpp @@ -2,6 +2,18 @@ #include "llama/llama.hpp" namespace infinilm { +/** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. + * Removal target: v0.2.0 (Q2 2026) + */ std::shared_ptr InfinilmModelFactory::createModel( const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info, @@ -22,4 +34,24 @@ std::shared_ptr InfinilmModelFactory::createModel( return model; } + +std::shared_ptr InfinilmModelFactory::createModel( + std::shared_ptr model_config, + engine::distributed::RankInfo rank_info, + const cache::CacheConfig *cache) { + + std::shared_ptr model; + if (true) { + model = std::make_shared( + model_config, rank_info.device, rank_info); + } else { + throw std::invalid_argument("InfinilmModelFactory::createModel: Unsupported model config type"); + } + + if (cache) { + model->reset_cache(cache); + } + + return model; +} } // namespace infinilm diff --git a/csrc/models/model_factory.hpp b/csrc/models/model_factory.hpp index a73f432c..02385029 100644 --- a/csrc/models/model_factory.hpp +++ b/csrc/models/model_factory.hpp @@ -1,5 +1,6 @@ #pragma once +#include "../config/model_config.hpp" #include "infinilm_model.hpp" #include "../engine/distributed/distributed.hpp" @@ -7,9 +8,26 @@ namespace infinilm { class InfinilmModelFactory { public: + /** + * @deprecated This function is deprecated and will be REMOVED in the next major release (v0.2.0). + * + * ⚠️ DEVELOPMENT POLICY: + * - NO new development or feature additions permitted on this interface + * - Only critical bug fixes (security/stability) allowed until removal + * - All new code MUST migrate to the polymorphic overload below + * + * Replacement: Use the polymorphic overload of this same function name with updated signature + * Reason: Legacy signature lacks support for dynamic quantization modes. 
+ * Removal target: v0.2.0 (Q2 2026) + */ static std::shared_ptr createModel( const InfinilmModel::Config &config, engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), const cache::CacheConfig *cache = nullptr); + + static std::shared_ptr createModel( + std::shared_ptr model_config, + engine::distributed::RankInfo rank_info = engine::distributed::RankInfo(), + const cache::CacheConfig *cache = nullptr); }; } // namespace infinilm diff --git a/csrc/pybind11/engine/engine.hpp b/csrc/pybind11/engine/engine.hpp index f5dae4a7..78af5daa 100644 --- a/csrc/pybind11/engine/engine.hpp +++ b/csrc/pybind11/engine/engine.hpp @@ -63,20 +63,52 @@ inline void bind_infer_engine(py::module &m) { } return state_dict_tp_all; }) - .def( - "forward", [](InferEngine &self, const InferEngine::Input &input) -> InferEngine::Output { return self.forward(input); }, "Run inference on all ranks with arbitrary arguments") - .def( - "reset_cache", [](InferEngine &self, std::shared_ptr cfg) { - self.reset_cache(cfg ? cfg.get() : nullptr); - }, - py::arg("cache_config") = py::none()) + .def("forward", [](InferEngine &self, const InferEngine::Input &input) -> InferEngine::Output { return self.forward(input); }, "Run inference on all ranks with arbitrary arguments") + .def("reset_cache", [](InferEngine &self, std::shared_ptr cfg) { self.reset_cache(cfg ? cfg.get() : nullptr); }, py::arg("cache_config") = py::none()) .def("get_cache_config", [](const InferEngine &self) { auto cfg = self.get_cache_config(); - return std::shared_ptr(std::move(cfg->unique_copy())); + return std::shared_ptr(std::move(cfg->unique_copy())); }) + .def("__repr__", [](const InferEngine &self) { return ""; }); + + infer_engine + .def(py::init([]( + const std::string &model_path, + const distributed::DistConfig &dist, + infinicore::Device::Type dev, + std::shared_ptr cache_cfg, + bool enable_graph_compiling) { + return std::make_shared( + model_path, + dist, + dev, + cache_cfg ? cache_cfg.get() : nullptr, + enable_graph_compiling); + }), + py::arg("model_path") = "", + py::arg("distributed_config") = distributed::DistConfig(), + py::arg("device_type") = infinicore::context::getDevice().getType(), + py::arg("cache_config") = py::none(), + py::arg("enable_graph_compiling") = false) + .def("load_param", &InferEngine::load_param, + py::arg("name"), py::arg("param"), + "Load a parameter tensor into all workers (each worker picks its shard)") + .def("state_dict", [](InferEngine &self) { + py::list state_dict_tp_all; + for (const auto &state_dict_tp : self.state_dict()) { + py::dict result; + for (const auto &[name, param] : state_dict_tp) { + result[py::cast(name)] = infinicore::Tensor(param); + } + state_dict_tp_all.append(result); + } + return state_dict_tp_all; }) - .def("__repr__", [](const InferEngine &self) { - return ""; - }); + .def("forward", [](InferEngine &self, const InferEngine::Input &input) -> InferEngine::Output { return self.forward(input); }, "Run inference on all ranks with arbitrary arguments") + .def("reset_cache", [](InferEngine &self, std::shared_ptr cfg) { self.reset_cache(cfg ? 
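
Editor's note: the binding changes register a second py::init taking model_path and then repeat the forward/reset_cache/state_dict/get_cache_config/__repr__ defs on the same class. pybind11 treats a repeated .def under the same name as an overload, so this works, but only the constructors need to be registered twice. Below is a generic sketch of that pattern; the module name demo and the Engine class are hypothetical placeholders, not this project's binding code.

```cpp
// Hypothetical pybind11 module: two constructor overloads on one class,
// methods bound once. Names are placeholders.
#include <pybind11/pybind11.h>
#include <memory>
#include <string>

namespace py = pybind11;

struct Engine {
    explicit Engine(std::string path) : source(std::move(path)) {}
    Engine(std::string path, bool graphs) : source(std::move(path)), graph_compiling(graphs) {}
    std::string info() const { return source + (graph_compiling ? " [graphs]" : ""); }
    std::string source;
    bool graph_compiling = false;
};

PYBIND11_MODULE(demo, m) {
    py::class_<Engine, std::shared_ptr<Engine>>(m, "Engine")
        // Two init overloads; pybind11 tries them in registration order.
        .def(py::init<std::string>(), py::arg("model_path"))
        .def(py::init([](std::string path, bool enable_graph_compiling) {
                 return std::make_shared<Engine>(std::move(path), enable_graph_compiling);
             }),
             py::arg("model_path"), py::arg("enable_graph_compiling") = false)
        // Methods are bound once and work with either constructor.
        .def("info", &Engine::info)
        .def("__repr__", [](const Engine &self) { return "<demo.Engine '" + self.source + "'>"; });
}
```

Consolidating the duplicated method defs in engine.hpp the same way would keep the overloaded __init__ without doubling the rest of the interface.
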
cfg.get() : nullptr); }, py::arg("cache_config") = py::none()) + .def("get_cache_config", [](const InferEngine &self) { + auto cfg = self.get_cache_config(); + return std::shared_ptr(std::move(cfg->unique_copy())); }) + .def("__repr__", [](const InferEngine &self) { return ""; }); py::class_(infer_engine, "Input") .def( diff --git a/csrc/quantization/base_quantization.hpp b/csrc/quantization/base_quantization.hpp new file mode 100644 index 00000000..0d1f52ce --- /dev/null +++ b/csrc/quantization/base_quantization.hpp @@ -0,0 +1,18 @@ +#pragma once +#include "../config/quant_config.hpp" +#include "infinicore/nn/quantization.hpp" +#include "nlohmann/json.hpp" + +namespace infinilm::quantization { +class BaseQuantization { + // Base class for quantization schemes. Intended to be extended to support various quantization methods. +public: + explicit BaseQuantization(const nlohmann::json &quant_config) : quant_config_(quant_config) {}; + virtual ~BaseQuantization() = default; + + virtual infinicore::nn::QuantScheme get_quant_scheme() const = 0; + +protected: + nlohmann::json quant_config_; +}; +} // namespace infinilm::quantization diff --git a/csrc/quantization/compressed_tensors.hpp b/csrc/quantization/compressed_tensors.hpp new file mode 100644 index 00000000..f502f398 --- /dev/null +++ b/csrc/quantization/compressed_tensors.hpp @@ -0,0 +1,21 @@ +#pragma once + +#include "../config/quant_config.hpp" +#include "base_quantization.hpp" +namespace infinilm::quantization { + +class CompressedTensors : public BaseQuantization { + // This is a temporary class that currently only returns COMPRESSED_TENSOR_W8A8I8. + // Future enhancements should parse quant_config to extract detailed quantization + // information and support multiple quantization schemes. +public: + explicit CompressedTensors(const nlohmann::json &quant_config) + : BaseQuantization(quant_config) {}; + + infinicore::nn::QuantScheme + get_quant_scheme() const override { + return infinicore::nn::QuantScheme::COMPRESSED_TENSOR_W8A8I8; + }; +}; + +} // namespace infinilm::quantization diff --git a/csrc/quantization/quantization.hpp b/csrc/quantization/quantization.hpp new file mode 100644 index 00000000..48b7646e --- /dev/null +++ b/csrc/quantization/quantization.hpp @@ -0,0 +1,5 @@ +#pragma once + +#include "base_quantization.hpp" +#include "compressed_tensors.hpp" +#include "infinicore/nn/quantization.hpp" diff --git a/examples/bench.py b/examples/bench.py index 2b968fa6..957f9215 100644 --- a/examples/bench.py +++ b/examples/bench.py @@ -272,6 +272,13 @@ def __init__( # 创建 tokenizer # ---------------------------------------------------------------------------- # tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) + + if tokenizer.pad_token is None: + if tokenizer.eos_token is not None: + tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token_id = tokenizer.eos_token_id + else: + tokenizer.add_special_tokens({'pad_token': '[PAD]'}) # ---------------------------------------------------------------------------- # # token编码 @@ -285,7 +292,16 @@ def __init__( ] # print(input_content, end="", flush=True) - input_ids_list = tokenizer.batch_encode_plus(input_content)["input_ids"] + # Support Transformers >= 5.0 for batch_encode_plus deprecation + encoding = tokenizer( + input_content, + padding=True, + truncation=True, + max_length=2048, + return_tensors="pt" + ) + + input_ids_list = encoding["input_ids"] self.model = model self.tokenizer = tokenizer diff --git a/examples/jiuge.py b/examples/jiuge.py index 
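
Editor's note: CompressedTensors is the only concrete BaseQuantization so far, and QuantConfig::get_quantization_method() dispatches on the quant_method string. Below is a hedged sketch of how a further scheme could slot in; AwqQuantization and QuantScheme::AWQ_W4A16 are hypothetical placeholders, not types this patch defines (the patch itself only relies on NONE and COMPRESSED_TENSOR_W8A8I8).

```cpp
// Hypothetical extension of BaseQuantization. AwqQuantization and
// QuantScheme::AWQ_W4A16 do NOT exist in this patch; they only mark
// where a new scheme would plug in.
#pragma once
#include "base_quantization.hpp"

namespace infinilm::quantization {

class AwqQuantization : public BaseQuantization {
public:
    explicit AwqQuantization(const nlohmann::json &quant_config)
        : BaseQuantization(quant_config) {}

    infinicore::nn::QuantScheme get_quant_scheme() const override {
        // A real implementation would inspect quant_config_ (bits, group_size,
        // zero-point handling) before committing to a scheme.
        return infinicore::nn::QuantScheme::AWQ_W4A16; // hypothetical enum value
    }
};

} // namespace infinilm::quantization
```

The matching dispatch branch in QuantConfig::get_quantization_method() would check quantization_config["quant_method"] == "awq" and return std::make_shared<AwqQuantization>(quantization_config).
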
0ca5a418..653a1a55 100644 --- a/examples/jiuge.py +++ b/examples/jiuge.py @@ -140,7 +140,6 @@ def test( distributed_config=DistConfig(tp), enable_graph_compiling=enable_graph, ) - # ---------------------------------------------------------------------------- # # Load Weights # ---------------------------------------------------------------------------- # @@ -150,7 +149,6 @@ def test( # create tokenizer # ---------------------------------------------------------------------------- # tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) - if "llama" == model.config.model_type: backend = getattr(tokenizer, "backend_tokenizer", None) target = getattr(backend, "_tokenizer", backend) diff --git a/python/infinilm/infer_engine.py b/python/infinilm/infer_engine.py index f5359d7d..213e62ad 100644 --- a/python/infinilm/infer_engine.py +++ b/python/infinilm/infer_engine.py @@ -34,15 +34,21 @@ def __init__( if device is None: device = infinicore.device() - + + # super().__init__( + # self.config, + # distributed_config._underlying, + # device._underlying.type, + # cache_config, + # enable_graph_compiling, + # ) super().__init__( - self.config, + model_path, distributed_config._underlying, device._underlying.type, cache_config, enable_graph_compiling, ) - self.use_cache = False self.enable_paged_attn = isinstance(cache_config, PagedKVCacheConfig) diff --git a/python/infinilm/modeling_utils.py b/python/infinilm/modeling_utils.py index 792aa503..d1b26dd9 100644 --- a/python/infinilm/modeling_utils.py +++ b/python/infinilm/modeling_utils.py @@ -75,7 +75,7 @@ def load_state_dict( ) for k in f.keys(): - state_dict[k] = f.get_tensor(k).to(device=device, dtype=dtype) + state_dict[k] = f.get_tensor(k).to(device=device) return state_dict @@ -155,7 +155,6 @@ def load_model_state_dict_by_file( model_param_infini = {} for key in model_param.keys(): model_param_infini[key] = infinicore.from_torch(model_param[key]) - model.load_state_dict(model_param_infini, strict=False) infinicore.sync_device() @@ -168,7 +167,6 @@ def load_model_state_dict_by_file( model_param_infini[key] = infinicore.from_torch( model_params[key].to(dtype=torch_dtype) ) - already_loaded_keys.append(key) model.load_state_dict(model_param_infini, strict=True) diff --git a/third_party/json b/third_party/json new file mode 160000 index 00000000..5ed07097 --- /dev/null +++ b/third_party/json @@ -0,0 +1 @@ +Subproject commit 5ed07097faa6c50199c4a3b66e5ed37d4fbfccc2 diff --git a/xmake.lua b/xmake.lua index ad636197..aab1a0c7 100644 --- a/xmake.lua +++ b/xmake.lua @@ -6,6 +6,7 @@ set_toolchains("gcc") -- Add spdlog from third_party directory add_includedirs("third_party/spdlog/include") +add_includedirs("third_party/json/single_include/") target("infinicore_infer") set_kind("shared")
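
Editor's note: with this patch, python/infinilm/infer_engine.py hands model_path straight to the C++ InferEngine (the old self.config-based call survives only as a comment), so config.json is parsed once in C++ via ModelConfig. Read back through the new pybind constructor, the equivalent C++-side construction looks roughly like the sketch below; the header path, namespace qualification, and exact constructor signature are inferred from the binding lambda and should be treated as assumptions.

```cpp
// Hedged sketch: constructing the engine from a model directory on the C++
// side, mirroring what the updated Python wrapper now does via pybind11.
// Header and namespaces are assumptions, not confirmed against the sources.
#include "engine/infer_engine.hpp" // assumed header location

#include <string>

void run_engine(const std::string &model_dir) {
    using namespace infinilm;

    // Qualification follows the binding file; it may need engine::distributed::
    // elsewhere in the tree.
    distributed::DistConfig dist;                                  // default: single rank
    auto device_type = infinicore::context::getDevice().getType();

    // Matches the Python-side defaults: no cache config, graph compiling off.
    InferEngine engine(model_dir, dist, device_type,
                       /*cache_config=*/nullptr,
                       /*enable_graph_compiling=*/false);

    // engine.forward(...), engine.reset_cache(...), engine.state_dict() as bound above.
}
```
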