Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 055d003

Browse files
committed
refactor: check if model is loaded
1 parent fecc146 commit 055d003

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

controllers/llamaCPP.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ std::shared_ptr<inferenceState> create_inference_state(llamaCPP *instance) {
2222
// --------------------------------------------
2323

2424
// Function to check if the model is loaded
25-
void checkModelLoaded(llama_server_context &llama, const HttpRequestPtr &req,
25+
void check_model_loaded(llama_server_context &llama, const HttpRequestPtr &req,
2626
std::function<void(const HttpResponsePtr &)> &callback) {
2727
if (!llama.model_loaded_external) {
2828
Json::Value jsonResp;
@@ -151,7 +151,7 @@ void llamaCPP::chatCompletion(
151151
std::function<void(const HttpResponsePtr &)> &&callback) {
152152

153153
// Check if model is loaded
154-
checkModelLoaded(llama, req, callback);
154+
check_model_loaded(llama, req, callback);
155155

156156
const auto &jsonBody = req->getJsonObject();
157157
std::string formatted_output = pre_prompt;
@@ -409,7 +409,7 @@ void llamaCPP::chatCompletion(
409409
void llamaCPP::embedding(
410410
const HttpRequestPtr &req,
411411
std::function<void(const HttpResponsePtr &)> &&callback) {
412-
checkModelLoaded(llama, req, callback);
412+
check_model_loaded(llama, req, callback);
413413

414414
const auto &jsonBody = req->getJsonObject();
415415

0 commit comments

Comments (0)