
Commit 6ceb477

to take user_prompt

1 parent 59acfda

File tree

1 file changed (+8, -3)

controllers/llamaCPP.cc

Lines changed: 8 additions & 3 deletions
@@ -107,7 +107,7 @@ void llamaCPP::chatCompletion(
       }
 
       std::string content = message["content"].asString();
-      formatted_output += role + ": " + content + "\n";
+      formatted_output += role + content + "\n";
     }
     formatted_output += "assistant:";
 
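With the hard-coded ": " separator dropped, each configured role prefix is now expected to carry its own separator (e.g. "USER: "). How `role` is selected is not shown in this hunk; the following is a minimal sketch assuming a role-to-prefix mapping, placeholder prompt strings, and a JsonCpp `messages` array, none of which come from the diff itself.

#include <json/json.h>
#include <iostream>
#include <string>

int main() {
  // Hypothetical configured values; the real ones are set in loadModel.
  std::string user_prompt = "USER: ";
  std::string ai_prompt = "ASSISTANT: ";
  std::string system_prompt = "SYSTEM: ";

  Json::Value messages(Json::arrayValue);
  Json::Value m;
  m["role"] = "user";
  m["content"] = "Hello";
  messages.append(m);

  std::string formatted_output;
  for (const auto &message : messages) {
    const std::string name = message["role"].asString();
    // Assumed mapping of the message's "role" field to a configured prefix.
    const std::string role = name == "user"     ? user_prompt
                             : name == "system" ? system_prompt
                                                : ai_prompt;
    std::string content = message["content"].asString();
    formatted_output += role + content + "\n";  // the new line from the diff
  }
  formatted_output += "assistant:";  // still hard-coded in this commit
  std::cout << formatted_output << "\n";
  return 0;
}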
@@ -116,8 +116,7 @@ void llamaCPP::chatCompletion(
       stopWords.push_back(stop_word.asString());
     }
     // specify default stop words
-    stopWords.push_back("user:");
-    stopWords.push_back("### USER:");
+    stopWords.push_back(user_prompt);
     data["stop"] = stopWords;
   }
 
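The two hard-coded defaults are replaced by the configurable user_prompt: once the model starts emitting the user's turn marker, generation stops so the completion does not run past the assistant's reply. A small illustration of that effect, with hypothetical values and a simplified truncation loop (the actual stopping happens inside the inference backend, not in this controller):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical value; in the real code this is whatever loadModel was given.
  std::string user_prompt = "USER: ";
  std::vector<std::string> stopWords = {user_prompt};

  // Hypothetical raw completion where the model begins a new user turn.
  std::string generated = "Here is the answer.\nUSER: and another question";
  for (const auto &stop : stopWords) {
    auto pos = generated.find(stop);
    if (pos != std::string::npos) generated.erase(pos);  // cut at the stop word
  }
  std::cout << generated;  // prints "Here is the answer.\n"
  return 0;
}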
@@ -224,6 +223,12 @@ void llamaCPP::loadModel(
   }
 
   params.cont_batching = (*jsonBody)["cont_batching"].asBool();
+
+  // Set up prompt
+  user_prompt = (*jsonBody)["user_prompt"].asString();
+  ai_prompt = (*jsonBody)["ai_prompt"].asString();
+  system_prompt = (*jsonBody)["system_prompt"].asString();
+
   // params.n_threads = (*jsonBody)["n_threads"].asInt();
   // params.n_threads_batch = params.n_threads;
 }
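For reference, a sketch of the kind of loadModel request body this commit reads. The cont_batching flag and the three prompt fields appear verbatim in the diff; the concrete prompt strings below are placeholders, not project defaults, and any other fields the endpoint accepts are omitted.

#include <json/json.h>
#include <iostream>

int main() {
  Json::Value body;
  body["cont_batching"] = true;        // read in loadModel per the diff
  body["user_prompt"] = "USER: ";      // placeholder, not a confirmed default
  body["ai_prompt"] = "ASSISTANT: ";   // placeholder
  body["system_prompt"] = "SYSTEM: ";  // placeholder
  std::cout << body.toStyledString();
  return 0;
}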
