@@ -56,7 +56,7 @@ size_t StreamWriteCallback(char* ptr, size_t size, size_t nmemb,
   return size * nmemb;
 }
 
-static size_t WriteCallback(char* ptr, size_t size, size_t nmemb,
+[[maybe_unused]] static size_t WriteCallback(char* ptr, size_t size, size_t nmemb,
                             std::string* data) {
   data->append(ptr, size * nmemb);
   return size * nmemb;
@@ -185,6 +185,7 @@ void PythonEngine::GetModels(
   status["status_code"] = k200OK;
 
   callback(std::move(status), std::move(response_json));
+  (void)json_body;
 }
 
 void PythonEngine::LoadModel(
@@ -386,6 +387,8 @@ void PythonEngine::HandleChatCompletion(
     std::shared_ptr<Json::Value> json_body,
     std::function<void(Json::Value&&, Json::Value&&)>&& callback) {
   LOG_WARN << "Does not support yet!";
+  (void)json_body;
+  (void)callback;
 }
 
 CurlResponse PythonEngine::MakeStreamPostRequest(
@@ -623,7 +626,9 @@ Json::Value PythonEngine::GetRemoteModels() {
   return Json::Value();
 }
 
-void PythonEngine::StopInferencing(const std::string& model_id) {}
+void PythonEngine::StopInferencing(const std::string& model_id) {
+  (void)model_id;
+}
 
 void PythonEngine::HandleRouteRequest(
     std::shared_ptr<Json::Value> json_body,
@@ -893,12 +898,14 @@ void PythonEngine::SetLogLevel(trantor::Logger::LogLevel log_level) {
 
 void PythonEngine::Load(EngineLoadOption opts) {
   // Develop register model here on loading engine
+  (void)opts;
 };
 
 void PythonEngine::Unload(EngineUnloadOption opts) {
   for (const auto& pair : models_) {
     TerminateModelProcess(pair.first);
   }
+  (void)opts;
 };
 
-}  // namespace python_engine
+}  // namespace python_engine