@@ -153,7 +153,7 @@ void llamaCPP::chatCompletion(
     const HttpRequestPtr &req,
     std::function<void(const HttpResponsePtr &)> &&callback) {

-  if (!model_loaded) {
+  if (!llama.model_loaded_external) {
     Json::Value jsonResp;
     jsonResp["message"] =
         "Model has not been loaded, please load model into nitro";
@@ -391,7 +391,7 @@ void llamaCPP::unloadModel(
     std::function<void(const HttpResponsePtr &)> &&callback) {
   Json::Value jsonResp;
   jsonResp["message"] = "No model loaded";
-  if (model_loaded) {
+  if (llama.model_loaded_external) {
     stopBackgroundTask();

     llama_free(llama.ctx);
@@ -408,7 +408,7 @@ void llamaCPP::modelStatus(
     const HttpRequestPtr &req,
     std::function<void(const HttpResponsePtr &)> &&callback) {
   Json::Value jsonResp;
-  bool is_model_loaded = this->model_loaded;
+  bool is_model_loaded = llama.model_loaded_external;
   if (is_model_loaded) {
     jsonResp["model_loaded"] = is_model_loaded;
     jsonResp["model_data"] = llama.get_model_props().dump();
@@ -484,7 +484,6 @@ bool llamaCPP::loadModelImpl(const Json::Value &jsonBody) {
484484 }
485485 llama.initialize ();
486486
487- model_loaded = true ;
488487 llama.model_loaded_external = true ;
489488
490489 LOG_INFO << " Started background task here!" ;
@@ -497,7 +496,7 @@ void llamaCPP::loadModel(
     const HttpRequestPtr &req,
     std::function<void(const HttpResponsePtr &)> &&callback) {

-  if (model_loaded) {
+  if (llama.model_loaded_external) {
     LOG_INFO << "model loaded";
     Json::Value jsonResp;
     jsonResp["message"] = "Model already loaded";
@@ -525,7 +524,7 @@ void llamaCPP::loadModel(
 }

 void llamaCPP::backgroundTask() {
-  while (model_loaded) {
+  while (llama.model_loaded_external) {
     // model_loaded =
     llama.update_slots();
   }
@@ -536,10 +535,9 @@ void llamaCPP::backgroundTask() {
 }

 void llamaCPP::stopBackgroundTask() {
-  if (model_loaded) {
-    model_loaded = false;
-    llama.condition_tasks.notify_one();
+  if (llama.model_loaded_external) {
     llama.model_loaded_external = false;
+    llama.condition_tasks.notify_one();
     LOG_INFO << "changed to false";
     if (backgroundThread.joinable()) {
       backgroundThread.join();
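
Taken together, these hunks consolidate two flags into one: the controller's own model_loaded member is dropped, and llama.model_loaded_external on the server context becomes the single flag that gates the HTTP handlers, drives the backgroundTask() loop, and orders shutdown (flag to false first, then notify, then join). Below is a minimal, self-contained sketch of that lifecycle pattern, not the actual nitro implementation: the LlamaServer and Controller types, the 100 ms wait, and main() are hypothetical stand-ins, while the member names mirror the diff.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Hypothetical stand-in for the llama server context seen in the diff.
struct LlamaServer {
  std::atomic<bool> model_loaded_external{false};
  std::mutex mutex_tasks;
  std::condition_variable condition_tasks;

  // Stand-in for llama.update_slots(): block until woken or timed out.
  void update_slots() {
    std::unique_lock<std::mutex> lock(mutex_tasks);
    condition_tasks.wait_for(lock, std::chrono::milliseconds(100));
  }
};

// Hypothetical stand-in for the llamaCPP controller.
struct Controller {
  LlamaServer llama;
  std::thread backgroundThread;

  void loadModel() {
    if (llama.model_loaded_external) return;  // "Model already loaded"
    llama.model_loaded_external = true;
    backgroundThread = std::thread([this] { backgroundTask(); });
  }

  // Mirrors llamaCPP::backgroundTask(): spin while the flag is set.
  void backgroundTask() {
    while (llama.model_loaded_external) {
      llama.update_slots();
    }
  }

  // Mirrors llamaCPP::stopBackgroundTask(): clear the flag first, then
  // wake the worker so the while-condition is re-checked, then join.
  void stopBackgroundTask() {
    if (llama.model_loaded_external) {
      llama.model_loaded_external = false;
      llama.condition_tasks.notify_one();
      if (backgroundThread.joinable()) {
        backgroundThread.join();
      }
    }
  }
};

int main() {
  Controller c;
  c.loadModel();
  c.stopBackgroundTask();
  std::cout << "background task stopped cleanly\n";
}

The ordering in stopBackgroundTask() is the point of the last hunk: the flag is cleared before condition_tasks.notify_one(), so when the worker wakes it re-evaluates the while condition, sees false, and exits, letting join() return promptly.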