@@ -1,8 +1,6 @@
 #include "command_line_parser.h"
 #include <memory>
 #include <optional>
-#include "commands/chat_cmd.h"
-#include "commands/chat_completion_cmd.h"
 #include "commands/cortex_upd_cmd.h"
 #include "commands/engine_get_cmd.h"
 #include "commands/engine_install_cmd.h"
@@ -152,36 +150,6 @@ void CommandLineParser::SetupCommonCommands() {
                         cml_data_.model_id, download_service_);
     rc.Exec(cml_data_.run_detach);
   });
-
-  auto chat_cmd = app_.add_subcommand(
-      "chat",
-      "Shortcut for `cortex run --chat` or send a chat completion request");
-  chat_cmd->group(kCommonCommandsGroup);
-  chat_cmd->usage("Usage:\n" + commands::GetCortexBinary() +
-                  " chat [model_id] -m [msg]");
-  chat_cmd->add_option("model_id", cml_data_.model_id, "");
-  chat_cmd->add_option("-m,--message", cml_data_.msg,
-                       "Message to chat with model");
-  chat_cmd->callback([this, chat_cmd] {
-    if (std::exchange(executed_, true))
-      return;
-    if (cml_data_.model_id.empty()) {
-      CLI_LOG("[model_id] is required\n");
-      CLI_LOG(chat_cmd->help());
-      return;
-    }
-
-    if (cml_data_.msg.empty()) {
-      commands::ChatCmd().Exec(cml_data_.config.apiServerHost,
-                               std::stoi(cml_data_.config.apiServerPort),
-                               cml_data_.model_id, download_service_);
-    } else {
-      commands::ChatCompletionCmd(model_service_)
-          .Exec(cml_data_.config.apiServerHost,
-                std::stoi(cml_data_.config.apiServerPort), cml_data_.model_id,
-                cml_data_.msg);
-    }
-  });
 }
 
 void CommandLineParser::SetupInferenceCommands() {
|
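For context on the removed block: it registers a subcommand via a CLI11-style API (app_.add_subcommand / add_option / callback) and uses std::exchange(executed_, true) as a run-once guard, since std::exchange returns the flag's previous value while setting the new one. A minimal standalone sketch of that pattern, assuming CLI11; the names here (executed, model_id, "chat") are illustrative stand-ins, not the actual parser members:

// Sketch only: demonstrates the subcommand + std::exchange guard pattern
// from the removed code, assuming the CLI11 library is available.
#include <CLI/CLI.hpp>
#include <iostream>
#include <string>
#include <utility>

int main(int argc, char** argv) {
  CLI::App app{"sketch"};
  bool executed = false;
  std::string model_id;

  auto* chat = app.add_subcommand("chat", "demo of the removed pattern");
  chat->add_option("model_id", model_id, "");
  chat->callback([&] {
    // std::exchange writes `true` and returns the old value, so only the
    // first invocation gets past this guard.
    if (std::exchange(executed, true))
      return;
    if (model_id.empty()) {
      std::cout << chat->help();  // mirrors the [model_id]-required check
      return;
    }
    std::cout << "would chat with " << model_id << "\n";
  });

  CLI11_PARSE(app, argc, argv);
  return 0;
}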