@@ -762,12 +762,6 @@ bool common_arg_utils::is_autoy(const std::string & value) {
 }
 
 common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
-    // default values specific to example
-    // note: we place it here instead of inside server.cpp to allow llama-gen-docs to pick it up
-    if (ex == LLAMA_EXAMPLE_SERVER) {
-        params.use_jinja = true;
-    }
-
     params.use_color = tty_can_use_colors();
 
     // load dynamic backends
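
For context, the removed block applied an example-specific default inside the shared parser init so that llama-gen-docs could pick it up. A minimal sketch of that pattern, using trimmed stand-in types rather than the real definitions from common.h:

    // Hedged sketch of the example-specific default pattern removed above;
    // the enum and struct are simplified stand-ins, not the llama.cpp definitions.
    enum llama_example_sketch { EXAMPLE_SKETCH_CLI, EXAMPLE_SKETCH_SERVER };

    struct common_params_sketch {
        bool use_jinja = false;   // assumed struct-level default
        bool use_color = false;
    };

    common_params_sketch parser_init_sketch(llama_example_sketch ex) {
        common_params_sketch params;
        if (ex == EXAMPLE_SKETCH_SERVER) {
            params.use_jinja = true;   // the server-only override that this hunk drops
        }
        return params;
    }
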
@@ -2623,14 +2617,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_MODELS_AUTOLOAD"));
     add_opt(common_arg(
         {"--jinja"},
-        string_format("use jinja template for chat (default: %s)\n", params.use_jinja ? "enabled" : "disabled"),
+        string_format("use jinja template for chat (default: %s)", params.use_jinja ? "enabled" : "disabled"),
         [](common_params & params) {
             params.use_jinja = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_JINJA"));
     add_opt(common_arg(
         {"--no-jinja"},
-        string_format("disable jinja template for chat (default: %s)\n", params.use_jinja ? "enabled" : "disabled"),
+        string_format("disable jinja template for chat (default: %s)", params.use_jinja ? "disabled" : "enabled"),
         [](common_params & params) {
             params.use_jinja = false;
         }
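
The --jinja/--no-jinja pair above follows the usual boolean toggle pattern: the help string is built from the current default via string_format (now without a trailing newline), and each handler just flips the flag. A minimal self-contained sketch of that pattern; Flag and Params here are simplified stand-ins, not the common_arg/add_opt API:

    // Hedged sketch of the --jinja / --no-jinja toggle pattern shown above.
    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    struct Params { bool use_jinja = false; };

    struct Flag {
        std::string name;
        std::string help;
        std::function<void(Params &)> handler;
    };

    int main() {
        Params params;
        std::vector<Flag> flags = {
            { "--jinja",
              std::string("use jinja template for chat (default: ") +
                  (params.use_jinja ? "enabled" : "disabled") + ")",
              [](Params & p) { p.use_jinja = true; } },
            { "--no-jinja",
              std::string("disable jinja template for chat (default: ") +
                  (params.use_jinja ? "disabled" : "enabled") + ")",
              [](Params & p) { p.use_jinja = false; } },
        };

        // Help text is built once from the current defaults (no trailing newline,
        // matching the change above), and each handler just flips the boolean.
        for (const auto & f : flags) {
            std::printf("%-12s %s\n", f.name.c_str(), f.help.c_str());
        }
        flags[1].handler(params);   // simulate passing --no-jinja
        std::printf("use_jinja = %s\n", params.use_jinja ? "true" : "false");
        return 0;
    }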