Commit a28118e

add https_verify config support
1 parent bf7af36 commit a28118e

File tree

1 file changed (+38, -19 lines)


src/autocoder/auto_coder.py

Lines changed: 38 additions & 19 deletions
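
Every hunk in this diff makes the same change: each place that builds a SaaS model config from a model_info entry now also passes "saas.https_verify", read with .get() so entries without the key keep the default of True (TLS certificates are verified unless a model entry explicitly opts out). A minimal sketch of the pattern, with a hypothetical model_info entry (the field layout is inferred from the keys accessed in the diff, not taken from the repository's config files):

# Illustrative only: model_info layout inferred from the keys used below.
model_info = {
    "api_key": "sk-example",
    "model_name": "example-model",
    "is_reasoning": False,
    "https_verify": False,  # optional; omit to keep verifying HTTPS certificates
}

saas_config = {
    "saas.api_key": model_info["api_key"],
    "saas.model": model_info["model_name"],
    "saas.is_reasoning": model_info["is_reasoning"],
    "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
    # Added in this commit: defaults to True when the entry does not set it.
    "saas.https_verify": model_info.get("https_verify", True),
}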
@@ -268,7 +268,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
@@ -292,7 +293,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 models.append(code_model)
@@ -311,7 +313,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("code_model", code_model)
@@ -333,7 +336,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 models.append(rerank_model)
@@ -352,7 +356,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("generate_rerank_model", rerank_model)
@@ -370,7 +375,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("inference_model", inference_model)
@@ -388,7 +394,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("index_filter_model", index_filter_model)
@@ -406,7 +413,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("context_prune_model", context_prune_model)
@@ -424,7 +432,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("conversation_prune_model", conversation_prune_model)
@@ -599,7 +608,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("chat_model", chat_model)
@@ -617,7 +627,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("vl_model", vl_model)
@@ -635,7 +646,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("index_model", index_model)
@@ -653,7 +665,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("sd_model", sd_model)
@@ -671,7 +684,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("text2voice_model", text2voice_model)
@@ -689,7 +703,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("voice2text_model", voice2text_model)
@@ -707,7 +722,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("planner_model", planner_model)
@@ -725,7 +741,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("commit_model", commit_model)
@@ -743,7 +760,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("designer_model", designer_model)
@@ -761,7 +779,8 @@ def intercept_callback(
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
         "saas.is_reasoning": model_info["is_reasoning"],
-        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096),
+        "saas.https_verify": model_info.get("https_verify", True),
     }
 )
 llm.setup_sub_client("emb_model", emb_model)
