|
316 | 316 | " api_key=os.environ[\"OPENAI_API_KEY\"],\n", |
317 | 317 | " )\n", |
318 | 318 | "\n", |
319 | | - "# Uses gpt-4.1-mini:\n", |
| 319 | + "# Uses gpt-5-mini:\n", |
320 | 320 | "# - more intelligent\n", |
321 | 321 | "llm_mini = ChatOpenAI(\n", |
322 | | - " model=\"gpt-4.1-mini\",\n", |
| 322 | + " model=\"gpt-5-mini\",\n", |
323 | 323 | " api_key=os.environ[\"OPENAI_API_KEY\"],\n", |
| 324 | + "    reasoning_effort=\"minimal\",\n", |
324 | 325 | " )" |
325 | 326 | ] |
326 | 327 | }, |
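
For reference, a minimal sketch of what the client setup looks like once this hunk lands — assuming a langchain-openai version recent enough to expose `reasoning_effort` on `ChatOpenAI` (the imports are added here for self-containment; the rest mirrors the notebook cell):

```python
import os

from langchain_openai import ChatOpenAI

# Cheaper, faster extraction model; "minimal" trades reasoning depth
# for latency and cost, which suits mechanical JSON extraction.
llm_mini = ChatOpenAI(
    model="gpt-5-mini",
    api_key=os.environ["OPENAI_API_KEY"],
    reasoning_effort="minimal",
)
```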
|
472 | 473 | " \"\"\"\n", |
473 | 474 | "\n", |
474 | 475 | "# Prompt for the LLM to extract questions.\n", |
475 | | - "def seperate_questions_prompt(parser: PydanticOutputParser[AllQuestionsModelLines], doc_page_content: list[str], previous_repsonse: str = \"\", improvements: list[str] = \"\") -> str:\n", |
| 476 | + "def seperate_questions_prompt(parser: PydanticOutputParser[AllQuestionsModelLines], doc_page_content: list[str]) -> str: #, previous_repsonse: str = \"\", improvements: list[str] = \"\") -> str:\n", |
476 | 477 | "\n", |
477 | 478 | " feedback = \"\"\n", |
478 | | - " if previous_repsonse:\n", |
479 | | - " feedback = f\"\"\"\n", |
| 479 | + " # if previous_repsonse:\n", |
| 480 | + " # feedback = f\"\"\"\n", |
480 | 481 | " \n", |
481 | | - " Previous output:\n", |
482 | | - " {previous_repsonse}\n", |
| 482 | + " # Previous output:\n", |
| 483 | + " # {previous_repsonse}\n", |
483 | 484 | "\n", |
484 | | - " Improvements:\n", |
485 | | - " {improvements}\n", |
| 485 | + " # Improvements:\n", |
| 486 | + " # {improvements}\n", |
486 | 487 | "\n", |
487 | | - " \"\"\"\n", |
| 488 | + " # \"\"\"\n", |
488 | 489 | "\n", |
489 | 490 | " return f\"\"\"\n", |
490 | 491 | " Your task is to extract a JSON with the following structure exactly, ready to be parsed by a pydantic model:\n", |
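
With the feedback path commented out, the prompt builder reduces to format instructions plus the document text. A hedged sketch of the resulting shape (`PydanticOutputParser.get_format_instructions()` is the standard LangChain way to inject the schema; the body below is an illustrative reduction, not the notebook's full prompt):

```python
from langchain_core.output_parsers import PydanticOutputParser

def seperate_questions_prompt(
    parser: PydanticOutputParser,  # PydanticOutputParser[AllQuestionsModelLines] in the notebook
    doc_page_content: list[str],
) -> str:
    pages = "\n".join(doc_page_content)
    return f"""
    Your task is to extract a JSON with the following structure exactly, ready to be parsed by a pydantic model:

    {parser.get_format_instructions()}

    Document:
    {pages}
    """
```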
|
716 | 717 | "\n", |
717 | 718 | " for attempt_idx in range(3):\n", |
718 | 719 | " try:\n", |
719 | | - " response = llm_mini.invoke(seperate_questions_prompt(parser, markdown, previous_response, improvements))\n", |
| 720 | + " response = llm_mini.invoke(seperate_questions_prompt(parser, markdown)) #, previous_response, improvements))\n", |
720 | 721 | " parsed_response = parser.parse(response.content)\n", |
721 | 722 | " questions_dict = extract_questions(parsed_response, markdown)\n", |
722 | 723 | " print(questions_dict.model_dump_json())\n", |
723 | 724 | "\n", |
724 | | - " evaluation = evaluate_questions_separation(parsed_output=questions_dict, markdown=markdown)\n", |
725 | | - "\n", |
726 | | - " if all(e.well_separated for e in evaluation):\n", |
| 725 | + " # evaluation = evaluate_questions_separation(parsed_output=questions_dict, markdown=markdown)\n", |
| 726 | + " # if all(e.well_separated for e in evaluation):\n", |
| 727 | + " if True:\n", |
727 | 728 | " print(\"Question separation was successful.\")\n", |
728 | 729 | " return questions_dict.model_dump()\n", |
729 | 730 | " else:\n", |
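
The loop this hunk touches follows the usual invoke → parse → validate retry pattern; with the separation check bypassed via `if True:`, the first response that parses is returned. A sketch of the resulting control flow (`extract_questions`, `markdown`, and `parser` come from the notebook; the exception handling is an assumption about how parse failures trigger the retry):

```python
for attempt_idx in range(3):
    try:
        response = llm_mini.invoke(seperate_questions_prompt(parser, markdown))
        parsed_response = parser.parse(response.content)
        questions_dict = extract_questions(parsed_response, markdown)
        # Evaluation gate is disabled in this commit, so any response
        # that parses cleanly is accepted immediately.
        return questions_dict.model_dump()
    except Exception as exc:  # assumption: parse errors drive the retry
        print(f"Attempt {attempt_idx + 1} failed: {exc}")
```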
|