diff --git a/.gitignore b/.gitignore index 4710ec1..0a279ba 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ -venv/ +.venv/ .env *.pyc __pycache__ -token_stats.json \ No newline at end of file +token_stats.json +.DS_Store \ No newline at end of file diff --git a/Makefile b/Makefile index d81f26a..845a737 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ setup: - python3 -m venv venv - . venv/bin/activate && pip install --upgrade pip && pip install -r requirements.txt && pipenv install python-dotenv + python3 -m venv .venv + . .venv/bin/activate && pip install --upgrade pip && pip install -r requirements.txt && pipenv install python-dotenv run: - . venv/bin/activate && FLASK_APP=app.py flask run --port=5001 + . .venv/bin/activate && FLASK_APP=app.py flask run --port=5001 diff --git a/Pipfile b/Pipfile index 4dfc79b..ae43018 100644 --- a/Pipfile +++ b/Pipfile @@ -10,11 +10,12 @@ langchain-openai = "*" langchain-google-genai = "*" langchain-aws = "*" langchain-together = "*" +langchain-ollama = "*" torch = "*" transformers = "*" toml = "*" boto3 = "*" -python-dotenv = "*" +pipenv = "*" [dev-packages] diff --git a/app.py b/app.py index 299f33d..8329d67 100644 --- a/app.py +++ b/app.py @@ -94,6 +94,10 @@ def initialize_llm(llm_choice): elif llm_choice == "meta_llama_4_scout": return ChatTogether(model="meta-llama/Llama-4-Scout-17B-16E-Instruct", temperature=0, max_tokens=None, timeout=None, max_retries=2) + elif llm_choice.startswith("gpt_oss_"): + if llm_choice == "gpt_oss_20B": + return ChatTogether(model="openai/gpt-oss-20b", temperature=0, max_tokens=None, timeout=None, max_retries=2) + # Default fallback to OpenAI's GPT-4o logging.warning(f"Unknown LLM choice '{llm_choice}', defaulting to openai_gpt_4o") return ChatOpenAI(model="gpt-4o", max_tokens=None, temperature=0) @@ -112,7 +116,8 @@ def initialize_llm(llm_choice): "google_gemini_15_flash", "google_gemini_2_flash", "google_gemini_25_pro", "anthropic_haiku_35", "anthropic_sonnet_35", 
"anthropic_sonnet_37", "deepseek_r1", "deepseek_v3", - "meta_llama_33_70B", "meta_llama_31_405B", "meta_llama_4_maverick", "meta_llama_4_scout" + "meta_llama_33_70B", "meta_llama_31_405B", "meta_llama_4_maverick", "meta_llama_4_scout", + "gpt_oss_20B" ] def remove_think_tags(text): diff --git a/config.toml b/config.toml index 79bb84e..ae59f78 100644 --- a/config.toml +++ b/config.toml @@ -1,4 +1,5 @@ [llm] +# options: "openai_gpt_4o", "google_gemini_2_flash", "gpt_oss_20B", etc. selected = "openai_gpt_4o" [prompt_engineering] diff --git a/requirements.txt b/requirements.txt index f72a460..fae09e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,4 +8,6 @@ langchain-ollama torch transformers toml -boto3 \ No newline at end of file +boto3 +langchain-core +python-dotenv \ No newline at end of file diff --git a/templates/index.html b/templates/index.html index cab032b..337cbe1 100644 --- a/templates/index.html +++ b/templates/index.html @@ -116,6 +116,7 @@

{{ selectedEmail.subject }}

+