|
6 | 6 |
|
7 | 7 | if "response" not in st.session_state: |
8 | 8 | st.session_state.response = "" |
| 9 | +if "feedback_key" not in st.session_state: |
| 10 | + st.session_state.feedback_key = 0 |
9 | 11 | if "logged_prompt" not in st.session_state: |
10 | 12 | st.session_state.logged_prompt = "" |
11 | 13 |
|
|
15 | 17 | email, password = trubrics_config() |
16 | 18 |
|
17 | 19 | if email and password: |
18 | | - collector = FeedbackCollector( |
19 | | - project="default", |
20 | | - email=email, |
21 | | - password=password, |
22 | | - ) |
| 20 | + try: |
| 21 | + collector = FeedbackCollector(email=email, password=password, project="default") |
| 22 | + except Exception: |
| 23 | + st.error(f"Error authenticating '{email}' with [Trubrics](https://trubrics.streamlit.app/). Please try again.") |
| 24 | + st.stop() |
23 | 25 | else: |
24 | | - st.warning("To save some feedback to Trubrics, add your account details in the sidebar.") |
| 26 | + st.info( |
| 27 | + "To ask a question to an LLM and save your feedback to Trubrics, add your email and password in the sidebar." |
| 28 | + " Don't have an account yet? Create one for free [here](https://trubrics.streamlit.app/)!" |
| 29 | + ) |
| 30 | + st.stop() |
25 | 31 |
|
26 | 32 | models = ("gpt-3.5-turbo",) |
27 | 33 | model = st.selectbox( |
|
32 | 38 |
|
33 | 39 | openai.api_key = st.secrets.get("OPENAI_API_KEY") |
34 | 40 | if openai.api_key is None: |
35 | | - raise ValueError("OpenAI key is missing. Set OPENAI_API_KEY in st.secrets") |
| 41 | + st.info("Please add your OpenAI API key to continue.") |
| 42 | + st.stop() |
36 | 43 |
|
37 | 44 | prompt = st.text_area(label="Prompt", label_visibility="collapsed", placeholder="What would you like to know?") |
38 | 45 | button = st.button(f"Ask {model}") |
|
41 | 48 | response = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": prompt}]) |
42 | 49 | response_text = response.choices[0].message["content"] |
43 | 50 | st.session_state.logged_prompt = collector.log_prompt( |
44 | | - config_model={"model": model}, prompt=prompt, generation=response_text, tags=["llm_app.py"] |
| 51 | + config_model={"model": model}, prompt=prompt, generation=response_text, tags=["llm_app.py"], user_id=email |
45 | 52 | ) |
46 | 53 | st.session_state.response = response_text |
| 54 | + st.session_state.feedback_key += 1 |
47 | 55 |
|
48 | 56 | if st.session_state.response: |
49 | 57 | st.markdown(f"#### :violet[{st.session_state.response}]") |
|
55 | 63 | prompt_id=st.session_state.logged_prompt.id, |
56 | 64 | model=model, |
57 | 65 | align="flex-start", |
58 | | - single_submit=False, |
59 | 66 | tags=["llm_app.py"], |
| 67 | + key=f"feedback_{st.session_state.feedback_key}", # overwrite with new key |
| 68 | + user_id=email, |
60 | 69 | ) |
61 | 70 |
|
62 | 71 | if feedback: |
|
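Note: the diff above bumps st.session_state.feedback_key after each new LLM response and passes it into the feedback widget's key argument so the widget is remounted fresh for every answer. Below is a minimal sketch of that widget-key reset pattern, not the app's actual code: it uses a plain st.text_input as a stand-in for the Trubrics feedback component, and the button and label names are illustrative only.

import streamlit as st

# Counter that changes whenever a new response is generated.
if "feedback_key" not in st.session_state:
    st.session_state.feedback_key = 0

if st.button("Ask"):
    # ... call the LLM and store its response in session_state here ...
    st.session_state.feedback_key += 1  # new key -> Streamlit mounts a fresh, empty widget

# Passing the counter into `key` gives the widget a new identity after each
# response, so feedback entered for the previous answer is not carried over.
feedback = st.text_input(
    "Feedback on this response",
    key=f"feedback_{st.session_state.feedback_key}",
)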