import streamlit as st
from g4f import ChatCompletion
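# NOTE: requires the `streamlit` and `g4f` packages (pip install streamlit g4f);
# launch with `streamlit run app.py` (file name is an assumption).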

# List of available models
models = [
    "gpt-4o", "gpt-4o-mini", "gpt-4",
    "gpt-4-turbo", "gpt-3.5-turbo",
    "claude-3.7-sonnet", "o3-mini", "o1",
    "claude-3.5", "llama-3.1-405b", "gemini-flash",
    "blackboxai-pro", "openchat-3.5", "glm-4-9B", "blackboxai",
]

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Streamlit app title
st.title("Chat with AI Models")

# Model selection
selected_model = st.selectbox("Choose a model:", models)

# Display chat messages from history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# User input
if user_input := st.chat_input("What do you want to ask?"):
    # Display user message
    st.chat_message("user").markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Get a response from the selected model; g4f providers can fail, so guard the call
    try:
        response = ChatCompletion.create(
            model=selected_model,
            messages=st.session_state.messages
        )
    except Exception as e:
        response = f"Error: request failed ({e})"

    # g4f typically returns a plain string when not streaming; also handle an
    # OpenAI-style dict in case a provider returns one
    if isinstance(response, str):
        response_content = response  # Use the string directly
    else:
        try:
            response_content = response['choices'][0]['message']['content']
        except (IndexError, KeyError, TypeError):
            response_content = "Error: Unexpected response structure."

    # Display assistant response
    with st.chat_message("assistant"):
        st.markdown(response_content)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response_content})