import streamlit as st
import google.generativeai as genai
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
import html
import re
import time
# Configure the Gemini API
genai.configure(api_key=st.secrets["GOOGLE_API_KEY"])
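# Note: Streamlit loads st.secrets from .streamlit/secrets.toml locally
# (or from the app's secrets manager when deployed), so GOOGLE_API_KEY
# must be defined there before the app starts.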
# Create the model with enhanced system instructions
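# Sampling settings: temperature controls randomness, top_p and top_k bound
# nucleus/top-k sampling, and max_output_tokens caps the response length.
# Note that 10240 may exceed the output limit of some Gemini models
# (commonly 8192 tokens), in which case the API may clamp or reject it.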
generation_config = {
"temperature": 0.7,
"top_p": 0.95,
"top_k": 64,
"max_output_tokens": 10240,
}
model = genai.GenerativeModel(
model_name="gemini-1.5-pro",
generation_config=generation_config,
system_instruction="""You are Ath, an advanced AI code assistant with expertise across multiple programming languages, frameworks, and paradigms. Your knowledge spans software architecture, design patterns, algorithms, and cutting-edge technologies. Provide high-quality, optimized code solutions with explanations when requested. Adapt your communication style based on the user's expertise level, offering additional context for beginners and diving into complex topics for experts. You can generate code, explain concepts, debug issues, and provide best practices. Always prioritize security, efficiency, and maintainability in your solutions."""
)
# Initialize session state for chat history
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []
def generate_response(user_input):
    try:
        # Add the user message to the chat history
        st.session_state.chat_history.append({"role": "user", "content": user_input})
        # Convert the history to the structure the Gemini API expects:
        # roles must be "user" or "model", and the text goes under "parts"
        contents = [
            {"role": "model" if msg["role"] == "assistant" else "user", "parts": [msg["content"]]}
            for msg in st.session_state.chat_history
        ]
        # Generate response
        response = model.generate_content(contents)
        # Add the AI response to the chat history
        st.session_state.chat_history.append({"role": "assistant", "content": response.text})
        return response.text
    except Exception as e:
        return f"An error occurred: {e}"
def create_code_block(code, language):
    # Fall back to plain text if the requested lexer is not available
    try:
        lexer = get_lexer_by_name(language, stripall=True)
    except Exception:
        lexer = get_lexer_by_name("text", stripall=True)
    formatter = HtmlFormatter(style="monokai", linenos=True, cssclass="source")
    highlighted_code = highlight(code, lexer, formatter)
    css = formatter.get_style_defs('.source')
    return highlighted_code, css
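# Example usage (for illustration only):
#     highlighted, css = create_code_block("print('hello')", "python")
#     st.markdown(f"<style>{css}</style>", unsafe_allow_html=True)
#     st.markdown(highlighted, unsafe_allow_html=True)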
def detect_language(code):
    # Simple language detection based on keywords or syntax
    if re.search(r'\b(def|import|class)\b', code):
        return 'python'
    elif re.search(r'\b(function|var|let|const)\b', code):
        return 'javascript'
    elif re.search(r'\b(public|private|class)\b', code):
        return 'java'
    else:
        return 'text'  # Default to plain text if language can't be determined
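# The keyword checks above are a rough heuristic; an alternative sketch would be
# to use pygments' own detection, e.g.:
#     from pygments.lexers import guess_lexer
#     language = guess_lexer(code).aliases[0]
# guess_lexer can misidentify short snippets, so the heuristic is kept as the default.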
# Streamlit UI setup
st.set_page_config(page_title="Advanced AI Code Assistant", page_icon="💻", layout="wide")
# ... (Keep the existing CSS styles) ...
st.markdown('<div class="main-container">', unsafe_allow_html=True)
st.title("💻 Advanced AI Code Assistant")
st.markdown('<p class="subtitle">Powered by Google Gemini - Expert-level coding solutions</p>', unsafe_allow_html=True)
# Add a selectbox for different modes
mode = st.selectbox("Choose a mode:", ["Code Generation", "Code Explanation", "Debugging", "Best Practices"])
prompt = st.text_area(f"Enter your {mode.lower()} request:", height=100)
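# Note: the selected mode currently only changes the UI labels; one option
# (an assumption, not part of the original flow) is to prepend it to the prompt:
#     prompt = f"{mode} request: {prompt}"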
if st.button("Generate Response"):
    if prompt.strip() == "":
        st.error("Please enter a valid prompt.")
    else:
        with st.spinner(f"Generating {mode.lower()} response..."):
            completed_text = generate_response(prompt)
            if "An error occurred" in completed_text:
                st.error(completed_text)
            else:
                st.success(f"{mode} response generated successfully!")
                # Split the response into alternating prose and fenced code blocks:
                # parts = [prose, lang, code, prose, lang, code, ..., prose]
                parts = re.split(r'```(\w*)\n(.*?)```', completed_text, flags=re.DOTALL)
                if parts[0].strip():
                    st.markdown(parts[0])
                for i in range(1, len(parts) - 1, 3):
                    language = parts[i] if parts[i] else detect_language(parts[i + 1])
                    code = parts[i + 1]
                    highlighted_code, css = create_code_block(code, language)
                    st.markdown(f'<style>{css}</style>', unsafe_allow_html=True)
                    st.markdown('<div class="output-container">', unsafe_allow_html=True)
                    st.markdown('<div class="code-block">', unsafe_allow_html=True)
                    st.markdown(highlighted_code, unsafe_allow_html=True)
                    st.markdown('</div>', unsafe_allow_html=True)
                    st.markdown('</div>', unsafe_allow_html=True)
                    # Render the prose that follows this code block, if any
                    if parts[i + 2].strip():
                        st.markdown(parts[i + 2])
# Display chat history
st.subheader("Conversation History")
for message in st.session_state.chat_history:
st.text(f"{message['role'].capitalize()}: {message['content']}")
# Add a clear button for chat history
if st.button("Clear Conversation History"):
    st.session_state.chat_history = []
    st.success("Conversation history cleared!")
# Add a feedback section
st.subheader("Feedback")
feedback = st.text_area("How can we improve? (Optional)")
if st.button("Submit Feedback"):
    # Here you would typically send this feedback to a database or email
    st.success("Thank you for your feedback!")
st.markdown("""
<div style='text-align: center; margin-top: 2rem; color: #6c757d;'>
Crafted with ❤️ by Your Advanced AI Code Assistant
</div>
""", unsafe_allow_html=True)
st.markdown('</div>', unsafe_allow_html=True)