<!doctype html>
<html lang="en"> | |
<head> | |
<meta charset="UTF-8"> | |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>AI Assistant (Gemma 3 1B - Strict Load Attempt)</title> | |
<style> | |
/* CSS styles remain the same as the previous valid version */ | |
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap'); | |
:root { /* Using the neutral blue theme */ | |
--primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529; | |
--bg-color: #f8f9fa; --user-msg-bg: #e7f5ff; --user-msg-text: #004085; | |
--bot-msg-bg: #ffffff; --bot-msg-border: #dee2e6; --system-msg-color: #6c757d; | |
--error-color: #721c24; --error-bg: #f8d7da; --error-border: #f5c6cb; | |
--warning-color: #856404; --warning-bg: #fff3cd; --warning-border: #ffeeba; | |
--success-color: #155724; --success-bg: #d4edda; --success-border: #c3e6cb; | |
--border-color: #dee2e6; --input-bg: #ffffff; --input-border: #ced4da; | |
--button-bg: var(--primary-color); --button-hover-bg: #0056b3; --button-disabled-bg: #adb5bd; | |
--scrollbar-thumb: var(--primary-color); --scrollbar-track: #e9ecef; | |
--header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); | |
--container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07); | |
} | |
* { box-sizing: border-box; margin: 0; padding: 0; } | |
html { height: 100%; } | |
body { font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start; min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none; } | |
#control-panel { background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px; box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color); text-align: center; } | |
#loadModelButton { padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px; } | |
#loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); } | |
#loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; } | |
#model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; } | |
#model-status.info { background-color: #e2e3e5; border: 1px solid #d6d8db; color: #383d41; } | |
#model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); } | |
#model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); } | |
#model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); } | |
#chat-container { width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff; border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column; overflow: hidden; border: 1px solid var(--border-color); } | |
h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; } | |
#chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); } | |
#chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; } | |
#messages div { padding: 10px 15px; border-radius: 16px; max-width: 85%; word-wrap: break-word; line-height: 1.5; font-size: 1em; box-shadow: 0 1px 2px rgba(0,0,0,0.05); position: relative; animation: fadeIn 0.25s ease-out; } | |
@keyframes fadeIn { from { opacity: 0; transform: translateY(5px); } to { opacity: 1; transform: translateY(0); } } | |
.user-message { background: var(--user-msg-bg); color: var(--user-msg-text); align-self: flex-end; border-bottom-right-radius: 4px; margin-left: auto; } | |
.bot-message { background-color: var(--bot-msg-bg); border: 1px solid var(--bot-msg-border); align-self: flex-start; border-bottom-left-radius: 4px; margin-right: auto; } | |
.bot-message a { color: var(--primary-color); text-decoration: none; } .bot-message a:hover { text-decoration: underline; } | |
.system-message { font-style: italic; color: var(--system-msg-color); text-align: center; font-size: 0.85em; background-color: transparent; box-shadow: none; align-self: center; max-width: 100%; padding: 5px 0; animation: none; } | |
.error-message { color: var(--error-color); font-weight: 500; background-color: var(--error-bg); border: 1px solid var(--error-border); padding: 10px 15px; border-radius: 8px; align-self: stretch; text-align: left; } | |
#input-area { display: flex; padding: 10px 12px; border-top: 1px solid var(--border-color); background-color: var(--header-bg); align-items: center; gap: 8px; flex-shrink: 0; } | |
#userInput { flex-grow: 1; padding: 10px 15px; border: 1px solid var(--input-border); border-radius: 20px; outline: none; font-size: 1em; font-family: 'Roboto', sans-serif; background-color: var(--input-bg); transition: border-color 0.2s ease; min-height: 42px; resize: none; overflow-y: auto; } | |
#userInput:focus { border-color: var(--primary-color); } | |
.control-button { padding: 0; border: none; border-radius: 50%; cursor: pointer; background-color: var(--button-bg); color: white; width: 42px; height: 42px; font-size: 1.3em; display: flex; align-items: center; justify-content: center; flex-shrink: 0; transition: background-color 0.2s ease, transform 0.1s ease; box-shadow: 0 1px 2px rgba(0,0,0,0.08); } | |
.control-button:hover:not(:disabled) { background-color: var(--button-hover-bg); transform: translateY(-1px); } | |
.control-button:active:not(:disabled) { transform: scale(0.95); } | |
.control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; } | |
#toggleSpeakerButton.muted { background-color: #aaa; } | |
@media (max-width: 600px) { /* Responsive styles */ | |
body { padding: 5px; justify-content: flex-start; } #control-panel { margin-bottom: 5px; padding: 12px; } | |
#chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; } | |
h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; } | |
#messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;} | |
#input-area { padding: 8px; gap: 5px; } #userInput { padding: 9px 14px; min-height: 40px; } | |
.control-button { width: 40px; height: 40px; font-size: 1.2em; } | |
} | |
</style> | |
<!-- | |
Import Map: Specifies how to resolve the '@xenova/transformers' module specifier. | |
We load the latest stable version directly from the jsDelivr CDN. | |
This is a standard way to load ES modules in modern browsers without a build step. | |
--> | |
<script type="importmap"> | |
{ | |
"imports": { | |
"@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest" | |
} | |
} | |
</script> | |
</head> | |
<body> | |
<div id="control-panel"> | |
<h2>Model Loader</h2> | |
<button id="loadModelButton">Load Gemma 3 1B Model (Q4)</button> | |
<div id="model-status" class="info">Click button to load Gemma 3 1B (using Transformers.js v3+ &amp; Q4 dtype). <strong>Warning:</strong> Model loading is expected to fail due to library incompatibility.</div>
</div> | |
<div id="chat-container"> | |
<h1 id="chatbot-name">AI Assistant</h1> | |
<div id="chatbox"> | |
<div id="messages"> | |
<!-- Chat messages appear here --> | |
</div> | |
</div> | |
<div id="input-area"> | |
<textarea id="userInput" placeholder="Please attempt to load the model first..." rows="1" disabled></textarea> | |
<button id="speechButton" class="control-button" title="Speak message" disabled>🎤</button> | |
<button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>🔊</button> | |
<button id="sendButton" class="control-button" title="Send message" disabled>➤</button> | |
</div> | |
</div> | |
<script type="module"> | |
// Import necessary functions from the loaded library. | |
// This relies on the import map defined above. | |
// Import the text-generation pipeline factory and the global environment
// object from Transformers.js (resolved via the import map in <head>).
import { pipeline, env } from '@xenova/transformers';
// --- Configuration ---
const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA'; // The specific model requested
const TASK = 'text-generation';
const QUANTIZATION = 'q4'; // As specified in the model card example
// --- Environment Setup ---
// Basic setup for Transformers.js environment
env.allowLocalModels = false; // Only load from Hub/CDN
env.useBrowserCache = true; // Cache models in the browser
env.backends.onnx.executionProviders = ['webgpu', 'wasm']; // Prioritize WebGPU
console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
// NOTE(review): 'prefer_alternative_execution_providers' is not an obviously
// documented env option — confirm it exists in the installed library version
// (unknown flags are silently ignored, so this may be a no-op).
env.backends.onnx.prefer_alternative_execution_providers = true; // Try WebGPU first if available
// --- DOM Elements ---
// NOTE(review): `chatbox` actually points at the inner #messages container,
// not the scrolling #chatbox wrapper — keep that in mind when scrolling.
const chatbox = document.getElementById('messages');
const userInput = document.getElementById('userInput');
const sendButton = document.getElementById('sendButton');
const chatbotNameElement = document.getElementById('chatbot-name');
const speechButton = document.getElementById('speechButton');
const toggleSpeakerButton = document.getElementById('toggleSpeakerButton');
const modelStatus = document.getElementById('model-status');
const loadModelButton = document.getElementById('loadModelButton');
// --- State Management ---
let generator = null; // Holds the loaded pipeline if successful
let isLoadingModel = false; // Flag to prevent concurrent loading attempts
let conversationHistory = []; // Stores chat history in messages format ({role, content})
let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
const stateKey = 'gemma3_1b_strict_state_v1'; // Unique key for this version
const historyKey = 'gemma3_1b_strict_history_v1'; // localStorage key for the transcript
// --- Web Speech API ---
let recognition = null; // SpeechRecognition instance (null when unsupported)
let synthesis = window.speechSynthesis; // undefined when synthesis is unsupported
let targetVoice = null; // Preferred English voice, set by findAndSetVoice()
let isListening = false; // True while the microphone is active
// --- Initialization ---
// One-time page setup: restore persisted state, wire up the UI and speech
// APIs, and leave the chat disabled until the model is loaded via the button.
window.addEventListener('load', () => {
    loadState();
    chatbotNameElement.textContent = botState.botName;
    updateSpeakerButtonUI();
    initializeSpeechAPI();
    setupInputAutosize();
    updateChatUIState(false); // Initially, UI is disabled
    displayHistory();
    setTimeout(loadVoices, 500); // voices often populate late; retry shortly after load
    loadModelButton.addEventListener('click', handleLoadModelClick); // Attach button listener
    console.log("Attempting to use Transformers.js library loaded via import map.");
    displayMessage('system', `Using Transformers.js (latest). Ready to attempt loading ${MODEL_NAME}.`, false);
});
// --- State Persistence ---
// Restore bot settings and chat history from localStorage. Corrupt or missing
// entries are tolerated: bad JSON simply leaves the in-memory defaults alone.
function loadState() {
    const rawState = localStorage.getItem(stateKey);
    if (rawState) {
        try {
            const parsed = JSON.parse(rawState);
            botState = {
                ...botState,
                ...parsed,
                // Merge nested settings so new defaults survive old saves.
                botSettings: { ...botState.botSettings, ...(parsed.botSettings || {}) },
            };
        } catch (e) {}
    }
    const rawHistory = localStorage.getItem(historyKey);
    if (rawHistory) {
        try {
            conversationHistory = JSON.parse(rawHistory);
            if (!Array.isArray(conversationHistory)) conversationHistory = [];
        } catch (e) {
            conversationHistory = [];
        }
    }
}
// Persist settings and transcript so a page reload resumes the conversation.
function saveState() {
    const entries = [
        [stateKey, botState],
        [historyKey, conversationHistory],
    ];
    for (const [key, value] of entries) {
        localStorage.setItem(key, JSON.stringify(value));
    }
}
// Re-render the saved conversation into an emptied message list (no animation,
// since these are historical messages rather than new arrivals).
function displayHistory() {
    chatbox.innerHTML = '';
    for (const msg of conversationHistory) {
        if (msg.role !== 'user' && msg.role !== 'assistant') continue;
        displayMessage(msg.role === 'user' ? 'user' : 'bot', msg.content, false);
    }
}
// --- UI Update Functions ---
// Render one chat bubble for sender 'user', 'bot', or 'system' and scroll it
// into view. `animate` disables the fade-in (used when replaying history);
// `isError` upgrades a system message to the error style.
function displayMessage(sender, text, animate = true, isError = false) {
    const messageDiv = document.createElement('div');
    let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
    if (sender === 'system' && isError) messageClass = 'error-message';
    messageDiv.classList.add(messageClass);
    if (!animate) messageDiv.style.animation = 'none';
    // FIX: escape HTML before the markdown transforms below. The previous code
    // replaced '<' with '<' (a no-op), so raw user/model text reached
    // innerHTML unescaped — an XSS hole. '&' must be escaped first so the
    // entities produced for '<'/'>' are not double-escaped.
    text = text.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
    // Lightweight markdown: [label](url), **bold**, *italic*, and newlines.
    text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>');
    text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>');
    text = text.replace(/\n/g, '<br>');
    messageDiv.innerHTML = text;
    chatbox.appendChild(messageDiv);
    // FIX: `chatbox` is the inner #messages div; the element that actually
    // scrolls (overflow-y: auto) is its parent #chatbox, so scroll that.
    const scrollHost = document.getElementById('chatbox') || chatbox;
    scrollHost.scrollTo({ top: scrollHost.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
}
// Show a status line in the control panel; `type` selects the color scheme
// ('info' | 'loading' | 'success' | 'error'). Mirrored to the console.
function updateModelStatus(message, type = 'info') {
    modelStatus.textContent = message;
    modelStatus.className = `model-status ${type}`;
    console.log(`Model Status (${type}): ${message}`);
}
// Enable/disable every interactive control from two facts: whether the model
// is loaded and whether a load attempt is currently in flight.
function updateChatUIState(isModelLoadedSuccessfully) {
    const busy = isLoadingModel;
    const notReady = !isModelLoadedSuccessfully || busy;
    userInput.disabled = notReady;
    sendButton.disabled = notReady || userInput.value.trim() === '';
    speechButton.disabled = notReady || isListening || !recognition;
    toggleSpeakerButton.disabled = notReady || !synthesis;
    loadModelButton.disabled = busy || isModelLoadedSuccessfully;
    if (isModelLoadedSuccessfully) {
        userInput.placeholder = "How can I help you today?";
    } else if (busy) {
        userInput.placeholder = "Model loading...";
    } else {
        userInput.placeholder = "Please attempt to load the model first...";
    }
}
// Sync the speaker toggle's icon, tooltip, and 'muted' styling with settings.
function updateSpeakerButtonUI() {
    const speechOn = botState.botSettings.useSpeechOutput;
    toggleSpeakerButton.textContent = speechOn ? '🔊' : '🔇';
    toggleSpeakerButton.title = speechOn ? 'Turn off AI speech' : 'Turn on AI speech';
    toggleSpeakerButton.classList.toggle('muted', !speechOn);
}
// Speech status currently only goes to the console (no dedicated UI element).
function showSpeechStatus(message) {
    console.log("Speech Status:", message);
}
// Grow the textarea to fit its content on every keystroke and refresh the
// send-button enabled state (it depends on whether the input is empty).
function setupInputAutosize() {
    userInput.addEventListener('input', () => {
        userInput.style.height = 'auto';
        userInput.style.height = `${userInput.scrollHeight}px`;
        updateChatUIState(generator !== null);
    });
}
// --- Model & AI Logic ---
// Click handler for the load button: guards against re-entry, runs the load
// attempt, then reflects the outcome (generator set or still null) in the UI.
async function handleLoadModelClick() {
    if (isLoadingModel || generator) return;
    generator = null; // Reset state before attempting
    isLoadingModel = true;
    updateChatUIState(false);
    await initializeModel(MODEL_NAME); // Attempt to load
    isLoadingModel = false;
    updateChatUIState(generator !== null); // Update UI based on outcome
}
// Initialize model exactly as per documentation example for this model.
// Attempts to build the text-generation pipeline for `modelId` with the q4
// dtype. On success the module-level `generator` holds the pipeline; on
// failure it is reset to null and the error is classified into a readable
// status message (the known gemma3_text incompatibility signatures are
// recognized explicitly).
async function initializeModel(modelId) {
    updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" }... (Strict doc example)`, 'loading');
    displayMessage('system', `Attempting to load ${modelId} using documented method (dtype: ${QUANTIZATION})...`, false);
    try {
        // Directly use the pipeline function as shown in the model card
        generator = await pipeline(TASK, modelId, {
            dtype: QUANTIZATION, // Explicitly use q4
            // Download/initialization progress is mirrored into the status box.
            progress_callback: (progress) => {
                const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
                updateModelStatus(msg, 'loading');
            }
        });
        // If successful (still unlikely given previous errors)
        updateModelStatus(`${modelId} loaded successfully!`, 'success');
        displayMessage('system', `[SUCCESS] ${modelId} loaded. The environment might be different or the library was updated.`, false);
    } catch (error) {
        // Catch and report the inevitable error
        console.error(`Model loading failed for ${modelId} (Strict Attempt):`, error);
        let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
        // Classify known failure signatures for the 'gemma3_text' architecture.
        if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
            errorMsg += " Confirmed: The 'gemma3_text' model type is unsupported by this library version.";
        } else if (error.message.includes("split is not a function")) {
            errorMsg += " Confirmed: TypeError during config parsing, likely due to unsupported 'gemma3_text' type.";
        } else {
            errorMsg += " Unknown error. Check console and consider Space resource limits.";
        }
        updateModelStatus(errorMsg, 'error');
        displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
        generator = null; // Ensure it's null on failure
    }
}
// Assemble the chat transcript in the `messages` format the pipeline expects:
// a system prompt, then the stored conversation, then the new user turn.
function buildMessages(newUserMessage) {
    const messages = [
        { role: "system", content: "You are a helpful assistant." },
        ...conversationHistory,
        { role: "user", content: newUserMessage },
    ];
    console.log("Input Messages:", messages);
    return messages;
}
// Extract the assistant's reply text from the pipeline output. Handles both
// the chat-style shape ([{ generated_text: [..., {role:'assistant', ...}] }])
// and the legacy flat-string shape; when neither yields usable text, returns
// a randomly chosen canned apology.
function cleanupResponse(output) {
    try {
        const first = output && output.length > 0 ? output[0] : null;
        if (first && first.generated_text && Array.isArray(first.generated_text)) {
            const tail = first.generated_text.at(-1);
            if (tail && tail.role === 'assistant' && typeof tail.content === 'string') {
                const text = tail.content.trim().replace(/<end_of_turn>/g, '').trim();
                if (text.length > 0) return text;
            }
        }
        if (first && typeof first.generated_text === 'string') {
            // Legacy shape: keep only what follows the last model-turn marker,
            // then strip end-of-turn tokens.
            let text = first.generated_text.split("<start_of_turn>model").pop().trim();
            text = text.replace(/<end_of_turn>/g, '').trim();
            if (text.length > 0) return text;
        }
    } catch (e) {
        console.error("Error parsing generator output structure:", e, "Output:", output);
    }
    console.warn("Could not reliably extract assistant response from output:", output);
    const fallbacks = [ "Sorry, response format was unexpected.", "My response might be garbled.", "Error processing the AI answer." ];
    return fallbacks[Math.floor(Math.random() * fallbacks.length)];
}
// --- Main Interaction Logic ---
// Send the current input to the loaded pipeline and render the reply.
// No-op unless the model is loaded and idle. The input is disabled for the
// duration of generation and always re-enabled in `finally`, whether the
// generation succeeded or threw.
async function handleUserMessage() {
    const userText = userInput.value.trim();
    // Proceed only if generator is loaded and not currently loading
    if (!userText || !generator || isLoadingModel) return;
    userInput.value = ''; userInput.style.height = 'auto';
    updateChatUIState(true); // Disable input during generation
    // Add user message to UI and history
    displayMessage('user', userText);
    conversationHistory.push({ role: 'user', content: userText });
    updateModelStatus("AI thinking...", "loading");
    const messages = buildMessages(userText); // Use the messages array format
    try {
        // Call generator with messages array
        const outputs = await generator(messages, {
            max_new_tokens: 300,
            // Generation parameters from docs example:
            do_sample: true, // Typically true for more natural chat
            temperature: 0.7,
            top_k: 50,
            // repetition_penalty: 1.1, // Check if supported with messages format
            // top_p: 0.9, // Check if supported
        });
        const replyText = cleanupResponse(outputs); // Process the potentially complex output
        console.log("Cleaned AI Output:", replyText);
        // Add AI response to UI and history
        displayMessage('bot', replyText);
        // Ensure the role matches what the model/library uses ('assistant' is common)
        conversationHistory.push({ role: 'assistant', content: replyText });
        // Speak the reply only when enabled AND an English voice was found.
        if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) {
            speakText(replyText);
        }
        saveState();
    } catch (error) {
        console.error("AI response generation error:", error);
        displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
        // Don't add a bot message on error, the system message covers it
        // NOTE(review): the failed user turn stays in conversationHistory but
        // is not persisted (saveState is skipped) — confirm this is intended.
    } finally {
        if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
        updateChatUIState(generator !== null); // Re-enable UI
        userInput.focus();
    }
}
// --- Speech API Functions ---
// Wire up speech recognition (mic input) and speech synthesis (spoken output).
// Both are optional browser features; missing support just logs a warning and
// leaves the corresponding button disabled.
function initializeSpeechAPI() {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (SpeechRecognition) {
        recognition = new SpeechRecognition();
        recognition.lang = 'en-US';
        recognition.continuous = false;
        recognition.interimResults = false;
        recognition.onstart = () => {
            isListening = true;
            updateChatUIState(generator !== null);
            console.log('Listening...');
        };
        recognition.onresult = (event) => {
            // Drop the transcript into the input box and send it immediately.
            userInput.value = event.results[0][0].transcript;
            userInput.dispatchEvent(new Event('input'));
            handleUserMessage();
        };
        recognition.onerror = (event) => {
            console.error("Speech error:", event.error);
            updateModelStatus(`Speech recognition error (${event.error})`, 'error');
            // Restore the normal status line after a short delay.
            setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000);
        };
        recognition.onend = () => {
            isListening = false;
            updateChatUIState(generator !== null);
            console.log('Stopped listening.');
        };
    } else {
        console.warn("Speech Recognition not supported.");
    }
    if (!synthesis) {
        console.warn("Speech Synthesis not supported.");
    } else {
        toggleSpeakerButton.addEventListener('click', () => {
            botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput;
            updateSpeakerButtonUI();
            saveState();
            // Stop any in-progress speech the moment output is muted.
            if (!botState.botSettings.useSpeechOutput) synthesis.cancel();
        });
    }
    updateChatUIState(false);
}
// Voices can arrive asynchronously; if the list is empty now, wait for the
// onvoiceschanged event and pick a voice then.
function loadVoices() {
    if (!synthesis) return;
    const voices = synthesis.getVoices();
    if (voices.length > 0) {
        findAndSetVoice(voices);
    } else {
        synthesis.onvoiceschanged = () => findAndSetVoice(synthesis.getVoices());
    }
}
// Prefer an exact en-US voice; otherwise fall back to any English variant.
function findAndSetVoice(voices) {
    const exactMatch = voices.find(v => v.lang === 'en-US');
    targetVoice = exactMatch || voices.find(v => v.lang.startsWith('en-'));
    if (targetVoice) {
        console.log("Using English voice:", targetVoice.name, targetVoice.lang);
    } else {
        console.warn("No suitable English voice found.");
    }
}
// Speak `text` aloud with the selected voice, cancelling anything in progress.
// Silently returns when synthesis is unavailable, muted, or no voice was found.
function speakText(text) {
    if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return;
    synthesis.cancel();
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.voice = targetVoice;
    utterance.lang = targetVoice.lang;
    utterance.rate = 1.0;
    utterance.pitch = 1.0;
    synthesis.speak(utterance);
}
// --- Event Listeners ---
// Send on button click or plain Enter (Shift+Enter keeps inserting a newline).
sendButton.addEventListener('click', handleUserMessage);
userInput.addEventListener('keypress', (e) => { if (e.key === 'Enter' && !e.shiftKey) { e.preventDefault(); handleUserMessage(); } });
// Start the microphone only when recognition exists, we are not already
// listening, and the model is loaded and idle; on failure, surface the error
// briefly in the status line and reset the listening state.
speechButton.addEventListener('click', () => { if (recognition && !isListening && generator && !isLoadingModel) { try { recognition.start(); } catch (error) { console.error("Rec start fail:", error); updateModelStatus(`Failed to start recognition`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 2000); isListening = false; updateChatUIState(generator !== null); } } });
</script> | |
</body> | |
</html> |