File size: 26,615 Bytes
19a862c
3ba6123
19a862c
 
 
5772021
19a862c
5772021
fa541aa
5772021
3ba6123
 
 
060ac05
 
 
3ba6123
 
 
 
fa541aa
19a862c
 
 
ebcf47f
 
 
060ac05
4bfa00d
060ac05
 
 
 
 
ebcf47f
060ac05
 
4bfa00d
060ac05
fa541aa
3ba6123
 
 
 
f8a195e
060ac05
3ba6123
19a862c
060ac05
71d66b0
19a862c
71d66b0
19a862c
ebcf47f
 
060ac05
 
 
 
 
 
19a862c
5772021
 
 
 
 
19a862c
f8a195e
 
 
 
 
19a862c
 
 
4bfa00d
060ac05
f8a195e
ebcf47f
4bfa00d
 
19a862c
3ba6123
19a862c
 
35c0ea2
19a862c
 
 
ebcf47f
3ba6123
 
 
19a862c
 
 
 
5772021
 
f8a195e
19a862c
5772021
 
19a862c
5772021
19a862c
5772021
 
 
 
 
19a862c
5772021
19a862c
5772021
19a862c
71d66b0
19a862c
 
 
 
060ac05
 
 
5772021
 
 
 
4bfa00d
5772021
 
4bfa00d
5772021
19a862c
 
3ba6123
19a862c
 
35c0ea2
4bfa00d
5772021
19a862c
 
3ba6123
71d66b0
5772021
 
f8a195e
5772021
 
 
19a862c
 
35c0ea2
5772021
060ac05
ebcf47f
4bfa00d
ebcf47f
4bfa00d
 
 
ebcf47f
5772021
4bfa00d
19a862c
35c0ea2
ebcf47f
5772021
4bfa00d
ebcf47f
f8a195e
 
ebcf47f
 
f8a195e
5772021
f8a195e
 
 
 
4bfa00d
35c0ea2
ebcf47f
 
5772021
ebcf47f
5772021
ebcf47f
5772021
4bfa00d
ebcf47f
5772021
ebcf47f
5772021
 
 
19a862c
5772021
ebcf47f
5772021
ebcf47f
 
 
 
 
5772021
 
 
 
ebcf47f
19a862c
5772021
 
ebcf47f
 
5772021
ebcf47f
5772021
ebcf47f
5772021
ebcf47f
 
 
5772021
19a862c
 
ebcf47f
5772021
ebcf47f
5772021
ebcf47f
5772021
ebcf47f
5772021
ebcf47f
 
 
 
 
5772021
ebcf47f
5772021
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ebcf47f
 
5772021
 
 
 
 
ebcf47f
 
 
 
5772021
ebcf47f
 
 
5772021
ebcf47f
5772021
ebcf47f
 
 
 
 
5772021
ebcf47f
 
5772021
ebcf47f
 
5772021
 
ebcf47f
 
5772021
 
ebcf47f
 
5772021
ebcf47f
 
 
5772021
ebcf47f
5772021
 
ebcf47f
 
 
 
5772021
ebcf47f
 
 
 
5772021
ebcf47f
 
5772021
ebcf47f
 
19a862c
 
060ac05
5772021
35c0ea2
4bfa00d
5772021
 
 
19a862c
 
 
3ba6123
f8a195e
19a862c
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
    <title>AI Assistant (Gemma 3 1B - Strict Load Attempt)</title>
    <style>
        /* CSS styles remain the same as the previous valid version */
        @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
        :root { /* Using the neutral blue theme */
            --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
            --bg-color: #f8f9fa; --user-msg-bg: #e7f5ff; --user-msg-text: #004085;
            --bot-msg-bg: #ffffff; --bot-msg-border: #dee2e6; --system-msg-color: #6c757d;
            --error-color: #721c24; --error-bg: #f8d7da; --error-border: #f5c6cb;
            --warning-color: #856404; --warning-bg: #fff3cd; --warning-border: #ffeeba;
            --success-color: #155724; --success-bg: #d4edda; --success-border: #c3e6cb;
            --border-color: #dee2e6; --input-bg: #ffffff; --input-border: #ced4da;
            --button-bg: var(--primary-color); --button-hover-bg: #0056b3; --button-disabled-bg: #adb5bd;
            --scrollbar-thumb: var(--primary-color); --scrollbar-track: #e9ecef;
            --header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
            --container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07);
        }
        * { box-sizing: border-box; margin: 0; padding: 0; }
        html { height: 100%; }
        body { font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start; min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none; }
        #control-panel { background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px; box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color); text-align: center; }
        #loadModelButton { padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px; }
        #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
        #loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; }
        #model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; }
        #model-status.info { background-color: #e2e3e5; border: 1px solid #d6d8db; color: #383d41; }
        #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
        #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
        #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
        #chat-container { width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff; border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column; overflow: hidden; border: 1px solid var(--border-color); }
        h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; }
        #chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); }
        #chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; }
        #messages div { padding: 10px 15px; border-radius: 16px; max-width: 85%; word-wrap: break-word; line-height: 1.5; font-size: 1em; box-shadow: 0 1px 2px rgba(0,0,0,0.05); position: relative; animation: fadeIn 0.25s ease-out; }
        @keyframes fadeIn { from { opacity: 0; transform: translateY(5px); } to { opacity: 1; transform: translateY(0); } }
        .user-message { background: var(--user-msg-bg); color: var(--user-msg-text); align-self: flex-end; border-bottom-right-radius: 4px; margin-left: auto; }
        .bot-message { background-color: var(--bot-msg-bg); border: 1px solid var(--bot-msg-border); align-self: flex-start; border-bottom-left-radius: 4px; margin-right: auto; }
        .bot-message a { color: var(--primary-color); text-decoration: none; } .bot-message a:hover { text-decoration: underline; }
        .system-message { font-style: italic; color: var(--system-msg-color); text-align: center; font-size: 0.85em; background-color: transparent; box-shadow: none; align-self: center; max-width: 100%; padding: 5px 0; animation: none; }
        .error-message { color: var(--error-color); font-weight: 500; background-color: var(--error-bg); border: 1px solid var(--error-border); padding: 10px 15px; border-radius: 8px; align-self: stretch; text-align: left; }
        #input-area { display: flex; padding: 10px 12px; border-top: 1px solid var(--border-color); background-color: var(--header-bg); align-items: center; gap: 8px; flex-shrink: 0; }
        #userInput { flex-grow: 1; padding: 10px 15px; border: 1px solid var(--input-border); border-radius: 20px; outline: none; font-size: 1em; font-family: 'Roboto', sans-serif; background-color: var(--input-bg); transition: border-color 0.2s ease; min-height: 42px; resize: none; overflow-y: auto; }
        #userInput:focus { border-color: var(--primary-color); }
        .control-button { padding: 0; border: none; border-radius: 50%; cursor: pointer; background-color: var(--button-bg); color: white; width: 42px; height: 42px; font-size: 1.3em; display: flex; align-items: center; justify-content: center; flex-shrink: 0; transition: background-color 0.2s ease, transform 0.1s ease; box-shadow: 0 1px 2px rgba(0,0,0,0.08); }
        .control-button:hover:not(:disabled) { background-color: var(--button-hover-bg); transform: translateY(-1px); }
        .control-button:active:not(:disabled) { transform: scale(0.95); }
        .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
        #toggleSpeakerButton.muted { background-color: #aaa; }
        @media (max-width: 600px) { /* Responsive styles */
             body { padding: 5px; justify-content: flex-start; } #control-panel { margin-bottom: 5px; padding: 12px; }
             #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
             h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; }
             #messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;}
             #input-area { padding: 8px; gap: 5px; } #userInput { padding: 9px 14px; min-height: 40px; }
             .control-button { width: 40px; height: 40px; font-size: 1.2em; }
         }
    </style>
    <!--
      Import Map: Specifies how to resolve the '@xenova/transformers' module specifier.
      We load the latest stable version directly from the jsDelivr CDN.
      This is a standard way to load ES modules in modern browsers without a build step.
    -->
    <script type="importmap">
    {
      "imports": {
        "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest"
      }
    }
    </script>
</head>
<body>
    <div id="control-panel">
        <h2>Model Loader</h2>
        <button id="loadModelButton">Load Gemma 3 1B Model (Q4)</button>
        <div id="model-status" class="info">Click button to load Gemma 3 1B (using Transformers.js v3+ &amp; Q4 dtype). <strong>Warning:</strong> Model loading is expected to fail due to library incompatibility.</div>
    </div>

    <div id="chat-container">
        <h1 id="chatbot-name">AI Assistant</h1>
        <div id="chatbox">
            <div id="messages">
                <!-- Chat messages appear here -->
            </div>
        </div>
        <div id="input-area">
             <textarea id="userInput" placeholder="Please attempt to load the model first..." rows="1" disabled></textarea>
            <button id="speechButton" class="control-button" title="Speak message" disabled>🎤</button>
            <button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>🔊</button>
            <button id="sendButton" class="control-button" title="Send message" disabled>➀</button>
        </div>
    </div>

    <script type="module">
        // Import necessary functions from the loaded library.
        // This relies on the import map defined above.
        import { pipeline, env } from '@xenova/transformers';

        // --- Configuration ---
        const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA'; // Hugging Face Hub repo id of the ONNX export
        const TASK = 'text-generation';
        const QUANTIZATION = 'q4'; // 4-bit quantized weights, per the model card example

        // --- Environment Setup ---
        // Transformers.js environment flags: model source policy, caching, ONNX backends.
        env.allowLocalModels = false; // Fetch models from the Hub/CDN only, never local paths
        env.useBrowserCache = true;   // Cache downloaded model files in the browser
        env.backends.onnx.executionProviders = ['webgpu', 'wasm']; // Prefer WebGPU, fall back to WASM
        console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
        // NOTE(review): this flag is not part of the documented Transformers.js env API —
        // verify it actually has any effect in the library version resolved by the import map.
        env.backends.onnx.prefer_alternative_execution_providers = true; // Try WebGPU first if available

        // --- DOM Elements ---
        const chatbox = document.getElementById('messages');   // message list; chat bubbles are appended here
        const userInput = document.getElementById('userInput');
        const sendButton = document.getElementById('sendButton');
        const chatbotNameElement = document.getElementById('chatbot-name');
        const speechButton = document.getElementById('speechButton');               // voice-input (microphone) button
        const toggleSpeakerButton = document.getElementById('toggleSpeakerButton'); // speech-output on/off toggle
        const modelStatus = document.getElementById('model-status');
        const loadModelButton = document.getElementById('loadModelButton');

        // --- State Management ---
        let generator = null;       // Holds the loaded text-generation pipeline once loading succeeds
        let isLoadingModel = false; // Flag to prevent concurrent loading attempts
        let conversationHistory = []; // Chat history as {role, content} message objects
        let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
        const stateKey = 'gemma3_1b_strict_state_v1'; // localStorage key for botState (unique per app version)
        const historyKey = 'gemma3_1b_strict_history_v1'; // localStorage key for conversationHistory

        // --- Web Speech API ---
        let recognition = null;                 // SpeechRecognition instance (null when unsupported)
        let synthesis = window.speechSynthesis; // SpeechSynthesis handle (may be undefined)
        let targetVoice = null;                 // preferred English voice, set by findAndSetVoice()
        let isListening = false;                // true while speech recognition is active

        // --- Initialization ---
        /**
         * One-time app initialization: restore persisted state, wire UI handlers,
         * and leave chat controls disabled until a model has been loaded.
         */
        function initializeApp() {
            loadState();
            chatbotNameElement.textContent = botState.botName;
            updateSpeakerButtonUI();
            initializeSpeechAPI();
            setupInputAutosize();
            updateChatUIState(false); // chat stays locked until the model loads
            displayHistory();
            setTimeout(loadVoices, 500); // voices may not be ready at load time
            loadModelButton.addEventListener('click', handleLoadModelClick);
            console.log("Attempting to use Transformers.js library loaded via import map.");
            displayMessage('system', `Using Transformers.js (latest). Ready to attempt loading ${MODEL_NAME}.`, false);
        }
        window.addEventListener('load', initializeApp);

        // --- State Persistence ---
        /**
         * Restores bot settings and conversation history from localStorage.
         * Corrupt or missing entries silently fall back to the in-memory defaults.
         */
        function loadState() {
            const rawState = localStorage.getItem(stateKey);
            if (rawState) {
                try {
                    const loaded = JSON.parse(rawState);
                    // Merge shallowly, but keep nested botSettings defaults intact.
                    botState = {
                        ...botState,
                        ...loaded,
                        botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) },
                    };
                } catch (e) { /* corrupt saved state — keep defaults */ }
            }
            const rawHistory = localStorage.getItem(historyKey);
            if (rawHistory) {
                try {
                    conversationHistory = JSON.parse(rawHistory);
                    if (!Array.isArray(conversationHistory)) conversationHistory = [];
                } catch (e) {
                    conversationHistory = [];
                }
            }
        }
         /** Persists the current bot settings and conversation history to localStorage. */
         function saveState() {
             const entries = [
                 [stateKey, botState],
                 [historyKey, conversationHistory],
             ];
             for (const [key, value] of entries) {
                 localStorage.setItem(key, JSON.stringify(value));
             }
         }
         /** Re-renders the stored conversation into the chat pane without animations. */
         function displayHistory() {
             chatbox.innerHTML = '';
             for (const msg of conversationHistory) {
                 // Only user/assistant turns are rendered; system entries are skipped.
                 if (msg.role !== 'user' && msg.role !== 'assistant') continue;
                 const sender = msg.role === 'user' ? 'user' : 'bot';
                 displayMessage(sender, msg.content, false);
             }
         }

        // --- UI Update Functions ---
        /**
         * Appends one chat bubble to the message list and scrolls it into view.
         * @param sender  'user' | 'bot' | 'system'
         * @param text    raw message text; a tiny markdown subset (links, **bold**,
         *                *italic*, newlines) is converted to HTML after escaping
         * @param animate play the fade-in animation and smooth-scroll
         * @param isError render a system message in the error style
         */
        function displayMessage(sender, text, animate = true, isError = false) {
             const messageDiv = document.createElement('div');
             let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
             if (sender === 'system' && isError) messageClass = 'error-message';
             messageDiv.classList.add(messageClass);
             if (!animate) messageDiv.style.animation = 'none';
             // BUG FIX: the original "escaping" replaced '<' with '<' and '>' with '>'
             // (a no-op), so raw user/model text reached innerHTML unescaped — an XSS
             // vector. Escape HTML metacharacters first ('&' before '<'/'>'), then
             // apply the lightweight markdown-to-HTML conversions.
             text = text.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
             text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>');
             text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>');
             text = text.replace(/\n/g, '<br>');
             messageDiv.innerHTML = text;
             chatbox.appendChild(messageDiv);
             chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
         }
        /** Shows a status line in the model panel and mirrors it to the console. */
        function updateModelStatus(message, type = 'info') {
            modelStatus.textContent = message;
            modelStatus.className = 'model-status ' + type;
            console.log(`Model Status (${type}): ${message}`);
        }
        /**
         * Enables or disables every chat control from one place.
         * @param isModelLoadedSuccessfully true once the pipeline is usable
         */
        function updateChatUIState(isModelLoadedSuccessfully) {
            const unavailable = !isModelLoadedSuccessfully || isLoadingModel;
            userInput.disabled = unavailable;
            sendButton.disabled = unavailable || userInput.value.trim() === '';
            speechButton.disabled = unavailable || isListening || !recognition;
            toggleSpeakerButton.disabled = unavailable || !synthesis;
            loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully;
            if (isModelLoadedSuccessfully) {
                userInput.placeholder = "How can I help you today?";
            } else if (isLoadingModel) {
                userInput.placeholder = "Model loading...";
            } else {
                userInput.placeholder = "Please attempt to load the model first...";
            }
        }
        /**
         * Syncs the speaker-toggle button (icon, tooltip, muted style) with the
         * current speech-output setting.
         * FIX: the original icon literals were garbled multi-byte sequences from a
         * bad encoding round-trip; restored the intended speaker emojis.
         */
        function updateSpeakerButtonUI() {
            const speechOn = botState.botSettings.useSpeechOutput;
            toggleSpeakerButton.textContent = speechOn ? '🔊' : '🔇';
            toggleSpeakerButton.title = speechOn ? 'Turn off AI speech' : 'Turn on AI speech';
            toggleSpeakerButton.classList.toggle('muted', !speechOn);
        }
        /** Debug hook: speech status is only logged to the console, never shown in the UI. */
        function showSpeechStatus(message) {
            console.log("Speech Status:", message);
        }
        /** Grows the textarea with its content and refreshes button states on every input. */
        function setupInputAutosize() {
            userInput.addEventListener('input', () => {
                userInput.style.height = 'auto'; // collapse first so shrinking works
                userInput.style.height = userInput.scrollHeight + 'px';
                updateChatUIState(generator !== null);
            });
        }

        // --- Model & AI Logic ---
        /**
         * Click handler for the "Load Model" button. Guards against re-entry,
         * attempts to load the pipeline, then re-enables the UI.
         * FIX: the loading flag is now cleared in a `finally` block, so an
         * unexpected rejection from initializeModel can no longer leave
         * isLoadingModel stuck at true with the whole UI permanently disabled.
         */
        async function handleLoadModelClick() {
            if (isLoadingModel || generator) return;
            isLoadingModel = true;
            generator = null; // drop any stale pipeline reference
            updateChatUIState(false);
            try {
                await initializeModel(MODEL_NAME); // attempt to load
            } finally {
                isLoadingModel = false;
                updateChatUIState(generator !== null); // reflect success or failure
            }
        }

        // Initialize model exactly as per documentation example for this model
        /**
         * Loads the text-generation pipeline exactly as the model card documents
         * (dtype q4), reporting progress and classifying known failure modes.
         * On failure, `generator` is left null and an error is shown in the UI.
         * @param modelId Hugging Face Hub repo id to load
         */
        async function initializeModel(modelId) {
            updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" }... (Strict doc example)`, 'loading');
            displayMessage('system', `Attempting to load ${modelId} using documented method (dtype: ${QUANTIZATION})...`, false);

            try {
                // Directly use the pipeline function as shown in the model card.
                generator = await pipeline(TASK, modelId, {
                    dtype: QUANTIZATION, // explicitly request q4 weights
                    progress_callback: (progress) => {
                         const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
                         updateModelStatus(msg, 'loading');
                     }
                });

                updateModelStatus(`${modelId} loaded successfully!`, 'success');
                displayMessage('system', `[SUCCESS] ${modelId} loaded. The environment might be different or the library was updated.`, false);

            } catch (error) {
                console.error(`Model loading failed for ${modelId} (Strict Attempt):`, error);
                // FIX: non-Error throws (strings, plain objects) have no .message, so the
                // original `error.message.includes(...)` could itself throw inside the
                // catch block. Coerce to a string before classifying the failure.
                const detail = (error && typeof error.message === 'string') ? error.message : String(error);
                let errorMsg = `Failed to load ${modelId}: ${detail}.`;
                if (detail.includes("Unsupported model type") || detail.includes("gemma3_text")) {
                    errorMsg += " Confirmed: The 'gemma3_text' model type is unsupported by this library version.";
                } else if (detail.includes("split is not a function")) {
                    errorMsg += " Confirmed: TypeError during config parsing, likely due to unsupported 'gemma3_text' type.";
                } else {
                    errorMsg += " Unknown error. Check console and consider Space resource limits.";
                }
                updateModelStatus(errorMsg, 'error');
                displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
                generator = null; // ensure a clean state on failure
            }
        }

        // Build messages array as per documentation example
        /**
         * Builds the chat-style messages array the pipeline expects:
         * system prompt, then the running history, then the new user turn.
         * @param newUserMessage text of the user's latest message
         * @returns {Array<{role: string, content: string}>}
         */
        function buildMessages(newUserMessage) {
            const messages = [
                { role: "system", content: "You are a helpful assistant." },
                ...conversationHistory,
                { role: "user", content: newUserMessage },
            ];
            console.log("Input Messages:", messages);
            return messages;
        }

        // Cleanup response based on messages output format
        /**
         * Extracts the assistant's reply from the pipeline output.
         * Handles the chat-style shape (generated_text is a messages array) and the
         * legacy flat-string shape (generated_text includes the whole prompt);
         * returns a canned apology when neither yields a non-empty reply.
         * @param output pipeline result, typically [{ generated_text: ... }]
         * @returns {string} cleaned assistant text
         */
        function cleanupResponse(output) {
            try {
                const generated = output && output.length > 0 ? output[0].generated_text : undefined;

                // Chat shape: generated_text is the full messages array; take the last turn.
                if (Array.isArray(generated)) {
                    const last = generated.at(-1);
                    if (last && last.role === 'assistant' && typeof last.content === 'string') {
                        const reply = last.content.trim().replace(/<end_of_turn>/g, '').trim();
                        if (reply.length > 0) return reply;
                    }
                }

                // Legacy shape: one flat string. Keep only the text after the final
                // model turn, then strip the end-of-turn markers.
                if (typeof generated === 'string') {
                    const tail = generated.split("<start_of_turn>model").pop().trim();
                    const reply = tail.replace(/<end_of_turn>/g, '').trim();
                    if (reply.length > 0) return reply;
                }
            } catch (e) {
                console.error("Error parsing generator output structure:", e, "Output:", output);
            }
            console.warn("Could not reliably extract assistant response from output:", output);
            const fallbacks = [ "Sorry, response format was unexpected.", "My response might be garbled.", "Error processing the AI answer." ];
            return fallbacks[Math.floor(Math.random() * fallbacks.length)];
        }

        // --- Main Interaction Logic ---
        // --- Main Interaction Logic ---
        /**
         * Sends the user's message through the loaded pipeline and renders the reply.
         * FIX: the original called updateChatUIState(true) with a comment claiming it
         * disabled input during generation — it actually left the textarea enabled,
         * so pressing Enter mid-generation could start a second concurrent run. The
         * controls are now explicitly locked and restored in `finally`. The user turn
         * is also persisted immediately so it survives a generation error.
         */
        async function handleUserMessage() {
            const userText = userInput.value.trim();
            // Proceed only if there is text, the pipeline is loaded, and no load is in flight.
            if (!userText || !generator || isLoadingModel) return;

            userInput.value = '';
            userInput.style.height = 'auto';

            // Lock the chat controls for the duration of the generation.
            userInput.disabled = true;
            sendButton.disabled = true;
            speechButton.disabled = true;

            // Record the user turn in UI, history, and storage before generating.
            displayMessage('user', userText);
            conversationHistory.push({ role: 'user', content: userText });
            saveState();

            updateModelStatus("AI thinking...", "loading");

            const messages = buildMessages(userText); // chat-style messages array

            try {
                const outputs = await generator(messages, {
                    max_new_tokens: 300,
                    do_sample: true, // sampled decoding for more natural chat
                    temperature: 0.7,
                    top_k: 50,
                });

                const replyText = cleanupResponse(outputs); // normalize the output shape
                console.log("Cleaned AI Output:", replyText);

                displayMessage('bot', replyText);
                // 'assistant' matches the role the pipeline's chat template expects.
                conversationHistory.push({ role: 'assistant', content: replyText });

                if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) {
                    speakText(replyText);
                }
                saveState();

            } catch (error) {
                console.error("AI response generation error:", error);
                displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
                // No bot bubble on error; the system message covers it.
            } finally {
                if (generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
                updateChatUIState(generator !== null); // restore control states
                userInput.focus();
            }
        }

        // --- Speech API Functions ---
        /**
         * Wires up SpeechRecognition (voice input) and the speaker toggle
         * (speech output), degrading gracefully when either API is missing.
         */
        function initializeSpeechAPI() {
            const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
            if (SpeechRecognition) {
                recognition = new SpeechRecognition();
                recognition.lang = 'en-US';
                recognition.continuous = false;
                recognition.interimResults = false;
                recognition.onstart = () => {
                    isListening = true;
                    updateChatUIState(generator !== null);
                    console.log('Listening...');
                };
                recognition.onresult = (event) => {
                    userInput.value = event.results[0][0].transcript;
                    userInput.dispatchEvent(new Event('input')); // trigger autosize/state refresh
                    handleUserMessage();
                };
                recognition.onerror = (event) => {
                    console.error("Speech error:", event.error);
                    updateModelStatus(`Speech recognition error (${event.error})`, 'error');
                    setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000);
                };
                recognition.onend = () => {
                    isListening = false;
                    updateChatUIState(generator !== null);
                    console.log('Stopped listening.');
                };
            } else {
                console.warn("Speech Recognition not supported.");
            }
            if (!synthesis) {
                console.warn("Speech Synthesis not supported.");
            } else {
                toggleSpeakerButton.addEventListener('click', () => {
                    botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput;
                    updateSpeakerButtonUI();
                    saveState();
                    if (!botState.botSettings.useSpeechOutput) synthesis.cancel();
                });
            }
            updateChatUIState(false);
        }
          /** Fetches the synthesis voice list, waiting for onvoiceschanged when it loads lazily. */
          function loadVoices() {
              if (!synthesis) return;
              let voices = synthesis.getVoices();
              if (voices.length === 0) {
                  // Some browsers populate the voice list asynchronously.
                  synthesis.onvoiceschanged = () => {
                      voices = synthesis.getVoices();
                      findAndSetVoice(voices);
                  };
              } else {
                  findAndSetVoice(voices);
              }
          }
          /** Picks an en-US voice if available, otherwise any English variant. */
          function findAndSetVoice(voices) {
              const exactMatch = voices.find(v => v.lang === 'en-US');
              targetVoice = exactMatch || voices.find(v => v.lang.startsWith('en-'));
              if (targetVoice) {
                  console.log("Using English voice:", targetVoice.name, targetVoice.lang);
              } else {
                  console.warn("No suitable English voice found.");
              }
          }
          /** Speaks `text` with the selected voice, cancelling any utterance in progress. */
          function speakText(text) {
              if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return;
              synthesis.cancel(); // stop anything already being spoken
              const utterance = new SpeechSynthesisUtterance(text);
              utterance.voice = targetVoice;
              utterance.lang = targetVoice.lang;
              utterance.rate = 1.0;
              utterance.pitch = 1.0;
              synthesis.speak(utterance);
          }

        // --- Event Listeners ---
        sendButton.addEventListener('click', handleUserMessage);
        // FIX: 'keypress' is deprecated and inconsistently fired across browsers;
        // 'keydown' reliably reports Enter while Shift+Enter still inserts a newline.
        userInput.addEventListener('keydown', (e) => {
            if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                handleUserMessage();
            }
        });
        // Microphone button: start recognition only when idle and a model is ready.
        speechButton.addEventListener('click', () => {
            if (recognition && !isListening && generator && !isLoadingModel) {
                try {
                    recognition.start();
                } catch (error) {
                    console.error("Rec start fail:", error);
                    updateModelStatus(`Failed to start recognition`, 'error');
                    setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 2000);
                    isListening = false;
                    updateChatUIState(generator !== null);
                }
            }
        });

    </script>
</body>
</html>