kimhyunwoo committed
Commit d5b0fae · verified · 1 Parent(s): 32922f8

Update index.html

Files changed (1)
  1. index.html +278 -196
index.html CHANGED
@@ -2,243 +2,325 @@
  <html lang="en">
  <head>
  <meta charset="UTF-8">
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
- <title>Gemma Chatbot</title>
  <style>
- /* (Same good CSS as before) */
- body {
- font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
- display: flex;
- flex-direction: column;
- align-items: center;
- justify-content: center;
- min-height: 100vh;
- margin: 0;
- background-color: #f4f7f6;
  }

- h1 {
- color: #333;
- margin-bottom: 0.5em;
- }

- #chat-container {
- width: 90%;
- max-width: 700px;
- border: none;
- padding: 25px;
- border-radius: 15px;
- background-color: #fff;
- box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
- overflow: hidden;
- }

- #chat-messages {
- height: 400px;
- overflow-y: auto;
- margin-bottom: 15px;
- padding: 15px;
- border: 1px solid #e0e0e0;
- border-radius: 10px;
- background-color: #fafafa;
- }

- .message {
- margin-bottom: 10px;
- padding: 12px;
- border-radius: 20px;
- max-width: 70%;
- word-wrap: break-word;
- box-shadow: 0 1px 2px rgba(0,0,0,0.1);
- }

- .user-message {
- background-color: #d4e5ff;
- color: #000;
- text-align: right;
- align-self: flex-end;
- margin-left: auto;
- margin-right: 10px;
- }

- .bot-message {
- background-color: #e5e5ea;
- color: #000;
- text-align: left;
- align-self: flex-start;
- margin-right: auto;
- margin-left: 10px;
- }

- #input-container {
- display: flex;
- gap: 10px;
- padding-top: 15px;
- border-top: 1px solid #e0e0e0;
- }

- #user-input {
- flex-grow: 1;
- padding: 12px;
- border: 1px solid #ccc;
- border-radius: 20px;
- font-size: 16px;
- outline: none;
- transition: border-color 0.2s ease-in-out;
- }

- #user-input:focus {
- border-color: #007bff;
- }

- #send-button {
- padding: 12px 25px;
- background-color: #007bff;
- color: white;
- border: none;
- border-radius: 20px;
- cursor: pointer;
- font-size: 16px;
- transition: background-color 0.2s ease-in-out;
- }

- #send-button:hover {
- background-color: #0056b3;
  }
-
- #send-button:disabled {
- background-color: #cccccc;
- cursor: not-allowed;
  }
-
- #loading-indicator {
- display: none;
- margin-top: 15px;
- text-align: center;
- color: #888;
- font-style: italic;
  }

- .system-message {
- display: none;
  }
- </style>
- </head>
- <body>
- <h1>Gemma 3 Chatbot</h1>
-
- <div id="chat-container">
- <div id="chat-messages">
- <!-- Messages will appear here -->
- </div>
- <div id="input-container">
- <input type="text" id="user-input" placeholder="Type your message..." aria-label="Your message">
- <button id="send-button" aria-label="Send message">Send</button>
- </div>
- <div id="loading-indicator">Loading...</div>
- </div>

- <script type="module">
- import { pipeline, env } from "https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/dist/transformers.js";

- // Allow remote models and set a cache directory.
- env.allowRemoteModels = true;
- env.cacheDir = './cache';

- const chatMessagesDiv = document.getElementById('chat-messages');
- const userInput = document.getElementById('user-input');
- const sendButton = document.getElementById('send-button');
- const loadingIndicator = document.getElementById('loading-indicator');

- let chatHistory = [
- { role: "system", content: "You are a helpful assistant." },
- ];

- let generator;

- async function initializePipeline() {
- loadingIndicator.style.display = 'block';
  try {
- // Correct Gemma 3 model name and text-generation pipeline
- generator = await pipeline(
- "text-generation",
- "onnx-community/gemma-3-1b-it-ONNX",
- {
- dtype: "q4", // Quantization
- progress_callback: (progress) => {
- if (progress.status === 'progress') {
- loadingIndicator.textContent = `Loading... ${progress.file} - ${Math.round(progress.loaded / 1000000)}MB/${Math.round(progress.total / 1000000)}MB`;
- }
- if (progress.status === 'loaded') {
- loadingIndicator.textContent = 'Model Loaded!';
- // Optional: Hide after a short delay, so the user sees it.
- setTimeout(() => {
- loadingIndicator.style.display = 'none';
- }, 1500); // 1.5 seconds
- }
- },
- }
- );

- } catch (error) {
- console.error("Error loading model:", error);
- loadingIndicator.textContent = "Error loading model. Check console, ensure a modern browser, and internet connection.";
- loadingIndicator.style.color = "red";
- sendButton.disabled = true;
- return;
- }
- // Don't hide it here anymore: let the progress_callback and timeout handle it.
- addMessage("bot", "Hello! I'm ready to chat. Ask me anything!");
- }

- initializePipeline();

- function addMessage(role, content) {
- const messageDiv = document.createElement('div');
- messageDiv.classList.add('message', `${role}-message`);
- messageDiv.textContent = content;
- chatMessagesDiv.appendChild(messageDiv);
- chatMessagesDiv.scrollTop = chatMessagesDiv.scrollHeight; // Scroll to bottom

- if (role === 'user' || role === 'assistant') {
- chatHistory.push({ role: role, content: content });
- }
- }

- async function sendMessage() {
- const message = userInput.value.trim();
- if (!message || !generator) return;

- addMessage('user', message);
- userInput.value = '';
- loadingIndicator.style.display = 'block';
- loadingIndicator.textContent = 'Generating response...'; // Indicate response generation
- sendButton.disabled = true;

  try {
- const output = await generator(chatHistory, {
- max_new_tokens: 512,
- do_sample: false // Try setting to `true` for varied responses.
  });

- const botResponse = output[0].generated_text.at(-1).content; // Correctly access generated text
- addMessage('assistant', botResponse);

  } catch (error) {
- console.error("Error generating response:", error);
- addMessage('bot', "Sorry, I encountered an error. Please try again.");
  } finally {
- loadingIndicator.style.display = 'none';
- sendButton.disabled = false;
  }
  }

- sendButton.addEventListener('click', sendMessage);
- userInput.addEventListener('keypress', (event) => {
- if (event.key === 'Enter') {
- sendMessage();
- }
- });
  </script>
  </body>
  </html>

  <html lang="en">
  <head>
  <meta charset="UTF-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+ <title>AI Assistant (Gemma 3 1B - Exact Doc Example)</title>
  <style>
+ /* CSS is the same as before (only part of it shown, for readability) */
+ @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
+ :root { /* Using the neutral blue theme */
+ --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
+ --bg-color: #f8f9fa; --user-msg-bg: #e7f5ff; --user-msg-text: #004085;
+ --bot-msg-bg: #ffffff; --bot-msg-border: #dee2e6; --system-msg-color: #6c757d;
+ --error-color: #721c24; --error-bg: #f8d7da; --error-border: #f5c6cb;
+ --warning-color: #856404; --warning-bg: #fff3cd; --warning-border: #ffeeba;
+ --success-color: #155724; --success-bg: #d4edda; --success-border: #c3e6cb;
+ --border-color: #dee2e6; --input-bg: #ffffff; --input-border: #ced4da;
+ --button-bg: var(--primary-color); --button-hover-bg: #0056b3; --button-disabled-bg: #adb5bd;
+ --scrollbar-thumb: var(--primary-color); --scrollbar-track: #e9ecef;
+ --header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
+ --container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07);
  }
+ * { box-sizing: border-box; margin: 0; padding: 0; }
+ html { height: 100%; }
+ body { font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start; min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none; }
+ #control-panel { background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px; box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color); text-align: center; }
+ #loadModelButton { padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px; }
+ #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
+ #loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; }
+ #model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; }
+ #model-status.info { background-color: #e2e3e5; border: 1px solid #d6d8db; color: #383d41; }
+ #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
+ #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
+ #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
+ #chat-container { width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff; border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column; overflow: hidden; border: 1px solid var(--border-color); }
+ h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; }
+ #chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); }
+ #chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; }
+ #messages div { padding: 10px 15px; border-radius: 16px; max-width: 85%; word-wrap: break-word; line-height: 1.5; font-size: 1em; box-shadow: 0 1px 2px rgba(0,0,0,0.05); position: relative; animation: fadeIn 0.25s ease-out; }
+ @keyframes fadeIn { from { opacity: 0; transform: translateY(5px); } to { opacity: 1; transform: translateY(0); } }
+ .user-message { background: var(--user-msg-bg); color: var(--user-msg-text); align-self: flex-end; border-bottom-right-radius: 4px; margin-left: auto; }
+ .bot-message { background-color: var(--bot-msg-bg); border: 1px solid var(--bot-msg-border); align-self: flex-start; border-bottom-left-radius: 4px; margin-right: auto; }
+ .bot-message a { color: var(--primary-color); text-decoration: none; } .bot-message a:hover { text-decoration: underline; }
+ .system-message { font-style: italic; color: var(--system-msg-color); text-align: center; font-size: 0.85em; background-color: transparent; box-shadow: none; align-self: center; max-width: 100%; padding: 5px 0; animation: none; }
+ .error-message { color: var(--error-color); font-weight: 500; background-color: var(--error-bg); border: 1px solid var(--error-border); padding: 10px 15px; border-radius: 8px; align-self: stretch; text-align: left; }
+ #input-area { display: flex; padding: 10px 12px; border-top: 1px solid var(--border-color); background-color: var(--header-bg); align-items: center; gap: 8px; flex-shrink: 0; }
+ #userInput { flex-grow: 1; padding: 10px 15px; border: 1px solid var(--input-border); border-radius: 20px; outline: none; font-size: 1em; font-family: 'Roboto', sans-serif; background-color: var(--input-bg); transition: border-color 0.2s ease; min-height: 42px; resize: none; overflow-y: auto; }
+ #userInput:focus { border-color: var(--primary-color); }
+ .control-button { padding: 0; border: none; border-radius: 50%; cursor: pointer; background-color: var(--button-bg); color: white; width: 42px; height: 42px; font-size: 1.3em; display: flex; align-items: center; justify-content: center; flex-shrink: 0; transition: background-color 0.2s ease, transform 0.1s ease; box-shadow: 0 1px 2px rgba(0,0,0,0.08); }
+ .control-button:hover:not(:disabled) { background-color: var(--button-hover-bg); transform: translateY(-1px); }
+ .control-button:active:not(:disabled) { transform: scale(0.95); }
+ .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
+ #toggleSpeakerButton.muted { background-color: #aaa; }
+ @media (max-width: 600px) { /* Responsive styles */
+ body { padding: 5px; justify-content: flex-start; } #control-panel { margin-bottom: 5px; padding: 12px; }
+ #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
+ h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; }
+ #messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;}
+ #input-area { padding: 8px; gap: 5px; } #userInput { padding: 9px 14px; min-height: 40px; }
+ .control-button { width: 40px; height: 40px; font-size: 1.2em; }
+ }
+ </style>
+ <!-- Using LATEST version of Transformers.js via CDN -->
+ <script type="importmap">
+ {
+ "imports": {
+ "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest"
+ }
+ }
+ </script>
+ </head>
+ <body>
+ <div id="control-panel">
+ <h2>Model Loader</h2>
+ <!-- Button to explicitly trigger model loading -->
+ <button id="loadModelButton">Load Gemma 3 1B Model (Q4)</button>
+ <div id="model-status" class="info">Click button to load Gemma 3 1B using the exact method from the documentation. **Warning:** Loading is still expected to fail due to library incompatibility.</div>
+ </div>

+ <div id="chat-container">
+ <h1 id="chatbot-name">AI Assistant</h1>
+ <div id="chatbox">
+ <div id="messages">
+ <!-- Chat messages appear here -->
+ </div>
+ </div>
+ <div id="input-area">
+ <textarea id="userInput" placeholder="Please attempt to load the model first..." rows="1" disabled></textarea>
+ <button id="speechButton" class="control-button" title="Speak message" disabled>🎤</button>
+ <button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>🔊</button>
+ <button id="sendButton" class="control-button" title="Send message" disabled>➤</button>
+ </div>
+ </div>

+ <script type="module">
+ import { pipeline, env } from '@xenova/transformers';

+ // Configuration EXACTLY as per the model card example
+ const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA';
+ const TASK = 'text-generation';
+ const QUANTIZATION = 'q4';

+ // Environment setup
+ env.allowRemoteModels = true; // Usually default, but set explicitly
+ env.useBrowserCache = true;
+ env.backends.onnx.executionProviders = ['webgpu', 'wasm'];
+ console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
+ env.backends.onnx.prefer_alternative_execution_providers = true;

+ // DOM Elements
+ const chatbox = document.getElementById('messages');
+ const userInput = document.getElementById('userInput');
+ const sendButton = document.getElementById('sendButton');
+ const chatbotNameElement = document.getElementById('chatbot-name');
+ const speechButton = document.getElementById('speechButton');
+ const toggleSpeakerButton = document.getElementById('toggleSpeakerButton');
+ const modelStatus = document.getElementById('model-status');
+ const loadModelButton = document.getElementById('loadModelButton');

+ // State
+ let generator = null;
+ let isLoadingModel = false;
+ // Store history as { role: 'user' | 'assistant' | 'system', content: '...' }
+ let conversationHistory = [];
+ let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
+ const stateKey = 'gemma3_1b_exact_doc_state_v1';
+ const historyKey = 'gemma3_1b_exact_doc_history_v1';

+ // Speech API
+ let recognition = null;
+ let synthesis = window.speechSynthesis;
+ let targetVoice = null;
+ let isListening = false;

+ // --- Initialization ---
+ window.addEventListener('load', () => {
+ loadState();
+ chatbotNameElement.textContent = botState.botName;
+ updateSpeakerButtonUI();
+ initializeSpeechAPI();
+ setupInputAutosize();
+ updateChatUIState(false); // Initial state: disabled
+ displayHistory();
+ setTimeout(loadVoices, 500);
+ loadModelButton.addEventListener('click', handleLoadModelClick);
+ console.log("Attempting to use Transformers.js (latest) loaded via import map.");
+ displayMessage('system', `Using latest Transformers.js. Ready to load ${MODEL_NAME} exactly as per doc example.`, false);
+ });

+ // --- State Persistence ---
+ function loadState() {
+ const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
+ const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); if (!Array.isArray(conversationHistory)) conversationHistory = []; } catch(e) { conversationHistory = []; } }
+ }
+ function saveState() {
+ localStorage.setItem(stateKey, JSON.stringify(botState));
+ localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
+ }
+ function displayHistory() {
+ chatbox.innerHTML = '';
+ conversationHistory.forEach(msg => {
+ if (msg.role === 'user' || msg.role === 'assistant') {
+ displayMessage(msg.role === 'user' ? 'user' : 'bot', msg.content, false);
+ }
+ });
+ }

+ // --- UI Update Functions ---
+ function displayMessage(sender, text, animate = true, isError = false) {
+ const messageDiv = document.createElement('div'); let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message'; if (sender === 'system' && isError) messageClass = 'error-message'; messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none'; text = text.replace(/</g, "&lt;").replace(/>/g, "&gt;"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>'); messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
+ }
+ function updateModelStatus(message, type = 'info') {
+ modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
  }
+ function updateChatUIState(isModelLoadedSuccessfully) {
+ userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please attempt to load the model first..."; }
  }
+ function updateSpeakerButtonUI() {
+ toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
  }
+ function showSpeechStatus(message) { console.log("Speech Status:", message); }
+ function setupInputAutosize() { userInput.addEventListener('input', () => { userInput.style.height = 'auto'; userInput.style.height = userInput.scrollHeight + 'px'; updateChatUIState(generator !== null); }); }

+ // --- Model & AI Logic ---
+ async function handleLoadModelClick() {
+ if (isLoadingModel || generator) return;
+ isLoadingModel = true; generator = null;
+ updateChatUIState(false);
+ await initializeModel(MODEL_NAME);
+ isLoadingModel = false;
+ updateChatUIState(generator !== null);
  }

+ // Initialize model EXACTLY as per the documentation example
+ async function initializeModel(modelId) {
+ updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" }... (Strict doc example)`, 'loading');
+ displayMessage('system', `Attempting to load ${modelId} using documented method (dtype: ${QUANTIZATION})...`, false);

+ try {
+ // Pipeline creation EXACTLY as in the example
+ generator = await pipeline(TASK, modelId, {
+ dtype: QUANTIZATION,
+ progress_callback: (progress) => {
+ const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
+ updateModelStatus(msg, 'loading');
+ }
+ });

+ updateModelStatus(`${modelId} loaded successfully!`, 'success');
+ displayMessage('system', `[SUCCESS] ${modelId} loaded.`, false);

+ } catch (error) {
+ console.error(`Model loading failed for ${modelId} (Strict Attempt):`, error);
+ let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
+ if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
+ errorMsg += " As expected, the 'gemma3_text' model type is likely unsupported.";
+ } else if (error.message.includes("split is not a function")) {
+ errorMsg += " As expected, a TypeError occurred during config parsing (incompatibility).";
+ } else {
+ errorMsg += " Unknown error. Check console/network/memory.";
+ }
+ updateModelStatus(errorMsg, 'error');
+ displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
+ generator = null;
+ }
+ }

+ // Build messages array EXACTLY as per documentation example
+ function buildMessages(newUserMessage) {
+ // Start with system prompt, add history, then user message
+ let messages = [{ role: "system", content: "You are a helpful assistant." }];
+ // Append history (already in correct format)
+ messages = messages.concat(conversationHistory);
+ // Append new user message
+ messages.push({ role: "user", content: newUserMessage });
+ console.log("Input Messages for Pipeline:", messages);
+ return messages;
+ }

+ // Cleanup response EXACTLY as per documentation example (with safety checks)
+ function cleanupResponse(output) {
  try {
+ // Check the structure expected when using messages input
+ if (output && output.length > 0 && output[0].generated_text && Array.isArray(output[0].generated_text)) {
+ // Use .at(-1) to get the last element, which should be the assistant's response
+ const lastMessage = output[0].generated_text.at(-1);
+ if (lastMessage && (lastMessage.role === 'assistant' || lastMessage.role === 'model') && typeof lastMessage.content === 'string') {
+ let content = lastMessage.content.trim();
+ // Optional: Remove potential trailing artifacts if needed
+ content = content.replace(/<end_of_turn>/g, '').trim();
+ if (content.length > 0) return content;
+ }
+ }
+ } catch (e) { console.error("Error parsing generator output with .at(-1):", e, "Output:", output); }

+ // Fallback if the specific structure isn't found
+ console.warn("Could not extract response using output[0].generated_text.at(-1).content. Output structure might differ or generation failed.", output);
+ const fallbacks = [ "Sorry, response format was unexpected.", "My response might be garbled.", "Error processing the AI answer." ];
+ return fallbacks[Math.floor(Math.random() * fallbacks.length)];
+ }

+ // --- Main Interaction Logic ---
+ async function handleUserMessage() {
+ const userText = userInput.value.trim();
+ if (!userText || !generator || isLoadingModel) return; // Check if generator is ready

+ userInput.value = ''; userInput.style.height = 'auto';
+ updateChatUIState(true); // Disable input

+ // Add user message to UI and history
+ displayMessage('user', userText);
+ conversationHistory.push({ role: 'user', content: userText });

+ updateModelStatus("AI thinking...", "loading");

+ // Prepare messages array
+ const messages = buildMessages(userText);

  try {
+ // Call generator EXACTLY as in the example (with messages array)
+ const outputs = await generator(messages, {
+ max_new_tokens: 512, // From example
+ do_sample: false // From example
+ // Add other parameters like temperature if needed for sampling
  });

+ const replyText = cleanupResponse(outputs); // Use the cleanup function based on example
+
+ console.log("Cleaned AI Output:", replyText);
+
+ // Add AI response to UI and history
+ displayMessage('bot', replyText);
+ conversationHistory.push({ role: 'assistant', content: replyText }); // Add assistant response
+
+ if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) {
+ speakText(replyText);
+ }
+ saveState(); // Save history

  } catch (error) {
+ console.error("AI response generation error:", error);
+ displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
  } finally {
+ if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
+ updateChatUIState(generator !== null); // Re-enable UI
+ userInput.focus();
  }
  }

+ // --- Speech API Functions ---
+ function initializeSpeechAPI() { /* No changes needed */
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); } if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); } updateChatUIState(false);
+ }
+ function loadVoices() { /* No changes needed */ if (!synthesis) return; let voices = synthesis.getVoices(); if (voices.length === 0) { synthesis.onvoiceschanged = () => { voices = synthesis.getVoices(); findAndSetVoice(voices); }; } else { findAndSetVoice(voices); } }
+ function findAndSetVoice(voices) { /* No changes needed */ targetVoice = voices.find(v => v.lang === 'en-US') || voices.find(v => v.lang.startsWith('en-')); if (targetVoice) { console.log("Using English voice:", targetVoice.name, targetVoice.lang); } else { console.warn("No suitable English voice found."); } }
+ function speakText(text) { /* No changes needed */ if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return; synthesis.cancel(); const utterance = new SpeechSynthesisUtterance(text); utterance.voice = targetVoice; utterance.lang = targetVoice.lang; utterance.rate = 1.0; utterance.pitch = 1.0; synthesis.speak(utterance); }
+
+ // --- Event Listeners ---
+ sendButton.addEventListener('click', handleUserMessage);
+ userInput.addEventListener('keypress', (e) => { if (e.key === 'Enter' && !e.shiftKey) { e.preventDefault(); handleUserMessage(); } });
+ speechButton.addEventListener('click', () => { if (recognition && !isListening && generator && !isLoadingModel) { try { recognition.start(); } catch (error) { console.error("Rec start fail:", error); updateModelStatus(`Failed to start recognition`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 2000); isListening = false; updateChatUIState(generator !== null); } } });
+
  </script>
  </body>
  </html>