mmcw23-p committed on
Commit 8e5a110 · 1 Parent(s): 17d9651

Update app.py

Files changed (1)
  1. app.py +6 -587
app.py CHANGED
@@ -1,589 +1,8 @@
 
 
 
- import io
- import json
- import re
- import sys
- import time
- import zipfile
- from datetime import datetime
- from pathlib import Path
-
- import gradio as gr
- from PIL import Image
-
- import modules.extensions as extensions_module
- from modules import api, chat, shared, training, ui
- from modules.html_generator import chat_html_wrapper
- from modules.LoRA import add_lora_to_model
- from modules.models import load_model, load_soft_prompt
- from modules.text_generation import (clear_torch_cache, generate_reply,
-                                      stop_everything_event)
-
- # Loading custom settings
- settings_file = None
- if shared.args.settings is not None and Path(shared.args.settings).exists():
-     settings_file = Path(shared.args.settings)
- elif Path('settings.json').exists():
-     settings_file = Path('settings.json')
- if settings_file is not None:
-     print(f"Loading settings from {settings_file}...")
-     new_settings = json.loads(open(settings_file, 'r').read())
-     for item in new_settings:
-         shared.settings[item] = new_settings[item]
-
-
- iface.launch()
- def get_available_models():
-     if shared.args.flexgen:
-         return sorted([re.sub('-np$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if item.name.endswith('-np')], key=str.lower)
-     else:
-         return sorted([re.sub('.pth$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=str.lower)
-
-
- def get_available_presets():
-     return sorted(set((k.stem for k in Path('presets').glob('*.txt'))), key=str.lower)
-
-
- def get_available_prompts():
-     prompts = []
-     prompts += sorted(set((k.stem for k in Path('prompts').glob('[0-9]*.txt'))), key=str.lower, reverse=True)
-     prompts += sorted(set((k.stem for k in Path('prompts').glob('*.txt'))), key=str.lower)
-     prompts += ['None']
-     return prompts
-
-
- def get_available_characters():
-     paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
-     return ['None'] + sorted(set((k.stem for k in paths if k.stem != "instruction-following")), key=str.lower)
-
-
- def get_available_instruction_templates():
-     path = "characters/instruction-following"
-     paths = []
-     if os.path.exists(path):
-         paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
-     return ['None'] + sorted(set((k.stem for k in paths)), key=str.lower)
-
-
- def get_available_extensions():
-     return sorted(set(map(lambda x: x.parts[1], Path('extensions').glob('*/script.py'))), key=str.lower)
-
-
- def get_available_softprompts():
-     return ['None'] + sorted(set((k.stem for k in Path('softprompts').glob('*.zip'))), key=str.lower)
-
-
- def get_available_loras():
-     return ['None'] + sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=str.lower)
-
-
- def unload_model():
-     shared.model = shared.tokenizer = None
-     clear_torch_cache()
-
-
- def load_model_wrapper(selected_model):
-     if selected_model != shared.model_name:
-         shared.model_name = selected_model
-
-         unload_model()
-         if selected_model != '':
-             shared.model, shared.tokenizer = load_model(shared.model_name)
-
-     return selected_model
-
-
- def load_lora_wrapper(selected_lora):
-     add_lora_to_model(selected_lora)
-     return selected_lora
-
-
- def load_preset_values(preset_menu, state, return_dict=False):
-     generate_params = {
-         'do_sample': True,
-         'temperature': 1,
-         'top_p': 1,
-         'typical_p': 1,
-         'repetition_penalty': 1,
-         'encoder_repetition_penalty': 1,
-         'top_k': 50,
-         'num_beams': 1,
-         'penalty_alpha': 0,
-         'min_length': 0,
-         'length_penalty': 1,
-         'no_repeat_ngram_size': 0,
-         'early_stopping': False,
-     }
-     with open(Path(f'presets/{preset_menu}.txt'), 'r') as infile:
-         preset = infile.read()
-     for i in preset.splitlines():
-         i = i.rstrip(',').strip().split('=')
-         if len(i) == 2 and i[0].strip() != 'tokens':
-             generate_params[i[0].strip()] = eval(i[1].strip())
-     generate_params['temperature'] = min(1.99, generate_params['temperature'])
-
-     if return_dict:
-         return generate_params
-     else:
-         state.update(generate_params)
-         return state, *[generate_params[k] for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']]
-
-
- def upload_soft_prompt(file):
-     with zipfile.ZipFile(io.BytesIO(file)) as zf:
-         zf.extract('meta.json')
-         j = json.loads(open('meta.json', 'r').read())
-         name = j['name']
-         Path('meta.json').unlink()
-
-     with open(Path(f'softprompts/{name}.zip'), 'wb') as f:
-         f.write(file)
-
-     return name
-
-
- def save_prompt(text):
-     fname = f"{datetime.now().strftime('%Y-%m-%d-%H%M%S')}.txt"
-     with open(Path(f'prompts/{fname}'), 'w', encoding='utf-8') as f:
-         f.write(text)
-     return f"Saved to prompts/{fname}"
-
-
- def load_prompt(fname):
-     if fname in ['None', '']:
-         return ''
-     else:
-         with open(Path(f'prompts/{fname}.txt'), 'r', encoding='utf-8') as f:
-             text = f.read()
-             if text[-1] == '\n':
-                 text = text[:-1]
-             return text
-
-
- def create_prompt_menus():
-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 shared.gradio['prompt_menu'] = gr.Dropdown(choices=get_available_prompts(), value='None', label='Prompt')
-                 ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': get_available_prompts()}, 'refresh-button')
-
-         with gr.Column():
-             with gr.Column():
-                 shared.gradio['save_prompt'] = gr.Button('Save prompt')
-                 shared.gradio['status'] = gr.Markdown('Ready')
-
-     shared.gradio['prompt_menu'].change(load_prompt, [shared.gradio['prompt_menu']], [shared.gradio['textbox']], show_progress=False)
-     shared.gradio['save_prompt'].click(save_prompt, [shared.gradio['textbox']], [shared.gradio['status']], show_progress=False)
-
-
- def create_model_menus():
-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 shared.gradio['model_menu'] = gr.Dropdown(choices=available_models, value=shared.model_name, label='Model')
-                 ui.create_refresh_button(shared.gradio['model_menu'], lambda: None, lambda: {'choices': get_available_models()}, 'refresh-button')
-         with gr.Column():
-             with gr.Row():
-                 shared.gradio['lora_menu'] = gr.Dropdown(choices=available_loras, value=shared.lora_name, label='LoRA')
-                 ui.create_refresh_button(shared.gradio['lora_menu'], lambda: None, lambda: {'choices': get_available_loras()}, 'refresh-button')
-
-     shared.gradio['model_menu'].change(load_model_wrapper, shared.gradio['model_menu'], shared.gradio['model_menu'], show_progress=True)
-     shared.gradio['lora_menu'].change(load_lora_wrapper, shared.gradio['lora_menu'], shared.gradio['lora_menu'], show_progress=True)
-
-
- def create_settings_menus(default_preset):
-     generate_params = load_preset_values(default_preset if not shared.args.flexgen else 'Naive', {}, return_dict=True)
-     for k in ['max_new_tokens', 'seed', 'stop_at_newline', 'chat_prompt_size', 'chat_generation_attempts']:
-         generate_params[k] = shared.settings[k]
-     shared.gradio['generate_state'] = gr.State(generate_params)
-
-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 shared.gradio['preset_menu'] = gr.Dropdown(choices=available_presets, value=default_preset if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
-                 ui.create_refresh_button(shared.gradio['preset_menu'], lambda: None, lambda: {'choices': get_available_presets()}, 'refresh-button')
-         with gr.Column():
-             shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label='Seed (-1 for random)')
-
-     with gr.Row():
-         with gr.Column():
-             with gr.Box():
-                 gr.Markdown('Custom generation parameters ([reference](https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig))')
-                 with gr.Row():
-                     with gr.Column():
-                         shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label='temperature')
-                         shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p')
-                         shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k')
-                         shared.gradio['typical_p'] = gr.Slider(0.0, 1.0, value=generate_params['typical_p'], step=0.01, label='typical_p')
-                     with gr.Column():
-                         shared.gradio['repetition_penalty'] = gr.Slider(1.0, 1.5, value=generate_params['repetition_penalty'], step=0.01, label='repetition_penalty')
-                         shared.gradio['encoder_repetition_penalty'] = gr.Slider(0.8, 1.5, value=generate_params['encoder_repetition_penalty'], step=0.01, label='encoder_repetition_penalty')
-                         shared.gradio['no_repeat_ngram_size'] = gr.Slider(0, 20, step=1, value=generate_params['no_repeat_ngram_size'], label='no_repeat_ngram_size')
-                         shared.gradio['min_length'] = gr.Slider(0, 2000, step=1, value=generate_params['min_length'] if shared.args.no_stream else 0, label='min_length', interactive=shared.args.no_stream)
-                         shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label='do_sample')
-         with gr.Column():
-             with gr.Box():
-                 gr.Markdown('Contrastive search')
-                 shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params['penalty_alpha'], label='penalty_alpha')
-             with gr.Box():
-                 gr.Markdown('Beam search (uses a lot of VRAM)')
-                 with gr.Row():
-                     with gr.Column():
-                         shared.gradio['num_beams'] = gr.Slider(1, 20, step=1, value=generate_params['num_beams'], label='num_beams')
-                     with gr.Column():
-                         shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params['length_penalty'], label='length_penalty')
-                 shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params['early_stopping'], label='early_stopping')
-
-     with gr.Accordion('Soft prompt', open=False):
-         with gr.Row():
-             shared.gradio['softprompts_menu'] = gr.Dropdown(choices=available_softprompts, value='None', label='Soft prompt')
-             ui.create_refresh_button(shared.gradio['softprompts_menu'], lambda: None, lambda: {'choices': get_available_softprompts()}, 'refresh-button')
-
-         gr.Markdown('Upload a soft prompt (.zip format):')
-         with gr.Row():
-             shared.gradio['upload_softprompt'] = gr.File(type='binary', file_types=['.zip'])
-
-     shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio[k] for k in ['preset_menu', 'generate_state']], [shared.gradio[k] for k in ['generate_state', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']])
-     shared.gradio['softprompts_menu'].change(load_soft_prompt, shared.gradio['softprompts_menu'], shared.gradio['softprompts_menu'], show_progress=True)
-     shared.gradio['upload_softprompt'].upload(upload_soft_prompt, shared.gradio['upload_softprompt'], shared.gradio['softprompts_menu'])
-
-
- def set_interface_arguments(interface_mode, extensions, bool_active):
-     modes = ["default", "notebook", "chat", "cai_chat"]
-     cmd_list = vars(shared.args)
-     bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes]
-
-     shared.args.extensions = extensions
-     for k in modes[1:]:
-         exec(f"shared.args.{k} = False")
-     if interface_mode != "default":
-         exec(f"shared.args.{interface_mode} = True")
-
-     for k in bool_list:
-         exec(f"shared.args.{k} = False")
-     for k in bool_active:
-         exec(f"shared.args.{k} = True")
-
-     shared.need_restart = True
-
-
- available_models = get_available_models()
- available_presets = get_available_presets()
- available_characters = get_available_characters()
- available_softprompts = get_available_softprompts()
- available_loras = get_available_loras()
-
- # Default extensions
- extensions_module.available_extensions = get_available_extensions()
- if shared.is_chat():
-     for extension in shared.settings['chat_default_extensions']:
-         shared.args.extensions = shared.args.extensions or []
-         if extension not in shared.args.extensions:
-             shared.args.extensions.append(extension)
- else:
-     for extension in shared.settings['default_extensions']:
-         shared.args.extensions = shared.args.extensions or []
-         if extension not in shared.args.extensions:
-             shared.args.extensions.append(extension)
-
- # Default model
- if shared.args.model is not None:
-     shared.model_name = shared.args.model
- else:
-     if len(available_models) == 0:
-         print('No models are available! Please download at least one.')
-         sys.exit(0)
-     elif len(available_models) == 1:
-         i = 0
-     else:
-         print('The following models are available:\n')
-         for i, model in enumerate(available_models):
-             print(f'{i+1}. {model}')
-         print(f'\nWhich one do you want to load? 1-{len(available_models)}\n')
-         i = int(input()) - 1
-         print()
-     shared.model_name = available_models[i]
- shared.model, shared.tokenizer = load_model(shared.model_name)
- if shared.args.lora:
-     add_lora_to_model(shared.args.lora)
-
- # Default UI settings
- default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
- if shared.lora_name != "None":
-     default_text = load_prompt(shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')])
- else:
-     default_text = load_prompt(shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')])
- title = 'Text generation web UI'
-
-
- def create_interface():
-     gen_events = []
-     if shared.args.extensions is not None and len(shared.args.extensions) > 0:
-         extensions_module.load_extensions()
-
-     with gr.Blocks(css=ui.css if not shared.is_chat() else ui.css + ui.chat_css, analytics_enabled=False, title=title) as shared.gradio['interface']:
-         if shared.is_chat():
-             shared.gradio['Chat input'] = gr.State()
-             with gr.Tab("Text generation", elem_id="main"):
-                 shared.gradio['display'] = gr.HTML(value=chat_html_wrapper(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], 'cai-chat'))
-                 shared.gradio['textbox'] = gr.Textbox(label='Input')
-                 with gr.Row():
-                     shared.gradio['Generate'] = gr.Button('Generate')
-                     shared.gradio['Stop'] = gr.Button('Stop', elem_id="stop")
-                 with gr.Row():
-                     shared.gradio['Impersonate'] = gr.Button('Impersonate')
-                     shared.gradio['Regenerate'] = gr.Button('Regenerate')
-                 with gr.Row():
-                     shared.gradio['Copy last reply'] = gr.Button('Copy last reply')
-                     shared.gradio['Replace last reply'] = gr.Button('Replace last reply')
-                     shared.gradio['Remove last'] = gr.Button('Remove last')
-
-                     shared.gradio['Clear history'] = gr.Button('Clear history')
-                     shared.gradio['Clear history-confirm'] = gr.Button('Confirm', variant="stop", visible=False)
-                     shared.gradio['Clear history-cancel'] = gr.Button('Cancel', visible=False)
-
-                 shared.gradio["Chat mode"] = gr.Radio(choices=["cai-chat", "chat", "instruct"], value="cai-chat", label="Mode")
-                 shared.gradio["Instruction templates"] = gr.Dropdown(choices=get_available_instruction_templates(), label="Instruction template", value="None", visible=False)
-
-             with gr.Tab("Character", elem_id="chat-settings"):
-                 with gr.Row():
-                     with gr.Column(scale=8):
-                         shared.gradio['name1'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name')
-                         shared.gradio['name2'] = gr.Textbox(value=shared.settings['name2'], lines=1, label='Character\'s name')
-                         shared.gradio['greeting'] = gr.Textbox(value=shared.settings['greeting'], lines=4, label='Greeting')
-                         shared.gradio['context'] = gr.Textbox(value=shared.settings['context'], lines=4, label='Context')
-                         shared.gradio['end_of_turn'] = gr.Textbox(value=shared.settings["end_of_turn"], lines=1, label='End of turn string')
-                     with gr.Column(scale=1):
-                         shared.gradio['character_picture'] = gr.Image(label='Character picture', type="pil")
-                         shared.gradio['your_picture'] = gr.Image(label='Your picture', type="pil", value=Image.open(Path("cache/pfp_me.png")) if Path("cache/pfp_me.png").exists() else None)
-                 with gr.Row():
-                     shared.gradio['character_menu'] = gr.Dropdown(choices=available_characters, value='None', label='Character', elem_id='character-menu')
-                     ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': get_available_characters()}, 'refresh-button')
-
-                 with gr.Row():
-                     with gr.Tab('Chat history'):
-                         with gr.Row():
-                             with gr.Column():
-                                 gr.Markdown('Upload')
-                                 shared.gradio['upload_chat_history'] = gr.File(type='binary', file_types=['.json', '.txt'])
-                             with gr.Column():
-                                 gr.Markdown('Download')
-                                 shared.gradio['download'] = gr.File()
-                                 shared.gradio['download_button'] = gr.Button(value='Click me')
-                     with gr.Tab('Upload character'):
-                         gr.Markdown("# JSON format")
-                         with gr.Row():
-                             with gr.Column():
-                                 gr.Markdown('1. Select the JSON file')
-                                 shared.gradio['upload_json'] = gr.File(type='binary', file_types=['.json'])
-                             with gr.Column():
-                                 gr.Markdown('2. Select your character\'s profile picture (optional)')
-                                 shared.gradio['upload_img_bot'] = gr.File(type='binary', file_types=['image'])
-                         shared.gradio['Upload character'] = gr.Button(value='Submit')
-
-                         gr.Markdown("# TavernAI PNG format")
-                         shared.gradio['upload_img_tavern'] = gr.File(type='binary', file_types=['image'])
-
-             with gr.Tab("Parameters", elem_id="parameters"):
-                 with gr.Box():
-                     gr.Markdown("Chat parameters")
-                     with gr.Row():
-                         with gr.Column():
-                             shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
-                             shared.gradio['chat_prompt_size_slider'] = gr.Slider(minimum=shared.settings['chat_prompt_size_min'], maximum=shared.settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=shared.settings['chat_prompt_size'])
-                         with gr.Column():
-                             shared.gradio['chat_generation_attempts'] = gr.Slider(minimum=shared.settings['chat_generation_attempts_min'], maximum=shared.settings['chat_generation_attempts_max'], value=shared.settings['chat_generation_attempts'], step=1, label='Generation attempts (for longer replies)')
-                             shared.gradio['stop_at_newline'] = gr.Checkbox(value=shared.settings['stop_at_newline'], label='Stop generating at new line character?')
-
-                 create_settings_menus(default_preset)
-
-             shared.input_params = [shared.gradio[k] for k in ['Chat input', 'generate_state', 'name1', 'name2', 'context', 'Chat mode', 'end_of_turn']]
-
-             def set_chat_input(textbox):
-                 return textbox, ""
-
-             gen_events.append(shared.gradio['Generate'].click(set_chat_input, shared.gradio['textbox'], [shared.gradio['Chat input'], shared.gradio['textbox']], show_progress=False))
-             gen_events.append(shared.gradio['Generate'].click(chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
-             gen_events.append(shared.gradio['textbox'].submit(set_chat_input, shared.gradio['textbox'], [shared.gradio['Chat input'], shared.gradio['textbox']], show_progress=False))
-             gen_events.append(shared.gradio['textbox'].submit(chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
-             gen_events.append(shared.gradio['Regenerate'].click(chat.regenerate_wrapper, shared.input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
-             gen_events.append(shared.gradio['Impersonate'].click(chat.impersonate_wrapper, shared.input_params, shared.gradio['textbox'], show_progress=shared.args.no_stream))
-             shared.gradio['Stop'].click(stop_everything_event, [], [], queue=False, cancels=gen_events if shared.args.no_stream else None)
-
-             shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, [], shared.gradio['textbox'], show_progress=shared.args.no_stream)
-             shared.gradio['Replace last reply'].click(chat.replace_last_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'Chat mode']], shared.gradio['display'], show_progress=shared.args.no_stream)
-
-             # Clear history with confirmation
-             clear_arr = [shared.gradio[k] for k in ['Clear history-confirm', 'Clear history', 'Clear history-cancel']]
-             shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, clear_arr)
-             shared.gradio['Clear history-confirm'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
-             shared.gradio['Clear history-confirm'].click(chat.clear_chat_log, [shared.gradio[k] for k in ['name1', 'name2', 'greeting', 'Chat mode']], shared.gradio['display'])
-             shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
-             shared.gradio['Chat mode'].change(lambda x: gr.update(visible=x == 'instruct'), shared.gradio['Chat mode'], shared.gradio['Instruction templates'])
-
-             shared.gradio['Remove last'].click(chat.remove_last_message, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
-             shared.gradio['download_button'].click(chat.save_history, inputs=[], outputs=[shared.gradio['download']])
-             shared.gradio['Upload character'].click(chat.upload_character, [shared.gradio['upload_json'], shared.gradio['upload_img_bot']], [shared.gradio['character_menu']])
-
-             # Clearing stuff and saving the history
-             for i in ['Generate', 'Regenerate', 'Replace last reply']:
-                 shared.gradio[i].click(lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
-                 shared.gradio[i].click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)
-             shared.gradio['Clear history-confirm'].click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)
-             shared.gradio['textbox'].submit(lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
-             shared.gradio['textbox'].submit(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)
-
-             shared.gradio['character_menu'].change(chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'Chat mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'end_of_turn', 'display']])
-             shared.gradio['Instruction templates'].change(lambda character, name1, name2, mode: chat.load_character(character, name1, name2, mode), [shared.gradio[k] for k in ['Instruction templates', 'name1', 'name2', 'Chat mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'end_of_turn', 'display']])
-             shared.gradio['upload_chat_history'].upload(chat.load_history, [shared.gradio[k] for k in ['upload_chat_history', 'name1', 'name2']], [])
-             shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
-             shared.gradio['your_picture'].change(chat.upload_your_profile_picture, [shared.gradio[k] for k in ['your_picture', 'name1', 'name2', 'Chat mode']], shared.gradio['display'])
-
-             reload_inputs = [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']]
-             shared.gradio['upload_chat_history'].upload(chat.redraw_html, reload_inputs, [shared.gradio['display']])
-             shared.gradio['Stop'].click(chat.redraw_html, reload_inputs, [shared.gradio['display']])
-             shared.gradio['Instruction templates'].change(chat.redraw_html, reload_inputs, [shared.gradio['display']])
-             shared.gradio['Chat mode'].change(chat.redraw_html, reload_inputs, [shared.gradio['display']])
-
-             shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js+ui.chat_js}}}")
-             shared.gradio['interface'].load(lambda: chat.load_default_history(shared.settings['name1'], shared.settings['name2']), None, None)
-             shared.gradio['interface'].load(chat.redraw_html, reload_inputs, [shared.gradio['display']], show_progress=True)
-
-         elif shared.args.notebook:
-             with gr.Tab("Text generation", elem_id="main"):
-                 with gr.Row():
-                     with gr.Column(scale=4):
-                         with gr.Tab('Raw'):
-                             shared.gradio['textbox'] = gr.Textbox(value=default_text, elem_id="textbox", lines=27)
-                         with gr.Tab('Markdown'):
-                             shared.gradio['markdown'] = gr.Markdown()
-                         with gr.Tab('HTML'):
-                             shared.gradio['html'] = gr.HTML()
-
-                         with gr.Row():
-                             with gr.Column():
-                                 with gr.Row():
-                                     shared.gradio['Generate'] = gr.Button('Generate')
-                                     shared.gradio['Stop'] = gr.Button('Stop')
-                             with gr.Column():
-                                 pass
-
-                     with gr.Column(scale=1):
-                         gr.HTML('<div style="padding-bottom: 13px"></div>')
-                         shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
-
-                 create_prompt_menus()
-
-             with gr.Tab("Parameters", elem_id="parameters"):
-                 create_settings_menus(default_preset)
-
-             shared.input_params = [shared.gradio[k] for k in ['textbox', 'generate_state']]
-             output_params = [shared.gradio[k] for k in ['textbox', 'markdown', 'html']]
-             gen_events.append(shared.gradio['Generate'].click(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
-             gen_events.append(shared.gradio['textbox'].submit(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
-             shared.gradio['Stop'].click(stop_everything_event, [], [], queue=False, cancels=gen_events if shared.args.no_stream else None)
-             shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js}}}")
-
-         else:
-             with gr.Tab("Text generation", elem_id="main"):
-                 with gr.Row():
-                     with gr.Column():
-                         shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=21, label='Input')
-                         shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
-                         shared.gradio['Generate'] = gr.Button('Generate')
-                         with gr.Row():
-                             with gr.Column():
-                                 shared.gradio['Continue'] = gr.Button('Continue')
-                             with gr.Column():
-                                 shared.gradio['Stop'] = gr.Button('Stop')
-
-                         create_prompt_menus()
-
-                     with gr.Column():
-                         with gr.Tab('Raw'):
-                             shared.gradio['output_textbox'] = gr.Textbox(lines=27, label='Output')
-                         with gr.Tab('Markdown'):
-                             shared.gradio['markdown'] = gr.Markdown()
-                         with gr.Tab('HTML'):
-                             shared.gradio['html'] = gr.HTML()
-
-             with gr.Tab("Parameters", elem_id="parameters"):
-                 create_settings_menus(default_preset)
-
-             shared.input_params = [shared.gradio[k] for k in ['textbox', 'generate_state']]
-             output_params = [shared.gradio[k] for k in ['output_textbox', 'markdown', 'html']]
-             gen_events.append(shared.gradio['Generate'].click(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
-             gen_events.append(shared.gradio['textbox'].submit(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
-             gen_events.append(shared.gradio['Continue'].click(generate_reply, [shared.gradio['output_textbox']] + shared.input_params[1:], output_params, show_progress=shared.args.no_stream))
-             shared.gradio['Stop'].click(stop_everything_event, [], [], queue=False, cancels=gen_events if shared.args.no_stream else None)
-             shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js}}}")
-
-         with gr.Tab("Model", elem_id="model-tab"):
-             create_model_menus()
-
-         with gr.Tab("Training", elem_id="training-tab"):
-             training.create_train_interface()
-
-         with gr.Tab("Interface mode", elem_id="interface-mode"):
-             modes = ["default", "notebook", "chat", "cai_chat"]
-             current_mode = "default"
-             for mode in modes[1:]:
-                 if eval(f"shared.args.{mode}"):
-                     current_mode = mode
-                     break
-             cmd_list = vars(shared.args)
-             bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes]
-             bool_active = [k for k in bool_list if vars(shared.args)[k]]
-
-             gr.Markdown("*Experimental*")
-             shared.gradio['interface_modes_menu'] = gr.Dropdown(choices=modes, value=current_mode, label="Mode")
-             shared.gradio['extensions_menu'] = gr.CheckboxGroup(choices=get_available_extensions(), value=shared.args.extensions, label="Available extensions")
-             shared.gradio['bool_menu'] = gr.CheckboxGroup(choices=bool_list, value=bool_active, label="Boolean command-line flags")
-             shared.gradio['reset_interface'] = gr.Button("Apply and restart the interface")
-
-             shared.gradio['reset_interface'].click(set_interface_arguments, [shared.gradio[k] for k in ['interface_modes_menu', 'extensions_menu', 'bool_menu']], None)
-             shared.gradio['reset_interface'].click(lambda: None, None, None, _js='() => {document.body.innerHTML=\'<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>\'; setTimeout(function(){location.reload()},2500); return []}')
-
-         if shared.args.extensions is not None:
-             extensions_module.create_extensions_block()
-
-         def change_dict_value(d, key, value):
-             d[key] = value
-             return d
-
-         for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'max_new_tokens', 'seed', 'stop_at_newline', 'chat_prompt_size_slider', 'chat_generation_attempts']:
-             if k not in shared.gradio:
-                 continue
-             if type(shared.gradio[k]) in [gr.Checkbox, gr.Number]:
-                 shared.gradio[k].change(lambda state, value, copy=k: change_dict_value(state, copy, value), inputs=[shared.gradio['generate_state'], shared.gradio[k]], outputs=shared.gradio['generate_state'])
-             else:
-                 shared.gradio[k].release(lambda state, value, copy=k: change_dict_value(state, copy, value), inputs=[shared.gradio['generate_state'], shared.gradio[k]], outputs=shared.gradio['generate_state'])
-
-         if not shared.is_chat():
-             api.create_apis()
-
-     # Authentication
-     auth = None
-     if shared.args.gradio_auth_path is not None:
-         gradio_auth_creds = []
-         with open(shared.args.gradio_auth_path, 'r', encoding="utf8") as file:
-             for line in file.readlines():
-                 gradio_auth_creds += [x.strip() for x in line.split(',') if x.strip()]
-         auth = [tuple(cred.split(':')) for cred in gradio_auth_creds]
-
-     # Launch the interface
-     shared.gradio['interface'].queue()
-     if shared.args.listen:
-         shared.gradio['interface'].launch(prevent_thread_lock=True, share=True, server_name='0.0.0.0', server_port=shared.args.listen_port, inbrowser=shared.args.auto_launch, auth=auth)
-     else:
-         shared.gradio['interface'].launch(prevent_thread_lock=True, share=True, server_port=shared.args.listen_port, inbrowser=shared.args.auto_launch, auth=auth)
-
-
- create_interface()
-
- while True:
-     time.sleep(0.5)
-     if shared.need_restart:
-         shared.need_restart = False
-         shared.gradio['interface'].close()
-         create_interface()
 
+ from transformers import pipeline
+ import torch
+ import torch.nn.functional as F

+ classifier = pipeline("sentiment-analysis")
+ res= classifier("test")

+ print(res)