MisterAI committed on
Commit
267e7c8
·
verified ·
1 Parent(s): 0face81

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -21
app.py CHANGED
@@ -1,4 +1,4 @@
1
- #V01
2
 
3
  import gradio as gr
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -13,26 +13,15 @@ model_list = [
13
 
14
 
15
 
16
-
17
- model = None
18
- tokenizer = None
19
 
20
  def load_model(model_name):
21
  """Charge le modèle et le tokenizer"""
22
- global model, tokenizer
23
- model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
24
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
25
-
26
-
27
-
28
- #def load_model(model_name):
29
- # """Charge le modèle et le tokenizer"""
30
- # if model_name is not None:
31
- # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
32
- # model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
33
- # return model, tokenizer
34
- # else:
35
- # return None, None
36
 
37
  def generate_text(model, tokenizer, input_text, max_length, temperature):
38
  """Génère du texte en utilisant le modèle"""
@@ -42,7 +31,7 @@ def generate_text(model, tokenizer, input_text, max_length, temperature):
42
 
43
  def main(model_name, input_text, max_length, temperature):
44
  """Fonction principale pour générer le texte"""
45
- if model_name is not None:
46
  model, tokenizer = load_model(model_name)
47
  generated_text = generate_text(model, tokenizer, input_text, max_length, temperature)
48
  return generated_text
@@ -72,14 +61,14 @@ with demo:
72
 
73
  load_button.click(
74
  load_model,
75
- inputs=model_select,
76
  outputs=None,
77
  queue=False
78
  )
79
 
80
  submit_button.click(
81
  main,
82
- inputs=[model_select, input_text, max_length_slider, temperature_slider],
83
  outputs=output_text,
84
  queue=False
85
  )
 
1
+ #V02
2
 
3
  import gradio as gr
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
13
 
14
 
15
 
16
+ import gradio as gr
17
+ from transformers import AutoTokenizer, AutoModelForCausalLM
 
18
 
19
  def load_model(model_name):
20
  """Charge le modèle et le tokenizer"""
21
+ if model_name is not None and model_name!= "":
22
+ model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
23
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
24
+ return model, tokenizer
 
 
 
 
 
 
 
 
 
 
25
 
26
  def generate_text(model, tokenizer, input_text, max_length, temperature):
27
  """Génère du texte en utilisant le modèle"""
 
31
 
32
  def main(model_name, input_text, max_length, temperature):
33
  """Fonction principale pour générer le texte"""
34
+ if model_name is not None and model_name!= "":
35
  model, tokenizer = load_model(model_name)
36
  generated_text = generate_text(model, tokenizer, input_text, max_length, temperature)
37
  return generated_text
 
61
 
62
  load_button.click(
63
  load_model,
64
+ inputs=model_name,
65
  outputs=None,
66
  queue=False
67
  )
68
 
69
  submit_button.click(
70
  main,
71
+ inputs=[model_name, input_text, max_length_slider, temperature_slider],
72
  outputs=output_text,
73
  queue=False
74
  )