MisterAI committed on
Commit 8883dd3 · verified · 1 Parent(s): 1435c20

Update app.py

Files changed (1): app.py (+6 -1)
app.py CHANGED
@@ -4,7 +4,12 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
 # Model to use
-model_name = "fbaldassarri/tiiuae_Falcon3-1B-Instruct-autogptq-int8-gs128-asym"
+#model_name = "fbaldassarri/tiiuae_Falcon3-1B-Instruct-autogptq-int8-gs128-asym"
+#  File "/usr/local/lib/python3.10/site-packages/transformers/quantizers/quantizer_gptq.py", line 65, in validate_environment
+#    raise RuntimeError("GPU is required to quantize or run quantize model.")
+#RuntimeError: GPU is required to quantize or run quantize model.
+model_name = "BSC-LT/salamandra-2b-instruct"
+
 
 def load_model():
     """Loads the model and the tokenizer"""