MekkCyber committed on
Commit
7fec156
·
1 Parent(s): 89006af
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -39,7 +39,7 @@ def run_inference(model_name, input_text, num_tokens=6):
39
 
40
  model_name = model_name.split("/")[1]
41
  start_time = time.time()
42
- if input_text is None :
43
  return "Please provide an input text for the model"
44
  result = subprocess.run(
45
  f"python run_inference.py -m models/{model_name}/ggml-model-i2_s.gguf -p \"{input_text}\" -n {num_tokens} -temp 0",
@@ -65,7 +65,7 @@ def run_transformers(model_name, input_text, num_tokens):
65
  # Load the model and tokenizer dynamically if needed (commented out for performance)
66
  # if model_name=="TinyLlama/TinyLlama-1.1B-Chat-v1.0" :
67
  print(input_text)
68
- if input_text is None :
69
  return "Please provide an input text for the model", None
70
  tokenizer = AutoTokenizer.from_pretrained(model_name)
71
  model = AutoModelForCausalLM.from_pretrained(model_name)
@@ -125,8 +125,8 @@ def interface():
125
  with gr.Row():
126
  transformer_model_dropdown = gr.Dropdown(
127
  label="Select Transformers Model",
128
- choices=["TinyLlama/TinyLlama-1.1B-Chat-v1.0"], # Replace with actual models
129
- value="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
130
  interactive=True
131
  )
132
  compare_button = gr.Button("Run Transformers Inference", elem_id="compare-button")
 
39
 
40
  model_name = model_name.split("/")[1]
41
  start_time = time.time()
42
+ if input_text is None or input_text == "":
43
  return "Please provide an input text for the model"
44
  result = subprocess.run(
45
  f"python run_inference.py -m models/{model_name}/ggml-model-i2_s.gguf -p \"{input_text}\" -n {num_tokens} -temp 0",
 
65
  # Load the model and tokenizer dynamically if needed (commented out for performance)
66
  # if model_name=="TinyLlama/TinyLlama-1.1B-Chat-v1.0" :
67
  print(input_text)
68
+ if input_text is None or input_text == "":
69
  return "Please provide an input text for the model", None
70
  tokenizer = AutoTokenizer.from_pretrained(model_name)
71
  model = AutoModelForCausalLM.from_pretrained(model_name)
 
125
  with gr.Row():
126
  transformer_model_dropdown = gr.Dropdown(
127
  label="Select Transformers Model",
128
+ choices=["TinyLlama/TinyLlama_v1.1"], # Replace with actual models
129
+ value="TinyLlama/TinyLlama_v1.1",
130
  interactive=True
131
  )
132
  compare_button = gr.Button("Run Transformers Inference", elem_id="compare-button")