MekkCyber committed · Commit 8091bbd
1 Parent(s): 0ddd62c

update

Files changed:
- README.md +0 -14
- app.py +3 -3
- requirements.txt +1 -1
README.md CHANGED

@@ -6,17 +6,3 @@ colorTo: red
 sdk: docker
 app_file: app.py
 app_port: 7860
-pinned: false
-
-hf_oauth: true
-# optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
-hf_oauth_expiration_minutes: 480
-# optional, see "Scopes" below. "openid profile" is always included.
-hf_oauth_scopes:
-  - read-repos
-  - write-repos
-  - manage-repos
-  - inference-api
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
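Note (context, not part of the diff): the front-matter keys removed above are what switch on the Hugging Face OAuth flow for a Space. With hf_oauth: true, the Space can show a "Sign in with Hugging Face" button via gr.LoginButton(), and Gradio injects the visitor's credentials into any handler that declares gr.OAuthProfile / gr.OAuthToken parameters (both are None for anonymous visitors). A minimal sketch of that wiring, with hypothetical handler and component names:

import gradio as gr

# Gradio fills the OAuth parameters itself; they are not listed in `inputs`.
def whoami(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None) -> str:
    if profile is None or oauth_token is None:
        return "Not logged in"
    return f"Logged in as {profile.username}"

with gr.Blocks() as demo:
    gr.LoginButton()                      # renders the "Sign in with Hugging Face" button
    status = gr.Textbox(label="Status")
    gr.Button("Check login").click(whoami, inputs=None, outputs=status)

demo.launch()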
app.py CHANGED

@@ -54,8 +54,8 @@ def run_inference(model_name, input_text, num_tokens=6):
 
 def run_transformers(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None, model_name, input_text, num_tokens):
 
-    if oauth_token is None :
-        return "Error : To Compare please login to your HF account and make sure you have access to the used Llama models"
+    # if oauth_token is None :
+    #     return "Error : To Compare please login to your HF account and make sure you have access to the used Llama models"
     # Load the model and tokenizer dynamically if needed (commented out for performance)
     tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=oauth_token.token)
     model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=oauth_token.token)
@@ -81,7 +81,7 @@ def run_transformers(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken
 def interface():
     with gr.Blocks(css=".gr-button {background-color: #5C6BC0; color: white;} .gr-button:hover {background-color: #3F51B5;}") as demo:
 
-        gr.LoginButton(elem_id="login-button", elem_classes="center-button")
+        # gr.LoginButton(elem_id="login-button", elem_classes="center-button")
 
         gr.Markdown(
             """
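Note on the new version of run_transformers: with the oauth_token is None guard commented out, the from_pretrained calls just below still dereference oauth_token.token, so a logged-out visitor would now hit an AttributeError instead of the old error message. A defensive variant, sketched (not the committed code) under the assumption that anonymous access is acceptable for public checkpoints, and using token=, which recent transformers releases prefer over the deprecated use_auth_token=; gated Llama models would still require a valid token:

from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model(model_name: str, oauth_token=None):
    # Fall back to anonymous access when nobody is signed in (hypothetical helper).
    token = oauth_token.token if oauth_token is not None else None
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
    return tokenizer, model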
requirements.txt CHANGED

@@ -4,4 +4,4 @@ fastapi==0.112.4
 huggingface-hub
 transformers
 torch
-gradio
+gradio