Spaces: Running on Zero
Lord-Raven committed
Commit · de1ced9
1 Parent(s): a3a5d99

Messing with configuration.
app.py CHANGED
@@ -2,8 +2,8 @@ import spaces
 import gradio
 import json
 import torch
-from transformers import AutoTokenizer
 from optimum.onnxruntime import ORTModelForSequenceClassification
+from transformers import AutoTokenizer
 from optimum.pipelines import pipeline
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
@@ -29,22 +29,22 @@ print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
 # "Xenova/deBERTa-v3-base-mnli" "MoritzLaurer/DeBERTa-v3-base-mnli" Still a bit slow and not great answers
 # "xenova/nli-deberta-v3-small" "cross-encoder/nli-deberta-v3-small" Was using this for a good while and it was...okay
 
-model_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
-file_name = "onnx/model.onnx"
-tokenizer_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
+# model_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
+# file_name = "onnx/model.onnx"
+# tokenizer_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
 
-model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True, provider="CUDAExecutionProvider")
-tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, model_max_length=512)
+# model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True, provider="CUDAExecutionProvider")
+# tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, model_max_length=512)
 
-
-
-
-
-
+model = ORTModelForSequenceClassification.from_pretrained(
+    "philschmid/tiny-bert-sst2-distilled",
+    export=True,
+    provider="CUDAExecutionProvider",
+)
 
-
+tokenizer = AutoTokenizer.from_pretrained("philschmid/tiny-bert-sst2-distilled")
 
-classifier = pipeline(task="zero-shot-classification", model=model, tokenizer=tokenizer, device="cuda:0")
+# classifier = pipeline(task="zero-shot-classification", model=model, tokenizer=tokenizer, device="cuda:0")
 
 def classify(data_string, request: gradio.Request):
     if request:
@@ -56,9 +56,10 @@ def classify(data_string, request: gradio.Request):
     # else:
     return zero_shot_classification(data)
 
-@spaces.GPU()
+# @spaces.GPU()
 def zero_shot_classification(data):
-    results = classifier(data['sequence'], candidate_labels=data['candidate_labels'], hypothesis_template=data['hypothesis_template'], multi_label=data['multi_label'])
+    results = []
+    # classifier(data['sequence'], candidate_labels=data['candidate_labels'], hypothesis_template=data['hypothesis_template'], multi_label=data['multi_label'])
     response_string = json.dumps(results)
     return response_string
 
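For reference, the zero-shot stack this commit comments out works in three steps: export the PyTorch checkpoint to ONNX at load time, run it under ONNX Runtime's CUDA execution provider, and wrap it in optimum's pipeline. Below is a minimal sketch of that wiring, using the model names from the commented-out lines above and assuming onnxruntime-gpu is installed (without it, CUDAExecutionProvider is not available):

```python
from optimum.onnxruntime import ORTModelForSequenceClassification
from optimum.pipelines import pipeline
from transformers import AutoTokenizer

model_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"

# export=True converts the PyTorch checkpoint to ONNX on the fly;
# provider selects the ONNX Runtime backend (needs onnxruntime-gpu).
model = ORTModelForSequenceClassification.from_pretrained(
    model_name,
    export=True,
    provider="CUDAExecutionProvider",
)

# model_max_length=512 caps inputs at the model's usual context size.
tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=512)

# optimum's pipeline mirrors the transformers API but accepts ORT models.
classifier = pipeline(
    task="zero-shot-classification",
    model=model,
    tokenizer=tokenizer,
    device="cuda:0",
)
```

The philschmid/tiny-bert-sst2-distilled checkpoint left active in the diff is a small SST-2 sentiment model rather than an NLI model, so it presumably serves as a quick smoke test of the ONNX load path, which fits the "Messing with configuration." commit message.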
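The commented-out classifier call also documents the payload shape that classify expects in data. A hedged round-trip sketch follows; the sequence and labels are invented placeholders, not values from this Space:

```python
import json

# Hypothetical payload; the keys mirror the commented-out classifier call.
data = {
    "sequence": "The knight drew his sword.",      # placeholder text
    "candidate_labels": ["combat", "dialogue"],    # placeholder labels
    "hypothesis_template": "This example is {}.",  # common default template
    "multi_label": False,
}

# With the pipeline re-enabled, the call would be:
# results = classifier(data["sequence"],
#                      candidate_labels=data["candidate_labels"],
#                      hypothesis_template=data["hypothesis_template"],
#                      multi_label=data["multi_label"])
# A zero-shot classification pipeline returns a dict of the form
# {"sequence": "...", "labels": [...], "scores": [...]}.

# As committed, the stub just serializes an empty list:
results = []
response_string = json.dumps(results)
print(response_string)  # -> "[]"
```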
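Finally, the @spaces.GPU() decorator that got commented out is what requests a GPU for a ZeroGPU Space while the decorated function runs; with it disabled, nothing in the function touches CUDA, which is consistent with the empty-list stub. A sketch of the restored version, where the duration argument is an assumed per-call budget and not something in this commit:

```python
import json
import spaces

# On ZeroGPU, the decorator attaches a GPU only for the duration of the call.
@spaces.GPU(duration=60)  # duration in seconds; an assumed tuning knob
def zero_shot_classification(data):
    results = classifier(
        data["sequence"],
        candidate_labels=data["candidate_labels"],
        hypothesis_template=data["hypothesis_template"],
        multi_label=data["multi_label"],
    )
    return json.dumps(results)
```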