Update app.py
app.py
CHANGED
@@ -77,12 +77,12 @@ class TextWindowProcessor:
 
 class TextClassifier:
     def __init__(self):
-        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        if self.device.type == 'cpu':
-            # Enable CPU optimizations
+        # Set thread configuration before any model loading or parallel work
+        if not torch.cuda.is_available():
             torch.set_num_threads(MAX_WORKERS)
             torch.set_num_interop_threads(MAX_WORKERS)
 
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.model_name = MODEL_NAME
         self.tokenizer = None
         self.model = None
@@ -105,11 +105,6 @@ class TextClassifier:
             self.model_name,
             num_labels=2
         ).to(self.device)
-
-        if self.device.type == 'cpu':
-            # Enable faster CPU performance without TorchScript
-            torch.set_num_threads(MAX_WORKERS)
-            torch.set_num_interop_threads(MAX_WORKERS)
 
         model_path = "model_20250209_184929_acc1.0000.pt"
         if os.path.exists(model_path):