KeivanR committed on
Commit
c4ad33b
Β·
1 Parent(s): 6fe0026

evaluate to cli and hf repo to hf env var

Browse files
Files changed (3) hide show
  1. app.py +11 -5
  2. qwen_classifier/cli.py +23 -0
  3. qwen_classifier/evaluate.py +5 -5
app.py CHANGED
@@ -14,6 +14,9 @@ from qwen_classifier.config import HF_REPO
14
  from pydantic import BaseModel
15
 
16
  app = FastAPI(title="Qwen Classifier")
 
 
 
17
 
18
  # Add this endpoint
19
  @app.get("/", response_class=HTMLResponse)
@@ -31,7 +34,7 @@ def home():
31
  <li><strong>POST /evaluate</strong> - Evaluate batch text prediction from zip file</li>
32
  <li><strong>GET /health</strong> - Check API status</li>
33
  </ul>
34
- <p>Try it: <code>curl -X POST <SPACE_URL>/predict -H "Content-Type: application/json" -d '{"text":"your text"}'</code></p>
35
  </body>
36
  </html>
37
  """
@@ -50,7 +53,7 @@ async def load_model():
50
 
51
  # Load model (will cache in /home/user/.cache/huggingface)
52
  app.state.model = QwenClassifier.from_pretrained(
53
- HF_REPO,
54
  )
55
  print("Model loaded successfully!")
56
 
@@ -59,13 +62,16 @@ async def load_model():
59
  class PredictionRequest(BaseModel):
60
  text: str # ← Enforces that 'text' must be a non-empty string
61
 
 
 
 
62
  @app.post("/predict")
63
  async def predict(request: PredictionRequest): # ← Validates input automatically
64
- return predict_single(request.text, HF_REPO, backend="local")
65
 
66
  @app.post("/evaluate")
67
- async def evaluate(request: PredictionRequest): # ← Validates input automatically
68
- return evaluate_batch(request.text, HF_REPO, backend="local")
69
 
70
  @app.get("/health")
71
  def health_check():
 
14
  from pydantic import BaseModel
15
 
16
  app = FastAPI(title="Qwen Classifier")
17
+ hf_repo = os.getenv("HF_REPO")
18
+ if not hf_repo:
19
+ hf_repo = HF_REPO
20
 
21
  # Add this endpoint
22
  @app.get("/", response_class=HTMLResponse)
 
34
  <li><strong>POST /evaluate</strong> - Evaluate batch text prediction from zip file</li>
35
  <li><strong>GET /health</strong> - Check API status</li>
36
  </ul>
37
+ <p>Try it: <code>curl -X POST https://keivanr-qwen-classifier-demo.hf.space/predict -H "Content-Type: application/json" -d '{"text":"your text"}'</code></p>
38
  </body>
39
  </html>
40
  """
 
53
 
54
  # Load model (will cache in /home/user/.cache/huggingface)
55
  app.state.model = QwenClassifier.from_pretrained(
56
+ hf_repo,
57
  )
58
  print("Model loaded successfully!")
59
 
 
62
  class PredictionRequest(BaseModel):
63
  text: str # ← Enforces that 'text' must be a non-empty string
64
 
65
+ class EvaluationRequest(BaseModel):
66
+ file_path: str # ← Enforces that 'file_path' is a string (note: pydantic str does not reject empty strings)
67
+
68
  @app.post("/predict")
69
  async def predict(request: PredictionRequest): # ← Validates input automatically
70
+ return predict_single(request.text, hf_repo, backend="local")
71
 
72
  @app.post("/evaluate")
73
+ async def evaluate(request: EvaluationRequest): # ← Validates input automatically
74
+ return evaluate_batch(request.file_path, hf_repo, backend="local")
75
 
76
  @app.get("/health")
77
  def health_check():
qwen_classifier/cli.py CHANGED
@@ -1,5 +1,6 @@
1
  import click
2
  from .predict import predict_single
 
3
  import warnings
4
  from transformers import logging as hf_logging
5
  from .config import HF_REPO
@@ -42,4 +43,26 @@ def predict(ctx, text, hf_repo, backend, hf_token):
42
  backend=backend,
43
  hf_token=hf_token
44
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  click.echo(f"Prediction results: {results}")
 
1
  import click
2
  from .predict import predict_single
3
+ from .evaluate import evaluate_batch
4
  import warnings
5
  from transformers import logging as hf_logging
6
  from .config import HF_REPO
 
43
  backend=backend,
44
  hf_token=hf_token
45
  )
46
+ click.echo(f"Prediction results: {results}")
47
+
48
+ @cli.command()
49
+ @click.argument('file_path')
50
+ @click.option('--hf-token', envvar="HF_TOKEN", help="HF API token (or set HF_TOKEN env variable)")
51
+ @click.option('--hf-repo', default=HF_REPO, help="Hugging Face model repo")
52
+ @click.option('--backend',
53
+ type=click.Choice(['local', 'hf'], case_sensitive=False),
54
+ default='local',
55
+ help="Inference backend: 'local' (your machine) or 'hf' (Hugging Face API)")
56
+ @click.pass_context
57
+ def evaluate(ctx, file_path, hf_repo, backend, hf_token):
58
+ """Evaluate batch predictions from a zip file"""
59
+ if ctx.obj['DEBUG']:
60
+ click.echo("Debug mode enabled - showing all warnings")
61
+
62
+ results = evaluate_batch(
63
+ file_path,
64
+ hf_repo,
65
+ backend=backend,
66
+ hf_token=hf_token
67
+ )
68
  click.echo(f"Prediction results: {results}")
qwen_classifier/evaluate.py CHANGED
@@ -63,11 +63,11 @@ def preprocessing(df):
63
 
64
 
65
 
66
- def evaluate_batch(text, hf_repo, backend="local", hf_token=None):
67
  if backend == "local":
68
- return _evaluate_local(text, hf_repo)
69
  elif backend == "hf":
70
- return _evaluate_hf_api(text, hf_token)
71
  else:
72
  raise ValueError(f"Unknown backend: {backend}")
73
 
@@ -136,11 +136,11 @@ def _evaluate_local(test_data_path, hf_repo):
136
  return metrics, report
137
 
138
 
139
- def _evaluate_hf_api(text, hf_token=None):
140
  try:
141
  response = requests.post(
142
  f"{SPACE_URL}/evaluate",
143
- json={"text": text}, # This matches the Pydantic model
144
  headers={
145
  "Authorization": f"Bearer {hf_token}",
146
  "Content-Type": "application/json"
 
63
 
64
 
65
 
66
+ def evaluate_batch(file_path, hf_repo, backend="local", hf_token=None):
67
  if backend == "local":
68
+ return _evaluate_local(file_path, hf_repo)
69
  elif backend == "hf":
70
+ return _evaluate_hf_api(file_path, hf_token)
71
  else:
72
  raise ValueError(f"Unknown backend: {backend}")
73
 
 
136
  return metrics, report
137
 
138
 
139
+ def _evaluate_hf_api(file_path, hf_token=None):
140
  try:
141
  response = requests.post(
142
  f"{SPACE_URL}/evaluate",
143
+ json={"file_path": file_path}, # This matches the Pydantic model
144
  headers={
145
  "Authorization": f"Bearer {hf_token}",
146
  "Content-Type": "application/json"