import click
from .predict import predict_single
import warnings
from transformers import logging as hf_logging
def configure_logging(debug):
    """Set warning/log verbosity: quiet by default, verbose when *debug* is true."""
    if debug:
        # Surface everything: restore default warning behavior and verbose HF logs.
        hf_logging.set_verbosity_info()
        warnings.simplefilter("default")
    else:
        # Quiet mode: mute the noisy checkpoint-weights warning and HF chatter.
        hf_logging.set_verbosity_error()
        warnings.filterwarnings("ignore", message="Some weights of the model checkpoint")
@click.group()
@click.option('--debug', is_flag=True, help="Enable debug output including warnings")
@click.pass_context
def cli(ctx, debug):
    """Qwen Multi-label Classifier CLI"""
    # Apply verbosity settings first, then stash the flag so subcommands
    # can read it via ctx.obj['DEBUG'].
    configure_logging(debug)
    ctx.ensure_object(dict)
    ctx.obj['DEBUG'] = debug
@cli.command()
@click.argument('text')
@click.option('--hf-token', envvar="HF_TOKEN", help="HF API token (or set HF_TOKEN env variable)")
@click.option('--hf-repo', default="KeivanR/Qwen2.5-1.5B-Instruct-MLB-clf_lora-1743189446", help="Hugging Face model repo")
@click.option('--backend',
              type=click.Choice(['local', 'hf'], case_sensitive=False),
              default='local',
              help="Inference backend: 'local' (your machine) or 'hf' (Hugging Face API)")
@click.pass_context
def predict(ctx, text, hf_repo, backend, hf_token):
    """Make prediction on a single text"""
    if ctx.obj['DEBUG']:
        click.echo("Debug mode enabled - showing all warnings")
    # predict_single dispatches to the chosen backend; hf_token is
    # presumably only used by the 'hf' backend — verify in predict_single.
    results = predict_single(
        text,
        hf_repo,
        backend=backend,
        hf_token=hf_token
    )
    click.echo(f"Prediction results: {results}")