import gradio as gr
import random
from PIL import Image
import time
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification
# Define the move categories
moves = ["rock", "paper", "scissors"]
computer_images = {
    "rock": "computer_rock.png",
    "paper": "computer_paper.png",
    "scissors": "computer_scissors.png",
}
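# These images are opened with relative paths below, so the three PNG files
# must sit next to app.py in the repo.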
# Load the model
checkpoint = "facebook/deit-tiny-patch16-224"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForImageClassification.from_pretrained(
    checkpoint,
    num_labels=3,
    id2label={0: "rock", 1: "paper", 2: "scissors"},
    label2id={"rock": 0, "paper": 1, "scissors": 2},
    ignore_mismatched_sizes=True,
)
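# Note: with num_labels=3 and ignore_mismatched_sizes=True, the pretrained
# 1000-class DeiT head is dropped and a fresh 3-class head is randomly
# initialized, so predictions stay essentially random until the model is
# fine-tuned on rock/paper/scissors images.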
# Game rules
def game_logic(user_move, computer_move):
    if user_move == computer_move:
        return "It's a tie!"
    if (user_move == "rock" and computer_move == "scissors") or \
       (user_move == "paper" and computer_move == "rock") or \
       (user_move == "scissors" and computer_move == "paper"):
        return "You win!"
    else:
        return "You lose!"
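# Quick sanity check of the rules (illustrative only):
#   game_logic("rock", "scissors")  -> "You win!"
#   game_logic("paper", "scissors") -> "You lose!"
#   game_logic("rock", "rock")      -> "It's a tie!"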
# Countdown + play
def full_play(live_image):
    # Server-side countdown (shows up in the logs, not in the UI)
    for step in ["3...", "2...", "1...", "GO!"]:
        print(step)
        time.sleep(1)
    # Classify the webcam image
    inputs = processor(images=live_image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_class_idx = logits.argmax(-1).item()
    user_move = model.config.id2label[predicted_class_idx]
    computer_move = random.choice(moves)
    computer_img = Image.open(computer_images[computer_move])
    result = game_logic(user_move, computer_move)
    return live_image, computer_img, f"You chose {user_move}, computer chose {computer_move}. {result}"
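# If you also want a confidence score next to the predicted move, a softmax
# over the logits gives one. classify_move is a hypothetical helper sketched
# here, not part of the original app; full_play could call it instead of
# running the model inline.
def classify_move(image):
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1)[0]
    idx = int(probs.argmax().item())
    return model.config.id2label[idx], float(probs[idx])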
# Build the Gradio app
with gr.Blocks() as demo:
    gr.Markdown("# ✂️ 🪨 📄 Rock Paper Scissors - LIVE Game!")
    # Gradio 4.x webcam API; on Gradio 3.x this was gr.Image(source="webcam", tool=None, ...)
    webcam_input = gr.Image(sources=["webcam"], type="pil", label="Show your move!")
    play_button = gr.Button("Start Countdown and Play")
    user_output = gr.Image(label="Your Move")
    computer_output = gr.Image(label="Computer's Move")
    result_text = gr.Textbox(label="Result")
    play_button.click(
        fn=full_play,
        inputs=[webcam_input],
        outputs=[user_output, computer_output, result_text],
    )
demo.launch()
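# On Hugging Face Spaces, a bare launch() is enough; when running locally,
# demo.launch(share=True) also prints a temporary public link.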