SawitDetection / app.py
import os
import uuid
import pandas as pd
import cv2
from ultralytics import YOLO
import gradio as gr
from huggingface_hub import (
    create_repo,
    get_full_repo_name,
    upload_file,
)
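# Note: create_repo is currently unused; get_full_repo_name and upload_file are
# only referenced by the commented-out Hub upload steps further below, so these
# imports only matter if those blocks are re-enabled.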
# Initialize the YOLO model from the trained weights
model = YOLO('best.pt')

# Hugging Face token, read from the environment variable / Space secret named 'token'
hf_token = os.environ.get('token')
# Initialize an empty list to store the results
results_list = []
# Run detection on a single uploaded image and append its counts to results_list
def palm_detection(image):
    results = model(image)
    annotated_frame = results[0].plot()

    # Count detections per class: class 0 = 'matang' (ripe), class 1 = 'mentah' (unripe)
    total_objects = len(results[0].boxes)
    labels = results[0].boxes.cls.tolist()
    matang_count = labels.count(0)
    mentah_count = labels.count(1)

    # Generate a unique filename for the annotated image
    unique_id = str(uuid.uuid4())
    filename = f"{unique_id}.jpg"

    # Optional: upload the annotated image to this Space's repository
    # _, buffer = cv2.imencode('.jpg', annotated_frame)
    # binary_image = buffer.tobytes()
    # repo_name = get_full_repo_name(model_id="SawitDetection", token=hf_token)
    # img_file_url = upload_file(
    #     path_or_fileobj=binary_image,
    #     path_in_repo=filename,
    #     repo_id=repo_name,
    #     repo_type="space",
    #     token=hf_token,
    # )

    # Append this image's results to the running list
    results_list.append({
        'image_file': filename,
        'total_objects': total_objects,
        'matang_count': matang_count,
        'mentah_count': mentah_count
    })
    results_df = pd.DataFrame(results_list)

    csv_filename = 'detection_results.csv'
    csv_output_path = os.path.join(csv_filename)
    # Optional: save the results table and upload it to this Space's repository
    # results_df.to_csv(csv_output_path, index=False)
    # csv_file_url = upload_file(
    #     path_or_fileobj=csv_output_path,
    #     path_in_repo=csv_filename,
    #     repo_id=repo_name,
    #     repo_type="space",
    #     token=hf_token,
    # )

    return annotated_frame, results_df
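
# Example of calling palm_detection() outside the Gradio UI for a quick local
# check (hypothetical file names; assumes a "sample.jpg" exists next to this script):
#
#   frame, df = palm_detection(cv2.imread("sample.jpg"))
#   cv2.imwrite("annotated_sample.jpg", frame)
#   print(df)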
with gr.Blocks(theme="soft", title="Palm Detector") as palm_detector:
    gr.Markdown(
        """
        <h1 style="text-align:center">Palm Detection</h1>
        <p style="text-align:center">Upload an image for palm detection. Press the "Process Image" button, and the model will analyze the image to detect palms.</p>
        """
    )
    with gr.Row():
        with gr.Column():
            image = gr.Image()
            process_btn = gr.Button("Process Image")
        img_outputs = gr.Image(label="Detection Results")
        outputs = gr.Dataframe(type="pandas")
    process_btn.click(fn=palm_detection, inputs=[image], outputs=[img_outputs, outputs])
if __name__ == "__main__":
    palm_detector.launch()