Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -3,23 +3,25 @@ import uuid
|
|
3 |
import pandas as pd
|
4 |
import cv2
|
5 |
from ultralytics import YOLO
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
# Initialize the YOLO model
|
8 |
-
model = YOLO('best.pt')
|
9 |
-
|
10 |
-
# Define directories
|
11 |
-
image_dir = 'datasetsw/valid/ripe'
|
12 |
-
output_dir = 'datasetsw/valid/test_ripe'
|
13 |
|
14 |
-
|
15 |
-
image_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith(('.jpg', '.jpeg', '.png'))]
|
16 |
|
17 |
# Initialize an empty list to store the results
|
18 |
results_list = []
|
19 |
|
20 |
# Process each image
|
21 |
-
|
22 |
-
results = model(
|
23 |
annotated_frame = results[0].plot()
|
24 |
|
25 |
total_objects = len(results[0].boxes)
|
@@ -29,22 +31,50 @@ for image_file in image_files:
|
|
29 |
|
30 |
# Generate a unique filename and save the annotated image
|
31 |
unique_id = str(uuid.uuid4())
|
32 |
-
|
33 |
-
|
34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
|
36 |
# Append the results to the list
|
37 |
results_list.append({
|
38 |
-
'image_file':
|
39 |
'total_objects': total_objects,
|
40 |
'matang_count': matang_count,
|
41 |
'mentah_count': mentah_count
|
42 |
})
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
import pandas as pd
|
4 |
import cv2
|
5 |
from ultralytics import YOLO
|
6 |
+
import gradio as gr
|
7 |
+
from huggingface_hub import (
|
8 |
+
create_repo,
|
9 |
+
get_full_repo_name,
|
10 |
+
upload_file,
|
11 |
+
)
|
12 |
|
13 |
import os

# Initialize the YOLO segmentation model from the training run's best checkpoint.
# NOTE(review): relative path assumes the app is launched from the repo root —
# confirm against the Space's working directory.
model = YOLO('runs/segment/train/weights/best.pt')

# Hugging Face token used by upload_file() calls below.
# NOTE(review): os.environ.get returns None when HUGGINGFACE_TOKEN is unset,
# which will make the uploads fail at request time — confirm the Space secret exists.
hf_token = os.environ.get('HUGGINGFACE_TOKEN')

# Accumulates one result dict per processed image; palm_detection appends to it.
results_list = []
|
21 |
|
22 |
# Process each image
|
23 |
+
def palm_detection(image):
|
24 |
+
results = model(image)
|
25 |
annotated_frame = results[0].plot()
|
26 |
|
27 |
total_objects = len(results[0].boxes)
|
|
|
31 |
|
32 |
# Generate a unique filename and save the annotated image
|
33 |
unique_id = str(uuid.uuid4())
|
34 |
+
filename = f"{unique_id}.jpg"
|
35 |
+
_, buffer = cv2.imencode('.jpg', annotated_frame)
|
36 |
+
binary_image = buffer.tobytes()
|
37 |
+
repo_name = get_full_repo_name(model_id="SawitDetection", token=hf_token)
|
38 |
+
img_file_url = upload_file(
|
39 |
+
path_or_fileobj=binary_image,
|
40 |
+
path_in_repo=filename,
|
41 |
+
repo_id=repo_name,
|
42 |
+
repo_type="space",
|
43 |
+
token=hf_token,
|
44 |
+
)
|
45 |
|
46 |
# Append the results to the list
|
47 |
results_list.append({
|
48 |
+
'image_file': filename,
|
49 |
'total_objects': total_objects,
|
50 |
'matang_count': matang_count,
|
51 |
'mentah_count': mentah_count
|
52 |
})
|
53 |
+
|
54 |
+
results_df = pd.DataFrame(results_list)
|
55 |
+
csv_filename = 'detection_results.csv'
|
56 |
+
csv_output_path = os.path.join(csv_filename)
|
57 |
+
results_df.to_csv(csv_output_path, index=False)
|
58 |
+
csv_file_url = upload_file(
|
59 |
+
path_or_fileobj=csv_output_path,
|
60 |
+
path_in_repo=csv_filename,
|
61 |
+
repo_id=repo_name,
|
62 |
+
repo_type="space",
|
63 |
+
token=hf_token,
|
64 |
+
)
|
65 |
+
return annotated_frame, results_df
|
66 |
+
|
67 |
+
# Gradio UI: one image input, one trigger button, and two outputs
# (annotated image + results table) wired to palm_detection.
# Typos fixed: "Detectior" -> "Detector" (window title), "Proccess" -> "Process".
with gr.Blocks(theme="soft", title="Palm Detector") as palm_detector:
    gr.Markdown(
        """
        # Palm Detection
        """)
    with gr.Row():
        with gr.Column():
            # Input side: uploaded image plus the processing trigger.
            image = gr.Image()
            process_btn = gr.Button("Process Image")
        # Output side: annotated detection image and the per-image results table.
        # NOTE(review): indentation was lost in the scraped diff; outputs are
        # assumed to sit beside the input column in the same row — confirm
        # against the live app layout.
        img_outputs = gr.Image(label="Detection Results")
        outputs = gr.components.Dataframe(type="pandas")
    # palm_detection returns (annotated_frame, results_df), matching the two outputs.
    process_btn.click(fn=palm_detection, inputs=[image], outputs=[img_outputs, outputs])

if __name__ == "__main__":
    palm_detector.launch()
|