Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
import json
|
3 |
+
from hugchat import hugchat
|
4 |
+
from hugchat.login import Login
|
5 |
+
import os
|
6 |
+
import re
|
7 |
+
import torch
|
8 |
+
from transformers import pipeline
|
9 |
+
import librosa
|
10 |
+
|
# --- HugChat credentials ----------------------------------------------------
# Pulled from environment variables (secrets) so they never live in source.
EMAIL = os.environ.get("EMAIL")
PASSWD = os.environ.get("PASSWORD")

# Fail fast with a machine-readable error when the secrets are missing;
# otherwise hugchat raises a confusing error deep inside the login flow,
# and callers of this script parse stdout as JSON.
if not EMAIL or not PASSWD:
    print(json.dumps({"error": "EMAIL and PASSWORD environment variables must be set"}))
    sys.exit(1)

# Directory where hugchat persists session cookies between runs.
cookie_path_dir = "./cookies/"
os.makedirs(cookie_path_dir, exist_ok=True)

# Login to HugChat and build the chatbot client from the session cookies.
sign = Login(EMAIL, PASSWD)
cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True)
chatbot = hugchat.ChatBot(cookies=cookies.get_dict())

# --- Whisper ASR configuration ----------------------------------------------
MODEL_NAME = "openai/whisper-large-v3-turbo"
# transformers accepts a GPU index (int) or the string "cpu" as the device.
device = 0 if torch.cuda.is_available() else "cpu"

# Initialize the Whisper pipeline once at import time; it is reused for
# every transcription in this process.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,  # split long audio into 30 s chunks
    device=device,
)
def transcribe_audio(audio_path):
    """Transcribe a local audio file to Urdu text with the Whisper pipeline.

    Returns the transcription string on success. On any failure, returns a
    string beginning with "Error processing audio:" — the caller detects
    failure by looking for the substring "Error" in the result.
    """
    try:
        # Whisper expects 16 kHz mono input; librosa handles both the
        # resampling and the channel down-mix in a single call.
        waveform, _sample_rate = librosa.load(audio_path, sr=16000, mono=True)

        result = pipe(
            waveform,
            batch_size=8,
            generate_kwargs={"language": "urdu"},
        )
        return result["text"]

    except Exception as exc:  # best-effort: report the failure as text
        return f"Error processing audio: {exc}"
# --- Command-line arguments -------------------------------------------------
# argv[1]: path to the audio file; argv[2]: original file name, used only
# for metadata extraction (e.g. 'agent2_5_Multan_Pakistan.mp3').
# Validate up front so a missing argument produces a JSON error on stdout
# instead of an IndexError traceback (consumers of this script parse stdout).
if len(sys.argv) < 3:
    print(json.dumps({"error": "Usage: app.py <audio_path> <file_name>"}))
    sys.exit(1)

audio_path = sys.argv[1]  # Path to the audio file
file_name = sys.argv[2]   # File name for metadata

# Transcribe the audio to get Urdu text. transcribe_audio signals failure
# by returning an "Error ..." string rather than raising.
urdu_text = transcribe_audio(audio_path)
if "Error" in urdu_text:
    print(json.dumps({"error": urdu_text}))
    sys.exit(1)
61 |
+
def extract_metadata(file_name):
    """
    Extract agent and location metadata from an audio file name.

    Assumes underscore-separated chunks where the second-last chunk is the
    city, e.g. 'agent2_5_Multan_Pakistan.mp3' -> location = 'Multan'.

    Args:
        file_name (str): The name of the audio file.

    Returns:
        dict: {"agent_username": ..., "location": ...}; both values are
        "Unknown" when the name has fewer than three underscore chunks.
    """
    # os.path.splitext strips only the FINAL extension. The previous
    # file_name.split(".")[0] truncated at the first dot, so a name like
    # 'agent2.5_x_Multan_Pakistan.mp3' lost everything after 'agent2'.
    base = os.path.splitext(file_name)[0]
    parts = base.split("_")
    if len(parts) >= 3:
        return {
            "agent_username": parts[0],
            "location": parts[-2]  # Second-last chunk is the city
        }
    return {"agent_username": "Unknown", "location": "Unknown"}
# Extract metadata from file name
metadata = extract_metadata(file_name)
location = metadata["location"]

# Step 1: Translate Urdu to English with context-aware correction.
# A single prompt asks the model both to repair likely ASR mistakes
# (misheard crop/disease terms) and to translate, so the correction can
# use the surrounding context of the transcription.
english_text = chatbot.chat(
    f"The following Urdu text is about crops and their diseases, but it may contain errors or misheard words due to audio transcription issues. Please use context to infer the most likely correct crop names and disease terms, and then translate the text to English:\n\n{urdu_text}"
).wait_until_done()

# Step 2: Extract specific crops and diseases from the English text.
# The prompt pins an exact "N. Crop:" / "Diseases:" / "- Disease" layout
# so the response can be parsed below with simple line matching.
extraction_prompt = f"""
Below is an English text about specific crops and possible diseases/pests:

{english_text}

Identify each specific Crop (like wheat, rice, cotton, etc.) mentioned and list any Diseases or Pests affecting that crop.

- If a disease or pest is mentioned without specifying a particular crop, list it under "No crop:".
- If a crop is mentioned but no diseases or pests are specified for it, include it with an empty diseases list.
- Do not include general terms like "crops" as a specific crop name.

Format your answer in this style (one entry at a time):

For specific crops with diseases:
1. CropName:
Diseases:
- DiseaseName
- AnotherDisease

For specific crops with no diseases:
2. NextCrop:
Diseases:

For standalone diseases:
3. No crop:
Diseases:
- StandaloneDisease

No extra text, just the structured bullet list.
"""
extraction_response = chatbot.chat(extraction_prompt).wait_until_done()
# Step 3: Parse the extraction response into structured records.
def parse_crops_and_diseases(response_text):
    """
    Parse the model's "N. Crop:" / "Diseases:" / "- Disease" bullet list.

    Args:
        response_text (str): Raw text of the extraction response.

    Returns:
        list[dict]: Entries of the form {"crop": str | None, "diseases": [str]}.
        "crop" is None for the standalone "No crop:" section.
    """
    entries = []
    current_crop = None
    current_diseases = []

    for line in response_text.splitlines():
        line = line.strip()
        if not line:
            continue

        # Section headers look like "1. Wheat:" or "3. No crop:"
        match_crop = re.match(r'^(\d+)\.\s*(.+?):$', line)
        if match_crop:
            # Flush the previous section, if anything was collected.
            if current_crop is not None or current_diseases:
                entries.append({
                    "crop": current_crop,
                    "diseases": current_diseases
                })
            crop_name = match_crop.group(2).strip()
            # Fold general headings into the "no crop" bucket.
            if crop_name.lower() in ["no crop", "crops", "general crops"]:
                current_crop = None  # Standalone diseases
            else:
                current_crop = crop_name
            current_diseases = []
            continue

        # "Diseases:" is just a label line; skip it.
        if line.lower().startswith("diseases:"):
            continue

        # Bullet lines ("- Rust") are diseases for the current section.
        if line.startswith('-'):
            disease_name = line.lstrip('-').strip()
            if disease_name:
                current_diseases.append(disease_name)

    # Flush the final section, if present.
    if current_crop is not None or current_diseases:
        entries.append({
            "crop": current_crop,
            "diseases": current_diseases
        })
    return entries


crops_and_diseases = parse_crops_and_diseases(extraction_response)
# Step 4: Ask the chatbot for the current temperature at the location.
temp_prompt = f"Give me weather of {location} in Celsius numeric only."
temperature_response = chatbot.chat(temp_prompt).wait_until_done()

# Parse temperature: take the first integer in the reply, keeping the sign.
# The previous pattern r'(\d+)' dropped a leading minus, turning "-5" into 5.
temperature = None
temp_match = re.search(r'(-?\d+)', temperature_response)
if temp_match:
    temperature = int(temp_match.group(1))
# Step 5: Emit the final result as a single JSON object on stdout.
# Key order matches insertion order, so the serialized form is stable.
result_payload = dict(
    urdu_text=urdu_text,
    english_text=english_text,
    crops_and_diseases=crops_and_diseases,
    temperature=temperature,
    location=location,
)
print(json.dumps(result_payload))