Create whisper_model.py
whisper_model.py ADDED (+31 -0)
@@ -0,0 +1,31 @@
+import whisper
+from ipex_llm import optimize_model
+
+def has_intersection(t1, t2):
+    if t1[1] < t2[0] or t2[1] < t1[0]:
+        return False
+    else:
+        return True
+
+class AudioTranslator():
+    def __init__(self, args):
+        self.model = whisper.load_model(args.whisper_version, download_root='checkpoints')
+        self.model = optimize_model(self.model)
+
+    def __call__(self, video_path):
+        """
+        input: video_path (str)
+        output: audio_results (list)
+        """
+        print("Extract the audio results.")
+        audio_results = self.model.transcribe(video_path, task='translate')["segments"]
+        print("Finished.")
+        return audio_results
+
+    def match(self, audio_results):
+        transcript = ''
+        for res in audio_results:
+            transcript += res['text'] + ' '
+            # if has_intersection((start, end), (res["start"], res["end"])):
+            #     transcript += res['text'] + ' '
+        return transcript
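For reference, a minimal usage sketch (not part of this commit), assuming an args object that exposes the whisper_version attribute __init__ reads; the 'base' model size and the video path below are placeholder assumptions:

from types import SimpleNamespace
from whisper_model import AudioTranslator

# Hypothetical args object; 'base' is an assumed Whisper model size.
args = SimpleNamespace(whisper_version='base')
translator = AudioTranslator(args)
# Placeholder path; Whisper's transcribe() accepts any media file ffmpeg can decode.
segments = translator('sample_video.mp4')
# match() concatenates the translated segment texts into a single transcript string.
print(translator.match(segments))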