masimishi committed
Commit 2be798f · verified · 1 Parent(s): 4240a70

Create app.py

Files changed (1)
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
+ import gradio as gr
+ import torch
+ import numpy as np
+ import soundfile as sf
+ import librosa
+ from transformers import pipeline
+
+ pipe = pipeline(
+     "automatic-speech-recognition",
+     model="antony66/whisper-large-v3-russian",
+     torch_dtype=torch.float16,
+     device=0 if torch.cuda.is_available() else -1
+ )
+
+ def transcribe(audio_data):
+     print(f"Received audio data: {audio_data}")
+
+     if audio_data is None:
+         return "Ошибка: не получены аудиоданные"
+
+     wav_file = "temp_audio.wav"
+
+     if isinstance(audio_data, tuple):
+         audio_array, sample_rate = audio_data
+         sf.write(wav_file, audio_array, sample_rate)
+     elif isinstance(audio_data, str):
+         audio_array, sample_rate = librosa.load(audio_data, sr=16000)
+         sf.write(wav_file, audio_array, sample_rate)
+     else:
+         return "Ошибка: неизвестный формат аудиоданных"
+
+     result = pipe(wav_file)
+     return result["text"]
+
+ with gr.Blocks() as app:
+     gr.Markdown("## Распознавание речи с Whisper")
+
+     audio_data = gr.Audio(type="filepath")
+     text_output = gr.Textbox(label="Распознанный текст")
+
+     btn = gr.Button("Распознать")
+     btn.click(transcribe, inputs=audio_data, outputs=text_output)
+
+ app.launch(debug=True)
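
For a quick local sanity check of the model outside the Gradio UI, a minimal sketch based on the pipeline call above (not part of this commit; sample.wav is a hypothetical 16 kHz WAV file in the working directory):

import torch
from transformers import pipeline

# Same ASR pipeline as in app.py, used directly on a local file.
pipe = pipeline(
    "automatic-speech-recognition",
    model="antony66/whisper-large-v3-russian",
    torch_dtype=torch.float16,
    device=0 if torch.cuda.is_available() else -1,
)
print(pipe("sample.wav")["text"])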