# Speech recognition: transcribe audio with Whisper via a Gradio interface

import gradio as gr
from transformers import pipeline

# Load the Whisper base model as an automatic-speech-recognition pipeline.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")

def transcribe(audio):
    # `audio` is a path to the uploaded/recorded file; the pipeline returns
    # a dict with the transcription under the "text" key.
    result = asr(audio)
    return result["text"]

demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Whisper Speech Recognition",
)

demo.launch()
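
# A possible extension (a sketch, not part of the original app): whisper-base
# transcribes roughly 30 seconds of audio at a time, so longer recordings can
# be handled by building the pipeline with chunked long-form decoding.
# `chunk_length_s` is a transformers pipeline argument; the 30-second value
# and the `transcribe_long` name are assumptions to tune or rename as needed.

asr_long = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-base",
    chunk_length_s=30,  # split the input into ~30 s windows and stitch results
)

def transcribe_long(audio):
    # Same contract as `transcribe`: takes a file path, returns plain text.
    return asr_long(audio)["text"]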