jethrowang committed
Commit cda51a4 · verified · 1 Parent(s): 2e10e56

Update app.py

Files changed (1): app.py +3 -3
app.py CHANGED

@@ -24,7 +24,7 @@ TARGET_SAMPLE_RATE = 16000
 model = TinyVAD(1, 32, 64, patch_size=8, num_blocks=2,
                 sinc_conv=SINC_CONV, ssm=SSM)
 checkpoint_path = './sincvad.ckpt'
-checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
+checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'), weights_only=True)
 model.load_state_dict(checkpoint, strict=False)
 model.eval()
 
@@ -173,8 +173,8 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
             # Separate recording and file upload
-            record_input = gr.Audio(source="microphone", type="filepath", label="Record Audio")
-            upload_input = gr.Audio(source="upload", type="filepath", label="Upload Audio")
+            record_input = gr.Audio(sources="microphone", type="filepath", label="Record Audio")
+            upload_input = gr.Audio(sources="upload", type="filepath", label="Upload Audio")
             threshold_input = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.1, label="Threshold")
         with gr.Column():
             prediction_output = gr.Textbox(label="Prediction")
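
For context, both changes track API updates in the app's dependencies: passing weights_only=True makes torch.load deserialize only tensors and primitive types (the default in recent PyTorch releases), and Gradio 4.x replaced the removed source= keyword of gr.Audio with sources=. Below is a minimal sketch of the two updated calls in isolation, reusing the checkpoint path from the diff; the rest of app.py (TinyVAD, SINC_CONV, SSM, the prediction wiring) is omitted.

import torch
import gradio as gr

# Safer checkpoint loading: weights_only=True refuses to unpickle arbitrary
# Python objects, so a tampered .ckpt cannot execute code when loaded.
state_dict = torch.load('./sincvad.ckpt',
                        map_location=torch.device('cpu'),
                        weights_only=True)

with gr.Blocks() as demo:
    # Gradio 4.x: `sources=` replaces the old `source=` keyword on gr.Audio.
    record_input = gr.Audio(sources="microphone", type="filepath",
                            label="Record Audio")
    upload_input = gr.Audio(sources="upload", type="filepath",
                            label="Upload Audio")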