Spaces: Running on Zero
update for zero-gpu
app.py CHANGED
@@ -6,23 +6,11 @@ from huggingface_hub import hf_hub_download
 import numpy as np
 import sphn
 import torch
+import spaces
 
 from moshi.models import loaders
 
 
-def seed_all(seed):
-    torch.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
-        torch.cuda.manual_seed_all(seed)  # for multi-GPU setups
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.backends.cudnn.deterministic = True
-    torch.backends.cudnn.benchmark = False
-
-
-seed_all(42424242)
-
 device = "cuda" if torch.cuda.device_count() else "cpu"
 num_codebooks = 32
 
@@ -34,6 +22,7 @@ mimi.eval()
 print("mimi loaded")
 
 
+@spaces.GPU
 def mimi_streaming_test(input_wave, max_duration_sec=10.0):
     pcm_chunk_size = int(mimi.sample_rate / mimi.frame_rate)
     # wget https://github.com/metavoiceio/metavoice-src/raw/main/assets/bria.mp3
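For context, the ZeroGPU pattern this commit adopts is to `import spaces` and decorate the GPU-bound entry point with `@spaces.GPU`, so a GPU is attached only while that call runs. Below is a minimal, self-contained sketch of the same pattern; the Gradio wiring, the toy model, and the function name are illustrative placeholders, not the contents of the actual app.py.

```python
# Minimal ZeroGPU sketch (illustrative; not the actual app.py).
import gradio as gr
import spaces
import torch

# Module-level setup mirrors the app above: pick a device and build the model once.
device = "cuda" if torch.cuda.device_count() else "cpu"
model = torch.nn.Linear(8, 8).to(device)  # placeholder for the real Mimi model


@spaces.GPU  # ZeroGPU attaches a GPU only while this function executes
def run_once(text: str) -> str:
    # Placeholder GPU work standing in for the streaming encode/decode loop.
    with torch.no_grad():
        x = torch.randn(1, 8, device=device)
        y = model(x)
    return f"{text}: ran on {device}, output norm = {y.norm().item():.3f}"


demo = gr.Interface(fn=run_once, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()
```

With this setup the Space holds no GPU at rest; the decorator requests one per decorated call, and `spaces.GPU` also accepts a `duration=` argument to bound how long the GPU may be held.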