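"""Smoke test for the CSM-1B speech model.

Logs in to the Hugging Face Hub, loads the generator from generator.py,
synthesizes a short sample sentence, and writes it to test_output.wav.
"""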
import os
import torch
import torchaudio
# import spaces
from generator import Segment, load_csm_1b
from huggingface_hub import login

def login_huggingface():
    """Login to Hugging Face Hub using token from environment variable or user input"""
    hf_token = os.environ.get("HF_TOKEN")
    
    if not hf_token:
        print("HF_TOKEN not found in environment variables.")
        hf_token = input("Please enter your Hugging Face token: ")
    
    if hf_token:
        print("Logging in to Hugging Face Hub...")
        login(token=hf_token)
        print("Login successful!")
        return True
    else:
        print("No token provided. Some models may not be accessible.")
        return False

# @spaces.GPU
def generate_test_audio(text, speaker_id, device):
    """Generate test audio using ZeroGPU"""
    generator = load_csm_1b(device=device)
    print("Model loaded successfully!")
    
    print(f"Generating audio for text: '{text}'")
    audio = generator.generate(
        text=text,
        speaker=speaker_id,
        context=[],
        max_audio_length_ms=10000,
        temperature=0.9,
        topk=50
    )
    
    return audio, generator.sample_rate

def test_model():
    print("Testing CSM-1B model...")
    
    # Login to Hugging Face Hub
    login_huggingface()
    
    # Check if GPU is available and configure the device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")
    
    # Load CSM-1B model and generate audio
    print("Loading CSM-1B model...")
    try:
        # Generate a short audio sample
        text = "Hello, this is a test of the CSM-1B model."
        speaker_id = 0
        
        audio, sample_rate = generate_test_audio(text, speaker_id, device)
        
        # Save audio to file
        output_path = "test_output.wav"
        # torchaudio.save expects a CPU tensor shaped (channels, samples)
        torchaudio.save(output_path, audio.unsqueeze(0).cpu(), sample_rate)
        print(f"Audio saved to file: {output_path}")
        
        print("Test completed!")
    except Exception as e:
        print(f"Error testing model: {e}")
        print("Please check your token and access permissions.")

if __name__ == "__main__":
    test_model()
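
# Example invocation (assuming this file is saved as test_model.py; the token
# value below is a placeholder, not a real credential):
#   HF_TOKEN=hf_xxxxxxxx python test_model.py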