import streamlit as st
import numpy as np
import librosa
import librosa.display
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import plotly.express as px
import math
# -------------------------------
# CNN Model for Audio Analysis
# -------------------------------
class AudioCNN(nn.Module):
    def __init__(self):
        super(AudioCNN, self).__init__()
        # Convolutional layers
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        # Pooling layer
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Fully connected layers; fc1 is built lazily in forward() because its
        # input size depends on the spectrogram dimensions
        self.fc1 = None
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 10)
        # Dropout for regularization
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x1 = F.relu(self.conv1(x))
        x2 = self.pool(x1)
        x3 = F.relu(self.conv2(x2))
        x4 = self.pool(x3)
        x5 = F.relu(self.conv3(x4))
        x6 = self.pool(x5)
        # Lazily create fc1 once the flattened feature size is known
        if self.fc1 is None:
            fc1_input_size = x6.numel() // x6.size(0)
            self.fc1 = nn.Linear(fc1_input_size, 256)
        x7 = x6.view(x6.size(0), -1)
        x8 = F.relu(self.fc1(x7))
        x9 = self.dropout(x8)
        x10 = F.relu(self.fc2(x9))
        x11 = self.fc3(x10)
        # Return logits, the three pooling-layer outputs, and the dense activation
        return x11, [x2, x4, x6], x8
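
# Note (assumption): on recent PyTorch versions, nn.LazyLinear infers its input
# size on the first forward pass and could replace the manual lazy construction
# above, e.g. self.fc1 = nn.LazyLinear(256), removing the None check in
# forward() entirely.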
# -------------------------------
# Audio Processing Functions
# -------------------------------
def load_audio(file):
    audio, sr = librosa.load(file, sr=None, mono=True)
    return audio, sr
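
# Note: sr=None preserves the file's native sample rate instead of resampling
# to librosa's 22050 Hz default, and in practice librosa.load handles the
# file-like object Streamlit's uploader returns, so no temporary file is needed.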
def apply_fft(audio):
    fft = np.fft.fft(audio)
    magnitude = np.abs(fft)
    phase = np.angle(fft)
    return fft, magnitude, phase
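
# Note: since the input is real-valued, a one-sided analysis with np.fft.rfft
# would roughly halve memory and compute; a minimal sketch:
#
#     spectrum = np.fft.rfft(audio)               # non-negative frequencies only
#     freqs = np.fft.rfftfreq(len(audio), 1 / sr)
#
# The full two-sided FFT is kept here because filter_fft() below masks the
# complete spectrum before np.fft.ifft reconstructs the waveform.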
def filter_fft(fft, percentage):
    magnitude = np.abs(fft)
    # Rank bins by magnitude, strongest first
    sorted_indices = np.argsort(magnitude)[::-1]
    num_keep = int(len(sorted_indices) * percentage / 100)
    # Zero out everything except the strongest `percentage`% of bins
    mask = np.zeros_like(fft)
    mask[sorted_indices[:num_keep]] = 1
    return fft * mask
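
# Because the signal is real, its FFT is conjugate-symmetric: the bin at -f has
# the same magnitude as the bin at +f, so top-k selection keeps bins in
# conjugate pairs (up to ties) and np.fft.ifft of the masked spectrum is almost
# exactly real; the .real taken at reconstruction time discards only numerical
# residue.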
def create_spectrogram(audio, sr):
    n_fft = 2048
    hop_length = 512
    S = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length)
    spectrogram = np.abs(S)
    return spectrogram, n_fft, hop_length
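
# STFT resolution trade-off: n_fft=2048 gives a frequency resolution of
# sr / n_fft per bin (~21.5 Hz at a 44.1 kHz sample rate), while hop_length=512
# produces one time frame per ~11.6 ms at that rate; a larger n_fft sharpens
# frequency detail at the cost of time detail.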
# -------------------------------
# Visualization Functions
# -------------------------------
def plot_waveform(audio, sr, title):
    fig = go.Figure()
    time = np.arange(len(audio)) / sr
    fig.add_trace(go.Scatter(x=time, y=audio, mode='lines'))
    fig.update_layout(title=title, xaxis_title='Time (s)', yaxis_title='Amplitude')
    return fig

def create_waveform_table(audio, sr, num_samples=100):
    time = np.arange(len(audio)) / sr
    indices = np.linspace(0, len(audio) - 1, num_samples, dtype=int)
    df = pd.DataFrame({"Time (s)": time[indices], "Amplitude": audio[indices]})
    return df
def plot_fft(magnitude, phase, sr):
    fig = make_subplots(rows=2, cols=1, subplot_titles=('Magnitude Spectrum', 'Phase Spectrum'))
    freq = np.fft.fftfreq(len(magnitude), 1/sr)
    fig.add_trace(go.Scatter(x=freq, y=magnitude, mode='lines', name='Magnitude'), row=1, col=1)
    fig.add_trace(go.Scatter(x=freq, y=phase, mode='lines', name='Phase'), row=2, col=1)
    fig.update_xaxes(title_text='Frequency (Hz)', row=1, col=1)
    fig.update_xaxes(title_text='Frequency (Hz)', row=2, col=1)
    fig.update_yaxes(title_text='Magnitude', row=1, col=1)
    fig.update_yaxes(title_text='Phase (radians)', row=2, col=1)
    return fig
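
# Note: this plots the two-sided spectrum in raw FFT bin order, so negative
# frequencies appear after the positive half; applying np.fft.fftshift to freq,
# magnitude, and phase would reorder them so negative frequencies plot to the
# left of zero.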
def plot_fft_bands(magnitude, phase, sr):
    freq = np.fft.fftfreq(len(magnitude), 1/sr)
    pos_mask = freq >= 0
    freq, magnitude, phase = freq[pos_mask], magnitude[pos_mask], phase[pos_mask]
    bass_mask = (freq >= 20) & (freq < 250)
    mid_mask = (freq >= 250) & (freq < 4000)
    treble_mask = (freq >= 4000) & (freq <= sr/2)
    fig = make_subplots(rows=2, cols=1, subplot_titles=('Magnitude Spectrum by Bands', 'Phase Spectrum by Bands'))
    fig.add_trace(go.Scatter(x=freq[bass_mask], y=magnitude[bass_mask], mode='lines', name='Bass'), row=1, col=1)
    fig.add_trace(go.Scatter(x=freq[mid_mask], y=magnitude[mid_mask], mode='lines', name='Mid'), row=1, col=1)
    fig.add_trace(go.Scatter(x=freq[treble_mask], y=magnitude[treble_mask], mode='lines', name='Treble'), row=1, col=1)
    fig.add_trace(go.Scatter(x=freq[bass_mask], y=phase[bass_mask], mode='lines', name='Bass'), row=2, col=1)
    fig.add_trace(go.Scatter(x=freq[mid_mask], y=phase[mid_mask], mode='lines', name='Mid'), row=2, col=1)
    fig.add_trace(go.Scatter(x=freq[treble_mask], y=phase[treble_mask], mode='lines', name='Treble'), row=2, col=1)
    fig.update_xaxes(title_text='Frequency (Hz)', row=1, col=1)
    fig.update_xaxes(title_text='Frequency (Hz)', row=2, col=1)
    fig.update_yaxes(title_text='Magnitude', row=1, col=1)
    fig.update_yaxes(title_text='Phase (radians)', row=2, col=1)
    return fig
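
# The band edges (20 / 250 / 4000 Hz) are conventional bass/mid/treble splits;
# treble is capped at the Nyquist frequency sr/2, above which a sampled signal
# carries no content.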
def create_fft_table(magnitude, phase, sr, num_samples=100):
    freq = np.fft.fftfreq(len(magnitude), 1/sr)
    pos_mask = freq >= 0
    freq, magnitude, phase = freq[pos_mask], magnitude[pos_mask], phase[pos_mask]
    indices = np.linspace(0, len(freq) - 1, num_samples, dtype=int)
    df = pd.DataFrame({
        "Frequency (Hz)": freq[indices],
        "Magnitude": magnitude[indices],
        "Phase (radians)": phase[indices]
    })
    return df
def plot_3d_polar_fft(magnitude, phase, sr):
    # Keep the positive frequencies only
    freq = np.fft.fftfreq(len(magnitude), 1/sr)
    pos_mask = freq >= 0
    freq, mag, ph = freq[pos_mask], magnitude[pos_mask], phase[pos_mask]
    # Convert polar (magnitude, phase) to Cartesian coordinates
    x = mag * np.cos(ph)
    y = mag * np.sin(ph)
    z = freq  # use frequency as the z-axis
    # Downsample to roughly 500 points to keep the Plotly payload small
    step = max(1, len(x) // 500)
    x, y, z, ph = x[::step], y[::step], z[::step], ph[::step]
    # Replicate the curve n_rep times so Plotly can render it as a (degenerate,
    # ribbon-like) surface and draw contour lines over it
    n_rep = 10
    X_surface = np.tile(x, (n_rep, 1))
    Y_surface = np.tile(y, (n_rep, 1))
    Z_surface = np.tile(z, (n_rep, 1))
    surface = go.Surface(
        x=X_surface,
        y=Y_surface,
        z=Z_surface,
        colorscale='Viridis',
        opacity=0.6,
        showscale=False,
        contours={
            "x": {"show": True, "start": float(np.min(x)), "end": float(np.max(x)), "size": float((np.max(x) - np.min(x)) / 10)},
            "y": {"show": True, "start": float(np.min(y)), "end": float(np.max(y)), "size": float((np.max(y) - np.min(y)) / 10)},
            "z": {"show": True, "start": float(np.min(z)), "end": float(np.max(z)), "size": float((np.max(z) - np.min(z)) / 10)},
        },
    )
    scatter = go.Scatter3d(
        x=x,
        y=y,
        z=z,
        mode='markers',
        marker=dict(
            size=3,
            color=ph,  # color points by phase
            colorscale='Viridis',
            opacity=0.8,
            colorbar=dict(title='Phase (radians)')
        )
    )
    fig = go.Figure(data=[surface, scatter])
    fig.update_layout(scene=dict(
        xaxis_title='Real Component',
        yaxis_title='Imaginary Component',
        zaxis_title='Frequency (Hz)',
        camera=dict(eye=dict(x=1.5, y=1.5, z=0.5))
    ), margin=dict(l=0, r=0, b=0, t=0))
    return fig
def plot_spectrogram(spectrogram, sr, hop_length):
    fig, ax = plt.subplots()
    img = librosa.display.specshow(librosa.amplitude_to_db(spectrogram, ref=np.max),
                                   sr=sr, hop_length=hop_length, x_axis='time', y_axis='log', ax=ax)
    plt.colorbar(img, ax=ax, format='%+2.0f dB')
    plt.title('Spectrogram')
    return fig
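
# amplitude_to_db(..., ref=np.max) expresses each bin in decibels relative to
# the loudest bin, so the colorbar tops out at 0 dB and everything else is
# negative.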
def create_spectrogram_table(spectrogram, num_rows=10, num_cols=10):
    sub_spec = spectrogram[:num_rows, :num_cols]
    df = pd.DataFrame(sub_spec,
                      index=[f'Freq Bin {i}' for i in range(sub_spec.shape[0])],
                      columns=[f'Time Bin {j}' for j in range(sub_spec.shape[1])])
    return df

def create_activation_table(activation, num_rows=10, num_cols=10):
    sub_act = activation[:num_rows, :num_cols]
    df = pd.DataFrame(sub_act,
                      index=[f'Row {i}' for i in range(sub_act.shape[0])],
                      columns=[f'Col {j}' for j in range(sub_act.shape[1])])
    return df
# -------------------------------
# Streamlit UI & Main App
# -------------------------------
st.set_page_config(layout="wide")
st.title("Audio Frequency Analysis with CNN and FFT")
st.markdown("""
### Welcome to the Audio Frequency Analysis Tool!
This application allows you to:
- **Upload an audio file** and visualize its waveform along with a data table.
- **Analyze frequency components** using the FFT (with both 2D and enhanced 3D polar plots).
- **Highlight frequency bands:** Bass (20–250 Hz), Mid (250–4000 Hz), Treble (4000 Hz to Nyquist).
- **Filter frequency components** and reconstruct the waveform.
- **Generate a spectrogram** for time-frequency analysis, with a sample data table.
- **Inspect CNN activations** (pooling and dense layers) arranged in grid layouts.
- **Run a final audio classification** for gender (Male/Female) and tone.
""")

# File uploader
uploaded_file = st.file_uploader("Upload an audio file (WAV, MP3, OGG)", type=['wav', 'mp3', 'ogg'])
if uploaded_file is not None:
    audio, sr = load_audio(uploaded_file)

    # --- Section 1: Raw Audio Waveform ---
    st.header("1. Raw Audio Waveform")
    st.markdown("""
    The waveform represents amplitude over time.
    **Graph:** Amplitude vs. time.
    **Data Table:** Sampled values.
    """)
    waveform_fig = plot_waveform(audio, sr, "Original Waveform")
    st.plotly_chart(waveform_fig, use_container_width=True)
    st.dataframe(create_waveform_table(audio, sr))
    # --- Section 2: Frequency Domain Analysis ---
    st.header("2. Frequency Domain Analysis")
    st.markdown("""
    **FFT Analysis:** Decompose the audio into frequency components.
    - **Magnitude Spectrum:** Strength of each frequency.
    - **Phase Spectrum:** Phase angles.
    """)
    fft, magnitude, phase = apply_fft(audio)
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("2D FFT Plot")
        st.plotly_chart(plot_fft(magnitude, phase, sr), use_container_width=True)
    with col2:
        st.subheader("Enhanced 3D Polar FFT Plot with Contours")
        st.plotly_chart(plot_3d_polar_fft(magnitude, phase, sr), use_container_width=True)
    st.subheader("FFT Data Table (Sampled)")
    st.dataframe(create_fft_table(magnitude, phase, sr))
    st.subheader("Frequency Bands: Bass, Mid, Treble")
    st.plotly_chart(plot_fft_bands(magnitude, phase, sr), use_container_width=True)
    # --- Section 3: Frequency Filtering ---
    st.header("3. Frequency Filtering")
    st.markdown("""
    Filter the audio signal by retaining a percentage of the strongest frequencies.
    Adjust the slider to set the retention percentage.
    **Graph:** Filtered waveform.
    **Data Table:** Sampled values.
    """)
    percentage = st.slider("Percentage of frequencies to retain:", 0.1, 100.0, 10.0, 0.1)
    if st.button("Apply Frequency Filter"):
        filtered_fft = filter_fft(fft, percentage)
        # The masked spectrum is conjugate-symmetric, so the inverse FFT is
        # essentially real; .real drops the residual imaginary part
        reconstructed = np.fft.ifft(filtered_fft).real
        col1, col2 = st.columns(2)
        with col1:
            st.plotly_chart(plot_waveform(reconstructed, sr, "Filtered Waveform"), use_container_width=True)
        with col2:
            st.audio(reconstructed, sample_rate=sr)
        st.dataframe(create_waveform_table(reconstructed, sr))
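        # Note (assumption): recent Streamlit versions accept a float NumPy
        # array plus sample_rate in st.audio. If playback clips on loud inputs,
        # a minimal peak-normalization sketch before the st.audio call:
        #
        #     peak = np.max(np.abs(reconstructed))
        #     if peak > 0:
        #         reconstructed = reconstructed / peak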
    # --- Section 4: Spectrogram Analysis ---
    st.header("4. Spectrogram Analysis")
    st.markdown("""
    A spectrogram shows how frequency content evolves over time.
    **Graph:** Spectrogram (log-frequency scale).
    **Data Table:** A subsection of the spectrogram matrix.
    """)
    spectrogram, n_fft, hop_length = create_spectrogram(audio, sr)
    st.pyplot(plot_spectrogram(spectrogram, sr, hop_length))
    st.dataframe(create_spectrogram_table(spectrogram))
    # --- Section 5: CNN Analysis (Pooling & Dense Activations) ---
    st.header("5. CNN Analysis: Pooling and Dense Activations")
    st.markdown("""
    Instead of classification probabilities, inspect the network's internal activations:
    - **Pooling Layer Outputs:** Arranged in a grid layout.
    - **Dense Layer Activation:** The feature vector from the dense layer.
    """)
    if st.button("Run CNN Analysis"):
        # Shape: (batch=1, channels=1, freq_bins, time_frames)
        spec_tensor = torch.tensor(spectrogram[np.newaxis, np.newaxis, ...], dtype=torch.float32)
        # Weights are randomly initialized here; the activations illustrate the
        # architecture, not learned features
        model = AudioCNN()
        with torch.no_grad():
            output, pooling_outputs, dense_activation = model(spec_tensor)
        for idx, activation in enumerate(pooling_outputs):
            st.subheader(f"Pooling Layer {idx+1} Output")
            act = activation[0].cpu().numpy()
            num_channels = act.shape[0]
            # Lay the channels out on a grid, four per row
            ncols = 4
            nrows = math.ceil(num_channels / ncols)
            fig, axes = plt.subplots(nrows, ncols, figsize=(3*ncols, 3*nrows))
            axes = axes.flatten()
            for i in range(nrows * ncols):
                if i < num_channels:
                    axes[i].imshow(act[i], aspect='auto', origin='lower', cmap='viridis')
                    axes[i].set_title(f'Channel {i+1}', fontsize=8)
                axes[i].axis('off')
            st.pyplot(fig)
            st.markdown("**Data Table for Pooling Layer Activation (Channel 1, Sampled)**")
            df_act = create_activation_table(act[0])
            st.dataframe(df_act)
        st.subheader("Dense Layer Activation")
        # The 256-dimensional ReLU output of fc1, returned by AudioCNN.forward
        dense_act = dense_activation[0].cpu().numpy()
        df_dense = pd.DataFrame({
            "Feature Index": np.arange(len(dense_act)),
            "Activation Value": dense_act
        })
        st.plotly_chart(px.bar(df_dense, x="Feature Index", y="Activation Value"), use_container_width=True)
        st.dataframe(df_dense)
    # --- Section 6: Final Audio Classification (Gender & Tone) ---
    st.header("6. Final Audio Classification: Gender and Tone")
    st.markdown("""
    In this final step, a model classifies the audio as Male or Female
    and determines its tone (High Tone vs. Low Tone).
    **Note:** This example uses a placeholder model. Replace the dummy model and its random outputs with your actual pretrained model.
    """)
    if st.button("Run Final Classification"):
        # Extract MFCC features as an example (adjust as needed); averaging over
        # time collapses the sequence to a fixed-length 40-dimensional vector
        mfccs = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=40)
        features = np.mean(mfccs, axis=1)
        features_tensor = torch.tensor(features, dtype=torch.float32).unsqueeze(0)

        # Dummy classifier model for demonstration
        class GenderToneClassifier(nn.Module):
            def __init__(self):
                super(GenderToneClassifier, self).__init__()
                self.fc = nn.Linear(40, 4)  # 4 outputs: [Male, Female, High Tone, Low Tone]

            def forward(self, x):
                return self.fc(x)
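
        # Note: a production model would treat gender and tone as two separate
        # heads with independent softmaxes (two logits each), since a single
        # softmax over all four outputs forces the two tasks to compete for
        # probability mass; the shared softmax below is kept only for the demo.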
        classifier = GenderToneClassifier()
        # In practice, load your pretrained weights here, e.g. via load_state_dict
        with torch.no_grad():
            output = classifier(features_tensor)
            probs = F.softmax(output, dim=1).numpy()[0]
        # Interpret the outputs: the first two entries are gender, the last two are tone
        gender = "Male" if probs[0] > probs[1] else "Female"
        tone = "High Tone" if probs[2] > probs[3] else "Low Tone"
        st.markdown(f"**Predicted Gender:** {gender}")
        st.markdown(f"**Predicted Tone:** {tone}")
        categories = ["Male", "Female", "High Tone", "Low Tone"]
        df_class = pd.DataFrame({"Category": categories, "Probability": probs})
        st.plotly_chart(px.bar(df_class, x="Category", y="Probability"), use_container_width=True)
        st.dataframe(df_class)
# -------------------------------
# Style Enhancements
# -------------------------------
st.markdown("""
<style>
.stButton>button {
    padding: 10px 20px;
    font-size: 16px;
    background-color: #4CAF50;
    color: white;
}
.stSlider>div>div>div>div {
    background-color: #4CAF50;
}
</style>
""", unsafe_allow_html=True)