Mattral committed on
Commit 1835c00 · verified · 1 Parent(s): c4d7ea9

Create app.py

Files changed (1): app.py +236 -0
app.py ADDED
@@ -0,0 +1,236 @@
import streamlit as st
import numpy as np
import librosa
import librosa.display
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import plotly.express as px
import soundfile as sf
from scipy.signal import stft

# Dummy CNN Model for Audio
class AudioCNN(nn.Module):
    def __init__(self):
        super(AudioCNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(32 * 32 * 8, 128)  # 32 channels x (8, 32) pooled map = 8192 features
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x1 = F.relu(self.conv1(x))   # first conv layer activation (also returned for visualization)
        x2 = F.relu(self.conv2(x1))
        x3 = F.adaptive_avg_pool2d(x2, (8, 32))  # pool to a fixed (8, 32) map regardless of input size
        x4 = x3.view(x3.size(0), -1)
        x5 = F.relu(self.fc1(x4))
        x6 = self.fc2(x5)
        return x6, x1

# Audio processing functions
def load_audio(file):
    audio, sr = librosa.load(file, sr=None, mono=True)
    return audio, sr

def apply_fft(audio):
    fft = np.fft.fft(audio)
    magnitude = np.abs(fft)
    phase = np.angle(fft)
    return fft, magnitude, phase

def filter_fft(fft, percentage):
    # Keep only the strongest `percentage` percent of FFT bins; zero out the rest
    magnitude = np.abs(fft)
    sorted_indices = np.argsort(magnitude)[::-1]
    num_keep = int(len(sorted_indices) * percentage / 100)
    mask = np.zeros_like(fft)
    mask[sorted_indices[:num_keep]] = 1
    return fft * mask

def create_spectrogram(audio, sr):
    n_fft = 2048
    hop_length = 512
    stft_matrix = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length)
    spectrogram = np.abs(stft_matrix)
    return spectrogram, n_fft, hop_length

# Visualization functions
def plot_waveform(audio, sr, title):
    fig = go.Figure()
    time = np.arange(len(audio)) / sr
    fig.add_trace(go.Scatter(x=time, y=audio, mode='lines'))
    fig.update_layout(title=title, xaxis_title='Time (s)', yaxis_title='Amplitude')
    return fig

def plot_fft(magnitude, phase, sr):
    fig = make_subplots(rows=2, cols=1, subplot_titles=('Magnitude Spectrum', 'Phase Spectrum'))
    freq = np.fft.fftfreq(len(magnitude), 1/sr)

    fig.add_trace(go.Scatter(x=freq, y=magnitude, mode='lines', name='Magnitude'), row=1, col=1)
    fig.add_trace(go.Scatter(x=freq, y=phase, mode='lines', name='Phase'), row=2, col=1)

    fig.update_xaxes(title_text='Frequency (Hz)', row=1, col=1)
    fig.update_xaxes(title_text='Frequency (Hz)', row=2, col=1)
    fig.update_yaxes(title_text='Magnitude', row=1, col=1)
    fig.update_yaxes(title_text='Phase (radians)', row=2, col=1)

    return fig

def plot_3d_fft(magnitude, phase, sr):
    freq = np.fft.fftfreq(len(magnitude), 1/sr)
    fig = go.Figure(data=[go.Scatter3d(
        x=freq,
        y=magnitude,
        z=phase,
        mode='markers',
        marker=dict(
            size=5,
            color=phase,  # Color by phase
            colorscale='Viridis',  # Choose a colorscale
            opacity=0.8
        )
    )])

    fig.update_layout(scene=dict(
        xaxis_title='Frequency (Hz)',
        yaxis_title='Magnitude',
        zaxis_title='Phase (radians)'
    ))

    return fig

def plot_spectrogram(spectrogram, sr, hop_length):
    fig, ax = plt.subplots()
    img = librosa.display.specshow(librosa.amplitude_to_db(spectrogram, ref=np.max),
                                   sr=sr, hop_length=hop_length, x_axis='time', y_axis='log', ax=ax)
    fig.colorbar(img, ax=ax, format='%+2.0f dB')
    ax.set_title('Spectrogram')
    return fig

def create_fft_table(magnitude, phase, sr):
    freq = np.fft.fftfreq(len(magnitude), 1/sr)
    df = pd.DataFrame({
        'Frequency (Hz)': freq,
        'Magnitude': magnitude,
        'Phase (radians)': phase
    })
    return df

# Streamlit UI
st.set_page_config(layout="wide")
st.title("Audio Frequency Analysis with CNN")

# Initialize session state
if 'audio_data' not in st.session_state:
    st.session_state.audio_data = None
if 'sr' not in st.session_state:
    st.session_state.sr = None
if 'fft' not in st.session_state:
    st.session_state.fft = None

# File uploader
uploaded_file = st.file_uploader("Upload an audio file", type=['wav', 'mp3', 'ogg'])

if uploaded_file is not None:
    # Load and process audio
    audio, sr = load_audio(uploaded_file)
    st.session_state.audio_data = audio
    st.session_state.sr = sr

    # Display original waveform
    st.subheader("Original Audio Waveform")
    st.plotly_chart(plot_waveform(audio, sr, "Original Waveform"), use_container_width=True)

    # Apply FFT
    fft, magnitude, phase = apply_fft(audio)
    st.session_state.fft = fft

    # Display FFT results
    st.subheader("Frequency Domain Analysis")
    st.plotly_chart(plot_fft(magnitude, phase, sr), use_container_width=True)

    # 3D FFT Plot
    st.subheader("3D Frequency Domain Analysis")
    st.plotly_chart(plot_3d_fft(magnitude, phase, sr), use_container_width=True)

    # FFT Table
    st.subheader("FFT Values Table")
    fft_table = create_fft_table(magnitude, phase, sr)
    st.dataframe(fft_table)

    # Frequency filtering
    percentage = st.slider("Percentage of frequencies to retain:", 0.1, 100.0, 10.0, 0.1)

    if st.button("Apply Frequency Filter"):
        filtered_fft = filter_fft(st.session_state.fft, percentage)
        reconstructed = np.fft.ifft(filtered_fft).real

        # Display reconstructed waveform
        st.subheader("Reconstructed Audio")
        st.plotly_chart(plot_waveform(reconstructed, sr, "Filtered Waveform"), use_container_width=True)

        # Play audio
        st.audio(reconstructed, sample_rate=sr)

    # Spectrogram creation
    st.subheader("Spectrogram Analysis")
    spectrogram, n_fft, hop_length = create_spectrogram(audio, sr)
    st.pyplot(plot_spectrogram(spectrogram, sr, hop_length))

    # CNN Processing
    if st.button("Process with CNN"):
        # Convert spectrogram to a tensor of shape (batch, channel, freq, time)
        spec_tensor = torch.tensor(spectrogram[np.newaxis, np.newaxis, ...], dtype=torch.float32)

        model = AudioCNN()  # untrained dummy model; outputs are for illustration only
        with torch.no_grad():
            output, activations = model(spec_tensor)

        # Visualize activations
        st.subheader("CNN Layer Activations")

        # Input spectrogram
        st.write("### Input Spectrogram")
        fig_input, ax = plt.subplots()
        ax.imshow(spectrogram, aspect='auto', origin='lower')
        st.pyplot(fig_input)

        # First conv layer activations
        st.write("### First Convolution Layer Activations")
        activation = activations.detach().numpy()[0]

        cols = 4
        rows = 4
        fig, axs = plt.subplots(rows, cols, figsize=(20, 20))
        for i in range(16):
            ax = axs[i // cols, i % cols]
            ax.imshow(activation[i], aspect='auto', origin='lower')
            ax.set_title(f'Channel {i+1}')
        plt.tight_layout()
        st.pyplot(fig)

        # Classification results
        st.write("### Classification Output")
        probabilities = F.softmax(output, dim=1).numpy()[0]
        classes = [f"Class {i}" for i in range(10)]
        df = pd.DataFrame({"Class": classes, "Probability": probabilities})
        fig = px.bar(df, x="Class", y="Probability", color="Probability")
        st.plotly_chart(fig)

# Add some styling
st.markdown("""
<style>
.stButton>button {
    padding: 10px 20px;
    font-size: 16px;
    background-color: #4CAF50;
    color: white;
}
.stSlider>div>div>div>div {
    background-color: #4CAF50;
}
</style>
""", unsafe_allow_html=True)