marianvd-01 committed (verified)
Commit 1398c16
1 Parent(s): 2a24377

Create app.py

Files changed (1)
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ # app.py
+
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModel
+ import torch
+ import matplotlib.pyplot as plt
+
+ # Load a default model with attention outputs enabled
+ MODEL_NAME = "bert-base-uncased"
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+ model = AutoModel.from_pretrained(MODEL_NAME, output_attentions=True)
+
+ def visualize_attention(text):
+     inputs = tokenizer(text, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+
+     # Tuple of num_layers tensors, each of shape (batch, num_heads, seq_len, seq_len)
+     attentions = outputs.attentions
+     tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
+
+     fig, ax = plt.subplots(figsize=(8, 6))
+     # Visualize attention from the last layer, first head
+     attn_matrix = attentions[-1][0][0].detach().numpy()
+
+     cax = ax.matshow(attn_matrix, cmap="viridis")
+     fig.colorbar(cax)
+
+     ax.set_xticks(range(len(tokens)))
+     ax.set_yticks(range(len(tokens)))
+     ax.set_xticklabels(tokens, rotation=90)
+     ax.set_yticklabels(tokens)
+     ax.set_title("Attention Map - Last Layer, Head 1")
+
+     return fig
+
+ iface = gr.Interface(
+     fn=visualize_attention,
+     inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
+     outputs=gr.Plot(),
+     title="🧠 Transformer Attention Visualizer",
+     description="Visualizes the self-attention of the BERT model's last layer."
+ )
+
+ iface.launch()