Merge remote-tracking branch 'origin/main'
- app.py +8 -6
- simplified.py +181 -0
app.py
CHANGED
@@ -11,6 +11,7 @@ def landing():
     st.page_link(highlight_page, label="Highlight locations for possible edits", icon="🖍️")
     #st.page_link(generate_page, label="Generate revisions", icon="📝")
     st.page_link(type_assistant_response_page, label="Type Assistant Response", icon="🤖")
+    st.page_link(show_internals_page, label="Show Internals", icon="🧠")
 
     st.markdown("*Note*: These services send data to a remote server for processing. The server logs requests. Don't use sensitive or identifiable information on this page.")
 
@@ -408,17 +409,18 @@ const makeElt = (tag, attrs, children) => {
     <script>allLogprobs = {json.dumps(logprobs)};
 
     {show_logprob_js}
-
+
+    //showLogprobs(allLogprobs.length - 1);
     </script>
     """
     import streamlit.components.v1 as components
     return components.html(html_out, height=300, scrolling=True)
 
-rewrite_page = st.Page(rewrite_with_predictions, title="Rewrite with predictions", icon="📝")
-highlight_page = st.Page(highlight_edits, title="Highlight locations for possible edits", icon="🖍️")
-#generate_page = st.Page(generate_revisions, title="Generate revisions", icon="📝")
-type_assistant_response_page = st.Page(type_assistant_response, title="Type Assistant Response", icon="🤖")
-show_internals_page = st.Page(show_internals, title="Show Internals", icon="🧠")
+rewrite_page = st.Page(rewrite_with_predictions, title="Rewrite with predictions", icon="📝", url_path="rewrite")
+highlight_page = st.Page(highlight_edits, title="Highlight locations for possible edits", icon="🖍️", url_path="highlights")
+#generate_page = st.Page(generate_revisions, title="Generate revisions", icon="📝", url_path="generate")
+type_assistant_response_page = st.Page(type_assistant_response, title="Type Assistant Response", icon="🤖", url_path="type_assistant_response")
+show_internals_page = st.Page(show_internals, title="Show Internals", icon="🧠", url_path="internals")
 
 # Manually specify the sidebar
 page = st.navigation([
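For context on the url_path arguments added above, a minimal sketch (assuming Streamlit's st.Page/st.navigation multipage API, 1.36+; the standalone snippet below is illustrative, not part of this commit): url_path pins the URL segment a page is served under independently of its title, so for example the Show Internals page would be reachable at /internals.

    import streamlit as st

    def show_internals():
        st.write("internals")

    # title/icon control the sidebar entry; url_path controls the address-bar segment.
    page = st.navigation([st.Page(show_internals, title="Show Internals", icon="🧠", url_path="internals")])
    page.run()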
simplified.py
ADDED
@@ -0,0 +1,181 @@
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import DynamicCache

USE_GPU = torch.cuda.is_available()

@st.cache_resource
def load_model():
    import torch

    model_name = 'google/gemma-2-9b-it'

    dtype = torch.bfloat16 if USE_GPU else torch.float16

    llm = {
        'tokenizer': AutoTokenizer.from_pretrained(model_name),
        'model': AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto" if USE_GPU else "cpu",
            torch_dtype=dtype,
            attn_implementation='eager'
        )
    }
    llm['model'].eval()
    return llm


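# type_assistant_response() renders the chat page: it replays the conversation so far,
# keeps the message being composed in a text_area bound to st.session_state["msg_in_progress"],
# and shows one button per predicted continuation; clicking a button appends that text to the draft.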
def type_assistant_response():
    if 'messages' not in st.session_state or st.button("Start a new conversation"):
        st.session_state['messages'] = [{"role": "user", "content": ""}]
        st.session_state['msg_in_progress'] = ""
    messages = st.session_state.messages

    def rewind_to(i):
        st.session_state.messages = st.session_state.messages[:i+1]
        st.session_state['msg_in_progress'] = st.session_state.messages[-1]['content']

    for i, message in enumerate(st.session_state.messages[:-1]):
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
            st.button("Edit", on_click=rewind_to, args=(i,), key=f"rewind_to_{i}")

    # Display message-in-progress in chat message container
    last_role = messages[-1]["role"]
    with st.chat_message(last_role):
        label = "Your message" if last_role == "user" else "Assistant response"
        msg_in_progress = st.text_area(label, placeholder="Clicking the buttons below will update this field. You can also edit it directly; press Ctrl+Enter to apply changes.", height=300, key="msg_in_progress")
    if msg_in_progress is None:
        msg_in_progress = ""

    messages[-1]['content'] = msg_in_progress

    def append_token(word):
        messages[-1]['content'] = st.session_state['msg_in_progress'] = (
            msg_in_progress + word
        )

    allow_multi_word = st.checkbox("Allow multi-word predictions", value=False)

    response = continue_messages(
        messages=messages,
        n_branch_tokens=5,
        n_future_tokens=2
    )

    continuations = response['continuations']
    for i, (col, continuation) in enumerate(zip(st.columns(len(continuations)), continuations)):
        token = continuation['doc_text']
        with col:
            if not allow_multi_word and ' ' in token[1:]:
                token = token[0] + token[1:].split(' ', 1)[0]

            # if not allow_multi_word:
            #     import re
            #     split_result = re.split(r'(\s+)', token, maxsplit=1)
            #     assert len(split_result) == 3
            #     before_ws, token, after_ws = split_result
            #     print(repr(split_result))
            #     if before_ws != '':
            #         token = before_ws
            token_display = show_token(token)
            st.button(token_display, on_click=append_token, args=(token,), key=i, use_container_width=True)

    def send_message():
        other_role = "assistant" if last_role == "user" else "user"
        st.session_state['messages'].append({"role": other_role, "content": ""})
        st.session_state['msg_in_progress'] = ""
    st.button("Send", on_click=send_message)
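    # Note: append_token and send_message run as on_click callbacks at the start of the next
    # rerun, before the script body executes, so the updated st.session_state values are what
    # the text_area and chat history render.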

def show_token(token: str, escape_markdown=True) -> str:
    token_display = token.replace('\n', '↵').replace('\t', '⇥')
    if escape_markdown:
        for c in "\\`*_{}[]()#+-.!":
            token_display = token_display.replace(c, "\\" + c)
    return token_display

def continue_messages(messages, n_branch_tokens, n_future_tokens):

    # Messages arrive as plain dicts from session state, so index rather than attribute access.
    messages = [{"role": m["role"], "content": m["content"]} for m in messages]
    if len(messages) == 0:
        raise ValueError("At least one message must be provided.")

    llm = load_model()
    model = llm['model']
    tokenizer = llm['tokenizer']

    generated_docs = continue_messages_inner(model, tokenizer, messages, n_branch_tokens, n_future_tokens)

    return {
        'continuations': [dict(doc_text=doc) for doc in generated_docs]
    }

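# continue_messages() returns one short continuation per branch token, e.g. (values illustrative):
#   continue_messages([{"role": "user", "content": "The capital of France is"}],
#                     n_branch_tokens=5, n_future_tokens=2)
#   -> {'continuations': [{'doc_text': ' Paris'}, {'doc_text': ' the'}, ...]}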

def get_lookahead_sequences(model, tokenizer, hypotheses, n_branch_tokens, device):
    """
    For each of the n_branch_tokens next tokens, generate most-likely next tokens and append back on.
    """
    assert len(hypotheses.shape) == 2
    assert hypotheses.shape[0] == 1
    n_tokens_so_far = hypotheses.shape[1]
    past_key_values = DynamicCache()

    with torch.no_grad():
        model_outs_onestep = model(hypotheses, output_hidden_states=True, past_key_values=past_key_values)

    branch_tokens = model_outs_onestep.logits[0, -1].topk(n_branch_tokens).indices

    # split the cache into n_branch_tokens reps. We pretend we're doing a "Beam search"...
    past_key_values.reorder_cache(torch.zeros((n_branch_tokens,), dtype=torch.long, device=device))

    # Now call the model again, passing the kv cache, so we can continue generating.
    # Each of the n_branch_tokens next tokens will be considered as one sequence in a "batch".
    next_tokens_as_batch = branch_tokens.unsqueeze(1)
    assert next_tokens_as_batch.shape == (n_branch_tokens, 1)

    position_id_for_final_token = n_tokens_so_far
    cache_position = torch.full((1,), position_id_for_final_token, dtype=int, device=device)
    with torch.no_grad():
        model_outs = model(
            next_tokens_as_batch,
            past_key_values=past_key_values,
            output_hidden_states=True,
            use_cache=True,
            # the cache surprisingly doesn't know the position of the last token
            cache_position=cache_position
        )

    # Grab the single most likely token from each of the n_branch_tokens sequences
    next_token_logits = model_outs.logits[:, -1]
    vocab_size = model.config.vocab_size
    assert next_token_logits.shape == (n_branch_tokens, vocab_size), f"{next_token_logits.shape=}, {n_branch_tokens=}, {vocab_size=}"
    most_likely_token_ids = next_token_logits.argmax(dim=-1)

    # Stick them at the end of the branch tokens.
    assert most_likely_token_ids.shape == (n_branch_tokens,)
    lookahead_sequences = torch.cat([
        branch_tokens.unsqueeze(1),
        most_likely_token_ids.unsqueeze(1)
    ], dim=1)
    assert lookahead_sequences.shape == (n_branch_tokens, 2)
    return lookahead_sequences, next_token_logits

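# How the cache trick in get_lookahead_sequences works: reorder_cache with an all-zeros index
# behaves like an index_select along the batch dimension, so the single-hypothesis KV cache
# becomes n_branch_tokens identical copies and each candidate next token can be scored as its
# own sequence in one batched forward pass. Schematically (head counts and dims illustrative):
#   key_cache[layer]: (1, n_heads, n_tokens_so_far, head_dim)
#       --reorder_cache(zeros(n_branch_tokens))-->
#   key_cache[layer]: (n_branch_tokens, n_heads, n_tokens_so_far, head_dim)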

def continue_messages_inner(model, tokenizer, messages, n_branch_tokens, n_future_tokens):
    # Note: we're ignoring n_future_tokens right now since the old implementation was buggy.
    device = model.device

    tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, return_tensors="pt", continue_final_message=True).to(model.device)
    print(tokenizer.batch_decode(tokenized_chat, skip_special_tokens=False))

    lookahead_sequences, next_token_logits = get_lookahead_sequences(
        model, tokenizer, tokenized_chat, n_branch_tokens, device)

    generated_docs = tokenizer.batch_decode(lookahead_sequences, skip_special_tokens=True)
    return generated_docs


type_assistant_response()
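The new file is self-contained: the module-level type_assistant_response() call at the end means it can presumably be tried on its own (with streamlit, torch, and transformers installed) via:

    streamlit run simplified.py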