sdung committed
Commit 9e357fb · verified · 1 Parent(s): 97ef583

Upload app.py

Files changed (1)
  1. app.py +104 -0
app.py ADDED
@@ -0,0 +1,104 @@
+import gradio as gr
+
+import torch
+from transformers import (
+    AutoTokenizer,
+    AutoModelForCausalLM,
+    TextIteratorStreamer,
+    pipeline,
+)
+from threading import Thread
+
+# The huggingface model id for Microsoft's phi-2 model
+checkpoint = "microsoft/phi-2"
+
+# Download and load model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(
+    checkpoint, torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True
+)
+
+# Text generation pipeline
+phi2 = pipeline(
+    "text-generation",
+    tokenizer=tokenizer,
+    model=model,
+    pad_token_id=tokenizer.eos_token_id,
+    eos_token_id=tokenizer.eos_token_id,
+    device_map="cpu",
+)
+
+
+# Function that accepts a prompt and generates text using the phi2 pipeline
+def generate(message, chat_history, max_new_tokens):
+    instruction = "You are a helpful assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
+    final_prompt = f"Instruction: {instruction}\n"
+
+    for sent, received in chat_history:
+        final_prompt += "User: " + sent + "\n"
+        final_prompt += "Assistant: " + received + "\n"
+
+    final_prompt += "User: " + message + "\n"
+    final_prompt += "Output:"
+
+    if (
+        len(tokenizer.tokenize(final_prompt))
+        >= tokenizer.model_max_length - max_new_tokens
+    ):
+        final_prompt = "Instruction: Say 'Input exceeded context size, please clear the chat history and retry!' Output:"
+
+    # Streamer
+    streamer = TextIteratorStreamer(
+        tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=300.0
+    )
+    thread = Thread(
+        target=phi2,
+        kwargs={
+            "text_inputs": final_prompt,
+            "max_new_tokens": max_new_tokens,
+            "streamer": streamer,
+        },
+    )
+    thread.start()
+
+    generated_text = ""
+    for word in streamer:
+        generated_text += word
+        response = generated_text.strip()
+
+        if "User:" in response:
+            response = response.split("User:")[0].strip()
+
+        if "Assistant:" in response:
+            response = response.split("Assistant:")[1].strip()
+
+        yield response
+
+
+# Chat interface with gradio
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        # Phi-2 Chatbot Demo
+        This chatbot was built with Microsoft's 2.7-billion-parameter [phi-2](https://huggingface.co/microsoft/phi-2) Transformer model.
+
+        To reduce the response time on this hardware, `max_new_tokens` is set to `21` in the text generation pipeline. With this default, the response takes approximately `60 seconds` to start streaming, one word at a time. Use the slider below to increase or decrease the length of the generated text.
+        """
+    )
+
+    tokens_slider = gr.Slider(
+        8,
+        128,
+        value=21,
+        label="Maximum new tokens",
+        info="A larger `max_new_tokens` value gives longer responses, at the cost of a slower response time.",
+    )
+
+    chatbot = gr.ChatInterface(
+        fn=generate,
+        additional_inputs=[tokens_slider],
+        stop_btn=None,
+        examples=[["Who is Leonhard Euler?"]],
+    )
+
+demo.queue().launch(share=True)
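
For reference, here is what `generate()` assembles before handing the prompt to the pipeline. This is an illustrative sketch, not part of the commit: the conversation values are invented, and `chat_history` is assumed to be in the list-of-(user, assistant)-tuples format that this version of Gradio's `ChatInterface` passes to its `fn`.

```python
# Illustrative only: reproduces the prompt format built inside generate().
# The conversation values below are made up for the example.
chat_history = [("Hi", "Hello! How can I help you?")]  # (user, assistant) tuples
message = "Who is Leonhard Euler?"

instruction = (
    "You are a helpful assistant to 'User'. You do not respond as 'User' "
    "or pretend to be 'User'. You only respond once as 'Assistant'."
)
final_prompt = f"Instruction: {instruction}\n"
for sent, received in chat_history:
    final_prompt += f"User: {sent}\nAssistant: {received}\n"
final_prompt += f"User: {message}\nOutput:"

print(final_prompt)
# Instruction: You are a helpful assistant to 'User'. ...
# User: Hi
# Assistant: Hello! How can I help you?
# User: Who is Leonhard Euler?
# Output:
```

The post-processing in the streaming loop then trims anything the model generates past its own turn: text after a new "User:" is dropped, and a leading "Assistant:" label is stripped before yielding.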
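To sanity-check the model outside Gradio, a single blocking call to an equivalent pipeline can be useful. This is a minimal sketch, assuming `torch` and `transformers` are installed and the machine has enough RAM to hold the 2.7B-parameter model on CPU; the prompt string here is invented for illustration.

```python
import torch
from transformers import pipeline

# Same pipeline setup as app.py, collapsed into one call; runs on CPU.
phi2 = pipeline(
    "text-generation",
    model="microsoft/phi-2",
    torch_dtype=torch.float32,
    device_map="cpu",
    trust_remote_code=True,
)

# One blocking generation call (no streamer, no thread).
out = phi2(
    "Instruction: You are a helpful assistant.\nUser: Who is Leonhard Euler?\nOutput:",
    max_new_tokens=21,
)
print(out[0]["generated_text"])
```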