# Extension for text-generation-webui: lets the chat character reply with pictures
# generated through the Stable Diffusion WebUI (AUTOMATIC1111) txt2img API.
import base64
import io
import re
from pathlib import Path

import gradio as gr
import requests
import torch
from PIL import Image

from modules import chat, shared

# Disable TorchScript JIT profiling
torch._C._jit_set_profiling_mode(False)
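
# Extension parameters; they are updated live from the Gradio UI defined in ui() below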
params = {
    'enable_SD_api': False,
    'address': 'http://127.0.0.1:7860',
    'save_img': False,
    'SD_model': 'NeverEndingDream',
    'prompt_prefix': '(Masterpiece:1.1), (solo:1.3), detailed, intricate, colorful',
    'negative_prompt': '(worst quality, low quality:1.3)',
    'side_length': 512,
    'restore_faces': False
}
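
# Kept for reference; model selection is not wired up anywhere in this script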
SD_models = ['NeverEndingDream']

streaming_state = shared.args.no_stream  # remember whether response streaming was enabled at startup
picture_response = False  # True when the next bot reply should be rendered as a picture
pic_id = 0  # running counter used to name saved images


def remove_surrounded_chars(string):
    # Strip text surrounded by asterisks (e.g. *smiles*) so it does not end up in the image prompt
    return re.sub(r'\*[^\*]*?(\*|$)', '', string)


def input_modifier(string):
    """
    This function is applied to your text inputs before
    they are fed into the model.
    """
    global params, picture_response
    if not params['enable_SD_api']:
        return string

    commands = ['send', 'mail', 'me']
    mediums = ['image', 'pic', 'picture', 'photo']
    subjects = ['yourself', 'own']
    lowstr = string.lower()

    # Trigger an image response when the user asks the character to send a picture
    if any(command in lowstr for command in commands) and any(case in lowstr for case in mediums):
        picture_response = True
        shared.args.no_stream = True
        shared.processing_message = "*Is sending a picture...*"
        string = "Please provide a detailed description of your surroundings, how you look and the situation you're in and what you are doing right now"
        if any(target in lowstr for target in subjects):  # the requested picture is of the character itself
            string = "Please provide a detailed and vivid description of how you look and what you are wearing"

    return string


def get_SD_pictures(description):
    """
    Sends the description to the Stable Diffusion WebUI txt2img API
    and returns the generated images as HTML <img> tags.
    """
    global params, pic_id

    payload = {
        "prompt": params['prompt_prefix'] + description,
        "seed": -1,
        "sampler_name": "DPM++ 2M Karras",
        "steps": 32,
        "cfg_scale": 7,
        "width": params['side_length'],
        "height": params['side_length'],
        "restore_faces": params['restore_faces'],
        "negative_prompt": params['negative_prompt']
    }

    response = requests.post(url=f'{params["address"]}/sdapi/v1/txt2img', json=payload)
    r = response.json()

    visible_result = ""
    for img_str in r['images']:
        # The API returns base64-encoded image data, possibly prefixed with "data:image/png;base64,";
        # taking the last comma-separated part handles both the prefixed and the raw case
        image = Image.open(io.BytesIO(base64.b64decode(img_str.split(",", 1)[-1])))
        if params['save_img']:
            output_file = Path(f'extensions/sd_api_pictures/outputs/{pic_id:06d}.png')
            image.save(output_file.as_posix())
            pic_id += 1

        # Embed a downscaled JPEG copy directly in the chat output
        image.thumbnail((300, 300))
        buffered = io.BytesIO()
        image.save(buffered, format="JPEG")
        buffered.seek(0)
        image_bytes = buffered.getvalue()
        img_str = "data:image/jpeg;base64," + base64.b64encode(image_bytes).decode()
        visible_result = visible_result + f'<img src="{img_str}" alt="{description}">\n'

    return visible_result
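
# For reference, the txt2img endpoint used above responds with JSON shaped roughly like
# {"images": ["<base64-encoded PNG>", ...], "parameters": {...}, "info": "..."},
# which is why the loop above iterates over r['images'].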


def output_modifier(string):
    """
    This function is applied to the model outputs.
    """
    global pic_id, picture_response, streaming_state

    if not picture_response:
        return string

    # Clean the reply so it can be used as an image prompt
    string = remove_surrounded_chars(string)
    string = string.replace('"', '')
    string = string.replace('“', '')
    string = string.replace('\n', ' ')
    string = string.strip()

    if string == '':
        string = 'no viable description in reply, try regenerating'

    text = f'*Description: "{string}"*'
    image = get_SD_pictures(string)

    # Reset the picture flag and restore the original streaming setting
    picture_response = False
    shared.processing_message = "*Is typing...*"
    shared.args.no_stream = streaming_state

    return image + "\n" + text


def bot_prefix_modifier(string):
    """
    This function is only applied in chat mode. It modifies
    the prefix text for the Bot and can be used to bias its
    behavior.
    """

    return string


def force_pic():
    # Flag the next bot reply to be rendered as a picture
    global picture_response
    picture_response = True


def ui():
    # Gradio UI elements
    with gr.Accordion("Stable Diffusion api integration", open=True):
        with gr.Row():
            with gr.Column():
                enable = gr.Checkbox(value=params['enable_SD_api'], label='Activate SD Api integration')
                save_img = gr.Checkbox(value=params['save_img'], label='Keep original received images in the outputs subdir')
            with gr.Column():
                address = gr.Textbox(placeholder=params['address'], value=params['address'], label='Stable Diffusion host address')

        with gr.Row():
            force_btn = gr.Button("Force the next response to be a picture")
            generate_now_btn = gr.Button("Generate an image response to the input")

        with gr.Accordion("Generation parameters", open=False):
            prompt_prefix = gr.Textbox(placeholder=params['prompt_prefix'], value=params['prompt_prefix'], label='Prompt Prefix (best used to describe the look of the character)')
            with gr.Row():
                negative_prompt = gr.Textbox(placeholder=params['negative_prompt'], value=params['negative_prompt'], label='Negative Prompt')
                dimensions = gr.Slider(256, 702, value=params['side_length'], step=64, label='Image dimensions')

    # Event handlers that keep params in sync with the UI
    enable.change(lambda x: params.update({"enable_SD_api": x}), enable, None)
    save_img.change(lambda x: params.update({"save_img": x}), save_img, None)
    address.change(lambda x: params.update({"address": x}), address, None)
    prompt_prefix.change(lambda x: params.update({"prompt_prefix": x}), prompt_prefix, None)
    negative_prompt.change(lambda x: params.update({"negative_prompt": x}), negative_prompt, None)
    dimensions.change(lambda x: params.update({"side_length": x}), dimensions, None)

    # "Generate now" sets the picture flag, then runs the chat wrapper so the reply becomes an image
    force_btn.click(force_pic)
    generate_now_btn.click(force_pic)
    generate_now_btn.click(chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=shared.args.no_stream)