// NOTE(review): This file appears to be a mangled copy of a React TSX
// component. All JSX element tags (e.g. <SettingBlock>, <Selector>,
// <Tooltip>, <NumberInputSetting>, <Switch>, link anchors) have been
// stripped, leaving only attribute/handler residue such as `}} />` — the
// file does NOT compile as-is. Restore the original from version control
// before editing; the comments below only map out the surviving structure.
//
// Surviving on the next line: the import block, the component entry point,
// Recoil state wiring (settingState read/write, isDisableModelSwitchState
// read-only), the model-change and LDM-sampler-change handlers (each does a
// spread-update of the settings atom), and the opening of
// renderModelDesc(name, paperUrl, githubUrl) — a helper that presumably
// rendered paper/code links for the selected model (its JSX is missing —
// TODO confirm against the original file).
import React, { ReactNode, useEffect, useState } from 'react' import { useRecoilState, useRecoilValue } from 'recoil' import { AIModel, CV2Flag, isDisableModelSwitchState, settingState, } from '../../store/Atoms' import Selector from '../shared/Selector' import { Switch, SwitchThumb } from '../shared/Switch' import Tooltip from '../shared/Tooltip' import { LDMSampler } from './HDSettingBlock' import NumberInputSetting from './NumberInputSetting' import SettingBlock from './SettingBlock' function ModelSettingBlock() { const [setting, setSettingState] = useRecoilState(settingState) const isDisableModelSwitch = useRecoilValue(isDisableModelSwitchState) const onModelChange = (value: AIModel) => { setSettingState(old => { return { ...old, model: value } }) } const onLDMSamplerChange = (value: LDMSampler) => { setSettingState(old => { return { ...old, ldmSampler: value } }) } const renderModelDesc = ( name: string, paperUrl: string, githubUrl: string ) => { return (
// Residue of the renderModelDesc JSX body; only this JSX comment survived —
// the "Paper" (and presumably "Code") link elements were stripped.
{/* Paper */}
// Next line: end of renderModelDesc, then renderLDMModelDesc — option
// controls for the LDM model: an ldmSteps number input (empty string maps
// to 0, otherwise parseInt base 10) and an LDM sampler selector whose
// onChange casts to LDMSampler. The enclosing JSX tags are missing; only a
// fragment opener `<>` and handler bodies remain. Then the opening of
// renderZITSModelDesc.
) } const renderLDMModelDesc = () => { return ( <> { const val = value.length === 0 ? 0 : parseInt(value, 10) setSettingState(old => { return { ...old, ldmSteps: val } }) }} /> onLDMSamplerChange(val as LDMSampler)} /> } /> ) } const renderZITSModelDesc = () => { return (
// Residue of renderZITSModelDesc: an onCheckedChange-style handler that
// toggles setting.zitsWireframe — presumably attached to a <Switch>; the
// tag itself was stripped.
{ setSettingState(old => { return { ...old, zitsWireframe: checked } }) }} > } />
// End of renderZITSModelDesc; start of renderFCFModelDesc.
) } const renderFCFModelDesc = () => { return (
// User-facing description string for the FcF model. The grammar issues are
// part of the original runtime text — do not "fix" them here without the
// real JSX context, since this is rendered output, not a comment.
FcF only support fixed size(512x512) image input. Lama Cleaner will take care of resize and crop process, it still recommended applies to small defects.
// Next line: end of renderFCFModelDesc. renderOpenCV2Desc: a cv2Radius
// number input (same empty-string->0 / parseInt pattern) and a cv2Flag
// selector (cast to CV2Flag). renderOptionDesc dispatches extra option UI
// per setting.model (LDM / ZITS / FCF / CV2; all other models return
// undefined). renderPaperCodeBadge maps each AIModel to
// renderModelDesc(title, paperUrl, codeUrl) — cases LAMA through
// REALISTIC_VISION_1_4 are on this line.
) } const renderOpenCV2Desc = () => { return ( <> { const val = value.length === 0 ? 0 : parseInt(value, 10) setSettingState(old => { return { ...old, cv2Radius: val } }) }} /> { setSettingState(old => { return { ...old, cv2Flag: val as CV2Flag } }) }} /> } /> ) } const renderOptionDesc = (): ReactNode => { switch (setting.model) { case AIModel.LDM: return renderLDMModelDesc() case AIModel.ZITS: return renderZITSModelDesc() case AIModel.FCF: return renderFCFModelDesc() case AIModel.CV2: return renderOpenCV2Desc() default: return undefined } } const renderPaperCodeBadge = (): ReactNode => { switch (setting.model) { case AIModel.LAMA: return renderModelDesc( 'Resolution-robust Large Mask Inpainting with Fourier Convolutions', 'https://arxiv.org/abs/2109.07161', 'https://github.com/saic-mdal/lama' ) case AIModel.LDM: return renderModelDesc( 'High-Resolution Image Synthesis with Latent Diffusion Models', 'https://arxiv.org/abs/2112.10752', 'https://github.com/CompVis/latent-diffusion' ) case AIModel.ZITS: return renderModelDesc( 'Incremental Transformer Structure Enhanced Image Inpainting with Masking Positional Encoding', 'https://arxiv.org/abs/2203.00867', 'https://github.com/DQiaole/ZITS_inpainting' ) case AIModel.MAT: return renderModelDesc( 'Mask-Aware Transformer for Large Hole Image Inpainting', 'https://arxiv.org/abs/2203.15270', 'https://github.com/fenglinglwb/MAT' ) case AIModel.FCF: return renderModelDesc( 'Keys to Better Image Inpainting: Structure and Texture Go Hand in Hand', 'https://arxiv.org/abs/2208.03382', 'https://github.com/SHI-Labs/FcF-Inpainting' ) case AIModel.SD15: return renderModelDesc( 'Stable Diffusion 1.5', 'https://ommer-lab.com/research/latent-diffusion-models/', 'https://github.com/CompVis/stable-diffusion' ) case AIModel.ANYTHING4: return renderModelDesc( 'andite/anything-v4.0', 'https://huggingface.co./andite/anything-v4.0', 'https://huggingface.co./andite/anything-v4.0' ) case AIModel.REALISTIC_VISION_1_4: return 
// Remaining renderPaperCodeBadge cases: Realistic Vision 1.4, SD2, manga
// inpainting — NOTE(review): the enum member is spelled `AIModel.Mange`,
// presumably a typo for "Manga" originating in '../../store/Atoms'; fix it
// there, not here — then CV2, Paint-by-Example, and InstructPix2Pix
// (default falls through to an empty fragment). Finally the component's
// return: residue suggests a SettingBlock whose `input` was a model
// Selector (onChange casts to AIModel, disabled by isDisableModelSwitch)
// with titleSuffix/optionDesc from the render helpers above — the JSX tags
// themselves are missing. Ends with the default export.
renderModelDesc( 'SG161222/Realistic_Vision_V1.4', 'https://huggingface.co./SG161222/Realistic_Vision_V1.4', 'https://huggingface.co./SG161222/Realistic_Vision_V1.4' ) case AIModel.SD2: return renderModelDesc( 'Stable Diffusion 2', 'https://ommer-lab.com/research/latent-diffusion-models/', 'https://github.com/Stability-AI/stablediffusion' ) case AIModel.Mange: return renderModelDesc( 'Manga Inpainting', 'https://www.cse.cuhk.edu.hk/~ttwong/papers/mangainpaint/mangainpaint.html', 'https://github.com/msxie92/MangaInpainting' ) case AIModel.CV2: return renderModelDesc( 'OpenCV Image Inpainting', 'https://docs.opencv.org/4.6.0/df/d3d/tutorial_py_inpainting.html', 'https://docs.opencv.org/4.6.0/df/d3d/tutorial_py_inpainting.html' ) case AIModel.PAINT_BY_EXAMPLE: return renderModelDesc( 'Paint by Example', 'https://arxiv.org/abs/2211.13227', 'https://github.com/Fantasy-Studio/Paint-by-Example' ) case AIModel.PIX2PIX: return renderModelDesc( 'InstructPix2Pix', 'https://arxiv.org/abs/2211.09800', 'https://github.com/timothybrooks/instruct-pix2pix' ) default: return <> } } return ( onModelChange(val as AIModel)} disabled={isDisableModelSwitch} /> } optionDesc={renderOptionDesc()} /> ) } export default ModelSettingBlock