Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) wiki page for extra scripts developed by users.
- API
- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using CLIP image embeddings (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
## Where are Aesthetic Gradients?!?!
Aesthetic Gradients are now an extension. You can install it using git:
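A command along these lines, run from the webui root, should work (the repository URL is the one linked above; the `extensions/aesthetic-gradients` destination directory name is assumed here):

```bash
# clone the extension into webui's extensions directory
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients extensions/aesthetic-gradients
```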
After running this command, make sure that you have an `aesthetic-gradients` dir in webui's `extensions` directory and restart
the UI. The interface for Aesthetic Gradients should appear exactly the same as it was.
- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
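As a rough sketch for a Linux setup (assuming git and the Python prerequisites from the dependencies page are already present; on Windows, `webui-user.bat` takes the place of `webui.sh`):

```bash
# clone the repository and launch the web UI; on first run the launcher
# creates a venv and installs the remaining Python packages
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
cd stable-diffusion-webui
./webui.sh
```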
"Interrogate":"Reconstruct prompt from existing image and put it into the prompt field.",
"Images filename pattern":"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Directory name pattern":"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Images filename pattern":"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Directory name pattern":"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Max prompt words":"Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
"Loopback":"Process an image, use it as an input, repeat.",
parser.add_argument("--enable-console-prompts",action='store_true',help="print prompts to console when generating with txt2img and img2img",default=False)
parser.add_argument('--vae-path',type=str,help='Path to Variational Autoencoders model',default=None)
parser.add_argument("--disable-safe-unpickle",action='store_true',help="disable checking pytorch models for malicious code",default=False)
parser.add_argument("--api",action='store_true',help="use api=True to launch the api with the webui")
parser.add_argument("--api-auth",type=str,help='Set authentication for api like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"',default=None)
parser.add_argument("--nowebui",action='store_true',help="use api=True to launch the api instead of the webui")
parser.add_argument("--api",action='store_true',help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
parser.add_argument("--api-auth",type=str,help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"',default=None)
parser.add_argument("--nowebui",action='store_true',help="use api=True to launch the API instead of the webui")
parser.add_argument("--ui-debug-mode",action='store_true',help="Don't load model to quickly launch UI")
parser.add_argument("--device-id",type=str,help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)",default=None)
"ESRGAN_tile":OptionInfo(192,"Tile size for ESRGAN upscalers. 0 = no tiling.",gr.Slider,{"minimum":0,"maximum":512,"step":16}),
"ESRGAN_tile_overlap":OptionInfo(8,"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.",gr.Slider,{"minimum":0,"maximum":48,"step":1}),
"realesrgan_enabled_models":OptionInfo(["R-ESRGAN x4+","R-ESRGAN x4+ Anime6B"],"Select which Real-ESRGAN models to show in the web UI. (Requires restart)",gr.CheckboxGroup,lambda:{"choices":realesrgan_models_names()}),
"realesrgan_enabled_models":OptionInfo(["R-ESRGAN 4x+","R-ESRGAN 4x+ Anime6B"],"Select which Real-ESRGAN models to show in the web UI. (Requires restart)",gr.CheckboxGroup,lambda:{"choices":realesrgan_models_names()}),
"SWIN_tile":OptionInfo(192,"Tile size for all SwinIR.",gr.Slider,{"minimum":16,"maximum":512,"step":16}),
"SWIN_tile_overlap":OptionInfo(8,"Tile overlap, in pixels for SwinIR. Low values = visible seam.",gr.Slider,{"minimum":0,"maximum":48,"step":1}),
"unload_models_when_training":OptionInfo(False,"Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"shuffle_tags":OptionInfo(False,"Shuffleing tags by ',' when create texts."),
"tag_drop_out":OptionInfo(0,"Dropout tags when create texts",gr.Slider,{"minimum":0,"maximum":1,"step":0.1}),
"pin_memory":OptionInfo(False,"Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state":OptionInfo(False,"Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."),
"dataset_filename_word_regex":OptionInfo("","Filename word regex"),
"hide_samplers":OptionInfo([],"Hide samplers in user interface (requires restart)",gr.CheckboxGroup,lambda:{"choices":[x.nameforxinsd_samplers.all_samplers]}),
"hide_samplers":OptionInfo([],"Hide samplers in user interface (requires restart)",gr.CheckboxGroup,lambda:{"choices":[x.nameforxinlist_samplers()]}),
"eta_ddim":OptionInfo(0.0,"eta (noise multiplier) for DDIM",gr.Slider,{"minimum":0.0,"maximum":1.0,"step":0.01}),
"eta_ancestral":OptionInfo(1.0,"eta (noise multiplier) for ancestral samplers",gr.Slider,{"minimum":0.0,"maximum":1.0,"step":0.01}),
"ddim_discretize":OptionInfo('uniform',"img2img DDIM discretize",gr.Radio,{"choices":['uniform','quad']}),
new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended: Swish / Linear (none)", choices=modules.hypernetworks.ui.keys)
new_hypernetwork_initialization_option = gr.Dropdown(value="Normal", label="Select layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])