"vram":"Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
"Highres. fix":"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
"Scale latent":"Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.",
"Eta noise seed delta":"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.",
"Do not add watermark to images":"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.",
@@ -100,7 +97,13 @@ titles = {
     "Clip skip": "Early stopping parameter for CLIP model; 1 means stop at the last layer as usual, 2 means stop at the penultimate layer, etc.",
     "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resolution and lower quality.",
-    "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resolution and extremely low quality."
+    "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resolution and extremely low quality.",
+
+    "Hires. fix": "Use a two-step process to partially create an image at a smaller resolution, upscale it, and then improve details in it without changing the composition",
+    "Hires steps": "Number of sampling steps for the upscaled picture. If 0, uses the same number as for the original.",
+    "Upscale by": "Adjusts the size of the image by multiplying the original width and height by the selected value. Ignored if either Resize width to or Resize height to are non-zero.",
+    "Resize width to": "Resizes the image to this width. If 0, width is inferred from either of the two nearby sliders.",
+    "Resize height to": "Resizes the image to this height. If 0, height is inferred from either of the two nearby sliders."
@@ -86,6 +86,7 @@ parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencode
 parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
 parser.add_argument("--api", action='store_true', help="use --api to launch the API together with the webui (use --nowebui instead for only the API)")
 parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
+parser.add_argument("--api-log", action='store_true', help="use --api-log to enable logging of all API requests")
 parser.add_argument("--nowebui", action='store_true', help="use --nowebui to launch the API instead of the webui")
 parser.add_argument("--ui-debug-mode", action='store_true', help="don't load the model, to quickly launch the UI")
 parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
"unload_models_when_training":OptionInfo(False,"Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory":OptionInfo(False,"Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state":OptionInfo(False,"Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."),
"save_optimizer_state":OptionInfo(False,"Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
"dataset_filename_word_regex":OptionInfo("","Filename word regex"),
"training_image_repeats_per_epoch":OptionInfo(1,"Number of repeats for a single input image per epoch; used only for displaying epoch number",gr.Number,{"precision":0}),
@@ -498,7 +504,12 @@ class Options:
             return False
 
         if self.data_labels[key].onchange is not None:
-            self.data_labels[key].onchange()
+            try:
+                self.data_labels[key].onchange()
+            except Exception as e:
+                errors.display(e, f"changing setting {key} to {value}")
+                setattr(self, key, oldval)
+                return False
 
         return True
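The net effect of the hunk above: if a setting's onchange callback raises, the error is reported, the old value is restored, and set() returns False instead of propagating. A self-contained sketch of that contract (toy classes, not the webui's Options or errors modules):

```python
class TinyOptions:
    """Toy stand-in demonstrating the rollback-on-failed-onchange contract."""
    def __init__(self):
        self.data = {"theme": "light"}
        self.onchange = {}

    def set(self, key, value):
        oldval = self.data.get(key)
        self.data[key] = value
        callback = self.onchange.get(key)
        if callback is not None:
            try:
                callback()
            except Exception as e:
                print(f"changing setting {key} to {value} failed: {e}")
                self.data[key] = oldval  # roll the setting back
                return False
        return True

opts = TinyOptions()
opts.onchange["theme"] = lambda: (_ for _ in ()).throw(RuntimeError("boom"))
assert opts.set("theme", "dark") is False
assert opts.data["theme"] == "light"  # rollback happened
```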
@@ -563,8 +574,11 @@ if os.path.exists(config_filename):
                 gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
                 dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
                 log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
-                create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
-                save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+                with FormRow():
+                    create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+                    save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
                 save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
                 preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
+                with gr.Row():
+                    shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
+                    tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
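The two controls in the final gr.Row describe per-caption preprocessing: captions are treated as comma-separated tags that can be randomly dropped and/or shuffled before being used as prompts. A sketch of that behaviour (a hypothetical function; the actual implementation lives in the dataset code):

```python
import random

def build_prompt(caption: str, shuffle_tags: bool, tag_drop_out: float) -> str:
    tags = [t.strip() for t in caption.split(",")]
    # Each tag survives with probability 1 - tag_drop_out.
    tags = [t for t in tags if random.random() >= tag_drop_out]
    if shuffle_tags:
        random.shuffle(tags)
    return ", ".join(tags)
```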