@@ -351,7 +351,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
     assert log_directory, "Log directory is empty"


-def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     save_embedding_every = save_embedding_every or 0
     create_image_every = create_image_every or 0
     template_file = textual_inversion_templates.get(template_filename, None)
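The new `use_weight` parameter is inserted in the middle of a long positional signature, between `latent_sampling_method` and `create_image_every`, so any call site still passing the old positional tuple will silently shift every later argument by one slot. A toy sketch of that hazard (the short signatures here are illustrative, not from the patch):

```python
# Toy stand-in for the signature change above; the real function takes ~29
# parameters, but three are enough to show the positional-shift hazard.
def train_old(latent_sampling_method, create_image_every, save_embedding_every):
    return locals()

def train_new(latent_sampling_method, use_weight, create_image_every, save_embedding_every):
    return locals()

old_args = ("once", 500, 500)
print(train_old(*old_args))
# {'latent_sampling_method': 'once', 'create_image_every': 500, 'save_embedding_every': 500}

# A stale caller reusing the old positional tuple now mis-binds: the first 500
# lands in use_weight, and a fourth argument has to be supplied explicitly.
print(train_new(*old_args, 500))
```

In the webui these arguments are wired up positionally from the Gradio input list in modules/ui.py, so the patch presumably adds the new checkbox in the matching position there.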
@@ -410,7 +410,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
     pin_memory = shared.opts.pin_memory

-    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)

     if shared.opts.save_training_settings_to_txt:
        save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
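The dataset side of the feature is not shown in this hunk; `PersonalizedBase` merely receives the flag here. A minimal sketch of what a `use_weight`-aware loader plausibly does, assuming the per-pixel weight is derived from the training image's alpha channel (the function name and path are illustrative, not from the patch):

```python
import numpy as np
import torch
from PIL import Image

def load_weight_map(path: str, width: int, height: int) -> torch.Tensor:
    # Illustrative sketch: treat the alpha channel as a per-pixel loss weight.
    # Images without transparency come back as a uniform weight of 1.0.
    img = Image.open(path).convert("RGBA").resize((width, height))
    alpha = np.asarray(img, dtype=np.float32)[:, :, 3] / 255.0
    return torch.from_numpy(alpha)  # shape (height, width), values in [0, 1]

w = load_weight_map("train/images/0001.png", 512, 512)  # hypothetical path
```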
@@ -480,6 +480,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                 with devices.autocast():
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+                    if use_weight:
+                        w = batch.weight.to(devices.device, non_blocking=pin_memory)
                     c = shared.sd_model.cond_stage_model(batch.cond_text)
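`batch.weight` follows the same transfer idiom as `batch.latent_sample`: `non_blocking` is tied to the `pin_memory` option because asynchronous host-to-device copies only pay off when the source tensor sits in page-locked memory. A self-contained illustration of the idiom:

```python
import torch

pin_memory = torch.cuda.is_available()  # non_blocking only matters with a GPU
device = "cuda" if torch.cuda.is_available() else "cpu"

w_cpu = torch.rand(2, 64, 64)
if pin_memory:
    w_cpu = w_cpu.pin_memory()  # page-locked memory enables true async copies

# With pinned memory this copy can overlap with queued GPU compute; without
# it, non_blocking=True silently degrades to an ordinary synchronous copy.
w = w_cpu.to(device, non_blocking=pin_memory)
```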
@@ -491,7 +492,11 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                     else:
                         cond = c

-                    loss = shared.sd_model(x, cond)[0] / gradient_step
+                    if use_weight:
+                        loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
+                        del w
+                    else:
+                        loss = shared.sd_model.forward(x, cond)[0] / gradient_step
                     del x

                     _loss_step += loss.item()
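`weighted_forward` is not a standard LDM method, so it must be defined elsewhere in this patch; the `[0]` indexes the loss out of the `(loss, loss_dict)` pair the model's forward returns, and `w` is deleted inside the weighted branch because it only exists there. A minimal sketch of the kind of loss the weighted path is assumed to compute (an assumption, not the patch's actual code): the weight map scales the per-element squared error before reduction, so zero-weight regions contribute nothing to the gradient.

```python
import torch

def weighted_mse(pred: torch.Tensor, target: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
    # Illustrative stand-in for the loss inside weighted_forward: broadcast a
    # (B, H, W) weight map over the latent channels and take a weighted mean
    # of the squared error, guarding against an all-zero weight map.
    se = (pred - target) ** 2                      # (B, C, H, W)
    w = w.unsqueeze(1)                             # (B, 1, H, W)
    return (se * w).sum() / w.expand_as(se).sum().clamp(min=1e-8)

pred, target = torch.randn(2, 4, 64, 64), torch.randn(2, 4, 64, 64)
w = torch.rand(2, 64, 64)
print(weighted_mse(pred, target, w))
```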