Merge branch 'master' into master
commit 0831ab476c
@@ -0,0 +1,31 @@
name: Run basic features tests on CPU with empty SD model

on:
  - push
  - pull_request

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: 3.10.6
      - uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
          restore-keys: ${{ runner.os }}-pip-
      - name: Run tests
        run: python launch.py --tests basic_features --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test
      - name: Upload main app stdout-stderr
        uses: actions/upload-artifact@v3
        if: always()
        with:
          name: stdout-stderr
          path: |
            test/stdout.txt
            test/stderr.txt
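
The workflow above drives the suite through launch.py so the server comes up configured for CPU before any test runs. As a rough illustration, a flag like --tests can hand off to unittest discovery along these lines (hypothetical sketch only; the repository's actual launch.py is not part of this commit):

# Hypothetical sketch of dispatching "--tests <suite>" to unittest discovery;
# the real launch.py also starts the web server and captures its output first.
import sys
import unittest


def run_tests(suite):
    # discover test modules such as test/basic_features/txt2img_test.py
    tests = unittest.TestLoader().discover(f"test/{suite}", pattern="*_test.py", top_level_dir="test")
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    sys.exit(0 if result.wasSuccessful() else 1)


if __name__ == "__main__" and "--tests" in sys.argv:
    run_tests(sys.argv[sys.argv.index("--tests") + 1])
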
@@ -0,0 +1,98 @@
import html
import sys
import threading
import traceback
import time

from modules import shared

# single lock shared by all wrappers below: only one wrapped call runs at a time
queue_lock = threading.Lock()


def wrap_queued_call(func):
    def f(*args, **kwargs):
        with queue_lock:
            res = func(*args, **kwargs)

        return res

    return f


def wrap_gradio_gpu_call(func, extra_outputs=None):
    def f(*args, **kwargs):

        shared.state.begin()

        with queue_lock:
            res = func(*args, **kwargs)

        shared.state.end()

        return res

    return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True)


def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
    def f(*args, extra_outputs_array=extra_outputs, **kwargs):
        run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
        if run_memmon:
            shared.mem_mon.monitor()
        t = time.perf_counter()

        try:
            res = list(func(*args, **kwargs))
        except Exception as e:
            # When printing out our debug argument list, do not print out more than a MB of text
            max_debug_str_len = 131072  # (1024*1024)/8

            print("Error completing request", file=sys.stderr)
            argStr = f"Arguments: {str(args)} {str(kwargs)}"
            print(argStr[:max_debug_str_len], file=sys.stderr)
            if len(argStr) > max_debug_str_len:
                print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr)

            print(traceback.format_exc(), file=sys.stderr)

            shared.state.job = ""
            shared.state.job_count = 0

            if extra_outputs_array is None:
                extra_outputs_array = [None, '']

            res = extra_outputs_array + [f"<div class='error'>{html.escape(type(e).__name__+': '+str(e))}</div>"]

        shared.state.skipped = False
        shared.state.interrupted = False
        shared.state.job_count = 0

        if not add_stats:
            return tuple(res)

        elapsed = time.perf_counter() - t
        elapsed_m = int(elapsed // 60)
        elapsed_s = elapsed % 60
        elapsed_text = f"{elapsed_s:.2f}s"
        if elapsed_m > 0:
            elapsed_text = f"{elapsed_m}m "+elapsed_text

        if run_memmon:
            # convert byte counts to MiB, rounding up
            mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
            active_peak = mem_stats['active_peak']
            reserved_peak = mem_stats['reserved_peak']
            sys_peak = mem_stats['system_peak']
            sys_total = mem_stats['total']
            sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)

            vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
        else:
            vram_html = ''

        # last item is always HTML
        res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"

        return tuple(res)

    return f
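
The queue module above funnels every wrapped call through a single lock. A minimal usage sketch of wrap_queued_call, with a hypothetical stand-in for a GPU-bound handler:

# Usage sketch for wrap_queued_call above; "generate" is a hypothetical job.
# Concurrent requests acquire queue_lock in turn, so they run back to back.
import threading

def generate(prompt):
    return [f"image for {prompt!r}", "<div>ok</div>"]

guarded = wrap_queued_call(generate)

threads = [threading.Thread(target=guarded, args=(p,)) for p in ("a cat", "a dog")]
for t in threads:
    t.start()
for t in threads:
    t.join()
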
@@ -0,0 +1,10 @@
from torch.utils.checkpoint import checkpoint

def BasicTransformerBlock_forward(self, x, context=None):
    return checkpoint(self._forward, x, context)

def AttentionBlock_forward(self, x):
    return checkpoint(self._forward, x)

def ResBlock_forward(self, x, emb):
    return checkpoint(self._forward, x, emb)
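
These three functions mirror the _forward methods of the corresponding LDM blocks and route them through gradient checkpointing, so activations are recomputed during backward instead of kept in memory. A standalone demonstration of the primitive (sketch, not part of the commit):

# Standalone sketch of torch.utils.checkpoint.checkpoint: intermediates inside
# the checkpointed callable are not stored during forward; they are recomputed
# when backward() runs, trading compute for peak memory.
import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Sequential(torch.nn.Linear(16, 64), torch.nn.GELU(), torch.nn.Linear(64, 16))
x = torch.randn(4, 16, requires_grad=True)

y = checkpoint(layer, x)   # same result as layer(x), lower peak memory
y.sum().backward()
print(x.grad.shape)        # torch.Size([4, 16])
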
@@ -0,0 +1,301 @@
import math

import torch

from modules import prompt_parser, devices
from modules.shared import opts


def get_target_prompt_token_count(token_count):
    return math.ceil(max(token_count, 1) / 75) * 75
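
# e.g. get_target_prompt_token_count(1) == 75 and get_target_prompt_token_count(80) == 150:
# prompts are handled in chunks of 75 tokens (77 once start/end tokens are added later)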

class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
    def __init__(self, wrapped, hijack):
        super().__init__()
        self.wrapped = wrapped
        self.hijack = hijack

    def tokenize(self, texts):
        raise NotImplementedError

    def encode_with_transformers(self, tokens):
        raise NotImplementedError

    def encode_embedding_init_text(self, init_text, nvpt):
        raise NotImplementedError

    def tokenize_line(self, line, used_custom_terms, hijack_comments):
        if opts.enable_emphasis:
            parsed = prompt_parser.parse_prompt_attention(line)
        else:
            parsed = [[line, 1.0]]

        tokenized = self.tokenize([text for text, _ in parsed])

        fixes = []
        remade_tokens = []
        multipliers = []
        last_comma = -1

        for tokens, (text, weight) in zip(tokenized, parsed):
            i = 0
            while i < len(tokens):
                token = tokens[i]

                embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)

                if token == self.comma_token:
                    last_comma = len(remade_tokens)
                # when a 75-token chunk boundary falls shortly after a comma, move
                # everything after that comma into the next chunk instead of splitting it
                elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack:
                    last_comma += 1
                    reloc_tokens = remade_tokens[last_comma:]
                    reloc_mults = multipliers[last_comma:]

                    remade_tokens = remade_tokens[:last_comma]
                    length = len(remade_tokens)

                    rem = int(math.ceil(length / 75)) * 75 - length
                    remade_tokens += [self.id_end] * rem + reloc_tokens
                    multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults

                if embedding is None:
                    remade_tokens.append(token)
                    multipliers.append(weight)
                    i += 1
                else:
                    emb_len = int(embedding.vec.shape[0])
                    iteration = len(remade_tokens) // 75
                    if (len(remade_tokens) + emb_len) // 75 != iteration:
                        rem = (75 * (iteration + 1) - len(remade_tokens))
                        remade_tokens += [self.id_end] * rem
                        multipliers += [1.0] * rem
                        iteration += 1
                    fixes.append((iteration, (len(remade_tokens) % 75, embedding)))
                    remade_tokens += [0] * emb_len
                    multipliers += [weight] * emb_len
                    used_custom_terms.append((embedding.name, embedding.checksum()))
                    i += embedding_length_in_tokens

        token_count = len(remade_tokens)
        prompt_target_length = get_target_prompt_token_count(token_count)
        tokens_to_add = prompt_target_length - len(remade_tokens)

        remade_tokens = remade_tokens + [self.id_end] * tokens_to_add
        multipliers = multipliers + [1.0] * tokens_to_add

        return remade_tokens, fixes, multipliers, token_count
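
    # process_text runs tokenize_line once per prompt, caching identical lines
    # and tracking the longest prompt in the batch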
    def process_text(self, texts):
        used_custom_terms = []
        remade_batch_tokens = []
        hijack_comments = []
        hijack_fixes = []
        token_count = 0

        cache = {}
        batch_multipliers = []
        for line in texts:
            if line in cache:
                remade_tokens, fixes, multipliers = cache[line]
            else:
                remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
                token_count = max(current_token_count, token_count)

                cache[line] = (remade_tokens, fixes, multipliers)

            remade_batch_tokens.append(remade_tokens)
            hijack_fixes.append(fixes)
            batch_multipliers.append(multipliers)

        return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count

    def process_text_old(self, texts):
        id_start = self.id_start
        id_end = self.id_end
        maxlen = self.wrapped.max_length  # you get to stay at 77
        used_custom_terms = []
        remade_batch_tokens = []
        hijack_comments = []
        hijack_fixes = []
        token_count = 0

        cache = {}
        batch_tokens = self.tokenize(texts)
        batch_multipliers = []
        for tokens in batch_tokens:
            tuple_tokens = tuple(tokens)

            if tuple_tokens in cache:
                remade_tokens, fixes, multipliers = cache[tuple_tokens]
            else:
                fixes = []
                remade_tokens = []
                multipliers = []
                mult = 1.0

                i = 0
                while i < len(tokens):
                    token = tokens[i]

                    embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)

                    mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
                    if mult_change is not None:
                        mult *= mult_change
                        i += 1
                    elif embedding is None:
                        remade_tokens.append(token)
                        multipliers.append(mult)
                        i += 1
                    else:
                        emb_len = int(embedding.vec.shape[0])
                        fixes.append((len(remade_tokens), embedding))
                        remade_tokens += [0] * emb_len
                        multipliers += [mult] * emb_len
                        used_custom_terms.append((embedding.name, embedding.checksum()))
                        i += embedding_length_in_tokens

                if len(remade_tokens) > maxlen - 2:
                    vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
                    ovf = remade_tokens[maxlen - 2:]
                    overflowing_words = [vocab.get(int(x), "") for x in ovf]
                    overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
                    hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")

                token_count = len(remade_tokens)
                remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
                remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
                cache[tuple_tokens] = (remade_tokens, fixes, multipliers)

            multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
            multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]

            remade_batch_tokens.append(remade_tokens)
            hijack_fixes.append(fixes)
            batch_multipliers.append(multipliers)
        return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
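
    # forward() encodes the prompt in 75-token chunks; each chunk is embedded
    # separately and the results are concatenated along the token axis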
    def forward(self, text):
        use_old = opts.use_old_emphasis_implementation
        if use_old:
            batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
        else:
            batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)

        self.hijack.comments += hijack_comments

        if len(used_custom_terms) > 0:
            self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))

        if use_old:
            self.hijack.fixes = hijack_fixes
            return self.process_tokens(remade_batch_tokens, batch_multipliers)

        z = None
        i = 0
        while max(map(len, remade_batch_tokens)) != 0:
            rem_tokens = [x[75:] for x in remade_batch_tokens]
            rem_multipliers = [x[75:] for x in batch_multipliers]

            self.hijack.fixes = []
            for unfiltered in hijack_fixes:
                fixes = []
                for fix in unfiltered:
                    if fix[0] == i:
                        fixes.append(fix[1])
                self.hijack.fixes.append(fixes)

            tokens = []
            multipliers = []
            for j in range(len(remade_batch_tokens)):
                if len(remade_batch_tokens[j]) > 0:
                    tokens.append(remade_batch_tokens[j][:75])
                    multipliers.append(batch_multipliers[j][:75])
                else:
                    tokens.append([self.id_end] * 75)
                    multipliers.append([1.0] * 75)

            z1 = self.process_tokens(tokens, multipliers)
            z = z1 if z is None else torch.cat((z, z1), axis=-2)

            remade_batch_tokens = rem_tokens
            batch_multipliers = rem_multipliers
            i += 1

        return z

    def process_tokens(self, remade_batch_tokens, batch_multipliers):
        if not opts.use_old_emphasis_implementation:
            remade_batch_tokens = [[self.id_start] + x[:75] + [self.id_end] for x in remade_batch_tokens]
            batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]

        tokens = torch.asarray(remade_batch_tokens).to(devices.device)

        if self.id_end != self.id_pad:
            for batch_pos in range(len(remade_batch_tokens)):
                index = remade_batch_tokens[batch_pos].index(self.id_end)
                tokens[batch_pos, index+1:tokens.shape[1]] = self.id_pad

        z = self.encode_with_transformers(tokens)

        # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
        batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
        batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(devices.device)
        original_mean = z.mean()
        z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
        new_mean = z.mean()
        z *= original_mean / new_mean

        return z


class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
    def __init__(self, wrapped, hijack):
        super().__init__(wrapped, hijack)
        self.tokenizer = wrapped.tokenizer
        self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ',</w>'][0]

        self.token_mults = {}
        tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
        for text, ident in tokens_with_parens:
            mult = 1.0
            for c in text:
                if c == '[':
                    mult /= 1.1
                if c == ']':
                    mult *= 1.1
                if c == '(':
                    mult *= 1.1
                if c == ')':
                    mult /= 1.1

            if mult != 1.0:
                self.token_mults[ident] = mult
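
        # token_mults is only consulted by process_text_old; the current path
        # takes its weights from prompt_parser.parse_prompt_attention instead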

        self.id_start = self.wrapped.tokenizer.bos_token_id
        self.id_end = self.wrapped.tokenizer.eos_token_id
        self.id_pad = self.id_end

    def tokenize(self, texts):
        tokenized = self.wrapped.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]

        return tokenized

    def encode_with_transformers(self, tokens):
        # any non-zero value makes the transformer return hidden_states
        outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)

        if opts.CLIP_stop_at_last_layers > 1:
            z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
            z = self.wrapped.transformer.text_model.final_layer_norm(z)
        else:
            z = outputs.last_hidden_state

        return z

    def encode_embedding_init_text(self, init_text, nvpt):
        embedding_layer = self.wrapped.transformer.text_model.embeddings
        ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
        embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)

        return embedded
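
The subtle step in process_tokens above is the mean restoration: token embeddings are scaled by their emphasis weights, then the whole tensor is rescaled so its mean matches the unweighted encoding (the code's own comment flags this as a heuristic). The arithmetic in isolation (standalone sketch):

# Standalone sketch of the mean-preserving emphasis scaling in process_tokens.
import torch

z = torch.randn(1, 77, 768) + 1.0   # stand-in embeddings, shifted off zero mean
mult = torch.ones(1, 77)
mult[0, 5] = 1.21                   # e.g. a token inside "((...))"

original_mean = z.mean()
z = z * mult.reshape(1, 77, 1).expand(z.shape)
z = z * (original_mean / z.mean())
print(float(z.mean()), float(original_mean))   # equal up to floating point error
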
@@ -0,0 +1,37 @@
import open_clip.tokenizer
import torch

from modules import sd_hijack_clip, devices
from modules.shared import opts

tokenizer = open_clip.tokenizer._tokenizer


class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
    def __init__(self, wrapped, hijack):
        super().__init__(wrapped, hijack)

        self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
        self.id_start = tokenizer.encoder["<start_of_text>"]
        self.id_end = tokenizer.encoder["<end_of_text>"]
        self.id_pad = 0

    def tokenize(self, texts):
        assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'

        tokenized = [tokenizer.encode(text) for text in texts]

        return tokenized

    def encode_with_transformers(self, tokens):
        # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers
        z = self.wrapped.encode_with_transformer(tokens)

        return z

    def encode_embedding_init_text(self, init_text, nvpt):
        ids = tokenizer.encode(init_text)
        ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
        embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)

        return embedded
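
The OpenCLIP wrapper differs from the transformers-based one mainly in where tokenization lives: open_clip exposes a module-level tokenizer whose encode() adds no special tokens, matching tokenize() above. A quick interactive check (sketch; assumes open_clip is installed):

# Sketch: encode() yields bare BPE ids; start/end ids are added later in
# process_tokens, which is why tokenize() above leaves them out.
import open_clip.tokenizer

tok = open_clip.tokenizer._tokenizer
print(tok.encode("a red fox"))
print(tok.encoder["<start_of_text>"], tok.encoder["<end_of_text>"])
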
@@ -0,0 +1,62 @@
import os
import tempfile
from collections import namedtuple

import gradio as gr

from PIL import PngImagePlugin

from modules import shared


Savedfile = namedtuple("Savedfile", ["name"])


def save_pil_to_file(pil_image, dir=None):
    already_saved_as = getattr(pil_image, 'already_saved_as', None)
    if already_saved_as and os.path.isfile(already_saved_as):
        shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(os.path.dirname(already_saved_as))}
        file_obj = Savedfile(already_saved_as)
        return file_obj

    if shared.opts.temp_dir != "":
        dir = shared.opts.temp_dir

    use_metadata = False
    metadata = PngImagePlugin.PngInfo()
    for key, value in pil_image.info.items():
        if isinstance(key, str) and isinstance(value, str):
            metadata.add_text(key, value)
            use_metadata = True

    file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=dir)
    pil_image.save(file_obj, pnginfo=(metadata if use_metadata else None))
    return file_obj


# override save to file function so that it also writes PNG info
gr.processing_utils.save_pil_to_file = save_pil_to_file


def on_tmpdir_changed():
    if shared.opts.temp_dir == "" or shared.demo is None:
        return

    os.makedirs(shared.opts.temp_dir, exist_ok=True)

    shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(shared.opts.temp_dir)}


def cleanup_tmpdr():
    temp_dir = shared.opts.temp_dir
    if temp_dir == "" or not os.path.isdir(temp_dir):
        return

    for root, dirs, files in os.walk(temp_dir, topdown=False):
        for name in files:
            _, extension = os.path.splitext(name)
            if extension != ".png":
                continue

            filename = os.path.join(root, name)
            os.remove(filename)
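
save_pil_to_file above replaces Gradio's helper so the temporary PNG keeps its text metadata (the original comment states the intent). The round-trip it relies on, in isolation (sketch; the path is illustrative):

# Standalone sketch of the PNG tEXt round-trip performed above (requires Pillow).
from PIL import Image, PngImagePlugin

img = Image.new("RGB", (8, 8))
info = PngImagePlugin.PngInfo()
info.add_text("parameters", "example prompt, Steps: 3")

img.save("/tmp/example.png", pnginfo=info)
print(Image.open("/tmp/example.png").info.get("parameters"))
# -> "example prompt, Steps: 3"
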
@@ -0,0 +1,47 @@
import unittest
import requests


class TestTxt2ImgWorking(unittest.TestCase):
    def setUp(self):
        self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img"
        self.simple_txt2img = {
            "enable_hr": False,
            "denoising_strength": 0,
            "firstphase_width": 0,
            "firstphase_height": 0,
            "prompt": "example prompt",
            "styles": [],
            "seed": -1,
            "subseed": -1,
            "subseed_strength": 0,
            "seed_resize_from_h": -1,
            "seed_resize_from_w": -1,
            "batch_size": 1,
            "n_iter": 1,
            "steps": 3,
            "cfg_scale": 7,
            "width": 64,
            "height": 64,
            "restore_faces": False,
            "tiling": False,
            "negative_prompt": "",
            "eta": 0,
            "s_churn": 0,
            "s_tmax": 0,
            "s_tmin": 0,
            "s_noise": 1,
            "sampler_index": "Euler a"
        }

    def test_txt2img_with_restore_faces_performed(self):
        self.simple_txt2img["restore_faces"] = True
        self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)


class TestTxt2ImgCorrectness(unittest.TestCase):
    pass


if __name__ == "__main__":
    unittest.main()
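
The test posts to a server that must already be listening on localhost:7860; in CI, the launch.py invocation in the workflow above provides it. A manual spot check against a running instance (sketch; assumes the API route is enabled):

# Manual spot check (sketch): POST a minimal payload by hand.
import requests

payload = {"prompt": "example prompt", "steps": 3, "width": 64, "height": 64}
r = requests.post("http://localhost:7860/sdapi/v1/txt2img", json=payload)
r.raise_for_status()
print(sorted(r.json().keys()))   # expect keys such as "images", "info", "parameters"
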
Binary file not shown.
@@ -0,0 +1,70 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
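
Code that consumes this config typically loads it with OmegaConf and instantiates model.target with model.params. A sketch (assumes omegaconf is installed; the file name is illustrative):

# Sketch: reading this LDM-style config (path is illustrative).
from omegaconf import OmegaConf

config = OmegaConf.load("v1-inference.yaml")
print(config.model.target)                                  # ldm.models.diffusion.ddpm.LatentDiffusion
print(config.model.params.unet_config.params.context_dim)   # 768, the CLIP ViT-L/14 text width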