Merge branch 'AUTOMATIC1111:master' into master
commit 1e18a5ffcc
.github/pull_request_template.md
@@ -0,0 +1,28 @@
# Please read the [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) before submitting a pull request!

If you have a large change, pay special attention to this paragraph:

> Before making changes, if you think that your feature will result in more than 100 lines changing, find me and talk to me about the feature you are proposing. It pains me to reject the hard work someone else did, but I won't add everything to the repo, and it's better if the rejection happens before you have to waste time working on the feature.

Otherwise, after making sure you're following the rules described in the wiki page, remove this section and continue on.

**Describe what this pull request is trying to achieve.**

A clear and concise description of what you're trying to accomplish with this, so your intent doesn't have to be extracted from your code.

**Additional notes and description of your changes**

More technical discussion about your changes goes here, plus anything that a maintainer might have to specifically take a look at, or be wary of.

**Environment this was tested in**

List the environment you have developed / tested this on. As per the contributing page, changes should be able to work on Windows out of the box.

- OS: [e.g. Windows, Linux]
- Browser: [e.g. Chrome, Safari]
- Graphics card: [e.g. NVIDIA RTX 2080 8GB, AMD RX 6600 8GB]

**Screenshots or videos of your changes**

If applicable, add screenshots or a video showing off your changes. If your change edits an existing UI, it should ideally include a comparison with what was there before your changes were made.

This is **required** for anything that touches the user interface.
javascript/contextMenus.js
@@ -0,0 +1,172 @@
contextMenuInit = function(){
    let eventListenerApplied = false;
    let menuSpecs = new Map();

    // short unique id used to identify menu entries for later removal
    const uid = function(){
        return Date.now().toString(36) + Math.random().toString(36).substr(2);
    }

    function showContextMenu(event, element, menuEntries){
        let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
        let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;

        let oldMenu = gradioApp().querySelector('#context-menu')
        if(oldMenu){
            oldMenu.remove()
        }

        // borrow colors and font from an existing button so the menu matches the theme
        let tabButton = gradioApp().querySelector('button')
        let baseStyle = window.getComputedStyle(tabButton)

        const contextMenu = document.createElement('nav')
        contextMenu.id = "context-menu"
        contextMenu.style.background = baseStyle.background
        contextMenu.style.color = baseStyle.color
        contextMenu.style.fontFamily = baseStyle.fontFamily
        contextMenu.style.top = posy + 'px'
        contextMenu.style.left = posx + 'px'

        const contextMenuList = document.createElement('ul')
        contextMenuList.className = 'context-menu-items';
        contextMenu.append(contextMenuList);

        menuEntries.forEach(function(entry){
            let contextMenuEntry = document.createElement('a')
            contextMenuEntry.innerHTML = entry['name']
            contextMenuEntry.addEventListener("click", function(e) {
                entry['func']();
            })
            contextMenuList.append(contextMenuEntry);
        })

        gradioApp().getRootNode().appendChild(contextMenu)

        // nudge the menu back inside the viewport if it would overflow
        let menuWidth = contextMenu.offsetWidth + 4;
        let menuHeight = contextMenu.offsetHeight + 4;

        let windowWidth = window.innerWidth;
        let windowHeight = window.innerHeight;

        if ( (windowWidth - posx) < menuWidth ) {
            contextMenu.style.left = windowWidth - menuWidth + "px";
        }

        if ( (windowHeight - posy) < menuHeight ) {
            contextMenu.style.top = windowHeight - menuHeight + "px";
        }
    }

    function appendContextMenuOption(targetElementSelector, entryName, entryFunction){
        let currentItems = menuSpecs.get(targetElementSelector)

        if(!currentItems){
            currentItems = []
            menuSpecs.set(targetElementSelector, currentItems);
        }
        let newItem = {'id': targetElementSelector + '_' + uid(),
                       'name': entryName,
                       'func': entryFunction,
                       'isNew': true}

        currentItems.push(newItem)
        return newItem['id']
    }

    function removeContextMenuOption(uid){
        menuSpecs.forEach(function(v, k) {
            let index = -1
            v.forEach(function(e, ei){ if(e['id'] == uid){ index = ei } })
            if(index >= 0){
                v.splice(index, 1);
            }
        })
    }

    function addContextMenuEventListener(){
        if(eventListenerApplied){
            return;
        }
        gradioApp().addEventListener("click", function(e) {
            let source = e.composedPath()[0]
            // ignore clicks coming from the progress polling buttons
            if(source.id && source.id.indexOf('check_progress') > -1){
                return
            }

            let oldMenu = gradioApp().querySelector('#context-menu')
            if(oldMenu){
                oldMenu.remove()
            }
        });
        gradioApp().addEventListener("contextmenu", function(e) {
            let oldMenu = gradioApp().querySelector('#context-menu')
            if(oldMenu){
                oldMenu.remove()
            }
            menuSpecs.forEach(function(v, k) {
                if(e.composedPath()[0].matches(k)){
                    showContextMenu(e, e.composedPath()[0], v)
                    e.preventDefault()
                    return
                }
            })
        });
        eventListenerApplied = true
    }

    return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener]
}

initResponse = contextMenuInit()
appendContextMenuOption = initResponse[0]
removeContextMenuOption = initResponse[1]
addContextMenuEventListener = initResponse[2]

//Start example Context Menu Items
generateOnRepeatId = appendContextMenuOption('#txt2img_generate', 'Generate forever', function(){
    let genbutton = gradioApp().querySelector('#txt2img_generate');
    let interruptbutton = gradioApp().querySelector('#txt2img_interrupt');
    // the interrupt button is only visible while a generation is running
    if(!interruptbutton.offsetParent){
        genbutton.click();
    }
    clearInterval(window.generateOnRepeatInterval)
    window.generateOnRepeatInterval = setInterval(function(){
        if(!interruptbutton.offsetParent){
            genbutton.click();
        }
    }, 500)
})

cancelGenerateForever = function(){
    clearInterval(window.generateOnRepeatInterval)
    let interruptbutton = gradioApp().querySelector('#txt2img_interrupt');
    if(interruptbutton.offsetParent){
        interruptbutton.click();
    }
}

appendContextMenuOption('#txt2img_interrupt', 'Cancel generate forever', cancelGenerateForever)
appendContextMenuOption('#txt2img_generate', 'Cancel generate forever', cancelGenerateForever)

appendContextMenuOption('#roll', 'Roll three',
    function(){
        let rollbutton = gradioApp().querySelector('#roll');
        setTimeout(function(){ rollbutton.click() }, 100)
        setTimeout(function(){ rollbutton.click() }, 200)
        setTimeout(function(){ rollbutton.click() }, 300)
    }
)
//End example Context Menu Items

onUiUpdate(function(){
    addContextMenuEventListener()
});
modules/deepbooru.py
@@ -0,0 +1,73 @@
import os.path
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import get_context


def _load_tf_and_return_tags(pil_image, threshold):
    import deepdanbooru as dd
    import tensorflow as tf
    import numpy as np

    this_folder = os.path.dirname(__file__)
    model_path = os.path.abspath(os.path.join(this_folder, '..', 'models', 'deepbooru'))
    if not os.path.exists(os.path.join(model_path, 'project.json')):
        # there is no point importing these every time
        import zipfile
        from basicsr.utils.download_util import load_file_from_url
        load_file_from_url(r"https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip",
                           model_path)
        with zipfile.ZipFile(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"), "r") as zip_ref:
            zip_ref.extractall(model_path)
        os.remove(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"))

    tags = dd.project.load_tags_from_project(model_path)
    model = dd.project.load_model_from_project(
        model_path, compile_model=True
    )

    width = model.input_shape[2]
    height = model.input_shape[1]
    image = np.array(pil_image)
    image = tf.image.resize(
        image,
        size=(height, width),
        method=tf.image.ResizeMethod.AREA,
        preserve_aspect_ratio=True,
    )
    image = image.numpy()  # EagerTensor to np.array
    image = dd.image.transform_and_pad_image(image, width, height)
    image = image / 255.0
    image_shape = image.shape
    image = image.reshape((1, image_shape[0], image_shape[1], image_shape[2]))

    y = model.predict(image)[0]

    result_dict = {}

    for i, tag in enumerate(tags):
        result_dict[tag] = y[i]
    result_tags_out = []
    result_tags_print = []
    for tag in tags:
        if result_dict[tag] >= threshold:
            if tag.startswith("rating:"):
                continue
            result_tags_out.append(tag)
            result_tags_print.append(f'{result_dict[tag]} {tag}')

    print('\n'.join(sorted(result_tags_print, reverse=True)))

    return ', '.join(result_tags_out).replace('_', ' ').replace(':', ' ')


def subprocess_init_no_cuda():
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"


def get_deepbooru_tags(pil_image, threshold=0.5):
    context = get_context('spawn')
    with ProcessPoolExecutor(initializer=subprocess_init_no_cuda, mp_context=context) as executor:
        f = executor.submit(_load_tf_and_return_tags, pil_image, threshold)
        ret = f.result()  # will rethrow any exceptions
    return ret
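For reference, a minimal usage sketch of the tagger added above. This is not part of the diff: it assumes the webui root is on `sys.path` and the deepdanbooru/tensorflow dependencies are installed, and `example.png` is a placeholder path.

```python
from PIL import Image

from modules.deepbooru import get_deepbooru_tags  # assumes webui root on sys.path

pil_image = Image.open("example.png").convert("RGB")  # placeholder image

# The tagging runs in a spawned subprocess with CUDA_VISIBLE_DEVICES=-1,
# so it stays on the CPU and leaves the GPU free for image generation.
tags = get_deepbooru_tags(pil_image, threshold=0.5)
print(tags)  # comma-separated tag string, underscores and colons replaced
```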
modules/hypernetwork.py
@@ -0,0 +1,98 @@
import glob
import os
import sys
import traceback

import torch
from torch import einsum
from einops import rearrange, repeat

from ldm.util import default
from modules import devices, shared


class HypernetworkModule(torch.nn.Module):
    def __init__(self, dim, state_dict):
        super().__init__()

        self.linear1 = torch.nn.Linear(dim, dim * 2)
        self.linear2 = torch.nn.Linear(dim * 2, dim)

        self.load_state_dict(state_dict, strict=True)
        self.to(devices.device)

    def forward(self, x):
        return x + (self.linear2(self.linear1(x)))


class Hypernetwork:
    filename = None
    name = None

    def __init__(self, filename):
        self.filename = filename
        self.name = os.path.splitext(os.path.basename(filename))[0]
        self.layers = {}

        state_dict = torch.load(filename, map_location='cpu')
        for size, sd in state_dict.items():
            self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1]))


def list_hypernetworks(path):
    res = {}
    for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
        name = os.path.splitext(os.path.basename(filename))[0]
        res[name] = filename
    return res


def load_hypernetwork(filename):
    path = shared.hypernetworks.get(filename, None)
    if path is not None:
        print(f"Loading hypernetwork {filename}")
        try:
            shared.loaded_hypernetwork = Hypernetwork(path)
        except Exception:
            print(f"Error loading hypernetwork {path}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
    else:
        if shared.loaded_hypernetwork is not None:
            print("Unloading hypernetwork")

        shared.loaded_hypernetwork = None


def attention_CrossAttention_forward(self, x, context=None, mask=None):
    h = self.heads

    q = self.to_q(x)
    context = default(context, x)

    hypernetwork = shared.loaded_hypernetwork
    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)

    if hypernetwork_layers is not None:
        k = self.to_k(hypernetwork_layers[0](context))
        v = self.to_v(hypernetwork_layers[1](context))
    else:
        k = self.to_k(context)
        v = self.to_v(context)

    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

    sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

    if mask is not None:
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    # attention, what we cannot get enough of
    attn = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', attn, v)
    out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
    return self.to_out(out)
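A note on the on-disk format: `Hypernetwork.__init__` above expects `torch.load` to yield a dict mapping an attention context dimension to a pair of `HypernetworkModule` state dicts (one applied on the key path, one on the value path). The sketch below, inferred from that loader, builds a compatible file; the filename, the 768 dimension, and the zero-init choice are illustrative assumptions, not part of the diff.

```python
import torch

def no_op_module_sd(dim):
    # All-zero weights and biases make forward() return x + 0 == x,
    # i.e. the hypernetwork starts out as an identity residual.
    return {
        'linear1.weight': torch.zeros(dim * 2, dim),
        'linear1.bias': torch.zeros(dim * 2),
        'linear2.weight': torch.zeros(dim, dim * 2),
        'linear2.bias': torch.zeros(dim),
    }

# 768 is the cross-attention context dim for SD 1.x text conditioning;
# one state_dict for the key projection, one for the value projection.
state_dict = {768: (no_op_module_sd(768), no_op_module_sd(768))}
torch.save(state_dict, 'example_hypernetwork.pt')  # placeholder filename
```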
modules/safe.py
@@ -0,0 +1,93 @@
# this code is adapted from the script contributed by anon from /h/

import io
import pickle
import collections
import sys
import traceback

import torch
import numpy
import _codecs
import zipfile


# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage


def encode(*args):
    out = _codecs.encode(*args)
    return out


class RestrictedUnpickler(pickle.Unpickler):
    def persistent_load(self, saved_id):
        assert saved_id[0] == 'storage'
        return TypedStorage()

    def find_class(self, module, name):
        if module == 'collections' and name == 'OrderedDict':
            return getattr(collections, name)
        if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
            return getattr(torch._utils, name)
        if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage']:
            return getattr(torch, name)
        if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
            return getattr(torch.nn.modules.container, name)
        if module == 'numpy.core.multiarray' and name == 'scalar':
            return numpy.core.multiarray.scalar
        if module == 'numpy' and name == 'dtype':
            return numpy.dtype
        if module == '_codecs' and name == 'encode':
            return encode
        if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
            import pytorch_lightning.callbacks
            return pytorch_lightning.callbacks.model_checkpoint
        if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
            import pytorch_lightning.callbacks.model_checkpoint
            return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
        if module == "__builtin__" and name == 'set':
            return set

        # Forbid everything else.
        raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden")


def check_pt(filename):
    try:

        # new pytorch format is a zip file
        with zipfile.ZipFile(filename) as z:
            with z.open('archive/data.pkl') as file:
                unpickler = RestrictedUnpickler(file)
                unpickler.load()

    except zipfile.BadZipfile:

        # if it's not a zip file, it's an old pytorch format, with five objects written to pickle
        with open(filename, "rb") as file:
            unpickler = RestrictedUnpickler(file)
            for i in range(5):
                unpickler.load()


def load(filename, *args, **kwargs):
    from modules import shared

    try:
        if not shared.cmd_opts.disable_safe_unpickle:
            check_pt(filename)

    except Exception:
        print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
        print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
        print("You can skip this check with the --disable-safe-unpickle commandline argument.", file=sys.stderr)
        return None

    return unsafe_torch_load(filename, *args, **kwargs)


unsafe_torch_load = torch.load
torch.load = load
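As a sanity check of the allow-list above, a small sketch (assuming the webui root is importable so `modules.safe` resolves) showing `RestrictedUnpickler` rejecting a payload that tries to smuggle in `os.system`:

```python
import io
import pickle

from modules.safe import RestrictedUnpickler


class Evil:
    # __reduce__ makes unpickling call os.system -- the classic attack
    # vector that the allow-list in find_class is meant to block.
    def __reduce__(self):
        import os
        return (os.system, ("echo pwned",))


payload = pickle.dumps(Evil())

try:
    RestrictedUnpickler(io.BytesIO(payload)).load()
except pickle.UnpicklingError as e:
    print(e)  # e.g. "global 'posix/system' is forbidden" ('nt/system' on Windows)
```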
Binary file not shown.
Before Width: | Height: | Size: 526 KiB  After Width: | Height: | Size: 329 KiB