@@ -292,3 +292,18 @@ def cross_attention_attnblock_forward(self, x):
 
         return h3
 
+def xformers_attnblock_forward(self, x):
+    try:
+        h_ = x
+        h_ = self.norm(h_)
+        q = self.q(h_)
+        k = self.k(h_)
+        v = self.v(h_)
+        b, c, h, w = q.shape
+        q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+        out = xformers.ops.memory_efficient_attention(q, k, v)
+        out = rearrange(out, 'b (h w) c -> b c h w', h=h)
+        out = self.proj_out(out)
+        return x + out
+    except NotImplementedError:
+        return cross_attention_attnblock_forward(self, x)
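
For context, a replacement forward like this only takes effect once it is patched over the original attention block. A minimal sketch of that wiring, assuming the ldm package's module layout and a `sd_hijack_optimizations` module holding the function above (the exact module names are assumptions, not part of this diff):

import ldm.modules.diffusionmodules.model
from modules import sd_hijack_optimizations

# Assumption: monkey-patch AttnBlock.forward so every attention block in the
# VAE/decoder path runs the xformers implementation; on NotImplementedError
# it falls back to cross_attention_attnblock_forward, per the diff above.
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward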