
Commit

softmax is already stable, also start enforcing it to be always float32
lucidrains committed Aug 7, 2022
1 parent 7212069 commit 1adb2ea
Showing 2 changed files with 2 additions and 6 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -3,7 +3,7 @@
 setup(
   name = 'x-transformers',
   packages = find_packages(exclude=['examples']),
-  version = '0.32.2',
+  version = '0.32.3',
   license='MIT',
   description = 'X-Transformers - Pytorch',
   author = 'Phil Wang',
6 changes: 1 addition & 5 deletions x_transformers/x_transformers.py
@@ -66,10 +66,6 @@ def l2norm(t, groups = 1):
     t = F.normalize(t, p = 2, dim = -1)
     return rearrange(t, '... g d -> ... (g d)')
 
-def stable_softmax(t, dim = -1):
-    t = t - t.amax(dim = dim, keepdim = True).detach()
-    return F.softmax(t, dim = dim)
-
 # init helpers
 
 def init_zero_(layer):
@@ -573,7 +569,7 @@ def __init__(
         self.sparse_topk = sparse_topk
 
         # entmax
-        self.attn_fn = entmax15 if use_entmax15 else stable_softmax
+        self.attn_fn = entmax15 if use_entmax15 else partial(F.softmax, dtype = torch.float32)
 
         # add memory key / values
         self.num_mem_kv = num_mem_kv
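
For context, a minimal sketch (not part of the commit, assuming a recent PyTorch release) illustrating both halves of the commit message: F.softmax already subtracts the row max internally, so the removed stable_softmax helper was redundant, and partial(F.softmax, dtype = torch.float32) upcasts half-precision attention logits so the softmax is always computed and returned in float32.

from functools import partial

import torch
import torch.nn.functional as F

def stable_softmax(t, dim = -1):
    # the helper removed by this commit: manual max-subtraction before softmax
    t = t - t.amax(dim = dim, keepdim = True).detach()
    return F.softmax(t, dim = dim)

# logits large enough that a naive exp()-based softmax would overflow float32
sim = torch.randn(2, 8, 64, 64) * 100

out_manual  = stable_softmax(sim, dim = -1)
out_builtin = F.softmax(sim, dim = -1)          # no overflow: the max is subtracted internally
assert torch.allclose(out_manual, out_builtin)

# the replacement used in the diff above: half-precision inputs are cast to
# float32 before the softmax, and the output stays float32
attn_fn = partial(F.softmax, dtype = torch.float32)
out = attn_fn(sim.half(), dim = -1)
assert out.dtype == torch.float32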
