From be86e7ebefb21f694b190487b789f4e61132fc13 Mon Sep 17 00:00:00 2001
From: James Cross
Date: Thu, 7 May 2020 18:34:46 -0700
Subject: [PATCH] fairseq transformer: enable decoder_output_dim (#2096)

Summary:
Pull Request resolved: https://github.com/pytorch/fairseq/pull/2096

No change to existing behavior. Allows the use of an extra learned linear
projection (bottleneck layer) before the output projection. This structure
was already supported in `TransformerDecoder` via args.decoder_output_dim,
used in architectures such as `transformer_lm`, but this change surfaces a
command-line option for the basic transformer architecture.

Reviewed By: cndn

Differential Revision: D21443249

fbshipit-source-id: cdf5806c97ce03a77befa14bc482c81c7b9c83a1
---
 fairseq/models/transformer.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/fairseq/models/transformer.py b/fairseq/models/transformer.py
index 79e10cc41a..bb793d1d83 100644
--- a/fairseq/models/transformer.py
+++ b/fairseq/models/transformer.py
@@ -132,6 +132,9 @@ def add_args(parser):
                             help='use learned positional embeddings in the decoder')
         parser.add_argument('--decoder-normalize-before', action='store_true',
                             help='apply layernorm before each decoder block')
+        parser.add_argument('--decoder-output-dim', type=int, metavar='N',
+                            help='decoder output dimension (extra linear layer '
+                                 'if different from decoder embed dim)')
         parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                             help='share decoder input and output embeddings')
         parser.add_argument('--share-all-embeddings', action='store_true',
@@ -924,6 +927,7 @@ def base_architecture(args):
 
     args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
     args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
+    args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
 
 
 @register_model_architecture("transformer", "transformer_iwslt_de_en")
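
For context, the bottleneck this option exposes works as follows: when the
decoder output dimension differs from the decoder embedding dimension, an
extra learned linear layer projects the decoder's hidden states to the output
dimension before the final vocabulary projection. The sketch below illustrates
that structure in isolation; it is not the fairseq `TransformerDecoder` code,
and the names `BottleneckOutputHead`, `embed_dim`, `output_dim`, and
`vocab_size` are hypothetical.

    import torch
    import torch.nn as nn

    class BottleneckOutputHead(nn.Module):
        """Hypothetical, minimal sketch of a decoder output head with an
        optional bottleneck projection (not fairseq's actual implementation)."""

        def __init__(self, embed_dim: int, output_dim: int, vocab_size: int):
            super().__init__()
            # Extra learned projection only when the two dims differ,
            # mirroring the idea behind --decoder-output-dim.
            self.project_out = (
                nn.Linear(embed_dim, output_dim, bias=False)
                if output_dim != embed_dim
                else None
            )
            self.output_projection = nn.Linear(output_dim, vocab_size, bias=False)

        def forward(self, decoder_states: torch.Tensor) -> torch.Tensor:
            # decoder_states: (batch, tgt_len, embed_dim)
            if self.project_out is not None:
                decoder_states = self.project_out(decoder_states)
            # -> (batch, tgt_len, vocab_size)
            return self.output_projection(decoder_states)

    # Example: 512-dim decoder states bottlenecked to 256 before a 32k-vocab projection.
    head = BottleneckOutputHead(embed_dim=512, output_dim=256, vocab_size=32000)
    logits = head(torch.randn(2, 7, 512))
    print(logits.shape)  # torch.Size([2, 7, 32000])

With the patch applied, this structure is requested on the command line by
passing, for example, `--decoder-output-dim 256` alongside `--arch transformer`;
as the commit summary notes, omitting the option leaves existing behavior
unchanged (no extra projection).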