From 96c78396ce1baf5e19c0618689005f93c7f42d79 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 7 May 2020 10:28:31 +0200
Subject: [PATCH] fix docstring reformer (#4190)

---
 src/transformers/configuration_reformer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/transformers/configuration_reformer.py b/src/transformers/configuration_reformer.py
index 1a0a04777695..4e7227bfa06a 100644
--- a/src/transformers/configuration_reformer.py
+++ b/src/transformers/configuration_reformer.py
@@ -53,11 +53,11 @@ class ReformerConfig(PretrainedConfig):
         axial_pos_shape (:obj:`list(int)`, optional, defaults to `[64, 64]`):
             The position dims of the axial position encodings.
             During training the product of the position dims has to equal the sequence length.
-            For more information on how axial position embeddings work, see `Axial Position Encodings `__ncodings.
+            For more information on how axial position embeddings work, see `Axial Position Encodings `__.
         axial_pos_embds_dim (:obj:`list(int)`, optional, defaults to `[64, 192]`):
             The embedding dims of the axial position encodings.
             The sum of the embedding dims has to equal the hidden size.
-            For more information on how axial position embeddings work, see `Axial Position Encodings `__ncodings.
+            For more information on how axial position embeddings work, see `Axial Position Encodings `__.
         chunk_size_lm_head (:obj:`int`, optional, defaults to 0):
             The chunk size of the final language model feed forward head layer.
             A chunk size of 0 means that the feed forward layer is not chunked.
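
Note (not part of the patch): the corrected docstring describes two constraints on the axial position encodings, namely that the product of axial_pos_shape must equal the (padded) sequence length and that the sum of axial_pos_embds_dim must equal the hidden size. The following is a minimal, illustrative sketch of a ReformerConfig that satisfies both constraints; the concrete values simply mirror the documented defaults and are not prescribed by this change.

    from transformers import ReformerConfig

    # Illustrative values only, matching the documented defaults:
    #   64 * 64  == 4096 == max_position_embeddings  (product of position dims = sequence length)
    #   64 + 192 == 256  == hidden_size              (sum of embedding dims = hidden size)
    config = ReformerConfig(
        hidden_size=256,
        max_position_embeddings=4096,
        axial_pos_shape=[64, 64],
        axial_pos_embds_dim=[64, 192],
    )

    # Both docstring constraints hold for this configuration.
    assert config.axial_pos_shape[0] * config.axial_pos_shape[1] == config.max_position_embeddings
    assert sum(config.axial_pos_embds_dim) == config.hidden_size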