This repository was archived by the owner on Jul 7, 2023. It is now read-only.
File tree (expand/collapse): 4 files changed, +13 −9 lines
lines changed Original file line number Diff line number Diff line change @@ -180,7 +180,7 @@ python -c "from tensor2tensor.models.transformer import Transformer"
180180** Datasets** are all standardized on ` TFRecord ` files with ` tensorflow.Example `
181181protocol buffers. All datasets are registered and generated with the
182182[ data
183- generator] ( https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t-datagen )
183+ generator] ( https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/generator.py )
184184and many common sequence datasets are already available for generation and use.
185185
186186### Problems and Modalities
Original file line number Diff line number Diff line change 1- #!/usr/bin/env python
21# coding=utf-8
32# Copyright 2017 The Tensor2Tensor Authors.
43#
Original file line number Diff line number Diff line change @@ -56,12 +56,7 @@ def model_fn_body(self, features):
5656 (decoder_input , decoder_self_attention_bias ) = transformer_prepare_decoder (
5757 targets , hparams )
5858
59- def residual_fn (x , y ):
60- return common_layers .residual_fn (x , y ,
61- hparams .norm_type ,
62- hparams .residual_dropout ,
63- hparams .hidden_size ,
64- epsilon = hparams .layer_norm_epsilon )
59+ residual_fn = get_residual_fn (hparams )
6560
6661 encoder_input = tf .nn .dropout (encoder_input , 1.0 - hparams .residual_dropout )
6762 decoder_input = tf .nn .dropout (decoder_input , 1.0 - hparams .residual_dropout )
@@ -76,6 +71,17 @@ def residual_fn(x, y):
7671 return decoder_output
7772
7873
def get_residual_fn(hparams):
  """Build the residual/normalization function used around sublayers.

  Args:
    hparams: hyperparameter object; reads `norm_type`, `residual_dropout`,
      `hidden_size`, and `layer_norm_epsilon`.

  Returns:
    A function `residual_fn(x, y)` that applies the configured residual
    connection (via `common_layers.residual_fn`) to `x` and `y`.
  """
  norm_type = hparams.norm_type
  dropout = hparams.residual_dropout
  hidden_size = hparams.hidden_size
  epsilon = hparams.layer_norm_epsilon

  def residual_fn(x, y):
    # Delegate to the shared implementation with the configured settings.
    return common_layers.residual_fn(
        x, y, norm_type, dropout, hidden_size, epsilon=epsilon)

  return residual_fn
83+
84+
7985def transformer_prepare_encoder (inputs , target_space , hparams ):
8086 """Prepare one shard of the model for the encoder.
8187
Original file line number Diff line number Diff line change 1- #!/usr/bin/env python
21# coding=utf-8
32# Copyright 2017 The Tensor2Tensor Authors.
43#
You can’t perform that action at this time.
0 commit comments