# java_small_nofeat_share.yml (forked from OpenNMT/OpenNMT-py)
data:
    corpus_1:
        path_src: ../synpos/data/java-small-processed/train_src.txt
        path_tgt: ../synpos/data/java-seq2seq-data/data.TargetType.seq.train.target.txt
        transforms: [filterfeats, onmt_tokenize, inferfeats] #, filtertoolong]
        weight: 1
    valid:
        path_src: ../synpos/data/java-small-processed/val_src.txt
        path_tgt: ../synpos/data/java-seq2seq-data/data.TargetType.seq.val.target.txt
        transforms: [filterfeats, onmt_tokenize, inferfeats]
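# Transforms run in list order: filterfeats and inferfeats are the
# OpenNMT-py source-feature transforms (kept in the pipeline even though
# this run is "nofeat"), and filtertoolong stays commented out, so no
# length-based filtering is applied.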
src_onmttok_kwargs: "{'mode': 'space'}"
tgt_onmttok_kwargs: "{'mode': 'space'}"
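# 'space' mode makes the OpenNMT tokenizer split on whitespace only, so
# presumably pre-tokenized source/target files pass through unchanged.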
# Vocab opts
src_vocab: java_small_transformer_sum/data_shared/data.vocab.src
tgt_vocab: java_small_transformer_sum/data_shared/data.vocab.src
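# tgt_vocab deliberately points at the source vocab file: with
# share_vocab/share_embeddings enabled below, one joint vocabulary covers
# both sides.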
save_data: java_small_data
overwrite: False
#save_model: java_small_transformer_nofeat_copy_share/model
save_checkpoint_steps: 1000
keep_checkpoint: 20
seed: 3435
train_steps: 500000
valid_steps: 1000
valid_batch_size: 8192
warmup_steps: 8000
report_every: 100
src_vocab_size: 2000
share_embeddings: True
share_vocab: True
copy_attn: True
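# copy_attn adds a copy generator so the decoder can copy source tokens
# (e.g. Java identifiers) directly into the output; share_embeddings ties
# the encoder and decoder embedding tables and requires the shared
# vocabulary configured above.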
# Uri:
#early_stopping: 10
#early_stopping_criteria: accuracy
decoder_type: transformer
encoder_type: transformer
word_vec_size: 512
rnn_size: 512
layers: 6
transformer_ff: 2048
heads: 8
accum_count: 8
optim: adam
adam_beta1: 0.9
adam_beta2: 0.998
decay_method: noam
learning_rate: 2.0
max_grad_norm: 0.0
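# Standard transformer-base optimization recipe: Adam with Noam decay,
# learning-rate scale 2.0, 8000 warmup steps (set above), and gradient
# clipping disabled (max_grad_norm: 0.0).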
batch_size: 4096
batch_type: tokens
normalization: tokens
dropout: 0.1
label_smoothing: 0.1
max_generator_batches: 2
param_init: 0.0
param_init_glorot: true
position_encoding: true
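# Glorot initialization (param_init: 0.0 disables uniform init) and
# sinusoidal position encodings, as expected for a Transformer
# encoder/decoder.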
world_size: 2
gpu_ranks:
- 0
- 1
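# Trains on 2 GPUs (world_size / gpu_ranks). Typical OpenNMT-py 2.x usage
# for a config like this (illustrative; the vocab files referenced above
# would normally come from an earlier onmt_build_vocab run):
#   onmt_train -config java_small_nofeat_share.yml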