# LSTM-related settings
TIME_HORIZON: 100
DATA_DIR: ./Data
BASE_LOG_DIR: ./training-log
MODEL_DIR: ./trained_models
# Experiment result directories
SKOPT_RESULT_BASE_DIR: './results/skopt-test-data'
MNIST_RESULT_BASE_DIR: './results/mnist'
BENCHMARK_RESULT_BASE_DIR: './results/benchmark-functions'
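# A minimal sketch (kept as a comment so this file stays valid YAML) of how the
# paths above might be consumed. Assumptions: the project loads this file with
# PyYAML, and the file name 'config.yaml' plus the variable names below are
# illustrative only.
#
#   import os
#   import yaml
#
#   with open('config.yaml') as f:
#       cfg = yaml.safe_load(f)
#
#   # Make sure the data, log, and model directories exist before training.
#   for key in ('DATA_DIR', 'BASE_LOG_DIR', 'MODEL_DIR'):
#       os.makedirs(cfg[key], exist_ok=True)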
airfoil_optimization:
  output_dir: './results/airfoil'
  no_alpha: 10
  alpha_range: [-5, 5]
  x_start: -1
  lstm_model_2d: 'rnn-cell-2d-1498485717'
  adjustable_y: [1, 2, 3, 4, 5]
  y_input_space:
    0: [-0.2, 0.2]
    1: [-0.2, 0.2]
    2: [-0.2, 0.2]
    3: [-0.2, 0.2]
    4: [-0.2, 0.2]
    5: [-0.2, 0.2]
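# A hedged sketch of reading the airfoil bounds above; the dictionary name
# 'bounds' and the loading code are assumptions, not taken from the project.
#
#   airfoil = cfg['airfoil_optimization']
#   # Per-coordinate (lower, upper) bounds for the adjustable y indices.
#   bounds = {i: tuple(airfoil['y_input_space'][i]) for i in airfoil['adjustable_y']}
#   # e.g. bounds[1] == (-0.2, 0.2)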
experiments:
  1D:
    no_training: 2000
    no_testing: 2000
    hyperparameters:
      learning_rate_init: [0.005]
      learning_rate_final: [0.0001]
      n_hidden: [100]
      loss_function: ['MIN', 'OI', 'SUM', 'WSUM_EXPO']
      batch_size: [128]
      n_steps: [20]
      gradient_clipping: [1.0]
      max_x_abs_value: [1.0]
      starting_point:
        - [-1]
  2D:
    no_training: 2000
    no_testing: 2000
    hyperparameters:
      learning_rate_init: [0.005]
      learning_rate_final: [0.0001]
      n_hidden: [100]
      loss_function: ['MIN']
      batch_size: [128]
      n_steps: [20]
      gradient_clipping: [1.0]
      max_x_abs_value: [1.0]
      starting_point:
        - [-1, -1]
  3D:
    no_training: 4000
    no_testing: 2000
    hyperparameters:
      learning_rate_init: [0.005]
      learning_rate_final: [0.0001]
      n_hidden: [100]
      loss_function: ['MIN', 'OI', 'SUM', 'WSUM_EXPO']
      batch_size: [128]
      n_steps: [20]
      gradient_clipping: [1.0]
      max_x_abs_value: [1.0]
      starting_point:
        - [-1, -1, -1]
  4D:
    no_training: 6000
    no_testing: 2000
    hyperparameters:
      learning_rate_init: [0.005]
      learning_rate_final: [0.0001]
      n_hidden: [100]
      loss_function: ['MIN']
      batch_size: [128]
      n_steps: [20]
      gradient_clipping: [1.0]
      max_x_abs_value: [1.0]
      starting_point:
        - [-1, -1, -1, -1]
  6D:
    no_training: 10000
    no_testing: 2000
    hyperparameters:
      learning_rate_init: [0.005]
      learning_rate_final: [0.0001]
      n_hidden: [100]
      loss_function: ['MIN']
      batch_size: [128]
      n_steps: [20]
      gradient_clipping: [1.0]
      max_x_abs_value: [1.0]
      starting_point:
        - [-1, -1, -1, -1, -1, -1]
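# A minimal sketch of expanding one experiment's hyperparameter lists into a
# grid of runs. Assumption: each list holds candidate values to sweep; the
# names 'exp', 'hp', and 'run_cfg' are illustrative only.
#
#   from itertools import product
#
#   exp = cfg['experiments']['1D']
#   hp = exp['hyperparameters']
#   keys = sorted(hp)
#   for values in product(*(hp[k] for k in keys)):
#       run_cfg = dict(zip(keys, values))
#       # run_cfg maps each hyperparameter name to a single value, e.g.
#       # run_cfg['loss_function'] is one of 'MIN', 'OI', 'SUM', 'WSUM_EXPO'.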