nuscenes_unet32_spherical_transformer.yaml
DATA:
  data_name: nuscenes
  data_root: YOUR_DATA_ROOT  # Fill in the data path
  label_mapping: util/nuscenes.yaml
  classes: 16
  fea_dim: 6
  voxel_size: [0.1, 0.1, 0.1]
  voxel_max: 120000
TRAIN:
  # arch
  arch: unet_spherical_transformer
  input_c: 4
  m: 32
  block_reps: 2
  block_residual: True
  layers: [32, 64, 128, 256, 256]
  quant_size_scale: 24
  patch_size: 1
  window_size: 6
  use_xyz: True
  sync_bn: True  # adopt sync_bn or not
  rel_query: True
  rel_key: True
  rel_value: True
  drop_path_rate: 0.3
  max_batch_points: 250000
  xyz_norm: False
  window_size_sphere: [2, 2, 120]
  window_size_scale: [2.0, 2.0]
  sphere_layers: [1, 2, 3, 4, 5]
  grad_checkpoint_layers: []
  a: 0.0125
  loss_name: ce_loss
  use_tta: False
  vote_num: 4
  # training
  aug: True
  transformer_lr_scale: 0.1
  scheduler_update: step
  scheduler: Poly
  power: 0.9
  use_amp: True
  train_gpu: [0, 1, 2, 3]
  workers: 16  # data loader workers
  batch_size: 16  # batch size for training
  batch_size_val: 8  # batch size for validation during training; memory/speed tradeoff
  base_lr: 0.006
  epochs: 50
  start_epoch: 0
  momentum: 0.9
  weight_decay: 0.01
  drop_rate: 0.5
  ignore_label: 255
  manual_seed: 123
  print_freq: 10
  save_freq: 1
  save_path: runs/nuscenes_unet32_spherical_transformer
  weight:  # path to initial weight (default: none)
  resume:  # path to latest checkpoint (default: none)
  evaluate: True  # evaluate on the validation set; needs extra GPU memory, so a small batch_size_val is recommended
  eval_freq: 1
  val: False
Distributed:
  dist_url: tcp://127.0.0.1:6789
  dist_backend: 'nccl'
  multiprocessing_distributed: True
  world_size: 1
  rank: 0
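
Usage note (not part of the config file): below is a minimal sketch of how a config like this could be read in Python, assuming PyYAML is installed. The load_cfg helper and the flattening of the DATA / TRAIN / Distributed groups into a single argparse.Namespace are illustrative assumptions, not the repository's actual config loader.

# Minimal sketch (assumption): parse the YAML and expose every key as an attribute.
import argparse
import yaml  # PyYAML

def load_cfg(path="nuscenes_unet32_spherical_transformer.yaml"):
    with open(path) as f:
        cfg = yaml.safe_load(f)
    # Flatten the three top-level groups into one flat dict of options.
    merged = {}
    for group in ("DATA", "TRAIN", "Distributed"):
        merged.update(cfg.get(group, {}))
    return argparse.Namespace(**merged)

if __name__ == "__main__":
    args = load_cfg()
    print(args.arch, args.voxel_size, args.batch_size)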