# fit_config.yaml
# lightning.pytorch==2.3.3
seed_everything: 0
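# Trainer flags; values left null fall back to Lightning's defaults.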
trainer:
  accelerator: auto
  strategy: auto
  devices: auto
  num_nodes: 1
  precision: null
  # Log metrics and model checkpoints to Weights & Biases:
  logger:
    class_path: lightning.pytorch.loggers.WandbLogger
    init_args:
      project: "BERT-LoRA"
      log_model: "all"
      name: "lightning-studio-run"
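  # DeviceStatsMonitor logs hardware (CPU/GPU) utilisation stats during training: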
  callbacks:
    - class_path: lightning.pytorch.callbacks.DeviceStatsMonitor
  fast_dev_run: false
  max_epochs: 12
  min_epochs: null
  max_steps: -1
  min_steps: null
  max_time: null
  limit_train_batches: null
  limit_val_batches: null
  limit_test_batches: null
  limit_predict_batches: null
  overfit_batches: 0.0
  val_check_interval: null
  check_val_every_n_epoch: 2
  num_sanity_val_steps: null
  log_every_n_steps: 25
  enable_checkpointing: null
  enable_progress_bar: null
  enable_model_summary: null
  accumulate_grad_batches: 1
  gradient_clip_val: null
  gradient_clip_algorithm: null
  deterministic: true
  benchmark: null
  inference_mode: true
  use_distributed_sampler: true
  profiler: null
  detect_anomaly: false
  barebones: false
  plugins: null
  sync_batchnorm: false
  reload_dataloaders_every_n_epochs: 0
  default_root_dir: null
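# Custom LightningModule hyperparameters; the LoRA rank/alpha and do_* toggles
# presumably control the LoRA adapters and feed-forward (FFN) adaptation.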
model:
  idx_to_label: [negative, positive]
  lora_rank: 8
  lora_alpha: 1
  lr: 0.001
  do_ffn: true
  do_lora: true
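# DataModule settings: batch size, dataloader workers, and a seed
# (presumably for data shuffling/splits).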
data:
  batch_size: 250
  num_workers: 1
  seed: 0
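# ModelCheckpoint arguments, presumably linked into the CLI under this custom key;
# keeps the single best checkpoint by validation loss, evaluated every 2 epochs.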
my_model_checkpoint:
  dirpath: null
  filename: null
  monitor: validation/loss
  verbose: false
  save_last: null
  save_top_k: 1
  save_weights_only: false
  mode: min
  auto_insert_metric_name: true
  every_n_train_steps: null
  train_time_interval: null
  every_n_epochs: 2
  save_on_train_epoch_end: null
  enable_version_counter: true
ckpt_path: null
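# Usage (assuming a LightningCLI entry point, e.g. main.py):
#   python main.py fit --config fit_config.yaml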