#!/bin/bash
#SBATCH --account=boyer
#SBATCH --qos=boyer
#SBATCH --partition=gpu
#SBATCH --gpus=a100:7
#SBATCH -N 1 # nodes requested
#SBATCH -n 1 # tasks requested
#SBATCH -c 2 # cores requested
#SBATCH --mem=60gb # memory requested (60 GB)
#SBATCH -o out_cl # send stdout to outfile
#SBATCH -e err_cl # send stderr to errfile
#SBATCH -t 10:00:00 # time requested in hour:minute:second
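#
# Hedged usage note (not executed by the job itself): a script like this is
# typically submitted and monitored with the standard SLURM CLI, e.g.
#   sbatch predict.slurm
#   squeue -u $USER
#   tail -f out_cl err_cl
# The account, qos, and partition names above are cluster-specific.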
# Load necessary modules
module load git
module load python/3.8
module load cuda/11.4.3
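# Optional sanity check of the loaded toolchain (module names and versions are
# cluster-specific assumptions; uncomment to log them to out_cl):
# python --version
# nvcc --version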
# Remove any existing old files
# rm -rf continual-learning-nlu
# rm out_cl
# rm err_cl
# rm venv
# Clone the GitHub repository
#git clone https://github.com/msamogh/continual-learning-nlu.git
cd continual-learning-nlu
# git pull
# Create and activate a virtual environment
python -m venv venv
source venv/bin/activate
# Install Python dependencies
pip install -r requirements.txt
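# Optional hedged check that the requested A100s are visible to the framework
# (assumes requirements.txt pulls in PyTorch; adjust if another backend is used):
# python -c "import torch; print('GPUs visible:', torch.cuda.device_count())"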
# Change directory
cd cl_domain/
# Strategy: random (this prediction block is currently commented out)
# echo "Strategy = Random"
# PYTHONPATH=$PYTHONPATH:$(pwd)/.. python run.py \
# --mode predict \
# --cl_super_run_label random-flat-tee \
# --cl_checkpoint_dir ../cl_checkpoints \
# --cl_run_dir ../cl_runs \
# --results_dir ../cl_results \
# --ordering_strategy random \
# --num_train_epochs 18 \
# --num_runs 22 \
# --num_domains_per_run 5 \
# --cl_lr_schedule constant \
# --limit_n_samples 180 \
# --val_size_per_domain 0.01 \
# --cl_experience_replay_size 20 \
# --eval_batch_size 16 \
# --train_batch_size 16 \
# echo "Strategy = Max path"
# PYTHONPATH=$PYTHONPATH:$(pwd)/.. python run.py \
# --mode predict \
# --cl_super_run_label max_path-vicious-tree \
# --cl_checkpoint_dir ../cl_checkpoints \
# --cl_run_dir ../cl_runs \
# --results_dir ../cl_results \
# --ordering_strategy max_path \
# --num_train_epochs 5 \
# --num_runs 22 \
# --num_domains_per_run 5 \
# --cl_lr_schedule constant \
# --limit_n_samples 180 \
# --val_size_per_domain 0.01 \
# --cl_experience_replay_size 20 \
# --eval_batch_size 16 \
# --train_batch_size 16 \
echo "Strategy = Min path"
PYTHONPATH=$PYTHONPATH:$(pwd)/.. python run.py \
--mode predict \
--cl_super_run_label min_path-tahoe-word \
--cl_checkpoint_dir ../cl_checkpoints \
--cl_run_dir ../cl_runs \
--results_dir ../cl_results \
--ordering_strategy min_path \
--num_train_epochs 5 \
--num_runs 22 \
--num_domains_per_run 5 \
--cl_lr_schedule constant \
--limit_n_samples 180 \
--val_size_per_domain 0.01 \
--cl_experience_replay_size 20 \
--eval_batch_size 16 \
--train_batch_size 16
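# Second prediction pass: reuses the max_path-vicious-tree run label while the
# ordering strategy below is min_path.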
PYTHONPATH=$PYTHONPATH:$(pwd)/.. python run.py \
--mode predict \
--cl_super_run_label max_path-vicious-tree \
--cl_checkpoint_dir ../cl_checkpoints \
--cl_run_dir ../cl_runs \
--results_dir ../cl_results \
--ordering_strategy min_path \
--num_train_epochs 5 \
--num_runs 22 \
--num_domains_per_run 5 \
--cl_lr_schedule constant \
--limit_n_samples 180 \
--val_size_per_domain 0.01 \
--cl_experience_replay_size 20 \
--eval_batch_size 16 \
--train_batch_size 16
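# Hedged post-run note: predictions should land under ../cl_results (per
# --results_dir above); a quick check, assuming that layout:
# ls ../cl_results
deactivate # leave the virtual environment (optional; the job shell exits anyway)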