Skip to content

Commit

Permalink
[CI] Don't use eval_strategy="steps" when no eval dataset (huggingface#2152)
Browse files Browse the repository at this point in the history

* `eval_strategy="steps" if eval_dataset else "no"`

* tmp skip test

* drop `eval_strategy` in `test_sft_trainer_uncorrect_data`

* remove eval strategy
  • Loading branch information
qgallouedec authored Oct 1, 2024
1 parent 0a566f0 commit 5c21de3
Show file tree
Hide file tree
Showing 4 changed files with 3 additions and 19 deletions.
2 changes: 1 addition & 1 deletion tests/test_bco_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def test_bco_trainer(self, name, pre_compute, eval_dataset, config_name):
max_steps=3,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
eval_strategy="steps" if eval_dataset else "no",
beta=0.1,
precompute_ref_log_probs=pre_compute,
report_to="none",
Expand Down
2 changes: 1 addition & 1 deletion tests/test_dpo_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1049,7 +1049,7 @@ class DPOVisionTrainerTester(unittest.TestCase):
@parameterized.expand(
[
["trl-internal-testing/tiny-random-idefics2"],
["trl-internal-testing/tiny-random-paligemma"],
# ["trl-internal-testing/tiny-random-paligemma"], # temporarily disabled due to flaky tests
["trl-internal-testing/tiny-random-llava-1.5"],
]
)
Expand Down
2 changes: 1 addition & 1 deletion tests/test_kto_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def test_kto_trainer(self, name, loss_type, pre_compute, eval_dataset):
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
eval_strategy="steps" if eval_dataset else "no",
beta=0.1,
precompute_ref_log_probs=pre_compute,
loss_type=loss_type,
Expand Down
16 changes: 0 additions & 16 deletions tests/test_sft_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -265,7 +264,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -285,7 +283,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -303,7 +300,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -322,7 +318,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -340,7 +335,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -360,7 +354,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -382,7 +375,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand All @@ -401,7 +393,6 @@ def test_sft_trainer_uncorrect_data(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
Expand Down Expand Up @@ -448,7 +439,6 @@ def test_sft_trainer_with_model_num_train_epochs(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
save_steps=1,
num_train_epochs=2,
Expand All @@ -475,7 +465,6 @@ def test_sft_trainer_with_model_num_train_epochs(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
save_steps=1,
num_train_epochs=2,
Expand Down Expand Up @@ -527,7 +516,6 @@ def test_sft_trainer_with_model(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
Expand All @@ -554,7 +542,6 @@ def test_sft_trainer_with_model(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
Expand All @@ -581,7 +568,6 @@ def test_sft_trainer_with_model(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
Expand All @@ -605,7 +591,6 @@ def test_sft_trainer_with_model(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
Expand Down Expand Up @@ -1193,7 +1178,6 @@ def test_sft_trainer_skip_prepare_dataset_with_no_packing(self):
training_args = SFTConfig(
output_dir=tmp_dir,
dataloader_drop_last=True,
eval_strategy="steps",
max_steps=4,
eval_steps=2,
save_steps=2,
Expand Down

0 comments on commit 5c21de3

Please sign in to comment.