Skip to content

Commit

Permalink
Merge pull request #67 from danmcp/batchsize
Browse files Browse the repository at this point in the history
Update batch size description and allow for str
  • Loading branch information
alinaryan authored Jul 15, 2024
2 parents 4d1ad40 + fd45b93 commit 5f88204
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions src/instructlab/eval/mmlu.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: Apache-2.0

# Standard
from typing import Optional
from typing import Optional, Union
import os

# Third Party
Expand Down Expand Up @@ -93,7 +93,7 @@ class AbstractMMLUEvaluator(Evaluator):
tasks list of tasks for MMLU to test the model with
model_dtype dtype of model when served
few_shots number of examples
batch_size number of GPUs
batch_size batch size for evaluation. Valid values are a positive integer or 'auto' to select the largest batch size that will fit in memory, or 'auto:N' to reselect the largest batch size N times.
device PyTorch device (e.g. "cpu" or "cuda:0") for running models
"""

Expand All @@ -104,7 +104,7 @@ def __init__(
tasks: list[str],
model_dtype="bfloat16",
few_shots: int = 2,
batch_size: int = 5,
batch_size: Optional[Union[int, str]] = "auto",
device: str = ("cuda" if torch.cuda.is_available() else "cpu"),
) -> None:
self.model_path = model_path
Expand Down Expand Up @@ -170,7 +170,7 @@ class MMLUEvaluator(AbstractMMLUEvaluator):
tasks list of tasks for MMLU to test the model with
model_dtype dtype of model when served
few_shots number of examples
batch_size number of GPUs
batch_size batch size for evaluation. Valid values are a positive integer or 'auto' to select the largest batch size that will fit in memory, or 'auto:N' to reselect the largest batch size N times.
device PyTorch device (e.g. "cpu" or "cuda:0") for running models
"""

Expand All @@ -182,7 +182,7 @@ def __init__(
tasks: list[str] = MMLU_TASKS,
model_dtype="bfloat16",
few_shots: int = 2,
batch_size: int = 5,
batch_size: Optional[Union[int, str]] = "auto",
device: str = ("cuda" if torch.cuda.is_available() else "cpu"),
) -> None:
super().__init__(
Expand Down Expand Up @@ -227,7 +227,7 @@ class MMLUBranchEvaluator(AbstractMMLUEvaluator):
tasks group name that is shared by all the MMLUBranch tasks
model_dtype dtype of model when served
few_shots number of examples
batch_size number of GPUs
batch_size batch size for evaluation. Valid values are a positive integer or 'auto' to select the largest batch size that will fit in memory, or 'auto:N' to reselect the largest batch size N times.
device PyTorch device (e.g. "cpu" or "cuda:0") for running models
"""

Expand Down

0 comments on commit 5f88204

Please sign in to comment.