[SW-211857] add dynamo cache size limit option
chaojun-zhang committed Dec 17, 2024
1 parent f031d2d commit 25cb002
Showing 1 changed file with 10 additions and 0 deletions.
10 changes: 10 additions & 0 deletions optimum/habana/transformers/training_args.py
@@ -101,6 +101,8 @@ class GaudiTrainingArguments(TrainingArguments):
Whether to use compiled autograd for training. Currently only for summarization models.
compile_dynamic (`bool|None`, *optional*, defaults to `None`):
Set value of 'dynamic' parameter for torch.compile.
cache_size_limit (`int`, *optional*, defaults to `None`):
Set value of 'cache_size_limit' parameter for torch._dynamo.config.
disable_tensor_cache_hpu_graphs (`bool`, *optional*, defaults to `False`):
Whether to disable tensor cache when using hpu graphs. If True, tensors won't be cached in hpu graph and memory can be saved.
max_hpu_graphs (`int`, *optional*):
@@ -170,6 +172,11 @@ class GaudiTrainingArguments(TrainingArguments):
metadata={"help": ("Set value of 'dynamic' parameter for torch.compile.")},
)

cache_size_limit: Optional[int] = field(
default=None,
metadata={"help": "Set value of 'cache_size_limit' parameter for torch._dynamo.config."},
)

disable_tensor_cache_hpu_graphs: Optional[bool] = field(
default=False,
metadata={"help": "Whether to use a tensor cache for hpu graphs."},
@@ -860,6 +867,9 @@ def _setup_devices(self) -> "torch.device":
if self.sdp_on_bf16:
torch._C._set_math_sdp_allow_fp16_bf16_reduction(True)

if self.torch_compile and self.cache_size_limit is not None:
torch._dynamo.config.cache_size_limit = self.cache_size_limit

logger.info("PyTorch: setting up devices")
if not is_accelerate_available():
raise ImportError(
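For context, a minimal usage sketch (not part of the commit; values and the exact argument set are illustrative and assume an HPU environment with this change installed): passing `cache_size_limit` together with `torch_compile` causes `_setup_devices` to write the value into `torch._dynamo.config.cache_size_limit`.

```python
# Illustrative sketch only; output_dir and the limit value are placeholders.
from optimum.habana import GaudiTrainingArguments

training_args = GaudiTrainingArguments(
    output_dir="./output",   # required by TrainingArguments; placeholder path
    use_habana=True,         # run on HPU
    torch_compile=True,      # the limit is only applied when torch.compile is enabled
    cache_size_limit=64,     # forwarded to torch._dynamo.config.cache_size_limit during device setup
)
```

Per the guard added in `_setup_devices`, the option is a no-op when `torch_compile` is disabled or `cache_size_limit` is left at `None`.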
