diff --git a/.ci/docker/requirements.txt b/.ci/docker/requirements.txt
index b7e3d935..8073a391 100644
--- a/.ci/docker/requirements.txt
+++ b/.ci/docker/requirements.txt
@@ -2,7 +2,6 @@ torchdata >= 0.8.0
 datasets >= 2.21.0
 tomli >= 1.1.0 ; python_version < "3.11"
 tensorboard
-sentencepiece
 tiktoken
 blobfile
 tabulate
diff --git a/README.md b/README.md
index c87f939b..2468d4a0 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
 # torchtitan
 
-`torchtitan` is currently in a pre-release state and under extensive development. Currently we showcase pre-training **Llama 3.1**, **Llama 3**, and **Llama 2** LLMs of various sizes from scratch. To use the latest features of `torchtitan`, we recommend using the most recent PyTorch nightly.
+`torchtitan` is currently in a pre-release state and under extensive development. Currently we showcase pre-training **Llama 3.1** LLMs of various sizes from scratch. To use the latest features of `torchtitan`, we recommend using the most recent PyTorch nightly.
 
 `torchtitan` is a proof-of-concept for Large-scale LLM training using native PyTorch. It is (and will continue to be) a repo to showcase PyTorch's latest distributed training features in a clean, minimal codebase. torchtitan is complementary to and not a replacement for any of the great large-scale LLM training codebases such as Megatron, Megablocks, LLM Foundry, Deepspeed, etc. Instead, we hope that the features showcased in torchtitan will be adopted by these codebases quickly. torchtitan is unlikely to ever grow a large community around it.
@@ -43,7 +43,7 @@ You may want to see how the model is defined or how parallelism techniques are a
 * [torchtitan/parallelisms/pipeline_llama.py](torchtitan/parallelisms/pipeline_llama.py) - helpers for applying Pipeline Parallel to the model
 * [torchtitan/checkpoint.py](torchtitan/checkpoint.py) - utils for saving/loading distributed checkpoints
 * [torchtitan/float8.py](torchtitan/float8.py) - utils for applying Float8 techniques
-* [torchtitan/models/llama/model.py](torchtitan/models/llama/model.py) - the Llama model definition (shared for Llama 2 and Llama 3 variants)
+* [torchtitan/models/llama/model.py](torchtitan/models/llama/model.py) - the Llama 3.1 model definition
 
 ### Key features available
@@ -78,18 +78,15 @@ pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu
 
 ### Downloading a tokenizer
 
-`torchtitan` currently supports training Llama 3 (8B, 70B), and Llama 2 (7B, 13B, 70B) out of the box. To get started training these models, we need to download a tokenizer.model. Follow the instructions on the official [meta-llama](https://huggingface.co/meta-llama/Meta-Llama-3-8B) repository to ensure you have access to the Llama model weights.
+`torchtitan` currently supports training Llama 3.1 (8B, 70B, 405B) out of the box. To get started training these models, we need to download a tokenizer.model. Follow the instructions on the official [meta-llama](https://huggingface.co/meta-llama/Llama-3.1-8B) repository to ensure you have access to the Llama model weights.
 
-Once you have confirmed access, you can run the following command to download the Llama 3 / Llama 2 tokenizer to your local machine.
+Once you have confirmed access, you can run the following command to download the Llama 3.1 tokenizer to your local machine.
 
 ```bash
 # Get your HF token from https://huggingface.co/settings/tokens
-# Llama 3 or 3.1 tokenizer.model
-python torchtitan/datasets/download_tokenizer.py --repo_id meta-llama/Meta-Llama-3-8B --tokenizer_path "original" --hf_token=...
-
-# Llama 2 tokenizer.model
-python torchtitan/datasets/download_tokenizer.py --repo_id meta-llama/Llama-2-13b-hf --hf_token=...
+# Llama 3.1 tokenizer.model
+python torchtitan/datasets/download_tokenizer.py --repo_id meta-llama/Meta-Llama-3.1-8B --tokenizer_path "original" --hf_token=...
 ```
 
 ### Start a training run
diff --git a/assets/images/llama2_loss_curves.png b/assets/images/llama2_loss_curves.png
deleted file mode 100644
index 576b45f7..00000000
Binary files a/assets/images/llama2_loss_curves.png and /dev/null differ
diff --git a/docs/performance.md b/docs/performance.md
index 93f6764b..39171815 100644
--- a/docs/performance.md
+++ b/docs/performance.md
@@ -1,4 +1,4 @@
-To demonstrate the effectiveness of PyTorch distributed training techniques used in torchtitan, we report both the infra metrics and loss curves of Llama 2 (13B and 70B) and Llama 3 (8B and 70B) training on 64 A100 (80GB memory) GPUs and Llama 3.1 (405B) on 128 H100 (94GB memory).
+To demonstrate the effectiveness of PyTorch distributed training techniques used in torchtitan, we report both the infra metrics and loss curves of Llama 3 (8B and 70B) training on 64 A100 (80GB memory) GPUs and Llama 3.1 (405B) on 128 H100 (94GB memory).
 We report infra metrics achieved by [FSDP2](fsdp.md) (1D parallelism) under various configurations, and loss curves for both 1D parallelism (FSDP2) and 2D parallelism (FSDP2 + Tensor Parallel) training. (We only report 2D for 405B)
 
@@ -34,30 +34,6 @@ Next we show the loss curves for Llama 3 8B and Llama 3 70B training with both 1
 ![image](../assets/images/llama3_loss_curves.png)
 
-
-## Llama 2 performance numbers
-
-Below are the WPS and MFU results which torchtitan achieves on Llama 2 models with FSDP2 on 64 A100 (80GB) GPUs.
-
-| Model size | Batch size | Activation checkpointing | WPS | MFU |
-| ----- | ----- | ----- | ----- | ----- |
-| 13B | 2 | no | 2162 | 61.1% |
-| 13B | 2 | selective layer | 1914 | 54.1% |
-| 13B | 2 | selective op | 1904 | 53.8% |
-| 70B | 1[^3] | selective op | 355 | 50.8% |
-| 70B | 2 | full | 353 | 50.5% |
-
-We primarily use local batch size 2 (global batch size 128) in the experiments, to keep the same number of tokens per training iteration between Llama 2 and Llama 3 (since the default sequence length in Llama 2 is 4096 which is halved compared with Llama 3). In fact, for Llama 2 70B model with full activation checkpointing, the MFU can go up to 54% when local batch size is higher (but before an OOM happens).
-
-Next we show the loss curves for Llama 2 13B and Llama 2 70B training with both 1D parallelism (FSDP2) and 2D parallelism (FSDP2 + Tensor Parallel). All four models are trained 3000 steps with global batch size 128.
-In terms of activation checkpointing (AC) configs, the Llama 2 13B training jobs use selective op AC, whereas the Llama 70B training jobs use full AC. The results are shown in the picture (a TensorBoard screenshot) below[^4].
-
-![image](../assets/images/llama2_loss_curves.png)
-
 [^1]: We used HBM2e based lower TDP SXM H100(95GB) for our test, the actual peak TFLOPs number is between SXM and NVL, and we don't know its exact value. So this MFU number is lower than actual MFU because we use the peak number of SXM directly.
 
 [^2]: Since for Float8, we are not converting all the matmuls to Float8 because our fused attention implementation is not done in Float8, so this number is lower than expected.
-
-[^3]: Since the 70B training with local batch size 2 will cause an OOM error when selective activation checkpointing is used, we report the local batch size 1 case instead.
-
-[^4]: One may have noticed that for both 13B and 70B training, 1D parallelism has slightly better convergence than 2D parallelism in the first half of training. We believe this is caused by the stronger shuffling effect introduced by having more FSDP ranks in the 1D parallelism, and the difference in convergence speed should go away after switching to a randomized data loading solution.
diff --git a/multinode_trainer.slurm b/multinode_trainer.slurm
index 4bc495d3..f506924b 100644
--- a/multinode_trainer.slurm
+++ b/multinode_trainer.slurm
@@ -53,7 +53,8 @@ export NCCL_SOCKET_IFNAME="eth0,en,eth,em,bond"
 export NCCL_BUFFSIZE=2097152
 #export TORCH_DIST_INIT_BARRIER=1
 export FI_EFA_SET_CUDA_SYNC_MEMOPS=0
-CONFIG_FILE=${CONFIG_FILE:-"./train_configs/llama2_13b.toml"}
+
+CONFIG_FILE=${CONFIG_FILE:-"./train_configs/llama3_8b.toml"}
 dcgmi profile --pause
 # adjust sbatch --ntasks and sbatch --nodes above and --nnodes below
diff --git a/pyproject.toml b/pyproject.toml
index a5c1b72f..16079266 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,7 +15,6 @@ dependencies = [
     # Tokenization
     "blobfile",
-    "sentencepiece",
     "tiktoken",
 
     # Miscellaneous
diff --git a/torchtitan/datasets/tokenizer/__init__.py b/torchtitan/datasets/tokenizer/__init__.py
index 543fc973..823faae8 100644
--- a/torchtitan/datasets/tokenizer/__init__.py
+++ b/torchtitan/datasets/tokenizer/__init__.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-from torchtitan.datasets.tokenizer.sentencepiece import SentencePieceTokenizer
 from torchtitan.datasets.tokenizer.tiktoken import TikTokenizer
 from torchtitan.datasets.tokenizer.tokenizer import Tokenizer
 
@@ -13,9 +12,7 @@ def build_tokenizer(tokenizer_type: str, tokenizer_path: str) -> Tokenizer:
     logger.info(f"Building {tokenizer_type} tokenizer locally from {tokenizer_path}")
 
-    if tokenizer_type == "sentencepiece":
-        return SentencePieceTokenizer(tokenizer_path)
-    elif tokenizer_type == "tiktoken":
+    if tokenizer_type == "tiktoken":
         return TikTokenizer(tokenizer_path)
     else:
         raise ValueError(f"Unknown tokenizer type: {tokenizer_type}")
diff --git a/torchtitan/datasets/tokenizer/sentencepiece.py b/torchtitan/datasets/tokenizer/sentencepiece.py
deleted file mode 100644
index c71afddd..00000000
--- a/torchtitan/datasets/tokenizer/sentencepiece.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-# copied and adjusted from https://github.com/facebookresearch/llama/blob/main/llama/tokenizer.py
-
-from typing import List
-
-from sentencepiece import SentencePieceProcessor
-
-from torchtitan.datasets.tokenizer.tokenizer import Tokenizer
-from torchtitan.logging import logger
-
-
-class SentencePieceTokenizer(Tokenizer):
-    """
-    Tokenizing and encoding/decoding text based on a SentencePiece model.
-
-    Args:
-        tokenizer_path (str): The path to the SentencePiece model file.
- """ - - def __init__(self, tokenizer_path: str): - super().__init__(tokenizer_path) - # reload tokenizer - self.sp_model = SentencePieceProcessor(model_file=tokenizer_path) - - # BOS / EOS token IDs - self._n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.pad_id() - logger.info( - f"SentencePieceTokenizer built: #words {self.n_words}, BOS ID {self.bos_id}, EOS ID {self.eos_id}" - ) - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - def encode(self, s: str, bos: bool, eos: bool) -> List[int]: - """ - Encodes a string into a list of token IDs. - - Args: - s (str): The input string to be encoded. - bos (bool): Whether to prepend the beginning-of-sequence token. - eos (bool): Whether to append the end-of-sequence token. - - Returns: - List[int]: A list of token IDs. - """ - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - """ - Decodes a list of token IDs into a string. - - Args: - t (List[int]): The list of token IDs to be decoded. - - Returns: - str: The decoded string. - """ - return self.sp_model.decode(t) diff --git a/torchtitan/models/__init__.py b/torchtitan/models/__init__.py index c7bb16c6..c666b065 100644 --- a/torchtitan/models/__init__.py +++ b/torchtitan/models/__init__.py @@ -4,16 +4,14 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -from torchtitan.models.llama import llama2_configs, llama3_configs, Transformer +from torchtitan.models.llama import llama3_configs, Transformer models_config = { - "llama2": llama2_configs, "llama3": llama3_configs, } -model_name_to_cls = {"llama2": Transformer, "llama3": Transformer} +model_name_to_cls = {"llama3": Transformer} model_name_to_tokenizer = { - "llama2": "sentencepiece", "llama3": "tiktoken", } diff --git a/torchtitan/models/llama/__init__.py b/torchtitan/models/llama/__init__.py index 887a96cd..3bb430d2 100644 --- a/torchtitan/models/llama/__init__.py +++ b/torchtitan/models/llama/__init__.py @@ -4,30 +4,12 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # -# Llama 2 is licensed under the LLAMA 2 Community License, # Copyright (c) Meta Platforms, Inc. All Rights Reserved. from torchtitan.models.llama.model import ModelArgs, Transformer __all__ = ["Transformer"] -llama2_configs = { - "debugmodel": ModelArgs(dim=256, n_layers=8, n_heads=16), - "271M": ModelArgs(dim=1024, n_layers=16, n_heads=8), - "1B": ModelArgs(dim=2048, n_layers=18, n_heads=16), - "7B": ModelArgs(dim=4096, n_layers=32, n_heads=32), - "13B": ModelArgs(dim=5120, n_layers=40, n_heads=40), - "26B": ModelArgs(dim=5120, n_layers=80, n_heads=40), - "70B": ModelArgs( - dim=8192, - n_layers=80, - n_heads=64, - n_kv_heads=8, - ffn_dim_multiplier=1.3, - multiple_of=4096, - ), -} - llama3_configs = { "debugmodel": ModelArgs(dim=256, n_layers=8, n_heads=16, rope_theta=500000), "8B": ModelArgs( diff --git a/torchtitan/models/llama/model.py b/torchtitan/models/llama/model.py index a3bae18a..641ef6de 100644 --- a/torchtitan/models/llama/model.py +++ b/torchtitan/models/llama/model.py @@ -4,7 +4,6 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
 #
-# Llama 2 is licensed under the LLAMA 2 Community License,
 # Copyright (c) Meta Platforms, Inc. All Rights Reserved.
 
diff --git a/torchtitan/parallelisms/__init__.py b/torchtitan/parallelisms/__init__.py
index b75cb336..f1f1d1fb 100644
--- a/torchtitan/parallelisms/__init__.py
+++ b/torchtitan/parallelisms/__init__.py
@@ -17,10 +17,8 @@
 ]
 
 models_parallelize_fns = {
-    "llama2": parallelize_llama,
     "llama3": parallelize_llama,
 }
 
 models_pipelining_fns = {
-    "llama2": pipeline_llama,
     "llama3": pipeline_llama,
 }
diff --git a/train_configs/llama2_13b.toml b/train_configs/llama2_13b.toml
deleted file mode 100644
index b238ebad..00000000
--- a/train_configs/llama2_13b.toml
+++ /dev/null
@@ -1,58 +0,0 @@
-# torchtitan Config.toml
-# NOTE: this toml config is a preset for 64 A100 GPUs.
-
-[job]
-dump_folder = "./outputs"
-description = "Llama2 13B training"
-
-[profiling]
-enable_profiling = true
-save_traces_folder = "profile_trace"
-profile_freq = 100
-
-[metrics]
-log_freq = 10
-enable_tensorboard = true
-save_tb_folder = "tb"
-
-[model]
-name = "llama2"
-flavor = "13B"
-norm_type = "rmsnorm" # layernorm / np_layernorm / rmsnorm / fused_rmsnorm
-tokenizer_path = "./torchtitan/datasets/tokenizer/tokenizer.model"
-
-[optimizer]
-name = "AdamW"
-lr = 3e-4
-
-[training]
-batch_size = 2
-seq_len = 4096
-warmup_steps = 200 # lr scheduler warm up, normally 20% of the train steps
-max_norm = 1.0 # grad norm clipping
-steps = 1000
-data_parallel_replicate_degree = 1
-data_parallel_shard_degree = -1
-tensor_parallel_degree = 1
-compile = false
-dataset = "c4"
-
-[experimental]
-context_parallel_degree = 1
-pipeline_parallel_degree = 1
-
-[checkpoint]
-enable_checkpoint = false
-folder = "checkpoint"
-interval_type = "steps"
-interval = 500
-model_weights_only = false
-export_dtype = "float32"
-async_mode = "disabled" # ["disabled", "async", "async_with_pinned_mem"]
-
-[activation_checkpoint]
-mode = 'selective' # ['none', 'selective', 'full']
-selective_ac_option = 'op' # 'int' = ac every positive int layer or 'op', ac based on ops policy
-
-[float8]
-enable_float8_linear = false
diff --git a/train_configs/llama2_70b.toml b/train_configs/llama2_70b.toml
deleted file mode 100644
index 2764a57e..00000000
--- a/train_configs/llama2_70b.toml
+++ /dev/null
@@ -1,57 +0,0 @@
-# torchtitan Config.toml
-# NOTE: this toml config is a preset for 64 A100 GPUs.
-
-[job]
-dump_folder = "./outputs"
-description = "Llama2 70B training"
-
-[profiling]
-enable_profiling = true
-save_traces_folder = "profile_trace"
-profile_freq = 100
-
-[metrics]
-log_freq = 10
-enable_tensorboard = true
-save_tb_folder = "tb"
-
-[model]
-name = "llama2"
-flavor = "70B"
-norm_type = "rmsnorm" # layernorm / np_layernorm / rmsnorm / fused_rmsnorm
-tokenizer_path = "./torchtitan/datasets/tokenizer/tokenizer.model"
-
-[optimizer]
-name = "AdamW"
-lr = 1.5e-4
-
-[training]
-batch_size = 16
-seq_len = 4096
-warmup_steps = 200 # lr scheduler warm up, normally 20% of the train steps
-max_norm = 1.0 # grad norm clipping
-steps = 1000
-data_parallel_replicate_degree = 1
-data_parallel_shard_degree = -1
-tensor_parallel_degree = 8 # 8-way TP
-compile = false
-dataset = "c4"
-
-[experimental]
-context_parallel_degree = 1
-pipeline_parallel_degree = 1
-
-[checkpoint]
-enable_checkpoint = false
-folder = "checkpoint"
-interval_type = "steps"
-interval = 500
-model_weights_only = false
-export_dtype = "float32"
-async_mode = "disabled" # ["disabled", "async", "async_with_pinned_mem"]
-
-[activation_checkpoint]
-mode = 'full' # ['none', 'selective', 'full']
-
-[float8]
-enable_float8_linear = false
diff --git a/train_configs/llama2_7b.toml b/train_configs/llama2_7b.toml
deleted file mode 100644
index e64d8aa8..00000000
--- a/train_configs/llama2_7b.toml
+++ /dev/null
@@ -1,57 +0,0 @@
-# torchtitan Config.toml
-
-[job]
-dump_folder = "./outputs"
-description = "Llama2 7B training"
-
-[profiling]
-enable_profiling = true
-save_traces_folder = "profile_trace"
-profile_freq = 100
-
-[metrics]
-log_freq = 10
-enable_tensorboard = true
-save_tb_folder = "tb"
-
-[model]
-name = "llama2"
-flavor = "7B"
-norm_type = "rmsnorm" # layernorm / np_layernorm / rmsnorm / fused_rmsnorm
-tokenizer_path = "./torchtitan/datasets/tokenizer/tokenizer.model"
-
-[optimizer]
-name = "AdamW"
-lr = 3e-4
-
-[training]
-batch_size = 8
-seq_len = 2048
-warmup_steps = 200 # lr scheduler warm up, normally 20% of the train steps
-max_norm = 1.0 # grad norm clipping
-steps = 1000
-data_parallel_replicate_degree = 1
-data_parallel_shard_degree = -1
-tensor_parallel_degree = 1 # dp-only would be sufficient for 7B
-compile = false
-dataset = "c4"
-
-[experimental]
-context_parallel_degree = 1
-pipeline_parallel_degree = 1
-
-[checkpoint]
-enable_checkpoint = false
-folder = "checkpoint"
-interval_type = "steps"
-interval = 500
-model_weights_only = false
-export_dtype = "float32"
-async_mode = "disabled" # ["disabled", "async", "async_with_pinned_mem"]
-
-[activation_checkpoint]
-mode = 'selective' # ['none', 'selective', 'full']
-selective_ac_option = 'op' # 'int' = ac every positive int layer or 'op', ac based on ops policy
-
-[float8]
-enable_float8_linear = false
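After this patch, `build_tokenizer` recognizes only the `tiktoken` type, so every remaining model entry maps to `TikTokenizer`. Below is a minimal sketch of the surviving code path, assuming the Llama 3.1 `tokenizer.model` has already been downloaded with the README command above; the local path and the sample string are placeholders, not part of the patch.

```python
# Sketch of the tiktoken-only path kept by this change; the tokenizer path is a
# placeholder and assumes the Llama 3.1 tokenizer.model was downloaded per the README.
from torchtitan.datasets.tokenizer import build_tokenizer

tokenizer = build_tokenizer(
    tokenizer_type="tiktoken",  # the only type handled after "sentencepiece" is removed
    tokenizer_path="./torchtitan/datasets/tokenizer/original/tokenizer.model",
)

# encode/decode mirror the Tokenizer interface that the deleted SentencePieceTokenizer
# also implemented, so existing call sites do not change.
ids = tokenizer.encode("hello torchtitan", bos=True, eos=True)
print(len(ids), tokenizer.decode(ids))

# Any other type now falls through to the ValueError branch in build_tokenizer:
# build_tokenizer("sentencepiece", "...")  # ValueError: Unknown tokenizer type: sentencepiece
```

Configs that still set `name = "llama2"` would no longer find an entry in `models_config` or `model_name_to_tokenizer`, which is why the three `llama2_*.toml` presets are deleted together with the SentencePiece tokenizer.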