Skip to content

Commit

Permalink
Fix BitsAndBytes JSON Serializable (#191)
Browse files Browse the repository at this point in the history
* Update run_sft.py

* fix BitsAndBytes JSON serializable

* get_quantization_config to return dict

* to_dict() for load_in_8bit too

* convert quant test to use dict subscriptions instead of dot syntax

* Remove torch

---------

Co-authored-by: lewtun <[email protected]>
  • Loading branch information
deep-diver and lewtun authored Aug 20, 2024
1 parent a8dcde2 commit 27f7dbf
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 9 deletions.
4 changes: 2 additions & 2 deletions src/alignment/model_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,11 +52,11 @@ def get_quantization_config(model_args: ModelArguments) -> BitsAndBytesConfig |
bnb_4bit_quant_type=model_args.bnb_4bit_quant_type,
bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant,
bnb_4bit_quant_storage=model_args.bnb_4bit_quant_storage,
)
).to_dict()
elif model_args.load_in_8bit:
quantization_config = BitsAndBytesConfig(
load_in_8bit=True,
)
).to_dict()
else:
quantization_config = None

Expand Down
12 changes: 5 additions & 7 deletions tests/test_model_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,6 @@
# limitations under the License.
import unittest

import torch

from alignment import (
DataArguments,
ModelArguments,
Expand All @@ -31,15 +29,15 @@ class GetQuantizationConfigTest(unittest.TestCase):
def test_4bit(self):
    """4-bit path: get_quantization_config returns a plain dict config.

    Since the fix in this commit, get_quantization_config returns
    BitsAndBytesConfig(...).to_dict() so the config is JSON-serializable;
    values must therefore be read with key subscription, and the compute
    dtype is the string "float16", not torch.float16.
    """
    model_args = ModelArguments(load_in_4bit=True)
    quantization_config = get_quantization_config(model_args)
    # Dict subscription, not attribute access — the return value is a dict.
    self.assertTrue(quantization_config["load_in_4bit"])
    self.assertEqual(quantization_config["bnb_4bit_compute_dtype"], "float16")
    self.assertEqual(quantization_config["bnb_4bit_quant_type"], "nf4")
    self.assertFalse(quantization_config["bnb_4bit_use_double_quant"])

def test_8bit(self):
    """8-bit path: the returned dict config has load_in_8bit set.

    Uses dict subscription because get_quantization_config now returns
    BitsAndBytesConfig(...).to_dict() (a JSON-serializable dict), not a
    BitsAndBytesConfig object.
    """
    model_args = ModelArguments(load_in_8bit=True)
    quantization_config = get_quantization_config(model_args)
    self.assertTrue(quantization_config["load_in_8bit"])

def test_no_quantization(self):
model_args = ModelArguments()
Expand Down

0 comments on commit 27f7dbf

Please sign in to comment.