diff --git a/keras_nlp/models/backbone_test.py b/keras_nlp/models/backbone_test.py
index 807e158d2a..7bf430108c 100644
--- a/keras_nlp/models/backbone_test.py
+++ b/keras_nlp/models/backbone_test.py
@@ -44,4 +44,4 @@ def test_from_preset_errors(self):
         GPT2Backbone.from_preset("bert_tiny_en_uncased", load_weights=False)
         with self.assertRaises(FileNotFoundError):
             # No loading on a non-keras model.
-            Backbone.from_preset("hf://google/gemma-2b")
+            Backbone.from_preset("hf://google-bert/bert-base-uncased")
diff --git a/keras_nlp/models/preprocessor_test.py b/keras_nlp/models/preprocessor_test.py
index 99d11871fe..352bc7c37c 100644
--- a/keras_nlp/models/preprocessor_test.py
+++ b/keras_nlp/models/preprocessor_test.py
@@ -51,6 +51,6 @@ def test_from_preset_errors(self):
         BertPreprocessor.from_preset("gpt2_base_en")
         with self.assertRaises(FileNotFoundError):
             # No loading on a non-keras model.
-            Preprocessor.from_preset("hf://google/gemma-2b")
+            Preprocessor.from_preset("hf://google-bert/bert-base-uncased")
 
     # TODO: Add more tests when we added a model that has `preprocessor.json`.
diff --git a/keras_nlp/models/task_test.py b/keras_nlp/models/task_test.py
index 7160b536e4..a627f8fdf3 100644
--- a/keras_nlp/models/task_test.py
+++ b/keras_nlp/models/task_test.py
@@ -71,7 +71,7 @@ def test_from_preset_errors(self):
         BertClassifier.from_preset("gpt2_base_en", load_weights=False)
         with self.assertRaises(FileNotFoundError):
             # No loading on a non-keras model.
-            CausalLM.from_preset("hf://google/gemma-2b")
+            CausalLM.from_preset("hf://google-bert/bert-base-uncased")
 
     def test_summary_with_preprocessor(self):
         preprocessor = SimplePreprocessor()
diff --git a/keras_nlp/tokenizers/tokenizer_test.py b/keras_nlp/tokenizers/tokenizer_test.py
index aaa232393d..29f837e375 100644
--- a/keras_nlp/tokenizers/tokenizer_test.py
+++ b/keras_nlp/tokenizers/tokenizer_test.py
@@ -56,7 +56,7 @@ def test_from_preset_errors(self):
         GPT2Tokenizer.from_preset("bert_tiny_en_uncased")
         with self.assertRaises(FileNotFoundError):
             # No loading on a non-keras model.
-            Tokenizer.from_preset("hf://google/gemma-2b")
+            Tokenizer.from_preset("hf://google-bert/bert-base-uncased")
 
     def test_tokenize(self):
         input_data = ["the quick brown fox"]
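
For reviewers, a minimal sketch of the behavior these tests pin down, assuming the public keras_nlp API (the tests themselves import Backbone and friends from internal module paths). The hf:// URI in each test points at a Hugging Face repo that does not ship Keras-format preset files, so from_preset is expected to raise FileNotFoundError rather than load anything:

    # Sketch only; mirrors the assertion in the updated tests.
    from keras_nlp.models import Backbone

    try:
        Backbone.from_preset("hf://google-bert/bert-base-uncased")
    except FileNotFoundError:
        # Expected: the repo has no Keras preset config to resolve,
        # so the loader fails instead of returning a model.
        print("raised FileNotFoundError, as the tests assert")

The swap from hf://google/gemma-2b to hf://google-bert/bert-base-uncased keeps the same failure mode while using a repo that needs no gated access, so the tests no longer depend on Hugging Face credentials.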