
Update transformers version to 4.47.1 (openvinotoolkit#28348)

Update the transformers package version to support the latest models in precommit testing, such as katuni4ka/tiny-random-nanollava.

Adjust reference values for other models that were affected by the
changes in transformers.

Signed-off-by: Andrii Staikov <[email protected]>

Ticket:
* CVS-157416
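For context, the reference values adjusted in sdpa2pa_ref_diff.py below are expected per-operation-type count deltas after the SDPA-to-PagedAttention transformation (e.g. five ScaledDotProductAttention nodes replaced by five PagedAttentionExtension nodes). A minimal sketch of how such a reference diff could be checked, assuming an openvino.Model-like object; the helper names are illustrative, not the repository's actual test code:

from collections import Counter

def count_ops(model):
    # Count graph nodes by operation type; get_ops() and get_type_name()
    # follow the OpenVINO Python API.
    return Counter(op.get_type_name() for op in model.get_ops())

def check_reference_diff(model_before, model_after, reference):
    # 'reference' maps op type -> expected count delta, e.g.
    # {"ScaledDotProductAttention": -5, "PagedAttentionExtension": 5}.
    before = count_ops(model_before)
    after = count_ops(model_after)
    for op_type, expected in reference.items():
        actual = after[op_type] - before[op_type]
        assert actual == expected, f"{op_type}: expected {expected}, got {actual}"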
CuriousPanCake authored Jan 17, 2025
1 parent 93b2567 commit c64aa94
Showing 3 changed files with 26 additions and 26 deletions.
@@ -1,4 +1,4 @@
 katuni4ka/tiny-random-llava-next,https://huggingface.co/katuni4ka/tiny-random-llava-next
 katuni4ka/tiny-random-minicpmv-2_6,https://huggingface.co/katuni4ka/tiny-random-minicpmv-2_6
 katuni4ka/tiny-random-llava,https://huggingface.co/katuni4ka/tiny-random-llava
-katuni4ka/tiny-random-nanollava,https://huggingface.co/katuni4ka/tiny-random-nanollava,xfail,CVS-157416
+katuni4ka/tiny-random-nanollava,https://huggingface.co/katuni4ka/tiny-random-nanollava
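The rows above appear to follow the format name,link[,mark,reason]; removing the trailing xfail,CVS-157416 columns means tiny-random-nanollava is now expected to pass. An illustrative parser for this assumed format (not the repository's actual loader):

import csv

def load_models_list(path):
    # Assumed row format: model name, link, optional mark (e.g. "xfail"),
    # optional reason/ticket; comment lines start with '#'.
    models = []
    with open(path, newline="") as f:
        for row in csv.reader(f):
            if not row or row[0].startswith("#"):
                continue
            name, link = row[0], row[1]
            mark = row[2] if len(row) > 2 else None
            reason = row[3] if len(row) > 3 else None
            models.append((name, link, mark, reason))
    return models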
40 changes: 20 additions & 20 deletions tests/model_hub_tests/transformation_tests/sdpa2pa_ref_diff.py
@@ -133,7 +133,7 @@
"hf-tiny-model-private/tiny-random-OPTForCausalLM" : {
"Assign" : -10,
"PagedAttentionExtension" : 5,
"Parameter" : 14,
"Parameter" : 13,
"ReadValue" : -10,
"ScaledDotProductAttention" : -5,
},
@@ -273,14 +273,14 @@
"facebook/opt-125m" : {
"Assign" : -24,
"PagedAttentionExtension" : 12,
"Parameter" : 28,
"Parameter" : 27,
"ReadValue" : -24,
"ScaledDotProductAttention" : -12,
},
"facebook/opt-350m" : {
"Assign" : -48,
"PagedAttentionExtension" : 24,
"Parameter" : 52,
"Parameter" : 51,
"ReadValue" : -48,
"ScaledDotProductAttention" : -24,
},
@@ -319,13 +319,13 @@
"ReadValue" : -4,
"ScaledDotProductAttention" : -2,
},
# "katuni4ka/tiny-random-nanollava" : {
# "Assign" : -4,
# "PagedAttentionExtension" : 2,
# "Parameter" : 7,
# "ReadValue" : -4,
# "ScaledDotProductAttention" : -2,
# },
"katuni4ka/tiny-random-nanollava" : {
"Assign" : -4,
"Parameter" : 7,
"ReadValue" : -4,
"ScaledDotProductAttention" : -2,
"PagedAttentionExtension" : 2,
},
"hf-internal-testing/tiny-random-GPTNeoForCausalLM" : {
"ScaledDotProductAttention" : -4,
"ReadValue" : -8,
@@ -465,7 +465,7 @@
"hf-tiny-model-private/tiny-random-OPTForCausalLM" : {
"Assign" : -10,
"PagedAttentionExtension" : 5,
"Parameter" : 29,
"Parameter" : 28,
"ReadValue" : -10,
"ScaledDotProductAttention" : -5,
},
@@ -605,14 +605,14 @@
"facebook/opt-125m" : {
"Assign" : -24,
"PagedAttentionExtension" : 12,
"Parameter" : 64,
"Parameter" : 63,
"ReadValue" : -24,
"ScaledDotProductAttention" : -12,
},
"facebook/opt-350m" : {
"Assign" : -48,
"PagedAttentionExtension" : 24,
"Parameter" : 124,
"Parameter" : 123,
"ReadValue" : -48,
"ScaledDotProductAttention" : -24,
},
@@ -651,13 +651,13 @@
"ReadValue" : -4,
"ScaledDotProductAttention" : -2,
},
# "katuni4ka/tiny-random-nanollava" : {
# "Assign" : -4,
# "PagedAttentionExtension" : 2,
# "Parameter" : 13,
# "ReadValue" : -4,
# "ScaledDotProductAttention" : -2,
# },
"katuni4ka/tiny-random-nanollava" : {
"Assign" : -4,
"PagedAttentionExtension" : 2,
"Parameter" : 13,
"ReadValue" : -4,
"ScaledDotProductAttention" : -2,
},

"hf-internal-testing/tiny-random-GPTNeoForCausalLM" : {
"ScaledDotProductAttention" : -4,
10 changes: 5 additions & 5 deletions tests/requirements_pytorch
@@ -11,9 +11,8 @@ torchvision==0.20.1; platform_system != "Darwin" or platform_machine != "x86_64"
 torchvision==0.17.2; platform_system == "Darwin" and platform_machine == "x86_64"
 torchaudio==2.5.1; platform_system != "Darwin" or platform_machine != "x86_64"
 torchaudio==2.2.2; platform_system == "Darwin" and platform_machine == "x86_64"
-# transformers 4.45.1 is available
-# but optimum still requires <4.45.0
-transformers==4.44.2
+# before updating the transformers version, make sure no tests (esp. sdpa2pa) are failing
+transformers==4.47.1
 pytest==7.0.1; python_version < '3.10'
 pytest==7.2.0; python_version >= '3.10'
 pytest-html==4.1.1
@@ -44,8 +43,9 @@ super-image==0.1.7
 # huggingface-hub required for super-image
 huggingface-hub==0.25.2

-# use latest released version once it's available
-git+https://github.com/huggingface/optimum-intel.git@5c735487d4bd3dd8d7dccb242d8d5988e7dd4069; python_version < "3.12"
+# For now, we pin a specific known-working version of optimum-intel.
+# How to manage versioning of the components properly will be discussed later.
+git+https://github.com/huggingface/optimum-intel.git@190ae8737db68a826a86e48a709b41ae51d2e3ee; python_version < "3.12"
 # set 'export HF_HUB_ENABLE_HF_TRANSFER=1' to benefit from hf_transfer
 hf_transfer==0.1.8
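A quick sanity check, illustrative rather than part of this change, that a test environment matches the new pins before running the precommit suite:

import os

# Enable hf_transfer before importing HF libraries, as the requirements
# comment above suggests.
os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")

import transformers

# The requirements file now pins transformers at 4.47.1; fail fast on a
# stale environment.
assert transformers.__version__ == "4.47.1", transformers.__version__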
