From aef65609e1ae2bf5d03c4a2827b5fea626b433a9 Mon Sep 17 00:00:00 2001
From: Andrii Staikov
Date: Wed, 13 Nov 2024 18:44:39 +0100
Subject: [PATCH] skip cuda

---
 .github/workflows/job_pytorch_layer_tests.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/job_pytorch_layer_tests.yml b/.github/workflows/job_pytorch_layer_tests.yml
index dbf0e1913c3db1..4eeed787d1678e 100644
--- a/.github/workflows/job_pytorch_layer_tests.yml
+++ b/.github/workflows/job_pytorch_layer_tests.yml
@@ -122,8 +122,8 @@ jobs:
       - name: Install flash_attn module
         run: |
           # due to flash_attn issues, it needs to be installed separately from other packages
-          # pip install flash_attn --no-build-isolation
-          pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post1/flash_attn-2.7.0.post1+cu12torch2.5cxx11abiTRUE-cp312-cp312-linux_x86_64.whl
+          export FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE
+          pip install flash_attn --no-build-isolation
 
       - name: PyTorch Layer Tests
         if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.arch != 'ARM64' }} # Ticket: 126287, 142196
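
With this change the step no longer installs a prebuilt cu12 / torch 2.5 / cp312 wheel pinned to flash_attn 2.7.0.post1; it installs flash_attn from PyPI instead. FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE is an environment variable honored by flash-attention's setup.py that skips compiling the CUDA extension, and --no-build-isolation lets the build see the torch already installed in the runner's environment. For reference, a sketch of the resulting "Install flash_attn module" step after the patch is applied (indentation assumed to match the surrounding workflow file):

      - name: Install flash_attn module
        run: |
          # due to flash_attn issues, it needs to be installed separately from other packages
          # skip compiling the CUDA kernels; only the Python package is installed
          export FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE
          pip install flash_attn --no-build-isolation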