diff --git a/.github/workflows/agreements.yaml b/.github/workflows/agreements.yaml index 90650052..dd0dbc18 100644 --- a/.github/workflows/agreements.yaml +++ b/.github/workflows/agreements.yaml @@ -7,7 +7,7 @@ on: jobs: call-workflow-agreements: - uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-agreements.yaml@v1.3 + uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-agreements.yaml@v1 permissions: actions: read contents: read diff --git a/.github/workflows/build-test-release.yaml b/.github/workflows/build-test-release.yaml index 961b5b64..49b37dcf 100644 --- a/.github/workflows/build-test-release.yaml +++ b/.github/workflows/build-test-release.yaml @@ -1,5 +1,3 @@ -name: CI - on: workflow_dispatch: push: @@ -10,20 +8,19 @@ on: - "v[0-9]+.[0-9]+.[0-9]+" pull_request: branches: [main, develop] + permissions: contents: write packages: read pull-requests: read statuses: write + jobs: compliance-copyrights: - name: Compliance Copyright Headers runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Check License Header - uses: apache/skywalking-eyes@main + - uses: actions/checkout@v3 + - uses: apache/skywalking-eyes@v0.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -34,13 +31,11 @@ jobs: - uses: actions/setup-python@v4 with: python-version: "3.7" - - name: Install actionlint - run: | + - run: | bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/v1.6.24/scripts/download-actionlint.bash) - uses: pre-commit/action@v3.0.0 publish: - name: Build Release needs: - compliance-copyrights - pre-commit @@ -61,9 +56,10 @@ jobs: passphrase: ${{ secrets.SA_GPG_PASSPHRASE }} update-semver: - name: Move Respository semver tags if: startsWith(github.ref, 'refs/tags/v') needs: publish + permissions: + contents: write runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/reusable-build-test-release.yml b/.github/workflows/reusable-build-test-release.yml index 1cb64e1d..1b38311b 100644 --- a/.github/workflows/reusable-build-test-release.yml +++ b/.github/workflows/reusable-build-test-release.yml @@ -183,6 +183,26 @@ jobs: echo "$test_type""_labeled=${EXECUTE_LABELED["$test_type"]}" >> "$GITHUB_OUTPUT" echo "$test_type""_labeled: ${EXECUTE_LABELED["$test_type"]}" done + + validate-pr-title: + name: Validate PR title + needs: + - setup-workflow + if: ${{ needs.setup-workflow.outputs.skip-workflow != 'Yes' && github.event_name == 'pull_request' }} + runs-on: ubuntu-latest + permissions: + contents: read + packages: read + pull-requests: read + statuses: write + steps: + - uses: amannn/action-semantic-pull-request@v5.0.2 + with: + wip: true + validateSingleCommit: true + env: + GITHUB_TOKEN: ${{ github.token }} + meta: runs-on: ubuntu-latest needs: @@ -223,7 +243,7 @@ jobs: GITHUB_TOKEN: ${{ github.token }} - name: Docker meta id: docker_action_meta - uses: docker/metadata-action@v4.1.1 + uses: docker/metadata-action@v4.6.0 with: images: ghcr.io/${{ github.repository }}/container tags: | @@ -336,11 +356,91 @@ jobs: with: publishToken: ${{ secrets.SEMGREP_PUBLISH_TOKEN }} + test-inventory: + runs-on: ubuntu-latest + needs: setup-workflow + if: ${{ needs.setup-workflow.outputs.skip-workflow != 'Yes' }} + # Map a step output to a job output + outputs: + unit: ${{ steps.testset.outputs.unit }} + knowledge: ${{ steps.testset.outputs.knowledge }} + ui: ${{ steps.testset.outputs.ui }} + modinput_functional: ${{ steps.testset.outputs.modinput_functional }} + requirement_test: ${{ 
steps.testset.outputs.requirement_test }} + scripted_inputs: ${{ steps.testset.outputs.scripted_inputs }} + escu: ${{ steps.testset.outputs.escu }} + steps: + - uses: actions/checkout@v3 + - id: testset + name: testsets + run: | + find tests -type d -maxdepth 1 -mindepth 1 | sed 's|^tests/||g' | while read -r TESTSET; do echo "$TESTSET=true" >> "$GITHUB_OUTPUT"; echo "$TESTSET::true"; done + + run-unit-tests: + name: test-unit-python3-${{ matrix.python-version }} + if: ${{ needs.test-inventory.outputs.unit == 'true' }} + runs-on: ubuntu-latest + needs: + - test-inventory + strategy: + fail-fast: false + matrix: + python-version: + - "3.7" + permissions: + actions: read + deployments: read + contents: read + packages: read + statuses: read + checks: write + steps: + - uses: actions/checkout@v3 + - name: Setup python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Setup addon + run: | + if [ -f "poetry.lock" ] + then + mkdir -p package/lib || true + pip install poetry==1.2.2 poetry-plugin-export==1.2.0 + poetry export --without-hashes -o package/lib/requirements.txt + poetry export --without-hashes --dev -o requirements_dev.txt + fi + if [ ! -f requirements_dev.txt ]; then echo no requirements;exit 0 ;fi + pip install -r requirements_dev.txt + - name: Create directories + run: | + mkdir -p /opt/splunk/var/log/splunk + chmod -R 777 /opt/splunk/var/log/splunk + - name: Copy pytest ini + run: cp tests/unit/pytest-ci.ini pytest.ini + - name: Run Pytest with coverage + run: pytest --cov=./ --cov-report=xml --junitxml=test-results/junit.xml tests/unit + - uses: actions/upload-artifact@v3 + if: success() || failure() + with: + name: test-results-unit-python_${{ matrix.python-version }} + path: test-results/* + build: name: build runs-on: ubuntu-latest needs: + - setup-workflow + - test-inventory + - meta + - compliance-copyrights + - lint + - review_secrets + - semgrep + - run-unit-tests - fossa-scan + if: | + always() && + (needs.run-unit-tests.result == 'success' || needs.run-unit-tests.result == 'skipped') outputs: buildname: ${{ steps.buildupload.outputs.name }} permissions: @@ -435,11 +535,6 @@ jobs: uses: splunk/addonfactory-packaging-toolkit-action@v1 with: source: ${{ steps.uccgen.outputs.OUTPUT }} - - name: artifact-splunk-unpacked - uses: actions/upload-artifact@v3 - with: - name: package-raw - path: ${{ steps.uccgen.outputs.OUTPUT }}** if: always() - name: artifact-splunk-base uses: actions/upload-artifact@v3 @@ -468,6 +563,9 @@ jobs: continue-on-error: true name: security-virustotal needs: build + if: | + always() && + needs.build.result == 'success' runs-on: ubuntu-latest steps: - uses: actions/download-artifact@v3 @@ -481,86 +579,15 @@ jobs: files: | build/package/* - test-inventory: - runs-on: ubuntu-latest - needs: setup-workflow - if: ${{ needs.setup-workflow.outputs.skip-workflow != 'Yes' }} - # Map a step output to a job output - outputs: - unit: ${{ steps.testset.outputs.unit }} - knowledge: ${{ steps.testset.outputs.knowledge }} - ui: ${{ steps.testset.outputs.ui }} - modinput_functional: ${{ steps.testset.outputs.modinput_functional }} - requirement_test: ${{ steps.testset.outputs.requirement_test }} - scripted_inputs: ${{ steps.testset.outputs.scripted_inputs }} - escu: ${{ steps.testset.outputs.escu }} - steps: - - uses: actions/checkout@v3 - - id: testset - name: testsets - run: | - find tests -type d -maxdepth 1 -mindepth 1 | sed 's|^tests/||g' | while read -r TESTSET; do echo "$TESTSET=true" >> "$GITHUB_OUTPUT"; echo 
"$TESTSET::true"; done - - run-unit-tests: - name: test-unit-python3-${{ matrix.python-version }} - if: ${{ needs.test-inventory.outputs.unit == 'true' }} - runs-on: ubuntu-latest - needs: - - build - - test-inventory - strategy: - fail-fast: false - matrix: - python-version: - - "3.7" - permissions: - actions: read - deployments: read - contents: read - packages: read - statuses: read - checks: write - steps: - - uses: actions/checkout@v3 - - name: Setup python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - uses: actions/download-artifact@v3 - with: - name: package-raw - path: output - - name: Setup addon - run: | - if [ -f "poetry.lock" ] - then - mkdir -p package/lib || true - pip install poetry==1.2.2 poetry-plugin-export==1.2.0 - poetry export --without-hashes -o package/lib/requirements.txt - poetry export --without-hashes --dev -o requirements_dev.txt - fi - if [ ! -f requirements_dev.txt ]; then echo no requirements;exit 0 ;fi - pip install -r requirements_dev.txt - - name: Create directories - run: | - mkdir -p /opt/splunk/var/log/splunk - chmod -R 777 /opt/splunk/var/log/splunk - - name: Copy pytest ini - run: cp tests/unit/pytest-ci.ini pytest.ini - - name: Run Pytest with coverage - run: pytest --cov=./ --cov-report=xml --junitxml=test-results/junit.xml tests/unit - - uses: actions/upload-artifact@v3 # upload test results - if: success() || failure() # run this step even if previous step failed - with: - name: test-results-unit-python_${{ matrix.python-version }} - path: test-results/* - run-requirements-unit-tests: - if: ${{ needs.test-inventory.outputs.requirement_test == 'true' }} runs-on: ubuntu-latest needs: - build - test-inventory + if: | + always() && + needs.build.result == 'success' && + needs.test-inventory.outputs.requirement_test == 'true' permissions: actions: read deployments: read @@ -589,6 +616,9 @@ jobs: appinspect: name: quality-appinspect-${{ matrix.tags }} needs: build + if: | + always() && + needs.build.result == 'success' runs-on: ubuntu-latest strategy: fail-fast: false @@ -609,7 +639,7 @@ jobs: name: package-splunkbase path: build/package/ - name: Scan - uses: splunk/appinspect-cli-action@v1.6 + uses: splunk/appinspect-cli-action@v1.9 with: app_path: build/package/ included_tags: ${{ matrix.tags }} @@ -633,6 +663,10 @@ jobs: needs: - security-virustotal - meta + if: | + always() && + needs.security-virustotal.result == 'success' && + needs.meta.result == 'success' outputs: artifact: ${{ steps.artifactid.outputs.result }} permissions: @@ -658,14 +692,14 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - name: Login to GitHub Packages Docker Registry - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ github.token }} - name: Docker meta id: meta - uses: docker/metadata-action@v4.1.1 + uses: docker/metadata-action@v4.6.0 with: images: ghcr.io/${{ github.repository }} tags: | @@ -707,6 +741,9 @@ jobs: needs: - build - test-inventory + if: | + always() && + needs.build.result == 'success' runs-on: ubuntu-latest container: image: ghcr.io/splunk/workflow-engine-base:2.0.3 @@ -764,7 +801,11 @@ jobs: } >> "$GITHUB_OUTPUT" run-knowledge-tests: - if: ${{ needs.test-inventory.outputs.knowledge == 'true' && (needs.setup-workflow.outputs.execute-ko == 'Yes' || needs.setup-workflow.outputs.execute-labeled-knowledge == 'true') }} + if: | + always() && + needs.build.result == 'success' && + 
needs.test-inventory.outputs.knowledge == 'true' && + (needs.setup-workflow.outputs.execute-ko == 'Yes' || needs.setup-workflow.outputs.execute-labeled-knowledge == 'true') needs: - build - test-inventory @@ -800,7 +841,7 @@ jobs: with: submodules: recursive - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -971,7 +1012,11 @@ jobs: ${{ needs.setup.outputs.directory-path }}/diag* run-requirement-tests: - if: ${{ needs.test-inventory.outputs.requirement_test == 'true' && (needs.setup-workflow.outputs.execute-requirement_test == 'Yes' || needs.setup-workflow.outputs.execute-labeled-requirement == 'true') }} + if: | + always() && + needs.build.result == 'success' && + needs.test-inventory.outputs.requirement_test == 'true' && + (needs.setup-workflow.outputs.execute-requirement_test == 'Yes' || needs.setup-workflow.outputs.execute-labeled-requirement == 'true') needs: - build - test-inventory @@ -1006,7 +1051,7 @@ jobs: with: submodules: recursive - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -1157,7 +1202,11 @@ jobs: ${{ needs.setup.outputs.directory-path }}/diag* run-ui-tests: - if: ${{ needs.test-inventory.outputs.ui == 'true' && (needs.setup-workflow.outputs.execute-ui == 'Yes' || needs.setup-workflow.outputs.execute-labeled-ui == 'true') }} + if: | + always() && + needs.build.result == 'success' && + needs.test-inventory.outputs.ui == 'true' && + (needs.setup-workflow.outputs.execute-ui == 'Yes' || needs.setup-workflow.outputs.execute-labeled-ui == 'true') needs: - build - test-inventory @@ -1194,7 +1243,7 @@ jobs: with: submodules: recursive - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -1351,7 +1400,11 @@ jobs: ${{ needs.setup.outputs.directory-path }}/diag* run-modinput-tests: - if: ${{ needs.test-inventory.outputs.modinput_functional == 'true' && (needs.setup-workflow.outputs.execute-modinput_functional == 'Yes' || needs.setup-workflow.outputs.execute-labeled-modinput == 'true') }} + if: | + always() && + needs.build.result == 'success' && + needs.test-inventory.outputs.modinput_functional == 'true' && + (needs.setup-workflow.outputs.execute-modinput_functional == 'Yes' || needs.setup-workflow.outputs.execute-labeled-modinput == 'true') needs: - build - test-inventory @@ -1389,7 +1442,7 @@ jobs: with: submodules: recursive - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -1558,7 +1611,11 @@ jobs: ${{ needs.setup.outputs.directory-path }}/diag* run-scripted-input-tests-full-matrix: - if: ${{ needs.test-inventory.outputs.scripted_inputs == 'true' && ( github.base_ref == 'main' || github.ref_name == 'main' ) && (needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' || needs.setup-workflow.outputs.execute-labeled-scripted_inputs == 'true') }} + if: | + 
always() && + needs.build.result == 'success' && + needs.test-inventory.outputs.scripted_inputs == 'true' && + ( github.base_ref == 'main' || github.ref_name == 'main' ) && (needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' || needs.setup-workflow.outputs.execute-labeled-scripted_inputs == 'true') needs: - build - test-inventory @@ -1593,7 +1650,7 @@ jobs: with: submodules: recursive - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -1759,7 +1816,11 @@ jobs: ${{ needs.setup.outputs.directory-path }}/diag* run-scripted-input-tests-canary: - if: ${{ needs.test-inventory.outputs.scripted_inputs == 'true' && ( github.base_ref == 'develop' || github.ref_name == 'develop' ) && (needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' || needs.setup-workflow.outputs.execute-labeled-scripted_inputs == 'true') }} + if: | + always() && + needs.build.result == 'success' && + needs.test-inventory.outputs.scripted_inputs == 'true' && + ( github.base_ref == 'develop' || github.ref_name == 'develop' ) && (needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' || needs.setup-workflow.outputs.execute-labeled-scripted_inputs == 'true') needs: - build - test-inventory @@ -1794,7 +1855,7 @@ jobs: with: submodules: recursive - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -1959,7 +2020,11 @@ jobs: ${{ needs.setup.outputs.directory-path }}/diag* run-escu-tests: - if: ${{ needs.test-inventory.outputs.escu == 'true' && ( github.base_ref == 'main' || github.ref_name == 'main' || github.base_ref == 'develop' || github.ref_name == 'develop' ) && (needs.setup-workflow.outputs.execute-escu == 'Yes' || needs.setup-workflow.outputs.execute-labeled-escu == 'true') }} + if: | + always() && + needs.build.result == 'success' && + needs.test-inventory.outputs.escu == 'true' && + ( github.base_ref == 'main' || github.ref_name == 'main' || github.base_ref == 'develop' || github.ref_name == 'develop' ) && (needs.setup-workflow.outputs.execute-escu == 'Yes' || needs.setup-workflow.outputs.execute-labeled-escu == 'true') needs: - build - test-inventory @@ -1993,7 +2058,7 @@ jobs: with: submodules: recursive - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -2168,25 +2233,6 @@ jobs: path: | ${{ needs.setup.outputs.directory-path }}/diag* - validate-pr-title: - name: Validate PR title - needs: - - setup-workflow - if: ${{ needs.setup-workflow.outputs.skip-workflow != 'Yes' && github.event_name == 'pull_request' }} - runs-on: ubuntu-latest - permissions: - contents: read - packages: read - pull-requests: read - statuses: write - steps: - - uses: amannn/action-semantic-pull-request@v5.0.2 - with: - wip: true - validateSingleCommit: true - env: - GITHUB_TOKEN: ${{ github.token }} - pre-publish: if: always() needs: diff --git a/.github/workflows/reusable-escu-manual-workflow.yml b/.github/workflows/reusable-escu-manual-workflow.yml deleted file mode 100644 index a15d2b10..00000000 --- 
a/.github/workflows/reusable-escu-manual-workflow.yml +++ /dev/null @@ -1,258 +0,0 @@ -name: escu-manual-workflow -on: - workflow_call: - inputs: - TA_BUILD: - description: 'TA build number (e.g. s3://ta-production-artifacts/ta-apps/{ta-name}-$build_number$.spl)' - required: true - type: string - TESTS: - description: 'Comma-Separated List of detections to run (e.g. detection1,detection2,detection3)' - required: true - type: string - secrets: - AWS_ACCESS_KEY_ID: - description: AWS access key id - required: true - AWS_DEFAULT_REGION: - description: AWS default region - required: true - AWS_SECRET_ACCESS_KEY: - description: AWS secret access key - required: true - OTHER_TA_REQUIRED_CONFIGS: - description: other required configs - required: true - -jobs: - meta: - runs-on: ubuntu-latest - outputs: - matrix_supportedSplunk: ${{ steps.matrix.outputs.latestSplunk }} - steps: - - name: Checkout - uses: actions/checkout@v3 - with: - submodules: false - persist-credentials: false - - name: matrix - id: matrix - uses: splunk/addonfactory-test-matrix-action@v1.9 - - test-inventory: - runs-on: ubuntu-latest - outputs: - escu: ${{ steps.testset.outputs.escu }} - steps: - - uses: actions/checkout@v3 - - id: testset - name: testsets - run: | - find tests -type d -maxdepth 1 -mindepth 1 | sed 's|^tests/||g' | while read -r TESTSET; do echo "$TESTSET=true" >> "$GITHUB_OUTPUT"; echo "$TESTSET::true"; done - - setup: - runs-on: ubuntu-latest - container: - image: ghcr.io/splunk/workflow-engine-base:2.0.3 - outputs: - argo-server: ${{ steps.test-setup.outputs.argo-server }} - argo-http1: ${{ steps.test-setup.outputs.argo-http1 }} - argo-secure: ${{ steps.test-setup.outputs.argo-secure }} - argo-href: "" - argo-base-href: ${{ steps.test-setup.outputs.argo-base-href }} - argo-workflow-tmpl-name: ${{ steps.test-setup.outputs.argo-workflow-tmpl-name }} - argo-namespace: ${{ steps.test-setup.outputs.argo-namespace }} - addon-name: ${{ steps.test-setup.outputs.addon-name }} - job-name: ${{ steps.test-setup.outputs.job-name }} - labels: ${{ steps.test-setup.outputs.labels }} - addon-upload-path: ${{ steps.test-setup.outputs.addon-upload-path }} - directory-path: ${{ steps.test-setup.outputs.directory-path }} - s3-bucket: ${{ steps.test-setup.outputs.s3-bucket }} - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - name: setup for test - id: test-setup - shell: bash - run: | - ADDON_NAME=$(crudini --get package/default/app.conf id name | tr '[:lower:]' '[:upper:]') - if [[ -n $(echo "${ADDON_NAME}" | awk -F 'SPLUNK_TA_' '{print $2}') ]]; - then - ADDON_NAME=$(echo "${ADDON_NAME}" | awk -F 'SPLUNK_TA_' '{print $2}') - elif [[ -n $(echo "${ADDON_NAME}" | awk -F '_FOR_SPLUNK' '{print $1}') ]]; - then - ADDON_NAME=$(echo "${ADDON_NAME}" | awk -F '_FOR_SPLUNK' '{print $1}') - fi - JOB_NAME=$(echo "$ADDON_NAME" | tail -c 16)-$(echo "${GITHUB_SHA}" | tail -c 8)-TEST-TYPE-${GITHUB_RUN_ID} - JOB_NAME=${JOB_NAME//[_.]/-} - LABELS="addon-name=${ADDON_NAME}" - ADDON_BUILD_NAME=$(crudini --get package/default/app.conf id name) - ADDON_UPLOAD_PATH="s3://ta-production-artifacts/ta-apps/${ADDON_BUILD_NAME}-${{ inputs.TA_BUILD }}.spl" - { - echo "argo-server=argo.wfe.splgdi.com:443" - echo "argo-http1=true" - echo "argo-secure=true" - echo "argo-base-href=\'\'" - echo "argo-namespace=workflows" - echo "argo-workflow-tmpl-name=ta-workflow" - echo "directory-path=/tmp" - echo "s3-bucket=ta-production-artifacts" - echo "addon-name=\"$ADDON_NAME\"" - echo "job-name=wf-$JOB_NAME" - echo "labels=$LABELS" - echo 
"addon-upload-path=$ADDON_UPLOAD_PATH" - } >> "$GITHUB_OUTPUT" - - run-escu-tests: - if: ${{ needs.test-inventory.outputs.escu == 'true' }} - needs: - - test-inventory - - setup - - meta - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - splunk: - - ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} - container: - image: ghcr.io/splunk/workflow-engine-base:2.0.3 - env: - ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} - ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} - ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} - ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} - ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} - TEST_TYPE: "escu" - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ secrets.AWS_DEFAULT_REGION }} - - name: Read secrets from AWS Secrets Manager into environment variables - id: get-argo-token - run: | - ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id ta-github-workflow-automation-token | jq -r '.SecretString') - echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" - - name: create job name - id: create-job-name - shell: bash - run: | - RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) - JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} - JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} - JOB_NAME=${JOB_NAME//[_.]/-} - JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') - echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" - - name: run-tests - id: run-tests - env: - ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} - uses: splunk/wfe-test-runner-action@master - with: - splunk: ${{ matrix.splunk.version }} - test-type: ${{ env.TEST_TYPE }} - test-args: "-tf ${{ inputs.TESTS }}" - job-name: ${{ steps.create-job-name.outputs.job-name }} - labels: ${{ needs.setup.outputs.labels }} - workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} - workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} - delay-destroy: "No" - addon-url: ${{ needs.setup.outputs.addon-upload-path }} - addon-name: ${{ needs.setup.outputs.addon-name }} - vendor-version: ${{ matrix.vendor-version.image }} - sc4s-version: "No" - - name: Check if pod was deleted - id: is-pod-deleted - if: always() - shell: bash - env: - ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} - run: | - set -o xtrace - if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then - echo "retry-workflow=true" >> "$GITHUB_OUTPUT" - fi - - name: Retrying workflow - id: retry-wf - shell: bash - env: - ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} - if: always() - run: | - set -o xtrace - set +e - if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] - then - WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) - echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" - argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." 
- else - echo "No retry required" - argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows - argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" - fi - - name: check if workflow completed - env: - ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} - if: always() - shell: bash - run: | - set +e - # shellcheck disable=SC2157 - if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then - WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} - else - WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" - fi - ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') - echo "Status of workflow:" "$ARGO_STATUS" - while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] - do - echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." - argo wait "${WORKFLOW_NAME}" -n workflows || true - ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') - done - - name: pull artifacts from s3 bucket - if: always() - run: | - echo "pulling artifacts" - aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ - tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} - - name: pull logs from s3 bucket - if: always() - run: | - # shellcheck disable=SC2157 - if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then - WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} - else - WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" - fi - echo "pulling logs" - mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs - aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive - - uses: actions/upload-artifact@v3 - if: always() - with: - name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests artifacts - path: | - ${{ needs.setup.outputs.directory-path }}/test-results - - uses: actions/upload-artifact@v3 - if: always() - with: - name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests logs - path: | - ${{ needs.setup.outputs.directory-path }}/argo-logs - - name: Test Report - uses: dorny/test-reporter@v1 - if: always() - with: - name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} test report - path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" - reporter: java-junit diff --git a/README.md b/README.md index 65c2958c..b067e891 100644 --- a/README.md +++ b/README.md @@ -182,8 +182,6 @@ lint **Description:** -- Workflow <2.0.0 used [super-linter](https://github.com/github/super-linter) to lint the code, which we don't use anymore now - - Uses [pre-commit](https://pre-commit.com) to run linters (Python, Java, JS and others) **Action used** https://github.com/pre-commit/action @@ -300,35 +298,6 @@ unit::true modinput_functional::true ``` -Validate PR title -======================= - -**Description** - -- A Github Action that ensures that your PR title matches the Conventional Commits spec. 
- -**Action used:** https://github.com/amannn/action-semantic-pull-request - -**Pass/fail behaviour:** - -- The PR title should follow the conventional commit standards - -- Examples for valid PR titles: - -``` -fix: Correct typo. - -feat: Add support for Node 12. - -refactor!: Drop support for Node 6. - -feat(ui): Add Button component. -``` - -**Note** since PR titles only have a single line, you have to use the ! syntax for breaking changes. - -See https://www.conventionalcommits.org/ for more examples. - build ======================= @@ -362,7 +331,6 @@ installation-actions.json installation-update.json ``` - package-splunkbase includes Splunkbase equivalent package code -- package-raw security-virustotal ======================= diff --git a/renovate.json b/renovate.json index f522e5d2..e7b0eba3 100644 --- a/renovate.json +++ b/renovate.json @@ -2,6 +2,11 @@ "extends": [ "config:base", "group:all", + ":semanticCommitTypeAll(chore)", + "schedule:earlyMondays", ":disableDependencyDashboard" + ], + "ignoreDeps": [ + "edplato/trufflehog-actions-scan" ] } \ No newline at end of file
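
The recurring edit in the reusable workflow above is a job-gating change: `run-unit-tests` no longer depends on `build`, `build` now lists the test and scan jobs in its `needs`, and the downstream jobs add an `if: always() && needs.<job>.result == 'success'` guard. The snippet below is a minimal, self-contained sketch of that pattern, not taken from the diff; the `on:` trigger, the `echo` placeholder steps, and the `if: ${{ false }}` stand-in for the real test-inventory check are illustrative only.

```
# Illustrative sketch of the always()-plus-result gating pattern used in the
# reusable workflow above. Placeholder names and steps; not part of the diff.
name: gating-pattern-sketch
on: push

jobs:
  run-unit-tests:
    # May be skipped entirely, e.g. when the add-on ships no unit tests.
    if: ${{ false }}  # stand-in for the test-inventory output check
    runs-on: ubuntu-latest
    steps:
      - run: echo "pytest tests/unit"

  build:
    needs: run-unit-tests
    # always() stops GitHub from cascading the skip to this job; the explicit
    # result check still fails the gate if the tests ran and failed.
    if: |
      always() &&
      (needs.run-unit-tests.result == 'success' || needs.run-unit-tests.result == 'skipped')
    runs-on: ubuntu-latest
    steps:
      - run: echo "build the add-on package"

  appinspect:
    needs: build
    # Downstream jobs only require that build itself succeeded.
    if: |
      always() &&
      needs.build.result == 'success'
    runs-on: ubuntu-latest
    steps:
      - run: echo "scan the built package"
```

Because `always()` overrides the default "skip if any dependency did not succeed" behaviour, each gated job must spell out which upstream results it accepts, which is what the added `needs.build.result == 'success'` conditions in the diff do.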