Update reusable-build-test-release.yml #42
name: build-test-release
on:
  workflow_call:
    inputs:
      marker:
        required: false
        description: 'Parallel run mod_input marker'
        type: string
        default: >-
          [""]
      ui_marker:
        required: false
        description: 'Parallel run ui marker'
        type: string
        default: >-
          [""]
      custom-version:
        required: false
        description: 'Version of release in the form of "x.x.x" string, specified by user instead of automatically generated semantic release'
        type: string
        default: ""
      execute-tests-on-push-to-release:
        required: false
        description: 'Flag to run all tests on push to release branch'
        type: string
        default: 'false'
      k8s-environment:
        required: false
        description: 'Specifies which environment to use for k8s testing. ["production", "staging"]'
        type: string
        default: "production"
      k8s-manifests-branch:
        required: false
        description: "branch for k8s manifests to run the tests on"
        type: string
        default: "v3.0.5"
      scripted-inputs-os-list:
        required: false
        description: "list of OS used for scripted input tests"
        type: string
        default: >-
          ["ubuntu:14.04", "ubuntu:16.04", "ubuntu:18.04", "ubuntu:22.04", "ubuntu:24.04", "redhat:8.4", "redhat:8.5", "redhat:8.6", "redhat:8.8"]
    secrets:
      GH_TOKEN_ADMIN:
        description: Github admin token
        required: true
      SEMGREP_PUBLISH_TOKEN:
        description: Semgrep token
        required: true
      AWS_ACCESS_KEY_ID:
        description: AWS access key id
        required: true
      AWS_DEFAULT_REGION:
        description: AWS default region
        required: true
      AWS_SECRET_ACCESS_KEY:
        description: AWS secret access key
        required: true
      OTHER_TA_REQUIRED_CONFIGS:
        description: other required configs
        required: true
      FOSSA_API_KEY:
        description: API token for FOSSA app
        required: true
      SA_GH_USER_NAME:
        description: GPG signature username
        required: true
      SA_GH_USER_EMAIL:
        description: GPG signature user email
        required: true
      SA_GPG_PRIVATE_KEY:
        description: GPG signature private key
        required: true
      SA_GPG_PASSPHRASE:
        description: GPG signature passphrase
        required: true
      SPL_COM_USER:
        description: username to splunk.com
        required: true
      SPL_COM_PASSWORD:
        description: password to splunk.com
        required: true
permissions:
  contents: read
  packages: read
concurrency:
  group: ${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  validate-custom-version:
    runs-on: ubuntu-latest
    if: ${{ github.event.inputs.custom-version != '' }}
    steps:
      - uses: actions/checkout@v4
      - name: Validate custom version
        run: |
          if [[ ! "${{ github.event.inputs.custom-version }}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Invalid custom version provided. Please provide a valid semver version." | ||
exit 1 | ||
fi | ||
git fetch --tags | ||
if [ "$(git tag -l 'v${{ github.event.inputs.custom-version }}')" ]; then | ||
echo "The provided version already exists. Please provide a unique version." | ||
exit 1 | ||
fi | ||
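      # Illustrative values for the gate above: "1.2.3" passes the regex check;
      # "v1.2.3", "1.2", or "1.2.3-beta.1" fail it; and "1.2.3" is rejected by
      # the tag check if a v1.2.3 tag already exists in the repo.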
  setup-workflow:
    runs-on: ubuntu-latest
    outputs:
      execute-knowledge-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_knowledge_labeled }}
      execute-ui-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_ui_labeled }}
      execute-modinput-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_modinput_functional_labeled }}
      execute-scripted_inputs-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_scripted_inputs_labeled }}
      execute-requirement-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_requirement_test_labeled }}
      s3_bucket_k8s: ${{ steps.k8s-environment.outputs.s3_bucket }}
      argo_server_domain_k8s: ${{ steps.k8s-environment.outputs.argo_server_domain }}
      argo_token_secret_id_k8s: ${{ steps.k8s-environment.outputs.argo_token_secret_id }}
    steps:
      - name: set k8s environment
        id: k8s-environment
        run: |
          if [[ ${{ inputs.k8s-environment }} == 'staging' ]]; then
            echo "setting up argo variables for staging"
            {
              echo "s3_bucket=ta-staging-artifacts"
              echo "argo_server_domain=argo.staging.wfe.splgdi.com"
              echo "argo_token_secret_id=ta-staging-github-workflow-automation-token"
            } >> "$GITHUB_OUTPUT"
          else
            echo "setting up argo variables for production"
            {
              echo "s3_bucket=ta-production-artifacts"
              echo "argo_server_domain=argo.wfe.splgdi.com"
              echo "argo_token_secret_id=ta-github-workflow-automation-token"
            } >> "$GITHUB_OUTPUT"
          fi
      - name: configure tests based on labels
        id: configure-tests-on-labels
        run: |
          set +e
          declare -A EXECUTE_LABELED
          TESTSET=("execute_knowledge" "execute_ui" "execute_modinput_functional" "execute_scripted_inputs" "execute_requirement_test")
          for test_type in "${TESTSET[@]}"; do
            EXECUTE_LABELED["$test_type"]="false"
          done
          case "${{ github.event_name }}" in
            "pull_request")
              labels=$(echo '${{ toJSON(github.event.pull_request.labels) }}' | jq -r '.[] | .name')
              if ${{ github.base_ref == 'main' }} && ${{ contains(github.event.pull_request.labels.*.name, 'use_labels') }}; then
                for test_type in "${TESTSET[@]}"; do
                  if [[ "$labels" =~ $test_type ]]; then
                    EXECUTE_LABELED["$test_type"]="true"
                  fi
                done
              elif ${{ github.base_ref == 'main' }} || ${{ contains(github.event.pull_request.labels.*.name, 'execute_all_tests') }}; then
                for test_type in "${TESTSET[@]}"; do
                  EXECUTE_LABELED["$test_type"]="true"
                done
              else
                for test_type in "${TESTSET[@]}"; do
                  if [[ "$labels" =~ $test_type ]]; then
                    EXECUTE_LABELED["$test_type"]="true"
                  fi
                done
              fi
              ;;
            "push")
              if ${{ github.ref_name == 'main' }} || ${{ github.ref_name == 'develop' }} ||
                ${{ startsWith(github.ref_name, 'release/') && inputs.execute-tests-on-push-to-release == 'true' }}; then
                for test_type in "${TESTSET[@]}"; do
                  EXECUTE_LABELED["$test_type"]="true"
                done
              fi
              ;;
            "schedule")
              for test_type in "${TESTSET[@]}"; do
                EXECUTE_LABELED["$test_type"]="true"
              done
              ;;
            "workflow_dispatch")
              if ${{ inputs.custom-version != '' }}; then
                for test_type in "${TESTSET[@]}"; do
                  EXECUTE_LABELED["$test_type"]="true"
                done
              fi
              ;;
            *)
              echo "No tests were labeled for execution!"
              ;;
          esac
          echo "Tests to execute based on labels:"
          for test_type in "${TESTSET[@]}"; do
            echo "${test_type}_labeled=${EXECUTE_LABELED["$test_type"]}" >> "$GITHUB_OUTPUT"
            echo "${test_type}_labeled: ${EXECUTE_LABELED["$test_type"]}"
          done
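  # Behavior sketch for the label gate above (scenarios are illustrative): a PR
  # into develop labeled "execute_ui" enables only execute_ui; a PR into main
  # without the "use_labels" label enables every test type; a push to a
  # release/* branch enables them all only when execute-tests-on-push-to-release
  # is 'true'. Note that the ${{ }} expressions are rendered to literal
  # true/false before bash runs, which is why they can sit directly in the
  # `if` conditions.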
  validate-pr-title:
    name: Validate PR title
    if: github.event_name == 'pull_request'
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: read
      pull-requests: read
      statuses: write
    steps:
      - uses: amannn/[email protected]
        with:
          wip: true
          validateSingleCommit: true
        env:
          GITHUB_TOKEN: ${{ github.token }}
  meta:
    runs-on: ubuntu-latest
    outputs:
      matrix_supportedSplunk: ${{ steps.matrix.outputs.supportedSplunk }}
      matrix_latestSplunk: ${{ steps.matrix.outputs.latestSplunk }}
      matrix_supportedSC4S: ${{ steps.matrix.outputs.supportedSC4S }}
      matrix_supportedModinputFunctionalVendors: ${{ steps.matrix.outputs.supportedModinputFunctionalVendors }}
      matrix_supportedUIVendors: ${{ steps.matrix.outputs.supportedUIVendors }}
      openssl3_splunk: ${{ steps.openssl3_splunk.outputs.splunk }}
      openssl3_sc4s: ${{ steps.openssl3_splunk.outputs.sc4s }}
    permissions:
      contents: write
      packages: read
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: false
          persist-credentials: false
      - id: matrix
        uses: splunk/[email protected]
      - name: OpenSSL3-splunk
        id: openssl3_splunk
        run: |
          echo "splunk={\"version\":\"splunk-9.4-OpenSSL3-1c4ff4fc48b6\", \"build\":\"1c4ff4fc48b6\", \"islatest\":false, \"isoldest\":false}" >> "$GITHUB_OUTPUT"
          echo "sc4s={\"version\":\"3.30.1\", \"docker_registry\":\"ghcr.io/splunk/splunk-connect-for-syslog/container3\"}" >> "$GITHUB_OUTPUT"
      - name: job summary
        run: |
          splunk_version_list=$( { echo '${{ steps.matrix.outputs.supportedSplunk }}' | jq -r '.[].version'; echo '${{ steps.openssl3_splunk.outputs.splunk }}' | jq -r '.version'; } | paste -sd "," -)
          sc4s_version_list=$(echo '${{ steps.matrix.outputs.supportedSC4S }}' | jq -r '.[].version')
          echo -e "## Summary of Versions Used\n- **Splunk versions used:** (${splunk_version_list})\n- **SC4S versions used:** (${sc4s_version_list})\n- Browser: Chrome" >> "$GITHUB_STEP_SUMMARY"
  fossa-scan:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: run fossa analyze and create report
        id: fossa-scan
        run: |
          curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash
          fossa analyze --debug 2>&1 | tee /tmp/fossa_analyze_output.txt
          exit_code="${PIPESTATUS[0]}"
          FOSSA_REPORT_URL=$(grep -o 'https://app.fossa.com[^ ]*' /tmp/fossa_analyze_output.txt || true)
          echo "url=$FOSSA_REPORT_URL"
          echo "FOSSA_REPORT_URL=$FOSSA_REPORT_URL" >> "$GITHUB_OUTPUT"
          fossa report attribution --format text --timeout 600 > /tmp/THIRDPARTY
          exit "$exit_code"
        env:
          FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }}
      - name: upload THIRDPARTY file
        uses: actions/upload-artifact@v4
        with:
          name: THIRDPARTY
          path: /tmp/THIRDPARTY
      - name: job summary
        if: success() || failure()
        run: |
          echo "FOSSA Report: ${{ steps.fossa-scan.outputs.FOSSA_REPORT_URL }}" >> "$GITHUB_STEP_SUMMARY"
  fossa-test:
    continue-on-error: true
    runs-on: ubuntu-latest
    needs:
      - fossa-scan
    steps:
      - uses: actions/checkout@v4
      - name: run fossa test
        run: |
          curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash
          fossa test --debug
        env:
          FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }}
  compliance-copyrights:
    name: compliance-copyrights
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: REUSE Compliance Check
        uses: fsfe/[email protected]
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.7"
      - uses: pre-commit/[email protected]
  review_secrets:
    name: security-detect-secrets
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        if: github.event_name != 'pull_request'
        uses: actions/checkout@v4
        with:
          submodules: false
          fetch-depth: "0"
      - name: Checkout for PR
        if: github.event_name == 'pull_request'
        uses: actions/checkout@v4
        with:
          submodules: false
          fetch-depth: "0"
          ref: ${{ github.head_ref }}
      - name: Secret Scanning Trufflehog
        uses: trufflesecurity/[email protected]
        with:
          extra_args: -x .github/workflows/exclude-patterns.txt --json --only-verified
          version: 3.77.0
  semgrep:
    uses: splunk/sast-scanning/.github/workflows/sast-scan.yml@main
    secrets:
      SEMGREP_KEY: ${{ secrets.SEMGREP_PUBLISH_TOKEN }}
  test-inventory:
    runs-on: ubuntu-latest
    # Map a step output to a job output
    outputs:
      unit: ${{ steps.testset.outputs.unit }}
      knowledge: ${{ steps.testset.outputs.knowledge }}
      ui: ${{ steps.testset.outputs.ui }}
      modinput_functional: ${{ steps.testset.outputs.modinput_functional }}
      requirement_test: ${{ steps.testset.outputs.requirement_test }}
      scripted_inputs: ${{ steps.testset.outputs.scripted_inputs }}
      ucc_modinput_functional: ${{ steps.modinput-version.outputs.ucc_modinput_tests }}
    steps:
      - uses: actions/checkout@v4
      - id: testset
        name: Check available test types
        run: |
          find tests -mindepth 1 -maxdepth 1 -type d | sed 's|^tests/||g' | while read -r TESTSET; do echo "$TESTSET=true" >> "$GITHUB_OUTPUT"; echo "$TESTSET::true"; done
      - id: modinput-version
        name: Check modinput tests version
        run: |
          CENTAURS_MODINPUT_TESTS_CHECK_DIR="tests/modinput_functional/centaurs"
          ucc_modinput_tests="true"
          if [ -d "$CENTAURS_MODINPUT_TESTS_CHECK_DIR" ]; then
            ucc_modinput_tests="false"
          fi
          echo "ucc_modinput_tests=$ucc_modinput_tests" >> "$GITHUB_OUTPUT"
  run-unit-tests:
    name: test-unit-python3-${{ matrix.python-version }}
    if: ${{ needs.test-inventory.outputs.unit == 'true' }}
    runs-on: ubuntu-latest
    needs:
      - test-inventory
    strategy:
      fail-fast: false
      matrix:
        python-version:
          - "3.7"
    permissions:
      actions: read
      deployments: read
      contents: read
      packages: read
      statuses: read
      checks: write
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Setup addon
        run: |
          if [ -f "poetry.lock" ]
          then
            mkdir -p package/lib || true
            python${{ matrix.python-version }} -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0
            poetry lock --check
            poetry export --without-hashes -o package/lib/requirements.txt
            poetry export --without-hashes --with dev -o requirements_dev.txt
          fi
          if [ ! -f requirements_dev.txt ]; then echo "no requirements"; exit 0; fi
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected]
          poetry install --with dev
      - name: Create directories
        run: |
          mkdir -p /opt/splunk/var/log/splunk
          chmod -R 777 /opt/splunk/var/log/splunk
      - name: Copy pytest ini
        run: cp tests/unit/pytest-ci.ini pytest.ini
      - name: Run Pytest with coverage
        run: poetry run pytest --cov=./ --cov-report=xml --junitxml=test-results/junit.xml tests/unit
      - name: Job summary
        continue-on-error: true
        run: |
          sudo apt-get install -y libxml2-utils
          junit_xml_file=$(find "test-results" -name "*.xml" -type f 2>/dev/null | head -n 1)
          if [ -n "$junit_xml_file" ]; then
            total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file")
            failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file")
            errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file")
            skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file")
            passed=$((total_tests - failures - errors - skipped))
            echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY"
          else
            echo "no XML File found, exiting"
            exit 1
          fi
      - uses: actions/upload-artifact@v4
        if: success() || failure()
        with:
          name: test-results-unit-python_${{ matrix.python-version }}
          path: test-results/*
  run-unit-tests-3_9:
    name: test-unit-python3-${{ matrix.python-version }}
    if: ${{ needs.test-inventory.outputs.unit == 'true' }}
    runs-on: ubuntu-latest
    needs:
      - test-inventory
    strategy:
      fail-fast: false
      matrix:
        python-version:
          - "3.9"
    permissions:
      actions: read
      deployments: read
      contents: read
      packages: read
      statuses: read
      checks: write
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Setup addon
        run: |
          if [ -f "poetry.lock" ]
          then
            mkdir -p package/lib || true
            python${{ matrix.python-version }} -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0
            poetry lock --check
            poetry export --without-hashes -o package/lib/requirements.txt
            poetry export --without-hashes --with dev -o requirements_dev.txt
          fi
          if [ ! -f requirements_dev.txt ]; then echo "no requirements"; exit 0; fi
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected]
          poetry install --with dev
      - name: Create directories
        run: |
          mkdir -p /opt/splunk/var/log/splunk
          chmod -R 777 /opt/splunk/var/log/splunk
      - name: Copy pytest ini
        run: cp tests/unit/pytest-ci.ini pytest.ini
      - name: Run Pytest with coverage
        run: poetry run pytest --cov=./ --cov-report=xml --junitxml=test-results/junit.xml tests/unit
      - name: Job summary
        continue-on-error: true
        run: |
          sudo apt-get install -y libxml2-utils
          junit_xml_file=$(find "test-results" -name "*.xml" -type f 2>/dev/null | head -n 1)
          if [ -n "$junit_xml_file" ]; then
            total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file")
            failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file")
            errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file")
            skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file")
            passed=$((total_tests - failures - errors - skipped))
            echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY"
          else
            echo "no XML File found, exiting"
            exit 1
          fi
      - uses: actions/upload-artifact@v4
        if: success() || failure()
        with:
          name: test-results-unit-python_${{ matrix.python-version }}
          path: test-results/*
  build:
    runs-on: ubuntu-latest
    needs:
      - validate-custom-version
      - setup-workflow
      - test-inventory
      - meta
      - compliance-copyrights
      - lint
      - review_secrets
      - semgrep
      - run-unit-tests
      - fossa-scan
    if: ${{ !cancelled() && (needs.run-unit-tests.result == 'success' || needs.run-unit-tests.result == 'skipped') && (needs.validate-custom-version.result == 'success' || needs.validate-custom-version.result == 'skipped') }}
    outputs:
      buildname: ${{ steps.buildupload.outputs.name }}
    permissions:
      contents: write
      packages: read
    steps:
      - uses: actions/checkout@v4
        with:
          # Very important: semantic-release won't trigger a tagged
          # build unless this is set to false
          persist-credentials: false
      - name: Setup python
        uses: actions/setup-python@v5
        with:
          python-version: 3.7
      - name: create requirements file for pip
        run: |
          if [ -f "poetry.lock" ]
          then
            echo "poetry.lock found"
            python3.7 -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0
            poetry export --without-hashes -o requirements.txt
            if [ "$(grep -cve '^\s*$' requirements.txt)" -ne 0 ]
            then
              echo "Prod dependencies were found, creating package/lib folder"
              mkdir -p package/lib || true
              mv requirements.txt package/lib
            else
              echo "No prod dependencies were found"
              rm requirements.txt
            fi
            poetry export --without-hashes --with dev -o requirements_dev.txt
            cat requirements_dev.txt
          fi
      - name: Get pip cache dir
        id: pip-cache
        run: |
          echo "dir=$(pip cache dir)" >> "$GITHUB_OUTPUT"
      - name: Run Check there are libraries to scan
        id: checklibs
        run: if [ -f requirements_dev.txt ]; then echo "ENABLED=true" >> "$GITHUB_OUTPUT"; fi
      - name: pip cache
        if: ${{ steps.checklibs.outputs.ENABLED == 'true' }}
        uses: actions/cache@v4
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements_dev.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Install deps
        if: ${{ steps.checklibs.outputs.ENABLED == 'true' }}
        run: |
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected]
          pip install -r requirements_dev.txt
      - name: Semantic Release Get Next
        id: semantic
        if: github.event_name != 'pull_request'
        uses: splunk/[email protected]
        with:
          dry_run: true
          git_committer_name: ${{ secrets.SA_GH_USER_NAME }}
          git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }}
          gpg_private_key: ${{ secrets.SA_GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.SA_GPG_PASSPHRASE }}
        env:
          GITHUB_TOKEN: ${{ github.token }}
      - name: Determine the version to build
        id: BuildVersion
        run: |
          INPUT_SEMVER="${{ github.event.inputs.custom-version != '' && github.event.inputs.custom-version || steps.semantic.outputs.new_release_version }}"
          echo "Initial semver ${INPUT_SEMVER}"
          INPUT_PRNUMBER="${{ github.event.number }}"
          SEMVER_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+$'
          BETA_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$'
          echo "working with version $INPUT_SEMVER"
          if [[ $INPUT_SEMVER =~ $SEMVER_REGEX ]]
          then
            echo "using provided semver"
            VERSION=$INPUT_SEMVER
          elif [[ $INPUT_SEMVER =~ $BETA_REGEX ]]
          then
            VERSION=$(echo "$INPUT_SEMVER" | awk '{gsub(/-beta\./, "-B"); print}')
          else
            if [[ $GITHUB_EVENT_NAME != 'pull_request' ]]
            then
              echo "this is not a release build and not a PR, using run ID"
              VERSION=0.0.${GITHUB_RUN_ID}
            else
              echo "this is not a release build and is a PR, using PR number and run ID"
              VERSION=0.${INPUT_PRNUMBER}.${GITHUB_RUN_ID}
            fi
          fi
          FINALVERSION="${VERSION//v}"
          echo "Version to build is ${FINALVERSION}"
          echo "VERSION=${FINALVERSION}" >> "$GITHUB_OUTPUT"
      - name: Download THIRDPARTY
        if: github.event_name != 'pull_request' && github.event_name != 'schedule'
        uses: actions/download-artifact@v4
        with:
          name: THIRDPARTY
      - name: Download THIRDPARTY (Optional for PR and schedule)
        if: github.event_name == 'pull_request' || github.event_name == 'schedule'
        continue-on-error: true
        uses: actions/download-artifact@v4
        with:
          name: THIRDPARTY
      - name: Update Notices
        run: |
          cp -f THIRDPARTY package/THIRDPARTY || echo "THIRDPARTY file not found (allowed for PR and schedule)"
      - name: Build Package
        id: uccgen
        uses: splunk/addonfactory-ucc-generator-action@v2
        with:
          version: ${{ steps.BuildVersion.outputs.VERSION }}
      - name: Slim Package
        id: slim
        run: |
          pip install splunk-packaging-toolkit
          pip install semantic-version==2.6.0
          INPUT_SOURCE=${{ steps.uccgen.outputs.OUTPUT }}
          SOURCE_REGEX='^.*/$'
          if [[ $INPUT_SOURCE =~ $SOURCE_REGEX ]]; then
            echo "Removing trailing / from INPUT_SOURCE, slim is picky"
            INPUT_SOURCE="${INPUT_SOURCE%/}"
          fi
          slim generate-manifest "${INPUT_SOURCE}" --update >/tmp/app.manifest || true
          cp /tmp/app.manifest "${INPUT_SOURCE}"/app.manifest
          mkdir -p build/package/splunkbase
          mkdir -p build/package/deployment
          slim package -o build/package/splunkbase "${INPUT_SOURCE}"
          for f in build/package/splunkbase/*.tar.gz; do
            n=$(echo "${f}" | awk '{gsub("-[0-9]+.[0-9]+.[0-9]+-[a-f0-9]+-?", ""); print}' | sed 's/.tar.gz/.spl/')
            mv "${f}" "${n}"
          done
          PACKAGE=$(ls build/package/splunkbase/*)
          slim partition "${PACKAGE}" -o build/package/deployment/ || true
          for f in build/package/deployment/*.tar.gz; do
            n=$(echo "${f}" | awk '{gsub("-[0-9]+.[0-9]+.[0-9]+-[a-f0-9]+-?", ""); print}' | sed 's/.tar.gz/.spl/')
            mv "${f}" "${n}"
          done
          slim validate "${PACKAGE}"
          chmod -R +r build
          echo "OUTPUT=$PACKAGE" >> "$GITHUB_OUTPUT"
        if: always()
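      # Renaming sketch (hypothetical add-on name): slim emits something like
      # build/package/splunkbase/Splunk_TA_example-1.2.3-abc123f.tar.gz; the
      # loops above strip the version/commit infix and swap the extension,
      # leaving build/package/splunkbase/Splunk_TA_example.spl as the upload
      # candidate.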
      - name: artifact-openapi
        uses: actions/upload-artifact@v4
        with:
          name: artifact-openapi
          path: ${{ github.workspace }}/${{ steps.uccgen.outputs.OUTPUT }}/appserver/static/openapi.json
        if: ${{ !cancelled() && needs.test-inventory.outputs.ucc_modinput_functional == 'true' && needs.test-inventory.outputs.modinput_functional == 'true' }}
      - name: artifact-splunk-base
        uses: actions/upload-artifact@v4
        with:
          name: package-splunkbase
          path: ${{ steps.slim.outputs.OUTPUT }}
        if: ${{ !cancelled() }}
      - name: upload-build-to-s3
        id: buildupload
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
        run: |
          echo "name=$(basename "${{ steps.slim.outputs.OUTPUT }}")" >> "$GITHUB_OUTPUT"
          basename "${{ steps.slim.outputs.OUTPUT }}"
          aws s3 cp "${{ steps.slim.outputs.OUTPUT }}" "s3://${{ needs.setup-workflow.outputs.s3_bucket_k8s }}/ta-apps/"
      - name: artifact-splunk-parts
        uses: actions/upload-artifact@v4
        with:
          name: package-deployment
          path: build/package/deployment**
        if: ${{ !cancelled() }}
  build-3_9:
    runs-on: ubuntu-latest
    needs:
      - validate-custom-version
      - setup-workflow
      - test-inventory
      - meta
      - compliance-copyrights
      - lint
      - review_secrets
      - semgrep
      - run-unit-tests-3_9
      - fossa-scan
    if: |
      always() &&
      (needs.run-unit-tests-3_9.result == 'success' || needs.run-unit-tests-3_9.result == 'skipped') &&
      (needs.validate-custom-version.result == 'success' || needs.validate-custom-version.result == 'skipped')
    permissions:
      contents: write
      packages: read
    steps:
      - uses: actions/checkout@v4
        with:
          # Very important: semantic-release won't trigger a tagged
          # build unless this is set to false
          persist-credentials: false
      - name: Setup python
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
      - name: create requirements file for pip
        run: |
          if [ -f "poetry.lock" ]
          then
            echo "poetry.lock found"
            python3.9 -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0
            poetry export --without-hashes -o requirements.txt
            if [ "$(grep -cve '^\s*$' requirements.txt)" -ne 0 ]
            then
              echo "Prod dependencies were found, creating package/lib folder"
              mkdir -p package/lib || true
              mv requirements.txt package/lib
            else
              echo "No prod dependencies were found"
              rm requirements.txt
            fi
            poetry export --without-hashes --with dev -o requirements_dev.txt
            cat requirements_dev.txt
          fi
      - id: pip-cache
        run: |
          echo "dir=$(pip cache dir)" >> "$GITHUB_OUTPUT"
      - name: pip cache
        uses: actions/cache@v4
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-python3_9-${{ hashFiles('requirements_dev.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-python3_9
      - run: |
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com
          git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected]
          pip install -r requirements_dev.txt
      - id: semantic
        if: github.event_name != 'pull_request'
        uses: splunk/[email protected]
        with:
          dry_run: true
          git_committer_name: ${{ secrets.SA_GH_USER_NAME }}
          git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }}
          gpg_private_key: ${{ secrets.SA_GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.SA_GPG_PASSPHRASE }}
        env:
          GITHUB_TOKEN: ${{ github.token }}
      - id: BuildVersion
        run: |
          INPUT_SEMVER="${{ github.event.inputs.custom-version != '' && github.event.inputs.custom-version || steps.semantic.outputs.new_release_version }}"
          echo "Initial semver ${INPUT_SEMVER}"
          INPUT_PRNUMBER="${{ github.event.number }}"
          SEMVER_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+$'
          BETA_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$'
          echo "working with version $INPUT_SEMVER"
          if [[ $INPUT_SEMVER =~ $SEMVER_REGEX ]]
          then
            echo "using provided semver"
            VERSION=$INPUT_SEMVER
          elif [[ $INPUT_SEMVER =~ $BETA_REGEX ]]
          then
            VERSION=$(echo "$INPUT_SEMVER" | awk '{gsub(/-beta\./, "-B"); print}')
          else
            if [[ $GITHUB_EVENT_NAME != 'pull_request' ]]
            then
              echo "this is not a release build and not a PR, using run ID"
              VERSION=0.0.${GITHUB_RUN_ID}
            else
              echo "this is not a release build and is a PR, using PR number and run ID"
              VERSION=0.${INPUT_PRNUMBER}.${GITHUB_RUN_ID}
            fi
          fi
          FINALVERSION="${VERSION//v}"
          echo "Version to build is $FINALVERSION"
          echo "VERSION=$FINALVERSION" >> "$GITHUB_OUTPUT"
      - id: uccgen
        uses: splunk/addonfactory-ucc-generator-action@v2
        with:
          version: ${{ steps.BuildVersion.outputs.VERSION }}
  run-requirements-unit-tests:
    runs-on: ubuntu-latest
    needs:
      - build
      - test-inventory
    if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.requirement_test == 'true' }}
    permissions:
      actions: read
      deployments: read
      contents: read
      packages: read
      statuses: read
      checks: write
    steps:
      - uses: actions/checkout@v4
      - name: Install Python 3
        uses: actions/setup-python@v5
        with:
          python-version: 3.7
      - name: run-tests
        uses: splunk/[email protected]
        with:
          input-files: tests/requirement_test/logs
      - name: Archive production artifacts
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          name: test-results
          path: |
            test_*.txt
  appinspect:
    name: quality-appinspect-${{ matrix.tags }}
    needs: build
    if: ${{ !cancelled() && needs.build.result == 'success' }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        tags:
          - "cloud"
          - "appapproval"
          - "deprecated_feature"
          - "developer_guidance"
          - "future"
          - "self-service"
          - "splunk_appinspect"
          - "manual"
    steps:
      - uses: actions/checkout@v4
      - uses: actions/download-artifact@v4
        with:
          name: package-splunkbase
          path: build/package/
      - name: Scan
        uses: splunk/[email protected]
        with:
          app_path: build/package/
          included_tags: ${{ matrix.tags }}
          result_file: appinspect_result_${{ matrix.tags }}.json
      - name: upload-appinspect-report
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          name: appinspect_${{ matrix.tags }}_checks.json
          path: appinspect_result_${{ matrix.tags }}.json
      - name: upload-markdown
        if: matrix.tags == 'manual'
        uses: actions/upload-artifact@v4
        with:
          name: check_markdown
          path: |
            *_markdown.txt
  appinspect-api:
    name: appinspect api ${{ matrix.tags }}
    needs: build
    if: |
      !cancelled() &&
      needs.build.result == 'success' &&
      ( github.base_ref == 'main' || github.ref_name == 'main' )
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        tags:
          - "cloud"
    steps:
      - uses: actions/checkout@v4
      - uses: actions/download-artifact@v4
        with:
          name: package-splunkbase
          path: build/package
      - name: AppInspect API
        uses: splunk/[email protected]
        with:
          username: ${{ secrets.SPL_COM_USER }}
          password: ${{ secrets.SPL_COM_PASSWORD }}
          app_path: build/package/
          included_tags: ${{ matrix.tags }}
      - uses: actions/upload-artifact@v4
        if: always()
        with:
          name: appinspect-api-html-report-${{ matrix.tags }}
          path: AppInspect_response.html
  setup:
    needs:
      - setup-workflow
      - build
      - test-inventory
    if: ${{ !cancelled() && needs.build.result == 'success' }}
    runs-on: ubuntu-latest
    outputs:
      argo-server: ${{ steps.test-setup.outputs.argo-server }}
      argo-http1: ${{ steps.test-setup.outputs.argo-http1 }}
      argo-secure: ${{ steps.test-setup.outputs.argo-secure }}
      spl-host-suffix: ${{ steps.test-setup.outputs.spl-host-suffix }}
      argo-href: ""
      argo-base-href: ${{ steps.test-setup.outputs.argo-base-href }}
      argo-workflow-tmpl-name: ${{ steps.test-setup.outputs.argo-workflow-tmpl-name }}
      argo-cancel-workflow-tmpl-name: ${{ steps.test-setup.outputs.argo-cancel-workflow-tmpl-name }}
      k8s-manifests-branch: ${{ steps.test-setup.outputs.k8s-manifests-branch }}
      argo-namespace: ${{ steps.test-setup.outputs.argo-namespace }}
      addon-name: ${{ steps.test-setup.outputs.addon-name }}
      job-name: ${{ steps.test-setup.outputs.job-name }}
      labels: ${{ steps.test-setup.outputs.labels }}
      addon-upload-path: ${{ steps.test-setup.outputs.addon-upload-path }}
      directory-path: ${{ steps.test-setup.outputs.directory-path }}
      s3-bucket: ${{ steps.test-setup.outputs.s3-bucket }}
    env:
      BUILD_NAME: ${{ needs.build.outputs.buildname }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
          token: ${{ secrets.GH_TOKEN_ADMIN }}
      - name: setup for test
        id: test-setup
        shell: bash
        run: |
          sudo apt-get install -y crudini
          ADDON_NAME=$(crudini --get package/default/app.conf id name | tr '[:lower:]' '[:upper:]')
          if [[ -n $(echo "${ADDON_NAME}" | awk -F 'SPLUNK_TA_' '{print $2}') ]]
          then
            ADDON_NAME=$(echo "${ADDON_NAME}" | awk -F 'SPLUNK_TA_' '{print $2}')
          elif [[ -n $(echo "${ADDON_NAME}" | awk -F '_FOR_SPLUNK' '{print $1}') ]]
          then
            ADDON_NAME=$(echo "${ADDON_NAME}" | awk -F '_FOR_SPLUNK' '{print $1}')
          fi
          JOB_NAME=$(echo "$ADDON_NAME" | tail -c 16)-$(echo "${GITHUB_SHA}" | tail -c 8)-TEST-TYPE-${GITHUB_RUN_ID}
          JOB_NAME=${JOB_NAME//[_.]/-}
          LABELS="addon-name=${ADDON_NAME}"
          ADDON_UPLOAD_PATH="s3://${{ needs.setup-workflow.outputs.s3_bucket_k8s }}/ta-apps/${{ needs.build.outputs.buildname }}"
          {
            echo "argo-server=${{ needs.setup-workflow.outputs.argo_server_domain_k8s }}:443"
            echo "argo-http1=true"
            echo "argo-secure=true"
            echo -e "argo-base-href=\'\'"
            echo "argo-namespace=workflows"
            echo "argo-workflow-tmpl-name=ta-workflow"
            echo "argo-cancel-workflow-tmpl-name=cancel-workflow"
            echo "directory-path=/tmp"
            echo "s3-bucket=${{ needs.setup-workflow.outputs.s3_bucket_k8s }}"
            echo "addon-name=\"$ADDON_NAME\""
            echo "job-name=wf-$JOB_NAME"
            echo "labels=$LABELS"
            echo "addon-upload-path=$ADDON_UPLOAD_PATH"
            echo "spl-host-suffix=wfe.splgdi.com"
            echo "k8s-manifests-branch=${{ inputs.k8s-manifests-branch }}"
          } >> "$GITHUB_OUTPUT"
      - uses: actions/download-artifact@v4
        if: ${{ needs.test-inventory.outputs.ucc_modinput_functional == 'true' && needs.test-inventory.outputs.modinput_functional == 'true' }}
        id: download-openapi
        with:
          name: artifact-openapi
          path: ${{ github.workspace }}
      - name: Setup python
        if: steps.download-openapi.conclusion != 'skipped'
        uses: actions/setup-python@v5
        with:
          python-version: 3.7
      - name: modinput-test-prerequisites
        if: steps.download-openapi.conclusion != 'skipped'
        shell: bash
        env:
          PYTHON_KEYRING_BACKEND: keyring.backends.null.Keyring
        run: |
          python3.7 -m pip install poetry==1.5.1
          export POETRY_REPOSITORIES_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_URL=https://github.com/splunk/addonfactory-ucc-test.git
          export POETRY_HTTP_BASIC_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_USERNAME=${{ secrets.SA_GH_USER_NAME }}
          export POETRY_HTTP_BASIC_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_PASSWORD=${{ secrets.GH_TOKEN_ADMIN }}
          poetry install --only modinput
          poetry run ucc-test-modinput -o ${{ steps.download-openapi.outputs.download-path }}/openapi.json -t ${{ steps.download-openapi.outputs.download-path }}/tmp/
      - name: upload-swagger-artifacts-to-s3
        if: steps.download-openapi.conclusion != 'skipped'
        id: swaggerupload
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
        run: |
          swagger_name=swagger_$(basename "$BUILD_NAME" .spl)
          aws s3 sync "${{ steps.download-openapi.outputs.download-path }}/tmp/restapi_client/" "s3://${{ needs.setup-workflow.outputs.s3_bucket_k8s }}/ta-apps/$swagger_name/" --exclude "*" --include "README.md" --include "*swagger_client*" --only-show-errors
  run-knowledge-tests:
    if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.knowledge == 'true' && needs.setup-workflow.outputs.execute-knowledge-labeled == 'true' }}
    needs:
      - build
      - test-inventory
      - setup
      - meta
      - setup-workflow
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }}
        sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }}
        openssl3_splunk: [ false ]
        include:
          - splunk: ${{ fromJson(needs.meta.outputs.openssl3_splunk) }}
            sc4s: ${{ fromJson(needs.meta.outputs.openssl3_sc4s) }}
            openssl3_splunk: true
    container:
      image: ghcr.io/splunk/workflow-engine-base:4.1.0
    env:
      ARGO_SERVER: ${{ needs.setup.outputs.argo-server }}
      ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }}
      ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }}
      ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }}
      ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }}
      SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}
      TEST_TYPE: "knowledge"
      TEST_ARGS: ""
    permissions:
      actions: read
      deployments: read
      contents: read
      packages: read
      statuses: read
      checks: write
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: configure git # Configures git to avoid the "dubious ownership" error in the later test-reporter stage
        id: configure-git
        run: |
          git --version
          git_path="$(pwd)"
          echo "$git_path"
          git config --global --add safe.directory "$git_path"
      - name: capture start time
        id: capture-start-time
        run: |
          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
      - name: Read secrets from AWS Secrets Manager into environment variables
        id: get-argo-token
        run: |
          ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString')
          echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
      - name: create job name
        id: create-job-name
        shell: bash
        run: |
          RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4)
          JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING}
          JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}}
          JOB_NAME=${JOB_NAME//[_.]/-}
          JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]')
          echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT"
      - name: run-tests
        id: run-tests
        timeout-minutes: 340
        continue-on-error: true
        env:
          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
        uses: splunk/[email protected]
        with:
          splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}
          test-type: ${{ env.TEST_TYPE }}
          test-args: ""
          job-name: ${{ steps.create-job-name.outputs.job-name }}
          labels: ${{ needs.setup.outputs.labels }}
          workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }}
          workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }}
          addon-url: ${{ needs.setup.outputs.addon-upload-path }}
          addon-name: ${{ needs.setup.outputs.addon-name }}
          sc4s-version: ${{ matrix.sc4s.version }}
          sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }}
          k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
      - name: Read secrets from AWS Secrets Manager again into environment variables in case of credential rotation
        id: update-argo-token
        if: ${{ !cancelled() }}
        run: |
          ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString')
          echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
      - name: calculate timeout
        id: calculate-timeout
        run: |
          start_time=${{ steps.capture-start-time.outputs.start_time }}
          current_time=$(date +%s)
          remaining_time_minutes=$(( 350 - ((current_time - start_time) / 60) ))
          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
      - name: Check if pod was deleted
        id: is-pod-deleted
        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
        if: ${{ !cancelled() }}
        shell: bash
        env:
          ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }}
        run: |
          set -o xtrace
          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
          fi
      - name: Cancel workflow
        env:
          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
        if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }}
        run: |
          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
          cancel_workflow_name=$(echo "$cancel_response" | jq -r '.metadata.name')
          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
          else
            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
            exit 1
          fi
      - name: Retrying workflow
        id: retry-wf
        shell: bash
        env:
          ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }}
        if: ${{ !cancelled() }}
        run: |
          set -o xtrace
          set +e
          if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]]
          then
            WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name)
            echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT"
            argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..."
          else
            echo "No retry required"
            argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows
            argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon"
          fi
      - name: check if workflow completed
        env:
          ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }}
        shell: bash
        if: ${{ !cancelled() }}
        run: |
          set +e
          # shellcheck disable=SC2157
          if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then
            WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }}
          else
            WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}"
          fi
          ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase')
          echo "Status of workflow:" "$ARGO_STATUS"
          while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ]
          do
            echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete."
            argo wait "${WORKFLOW_NAME}" -n workflows || true
            ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase')
          done
      - name: pull artifacts from s3 bucket
        if: ${{ !cancelled() }}
        run: |
          echo "pulling artifacts"
          aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/
          tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }}
      - name: pull logs from s3 bucket
        if: ${{ !cancelled() }}
        run: |
          # shellcheck disable=SC2157
          if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then
            WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }}
          else
            WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}"
          fi
          echo "pulling logs"
          mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs
          aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive
      - uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests artifacts
          path: |
            ${{ needs.setup.outputs.directory-path }}/test-results
      - uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests logs
          path: |
            ${{ needs.setup.outputs.directory-path }}/argo-logs
      - name: Upload cim-compliance-report for ${{ matrix.splunk.version }}
        uses: actions/upload-artifact@v4
        if: ${{ matrix.splunk.islatest == true }}
        with:
          name: cim-compliance-report
          path: |
            ${{ needs.setup.outputs.directory-path }}/test-results/cim-compliance-report.md
      - name: Test Report
        id: test_report
        uses: dorny/[email protected]
        if: ${{ !cancelled() }}
        with:
          name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} test report
          path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
          reporter: java-junit
      - name: Parse JUnit XML
        if: ${{ !cancelled() }}
        run: |
          apt-get install -y libxml2-utils
          junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results"
          junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1)
          if [ -n "$junit_xml_file" ]; then
            total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file")
            failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file")
            errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file")
            skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file")
            passed=$((total_tests - failures - errors - skipped))
            echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors |$skipped |${{ steps.test_report.outputs.url_html }}" > job_summary.txt
          else
            echo "no XML File found, exiting"
            exit 1
          fi
      - name: Upload-artifact-for-github-summary
        uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: summary-ko-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}
          path: job_summary.txt
      - name: pull diag from s3 bucket
        if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }}
        run: |
          echo "pulling diag"
          aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/
      - uses: actions/upload-artifact@v4
        if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }}
        with:
          name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests diag
          path: |
            ${{ needs.setup.outputs.directory-path }}/diag*
  knowledge-tests-report:
    needs: run-knowledge-tests
    runs-on: ubuntu-latest
    if: ${{ !cancelled() && needs.run-knowledge-tests.result != 'skipped' }}
    steps:
      - name: Download all summaries
        uses: actions/download-artifact@v4
        with:
          pattern: summary-ko*
      - name: Combine summaries into a table
        run: |
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
          for file in summary-ko*/job_summary.txt; do
            cat "$file" >> "$GITHUB_STEP_SUMMARY"
          done
      - uses: geekyeggo/delete-artifact@v5
        with:
          name: |
            summary-ko*
  run-requirement-tests:
    if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.requirement_test == 'true' && needs.setup-workflow.outputs.execute-requirement-labeled == 'true' }}
    needs:
      - build
      - test-inventory
      - setup
      - meta
      - setup-workflow
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }}
        sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }}
        openssl3_splunk: [ false ]
        include:
          - splunk: ${{ fromJson(needs.meta.outputs.openssl3_splunk) }}
            sc4s: ${{ fromJson(needs.meta.outputs.openssl3_sc4s) }}
            openssl3_splunk: true
    container:
      image: ghcr.io/splunk/workflow-engine-base:4.1.0
    env:
      ARGO_SERVER: ${{ needs.setup.outputs.argo-server }}
      ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }}
      ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }}
      ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }}
      ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }}
      TEST_TYPE: "requirement_test"
      TEST_ARGS: ""
    permissions:
      actions: read
      deployments: read
      contents: read
      packages: read
      statuses: read
      checks: write
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: configure git # Configures git to avoid the "dubious ownership" error in the later test-reporter stage
        id: configure-git
        run: |
          git --version
          git_path="$(pwd)"
          echo "$git_path"
          git config --global --add safe.directory "$git_path"
      - name: capture start time
        id: capture-start-time
        run: |
          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
      - name: Read secrets from AWS Secrets Manager into environment variables
        id: get-argo-token
        run: |
          ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString')
          echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
      - name: create job name
        id: create-job-name
        shell: bash
        run: |
          RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4)
          JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING}
          JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}}
          JOB_NAME=${JOB_NAME//[_.]/-}
          JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]')
          echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT"
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: "" | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
sc4s-version: ${{ matrix.sc4s.version }} | ||
sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }} | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
shell: bash | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
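# `argo wait` can return before the workflow actually finishes (e.g. on a transient API
# error), so this loop re-reads .status.phase after every wait and only exits once the
# phase is something other than Running or Pending (Succeeded, Failed, or Error).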
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
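# The Argo workflow stages test results as one tarball under a job-scoped S3 prefix; the
# step copies it down and unpacks it in place. Equivalent shell, with hypothetical values
# substituted for the expressions:
#   aws s3 cp s3://my-artifacts-bucket/artifacts-my-job/my-job.tgz work/
#   tar -xf work/my-job.tgz -C work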
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
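# xmllint's XPath sum() aggregates the counters across all <testsuite> elements, so files
# with multiple suites are handled too; "passed" is derived by subtraction and the row is
# emitted in the pipe-separated layout the report job expects. For a hypothetical
# <testsuite tests="10" failures="1" errors="0" skipped="2">:
#   passed=$((10 - 1 - 0 - 2))   # 7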
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-requirement-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
Requirement-input-tests-report: | ||
needs: run-requirement-tests | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-requirement-tests.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-requirement* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-requirement*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
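# Each test job uploaded a one-line job_summary.txt shaped as a markdown table row; this
# step prints the header once and concatenates every downloaded row, so the run's summary
# page renders a single table along the lines of (illustrative values):
#   | Job | Total Tests | Passed Tests | ... | Report Link |
#   splunk 9.2.0 |12 |11 |1 |0 |0 |https://github.com/.../runs/...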
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-requirement* | ||
run-ui-tests: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.ui == 'true' && needs.setup-workflow.outputs.execute-ui-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
browser: [ "chrome" ] | ||
vendor-version: ${{ fromJson(needs.meta.outputs.matrix_supportedUIVendors) }} | ||
marker: ${{ fromJson(inputs.ui_marker) }} | ||
openssl3_splunk: [ false ] | ||
include: | ||
- splunk: ${{ fromJson(needs.meta.outputs.openssl3_splunk) }} | ||
browser: "chrome" | ||
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
TEST_TYPE: "ui" | ||
TEST_ARGS: "--browser ${{ matrix.browser }}" | ||
TEST_BROWSER: ${{ matrix.browser }} | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step marks the checkout as a safe directory so the later test-reporter stage does not fail with git's "dubious ownership" error
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}-${{ matrix.browser }}} | ||
JOB_NAME=${JOB_NAME//[_.:]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: create test argument | ||
id: create-test-arg | ||
shell: bash | ||
run: | | ||
TEST_ARG_M="" | ||
EMPTY_MARKER="[]" | ||
if [[ "${{ inputs.ui_marker }}" != "$EMPTY_MARKER" ]]; then | ||
TEST_ARG_M="-m" | ||
fi | ||
echo "test-arg=$TEST_ARG_M" >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: ${{ env.TEST_ARGS }} ${{ steps.create-test-arg.outputs.test-arg }} ${{ matrix.marker }} | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
vendor-version: ${{ matrix.vendor-version.image }} | ||
sc4s-version: "No" | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
test-browser: ${{ env.TEST_BROWSER }} | ||
- name: Read secrets from AWS Secrets Manager again into environment variables in case of credential rotation
id: update-argo-token | ||
if: ${{ !cancelled() }} | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted" ; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-${{ env.TEST_TYPE }}-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}-${{ matrix.browser }}-${{ matrix.vendor-version.image }}-${{ matrix.marker }}-artifact | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
UI-tests-report: | ||
needs: run-ui-tests | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-ui-tests.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-ui* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-ui-*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-ui* | ||
run-modinput-tests: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.modinput_functional == 'true' && needs.setup-workflow.outputs.execute-modinput-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
modinput-type: [ "modinput_functional" ] | ||
vendor-version: ${{ fromJson(needs.meta.outputs.matrix_supportedModinputFunctionalVendors) }} | ||
marker: ${{ fromJson(inputs.marker) }} | ||
openssl3_splunk: [ false ] | ||
include: | ||
- splunk: ${{ fromJson(needs.meta.outputs.openssl3_splunk) }} | ||
modinput-type: [ "modinput_functional" ] | ||
openssl3_splunk: true | ||
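# In a matrix "include", each key takes a scalar value that either extends a matching
# combination or adds a new one; this entry adds a single modinput run against the
# OpenSSL 3 Splunk build on top of the supported-Splunk matrix. Shape of the expansion,
# with illustrative versions:
#   matrix: { splunk: [{version: "9.2.1"}], ... }
#   include: [{ splunk: {version: "9.4.0-openssl3"}, openssl3_splunk: true }]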
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
TEST_TYPE: "modinput_functional" | ||
TEST_ARGS: "" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step marks the checkout as a safe directory so the later test-reporter stage does not fail with git's "dubious ownership" error
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} | ||
JOB_NAME=${JOB_NAME//[_.]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: create test argument | ||
id: create-test-arg | ||
shell: bash | ||
run: | | ||
TEST_ARG_M="" | ||
EMPTY_MARKER="[]" | ||
if [[ "${{ inputs.marker }}" != "$EMPTY_MARKER" ]]; then | ||
TEST_ARG_M="-m" | ||
fi | ||
echo "test-arg=$TEST_ARG_M" >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: ${{ env.TEST_ARGS }} ${{ steps.create-test-arg.outputs.test-arg }} ${{ matrix.marker }} | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
vendor-version: ${{ matrix.vendor-version.image }} | ||
sc4s-version: "No" | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
- name: Read secrets from AWS Secrets Manager again into environment variables in case of credential rotation
id: update-argo-token | ||
if: ${{ !cancelled() }} | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.marker }} ${{ matrix.vendor-version.image }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-${{ env.TEST_TYPE }}-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}-${{ matrix.vendor-version.image }}-${{ matrix.marker }}-artifact | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
Modinput-tests-report: | ||
needs: run-modinput-tests | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-modinput-tests.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-modinput* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-modinput*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-modinput* | ||
run-scripted-input-tests-full-matrix: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.scripted_inputs == 'true' && needs.setup-workflow.outputs.execute-scripted_inputs-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
os: ${{ fromJson(inputs.scripted-inputs-os-list) }} | ||
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
TEST_TYPE: "scripted_inputs" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step marks the checkout as a safe directory so the later test-reporter stage does not fail with git's "dubious ownership" error
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} | ||
JOB_NAME=${JOB_NAME//[_.]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: get os name and version | ||
id: os-name-version | ||
shell: bash | ||
run: | | ||
OS_NAME_VERSION=${{ matrix.os }} | ||
# shellcheck disable=SC2206 | ||
OS_NAME_VERSION=(${OS_NAME_VERSION//:/ }) | ||
OS_NAME=${OS_NAME_VERSION[0]} | ||
OS_VERSION=${OS_NAME_VERSION[1]} | ||
{ | ||
echo "os-name=$OS_NAME" | ||
echo "os-version=$OS_VERSION" | ||
} >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: "--hostname=spl --os-name=${{ steps.os-name-version.outputs.os-name }} --os-version=${{ steps.os-name-version.outputs.os-version }} -m script_input" | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
sc4s-version: "No" | ||
os-name: ${{ steps.os-name-version.outputs.os-name }} | ||
os-version: ${{ steps.os-name-version.outputs.os-version }} | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-${{ env.TEST_TYPE }}-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}-${{ steps.os-name-version.outputs.os-name }}-${{ steps.os-name-version.outputs.os-version }} | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
scripted-input-tests-report: | ||
needs: run-scripted-input-tests-full-matrix | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-scripted-input-tests-full-matrix.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-scripted* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-scripted*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-scripted* | ||
pre-publish: | ||
if: ${{ !cancelled() && needs.validate-custom-version.result == 'success' }} | ||
# The following line names this job 'pre-publish' when the PR targets the main branch
# and 'pre-publish-not_main_pr' otherwise. This avoids the confusion caused by GitHub Actions
# treating pre-publish the same for push-to-develop and pull-request-to-main events.
name: ${{ github.event_name == 'pull_request' && github.base_ref == 'main' && 'pre-publish' || 'pre-publish-not_main_pr' }}
needs: | ||
- validate-custom-version | ||
- meta | ||
- compliance-copyrights | ||
- lint | ||
- review_secrets | ||
- semgrep | ||
- build | ||
- test-inventory | ||
- run-unit-tests | ||
- appinspect | ||
- setup | ||
- run-knowledge-tests | ||
- run-modinput-tests | ||
- run-ui-tests | ||
- validate-pr-title | ||
runs-on: ubuntu-latest | ||
env: | ||
NEEDS: ${{ toJson(needs) }} | ||
steps: | ||
- name: check if tests have passed or skipped | ||
id: check | ||
shell: bash | ||
run: | | ||
RUN_PUBLISH=$(echo "$NEEDS" | jq ".[] | select( ( .result != \"skipped\" ) and .result != \"success\" ) | length == 0") | ||
if [[ "$RUN_PUBLISH" != *'false'* ]] | ||
then | ||
echo "run-publish=true" >> "$GITHUB_OUTPUT" | ||
else | ||
echo "run-publish=false" >> "$GITHUB_OUTPUT" | ||
fi | ||
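# toJson(needs) exposes every upstream job's result; the jq filter selects any dependency
# that neither skipped nor succeeded, so its output contains "false" exactly when at least
# one of them failed or was cancelled. For example, with
#   NEEDS='{"build":{"result":"success"},"lint":{"result":"failure"}}'
# the filter prints "false" and run-publish is set to false.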
- name: exit without publish | ||
if: ${{ steps.check.outputs.run-publish == 'false' || ( github.event.action == 'labeled') }} | ||
run: | | ||
echo "Expand check step to see which job has failed pre-publish step." | ||
exit 1 | ||
publish: | ||
if: | | ||
(!cancelled() && needs.pre-publish.result == 'success' && github.event_name != 'pull_request' && github.event_name != 'schedule') || | ||
(!cancelled() && needs.pre-publish.result == 'success' && github.event.inputs.custom-version != '' && needs.validate-custom-version.result == 'success') | ||
name: ${{ github.event.inputs.custom-version == '' && 'publish' || 'publish-custom-version' }} | ||
needs: | ||
- pre-publish | ||
- validate-custom-version | ||
runs-on: ubuntu-latest | ||
permissions: | ||
contents: write | ||
packages: read | ||
pull-requests: read | ||
statuses: write | ||
steps: | ||
- name: Checkout | ||
uses: actions/checkout@v4 | ||
with: | ||
submodules: false | ||
persist-credentials: false | ||
- name: Semantic Release | ||
if: ${{ github.event.inputs.custom-version == '' }} | ||
id: semantic | ||
uses: splunk/[email protected] | ||
env: | ||
GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }} | ||
with: | ||
git_committer_name: ${{ secrets.SA_GH_USER_NAME }} | ||
git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }} | ||
gpg_private_key: ${{ secrets.SA_GPG_PRIVATE_KEY }} | ||
passphrase: ${{ secrets.SA_GPG_PASSPHRASE }} | ||
- name: Release custom version | ||
if: ${{ github.event.inputs.custom-version != '' }} | ||
id: custom | ||
uses: "softprops/action-gh-release@v2" | ||
with: | ||
token: "${{ secrets.GH_TOKEN_ADMIN }}" | ||
tag_name: v${{ github.event.inputs.custom-version }} | ||
target_commitish: "${{github.ref_name}}" | ||
make_latest: false | ||
- name: Download package-deployment | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
uses: actions/download-artifact@v4 | ||
id: download-package-deployment | ||
with: | ||
name: package-deployment | ||
path: download/artifacts/ | ||
- name: Download package-splunkbase | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
uses: actions/download-artifact@v4 | ||
id: download-package-splunkbase | ||
with: | ||
name: package-splunkbase | ||
path: download/artifacts/deployment | ||
- name: Download cim-compliance-report | ||
id: download-cim-compliance-report | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
continue-on-error: true | ||
uses: actions/download-artifact@v4 | ||
with: | ||
name: cim-compliance-report | ||
path: download/artifacts/deployment | ||
- name: List of assets | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true'|| steps.custom.outputs.upload_url != '' }} | ||
run: | | ||
ls -la ${{ steps.download-package-splunkbase.outputs.download-path }} | ||
- name: Upload assets to release | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
uses: svenstaro/upload-release-action@v2 | ||
with: | ||
repo_token: ${{ github.token }} | ||
file: ${{ steps.download-package-splunkbase.outputs.download-path }}/* | ||
overwrite: true | ||
file_glob: true | ||
tag: v${{ github.event.inputs.custom-version != '' && github.event.inputs.custom-version || steps.semantic.outputs.new_release_version }} | ||
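# GitHub expressions have no ternary operator, so `cond && a || b` is the common substitute
# (safe here because both result branches are non-empty strings): the tag becomes
# v<custom-version> for user-triggered custom releases and v<new_release_version> from
# semantic-release otherwise, e.g. custom-version "1.2.3" -> tag "v1.2.3".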
description: 'Version of release in the form of "x.x.x" string, specified by user instead of automatically generated semantic release' | ||
type: string | ||
default: "" | ||
execute-tests-on-push-to-release: | ||
required: false | ||
description: 'Flag to run all tests on push to release branch' | ||
type: string | ||
default: 'false' | ||
k8s-environment: | ||
required: false | ||
description: Specifies which environment to use for k8s testing. ["production", "staging"] | ||
type: string | ||
default: "production" | ||
k8s-manifests-branch: | ||
required: false | ||
description: "branch for k8s manifests to run the tests on" | ||
type: string | ||
default: "v3.0.8" | ||
scripted-inputs-os-list: | ||
required: false | ||
description: "list of OS used for scripted input tests" | ||
type: string | ||
default: >- | ||
["ubuntu:14.04", "ubuntu:16.04","ubuntu:18.04","ubuntu:22.04", "ubuntu:24.04", "redhat:8.4", "redhat:8.5", "redhat:8.6", "redhat:8.8"] | ||
secrets: | ||
GH_TOKEN_ADMIN: | ||
description: Github admin token | ||
required: true | ||
SEMGREP_PUBLISH_TOKEN: | ||
description: Semgrep token | ||
required: true | ||
AWS_ACCESS_KEY_ID: | ||
description: AWS access key id | ||
required: true | ||
AWS_DEFAULT_REGION: | ||
description: AWS default region | ||
required: true | ||
AWS_SECRET_ACCESS_KEY: | ||
description: AWS secret access key | ||
required: true | ||
OTHER_TA_REQUIRED_CONFIGS: | ||
description: other required configs | ||
required: true | ||
FOSSA_API_KEY: | ||
description: API token for FOSSA app | ||
required: true | ||
SA_GH_USER_NAME: | ||
description: GPG signature username | ||
required: true | ||
SA_GH_USER_EMAIL: | ||
description: GPG signature user email | ||
required: true | ||
SA_GPG_PRIVATE_KEY: | ||
description: GPG signature private key | ||
required: true | ||
SA_GPG_PASSPHRASE: | ||
description: GPG signature passphrase | ||
required: true | ||
SPL_COM_USER: | ||
description: username to splunk.com | ||
required: true | ||
SPL_COM_PASSWORD: | ||
description: password to splunk.com | ||
required: true | ||
permissions: | ||
contents: read | ||
packages: read | ||
concurrency: | ||
group: ${{ github.head_ref || github.run_id }} | ||
cancel-in-progress: true | ||
jobs: | ||
validate-custom-version: | ||
runs-on: ubuntu-latest | ||
if: ${{ github.event.inputs.custom-version != '' }} | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- name: Validate custom version | ||
run: | | ||
if [[ ! ${{ github.event.inputs.custom-version }} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then | ||
echo "Invalid custom version provided. Please provide a valid semver version." | ||
exit 1 | ||
fi | ||
git fetch --tags | ||
if [ "$(git tag -l 'v${{ github.event.inputs.custom-version }}')" ]; then | ||
echo "The provided version already exists. Please provide a unique version." | ||
exit 1 | ||
fi | ||
setup-workflow: | ||
runs-on: ubuntu-latest | ||
outputs: | ||
execute-knowledge-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_knowledge_labeled }} | ||
execute-ui-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_ui_labeled }} | ||
execute-modinput-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_modinput_functional_labeled }} | ||
execute-scripted_inputs-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_scripted_inputs_labeled }} | ||
execute-requirement-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_requirement_test_labeled }} | ||
s3_bucket_k8s: ${{ steps.k8s-environment.outputs.s3_bucket }} | ||
argo_server_domain_k8s: ${{ steps.k8s-environment.outputs.argo_server_domain }} | ||
argo_token_secret_id_k8s: ${{ steps.k8s-environment.outputs.argo_token_secret_id }} | ||
steps: | ||
- name: set k8s environment | ||
id: k8s-environment | ||
run: | | ||
if [[ ${{ inputs.k8s-environment }} == 'staging' ]]; then | ||
echo "setting up argo variables for staging" | ||
{ | ||
echo "s3_bucket=ta-staging-artifacts" | ||
echo "argo_server_domain=argo.staging.wfe.splgdi.com" | ||
echo "argo_token_secret_id=ta-staging-github-workflow-automation-token" | ||
} >> "$GITHUB_OUTPUT" | ||
else | ||
echo "setting up argo variables for production" | ||
{ | ||
echo "s3_bucket=ta-production-artifacts" | ||
echo "argo_server_domain=argo.wfe.splgdi.com" | ||
echo "argo_token_secret_id=ta-github-workflow-automation-token" | ||
} >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: configure tests based on labels | ||
id: configure-tests-on-labels | ||
run: | | ||
set +e | ||
declare -A EXECUTE_LABELED | ||
TESTSET=("execute_knowledge" "execute_ui" "execute_modinput_functional" "execute_scripted_inputs" "execute_requirement_test") | ||
for test_type in "${TESTSET[@]}"; do | ||
EXECUTE_LABELED["$test_type"]="false" | ||
done | ||
case "${{ github.event_name }}" in | ||
"pull_request") | ||
labels=$(echo '${{ toJSON(github.event.pull_request.labels) }}' | jq -r '.[] | .name') | ||
if ${{ github.base_ref == 'main' }} && ${{ contains(github.event.pull_request.labels.*.name, 'use_labels') }}; then | ||
for test_type in "${TESTSET[@]}"; do | ||
if [[ "$labels" =~ $test_type ]]; then | ||
EXECUTE_LABELED["$test_type"]="true" | ||
fi | ||
done | ||
elif ${{ github.base_ref == 'main' }} || ${{ contains(github.event.pull_request.labels.*.name, 'execute_all_tests') }}; then | ||
for test_type in "${TESTSET[@]}"; do | ||
EXECUTE_LABELED["$test_type"]="true" | ||
done | ||
else | ||
for test_type in "${TESTSET[@]}"; do | ||
if [[ "$labels" =~ $test_type ]]; then | ||
EXECUTE_LABELED["$test_type"]="true" | ||
fi | ||
done | ||
fi | ||
;; | ||
"push") | ||
if ${{ github.ref_name == 'main' }} || ${{ github.ref_name == 'develop' }} || | ||
${{ startsWith(github.ref_name, 'release/') && inputs.execute-tests-on-push-to-release == 'true' }} ; then | ||
for test_type in "${TESTSET[@]}"; do | ||
EXECUTE_LABELED["$test_type"]="true" | ||
done | ||
fi | ||
;; | ||
"schedule") | ||
for test_type in "${TESTSET[@]}"; do | ||
EXECUTE_LABELED["$test_type"]="true" | ||
done | ||
;; | ||
"workflow_dispatch") | ||
if ${{ inputs.custom-version != '' }} ; then | ||
for test_type in "${TESTSET[@]}"; do | ||
EXECUTE_LABELED["$test_type"]="true" | ||
done | ||
fi | ||
;; | ||
*) | ||
echo "No tests were labeled for execution!" | ||
;; | ||
esac | ||
echo "Tests to execute based on labels:" | ||
for test_type in "${TESTSET[@]}"; do | ||
echo "$test_type""_labeled=${EXECUTE_LABELED["$test_type"]}" >> "$GITHUB_OUTPUT" | ||
echo "$test_type""_labeled: ${EXECUTE_LABELED["$test_type"]}" | ||
done | ||
validate-pr-title: | ||
name: Validate PR title | ||
if: github.event_name == 'pull_request' | ||
runs-on: ubuntu-latest | ||
permissions: | ||
contents: read | ||
packages: read | ||
pull-requests: read | ||
statuses: write | ||
steps: | ||
- uses: amannn/[email protected] | ||
with: | ||
wip: true | ||
validateSingleCommit: true | ||
env: | ||
GITHUB_TOKEN: ${{ github.token }} | ||
meta: | ||
runs-on: ubuntu-latest | ||
outputs: | ||
matrix_supportedSplunk: ${{ steps.matrix.outputs.supportedSplunk }} | ||
matrix_latestSplunk: ${{ steps.matrix.outputs.latestSplunk }} | ||
matrix_supportedSC4S: ${{ steps.matrix.outputs.supportedSC4S }} | ||
matrix_supportedModinputFunctionalVendors: ${{ steps.matrix.outputs.supportedModinputFunctionalVendors }} | ||
matrix_supportedUIVendors: ${{ steps.matrix.outputs.supportedUIVendors }} | ||
permissions: | ||
contents: write | ||
packages: read | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: false | ||
persist-credentials: false | ||
- id: matrix | ||
uses: splunk/[email protected] | ||
- name: job summary | ||
run: | | ||
splunk_version_list=$(echo '${{ steps.matrix.outputs.supportedSplunk }}' | jq -r '.[].version') | ||
sc4s_version_list=$(echo '${{ steps.matrix.outputs.supportedSC4S }}' | jq -r '.[].version') | ||
echo -e "## Summary of Versions Used\n- **Splunk versions used:** (${splunk_version_list})\n- **SC4S versions used:** (${sc4s_version_list})\n- Browser: Chrome" >> "$GITHUB_STEP_SUMMARY" | ||
fossa-scan: | ||
runs-on: ubuntu-latest | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- name: run fossa analyze and create report | ||
id: fossa-scan | ||
run: | | ||
curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash | ||
fossa analyze --debug 2>&1 | tee /tmp/fossa_analyze_output.txt | ||
exit_code="${PIPESTATUS[0]}" | ||
FOSSA_REPORT_URL=$(grep -o 'https://app.fossa.com[^ ]*' /tmp/fossa_analyze_output.txt || true) | ||
echo "url=$FOSSA_REPORT_URL" | ||
echo "FOSSA_REPORT_URL=$FOSSA_REPORT_URL" >> "$GITHUB_OUTPUT" | ||
fossa report attribution --format text --timeout 600 > /tmp/THIRDPARTY | ||
exit "$exit_code" | ||
env: | ||
FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }} | ||
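# PIPESTATUS note: a plain `fossa analyze | tee` would surface tee's exit | ||
# status, so the step captures "${PIPESTATUS[0]}" (fossa's own exit code) and | ||
# re-exits with it only after the report URL and the THIRDPARTY attribution | ||
# file have been produced. | ||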
- name: upload THIRDPARTY file | ||
uses: actions/upload-artifact@v4 | ||
with: | ||
name: THIRDPARTY | ||
path: /tmp/THIRDPARTY | ||
- name: job summary | ||
if: success() || failure() | ||
run: | | ||
echo "FOSSA Report: ${{ steps.fossa-scan.outputs.FOSSA_REPORT_URL }}" >> "$GITHUB_STEP_SUMMARY" | ||
fossa-test: | ||
continue-on-error: true | ||
runs-on: ubuntu-latest | ||
needs: | ||
- fossa-scan | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- name: run fossa test | ||
run: | | ||
curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash | ||
fossa test --debug | ||
env: | ||
FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }} | ||
compliance-copyrights: | ||
name: compliance-copyrights | ||
runs-on: ubuntu-latest | ||
steps: | ||
- name: Checkout | ||
uses: actions/checkout@v4 | ||
- name: REUSE Compliance Check | ||
uses: fsfe/[email protected] | ||
lint: | ||
runs-on: ubuntu-latest | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- uses: actions/setup-python@v5 | ||
with: | ||
python-version: "3.7" | ||
- uses: pre-commit/[email protected] | ||
review_secrets: | ||
name: security-detect-secrets | ||
runs-on: ubuntu-latest | ||
steps: | ||
- name: Checkout | ||
if: github.event_name != 'pull_request' | ||
uses: actions/checkout@v4 | ||
with: | ||
submodules: false | ||
fetch-depth: "0" | ||
- name: Checkout for PR | ||
if: github.event_name == 'pull_request' | ||
uses: actions/checkout@v4 | ||
with: | ||
submodules: false | ||
fetch-depth: "0" | ||
ref: ${{ github.head_ref }} | ||
- name: Secret Scanning Trufflehog | ||
uses: trufflesecurity/[email protected] | ||
with: | ||
extra_args: -x .github/workflows/exclude-patterns.txt --json --only-verified | ||
version: 3.77.0 | ||
semgrep: | ||
uses: splunk/sast-scanning/.github/workflows/sast-scan.yml@main | ||
secrets: | ||
SEMGREP_KEY: ${{ secrets.SEMGREP_PUBLISH_TOKEN }} | ||
test-inventory: | ||
runs-on: ubuntu-latest | ||
# Map a step output to a job output | ||
outputs: | ||
unit: ${{ steps.testset.outputs.unit }} | ||
knowledge: ${{ steps.testset.outputs.knowledge }} | ||
ui: ${{ steps.testset.outputs.ui }} | ||
modinput_functional: ${{ steps.testset.outputs.modinput_functional }} | ||
requirement_test: ${{ steps.testset.outputs.requirement_test }} | ||
scripted_inputs: ${{ steps.testset.outputs.scripted_inputs }} | ||
ucc_modinput_functional: ${{ steps.modinput-version.outputs.ucc_modinput_tests }} | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- id: testset | ||
name: Check available test types | ||
run: | | ||
find tests -mindepth 1 -maxdepth 1 -type d | sed 's|^tests/||g' | while read -r TESTSET; do echo "$TESTSET=true" >> "$GITHUB_OUTPUT"; echo "$TESTSET::true"; done | ||
- id: modinput-version | ||
name: Check modinput tests version | ||
run: | | ||
CENTAURS_MODINPUT_TESTS_CHECK_DIR="tests/modinput_functional/centaurs" | ||
ucc_modinput_tests="true" | ||
if [ -d "$CENTAURS_MODINPUT_TESTS_CHECK_DIR" ]; then | ||
ucc_modinput_tests="false" | ||
fi | ||
echo "ucc_modinput_tests=$ucc_modinput_tests" >> "$GITHUB_OUTPUT" | ||
run-unit-tests: | ||
name: test-unit-python3-${{ matrix.python-version }} | ||
if: ${{ needs.test-inventory.outputs.unit == 'true' }} | ||
runs-on: ubuntu-latest | ||
needs: | ||
- test-inventory | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
python-version: | ||
- "3.7" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- uses: actions/setup-python@v5 | ||
with: | ||
python-version: ${{ matrix.python-version }} | ||
- name: Setup addon | ||
run: | | ||
if [ -f "poetry.lock" ] | ||
then | ||
mkdir -p package/lib || true | ||
python${{ matrix.python-version }} -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0 | ||
poetry lock --check | ||
poetry export --without-hashes -o package/lib/requirements.txt | ||
poetry export --without-hashes --with dev -o requirements_dev.txt | ||
fi | ||
if [ ! -f requirements_dev.txt ]; then echo "no requirements"; exit 0; fi | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected] | ||
poetry install --with dev | ||
- name: Create directories | ||
run: | | ||
mkdir -p /opt/splunk/var/log/splunk | ||
chmod -R 777 /opt/splunk/var/log/splunk | ||
- name: Copy pytest ini | ||
run: cp tests/unit/pytest-ci.ini pytest.ini | ||
- name: Run Pytest with coverage | ||
run: poetry run pytest --cov=./ --cov-report=xml --junitxml=test-results/junit.xml tests/unit | ||
- name: Job summary | ||
continue-on-error: true | ||
run: | | ||
sudo apt-get install -y libxml2-utils | ||
junit_xml_file=$(find "test-results" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY" | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- uses: actions/upload-artifact@v4 | ||
if: success() || failure() | ||
with: | ||
name: test-results-unit-python_${{ matrix.python-version }} | ||
path: test-results/* | ||
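# Summary arithmetic: xmllint sums the tests/failures/errors/skipped | ||
# attributes across all <testsuite> elements and passed is derived as | ||
# total - failures - errors - skipped, e.g. 100 tests with 2 failures, 1 error | ||
# and 3 skipped reports 94 passed. | ||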
run-unit-tests-3_9: | ||
name: test-unit-python3-${{ matrix.python-version }} | ||
if: ${{ needs.test-inventory.outputs.unit == 'true' }} | ||
runs-on: ubuntu-latest | ||
needs: | ||
- test-inventory | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
python-version: | ||
- "3.9" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- uses: actions/setup-python@v5 | ||
with: | ||
python-version: ${{ matrix.python-version }} | ||
- name: Setup addon | ||
run: | | ||
if [ -f "poetry.lock" ] | ||
then | ||
mkdir -p package/lib || true | ||
python${{ matrix.python-version }} -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0 | ||
poetry lock --check | ||
poetry export --without-hashes -o package/lib/requirements.txt | ||
poetry export --without-hashes --with dev -o requirements_dev.txt | ||
fi | ||
if [ ! -f requirements_dev.txt ]; then echo "no requirements"; exit 0; fi | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected] | ||
poetry install --with dev | ||
- name: Create directories | ||
run: | | ||
mkdir -p /opt/splunk/var/log/splunk | ||
chmod -R 777 /opt/splunk/var/log/splunk | ||
- name: Copy pytest ini | ||
run: cp tests/unit/pytest-ci.ini pytest.ini | ||
- name: Run Pytest with coverage | ||
run: poetry run pytest --cov=./ --cov-report=xml --junitxml=test-results/junit.xml tests/unit | ||
- name: Job summary | ||
continue-on-error: true | ||
run: | | ||
sudo apt-get install -y libxml2-utils | ||
junit_xml_file=$(find "test-results" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY" | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- uses: actions/upload-artifact@v4 | ||
if: success() || failure() | ||
with: | ||
name: test-results-unit-python_${{ matrix.python-version }} | ||
path: test-results/* | ||
build: | ||
runs-on: ubuntu-latest | ||
needs: | ||
- validate-custom-version | ||
- setup-workflow | ||
- test-inventory | ||
- meta | ||
- compliance-copyrights | ||
- lint | ||
- review_secrets | ||
- semgrep | ||
- run-unit-tests | ||
- fossa-scan | ||
if: ${{ !cancelled() && (needs.run-unit-tests.result == 'success' || needs.run-unit-tests.result == 'skipped') && (needs.validate-custom-version.result == 'success' || needs.validate-custom-version.result == 'skipped') }} | ||
outputs: | ||
buildname: ${{ steps.buildupload.outputs.name }} | ||
permissions: | ||
contents: write | ||
packages: read | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
# Very important: semantic-release won't trigger a tagged | ||
# build if this is not set to false | ||
persist-credentials: false | ||
- name: Setup python | ||
uses: actions/setup-python@v5 | ||
with: | ||
python-version: 3.7 | ||
- name: create requirements file for pip | ||
run: | | ||
if [ -f "poetry.lock" ] | ||
then | ||
echo " poetry.lock found " | ||
python3.7 -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0 | ||
poetry export --without-hashes -o requirements.txt | ||
if [ "$(grep -cve '^\s*$' requirements.txt)" -ne 0 ] | ||
then | ||
echo "Prod dependencies were found, creating package/lib folder" | ||
mkdir -p package/lib || true | ||
mv requirements.txt package/lib | ||
else | ||
echo "No prod dependencies were found" | ||
rm requirements.txt | ||
fi | ||
poetry export --without-hashes --with dev -o requirements_dev.txt | ||
cat requirements_dev.txt | ||
fi | ||
- name: Get pip cache dir | ||
id: pip-cache | ||
run: | | ||
echo "dir=$(pip cache dir)" >> "$GITHUB_OUTPUT" | ||
- name: Run Check there are libraries to scan | ||
id: checklibs | ||
run: if [ -f requirements_dev.txt ]; then echo "ENABLED=true" >> "$GITHUB_OUTPUT"; fi | ||
- name: pip cache | ||
if: ${{ steps.checklibs.outputs.ENABLED == 'true' }} | ||
uses: actions/cache@v4 | ||
with: | ||
path: ${{ steps.pip-cache.outputs.dir }} | ||
key: ${{ runner.os }}-pip-${{ hashFiles('requirements_dev.txt') }} | ||
restore-keys: | | ||
${{ runner.os }}-pip- | ||
- name: Install deps | ||
if: ${{ steps.checklibs.outputs.ENABLED == 'true' }} | ||
run: | | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected] | ||
pip install -r requirements_dev.txt | ||
- name: Semantic Release Get Next | ||
id: semantic | ||
if: github.event_name != 'pull_request' | ||
uses: splunk/[email protected] | ||
with: | ||
dry_run: true | ||
git_committer_name: ${{ secrets.SA_GH_USER_NAME }} | ||
git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }} | ||
gpg_private_key: ${{ secrets.SA_GPG_PRIVATE_KEY }} | ||
passphrase: ${{ secrets.SA_GPG_PASSPHRASE }} | ||
env: | ||
GITHUB_TOKEN: ${{ github.token }} | ||
- name: Determine the version to build | ||
id: BuildVersion | ||
run: | | ||
INPUT_SEMVER="${{ github.event.inputs.custom-version != '' && github.event.inputs.custom-version || steps.semantic.outputs.new_release_version }}" | ||
echo "Initial semver ${INPUT_SEMVER}" | ||
INPUT_PRNUMBER="${{ github.event.number }}" | ||
SEMVER_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+$' | ||
BETA_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$' | ||
echo "working with version $INPUT_SEMVER" | ||
if [[ $INPUT_SEMVER =~ $SEMVER_REGEX ]]; | ||
then | ||
echo "using provided semver" | ||
VERSION=$INPUT_SEMVER | ||
elif [[ $INPUT_SEMVER =~ $BETA_REGEX ]]; | ||
then | ||
VERSION=$(echo "$INPUT_SEMVER" | awk '{gsub(/-beta\./, "-B");print}') | ||
else | ||
if [[ $GITHUB_EVENT_NAME != 'pull_request' ]]; | ||
then | ||
echo "this is not a release build and not a PR, versioning with the run ID" | ||
VERSION=0.0.${GITHUB_RUN_ID} | ||
else | ||
echo "this is not a release build and is a PR, versioning with the PR number and run ID" | ||
VERSION=0.${INPUT_PRNUMBER}.${GITHUB_RUN_ID} | ||
fi | ||
fi | ||
FINALVERSION="${VERSION#v}" | ||
echo "Version to build is ${FINALVERSION}" | ||
echo "VERSION=${FINALVERSION}" >> "$GITHUB_OUTPUT" | ||
- name: Download THIRDPARTY | ||
if: github.event_name != 'pull_request' && github.event_name != 'schedule' | ||
uses: actions/download-artifact@v4 | ||
with: | ||
name: THIRDPARTY | ||
- name: Download THIRDPARTY (Optional for PR and schedule) | ||
if: github.event_name == 'pull_request' || github.event_name == 'schedule' | ||
continue-on-error: true | ||
uses: actions/download-artifact@v4 | ||
with: | ||
name: THIRDPARTY | ||
- name: Update Notices | ||
run: | | ||
cp -f THIRDPARTY package/THIRDPARTY || echo "THIRDPARTY file not found (allowed for PR and schedule)" | ||
- name: Build Package | ||
id: uccgen | ||
uses: splunk/addonfactory-ucc-generator-action@v2 | ||
with: | ||
version: ${{ steps.BuildVersion.outputs.VERSION }} | ||
- name: Slim Package | ||
id: slim | ||
run: | | ||
pip install splunk-packaging-toolkit | ||
pip install semantic-version==2.6.0 | ||
INPUT_SOURCE=${{ steps.uccgen.outputs.OUTPUT }} | ||
SOURCE_REGEX='^.*/$' | ||
if [[ $INPUT_SOURCE =~ $SOURCE_REGEX ]];then | ||
echo "Removing trailing / from INPUT_SOURCE because slim is picky" | ||
INPUT_SOURCE="${INPUT_SOURCE%/}" | ||
fi | ||
slim generate-manifest "${INPUT_SOURCE}" --update >/tmp/app.manifest || true | ||
cp /tmp/app.manifest "${INPUT_SOURCE}"/app.manifest | ||
mkdir -p build/package/splunkbase | ||
mkdir -p build/package/deployment | ||
slim package -o build/package/splunkbase "${INPUT_SOURCE}" | ||
for f in build/package/splunkbase/*.tar.gz; do | ||
n=$(echo "${f}" | awk '{gsub("-[0-9]+.[0-9]+.[0-9]+-[a-f0-9]+-?", "");print}' | sed 's/.tar.gz/.spl/') | ||
mv "${f}" "${n}" | ||
done | ||
PACKAGE=$(ls build/package/splunkbase/*) | ||
slim partition "${PACKAGE}" -o build/package/deployment/ || true | ||
for f in build/package/deployment/*.tar.gz; do | ||
n=$(echo "${f}" | awk '{gsub("-[0-9]+.[0-9]+.[0-9]+-[a-f0-9]+-?", "");print}' | sed 's/.tar.gz/.spl/') | ||
mv "${f}" "${n}" | ||
done | ||
slim validate "${PACKAGE}" | ||
chmod -R +r build | ||
echo "OUTPUT=$PACKAGE" >> "$GITHUB_OUTPUT" | ||
if: always() | ||
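# Rename sketch for the loops above: slim emits <app>-<version>[-<suffix>].tar.gz | ||
# archives; the awk gsub strips the version/suffix portion and sed swaps | ||
# .tar.gz for .spl, so a hypothetical Splunk_TA_example-1.2.3-abc123.tar.gz | ||
# becomes Splunk_TA_example.spl. | ||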
- name: artifact-openapi | ||
uses: actions/upload-artifact@v4 | ||
with: | ||
name: artifact-openapi | ||
path: ${{ github.workspace }}/${{ steps.uccgen.outputs.OUTPUT }}/appserver/static/openapi.json | ||
if: ${{ !cancelled() && needs.test-inventory.outputs.ucc_modinput_functional == 'true' && needs.test-inventory.outputs.modinput_functional == 'true' }} | ||
- name: artifact-splunk-base | ||
uses: actions/upload-artifact@v4 | ||
with: | ||
name: package-splunkbase | ||
path: ${{ steps.slim.outputs.OUTPUT }} | ||
if: ${{ !cancelled() }} | ||
- name: upload-build-to-s3 | ||
id: buildupload | ||
env: | ||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} | ||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
run: | | ||
echo "name=$(basename "${{ steps.slim.outputs.OUTPUT }}")" >> "$GITHUB_OUTPUT" | ||
basename "${{ steps.slim.outputs.OUTPUT }}" | ||
aws s3 cp "${{ steps.slim.outputs.OUTPUT }}" "s3://${{ needs.setup-workflow.outputs.s3_bucket_k8s }}/ta-apps/" | ||
- name: artifact-splunk-parts | ||
uses: actions/upload-artifact@v4 | ||
with: | ||
name: package-deployment | ||
path: build/package/deployment** | ||
if: ${{ !cancelled() }} | ||
build-3_9: | ||
runs-on: ubuntu-latest | ||
needs: | ||
- validate-custom-version | ||
- setup-workflow | ||
- test-inventory | ||
- meta | ||
- compliance-copyrights | ||
- lint | ||
- review_secrets | ||
- semgrep | ||
- run-unit-tests-3_9 | ||
- fossa-scan | ||
if: | | ||
always() && | ||
(needs.run-unit-tests-3_9.result == 'success' || needs.run-unit-tests-3_9.result == 'skipped') && | ||
(needs.validate-custom-version.result == 'success' || needs.validate-custom-version.result == 'skipped') | ||
permissions: | ||
contents: write | ||
packages: read | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
# Very important: semantic-release won't trigger a tagged | ||
# build if this is not set to false | ||
persist-credentials: false | ||
- name: Setup python | ||
uses: actions/setup-python@v5 | ||
with: | ||
python-version: 3.9 | ||
- name: create requirements file for pip | ||
run: | | ||
if [ -f "poetry.lock" ] | ||
then | ||
echo " poetry.lock found " | ||
python3.9 -m pip install poetry==1.5.1 poetry-plugin-export==1.4.0 | ||
poetry export --without-hashes -o requirements.txt | ||
if [ "$(grep -cve '^\s*$' requirements.txt)" -ne 0 ] | ||
then | ||
echo "Prod dependencies were found, creating package/lib folder" | ||
mkdir -p package/lib || true | ||
mv requirements.txt package/lib | ||
else | ||
echo "No prod dependencies were found" | ||
rm requirements.txt | ||
fi | ||
poetry export --without-hashes --with dev -o requirements_dev.txt | ||
cat requirements_dev.txt | ||
fi | ||
- id: pip-cache | ||
run: | | ||
echo "dir=$(pip cache dir)" >> "$GITHUB_OUTPUT" | ||
- name: pip cache | ||
uses: actions/cache@v4 | ||
with: | ||
path: ${{ steps.pip-cache.outputs.dir }} | ||
key: ${{ runner.os }}-pip-python3_9-${{ hashFiles('requirements_dev.txt') }} | ||
restore-keys: | | ||
${{ runner.os }}-pip-python3_9 | ||
- run: | | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf https://github.com | ||
git config --global --add url."https://${{ secrets.GH_TOKEN_ADMIN }}@github.com".insteadOf ssh://[email protected] | ||
pip install -r requirements_dev.txt | ||
- id: semantic | ||
if: github.event_name != 'pull_request' | ||
uses: splunk/[email protected] | ||
with: | ||
dry_run: true | ||
git_committer_name: ${{ secrets.SA_GH_USER_NAME }} | ||
git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }} | ||
gpg_private_key: ${{ secrets.SA_GPG_PRIVATE_KEY }} | ||
passphrase: ${{ secrets.SA_GPG_PASSPHRASE }} | ||
env: | ||
GITHUB_TOKEN: ${{ github.token }} | ||
- id: BuildVersion | ||
run: | | ||
INPUT_SEMVER="${{ github.event.inputs.custom-version != '' && github.event.inputs.custom-version || steps.semantic.outputs.new_release_version }}" | ||
echo "Initial semver ${INPUT_SEMVER}" | ||
INPUT_PRNUMBER="${{ github.event.number }}" | ||
SEMVER_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+$' | ||
BETA_REGEX='^v?[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$' | ||
echo "working with version $INPUT_SEMVER" | ||
if [[ $INPUT_SEMVER =~ $SEMVER_REGEX ]]; | ||
then | ||
echo "using provided semver" | ||
VERSION=$INPUT_SEMVER | ||
elif [[ $INPUT_SEMVER =~ $BETA_REGEX ]]; | ||
then | ||
VERSION=$(echo "$INPUT_SEMVER" | awk '{gsub(/-beta\./, "-B");print}') | ||
else | ||
if [[ $GITHUB_EVENT_NAME != 'pull_request' ]]; | ||
then | ||
echo "this is not a release build and not a PR, versioning with the run ID" | ||
VERSION=0.0.${GITHUB_RUN_ID} | ||
else | ||
echo "this is not a release build and is a PR, versioning with the PR number and run ID" | ||
VERSION=0.${INPUT_PRNUMBER}.${GITHUB_RUN_ID} | ||
fi | ||
fi | ||
FINALVERSION="${VERSION#v}" | ||
echo "Version to build is $FINALVERSION" | ||
echo "VERSION=$FINALVERSION" >> "$GITHUB_OUTPUT" | ||
- id: uccgen | ||
uses: splunk/addonfactory-ucc-generator-action@v2 | ||
with: | ||
version: ${{ steps.BuildVersion.outputs.VERSION }} | ||
run-requirements-unit-tests: | ||
runs-on: ubuntu-latest | ||
needs: | ||
- build | ||
- test-inventory | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.requirement_test == 'true' }} | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- name: Install Python 3 | ||
uses: actions/setup-python@v5 | ||
with: | ||
python-version: 3.7 | ||
- name: run-tests | ||
uses: splunk/[email protected] | ||
with: | ||
input-files: tests/requirement_test/logs | ||
- name: Archive production artifacts | ||
if: ${{ !cancelled() }} | ||
uses: actions/upload-artifact@v4 | ||
with: | ||
name: test-results | ||
path: | | ||
test_*.txt | ||
appinspect: | ||
name: quality-appinspect-${{ matrix.tags }} | ||
needs: build | ||
if: ${{ !cancelled() && needs.build.result == 'success' }} | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
tags: | ||
- "cloud" | ||
- "appapproval" | ||
- "deprecated_feature" | ||
- "developer_guidance" | ||
- "future" | ||
- "self-service" | ||
- "splunk_appinspect" | ||
- "manual" | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- uses: actions/download-artifact@v4 | ||
with: | ||
name: package-splunkbase | ||
path: build/package/ | ||
- name: Scan | ||
uses: splunk/[email protected] | ||
with: | ||
app_path: build/package/ | ||
included_tags: ${{ matrix.tags }} | ||
result_file: appinspect_result_${{ matrix.tags }}.json | ||
- name: upload-appinspect-report | ||
if: ${{ !cancelled() }} | ||
uses: actions/upload-artifact@v4 | ||
with: | ||
name: appinspect_${{ matrix.tags }}_checks.json | ||
path: appinspect_result_${{ matrix.tags }}.json | ||
- name: upload-markdown | ||
if: matrix.tags == 'manual' | ||
uses: actions/upload-artifact@v4 | ||
with: | ||
name: check_markdown | ||
path: | | ||
*_markdown.txt | ||
appinspect-api: | ||
name: appinspect api ${{ matrix.tags }} | ||
needs: build | ||
if: | | ||
!cancelled() && | ||
needs.build.result == 'success' && | ||
( github.base_ref == 'main' || github.ref_name == 'main' ) | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
tags: | ||
- "cloud" | ||
steps: | ||
- uses: actions/checkout@v4 | ||
- uses: actions/download-artifact@v4 | ||
with: | ||
name: package-splunkbase | ||
path: build/package | ||
- name: AppInspect API | ||
uses: splunk/[email protected] | ||
with: | ||
username: ${{ secrets.SPL_COM_USER }} | ||
password: ${{ secrets.SPL_COM_PASSWORD }} | ||
app_path: build/package/ | ||
included_tags: ${{ matrix.tags }} | ||
- uses: actions/upload-artifact@v4 | ||
if: always() | ||
with: | ||
name: appinspect-api-html-report-${{ matrix.tags }} | ||
path: AppInspect_response.html | ||
setup: | ||
needs: | ||
- setup-workflow | ||
- build | ||
- test-inventory | ||
if: ${{ !cancelled() && needs.build.result == 'success' }} | ||
runs-on: ubuntu-latest | ||
outputs: | ||
argo-server: ${{ steps.test-setup.outputs.argo-server }} | ||
argo-http1: ${{ steps.test-setup.outputs.argo-http1 }} | ||
argo-secure: ${{ steps.test-setup.outputs.argo-secure }} | ||
spl-host-suffix: ${{ steps.test-setup.outputs.spl-host-suffix }} | ||
argo-href: "" | ||
argo-base-href: ${{ steps.test-setup.outputs.argo-base-href }} | ||
argo-workflow-tmpl-name: ${{ steps.test-setup.outputs.argo-workflow-tmpl-name }} | ||
argo-cancel-workflow-tmpl-name: ${{ steps.test-setup.outputs.argo-cancel-workflow-tmpl-name }} | ||
k8s-manifests-branch: ${{ steps.test-setup.outputs.k8s-manifests-branch }} | ||
argo-namespace: ${{ steps.test-setup.outputs.argo-namespace }} | ||
addon-name: ${{ steps.test-setup.outputs.addon-name }} | ||
job-name: ${{ steps.test-setup.outputs.job-name }} | ||
labels: ${{ steps.test-setup.outputs.labels }} | ||
addon-upload-path: ${{ steps.test-setup.outputs.addon-upload-path }} | ||
directory-path: ${{ steps.test-setup.outputs.directory-path }} | ||
s3-bucket: ${{ steps.test-setup.outputs.s3-bucket }} | ||
env: | ||
BUILD_NAME: ${{ needs.build.outputs.buildname }} | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
token: ${{ secrets.GH_TOKEN_ADMIN }} | ||
- name: setup for test | ||
id: test-setup | ||
shell: bash | ||
run: | | ||
sudo apt-get install -y crudini | ||
ADDON_NAME=$(crudini --get package/default/app.conf id name | tr '[:lower:]' '[:upper:]') | ||
if [[ -n $(echo "${ADDON_NAME}" | awk -F 'SPLUNK_TA_' '{print $2}') ]]; | ||
then | ||
ADDON_NAME=$(echo "${ADDON_NAME}" | awk -F 'SPLUNK_TA_' '{print $2}') | ||
elif [[ -n $(echo "${ADDON_NAME}" | awk -F '_FOR_SPLUNK' '{print $1}') ]]; | ||
then | ||
ADDON_NAME=$(echo "${ADDON_NAME}" | awk -F '_FOR_SPLUNK' '{print $1}') | ||
fi | ||
JOB_NAME=$(echo "$ADDON_NAME" | tail -c 16)-$(echo "${GITHUB_SHA}" | tail -c 8)-TEST-TYPE-${GITHUB_RUN_ID} | ||
JOB_NAME=${JOB_NAME//[_.]/-} | ||
LABELS="addon-name=${ADDON_NAME}" | ||
ADDON_UPLOAD_PATH="s3://${{ needs.setup-workflow.outputs.s3_bucket_k8s }}/ta-apps/${{ needs.build.outputs.buildname }}" | ||
{ | ||
echo "argo-server=${{ needs.setup-workflow.outputs.argo_server_domain_k8s }}:443" | ||
echo "argo-http1=true" | ||
echo "argo-secure=true" | ||
echo -e "argo-base-href=\'\'" | ||
echo "argo-namespace=workflows" | ||
echo "argo-workflow-tmpl-name=ta-workflow" | ||
echo "argo-cancel-workflow-tmpl-name=cancel-workflow" | ||
echo "directory-path=/tmp" | ||
echo "s3-bucket=${{ needs.setup-workflow.outputs.s3_bucket_k8s }}" | ||
echo "addon-name=\"$ADDON_NAME\"" | ||
echo "job-name=wf-$JOB_NAME" | ||
echo "labels=$LABELS" | ||
echo "addon-upload-path=$ADDON_UPLOAD_PATH" | ||
echo "spl-host-suffix=wfe.splgdi.com" | ||
echo "k8s-manifests-branch=${{ inputs.k8s-manifests-branch }}" | ||
} >> "$GITHUB_OUTPUT" | ||
- uses: actions/download-artifact@v4 | ||
if: ${{ needs.test-inventory.outputs.ucc_modinput_functional == 'true' && needs.test-inventory.outputs.modinput_functional == 'true'}} | ||
id: download-openapi | ||
with: | ||
name: artifact-openapi | ||
path: ${{ github.workspace }} | ||
- name: Setup python | ||
if: steps.download-openapi.conclusion != 'skipped' | ||
uses: actions/setup-python@v5 | ||
with: | ||
python-version: 3.7 | ||
- name: modinput-test-prerequisites | ||
if: steps.download-openapi.conclusion != 'skipped' | ||
shell: bash | ||
env: | ||
PYTHON_KEYRING_BACKEND: keyring.backends.null.Keyring | ||
run: | | ||
python3.7 -m pip install poetry==1.5.1 | ||
export POETRY_REPOSITORIES_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_URL=https://github.com/splunk/addonfactory-ucc-test.git | ||
export POETRY_HTTP_BASIC_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_USERNAME=${{ secrets.SA_GH_USER_NAME }} | ||
export POETRY_HTTP_BASIC_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_PASSWORD=${{ secrets.GH_TOKEN_ADMIN }} | ||
poetry install --only modinput | ||
poetry run ucc-test-modinput -o ${{ steps.download-openapi.outputs.download-path }}/openapi.json -t ${{ steps.download-openapi.outputs.download-path }}/tmp/ | ||
- name: upload-swagger-artifacts-to-s3 | ||
if: steps.download-openapi.conclusion != 'skipped' | ||
id: swaggerupload | ||
env: | ||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} | ||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
run: | | ||
swagger_name=swagger_$(basename "$BUILD_NAME" .spl) | ||
aws s3 sync "${{ steps.download-openapi.outputs.download-path }}/tmp/restapi_client/" "s3://${{ needs.setup-workflow.outputs.s3_bucket_k8s }}/ta-apps/$swagger_name/" --exclude "*" --include "README.md" --include "*swagger_client*" --only-show-errors | ||
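# The generated swagger client is synced next to the build under the same | ||
# ta-apps prefix (swagger_<build name>/), filtered to README.md and | ||
# *swagger_client* files, presumably so the modinput test workflow can fetch | ||
# it by build name. | ||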
run-knowledge-tests: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.knowledge == 'true' && needs.setup-workflow.outputs.execute-knowledge-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }} | ||
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
TEST_TYPE: "knowledge" | ||
TEST_ARGS: "" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step configures git to avoid the "dubious git ownership" error in the later test-reporter stage | ||
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} | ||
JOB_NAME=${JOB_NAME//[_.]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: "" | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
sc4s-version: ${{ matrix.sc4s.version }} | ||
sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }} | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
- name: Read secrets from AWS Secrets Manager again into environment variables in case credentials were rotated | ||
id: update-argo-token | ||
if: ${{ !cancelled() }} | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
shell: bash | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
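# Completion polling: argo wait can return early (for example on a transient | ||
# API error), so the loop above re-reads .status.phase and keeps waiting while | ||
# the workflow reports Running or Pending; any terminal phase breaks the loop. | ||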
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Upload cim-compliance-report for ${{ matrix.splunk.version }} | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ matrix.splunk.islatest == true }} | ||
with: | ||
name: cim-compliance-report | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results/cim-compliance-report.md | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-ko-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
knowledge-tests-report: | ||
needs: run-knowledge-tests | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-knowledge-tests.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-ko* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-ko*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-ko* | ||
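# Each knowledge matrix leg uploads a one-row job_summary.txt; this job | ||
# stitches those rows under a single markdown header and then deletes the | ||
# intermediate summary-ko* artifacts to keep the run's artifact list clean. | ||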
run-requirement-tests: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.requirement_test == 'true' && needs.setup-workflow.outputs.execute-requirement-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }} | ||
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
TEST_TYPE: "requirement_test" | ||
TEST_ARGS: "" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step configures git to avoid the "dubious git ownership" error in the later test-reporter stage | ||
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} | ||
JOB_NAME=${JOB_NAME//[_.]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: "" | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
sc4s-version: ${{ matrix.sc4s.version }} | ||
sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }} | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
shell: bash | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-requirement-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
Requirement-input-tests-report: | ||
needs: run-requirement-tests | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-requirement-tests.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-requirement* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-requirement*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-requirement* | ||
run-ui-tests: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.ui == 'true' && needs.setup-workflow.outputs.execute-ui-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
browser: [ "chrome" ] | ||
vendor-version: ${{ fromJson(needs.meta.outputs.matrix_supportedUIVendors) }} | ||
marker: ${{ fromJson(inputs.ui_marker) }} | ||
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
TEST_TYPE: "ui" | ||
TEST_ARGS: "--browser ${{ matrix.browser }}" | ||
TEST_BROWSER: ${{ matrix.browser }} | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step configures git to avoid the "dubious git ownership" error in the later test-reporter stage | ||
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}-${{ matrix.browser }}} | ||
JOB_NAME=${JOB_NAME//[_.:]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: create test argument | ||
id: create-test-arg | ||
shell: bash | ||
run: | | ||
TEST_ARG_M="" | ||
EMPTY_MARKER="[]" | ||
if [[ "${{ inputs.ui_marker }}" != "$EMPTY_MARKER" ]]; then | ||
TEST_ARG_M="-m" | ||
fi | ||
echo "test-arg=$TEST_ARG_M" >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: ${{ env.TEST_ARGS }} ${{ steps.create-test-arg.outputs.test-arg }} ${{ matrix.marker }} | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
vendor-version: ${{ matrix.vendor-version.image }} | ||
sc4s-version: "No" | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
test-browser: ${{ env.TEST_BROWSER }} | ||
- name: Read secrets from AWS Secrets Manager again into environment variables in case credentials were rotated | ||
id: update-argo-token | ||
if: ${{ !cancelled() }} | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
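# The run-tests step is budgeted 340 minutes within an overall 350-minute window; the | ||
# remaining minutes become the timeout for the pod-deletion watch that follows. | ||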
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
run: | | ||
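# Watch the workflow and set a retry flag if the stream reports "pod deleted", which | ||
# usually means the test pod was evicted (e.g. node recycling) rather than tests failing. | ||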
set -o xtrace | ||
if argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
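# Stop the test workflow via the dedicated cancel workflow-template, then follow the | ||
# cancel run's logs and fail this step unless it confirms the target workflow stopped. | ||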
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$(echo "$cancel_response" | jq -r '.metadata.name') | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
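# If the pod-deletion flag was set, resubmit the workflow and follow the new run's logs; | ||
# otherwise wait on the original run and surface its "test-addon" status lines. | ||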
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
run: | | ||
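# Prefer the resubmitted workflow's name when a retry happened, then poll the workflow | ||
# phase until it leaves Running/Pending; "argo wait" can return early, so the loop re-checks. | ||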
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
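# Aggregate counts across all <testsuite> elements of the first JUnit XML report using | ||
# XPath sum(), derive the passed count by subtraction, and write a single pre-formatted | ||
# markdown table row for the downstream summary job. | ||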
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-${{ env.TEST_TYPE }}-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}-${{ matrix.browser }}-${{ matrix.vendor-version.image }}-${{ matrix.marker }}-artifact | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
UI-tests-report: | ||
needs: run-ui-tests | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-ui-tests.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-ui* | ||
- name: Combine summaries into a table | ||
run: | | ||
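# Each downloaded job_summary.txt already contains one markdown table row, so only the | ||
# header and separator are written before concatenating the rows. | ||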
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-ui-*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-ui* | ||
run-modinput-tests: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.modinput_functional == 'true' && needs.setup-workflow.outputs.execute-modinput-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
modinput-type: [ "modinput_functional" ] | ||
vendor-version: ${{ fromJson(needs.meta.outputs.matrix_supportedModinputFunctionalVendors) }} | ||
marker: ${{ fromJson(inputs.marker) }} | ||
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
TEST_TYPE: "modinput_functional" | ||
TEST_ARGS: "" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step configures git to avoid the "dubious git ownership" error in the later test-reporter stage | ||
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} | ||
JOB_NAME=${JOB_NAME//[_.]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: create test argument | ||
id: create-test-arg | ||
shell: bash | ||
run: | | ||
TEST_ARG_M="" | ||
EMPTY_MARKER="[]" | ||
if [[ "${{ inputs.marker }}" != "$EMPTY_MARKER" ]]; then | ||
TEST_ARG_M="-m" | ||
fi | ||
echo "test-arg=$TEST_ARG_M" >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: ${{ env.TEST_ARGS }} ${{ steps.create-test-arg.outputs.test-arg }} ${{ matrix.marker }} | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
vendor-version: ${{ matrix.vendor-version.image }} | ||
sc4s-version: "No" | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
- name: Read secrets from AWS Secrets Manager again into environment variables in case of credential rotation | ||
id: update-argo-token | ||
if: ${{ !cancelled() }} | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$(echo "$cancel_response" | jq -r '.metadata.name') | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.marker }} ${{ matrix.vendor-version.image }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-${{ env.TEST_TYPE }}-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}-${{ matrix.vendor-version.image }}-${{ matrix.marker }}-artifact | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
Modinput-tests-report: | ||
needs: run-modinput-tests | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-modinput-tests.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-modinput* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-modinput*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-modinput* | ||
run-scripted-input-tests-full-matrix: | ||
if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.scripted_inputs == 'true' && needs.setup-workflow.outputs.execute-scripted_inputs-labeled == 'true' }} | ||
needs: | ||
- build | ||
- test-inventory | ||
- setup | ||
- meta | ||
- setup-workflow | ||
runs-on: ubuntu-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} | ||
os: ${{ fromJson(inputs.scripted-inputs-os-list) }} | ||
container: | ||
image: ghcr.io/splunk/workflow-engine-base:4.1.0 | ||
env: | ||
ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} | ||
ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} | ||
ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} | ||
ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} | ||
ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} | ||
SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
TEST_TYPE: "scripted_inputs" | ||
permissions: | ||
actions: read | ||
deployments: read | ||
contents: read | ||
packages: read | ||
statuses: read | ||
checks: write | ||
steps: | ||
- uses: actions/checkout@v4 | ||
with: | ||
submodules: recursive | ||
- name: configure git # This step configures git to avoid the "dubious git ownership" error in the later test-reporter stage | ||
id: configure-git | ||
run: | | ||
git --version | ||
git_path="$(pwd)" | ||
echo "$git_path" | ||
git config --global --add safe.directory "$git_path" | ||
- name: capture start time | ||
id: capture-start-time | ||
run: | | ||
echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" | ||
- name: Configure AWS credentials | ||
uses: aws-actions/configure-aws-credentials@v4 | ||
with: | ||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} | ||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | ||
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} | ||
- name: Read secrets from AWS Secrets Manager into environment variables | ||
id: get-argo-token | ||
run: | | ||
ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') | ||
echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" | ||
- name: create job name | ||
id: create-job-name | ||
shell: bash | ||
run: | | ||
RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) | ||
JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} | ||
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} | ||
JOB_NAME=${JOB_NAME//[_.]/-} | ||
JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') | ||
echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" | ||
- name: get os name and version | ||
id: os-name-version | ||
shell: bash | ||
run: | | ||
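# Split the matrix entry "name:version" (e.g. "ubuntu:22.04") on ":" via word splitting | ||
# into OS_NAME and OS_VERSION, which parameterize the scripted-input test run below. | ||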
OS_NAME_VERSION=${{ matrix.os }} | ||
# shellcheck disable=SC2206 | ||
OS_NAME_VERSION=(${OS_NAME_VERSION//:/ }) | ||
OS_NAME=${OS_NAME_VERSION[0]} | ||
OS_VERSION=${OS_NAME_VERSION[1]} | ||
{ | ||
echo "os-name=$OS_NAME" | ||
echo "os-version=$OS_VERSION" | ||
} >> "$GITHUB_OUTPUT" | ||
- name: run-tests | ||
id: run-tests | ||
timeout-minutes: 340 | ||
continue-on-error: true | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
uses: splunk/[email protected] | ||
with: | ||
splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} | ||
test-type: ${{ env.TEST_TYPE }} | ||
test-args: "--hostname=spl --os-name=${{ steps.os-name-version.outputs.os-name }} --os-version=${{ steps.os-name-version.outputs.os-version }} -m script_input" | ||
job-name: ${{ steps.create-job-name.outputs.job-name }} | ||
labels: ${{ needs.setup.outputs.labels }} | ||
workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} | ||
workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} | ||
addon-url: ${{ needs.setup.outputs.addon-upload-path }} | ||
addon-name: ${{ needs.setup.outputs.addon-name }} | ||
sc4s-version: "No" | ||
os-name: ${{ steps.os-name-version.outputs.os-name }} | ||
os-version: ${{ steps.os-name-version.outputs.os-version }} | ||
k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} | ||
- name: calculate timeout | ||
id: calculate-timeout | ||
run: | | ||
start_time=${{ steps.capture-start-time.outputs.start_time }} | ||
current_time=$(date +%s) | ||
remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) | ||
echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" | ||
- name: Check if pod was deleted | ||
id: is-pod-deleted | ||
timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
run: | | ||
set -o xtrace | ||
if argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "pod deleted"; then | ||
echo "retry-workflow=true" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: Cancel workflow | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} | ||
run: | | ||
cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) | ||
cancel_workflow_name=$(echo "$cancel_response" | jq -r '.metadata.name') | ||
cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) | ||
if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" | ||
else | ||
echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" | ||
exit 1 | ||
fi | ||
- name: Retrying workflow | ||
id: retry-wf | ||
shell: bash | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
run: | | ||
set -o xtrace | ||
set +e | ||
if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] | ||
then | ||
WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) | ||
echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" | ||
argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." | ||
else | ||
echo "No retry required" | ||
argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | ||
argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" | ||
fi | ||
- name: check if workflow completed | ||
env: | ||
ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} | ||
if: ${{ !cancelled() }} | ||
shell: bash | ||
run: | | ||
set +e | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
echo "Status of workflow:" "$ARGO_STATUS" | ||
while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] | ||
do | ||
echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." | ||
argo wait "${WORKFLOW_NAME}" -n workflows || true | ||
ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') | ||
done | ||
- name: pull artifacts from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
echo "pulling artifacts" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} | ||
- name: pull logs from s3 bucket | ||
if: ${{ !cancelled() }} | ||
run: | | ||
# shellcheck disable=SC2157 | ||
if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then | ||
WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} | ||
else | ||
WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" | ||
fi | ||
echo "pulling logs" | ||
mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} tests artifacts | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/test-results | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} tests logs | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/argo-logs | ||
- name: Test Report | ||
id: test_report | ||
uses: dorny/[email protected] | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} test report | ||
path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" | ||
reporter: java-junit | ||
- name: Parse JUnit XML | ||
if: ${{ !cancelled() }} | ||
run: | | ||
apt-get install -y libxml2-utils | ||
junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" | ||
junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) | ||
if [ -n "$junit_xml_file" ]; then | ||
total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") | ||
failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") | ||
errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") | ||
skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") | ||
passed=$((total_tests - failures - errors - skipped)) | ||
echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt | ||
else | ||
echo "no XML File found, exiting" | ||
exit 1 | ||
fi | ||
- name: Upload-artifact-for-github-summary | ||
uses: actions/upload-artifact@v4 | ||
if: ${{ !cancelled() }} | ||
with: | ||
name: summary-${{ env.TEST_TYPE }}-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}-${{ steps.os-name-version.outputs.os-name }}-${{ steps.os-name-version.outputs.os-version }} | ||
path: job_summary.txt | ||
- name: pull diag from s3 bucket | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
run: | | ||
echo "pulling diag" | ||
aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ | ||
- uses: actions/upload-artifact@v4 | ||
if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} | ||
with: | ||
name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} tests diag | ||
path: | | ||
${{ needs.setup.outputs.directory-path }}/diag* | ||
scripted-input-tests-report: | ||
needs: run-scripted-input-tests-full-matrix | ||
runs-on: ubuntu-latest | ||
if: ${{ !cancelled() && needs.run-scripted-input-tests-full-matrix.result != 'skipped' }} | ||
steps: | ||
- name: Download all summaries | ||
uses: actions/download-artifact@v4 | ||
with: | ||
pattern: summary-scripted* | ||
- name: Combine summaries into a table | ||
run: | | ||
echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" | ||
echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" | ||
for file in summary-scripted*/job_summary.txt; do | ||
cat "$file" >> "$GITHUB_STEP_SUMMARY" | ||
done | ||
- uses: geekyeggo/delete-artifact@v5 | ||
with: | ||
name: | | ||
summary-scripted* | ||
pre-publish: | ||
if: ${{ !cancelled() && (needs.validate-custom-version.result == 'success' || needs.validate-custom-version.result == 'skipped') }} | ||
# The following line keeps this job named 'pre-publish' for pull requests targeting the main branch | ||
# and renames it to 'pre-publish-not_main_pr' for all other events (e.g. push to the develop branch). | ||
# This avoids confusion caused by GitHub Actions treating pre-publish as the same status check for both | ||
# push-to-develop and pull-request-to-main events. | ||
name: ${{ github.event_name == 'pull_request' && github.base_ref == 'main' && 'pre-publish' || 'pre-publish-not_main_pr' }} | ||
needs: | ||
- validate-custom-version | ||
- meta | ||
- compliance-copyrights | ||
- lint | ||
- review_secrets | ||
- semgrep | ||
- build | ||
- test-inventory | ||
- run-unit-tests | ||
- appinspect | ||
- setup | ||
- run-knowledge-tests | ||
- run-modinput-tests | ||
- run-ui-tests | ||
- validate-pr-title | ||
runs-on: ubuntu-latest | ||
env: | ||
NEEDS: ${{ toJson(needs) }} | ||
steps: | ||
- name: check if tests have passed or skipped | ||
id: check | ||
shell: bash | ||
run: | | ||
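# jq prints "false" for every needed job whose result is neither "skipped" nor "success" | ||
# (an object's length is never 0); publishing proceeds only if the output contains no "false". | ||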
RUN_PUBLISH=$(echo "$NEEDS" | jq ".[] | select( ( .result != \"skipped\" ) and .result != \"success\" ) | length == 0") | ||
if [[ "$RUN_PUBLISH" != *'false'* ]] | ||
then | ||
echo "run-publish=true" >> "$GITHUB_OUTPUT" | ||
else | ||
echo "run-publish=false" >> "$GITHUB_OUTPUT" | ||
fi | ||
- name: exit without publish | ||
if: ${{ steps.check.outputs.run-publish == 'false' || (github.event.action == 'labeled') }} | ||
run: | | ||
echo "Expand check step to see which job has failed pre-publish step." | ||
exit 1 | ||
publish: | ||
if: | | ||
(!cancelled() && needs.pre-publish.result == 'success' && github.event_name != 'pull_request' && github.event_name != 'schedule') || | ||
(!cancelled() && needs.pre-publish.result == 'success' && github.event.inputs.custom-version != '' && needs.validate-custom-version.result == 'success') | ||
name: ${{ github.event.inputs.custom-version == '' && 'publish' || 'publish-custom-version' }} | ||
needs: | ||
- pre-publish | ||
- validate-custom-version | ||
runs-on: ubuntu-latest | ||
permissions: | ||
contents: write | ||
packages: read | ||
pull-requests: read | ||
statuses: write | ||
steps: | ||
- name: Checkout | ||
uses: actions/checkout@v4 | ||
with: | ||
submodules: false | ||
persist-credentials: false | ||
- name: Semantic Release | ||
if: ${{ github.event.inputs.custom-version == '' }} | ||
id: semantic | ||
uses: splunk/[email protected] | ||
env: | ||
GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }} | ||
with: | ||
git_committer_name: ${{ secrets.SA_GH_USER_NAME }} | ||
git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }} | ||
gpg_private_key: ${{ secrets.SA_GPG_PRIVATE_KEY }} | ||
passphrase: ${{ secrets.SA_GPG_PASSPHRASE }} | ||
- name: Release custom version | ||
if: ${{ github.event.inputs.custom-version != '' }} | ||
id: custom | ||
uses: "softprops/action-gh-release@v2" | ||
with: | ||
token: "${{ secrets.GH_TOKEN_ADMIN }}" | ||
tag_name: v${{ github.event.inputs.custom-version }} | ||
target_commitish: "${{github.ref_name}}" | ||
make_latest: false | ||
- name: Download package-deployment | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
uses: actions/download-artifact@v4 | ||
id: download-package-deployment | ||
with: | ||
name: package-deployment | ||
path: download/artifacts/ | ||
- name: Download package-splunkbase | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
uses: actions/download-artifact@v4 | ||
id: download-package-splunkbase | ||
with: | ||
name: package-splunkbase | ||
path: download/artifacts/deployment | ||
- name: Download cim-compliance-report | ||
id: download-cim-compliance-report | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
continue-on-error: true | ||
uses: actions/download-artifact@v4 | ||
with: | ||
name: cim-compliance-report | ||
path: download/artifacts/deployment | ||
- name: List of assets | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
run: | | ||
ls -la ${{ steps.download-package-splunkbase.outputs.download-path }} | ||
- name: Upload assets to release | ||
if: ${{ steps.semantic.outputs.new_release_published == 'true' || steps.custom.outputs.upload_url != '' }} | ||
uses: svenstaro/upload-release-action@v2 | ||
with: | ||
repo_token: ${{ github.token }} | ||
file: ${{ steps.download-package-splunkbase.outputs.download-path }}/* | ||
overwrite: true | ||
file_glob: true | ||
tag: v${{ github.event.inputs.custom-version != '' && github.event.inputs.custom-version || steps.semantic.outputs.new_release_version }} |