Skip to content

Commit

Permalink
feat: label handling for reusable workflow (#148)
Browse files Browse the repository at this point in the history
* chore: first implementation

* chore: refactor of code

* chore: remove unused parts of code

* chore: add basic logging

* chore: fix for preserve_infra condition

* chore: update typos

* chore: refine github array handling

* chore: back to yes/no delay-destroy and refactor

* chore: further refactoring

* chore: fixed handling of other than pr events

* chore: add workflow_dispatch condition

* chore: input handling for workflow_dispatch

* chore: fix manual_dispatch inputs handling

* chore: refactor if to case statement

* chore: add workflow concurrency

* fix: handling of github empty values

* chore: update README

* chore: remove workflow_dispatch and concurrency, other minor fixes

* chore: limit new features added

* chore: fixing pre commit

* chore: add all_tests label handling

* fix: correct test execution conditional statements

* fix: specify labels names
  • Loading branch information
mkolasinski-splunk authored May 31, 2023
1 parent e3fec7d commit 8c5f61c
Show file tree
Hide file tree
Showing 2 changed files with 89 additions and 57 deletions.
72 changes: 62 additions & 10 deletions .github/workflows/reusable-build-test-release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,12 @@ jobs:
execute-modinput_functional: ${{ steps.delay-destroy-setup.outputs.execute-modinput_functional }}
execute-scripted_inputs: ${{ steps.delay-destroy-setup.outputs.execute-scripted_inputs }}
execute-requirement_test: ${{ steps.delay-destroy-setup.outputs.execute-requirement_test }}
execute-labeled-knowledge: ${{ steps.configure-tests-on-labels.outputs.execute-labeled-knowledge }}
execute-labeled-ui: ${{ steps.configure-tests-on-labels.outputs.execute-labeled-ui }}
execute-labeled-escu: ${{ steps.configure-tests-on-labels.outputs.execute-labeled-escu }}
execute-labeled-modinput: ${{ steps.configure-tests-on-labels.outputs.execute-labeled-modinput_functional }}
execute-labeled-scripted_inputs: ${{ steps.configure-tests-on-labels.outputs.execute-labeled-scripted_inputs }}
execute-labeled-requirement: ${{ steps.configure-tests-on-labels.outputs.execute-labeled-requirement_test }}
steps:
- name: skip workflow if description is empty for labeled pr
id: skip-workflow
Expand Down Expand Up @@ -106,7 +112,7 @@ jobs:
TESTSET="${{ steps.skip-workflow.outputs.testset }}"
for test_type in $TESTSET; do
eval DELAY_DESTROY_"$test_type"="No"
eval EXECUTE_"$test_type"="Yes"
eval EXECUTE_"$test_type"="No"
done
if [[ '${{ github.event.label.name }}' == 'preserve_infra' ]]; then
echo "$PR_BODY" >> body.txt
Expand All @@ -116,9 +122,8 @@ jobs:
fi
for test_type in $TESTSET; do
if [[ $tests =~ $test_type ]]; then
eval EXECUTE_"$test_type"="Yes"
eval DELAY_DESTROY_"$test_type"="Yes"
else
eval EXECUTE_"$test_type"="No"
fi
done
fi
Expand All @@ -138,6 +143,53 @@ jobs:
echo "execute-escu=$EXECUTE_escu"
echo "execute-requirement_test=$EXECUTE_requirement_test"
} >> "$GITHUB_OUTPUT"
- name: configure tests based on labels
id: configure-tests-on-labels
run: |
set +e
declare -A EXECUTE_LABELED
TESTSET=("execute_knowledge" "execute_ui" "execute_modinput_functional" "execute_scripted_inputs" "execute_escu" "execute_requirement_test")
for test_type in "${TESTSET[@]}"; do
EXECUTE_LABELED["$test_type"]="false"
done
case "${{ github.event_name }}" in
"pull_request")
if ${{ github.base_ref == 'main' }} || ${{ contains(github.event.pull_request.labels.*.name, 'execute_all_tests') }}; then
for test_type in "${TESTSET[@]}"; do
EXECUTE_LABELED["$test_type"]="true"
done
else
labels=$(echo '${{ toJSON(github.event.pull_request.labels) }}' | jq -r '.[] | .name')
for test_type in "${TESTSET[@]}"; do
if [[ "$labels" =~ $test_type ]]; then
EXECUTE_LABELED["$test_type"]="true"
fi
done
fi
;;
"push")
if ${{ github.ref_name == 'main' }} || ${{ github.ref_name == 'develop' }}; then
for test_type in "${TESTSET[@]}"; do
EXECUTE_LABELED["$test_type"]="true"
done
fi
;;
"schedule")
for test_type in "${TESTSET[@]}"; do
EXECUTE_LABELED["$test_type"]="true"
done
;;
*)
echo "No tests were labeled for execution!"
;;
esac
echo "Tests to execute based on labels:"
for test_type in "${TESTSET[@]}"; do
echo "execute-labeled-$test_type=${EXECUTE_LABELED["$test_type"]}" >> "$GITHUB_OUTPUT"
echo "execute-labeled-$test_type: ${EXECUTE_LABELED["$test_type"]}"
done
meta:
runs-on: ubuntu-latest
needs:
Expand Down Expand Up @@ -735,7 +787,7 @@ jobs:
} >> "$GITHUB_OUTPUT"
run-knowledge-tests:
if: ${{ needs.test-inventory.outputs.knowledge == 'true' && needs.setup-workflow.outputs.execute-ko == 'Yes' }}
if: ${{ needs.test-inventory.outputs.knowledge == 'true' && (needs.setup-workflow.outputs.execute-ko == 'Yes' || needs.setup-workflow.outputs.execute-labeled-knowledge == 'true') }}
needs:
- build
- test-inventory
Expand Down Expand Up @@ -942,7 +994,7 @@ jobs:
${{ needs.setup.outputs.directory-path }}/diag*
run-requirement-tests:
if: ${{ needs.test-inventory.outputs.requirement_test == 'true' && needs.setup-workflow.outputs.execute-requirement_test == 'Yes' }}
if: ${{ needs.test-inventory.outputs.requirement_test == 'true' && (needs.setup-workflow.outputs.execute-requirement_test == 'Yes' || needs.setup-workflow.outputs.execute-labeled-requirement == 'true') }}
needs:
- build
- test-inventory
Expand Down Expand Up @@ -1128,7 +1180,7 @@ jobs:
${{ needs.setup.outputs.directory-path }}/diag*
run-ui-tests:
if: ${{ needs.test-inventory.outputs.ui == 'true' && needs.setup-workflow.outputs.execute-ui == 'Yes' }}
if: ${{ needs.test-inventory.outputs.ui == 'true' && (needs.setup-workflow.outputs.execute-ui == 'Yes' || needs.setup-workflow.outputs.execute-labeled-ui == 'true') }}
needs:
- build
- test-inventory
Expand Down Expand Up @@ -1322,7 +1374,7 @@ jobs:
${{ needs.setup.outputs.directory-path }}/diag*
run-modinput-tests:
if: ${{ needs.test-inventory.outputs.modinput_functional == 'true' && needs.setup-workflow.outputs.execute-modinput_functional == 'Yes' }}
if: ${{ needs.test-inventory.outputs.modinput_functional == 'true' && (needs.setup-workflow.outputs.execute-modinput_functional == 'Yes' || needs.setup-workflow.outputs.execute-labeled-modinput == 'true') }}
needs:
- build
- test-inventory
Expand Down Expand Up @@ -1529,7 +1581,7 @@ jobs:
${{ needs.setup.outputs.directory-path }}/diag*
run-scripted-input-tests-full-matrix:
if: ${{ needs.test-inventory.outputs.scripted_inputs == 'true' && ( github.base_ref == 'main' || github.ref_name == 'main' ) && needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' }}
if: ${{ needs.test-inventory.outputs.scripted_inputs == 'true' && ( github.base_ref == 'main' || github.ref_name == 'main' ) && (needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' || needs.setup-workflow.outputs.execute-labeled-scripted_inputs == 'true') }}
needs:
- build
- test-inventory
Expand Down Expand Up @@ -1730,7 +1782,7 @@ jobs:
${{ needs.setup.outputs.directory-path }}/diag*
run-scripted-input-tests-canary:
if: ${{ needs.test-inventory.outputs.scripted_inputs == 'true' && ( github.base_ref == 'develop' || github.ref_name == 'develop' ) && needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' }}
if: ${{ needs.test-inventory.outputs.scripted_inputs == 'true' && ( github.base_ref == 'develop' || github.ref_name == 'develop' ) && (needs.setup-workflow.outputs.execute-scripted_inputs == 'Yes' || needs.setup-workflow.outputs.execute-labeled-scripted_inputs == 'true') }}
needs:
- build
- test-inventory
Expand Down Expand Up @@ -1930,7 +1982,7 @@ jobs:
${{ needs.setup.outputs.directory-path }}/diag*
run-escu-tests:
if: ${{ needs.test-inventory.outputs.escu == 'true' && ( github.base_ref == 'main' || github.ref_name == 'main' || github.base_ref == 'develop' || github.ref_name == 'develop' ) && needs.setup-workflow.outputs.execute-escu == 'Yes' }}
if: ${{ needs.test-inventory.outputs.escu == 'true' && ( github.base_ref == 'main' || github.ref_name == 'main' || github.base_ref == 'develop' || github.ref_name == 'develop' ) && (needs.setup-workflow.outputs.execute-escu == 'Yes' || needs.setup-workflow.outputs.execute-labeled-escu == 'true') }}
needs:
- build
- test-inventory
Expand Down
74 changes: 27 additions & 47 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,27 @@ General troubleshooting
- Validate If the failure is caused by a code change in the action which modified behaviour in the latest release causing the stage to fail.
setup-workflow
=======================
**Description:**
- Job that scans the pull_request event and, based on the PR body or attached labels, determines which tests to execute and which infrastructures to preserve.
- To preserve infrastructure:
- add to PR label `preserve_infra`
  - add to PR description `preserve: {comma separated list of test types}`
- available choices: `knowledge ui modinput_functional scripted_inputs escu requirement_test`
- to trigger tests again, reapply `preserve_infra` label
- To trigger specified test type
- add to PR one or multiple labels
- available choices: `execute_knowledge execute_ui execute_modinput_functional execute_scripted_inputs execute_escu execute_requirement_test execute_all_tests`
  - adding labels will result in retriggering the job
- All tests are executed by default when:
- PR target branch is 'main'
- PUSH event on branches 'main' and 'develop'
- SCHEDULE event

meta stage
==========
=======================

**Description:**

Expand All @@ -77,45 +96,6 @@ meta stage
<img src="images/meta/meta_logs.png" alt="meta_logs" style="width:200px;"/>


compliance-sample-scanner
=========================
**Description:**
- This action scans Splunk Add-on test data for potentially identifying information which should be anonymized.
**Action used:** https://github.com/splunk/addonfactory-sample-scanner
**Pass/fail behaviour:**
- The action will check `tests/knowledge/*` for potentially identifying data and update the build or pr with annotations identifying violations.

**Troubleshooting steps for failures if any:**

- Tokenise the sensitive data which is shown in the failures using PSA tool's data generator Data Generator — pytest-splunk-addon documentation

- If you get failures in the .samples or .sample file, replace that value with a token, and add that token's replacement, relevant details

**Exception file:**

- `.ge_ignore` in addon root folder All the false positive can be added in this file.

- ref: https://github.com/splunk/splunk-add-on-for-box/blob/4fe6f4ec2ceaf847211a335f6ca3c154cc805fb7/.ge_ignore

- apart from `.ge_ignore` also `.false-positives.yaml` can be used

- ref: https://github.com/splunk/splunk-add-on-for-microsoft-sysmon/blob/main/.false-positives.yaml

**Artifacts:**

- Annotations, and test report like is also available in stage logs

<img src="images/sample_scanner/annotations.png" alt="annotations" style="width:200px;"/>
<img src="images/sample_scanner/results.png" alt="results" style="width:200px;"/>
<img src="images/sample_scanner/report_link.png" alt="report_link" style="width:200px;"/>


fossa-scan
=======================

Expand Down Expand Up @@ -198,7 +178,7 @@ i.e <img src="images/compliance-copyrights/license.png" alt="license" style="wid
lint
====
=======================
**Description:**
Expand Down Expand Up @@ -263,7 +243,7 @@ security-detect-secrets
security-sast-semgrep
=====================
=======================
**Description:**
Expand Down Expand Up @@ -304,7 +284,7 @@ security-sast-semgrep
- Findings can be observed in the console logs of the stage and also at Semgrep link for which is provided in the end.
test-inventory
==============
=======================
**Description**
Expand All @@ -321,7 +301,7 @@ modinput_functional::true
```
Validate PR title
=================
=======================
**Description**
Expand Down Expand Up @@ -350,7 +330,7 @@ feat(ui): Add Button component.
See https://www.conventionalcommits.org/ for more examples.
build
=====
=======================
**Description**
Expand Down Expand Up @@ -385,7 +365,7 @@ installation-update.json
- package-raw
security-virustotal
===================
=======================
**Description**
Expand All @@ -400,7 +380,7 @@ GitHub Action to upload and scan files with VirusTotal which analyze files, doma
AppInspect
==========
=======================
**Description**
Expand Down

0 comments on commit 8c5f61c

Please sign in to comment.