diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..51d5a5cea --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 120 +extend-ignore = E203 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..998499598 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,34 @@ +## Bug Report + +**Description:** +Provide a clear and concise description of the bug you encountered. + +**Steps to Reproduce:** +List the steps to reproduce the bug. Be as specific as possible. + +**Expected Behavior:** +Explain what you expected to happen when performing the steps above. + +**Actual Behavior:** +Describe what actually happened instead of the expected behavior. + +**Environment:** +- Operating System and Version: +- AI Verify Version: +- Any other relevant information (e.g. versions of related package dependencies installed): + +Did you build from source code or from the Docker file? If from source code, please provide the information below: +- Python Version: +- Node Version: +- Redis Version: +- Mongo Version: +- Screen Resolution: + +**Screenshots/Code Snippets:** +If applicable, include screenshots or code snippets that demonstrate the issue. + +**Additional Context:** +Add any additional context about the problem here. + +**Possible Solution (Optional):** +If you have any ideas or suggestions for a possible solution, you can include them here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..4ed593846 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,19 @@ +## Feature Request + +**Description:** +Provide a clear and concise description of the feature you would like to request. + +**Is your feature request related to a problem/limitation? Please describe:** +Explain any problems or limitations you are currently facing that this feature could help address. + +**Describe the solution you'd like:** +Provide a detailed description of the feature you would like to see implemented. + +**Alternatives (Optional):** +If applicable, describe any alternative solutions or features you have considered. + +**Additional Context (Optional):** +Add any additional context or information about the feature request here. + +**Related Issues/PRs (Optional):** +If there are any related issues or pull requests, you can mention them here. diff --git a/.github/workflows/pre-merge-checks-apigw.yml b/.github/workflows/pre-merge-checks-apigw.yml index d7bd178d7..0d5f83c3b 100644 --- a/.github/workflows/pre-merge-checks-apigw.yml +++ b/.github/workflows/pre-merge-checks-apigw.yml @@ -5,67 +5,41 @@ # 4. Dependency analysis (undesirable licenses) # 5.
Deploy reports generated from the above to GitHub Pages -## support monorepo - name: Pre-Merge Checks (ai-verify-apigw) on: - # Runs on pull request to main + # Runs when a pull request to main is being assigned pull_request: - branches: [master, main] + types: [ assigned, synchronize ] + branches: + - 'main' paths: - 'ai-verify-apigw/**' # Run this workflow manually from Actions tab workflow_dispatch: -# Sets permissions of GITHUB_TOKEN -permissions: - contents: write - pages: write - pull-requests: write - id-token: write - # Allow one concurrent deployment concurrency: group: ${{ github.repository }}-${{ github.workflow }} cancel-in-progress: true jobs: - # Single deploy job since we're just deploying + pre-merge-checks: -# environment: -# name: github-pages -# url: ${{ steps.deployment.outputs.page_url }} + # Run only when PR is assigned, even on subsequent commits (i.e. synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest - timeout-minutes: 15 + timeout-minutes: 40 steps: -# - name: Add label to PR -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -# PR: ${{ github.event.pull_request.html_url }} -# run: | -# gh pr edit $PR --add-label 'ai-verify-apigw' - - # Checkout code + - name: Checkout code - run: | - set +e - git config --global init.defaultBranch main - git init - git branch -m main - git remote add origin https://github.com/IMDA-BTG/aiverify.git - git config --local gc.auto 0 - authToken="x-access-token:${{ secrets.GITHUB_TOKEN }}" - encodedAuthToken=$(echo -n "$authToken" | base64 -w0) - git config --local http.https://github.com/.extraheader "AUTHORIZATION: basic ${encodedAuthToken}" - git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +${{github.sha}}:refs/remotes/origin/main - git sparse-checkout init - git sparse-checkout set ai-verify-apigw _site - git checkout --progress --force -B master refs/remotes/origin/main - ls -l - set -e + uses: actions/checkout@v3 + with: + sparse-checkout: | + ai-verify-apigw # Install dependencies - name: Setup npm cache/install @@ -86,8 +60,8 @@ jobs: # Unit Tests & Coverage - name: Unit tests with coverage working-directory: ${{ github.workspace }}/ai-verify-apigw - if: always() - timeout-minutes: 5 + if: ${{ ! cancelled() }} + timeout-minutes: 30 run: | set +e npm run coverage @@ -103,11 +77,12 @@ jobs: # eslint - name: Code quality analysis - lint working-directory: ${{ github.workspace }}/ai-verify-apigw - if: always() + if: ${{ ! cancelled() }} run: | set +e + npx eslint . + exit_code_lint=$? npx eslint -f html -o eslint-report.html . - exit_code_lint=$? npx eslint -f json -o eslint-report.json . node ci/createBadges.mjs lint set -e @@ -119,13 +94,15 @@ jobs: # npm audit - name: Dependency analysis - vulnerabilities & licenses working-directory: ${{ github.workspace }}/ai-verify-apigw - if: always() + if: ${{ ! cancelled() }} run: | set +e + npm audit + exit_code_audit=$? npm audit --json | npx npm-audit-markdown --output npm-audit-report.md - exit_code_audit=$? 
npx markdown-to-html-cli --source npm-audit-report.md --output npm-audit-report.html -y npx license-checker --summary --out licenses-found.txt -y + cat licenses-found.txt node ci/createBadges.mjs dependency echo -e "License Check Summary for apigw\n" | cat - licenses-found.txt > license-report.txt node ci/createBadges.mjs license @@ -136,8 +113,9 @@ jobs: fi ### Publish reports to ci dashboard ### + - name: Checkout dashboard - if: always() + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} uses: actions/checkout@v3 with: repository: IMDA-BTG/ci-dashboard @@ -146,51 +124,21 @@ jobs: path: check-results - name: Push results to dashboard - if: always() + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} working-directory: ${{ github.workspace }}/check-results run: | set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/apigw" ] && rm -rf docs/pre-merge/apigw mkdir -p docs/pre-merge/apigw mv ../ai-verify-apigw/coverage docs/pre-merge/apigw/ mv ../ai-verify-apigw/*.svg docs/pre-merge/apigw/ mv ../ai-verify-apigw/*.html docs/pre-merge/apigw/ mv ../ai-verify-apigw/*.md docs/pre-merge/apigw/ mv ../ai-verify-apigw/*.txt docs/pre-merge/apigw/ - git add * + git add docs/pre-merge/apigw git config user.name "imda-btg" git config user.email "idma-btg@imda.gov.sg" git commit -m "feat(apigw) actions publish apigw reports to dashboard" git push set -e - -# - name: Prepare artifact -# if: always() -# run: | -# set +e -# mkdir -p _site/apigw -# rm -rf _site/apigw/ -# mv ai-verify-apigw/coverage _site/apigw/ -# mv ai-verify-apigw/*.svg _site/apigw/ -# mv ai-verify-apigw/*.html _site/apigw/ -# mv ai-verify-apigw/*.md _site/apigw/ -# mv ai-verify-apigw/*.txt _site/apigw/ -# git branch -m main -# git add _site/apigw -# git config user.name "imda-btg" -# git config user.email "idma-btg@imda.gov.sg" -# git commit -m "feat(apigw): actions publish apigw reports to pages" -# git config --unset-all http.https://github.com/.extraheader -# authToken="x-access-token:${{ secrets.GITHUB_TOKEN }}" -# encodedAuthToken=$(echo -n "$authToken" | base64 -w0) -# git config --local http.https://github.com/.extraheader "AUTHORIZATION: basic ${encodedAuthToken}" -# git push origin main -# set -e -# -# - name: Upload artifact -# if: always() -# uses: actions/upload-pages-artifact@v1 -# -# - name: Publish artifact to Pages -# if: always() -# id: deployment -# uses: actions/deploy-pages@v1 diff --git a/.github/workflows/pre-merge-checks-app.yml b/.github/workflows/pre-merge-checks-app.yml new file mode 100644 index 000000000..2d4c0421a --- /dev/null +++ b/.github/workflows/pre-merge-checks-app.yml @@ -0,0 +1,120 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (test-engine-app) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'test-engine-app/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + # Install redis + - name: Install redis + run: | + sudo apt update + sudo apt install redis-server + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-app + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: test-engine-app + + - name: Install dependencies + working-directory: ${{ github.workspace }}/test-engine-app + run: | + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage anybadge + pip install flake8 flake8-html + + # Unit Tests & Coverage + - name: Unit tests with coverage + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-app + timeout-minutes: 30 + run: | + bash ci/run-test.sh + + # flake8 + - name: Code quality analysis - lint + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-app + run: | + bash ci/run-flake8.sh + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-app + run: | + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/app" ] && rm -rf docs/pre-merge/app + mkdir -p docs/pre-merge/app + mv ../test-engine-app/htmlcov docs/pre-merge/app/ + mv ../test-engine-app/flake8-report docs/pre-merge/app/ + mv ../test-engine-app/assets docs/pre-merge/app/ + mv ../test-engine-app/*.svg docs/pre-merge/app/ + mv ../test-engine-app/*.html docs/pre-merge/app/ + mv ../test-engine-app/*.md docs/pre-merge/app/ + mv ../test-engine-app/*.txt docs/pre-merge/app/ + git add docs/pre-merge/app + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish test-engine-app reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-core-mods.yml b/.github/workflows/pre-merge-checks-core-mods.yml new file mode 100644 index 000000000..61cd32803 --- /dev/null +++ b/.github/workflows/pre-merge-checks-core-mods.yml @@ -0,0 +1,120 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. 
Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (test-engine-core-modules) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'test-engine-core-modules/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + # Install redis + - name: Install redis + run: | + sudo apt update + sudo apt install redis-server + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: test-engine-core-modules + + - name: Install dependencies + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage anybadge + pip install flake8 flake8-html + + # Unit Tests & Coverage + - name: Unit tests with coverage + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-core-modules + timeout-minutes: 30 + run: | + bash ci/run-test.sh + + # flake8 + - name: Code quality analysis - lint + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + bash ci/run-flake8.sh + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + if: ${{ ! 
cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/core-mods" ] && rm -rf docs/pre-merge/core-mods + mkdir -p docs/pre-merge/core-mods + mv ../test-engine-core-modules/htmlcov docs/pre-merge/core-mods/ + mv ../test-engine-core-modules/flake8-report docs/pre-merge/core-mods/ + mv ../test-engine-core-modules/assets docs/pre-merge/core-mods/ + mv ../test-engine-core-modules/*.svg docs/pre-merge/core-mods/ + mv ../test-engine-core-modules/*.html docs/pre-merge/core-mods/ + mv ../test-engine-core-modules/*.md docs/pre-merge/core-mods/ + mv ../test-engine-core-modules/*.txt docs/pre-merge/core-mods/ + git add docs/pre-merge/core-mods + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish test-engine-core-modules reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-core.yml b/.github/workflows/pre-merge-checks-core.yml new file mode 100644 index 000000000..200c3cc58 --- /dev/null +++ b/.github/workflows/pre-merge-checks-core.yml @@ -0,0 +1,119 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (test-engine-core) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'test-engine-core/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + # Install redis + - name: Install redis + run: | + sudo apt update + sudo apt install redis-server + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: test-engine-core + + - name: Install dependencies + working-directory: ${{ github.workspace }}/test-engine-core + run: | + pip install -r requirements.txt + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage anybadge + pip install flake8 flake8-html + + # Unit Tests & Coverage + - name: Unit tests with coverage + if: ${{ ! 
cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-core + timeout-minutes: 30 + run: | + bash ci/run-test.sh + + # flake8 + - name: Code quality analysis - lint + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-core + run: | + bash ci/run-flake8.sh + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/test-engine-core + run: | + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/core" ] && rm -rf docs/pre-merge/core + mkdir -p docs/pre-merge/core + mv ../test-engine-core/htmlcov docs/pre-merge/core/ + mv ../test-engine-core/flake8-report docs/pre-merge/core/ + mv ../test-engine-core/assets docs/pre-merge/core/ + mv ../test-engine-core/*.svg docs/pre-merge/core/ + mv ../test-engine-core/*.html docs/pre-merge/core/ + mv ../test-engine-core/*.md docs/pre-merge/core/ + mv ../test-engine-core/*.txt docs/pre-merge/core/ + git add docs/pre-merge/core + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish test-engine-core reports to dashboard" + git push + set -e + diff --git a/.github/workflows/pre-merge-checks-plugin-ale.yml b/.github/workflows/pre-merge-checks-plugin-ale.yml new file mode 100644 index 000000000..7bb93716d --- /dev/null +++ b/.github/workflows/pre-merge-checks-plugin-ale.yml @@ -0,0 +1,139 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +## support monorepo + +name: Pre-Merge Checks (plugin-ale) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'stock-plugins/aiverify.stock.accumulated-local-effect/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + stock-plugins/aiverify.stock.accumulated-local-effect + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: stock-plugins/aiverify.stock.accumulated-local-effect + + - name: Install dependencies for core-modules + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + pip install -r requirements.txt + + - name: Install dependencies for plugin + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.accumulated-local-effect + run: | + cd algorithms/*/ + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage + pip install flake8 flake8-html anybadge + + # Unit Tests & Coverage + - name: Unit tests with coverage + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.accumulated-local-effect + if: ${{ ! cancelled() }} + timeout-minutes: 30 + run: | + cd algorithms + for algo_dir in */; do + echo "algo-dir: $algo_dir" + cd "$algo_dir" + bash ci/run-test.sh -m + cd .. + done + + # flake8 + - name: Code quality analysis - lint + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.accumulated-local-effect + if: ${{ ! cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-flake8.sh + + - name: Uninstall dependencies (core-modules & dev) + working-directory: ${{ github.workspace }}/test-engine-core-modules + if: ${{ ! cancelled() }} + run: | + pip uninstall -y -r requirements.txt + pip uninstall -y pytest pytest-mock pytest-html pytest-json pytest-cov coverage + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.accumulated-local-effect + if: ${{ ! 
cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/plugin-ale" ] && rm -rf docs/pre-merge/plugin-ale + mkdir -p docs/pre-merge/plugin-ale + mv ../stock-plugins/aiverify.stock.accumulated-local-effect/algorithms/*/htmlcov docs/pre-merge/plugin-ale/ + mv ../stock-plugins/aiverify.stock.accumulated-local-effect/algorithms/*/flake8-report docs/pre-merge/plugin-ale/ + mv ../stock-plugins/aiverify.stock.accumulated-local-effect/algorithms/*/assets docs/pre-merge/plugin-ale/ + mv ../stock-plugins/aiverify.stock.accumulated-local-effect/algorithms/*/*.svg docs/pre-merge/plugin-ale/ + mv ../stock-plugins/aiverify.stock.accumulated-local-effect/algorithms/*/*.html docs/pre-merge/plugin-ale/ + mv ../stock-plugins/aiverify.stock.accumulated-local-effect/algorithms/*/*.md docs/pre-merge/plugin-ale/ + mv ../stock-plugins/aiverify.stock.accumulated-local-effect/algorithms/*/*.txt docs/pre-merge/plugin-ale/ + git add docs/pre-merge/plugin-ale + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish plugin-ale reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-plugin-fmtc.yml b/.github/workflows/pre-merge-checks-plugin-fmtc.yml new file mode 100644 index 000000000..6a8a803f5 --- /dev/null +++ b/.github/workflows/pre-merge-checks-plugin-fmtc.yml @@ -0,0 +1,132 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (plugin-fmtc) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification + + - name: Install dependencies for core-modules + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + pip install -r requirements.txt + + - name: Install dependencies for plugin + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification + run: | + cd algorithms/*/ + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage + pip install flake8 flake8-html anybadge + + # Unit Tests & Coverage + - name: Unit tests with coverage + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification + if: ${{ ! cancelled() }} + timeout-minutes: 30 + run: | + cd algorithms/*/ + bash ci/run-test.sh -m + + # flake8 + - name: Code quality analysis - lint + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification + if: ${{ ! cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-flake8.sh + + - name: Uninstall dependencies (core-modules & dev) + working-directory: ${{ github.workspace }}/test-engine-core-modules + if: ${{ ! cancelled() }} + run: | + pip uninstall -y -r requirements.txt + pip uninstall -y pytest pytest-mock pytest-html pytest-json pytest-cov coverage + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification + if: ${{ ! 
cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/plugin-fmtc" ] && rm -rf docs/pre-merge/plugin-fmtc + mkdir -p docs/pre-merge/plugin-fmtc + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/algorithms/*/htmlcov docs/pre-merge/plugin-fmtc/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/algorithms/*/flake8-report docs/pre-merge/plugin-fmtc/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/algorithms/*/assets docs/pre-merge/plugin-fmtc/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/algorithms/*/*.svg docs/pre-merge/plugin-fmtc/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/algorithms/*/*.html docs/pre-merge/plugin-fmtc/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/algorithms/*/*.md docs/pre-merge/plugin-fmtc/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-classification/algorithms/*/*.txt docs/pre-merge/plugin-fmtc/ + git add docs/pre-merge/plugin-fmtc + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish plugin-fmtc reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-plugin-fmtr.yml b/.github/workflows/pre-merge-checks-plugin-fmtr.yml new file mode 100644 index 000000000..fc4044703 --- /dev/null +++ b/.github/workflows/pre-merge-checks-plugin-fmtr.yml @@ -0,0 +1,132 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (plugin-fmtr) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression + + - name: Install dependencies for core-modules + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + pip install -r requirements.txt + + - name: Install dependencies for plugin + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression + run: | + cd algorithms/*/ + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage + pip install flake8 flake8-html anybadge + + # Unit Tests & Coverage + - name: Unit tests with coverage + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression + if: ${{ ! cancelled() }} + timeout-minutes: 30 + run: | + cd algorithms/*/ + bash ci/run-test.sh -m + + # flake8 + - name: Code quality analysis - lint + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression + if: ${{ ! cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-flake8.sh + + - name: Uninstall dependencies (core-modules & dev) + working-directory: ${{ github.workspace }}/test-engine-core-modules + if: ${{ ! cancelled() }} + run: | + pip uninstall -y -r requirements.txt + pip uninstall -y pytest pytest-mock pytest-html pytest-json pytest-cov coverage + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression + if: ${{ ! 
cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/plugin-fmtr" ] && rm -rf docs/pre-merge/plugin-fmtr + mkdir -p docs/pre-merge/plugin-fmtr + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/algorithms/*/htmlcov docs/pre-merge/plugin-fmtr/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/algorithms/*/flake8-report docs/pre-merge/plugin-fmtr/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/algorithms/*/assets docs/pre-merge/plugin-fmtr/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/algorithms/*/*.svg docs/pre-merge/plugin-fmtr/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/algorithms/*/*.html docs/pre-merge/plugin-fmtr/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/algorithms/*/*.md docs/pre-merge/plugin-fmtr/ + mv ../stock-plugins/aiverify.stock.fairness-metrics-toolbox-for-regression/algorithms/*/*.txt docs/pre-merge/plugin-fmtr/ + git add docs/pre-merge/plugin-fmtr + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish plugin-fmtr reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-plugin-ict.yml b/.github/workflows/pre-merge-checks-plugin-ict.yml new file mode 100644 index 000000000..8f9fe79e1 --- /dev/null +++ b/.github/workflows/pre-merge-checks-plugin-ict.yml @@ -0,0 +1,168 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (plugin-ict) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'stock-plugins/aiverify.stock.image-corruption-toolbox/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 60 + + steps: + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + stock-plugins/aiverify.stock.image-corruption-toolbox + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: stock-plugins/aiverify.stock.image-corruption-toolbox + + - name: Install dependencies for core-modules + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + pip install -r requirements.txt + + - name: Install dependencies for plugin + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.image-corruption-toolbox + run: | + cd algorithms/ + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + find ./ -type f -name 'requirements.txt' -exec pip install -r "{}" \; + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage + pip install flake8 flake8-html anybadge + + # Unit Tests & Coverage + - name: Unit tests with coverage + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.image-corruption-toolbox + if: ${{ ! cancelled() }} + timeout-minutes: 45 + run: | + exit_code=0 + cd algorithms + for algo_dir in */; do + echo "algo-dir: $algo_dir" + cd "$algo_dir" + if ! bash ci/run-test.sh -m; then + exit_code=1 + fi + cd .. + done + exit $exit_code + + # flake8 + - name: Code quality analysis - lint + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.image-corruption-toolbox + if: ${{ ! cancelled() }} + run: | + exit_code=0 + cd algorithms + for algo_dir in */; do + echo "algo-dir: $algo_dir" + cd "$algo_dir" + if ! bash ci/run-flake8.sh; then + exit_code=1 + fi + cd .. + done + exit $exit_code + + - name: Uninstall dependencies (core-modules & dev) + working-directory: ${{ github.workspace }}/test-engine-core-modules + if: ${{ ! cancelled() }} + run: | + pip uninstall -y -r requirements.txt + pip uninstall -y pytest pytest-mock pytest-html pytest-json pytest-cov coverage + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.image-corruption-toolbox + if: ${{ ! cancelled() }} + run: | + exit_code=0 + cd algorithms + for algo_dir in */; do + echo "algo-dir: $algo_dir" + cd "$algo_dir" + if ! bash ci/run-pip-audit.sh; then + exit_code=1 + fi + cd .. 
+ done + exit $exit_code + + ### Publish reports to ci dashboard ### + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/plugin-ict" ] && rm -rf docs/pre-merge/plugin-ict + mkdir -p docs/pre-merge/plugin-ict + + cd ../stock-plugins/aiverify.stock.image-corruption-toolbox/algorithms/ + for algo_dir in */; do + echo "algo_dir: $algo_dir" + dst_dir=../../../check-results/docs/pre-merge/plugin-ict/$algo_dir + echo "dst_dir: $dst_dir" + mkdir -p $dst_dir + mv ./${algo_dir}htmlcov $dst_dir + mv ./${algo_dir}flake8-report $dst_dir + mv ./${algo_dir}assets $dst_dir + mv ./${algo_dir}*.svg $dst_dir + mv ./${algo_dir}*.html $dst_dir + mv ./${algo_dir}*.md $dst_dir + mv ./${algo_dir}*.txt $dst_dir + done + + cd ../../../check-results + git add docs/pre-merge/plugin-ict + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish plugin-ict reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-plugin-pdp.yml b/.github/workflows/pre-merge-checks-plugin-pdp.yml new file mode 100644 index 000000000..631e57bd0 --- /dev/null +++ b/.github/workflows/pre-merge-checks-plugin-pdp.yml @@ -0,0 +1,132 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (plugin-pdp) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'stock-plugins/aiverify.stock.partial-dependence-plot/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + stock-plugins/aiverify.stock.partial-dependence-plot + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: stock-plugins/aiverify.stock.partial-dependence-plot + + - name: Install dependencies for core-modules + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + pip install -r requirements.txt + + - name: Install dependencies for plugin + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.partial-dependence-plot + run: | + cd algorithms/*/ + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage + pip install flake8 flake8-html anybadge + + # Unit Tests & Coverage + - name: Unit tests with coverage + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.partial-dependence-plot + if: ${{ ! cancelled() }} + timeout-minutes: 30 + run: | + cd algorithms/*/ + bash ci/run-test.sh -m + + # flake8 + - name: Code quality analysis - lint + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.partial-dependence-plot + if: ${{ ! cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-flake8.sh + + - name: Uninstall dependencies (core-modules & dev) + working-directory: ${{ github.workspace }}/test-engine-core-modules + if: ${{ ! cancelled() }} + run: | + pip uninstall -y -r requirements.txt + pip uninstall -y pytest pytest-mock pytest-html pytest-json pytest-cov coverage + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.partial-dependence-plot + if: ${{ ! 
cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/plugin-pdp" ] && rm -rf docs/pre-merge/plugin-pdp + mkdir -p docs/pre-merge/plugin-pdp + mv ../stock-plugins/aiverify.stock.partial-dependence-plot/algorithms/*/htmlcov docs/pre-merge/plugin-pdp/ + mv ../stock-plugins/aiverify.stock.partial-dependence-plot/algorithms/*/flake8-report docs/pre-merge/plugin-pdp/ + mv ../stock-plugins/aiverify.stock.partial-dependence-plot/algorithms/*/assets docs/pre-merge/plugin-pdp/ + mv ../stock-plugins/aiverify.stock.partial-dependence-plot/algorithms/*/*.svg docs/pre-merge/plugin-pdp/ + mv ../stock-plugins/aiverify.stock.partial-dependence-plot/algorithms/*/*.html docs/pre-merge/plugin-pdp/ + mv ../stock-plugins/aiverify.stock.partial-dependence-plot/algorithms/*/*.md docs/pre-merge/plugin-pdp/ + mv ../stock-plugins/aiverify.stock.partial-dependence-plot/algorithms/*/*.txt docs/pre-merge/plugin-pdp/ + git add docs/pre-merge/plugin-pdp + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish plugin-pdp reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-plugin-rt.yml b/.github/workflows/pre-merge-checks-plugin-rt.yml new file mode 100644 index 000000000..ac62f0bae --- /dev/null +++ b/.github/workflows/pre-merge-checks-plugin-rt.yml @@ -0,0 +1,132 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (plugin-rt) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'stock-plugins/aiverify.stock.robustness-toolbox/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 60 + + steps: + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + stock-plugins/aiverify.stock.robustness-toolbox + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: stock-plugins/aiverify.stock.robustness-toolbox + + - name: Install dependencies for core-modules + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + pip install -r requirements.txt + + - name: Install dependencies for plugin + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.robustness-toolbox + run: | + cd algorithms/*/ + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage + pip install flake8 flake8-html anybadge + + # Unit Tests & Coverage + - name: Unit tests with coverage + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.robustness-toolbox + if: ${{ ! cancelled() }} + timeout-minutes: 50 + run: | + cd algorithms/*/ + bash ci/run-test.sh -m + + # flake8 + - name: Code quality analysis - lint + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.robustness-toolbox + if: ${{ ! cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-flake8.sh + + - name: Uninstall dependencies (core-modules & dev) + working-directory: ${{ github.workspace }}/test-engine-core-modules + if: ${{ ! cancelled() }} + run: | + pip uninstall -y -r requirements.txt + pip uninstall -y pytest pytest-mock pytest-html pytest-json pytest-cov coverage + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.robustness-toolbox + if: ${{ ! 
cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/plugin-rt" ] && rm -rf docs/pre-merge/plugin-rt + mkdir -p docs/pre-merge/plugin-rt + mv ../stock-plugins/aiverify.stock.robustness-toolbox/algorithms/*/htmlcov docs/pre-merge/plugin-rt/ + mv ../stock-plugins/aiverify.stock.robustness-toolbox/algorithms/*/flake8-report docs/pre-merge/plugin-rt/ + mv ../stock-plugins/aiverify.stock.robustness-toolbox/algorithms/*/assets docs/pre-merge/plugin-rt/ + mv ../stock-plugins/aiverify.stock.robustness-toolbox/algorithms/*/*.svg docs/pre-merge/plugin-rt/ + mv ../stock-plugins/aiverify.stock.robustness-toolbox/algorithms/*/*.html docs/pre-merge/plugin-rt/ + mv ../stock-plugins/aiverify.stock.robustness-toolbox/algorithms/*/*.md docs/pre-merge/plugin-rt/ + mv ../stock-plugins/aiverify.stock.robustness-toolbox/algorithms/*/*.txt docs/pre-merge/plugin-rt/ + git add docs/pre-merge/plugin-rt + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish plugin-rt reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-plugin-st.yml b/.github/workflows/pre-merge-checks-plugin-st.yml new file mode 100644 index 000000000..5fd56505c --- /dev/null +++ b/.github/workflows/pre-merge-checks-plugin-st.yml @@ -0,0 +1,132 @@ +# Pre-merge Checks (for Python projects) +# 1. Unit tests with code coverage (pytest) +# 2. Code quality analysis (flake8) +# 3. Dependency analysis (vulnerabilities) +# 4. Dependency analysis (undesirable licenses) +# 5. Deploy reports generated from the above to GitHub Pages + +name: Pre-Merge Checks (plugin-st) + +on: + # Runs when a pull request to main is being assigned + pull_request: + types: [ assigned, synchronize ] + branches: + - 'main' + paths: + - 'stock-plugins/aiverify.stock.shap-toolbox/**' + + # Run this workflow manually from Actions tab + workflow_dispatch: + +# Allow one concurrent deployment +concurrency: + group: ${{ github.repository }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + + pre-merge-checks: + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + + runs-on: ubuntu-latest + timeout-minutes: 40 + + steps: + + # Checkout code + - name: Checkout code + uses: actions/checkout@v3 + with: + sparse-checkout: | + test-engine-core-modules + stock-plugins/aiverify.stock.shap-toolbox + + # Install dependencies + - name: Setup pip cache/install + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + cache-dependency-path: stock-plugins/aiverify.stock.shap-toolbox + + - name: Install dependencies for core-modules + working-directory: ${{ github.workspace }}/test-engine-core-modules + run: | + pip install -r requirements.txt + + - name: Install dependencies for plugin + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.shap-toolbox + run: | + cd algorithms/*/ + curl -H 'Authorization: token ${{ secrets.CHECKOUT_TOKEN }}' -H 'Accept: application/vnd.github.v3.raw' -O -L https://api.github.com/repos/IMDA-BTG/aiverify/contents/test-engine-core/dist/test_engine_core-0.9.0.tar.gz + pip install -r requirements.txt + pip install test_engine_core-0.9.0.tar.gz + pip install pytest pytest-mock pytest-html pytest-json pytest-cov coverage + pip install flake8 flake8-html anybadge + + # Unit Tests & Coverage + - name: Unit tests with coverage + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.shap-toolbox + if: ${{ ! cancelled() }} + timeout-minutes: 30 + run: | + cd algorithms/*/ + bash ci/run-test.sh -m + + # flake8 + - name: Code quality analysis - lint + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.shap-toolbox + if: ${{ ! cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-flake8.sh + + - name: Uninstall dependencies (core-modules & dev) + working-directory: ${{ github.workspace }}/test-engine-core-modules + if: ${{ ! cancelled() }} + run: | + pip uninstall -y -r requirements.txt + pip uninstall -y pytest pytest-mock pytest-html pytest-json pytest-cov coverage + + # pip-audit + - name: Dependency analysis - vulnerabilities & licenses + working-directory: ${{ github.workspace }}/stock-plugins/aiverify.stock.shap-toolbox + if: ${{ ! 
cancelled() }} + run: | + cd algorithms/*/ + bash ci/run-pip-audit.sh + + ### Publish reports to ci dashboard ### + + - name: Checkout dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + uses: actions/checkout@v3 + with: + repository: IMDA-BTG/ci-dashboard + token: ${{ secrets.CHECKOUT_TOKEN }} + ref: main + path: check-results + + - name: Push results to dashboard + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} + working-directory: ${{ github.workspace }}/check-results + run: | + set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/plugin-st" ] && rm -rf docs/pre-merge/plugin-st + mkdir -p docs/pre-merge/plugin-st + mv ../stock-plugins/aiverify.stock.shap-toolbox/algorithms/*/htmlcov docs/pre-merge/plugin-st/ + mv ../stock-plugins/aiverify.stock.shap-toolbox/algorithms/*/flake8-report docs/pre-merge/plugin-st/ + mv ../stock-plugins/aiverify.stock.shap-toolbox/algorithms/*/assets docs/pre-merge/plugin-st/ + mv ../stock-plugins/aiverify.stock.shap-toolbox/algorithms/*/*.svg docs/pre-merge/plugin-st/ + mv ../stock-plugins/aiverify.stock.shap-toolbox/algorithms/*/*.html docs/pre-merge/plugin-st/ + mv ../stock-plugins/aiverify.stock.shap-toolbox/algorithms/*/*.md docs/pre-merge/plugin-st/ + mv ../stock-plugins/aiverify.stock.shap-toolbox/algorithms/*/*.txt docs/pre-merge/plugin-st/ + git add docs/pre-merge/plugin-st + git config user.name "imda-btg" + git config user.email "idma-btg@imda.gov.sg" + git commit -m "feat(portal) actions publish plugin-st reports to dashboard" + git push + set -e diff --git a/.github/workflows/pre-merge-checks-portal.yml b/.github/workflows/pre-merge-checks-portal.yml index 353934b35..5ac11c77f 100644 --- a/.github/workflows/pre-merge-checks-portal.yml +++ b/.github/workflows/pre-merge-checks-portal.yml @@ -5,68 +5,43 @@ # 4. Dependency analysis (undesirable licenses) # 5. Deploy reports generated from the above to GitHub Pages -# support monorepo - name: Pre-Merge Checks (ai-verify-portal) on: - # Runs on pull request to main + # Runs when a pull request to main is being assigned pull_request: - branches: [ master, main ] + types: [ assigned, synchronize ] + branches: + - 'main' paths: - 'ai-verify-portal/**' # Run this workflow manually from Actions tab workflow_dispatch: -# Sets permissions of GITHUB_TOKEN -permissions: - contents: write - pages: write - pull-requests: write - id-token: write - # Allow one concurrent deployment concurrency: group: ${{ github.repository }}-${{ github.workflow }} cancel-in-progress: true jobs: - # Single deploy job since we're just deploying + pre-merge-checks: -# environment: -# name: github-pages -# url: ${{ steps.deployment.outputs.page_url }} + # Run only when PR is assigned, even on subsequent commits (i.e. 
synchronize) + if: (github.event_name == 'pull_request' && github.event.pull_request.assignee != null) || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest - timeout-minutes: 15 + timeout-minutes: 40 steps: -# - name: Add label to PR -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -# PR: ${{ github.event.pull_request.html_url }} -# run: | -# gh pr edit $PR --add-label 'ai-verify-portal' - # Checkout code - name: Checkout code - run: | - set +e - git config --global init.defaultBranch main - git init - git branch -m main - git remote add origin https://github.com/IMDA-BTG/aiverify.git - git config --local gc.auto 0 - authToken="x-access-token:${{ secrets.GITHUB_TOKEN }}" - encodedAuthToken=$(echo -n "$authToken" | base64 -w0) - git config --local http.https://github.com/.extraheader "AUTHORIZATION: basic ${encodedAuthToken}" - git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +${{github.sha}}:refs/remotes/origin/main - git sparse-checkout init - git sparse-checkout set ai-verify-portal ai-verify-shared-library _site - git checkout --progress --force -B master refs/remotes/origin/main - ls -l - set -e + uses: actions/checkout@v3 + with: + sparse-checkout: | + ai-verify-portal + ai-verify-shared-library # Install dependencies - name: Setup npm cache/install @@ -101,11 +76,18 @@ jobs: cp .env.development .env npm run build + # Format check + - name: Format check + if: ${{ ! cancelled() }} + working-directory: ${{ github.workspace }}/ai-verify-portal + run: | + npm run format-check + # Unit Tests & Coverage - name: Unit tests with coverage - if: always() + if: ${{ ! cancelled() }} working-directory: ${{ github.workspace }}/ai-verify-portal - timeout-minutes: 5 + timeout-minutes: 30 run: | set +e sudo timedatectl set-timezone Asia/Singapore @@ -121,12 +103,13 @@ jobs: # eslint - name: Code quality analysis - lint - if: always() + if: ${{ ! cancelled() }} working-directory: ${{ github.workspace }}/ai-verify-portal run: | set +e - npx eslint -f html -o eslint-report.html . + npx eslint . exit_code_lint=$? + npx eslint -f html -o eslint-report.html . npx eslint -f json -o eslint-report.json . node ci/createBadges.mjs lint set -e @@ -137,17 +120,19 @@ jobs: # npm audit - name: Dependency analysis - vulnerabilities & licenses - if: always() + if: ${{ ! cancelled() }} working-directory: ${{ github.workspace }}/ai-verify-portal run: | set +e + npm audit + exit_code_audit=$? npm audit --json | npx npm-audit-markdown --output npm-audit-report.md - exit_code_audit=$? 
npx markdown-to-html-cli --source npm-audit-report.md --output npm-audit-report.html -y echo -e "License Check Summary for portal\n" > license-report.txt cat licenses-found.txt >> license-report.txt echo -e "\nLicense Check Summary for shared-library\n" >> license-report.txt cat ../ai-verify-shared-library/licenses-found.txt >> license-report.txt + cat license-report.txt cp license-report.txt licenses-found.txt node ci/createBadges.mjs dependency node ci/createBadges.mjs license @@ -158,8 +143,9 @@ jobs: fi ### Publish reports to ci dashboard ### + - name: Checkout dashboard - if: always() + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} uses: actions/checkout@v3 with: repository: IMDA-BTG/ci-dashboard @@ -168,53 +154,21 @@ jobs: path: check-results - name: Push results to dashboard - if: always() + if: ${{ github.event.pull_request.head.repo.full_name == github.repository && always() }} working-directory: ${{ github.workspace }}/check-results run: | set +e + find ../ -type f -name ".gitignore" -exec rm {} + + [ -d "docs/pre-merge/portal" ] && rm -rf docs/pre-merge/portal mkdir -p docs/pre-merge/portal mv ../ai-verify-portal/coverage docs/pre-merge/portal/ mv ../ai-verify-portal/*.svg docs/pre-merge/portal/ mv ../ai-verify-portal/*.html docs/pre-merge/portal/ mv ../ai-verify-portal/*.md docs/pre-merge/portal/ mv ../ai-verify-portal/*.txt docs/pre-merge/portal/ - git add * + git add docs/pre-merge/portal git config user.name "imda-btg" git config user.email "idma-btg@imda.gov.sg" git commit -m "feat(portal) actions publish portal reports to dashboard" git push set -e - -# - name: Prepare artifact 2 -# if: always() -# run: | -# set +e -# mkdir -p check-results/docs/pre-merge/portal -# cd check-results -# git init -# git branch -m main -# git remote add origin https://github.com/IMDA-BTG/ci-dashboard.git -# mv ../ai-verify-portal/coverage docs/pre-merge/portal/ -# mv ../ai-verify-portal/*.svg docs/pre-merge/portal/ -# mv ../ai-verify-portal/*.html docs/pre-merge/portal/ -# mv ../ai-verify-portal/*.md docs/pre-merge/portal/ -# mv ../ai-verify-portal/*.txt docs/pre-merge/portal/ -# git add * -# git config user.name "imda-btg" -# git config user.email "idma-btg@imda.gov.sg" -# git commit -m "feat(portal): actions publish portal reports to dashboard" -# git config --unset-all http.https://github.com/.extraheader -# authToken="x-access-token:${{ secrets.CHECKOUT_TOKEN }}" -# encodedAuthToken=$(echo -n "$authToken" | base64 -w0) -# git config --local http.https://github.com/.extraheader "AUTHORIZATION: basic ${encodedAuthToken}" -# git push -# set -e -# -# - name: Upload artifact -# if: always() -# uses: actions/upload-pages-artifact@v1 -# -# - name: Publish artifact to Pages -# if: always() -# id: deployment -# uses: actions/deploy-pages@v1 diff --git a/.gitignore b/.gitignore index 8d9840a61..e08cbf857 100644 --- a/.gitignore +++ b/.gitignore @@ -1,188 +1,6 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -#dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -.idea/ - -# Deployment temp folder -temp/ - -# Other git files -.flake8 -.pre-commit-config.yaml -.gitignore - -# Testing files -assets/ -ci-central/ -cov-badge.svg -coverage.json -test-badge.svg -test-report.html -test-report.json - -# Node modules/ frontend folders -node_modules/ -node_modules -build/ -*.zip -*.log -lib-cov +venv coverage -cache -uploads +/uploads/ +/package-lock.json +**/*.log +.tox \ No newline at end of file diff --git a/ai-verify-apigw/.github/pull_request_template.md b/ai-verify-apigw/.github/pull_request_template.md deleted file mode 100644 index 5d6732077..000000000 --- a/ai-verify-apigw/.github/pull_request_template.md +++ /dev/null @@ -1,48 +0,0 @@ -# Pull Request Template - -## Description - -[Provide a brief description of the changes or features introduced by this pull request.] 
- -## Motivation and Context - -[Explain the motivation or the context behind this pull request. Why is it necessary?] - -## Type of Change - - - - - - - - - - - - - -## How to Test - -[Provide clear instructions on how to test and verify the changes introduced by this pull request, including any specific unit tests you have created to demonstrate your changes.] - -## Checklist - -Please check all the boxes that apply to this pull request using "x": - -- [ ] I have tested the changes locally and verified that they work as expected. -- [ ] I have added or updated the necessary documentation (README, API docs, etc.). -- [ ] I have added appropriate unit tests or functional tests for the changes made. -- [ ] I have followed the project's coding conventions and style guidelines. -- [ ] I have rebased my branch onto the latest commit of the main branch. -- [ ] I have squashed or reorganized my commits into logical units. -- [ ] I have added any necessary dependencies or packages to the project's build configuration. -- [ ] I have performed a self-review of my own code. - -## Screenshots (if applicable) - -[If the changes involve visual modifications, include screenshots or GIFs that demonstrate the changes.] - -## Additional Notes - -[Add any additional information or context that might be relevant to reviewers.] \ No newline at end of file diff --git a/ai-verify-apigw/.github/workflows/pre-merge-checks-apigw.yml b/ai-verify-apigw/.github/workflows/pre-merge-checks-apigw.yml deleted file mode 100644 index 807611fb0..000000000 --- a/ai-verify-apigw/.github/workflows/pre-merge-checks-apigw.yml +++ /dev/null @@ -1,123 +0,0 @@ -# Pre-merge Checks (for Nodejs/Typescript projects) -# 1. Unit tests with code coverage (jest) -# 2. Code quality analysis (lint) -# 3. Dependency analysis (vulnerabilities) -# 4. Dependency analysis (undesirable licenses) -# 5. Deploy reports generated from the above to GitHub Pages -name: Pre-Merge Checks - -on: - # Runs on pull request to main - #pull_request: - # branches: [master, main] - - # Run this workflow manually from Actions tab - workflow_dispatch: - -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write - -# Allow one concurrent deployment -concurrency: - group: ${{ github.repository }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - # Single deploy job since we're just deploying - pre-merge-checks: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - timeout-minutes: 15 - steps: - - # Checkout code - - name: Checkout code - uses: actions/checkout@v3 - - # Install dependencies - - name: Setup npm cache/install - uses: actions/setup-node@v3 - with: - node-version: 18 - cache: 'npm' - - run: | - npm install - npm i -D jest jest-html-reporter jest-json-reporter ts-jest @jest/globals badge-maker - npm i -D eslint eslint-formatter-html @typescript-eslint/eslint-plugin @typescript-eslint/parser - - # Compile typescript source files - - name: Compile typescript - if: contains(github.repository, 'ci-central') - run: | - npm i -D typescript - tsc - - # Unit Tests & Coverage - - name: Unit tests with coverage - timeout-minutes: 5 - run: | - set +e - npm run coverage - exit_code_jest=$? - node ci/createBadges.mjs test - node ci/createBadges.mjs coverage - set -e - if [ $exit_code_jest -ne 0 ]; then - echo "jest failed, exiting..." 
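
The new apigw test suites added further below in this patch (dataset.test.mjs, model.test.mjs, upload.test.mjs, and the lib/* tests) share one setup convention: ESM mocks are registered with jest.unstable_mockModule before the modules under test are pulled in via dynamic import, so that #lib/redisClient.mjs resolves to the mock instead of opening a real Redis connection. A minimal sketch of that ordering, assuming the repo's Jest ESM configuration and #-prefixed import aliases are in place; the assertions themselves are illustrative only:

```js
import { jest } from '@jest/globals';

describe('ESM mock ordering (sketch)', () => {
  let redis;

  beforeAll(async () => {
    // Register the mock BEFORE importing anything that uses the real client.
    jest.unstable_mockModule('#lib/redisClient.mjs', () => {
      return import('#mocks/lib/redisClient.mjs');
    });
    // A dynamic import made after mocking resolves to the mock module.
    const redisConnect = await import('#lib/redisClient.mjs');
    redis = redisConnect.default();
  });

  beforeEach(() => {
    jest.clearAllMocks();
  });

  it('exposes jest mock functions instead of a live connection', async () => {
    redis.exists.mockResolvedValue(1);
    await expect(redis.exists('service:some-id')).resolves.toBe(1);
    expect(redis.hGetAll).not.toHaveBeenCalled();
  });
});
```
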
- exit $exit_code_jest - fi - - # eslint - - name: Code quality analysis - lint - if: always() - run: | - set +e - npx eslint -f html -o eslint-report.html . - exit_code_lint=$? - npx eslint -f json -o eslint-report.json . - node ci/createBadges.mjs lint - set -e - if [ $exit_code_lint -ne 0 ]; then - echo "lint failed, exiting..." - exit $exit_code_lint - fi - - # npm audit - - name: Dependency analysis - vulnerabilities & licenses - if: always() - run: | - set +e - npm audit --json | npx npm-audit-markdown --output npm-audit-report.md - exit_code_audit=$? - npx markdown-to-html-cli --source npm-audit-report.md --output npm-audit-report.html -y - npx license-checker --summary --out licenses-found.txt -y - node ci/createBadges.mjs dependency - echo -e "License Check Summary for apigw\n" | cat - licenses-found.txt > license-report.txt - node ci/createBadges.mjs license - set -e - if [ $exit_code_audit -ne 0 ]; then - echo "npm audit failed, exiting..." - exit $exit_code_audit - fi - - ### Publish reports to Pages ### - - - name: Prepare artifact - if: always() - run: | - mv coverage _site - mv *.svg *.html *.md *.txt _site/ - - - name: Upload artifact - if: always() - uses: actions/upload-pages-artifact@v1 - - - name: Publish artifact to Pages - if: always() - id: deployment - uses: actions/deploy-pages@v1 diff --git a/ai-verify-apigw/__mocks__/lib/redisClient.mjs b/ai-verify-apigw/__mocks__/lib/redisClient.mjs index 2013b468c..9b466865a 100644 --- a/ai-verify-apigw/__mocks__/lib/redisClient.mjs +++ b/ai-verify-apigw/__mocks__/lib/redisClient.mjs @@ -9,6 +9,7 @@ const redisConnect = jest.fn().mockReturnValue({ keys: jest.fn(), pSubscribe: jest.fn(), hGetAll: jest.fn(), + exists: jest.fn(), }); export default redisConnect; \ No newline at end of file diff --git a/ai-verify-apigw/__tests__/graphql/dataset.test.mjs b/ai-verify-apigw/__tests__/graphql/dataset.test.mjs new file mode 100644 index 000000000..cbb44bd2b --- /dev/null +++ b/ai-verify-apigw/__tests__/graphql/dataset.test.mjs @@ -0,0 +1,300 @@ +import {jest} from '@jest/globals' +import mongoose from 'mongoose'; +import casual from '#testutil/mockData.mjs'; + + +describe("Test Dataset GraphQL queries and mutations", () => { + let server; + let ProjectModel; + let DatasetModel; + let data = []; + let projData; + + beforeAll(async() => { + + // set some mocks first + jest.unstable_mockModule("#lib/redisClient.mjs", () => { + return import("#mocks/lib/redisClient.mjs"); + }); + const models = await import("#models"); + DatasetModel = models.DatasetModel; + ProjectModel = models.ProjectModel; + + // create some initial data + const docs = casual.multipleDatasets(3); + for (const doc of docs) { + doc.__t = 'DatasetModel'; + const obj = new DatasetModel(doc); + let saveDoc = await obj.save(); + data.push(saveDoc.toObject()) + } + + const project = casual.project; + project.__t = 'ProjectModel'; + project.modelAndDatasets = { + groundTruthColumn: 'two_year_recid', + model: data[0]._id.toString(), + testDataset: data[0]._id.toString(), + groundTruthDataset: data[1]._id.toString(), + } + const obj = new ProjectModel(project); + projData = await obj.save(); + + let { createApolloServer } = await import("#testutil/testApolloServer.mjs"); + let resolver = await import('#graphql/modules/assets/dataset.mjs'); + server = createApolloServer(resolver.default); + }) + + + beforeEach(async () => { + jest.clearAllMocks(); + }) + + + it("should list all datasets", async () => { + const query = ` + query { + datasets { + id + name + filename + filePath + 
ctime + size + status + description + dataColumns { + id + name + datatype + label + } + numRows + numCols + serializer + dataFormat + errorMessages + type + } + } + ` + const response = await server.executeOperation({ + query, + }) + + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeUndefined(); + const docs = response.body.singleResult.data?.datasets; + expect(docs.length).toBeGreaterThanOrEqual(data.length); + for (let i=0; i e.id === data[i]._id.toString()); + expect(doc).toBeDefined(); + expect(doc.id).toBe(data[i]._id.toString()) + expect(doc.name).toBe(data[i].name); + } + }) + + + it("should not update dataset with invalid id", async() => { + + const dataLen = data.length; + const id = data[dataLen-1]._id; + + const query = ` + mutation($datasetID: ObjectID!, $dataset: DatasetInput!) { + updateDataset(datasetID:$datasetID, dataset:$dataset) { + id + } + } + ` + + //test missing id + const response = await server.executeOperation({ + query, + variables: { + //datasetID: id, + dataset: { + description: 'Mock Description1', + name: 'New File Name1.png', + status: 'Cancelled', + } + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + + //test invalid id + const response2 = await server.executeOperation({ + query, + variables: { + datasetID: mongoose.Types.ObjectId(), + dataset: { + description: 'Mock Description2', + name: 'New File Name2.png', + status: 'Cancelled', + } + } + }) + // check response + expect(response2.body.kind).toBe('single'); + expect(response2.body.singleResult.errors).toBeDefined(); + + // check not updated into db + const doc = await DatasetModel.findOne({ _id: mongoose.Types.ObjectId(id) }); + expect(doc.name).toEqual(data[dataLen-1].name); + expect(doc.status).toEqual(data[dataLen-1].status); + + }) + + + it("should update dataset", async() => { + + const dataLen = data.length; + const id = data[dataLen-1]._id; + + const query = ` + mutation($datasetID: ObjectID!, $dataset: DatasetInput!) { + updateDataset(datasetID:$datasetID, dataset:$dataset) { + id + } + } + ` + const response = await server.executeOperation({ + query, + variables: { + datasetID: id, + dataset: { + description: 'Mock Description', + name: 'New File Name.png', + status: 'Cancelled', + } + } + }) + + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeUndefined(); + + // check updated into db + const doc = await DatasetModel.findOne({ _id: mongoose.Types.ObjectId(id) }); + expect(doc.name).toEqual('New File Name.png'); + expect(doc.status).toEqual('Cancelled'); + expect(doc.description).toEqual('Mock Description'); + }) + + + it("should not delete dataset used by project", async() => { + + let projCount = await ProjectModel.countDocuments({ _id: projData._id }); + expect(projCount).toBe(1); + + const id1 = projData.modelAndDatasets.testDataset.id; + const id2 = projData.modelAndDatasets.groundTruthDataset.id; + + let count1 = await DatasetModel.countDocuments({ _id: id1 }); + expect(count1).toBe(1); + let count2 = await DatasetModel.countDocuments({ _id: id2 }); + expect(count2).toBe(1); + + const query = ` + mutation($id: ObjectID!) 
{ + deleteDataset(id:$id) + } + ` + // testDataset + const response = await server.executeOperation({ + query, + variables: { + id: id1, + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + let count1a = await DatasetModel.countDocuments({ _id: id1 }); + expect(count1a).toBe(1); + + // grountTruthDataset + const response2 = await server.executeOperation({ + query, + variables: { + id: id2, + } + }) + // check response + expect(response2.body.kind).toBe('single'); + expect(response2.body.singleResult.errors).toBeDefined(); + let count2a = await DatasetModel.countDocuments({ _id: id2 }); + expect(count2a).toBe(1); + }) + + + it("should delete dataset", async() => { + const dataLen = data.length; + const id = data[dataLen-1]._id; + + let count = await DatasetModel.countDocuments({ _id: id }); + expect(count).toBe(1); + + const query = ` + mutation($id: ObjectID!) { + deleteDataset(id:$id) + } + ` + const response = await server.executeOperation({ + query, + variables: { + id: id.toString(), + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeUndefined(); + expect(response.body.singleResult.data?.deleteDataset).toBe(id.toString()) + + // check deleted from db + count = await DatasetModel.countDocuments({ _id: id }); + expect(count).toBe(0); + }) + + + it("should not delete dataset with invalid id", async() => { + + // check initial db count + let count = await DatasetModel.countDocuments(); + + const query = ` + mutation($id: ObjectID!) { + deleteDataset(id:$id) + } + ` + //missing id + let response = await server.executeOperation({ + query, + variables: { + // id: id.toString(), + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + + //invalid id + response = await server.executeOperation({ + query, + variables: { + id: mongoose.Types.ObjectId() + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + + // check post db count + let counta = await DatasetModel.countDocuments(); + expect(counta).toBe(count); + + }) + +}); \ No newline at end of file diff --git a/ai-verify-apigw/__tests__/graphql/model.test.mjs b/ai-verify-apigw/__tests__/graphql/model.test.mjs new file mode 100644 index 000000000..9a1eaad4f --- /dev/null +++ b/ai-verify-apigw/__tests__/graphql/model.test.mjs @@ -0,0 +1,282 @@ +import {jest} from '@jest/globals' +import mongoose from 'mongoose'; +import casual from '#testutil/mockData.mjs'; + + +describe("Test Model GraphQL queries and mutations", () => { + let server; + let ProjectModel; + let ModelFileModel; + let data = []; + let projData; + + beforeAll(async() => { + + // set some mocks first + jest.unstable_mockModule("#lib/redisClient.mjs", () => { + return import("#mocks/lib/redisClient.mjs"); + }); + const models = await import("#models"); + ModelFileModel = models.ModelFileModel; + ProjectModel = models.ProjectModel; + + // create some initial data + const docs = casual.multipleModels(2); + for (const doc of docs) { + doc.__t = 'ModelFileModel'; + const obj = new ModelFileModel(doc); + let saveDoc = await obj.save(); + data.push(saveDoc.toObject()) + } + + // ProjectModel = models.ProjectModel; + const project = casual.project; + project.__t = 'ProjectModel'; + project.modelAndDatasets = { + groundTruthColumn: 'two_year_recid', + model: data[0]._id.toString(), + 
testDataset: data[0]._id.toString(), + groundTruthDataset: data[1]._id.toString(), + } + const obj = new ProjectModel(project); + projData = await obj.save(); + + let { createApolloServer } = await import("#testutil/testApolloServer.mjs"); + let resolver = await import('#graphql/modules/assets/model.mjs'); + server = createApolloServer(resolver.default); + }) + + + beforeEach(async () => { + jest.clearAllMocks(); + }) + + + it("should list all models", async () => { + const query = ` + query { + modelFiles { + id + name + filename + filePath + ctime + size + status + description + serializer + modelFormat + modelType + errorMessages + type + } + } + ` + const response = await server.executeOperation({ + query, + }) + + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeUndefined(); + const docs = response.body.singleResult.data?.modelFiles; + expect(docs.length).toBeGreaterThanOrEqual(data.length); + for (let i=0; i e.id === data[i]._id.toString()); + expect(doc).toBeDefined(); + expect(doc.id).toBe(data[i]._id.toString()) + expect(doc.name).toBe(data[i].name); + } + }) + + + it("should not update model with invalid id", async() => { + + const dataLen = data.length; + const id = data[dataLen-1]._id; + + const query = ` + mutation($modelFileID: ObjectID!, $modelFile: ModelFileInput!) { + updateModel(modelFileID:$modelFileID, modelFile:$modelFile) { + id + } + } + ` + + //test missing id + const response = await server.executeOperation({ + query, + variables: { + //modelFileID: id, + modelFile: { + description: 'Mock Description1', + modelType: 'Regression', + name: 'New File Name1.png', + status: 'Cancelled', + } + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + + //test invalid id + const response2 = await server.executeOperation({ + query, + variables: { + modelFileID: mongoose.Types.ObjectId(), + modelFile: { + description: 'Mock Description1', + modelType: 'Regression', + name: 'New File Name1.png', + status: 'Cancelled', + } + } + }) + // check response + expect(response2.body.kind).toBe('single'); + expect(response2.body.singleResult.errors).toBeDefined(); + + // check not updated into db + const doc = await ModelFileModel.findOne({ _id: mongoose.Types.ObjectId(id) }); + expect(doc.name).toEqual(data[dataLen-1].name); + expect(doc.status).toEqual(data[dataLen-1].status); + + }) + + + it("should update model", async() => { + + const dataLen = data.length; + const id = data[dataLen-1]._id; + + const query = ` + mutation($modelFileID: ObjectID!, $modelFile: ModelFileInput!) 
{ + updateModel(modelFileID:$modelFileID, modelFile:$modelFile) { + id + } + } + ` + const response = await server.executeOperation({ + query, + variables: { + modelFileID: id, + modelFile: { + description: 'Mock Description1', + modelType: 'Regression', + name: 'New File Name1.png', + status: 'Cancelled', + } + } + }) + + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeUndefined(); + + // check updated into db + const doc = await ModelFileModel.findOne({ _id: mongoose.Types.ObjectId(id) }); + expect(doc.name).toEqual('New File Name1.png'); + expect(doc.modelType).toEqual('Regression'); + expect(doc.status).toEqual('Cancelled'); + expect(doc.description).toEqual('Mock Description1'); + }) + + + it("should not delete model used by project", async() => { + + let projCount = await ProjectModel.countDocuments({ _id: projData._id }); + expect(projCount).toBe(1); + + const id1 = projData.modelAndDatasets.model.id; + + let count1 = await ModelFileModel.countDocuments({ _id: id1 }); + expect(count1).toBe(1); + + const query = ` + mutation($id: ObjectID!) { + deleteModelFile(id:$id) + } + ` + const response = await server.executeOperation({ + query, + variables: { + id: id1, + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + let count1a = await ModelFileModel.countDocuments({ _id: id1 }); + expect(count1a).toBe(1); + + }) + + + it("should delete model", async() => { + const dataLen = data.length; + const id = data[dataLen-1]._id; + + let count = await ModelFileModel.countDocuments({ _id: id }); + expect(count).toBe(1); + + const query = ` + mutation($id: ObjectID!) { + deleteModelFile(id:$id) + } + ` + const response = await server.executeOperation({ + query, + variables: { + id: id.toString(), + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeUndefined(); + expect(response.body.singleResult.data?.deleteModelFile).toBe(id.toString()) + + // check deleted from db + count = await ModelFileModel.countDocuments({ _id: id }); + expect(count).toBe(0); + }) + + + it("should not delete model with invalid id", async() => { + + // check initial db count + let count = await ModelFileModel.countDocuments(); + + const query = ` + mutation($id: ObjectID!) 
{ + deleteModelFile(id:$id) + } + ` + //missing id + let response = await server.executeOperation({ + query, + variables: { + // id: id.toString(), + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + + //invalid id + response = await server.executeOperation({ + query, + variables: { + id: mongoose.Types.ObjectId() + } + }) + // check response + expect(response.body.kind).toBe('single'); + expect(response.body.singleResult.errors).toBeDefined(); + + // check post db count + let counta = await ModelFileModel.countDocuments(); + expect(counta).toBe(count); + + }) + +}); \ No newline at end of file diff --git a/ai-verify-apigw/__tests__/graphql/project.test.mjs b/ai-verify-apigw/__tests__/graphql/project.test.mjs index 1c3f21c15..1ef035963 100644 --- a/ai-verify-apigw/__tests__/graphql/project.test.mjs +++ b/ai-verify-apigw/__tests__/graphql/project.test.mjs @@ -1,12 +1,15 @@ import {jest} from '@jest/globals' import mongoose from 'mongoose'; import casual from '#testutil/mockData.mjs'; +import { mockModel, mockTestDataset } from '../../testutil/mockData.mjs'; describe("Test Project GraphQL queries", () => { let server; let ProjectModel; let ProjectTemplateModel; + let ModelFileModel; + let DatasetFileModel; let testEngineQueue; let data = []; let templateData = []; @@ -31,12 +34,22 @@ describe("Test Project GraphQL queries", () => { const models = await import("#models"); ProjectModel = models.ProjectModel; ProjectTemplateModel = models.ProjectTemplateModel; - // make sure collection empty - // await ProjectModel.deleteMany(); + ModelFileModel = models.ModelFileModel; + DatasetFileModel = models.DatasetModel; // create some initial data + const model = new ModelFileModel(mockModel); + const dataset = new DatasetFileModel(mockTestDataset); + const savedModel = await model.save(); + const savedDataset = await dataset.save(); const docs = casual.multipleProjects(2); for (const doc of docs) { doc.__t = 'ProjectModel'; + doc.modelAndDatasets = { + groundTruthColumn: 'two_year_recid', + model: savedModel._id, + testDataset: savedDataset._id, + groundTruthDataset: savedDataset._id, + } const obj = new ProjectModel(doc); let saveDoc = await obj.save(); data.push(saveDoc.toObject()) @@ -420,7 +433,22 @@ mutation($project: ProjectInput!, $templateId: String!) { company } pages { - layouts + layouts { + h + i + isBounded + isDraggable + isResizable + maxH + maxW + minH + minW + resizeHandles + static + w + x + y + } } } } @@ -476,11 +504,10 @@ mutation($projectId: ObjectID!, $templateInfo: ProjectInformationInput!) { it("should generate report without tests", async() => { const proj = data[0]; - const mad = casual.modelAndDataset; const query = ` -mutation($projectID: ObjectID!, $algorithms: [String]!, $modelAndDatasets: ModelAndDatasetsReportInput) { - generateReport(projectID:$projectID, algorithms:$algorithms, modelAndDatasets:$modelAndDatasets) { +mutation($projectID: ObjectID!, $algorithms: [String]!) 
{ + generateReport(projectID:$projectID, algorithms:$algorithms) { projectID status } @@ -491,7 +518,6 @@ mutation($projectID: ObjectID!, $algorithms: [String]!, $modelAndDatasets: Model variables: { projectID: proj._id.toString(), algorithms: [], - modelAndDatasets: mad, } }) @@ -513,8 +539,8 @@ mutation($projectID: ObjectID!, $algorithms: [String]!, $modelAndDatasets: Model const algorithms = proj.testInformationData.map(test => test.algorithmGID); const query = ` -mutation($projectID: ObjectID!, $algorithms: [String]!, $modelAndDatasets: ModelAndDatasetsReportInput) { - generateReport(projectID:$projectID, algorithms:$algorithms, modelAndDatasets:$modelAndDatasets) { +mutation($projectID: ObjectID!, $algorithms: [String]!) { + generateReport(projectID:$projectID, algorithms:$algorithms) { projectID status } @@ -525,7 +551,6 @@ mutation($projectID: ObjectID!, $algorithms: [String]!, $modelAndDatasets: Model variables: { projectID: proj._id.toString(), algorithms, - modelAndDatasets: mad, } }) diff --git a/ai-verify-apigw/__tests__/graphql/projectTemplate.test.mjs b/ai-verify-apigw/__tests__/graphql/projectTemplate.test.mjs index 1a20feff5..72636b298 100644 --- a/ai-verify-apigw/__tests__/graphql/projectTemplate.test.mjs +++ b/ai-verify-apigw/__tests__/graphql/projectTemplate.test.mjs @@ -1,4 +1,4 @@ -// import {jest} from '@jest/globals'; +import {jest} from '@jest/globals'; import mongoose from 'mongoose'; import casual from '#testutil/mockData.mjs'; @@ -8,6 +8,9 @@ describe("Test Project Template GraphQL queries", () => { let data = []; beforeAll(async() => { + jest.unstable_mockModule("#lib/redisClient.mjs", () => { + return import("#mocks/lib/redisClient.mjs"); + }); const models = await import("#models"); ProjectTemplateModel = models.ProjectTemplateModel; // make sure collection empty diff --git a/ai-verify-apigw/__tests__/lib/report.test.mjs b/ai-verify-apigw/__tests__/lib/report.test.mjs index 779bedbc9..90c47f418 100644 --- a/ai-verify-apigw/__tests__/lib/report.test.mjs +++ b/ai-verify-apigw/__tests__/lib/report.test.mjs @@ -10,6 +10,9 @@ describe("Test module report.mjs", () => { beforeAll(async() => { // mocking + jest.unstable_mockModule("#lib/redisClient.mjs", () => { + return import("#mocks/lib/redisClient.mjs"); + }); jest.unstable_mockModule("#lib/apolloPubSub.mjs", () => { return import('#mocks/lib/apolloPubSub.mjs'); }); diff --git a/ai-verify-apigw/__tests__/lib/testEngineQueue.test.mjs b/ai-verify-apigw/__tests__/lib/testEngineQueue.test.mjs index 60b15c92b..659767649 100644 --- a/ai-verify-apigw/__tests__/lib/testEngineQueue.test.mjs +++ b/ai-verify-apigw/__tests__/lib/testEngineQueue.test.mjs @@ -114,4 +114,78 @@ describe("Test module testEngineQueue.mjs", () => { expect(report.generateReport).toHaveBeenCalledWith(reportData._id.toString()) }) +}) + +//assets +describe("Test asset functions in module testEngineQueue.mjs", () => { + let redis; + let testEngineQueue; + let worker; + let onCall = jest.fn(); + + let datasetData; + let modelData; + + beforeAll(async() => { + // mocking + jest.unstable_mockModule("node:worker_threads", () => { + const worker_threads = jest.createMockFromModule('node:worker_threads'); + return { + __esModule: true, + default: worker_threads, + Worker: worker_threads.Worker.mockImplementation(() => ({ + on: onCall, + })) + } + }); + worker = await import("node:worker_threads"); + jest.unstable_mockModule("#lib/redisClient.mjs", () => { + return import("#mocks/lib/redisClient.mjs"); + }); + const redisConnect = await 
import('#lib/redisClient.mjs'); + redis = redisConnect.default(); + + const models = await import("#models"); + + const datasetDoc = new models.DatasetModel({filePath: 'test/file/path/mockdata.sav', name: 'mock dataset', filename: 'mock dataset'}); + datasetData = await datasetDoc.save(); + const modelFileDoc = new models.ModelFileModel({filePath: 'test/file/path/mockdata.sav', name: 'mock dataset', filename: 'mock dataset'}); + modelData = await modelFileDoc.save(); + + testEngineQueue = await import("#lib/testEngineQueue.mjs"); + }) + + afterAll(async () => { + jest.clearAllMocks(); + }) + + + it("should queue datasets", async() => { + redis.hSet.mockResolvedValue(); + redis.xAdd.mockResolvedValue(); + await testEngineQueue.queueDataset(datasetData); + expect(redis.hSet).toHaveBeenCalled(); + expect(redis.xAdd).toHaveBeenCalled(); + const xAddLastCall = redis.xAdd.mock.lastCall; + expect(xAddLastCall[0]).toBe('TestEngineService'); + expect(xAddLastCall[1]).toBe('*'); + expect(xAddLastCall[2]).toHaveProperty('validateDataset'); + redis.hSet.mockClear(); + redis.xAdd.mockClear(); + }) + + it("should queue models", async() => { + redis.hSet.mockResolvedValue(); + redis.xAdd.mockResolvedValue(); + await testEngineQueue.queueModel(modelData); + expect(redis.hSet).toHaveBeenCalled(); + expect(redis.xAdd).toHaveBeenCalled(); + const xAddLastCall = redis.xAdd.mock.lastCall; + expect(xAddLastCall[0]).toBe('TestEngineService'); + expect(xAddLastCall[1]).toBe('*'); + expect(xAddLastCall[2]).toHaveProperty('validateModel'); + redis.hSet.mockClear(); + redis.xAdd.mockClear(); + }) + }) \ No newline at end of file diff --git a/ai-verify-apigw/__tests__/lib/testEngineWorker.test.mjs b/ai-verify-apigw/__tests__/lib/testEngineWorker.test.mjs index ca3889393..24d76d96d 100644 --- a/ai-verify-apigw/__tests__/lib/testEngineWorker.test.mjs +++ b/ai-verify-apigw/__tests__/lib/testEngineWorker.test.mjs @@ -12,6 +12,13 @@ describe("Test module testEngineWorker.mjs", () => { let reportData; const key = "task:63b3f9764326cf3aa1ffd2ed-63b637da0ddd17d140912582"; + let DatasetModel; + let datasetData; + let cancelledDatasetData; + let ModelFileModel; + let modelData; + let cancelledModelData; + beforeAll(async() => { // mocking jest.unstable_mockModule("node:worker_threads", () => { @@ -56,6 +63,18 @@ describe("Test module testEngineWorker.mjs", () => { const projectData = await projectDoc.save(); const doc = new ReportModel(casual.report(projectData, "RunningTests")); reportData = await doc.save(); + + DatasetModel = models.DatasetModel; + const datasetDoc = new DatasetModel({filePath: 'test/file/path/mockdata.sav', name: 'mock dataset', filename: 'mock dataset'}); + datasetData = await datasetDoc.save(); + const cancelledDatasetDoc = new DatasetModel({filePath: 'test/file/path/mockdata.sav', name: 'mock dataset', filename: 'mock dataset', status: 'Cancelled'}); + cancelledDatasetData = await cancelledDatasetDoc.save(); + + ModelFileModel = models.ModelFileModel; + const modelDoc = new ModelFileModel({filePath: 'test/file/path/mockdata.sav', name: 'mock model', filename: 'mock model'}); + modelData = await modelDoc.save(); + const cancelledModelDoc = new DatasetModel({filePath: 'test/file/path/mockdata.sav', name: 'mock dataset', filename: 'mock dataset', status: 'Cancelled'}); + cancelledModelData = await cancelledModelDoc.save(); redis.keys.mockResolvedValue([]); testEngineWorker = await import("#lib/testEngineWorker.mjs"); @@ -201,5 +220,175 @@ describe("Test module testEngineWorker.mjs", () => { 
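
The queue and worker tests around this point exercise the asset-validation flow end to end: queueDataset/queueModel are expected to write a hash for the asset and push a validateDataset/validateModel entry onto the TestEngineService Redis stream, while the worker reacts to keyspace notifications on service:<id> keys, reads the ServiceResponse hash, updates the Dataset/ModelFile document (leaving assets already marked Cancelled untouched), and publishes a VALIDATE_*_STATUS_UPDATED event. The real implementations live in #lib/testEngineQueue.mjs and #lib/testEngineWorker.mjs, which this diff only tests; the sketch below is a simplified reading of the flow the tests assert, and the hash/stream payload shapes, pubsub payload keys, and subscription pattern are assumptions:

```js
// Simplified sketch of the flow asserted by the new testEngineQueue/testEngineWorker
// tests. Model and event names match the tests; payload shapes, pubsub payload keys,
// and the pSubscribe pattern are assumptions.
import redisConnect from '#lib/redisClient.mjs';
import pubsub from '#lib/apolloPubSub.mjs';
import { DatasetModel, ModelFileModel } from '#models';

const redis = redisConnect();

// Producer side: what queueDataset is expected to do, per the tests.
export async function queueDatasetSketch(dataset) {
  const key = `service:${dataset._id}`;
  await redis.hSet(key, { type: 'ServiceRequest', status: 'Pending' }); // assumed hash shape
  await redis.xAdd('TestEngineService', '*', {
    validateDataset: JSON.stringify({ serviceId: key, filePath: dataset.filePath }), // assumed payload
  });
}

// Consumer side: keyspace notifications arrive on "__keyspace@0__:service:<id>".
export async function onServiceKeyEvent(_message, channel) {
  const key = channel.replace('__keyspace@0__:', '');
  if (!(await redis.exists(key))) return;

  const resp = await redis.hGetAll(key); // the ServiceResponse hash
  if (resp.type !== 'ServiceResponse' || resp.status !== 'done') return;

  if (resp.serviceType === 'validateDataset') {
    const doc = await DatasetModel.findById(resp.datasetId);
    if (!doc || doc.status === 'Cancelled') return; // cancelled uploads are ignored
    doc.status = resp.validationResult === 'valid' ? 'Valid' : 'Invalid';
    await doc.save();
    pubsub.publish('VALIDATE_DATASET_STATUS_UPDATED', { validateDatasetStatusUpdated: doc }); // payload key assumed
  } else if (resp.serviceType === 'validateModel') {
    const doc = await ModelFileModel.findById(resp.modelFileId);
    if (!doc || doc.status === 'Cancelled') return;
    doc.status = resp.validationResult === 'valid' ? 'Valid' : 'Invalid';
    await doc.save();
    pubsub.publish('VALIDATE_MODEL_STATUS_UPDATED', { validateModelStatusUpdated: doc }); // payload key assumed
  }
}

// Subscription (pattern assumed): react to changes on any service:* key.
await redis.pSubscribe('__keyspace@0__:service:*', onServiceKeyEvent);
```
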
pubsub.default.publish.mockClear(); }) + it("should not process validateDataset ServiceResponse if dataset status is cancelled", async() => { + redis.exists.mockResolvedValue(1); + const resp = { + type: 'ServiceResponse', + serviceType: 'validateDataset', + datasetId: cancelledDatasetData._id.toString(), + status: 'done', + validationResult: 'valid', + serializedBy: 'pickle', + dataFormat: 'pandas', + columns: '[{"name": "age_cat_cat", "datatype": "int64"}, {"name": "sex_code", "datatype": "int64"}]', + numRows: '1235', + numCols: '2', + logFile: 'testlog.log', + } + redis.hGetAll.mockResolvedValue(resp); + pSubscribeCallback(null, `__keyspace@0__:service:${cancelledDatasetData._id.toString()}`) + await new Promise((r) => setTimeout(r, 500)); + // check updated + const updatedDoc = await DatasetModel.findById(cancelledDatasetData._id); + expect(updatedDoc.status).toBe('Cancelled'); + + // check published + expect(pubsub.default.publish).not.toHaveBeenCalled(); + redis.hGetAll.mockClear(); + pubsub.default.publish.mockClear(); + }) + + it("should not process validateModel ServiceResponse if dataset status is cancelled", async() => { + redis.exists.mockResolvedValue(1); + const resp = { + type: 'ServiceResponse', + serviceType: 'validateModel', + modelFileId: cancelledModelData._id.toString(), + status: 'done', + validationResult: 'valid', + serializedBy: 'joblib', + modelFormat: 'sklearn', + logFile: 'testlog.log', + } + redis.hGetAll.mockResolvedValue(resp); + pSubscribeCallback(null, `__keyspace@0__:service:${cancelledModelData._id.toString()}`) + await new Promise((r) => setTimeout(r, 500)); + // check updated + + const updatedDoc = await DatasetModel.findById(cancelledModelData._id); + expect(updatedDoc.status).toBe('Cancelled'); + + // check published + expect(pubsub.default.publish).not.toHaveBeenCalled(); + redis.hGetAll.mockClear(); + pubsub.default.publish.mockClear(); + }) + + it("should process validateDataset ServiceResponse and update dataset (valid)", async() => { + redis.exists.mockResolvedValue(1); + const resp = { + type: 'ServiceResponse', + status: 'done', + validationResult: 'valid', + serializedBy: 'pickle', + dataFormat: 'pandas', + columns: '[{"name": "age_cat_cat", "datatype": "int64"}, {"name": "sex_code", "datatype": "int64"}]', + numRows: 1235, + numCols: 2, + serviceType: 'validateDataset', + datasetId: datasetData._id.toString(), + logFile: 'testlog.log', + } + + redis.hGetAll.mockResolvedValue(resp); + pSubscribeCallback(null, `__keyspace@0__:service:${datasetData._id.toString()}`) + await new Promise((r) => setTimeout(r, 500)); + const updatedDoc = await DatasetModel.findById(datasetData._id); + //check updated + expect(updatedDoc.status).toBe('Valid'); + expect(updatedDoc.numRows).toBe(1235); + expect(updatedDoc.numCols).toBe(2); + expect(updatedDoc.serializer).toBe('pickle'); + expect(updatedDoc.dataFormat).toBe('pandas'); + + expect(pubsub.default.publish).toHaveBeenCalled(); + const call = pubsub.default.publish.mock.lastCall; + expect(call[0]).toBe('VALIDATE_DATASET_STATUS_UPDATED') + redis.hGetAll.mockClear(); + pubsub.default.publish.mockClear(); + }) + + it("should process validateDataset ServiceResponse and update dataset (invalid)", async() => { + redis.exists.mockResolvedValue(1); + const resp = { + type: 'ServiceResponse', + serviceType: 'validateDataset', + datasetId: datasetData._id.toString(), + status: 'done', + validationResult: 'invalid', + errorMessages: '[{"category": "DATA_OR_MODEL_ERROR", "code": "CDATx00059", "description": "The dataset 
/mock/file/path.sav is not supported. Please upload a supported dataset: Unable to get data instance: There was an error loading dataset(file): /mock/file/path.sav (There was an error deserializing dataset: /mock/file/path.sav)", "severity": "critical", "component": "service_processing.py"}]', + logFile: 'testlog.log', + } + redis.hGetAll.mockResolvedValue(resp); + pSubscribeCallback(null, `__keyspace@0__:service:${datasetData._id.toString()}`) + await new Promise((r) => setTimeout(r, 500)); + const updatedDoc = await DatasetModel.findById(datasetData._id); + // check updated + expect(updatedDoc.status).toBe('Invalid'); + expect(updatedDoc.errorMessages).toBe('The dataset /mock/file/path.sav is not supported. Please upload a supported dataset: Unable to get data instance: There was an error loading dataset(file): /mock/file/path.sav (There was an error deserializing dataset: /mock/file/path.sav)'); + // check published + expect(pubsub.default.publish).toHaveBeenCalled(); + const call = pubsub.default.publish.mock.lastCall; + expect(call[0]).toBe('VALIDATE_DATASET_STATUS_UPDATED') + redis.hGetAll.mockClear(); + pubsub.default.publish.mockClear(); + }) + + it("should process validateModel ServiceResponse and update model (valid)", async() => { + redis.exists.mockResolvedValue(1); + const resp = { + type: 'ServiceResponse', + serviceType: 'validateModel', + modelFileId: modelData._id.toString(), + status: 'done', + validationResult: 'valid', + serializedBy: 'joblib', + modelFormat: 'sklearn', + logFile: 'testlog.log', + } + redis.hGetAll.mockResolvedValue(resp); + pSubscribeCallback(null, `__keyspace@0__:service:${modelData._id.toString()}`) + await new Promise((r) => setTimeout(r, 500)); + const updatedDoc = await ModelFileModel.findById(modelData._id); + //check updated + expect(updatedDoc.status).toBe('Valid'); + expect(updatedDoc.serializer).toBe('joblib'); + expect(updatedDoc.modelFormat).toBe('sklearn'); + + // check published + expect(pubsub.default.publish).toHaveBeenCalled(); + const call = pubsub.default.publish.mock.lastCall; + expect(call[0]).toBe('VALIDATE_MODEL_STATUS_UPDATED') + redis.hGetAll.mockClear(); + pubsub.default.publish.mockClear(); + }) + + it("should process validateModel ServiceResponse and update model (invalid)", async() => { + redis.exists.mockResolvedValue(1); + const resp = { + type: 'ServiceResponse', + serviceType: 'validateModel', + modelFileId: modelData._id.toString(), + status: 'done', + validationResult: 'invalid', + errorMessages: '[{"category": "DATA_OR_MODEL_ERROR", "code": "CDATx00059", "description": "The model /mock/file/path.sav is not supported. Please upload a supported model: Unable to get data instance: There was an error loading model(file): /mock/file/path.sav (There was an error deserializing model: /mock/file/path.sav)", "severity": "critical", "component": "service_processing.py"}]', + logFile: 'testlog.log', + } + redis.hGetAll.mockResolvedValue(resp); + pSubscribeCallback(null, `__keyspace@0__:service:${modelData._id.toString()}`) + await new Promise((r) => setTimeout(r, 500)); + const updatedDoc = await ModelFileModel.findById(modelData._id); + //check updated + expect(updatedDoc.status).toBe('Invalid'); + expect(updatedDoc.errorMessages).toBe('The model /mock/file/path.sav is not supported. 
Please upload a supported model: Unable to get data instance: There was an error loading model(file): /mock/file/path.sav (There was an error deserializing model: /mock/file/path.sav)'); + + // check published + expect(pubsub.default.publish).toHaveBeenCalled(); + const call = pubsub.default.publish.mock.lastCall; + expect(call[0]).toBe('VALIDATE_MODEL_STATUS_UPDATED') + redis.hGetAll.mockClear(); + pubsub.default.publish.mockClear(); + }) + }) \ No newline at end of file diff --git a/ai-verify-apigw/__tests__/routes/template.test.mjs b/ai-verify-apigw/__tests__/routes/template.test.mjs index aa3d7b761..f06713df6 100644 --- a/ai-verify-apigw/__tests__/routes/template.test.mjs +++ b/ai-verify-apigw/__tests__/routes/template.test.mjs @@ -2,6 +2,7 @@ import supertest from 'supertest'; import casual from '#testutil/mockData.mjs'; import { setupServerWithRouter } from '#testutil/testExpressRouter.mjs'; +import { jest } from '@jest/globals'; describe("Test /template route", () => { let server; @@ -11,6 +12,9 @@ describe("Test /template route", () => { const templateCID = "testTemplate"; beforeAll(async() => { + jest.unstable_mockModule("#lib/redisClient.mjs", () => { + return import("#mocks/lib/redisClient.mjs"); + }); const router = await import("#routes/template.mjs"); const app = setupServerWithRouter("/template", router.default); request = supertest(app); diff --git a/ai-verify-apigw/__tests__/routes/upload.test.mjs b/ai-verify-apigw/__tests__/routes/upload.test.mjs new file mode 100644 index 000000000..11aa1223a --- /dev/null +++ b/ai-verify-apigw/__tests__/routes/upload.test.mjs @@ -0,0 +1,119 @@ +import {afterEach, jest} from '@jest/globals'; +import supertest from 'supertest'; +import mockfs from 'mock-fs'; + +import { setupServerWithRouter } from '#testutil/testExpressRouter.mjs'; +import multer from 'multer'; + + +describe("Test /upload route", () => { + let server; + let request; + + beforeAll(async() => { + + jest.unstable_mockModule("#lib/testEngineQueue.mjs", () => { + return import("#mocks/lib/testEngineQueue.mjs"); + }); + + jest.unstable_mockModule("#lib/testEngineWorker.mjs", () => { + return import("#mocks/lib/testEngineWorker.mjs"); + }); + + jest.unstable_mockModule("#lib/redisClient.mjs", () => { + return import("#mocks/lib/redisClient.mjs"); + }); + + jest.mock('multer', () => { + const multer = () => ({ + array: jest.fn(() => 'default') + .mockImplementationOnce(() => { + return (req, res, next) => { + req.files = [ + { + fieldname: 'myFiles', + originalname: 'mockdata.sav', + encoding: '7bit', + mimetype: 'application/octet-stream', + destination: '/tmp', + filename: 'mockdata.sav', + path: '/tmp/mockdata.sav', + size: 2195 + }, + ]; + return next(); + }; + }) + .mockImplementationOnce(() => { + return (req, res, next) => { + req.files = [ + { + fieldname: 'myModelFiles', + originalname: 'mockmodel.sav', + encoding: '7bit', + mimetype: 'application/octet-stream', + destination: '/tmp', + filename: 'mockmodel.sav', + path: '/tmp/mockmodel.sav', + size: 132878 + } + ]; + req.body = { + myModelFolders: '', + myModelType: 'Classification' + } + return next(); + }; + }) + }) + multer.diskStorage = () => jest.fn() + multer.memoryStorage = () => jest.fn() + return multer + }) + + const router = await import("#routes/upload.mjs"); + const app = setupServerWithRouter("/upload", router.default); + request = supertest(app); + server = app.listen(4010); + + app.use(multer({}).array()); + + }) + + afterAll(done => { + if (server) + server.close(); + done(); + }) + + beforeEach(() => 
{ + jest.clearAllMocks(); + mockfs({ + '/tmp/mockdata.sav': 'mock data content', + '/tmp/mockmodel.sav': 'mock model content', + }); + }) + + afterEach(() => { + mockfs.restore(); + }) + + it("/upload/data should upload dataset file", async () => { + + request.post("/upload/data") + .then( response => { + expect(response.status).toBe(201) + }) + + }) + + it("/upload/model should upload model file", async () => { + + request.post("/upload/model") + .then( response => { + expect(response.status).toBe(201) + }) + + }) + +}) diff --git a/ai-verify-apigw/lib/report.mjs b/ai-verify-apigw/lib/report.mjs index 7fda1404c..8743f194a 100644 --- a/ai-verify-apigw/lib/report.mjs +++ b/ai-verify-apigw/lib/report.mjs @@ -31,7 +31,7 @@ export async function generateReport(reportId) { // generate report // added --no-sandbox param to make it work in containerized env. - const browser = await puppeteer.launch({args: ['--no-sandbox']}); + const browser = await puppeteer.launch({args: ['--no-sandbox'], headless:'new'}); const page = await browser.newPage(); const url = `${WEB_REPORT_URL}/${report.project.toString()}`; // const pdf_name = `report_${report.project.toString()}.pdf`; @@ -42,6 +42,17 @@ export async function generateReport(reportId) { path: pdf_path, printBackground: true, format: 'A4', + margin: { + top: 10, + bottom: 33, + }, + displayHeaderFooter: true, + footerTemplate: ` + +
+        Page <span class="pageNumber"></span> of <span class="totalPages"></span> +      
+ ` }); // update report status diff --git a/ai-verify-apigw/package-lock.json b/ai-verify-apigw/package-lock.json index 2bb85ddef..3572ff60b 100644 --- a/ai-verify-apigw/package-lock.json +++ b/ai-verify-apigw/package-lock.json @@ -30,7 +30,7 @@ "mongoose": "^6.8.2", "multer": "^1.4.5-lts.1", "node-cache": "^5.1.2", - "puppeteer": "^19.4.1", + "puppeteer": "^20.7.1", "redis": "^4.5.1", "ws": "^8.11.0" }, @@ -45,6 +45,7 @@ "jest": "^29.5.0", "jest-html-reporter": "^3.7.1", "jest-json-reporter": "^1.2.2", + "mock-fs": "^5.2.0", "mongodb-memory-server": "^8.12.1", "supertest": "^6.3.3", "ts-jest": "^29.1.0" @@ -4191,6 +4192,55 @@ "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" }, + "node_modules/@puppeteer/browsers": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/@puppeteer/browsers/-/browsers-1.4.1.tgz", + "integrity": "sha512-H43VosMzywHCcYcgv0GXXopvwnV21Ud9g2aXbPlQUJj1Xcz9V0wBwHeFz6saFhx/3VKisZfI1GEKEOhQCau7Vw==", + "dependencies": { + "debug": "4.3.4", + "extract-zip": "2.0.1", + "progress": "2.0.3", + "proxy-agent": "6.2.1", + "tar-fs": "2.1.1", + "unbzip2-stream": "1.4.3", + "yargs": "17.7.1" + }, + "bin": { + "browsers": "lib/cjs/main-cli.js" + }, + "engines": { + "node": ">=16.3.0" + }, + "peerDependencies": { + "typescript": ">= 4.7.4" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@puppeteer/browsers/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@puppeteer/browsers/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, "node_modules/@redis/bloom": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.1.0.tgz", @@ -4888,7 +4938,6 @@ "version": "8.8.2", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", - "dev": true, "bin": { "acorn": "bin/acorn" }, @@ -4905,6 +4954,14 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/adm-zip": { "version": "0.5.10", "resolved": "https://registry.npmjs.org/adm-zip/-/adm-zip-0.5.10.tgz", @@ -4917,6 +4974,7 @@ "version": "6.0.2", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, "dependencies": { "debug": "4" }, @@ -4928,6 +4986,7 @@ "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + 
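
On the report.mjs change above: puppeteer 20 is launched with headless: 'new', and page.pdf now reserves a bottom margin and renders a footer through displayHeaderFooter/footerTemplate. Puppeteer's documented placeholders for page numbering are the pageNumber and totalPages classes; a minimal, self-contained sketch of such a footer follows (the target URL, styling, and output path are assumptions, not taken from report.mjs):

```js
// Minimal sketch of a page-numbering PDF footer with puppeteer >= 20.
// "pageNumber"/"totalPages" are puppeteer's documented placeholder classes;
// the target URL, styling, and output path here are assumptions.
import puppeteer from 'puppeteer';

const browser = await puppeteer.launch({ args: ['--no-sandbox'], headless: 'new' });
const page = await browser.newPage();
await page.goto('https://example.com', { waitUntil: 'networkidle0' });

await page.pdf({
  path: 'report.pdf',
  format: 'A4',
  printBackground: true,
  margin: { top: 10, bottom: 33 },
  displayHeaderFooter: true,
  footerTemplate: `
    <div style="width:100%; font-size:8px; text-align:center;">
      Page <span class="pageNumber"></span> of <span class="totalPages"></span>
    </div>`,
});

await browser.close();
```
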
"dev": true, "dependencies": { "ms": "2.1.2" }, @@ -4943,7 +5002,8 @@ "node_modules/agent-base/node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true }, "node_modules/ajv": { "version": "6.12.6", @@ -4989,7 +5049,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, "engines": { "node": ">=8" } @@ -5056,6 +5115,17 @@ "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==", "dev": true }, + "node_modules/ast-types": { + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz", + "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/async-mutex": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.2.tgz", @@ -5298,7 +5368,8 @@ "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true }, "node_modules/base64-js": { "version": "1.5.1", @@ -5319,6 +5390,14 @@ } ] }, + "node_modules/basic-ftp": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.0.3.tgz", + "integrity": "sha512-QHX8HLlncOLpy54mh+k/sWIFd0ThmRqwe9ZjELybGZK+tZ8rUb9VO0saKJUROTbE+KhzDUT7xziGpGrW8Kmd+g==", + "engines": { + "node": ">=10.0.0" + } + }, "node_modules/binary-search": { "version": "1.3.6", "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz", @@ -5373,6 +5452,7 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -5599,6 +5679,17 @@ "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" }, + "node_modules/chromium-bidi": { + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/chromium-bidi/-/chromium-bidi-0.4.11.tgz", + "integrity": "sha512-p03ajLhlQ5gebw3cmbDBFmBc2wnJM5dnXS8Phu6mblGn/KQd76yOVL5VwE0VAisa7oazNfKGTaXlIZ8Q5Bb9OA==", + "dependencies": { + "mitt": "3.0.0" + }, + "peerDependencies": { + "devtools-protocol": "*" + } + }, "node_modules/ci-info": { "version": "3.8.0", "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", @@ -5624,7 +5715,6 @@ "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "dev": true, "dependencies": { "string-width": "^4.2.0", 
"strip-ansi": "^6.0.1", @@ -5705,7 +5795,8 @@ "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true }, "node_modules/concat-stream": { "version": "1.6.2", @@ -5828,9 +5919,9 @@ } }, "node_modules/cosmiconfig": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.0.0.tgz", - "integrity": "sha512-da1EafcpH6b/TD8vDRaWV7xFINlHlF6zKsGwS1TsuVJTZRkquaS5HTMq7uq6h31619QjbsYl21gVDOm32KM1vQ==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz", + "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==", "dependencies": { "import-fresh": "^3.2.1", "js-yaml": "^4.1.0", @@ -5839,14 +5930,17 @@ }, "engines": { "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" } }, "node_modules/cross-fetch": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", - "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.6.tgz", + "integrity": "sha512-riRvo06crlE8HiqOwIpQhxwdOk4fOeR7FVM/wXoxchFEqMNUjvbs3bfo4OTgMEMHzppd4DxFBDbyySj8Cv781g==", "dependencies": { - "node-fetch": "2.6.7" + "node-fetch": "^2.6.11" } }, "node_modules/cross-spawn": { @@ -5892,6 +5986,14 @@ "integrity": "sha512-IiJwMC8rdZE0+xiEZHeru6YoONC4rfPMqGm2W85jMIbkFvv5nFTwJVFHam2eFrN6txmoUYFAFXiv8ICVeTO0MA==", "dev": true }, + "node_modules/data-uri-to-buffer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-5.0.1.tgz", + "integrity": "sha512-a9l6T1qqDogvvnw0nKlfZzqsyikEBZBClF39V3TFoKhDtGBqHu2HkuomJc02j5zft8zrUaXEuoicLeW54RkzPg==", + "engines": { + "node": ">= 14" + } + }, "node_modules/dateformat": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-3.0.2.tgz", @@ -5918,8 +6020,7 @@ "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" }, "node_modules/deepmerge": { "version": "4.3.1", @@ -5930,6 +6031,20 @@ "node": ">=0.10.0" } }, + "node_modules/degenerator": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-4.0.2.tgz", + "integrity": "sha512-HKwIFvZROUMfH3qI3gBpD61BYh7q3c3GXD5UGZzoVNJwVSYgZKvYl1fRMXc9ozoTxl/VZxKJ5v/bA+19tywFiw==", + "dependencies": { + "ast-types": "^0.13.2", + "escodegen": "^1.8.1", + "esprima": "^4.0.0", + "vm2": "^3.9.17" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -5974,9 +6089,9 @@ } }, "node_modules/devtools-protocol": { - "version": "0.0.1068969", - "resolved": "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1068969.tgz", - "integrity": 
"sha512-ATFTrPbY1dKYhPPvpjtwWKSK2mIwGmRwX54UASn9THEuIZCe2n9k3vVuMmt6jWeL+e5QaaguEv/pMyR+JQB7VQ==" + "version": "0.0.1135028", + "resolved": "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1135028.tgz", + "integrity": "sha512-jEcNGrh6lOXNRJvZb9RjeevtZGrgugPKSMJZxfyxWQnhlKawMPhMtk/dfC+Z/6xNXExlzTKlY5LzIAK/fRpQIw==" }, "node_modules/dezalgo": { "version": "1.0.4", @@ -6063,8 +6178,7 @@ "node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, "node_modules/encodeurl": { "version": "1.0.2", @@ -6094,7 +6208,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "dev": true, "engines": { "node": ">=6" } @@ -6112,6 +6225,74 @@ "node": ">=0.8.0" } }, + "node_modules/escodegen": { + "version": "1.14.3", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.14.3.tgz", + "integrity": "sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==", + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^4.2.0", + "esutils": "^2.0.2", + "optionator": "^0.8.1" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=4.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/escodegen/node_modules/levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", + "dependencies": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/escodegen/node_modules/optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dependencies": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/escodegen/node_modules/prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/escodegen/node_modules/type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", + "dependencies": { + "prelude-ls": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/eslint": { "version": "8.38.0", "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.38.0.tgz", @@ -6461,7 +6642,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", "integrity": 
"sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true, "bin": { "esparse": "bin/esparse.js", "esvalidate": "bin/esvalidate.js" @@ -6516,7 +6696,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true, "engines": { "node": ">=4.0" } @@ -6525,7 +6704,6 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -6709,8 +6887,7 @@ "node_modules/fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==" }, "node_modules/fast-safe-stringify": { "version": "2.1.1", @@ -6897,10 +7074,24 @@ "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" }, + "node_modules/fs-extra": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", + "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + }, + "engines": { + "node": ">=6 <7 || >=8" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true }, "node_modules/fsevents": { "version": "2.3.2", @@ -6942,7 +7133,6 @@ "version": "2.0.5", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, "engines": { "node": "6.* || 8.* || >= 10.*" } @@ -6995,10 +7185,46 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/get-uri": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.1.tgz", + "integrity": "sha512-7ZqONUVqaabogsYNWlYj0t3YZaL6dhuEueZXGF+/YVmf6dHmaFg8/6psJKqhx9QykIDKzpGcy2cn4oV4YC7V/Q==", + "dependencies": { + "basic-ftp": "^5.0.2", + "data-uri-to-buffer": "^5.0.1", + "debug": "^4.3.4", + "fs-extra": "^8.1.0" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/get-uri/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/get-uri/node_modules/ms": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -7056,8 +7282,7 @@ "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, "node_modules/grapheme-splitter": { "version": "1.0.4", @@ -7203,10 +7428,55 @@ "node": ">= 0.8" } }, + "node_modules/http-proxy-agent": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.0.tgz", + "integrity": "sha512-+ZT+iBxVUQ1asugqnD6oWoRiS25AkjNfG085dKJGtGxkdwLQrMKU5wJr2bOOFAXzKcTuqq+7fZlTMgG3SRfIYQ==", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http-proxy-agent/node_modules/agent-base": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz", + "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==", + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http-proxy-agent/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/http-proxy-agent/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, "node_modules/https-proxy-agent": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, "dependencies": { "agent-base": "6", "debug": "4" @@ -7219,6 +7489,7 @@ "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, "dependencies": { "ms": "2.1.2" }, @@ -7234,7 +7505,8 @@ "node_modules/https-proxy-agent/node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true }, "node_modules/human-signals": { "version": "2.1.0", @@ -7330,6 +7602,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": 
"sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -7429,7 +7702,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, "engines": { "node": ">=8" } @@ -9299,6 +9571,14 @@ "node": ">=6" } }, + "node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, "node_modules/jsonschema": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/jsonschema/-/jsonschema-1.4.1.tgz", @@ -9620,6 +9900,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -9635,6 +9916,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/mitt": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.0.tgz", + "integrity": "sha512-7dX2/10ITVyqh4aOSVI9gdape+t9l2/8QxHrFmUXu4EEUpdlxl6RudZUPZoc+zuY2hk1j7XxVroIVIan/pD/SQ==" + }, "node_modules/mkdirp": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", @@ -9651,6 +9937,15 @@ "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==" }, + "node_modules/mock-fs": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/mock-fs/-/mock-fs-5.2.0.tgz", + "integrity": "sha512-2dF2R6YMSZbpip1V1WHKGLNjr/k48uQClqMVb5H3MOvwc9qhYis3/IWbj02qIg/Y8MDXKFF4c5v0rxx2o6xTZw==", + "dev": true, + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/moment": { "version": "2.29.4", "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.4.tgz", @@ -9955,6 +10250,14 @@ "node": ">= 0.6" } }, + "node_modules/netmask": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz", + "integrity": "sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==", + "engines": { + "node": ">= 0.4.0" + } + }, "node_modules/new-find-package-json": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/new-find-package-json/-/new-find-package-json-2.0.0.tgz", @@ -10047,9 +10350,9 @@ } }, "node_modules/node-fetch": { - "version": "2.6.7", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", - "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w==", "dependencies": { "whatwg-url": "^5.0.0" }, @@ -10239,6 +10542,85 @@ "node": ">=6" } }, + "node_modules/pac-proxy-agent": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-6.0.3.tgz", + "integrity": 
"sha512-5Hr1KgPDoc21Vn3rsXBirwwDnF/iac1jN/zkpsOYruyT+ZgsUhUOgVwq3v9+ukjZd/yGm/0nzO1fDfl7rkGoHQ==", + "dependencies": { + "agent-base": "^7.0.2", + "debug": "^4.3.4", + "get-uri": "^6.0.1", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "pac-resolver": "^6.0.1", + "socks-proxy-agent": "^8.0.1" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-proxy-agent/node_modules/agent-base": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz", + "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==", + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-proxy-agent/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/pac-proxy-agent/node_modules/https-proxy-agent": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.0.tgz", + "integrity": "sha512-0euwPCRyAPSgGdzD1IVN9nJYHtBhJwb6XPfbpQcYbPCwrBidX6GzxmchnaF4sfF/jPb74Ojx5g4yTg3sixlyPw==", + "dependencies": { + "agent-base": "^7.0.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-proxy-agent/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/pac-resolver": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-6.0.1.tgz", + "integrity": "sha512-dg497MhVT7jZegPRuOScQ/z0aV/5WR0gTdRu1md+Irs9J9o+ls5jIuxjo1WfaTG+eQQkxyn5HMGvWK+w7EIBkQ==", + "dependencies": { + "degenerator": "^4.0.1", + "ip": "^1.1.5", + "netmask": "^2.0.2" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-resolver/node_modules/ip": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.8.tgz", + "integrity": "sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==" + }, "node_modules/pako": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/pako/-/pako-2.1.0.tgz", @@ -10294,6 +10676,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, "engines": { "node": ">=0.10.0" } @@ -10451,6 +10834,68 @@ "node": ">= 0.10" } }, + "node_modules/proxy-agent": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.2.1.tgz", + "integrity": "sha512-OIbBKlRAT+ycCm6wAYIzMwPejzRtjy8F3QiDX0eKOA3e4pe3U9F/IvzcHP42bmgQxVv97juG+J8/gx+JIeCX/Q==", + "dependencies": { + "agent-base": "^7.0.2", + "debug": "^4.3.4", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "lru-cache": "^7.14.1", + "pac-proxy-agent": "^6.0.3", + "proxy-from-env": "^1.1.0", + "socks-proxy-agent": "^8.0.1" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/agent-base": { + "version": "7.1.0", + "resolved": 
"https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz", + "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==", + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/proxy-agent/node_modules/https-proxy-agent": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.0.tgz", + "integrity": "sha512-0euwPCRyAPSgGdzD1IVN9nJYHtBhJwb6XPfbpQcYbPCwrBidX6GzxmchnaF4sfF/jPb74Ojx5g4yTg3sixlyPw==", + "dependencies": { + "agent-base": "^7.0.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", @@ -10474,39 +10919,41 @@ } }, "node_modules/puppeteer": { - "version": "19.4.1", - "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-19.4.1.tgz", - "integrity": "sha512-PCnrR13B8A+VSEDXRmrNXRZbrkF1tfsI1hKSC7vs13eNS6CUD3Y4FA8SF8/VZy+Pm1kg5AggJT2Nu3HLAtGkFg==", + "version": "20.7.1", + "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-20.7.1.tgz", + "integrity": "sha512-yIkba2PT6rkPw85gCAmbW6KO6VVFTdONnvQp8taeMZaOTUJ193nVajykD1jT3vFbjvwKz8te9nbBVb9ITQqbPA==", "hasInstallScript": true, "dependencies": { - "cosmiconfig": "8.0.0", - "https-proxy-agent": "5.0.1", - "progress": "2.0.3", - "proxy-from-env": "1.1.0", - "puppeteer-core": "19.4.1" + "@puppeteer/browsers": "1.4.1", + "cosmiconfig": "8.2.0", + "puppeteer-core": "20.7.1" }, "engines": { - "node": ">=14.1.0" + "node": ">=16.3.0" } }, "node_modules/puppeteer-core": { - "version": "19.4.1", - "resolved": "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-19.4.1.tgz", - "integrity": "sha512-JHIuqtqrUAx4jGOTxXu4ilapV2jabxtVMA/e4wwFUMvtSsqK4nVBSI+Z1SKDoz7gRy/JUIc8WzmfocCa6SIZ1w==", + "version": "20.7.1", + "resolved": "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-20.7.1.tgz", + "integrity": "sha512-f45j9JxImFaYndQYsFA39CEDKwpn1nAF/Iw7urznatjhaslSB/RfHG7sC7iPpDbmP+CLpPEkHhqNxqpaUDdTLw==", "dependencies": { - "cross-fetch": "3.1.5", + "@puppeteer/browsers": "1.4.1", + "chromium-bidi": "0.4.11", + "cross-fetch": "3.1.6", "debug": "4.3.4", - "devtools-protocol": "0.0.1068969", - "extract-zip": "2.0.1", - "https-proxy-agent": "5.0.1", - "proxy-from-env": "1.1.0", - "rimraf": "3.0.2", - "tar-fs": "2.1.1", - "unbzip2-stream": "1.4.3", - "ws": "8.11.0" + "devtools-protocol": "0.0.1135028", + "ws": "8.13.0" }, "engines": { - "node": ">=14.1.0" + "node": ">=16.3.0" + }, + "peerDependencies": { + "typescript": ">= 4.7.4" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/puppeteer-core/node_modules/debug": { @@ -10760,7 +11207,6 @@ "version": "2.1.1", "resolved": 
"https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -10841,6 +11287,7 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, "dependencies": { "glob": "^7.1.3" }, @@ -11115,11 +11562,56 @@ "npm": ">= 3.0.0" } }, + "node_modules/socks-proxy-agent": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.1.tgz", + "integrity": "sha512-59EjPbbgg8U3x62hhKOFVAmySQUcfRQ4C7Q/D5sEHnZTQRrQlNKINks44DMR1gwXp0p4LaVIeccX2KHTTcHVqQ==", + "dependencies": { + "agent-base": "^7.0.1", + "debug": "^4.3.4", + "socks": "^2.7.1" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/socks-proxy-agent/node_modules/agent-base": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz", + "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==", + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/socks-proxy-agent/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/socks-proxy-agent/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, + "devOptional": true, "engines": { "node": ">=0.10.0" } @@ -11249,7 +11741,6 @@ "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -11263,7 +11754,6 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, "dependencies": { "ansi-regex": "^5.0.1" }, @@ -11680,7 +12170,7 @@ "version": "5.0.4", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==", - "dev": true, + "devOptional": true, "peer": true, "bin": { "tsc": "bin/tsc", @@ -11739,6 +12229,14 @@ "node": ">=4" } }, + "node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + 
"engines": { + "node": ">= 4.0.0" + } + }, "node_modules/unixify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unixify/-/unixify-1.0.0.tgz", @@ -11868,6 +12366,21 @@ "node": ">= 0.8" } }, + "node_modules/vm2": { + "version": "3.9.19", + "resolved": "https://registry.npmjs.org/vm2/-/vm2-3.9.19.tgz", + "integrity": "sha512-J637XF0DHDMV57R6JyVsTak7nIL8gy5KH4r1HiwWLf/4GBbb5MKL5y7LpmF4A8E2nR6XmzpmMFQ7V7ppPTmUQg==", + "dependencies": { + "acorn": "^8.7.0", + "acorn-walk": "^8.2.0" + }, + "bin": { + "vm2": "bin/vm2" + }, + "engines": { + "node": ">=6.0" + } + }, "node_modules/walker": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", @@ -11918,7 +12431,6 @@ "version": "1.2.3", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -11927,7 +12439,6 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -11944,7 +12455,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, "dependencies": { "color-convert": "^2.0.1" }, @@ -11959,7 +12469,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, "dependencies": { "color-name": "~1.1.4" }, @@ -11970,8 +12479,7 @@ "node_modules/wrap-ansi/node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, "node_modules/wrappy": { "version": "1.0.2", @@ -11992,15 +12500,15 @@ } }, "node_modules/ws": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz", - "integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", + "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", "engines": { "node": ">=10.0.0" }, "peerDependencies": { "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" + "utf-8-validate": ">=5.0.2" }, "peerDependenciesMeta": { "bufferutil": { @@ -12032,7 +12540,6 @@ "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, "engines": { "node": ">=10" } @@ -12046,7 +12553,6 @@ "version": "17.7.1", "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz", "integrity": "sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==", - "dev": true, "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", @@ -12064,7 
+12570,6 @@ "version": "21.1.1", "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "dev": true, "engines": { "node": ">=12" } diff --git a/ai-verify-apigw/package.json b/ai-verify-apigw/package.json index dfcf063e5..de9344cb7 100644 --- a/ai-verify-apigw/package.json +++ b/ai-verify-apigw/package.json @@ -34,7 +34,7 @@ "mongoose": "^6.8.2", "multer": "^1.4.5-lts.1", "node-cache": "^5.1.2", - "puppeteer": "^19.4.1", + "puppeteer": "^20.7.1", "redis": "^4.5.1", "ws": "^8.11.0" }, @@ -53,6 +53,7 @@ "jest": "^29.5.0", "jest-html-reporter": "^3.7.1", "jest-json-reporter": "^1.2.2", + "mock-fs": "^5.2.0", "mongodb-memory-server": "^8.12.1", "supertest": "^6.3.3", "ts-jest": "^29.1.0" diff --git a/ai-verify-apigw/testutil/mockData.mjs b/ai-verify-apigw/testutil/mockData.mjs index b79e71846..6cd41a411 100644 --- a/ai-verify-apigw/testutil/mockData.mjs +++ b/ai-verify-apigw/testutil/mockData.mjs @@ -1,8 +1,9 @@ +import { jest } from '@jest/globals'; import casual from 'casual'; import mongoose from 'mongoose'; casual.define('ObjectId', function() { - return mongoose.Types.ObjectId.toString(); + return mongoose.Types.ObjectId().toString(); }) casual.define('randomString', function(len) { @@ -86,7 +87,6 @@ casual.define('reportPages', function(count) { "maxW": casual.integer(1,12), "minH": casual.integer(1,36), "maxH": casual.integer(1,36), - "moved": false, "static": false }], reportWidgets: [ @@ -160,8 +160,7 @@ casual.define('project', function() { return { ...template, inputBlockData: {}, - testInformationData: casual.testInformation(1), - // modelAndDatasets: kimee TODO + testInformationData: casual.testInformation(1) } }) @@ -218,14 +217,101 @@ casual.define('report', function(project, status) { return report; }) -casual.define('modelAndDataset', function() { - return { - modelFileName: 'testmodel.sav', - testDatasetFileName: 'testdata.sav', - groundTruthDatasetFileName: 'testgroundtruthdata.sav', - modelType: casual.random_element(["Classification","Regression"]), - groundTruthColumn: 'testcolumn' +const mockModel = { + filename: 'pickle_scikit_bc_compas.sav', + name: 'pickle_scikit_bc_compas.sav', + filePath: '/home/test/uploads/model/pickle_scikit_bc_compas.sav', + ctime: new Date('2023-06-05T07:17:25.132Z'), + description: '', + status: 'Valid', + size: '502.71 KB', + modelType: 'Classification', + serializer: 'pickle', + modelFormat: 'sklearn', + errorMessages: '', + type: 'File', + createdAt: new Date('2023-06-05T07:17:25.140Z'), + updatedAt: new Date('2023-06-05T07:17:26.151Z') +} + +const mockTestDataset = { + filename: 'pickle_pandas_tabular_compas_testing.sav', + name: 'pickle_pandas_tabular_compas_testing.sav', + type: 'File', + filePath: '/home/test/uploads/data/pickle_pandas_tabular_compas_testing.sav', + ctime: '2023-06-05T07:17:06.360Z', + description: '', + status: 'Valid', + size: '68.33 KB', + serializer: 'pickle', + dataFormat: 'pandas', + errorMessages: '', + dataColumns: [ + { + name: 'age_cat_cat', + datatype: 'int64', + label: 'age_cat_cat', + _id: mongoose.Types.ObjectId('647d8bf3ef104c4da904734a') + }, + { + name: 'sex_code', + datatype: 'int64', + label: 'sex_code', + _id: mongoose.Types.ObjectId('647d8bf3ef104c4da904734b') + }, + { + name: 'race_code', + datatype: 'int64', + label: 'race_code', + _id: mongoose.Types.ObjectId('647d8bf3ef104c4da904734c') + }, + { + name: 'priors_count', + datatype: 'int64', + label: 
'priors_count', + _id: mongoose.Types.ObjectId('647d8bf3ef104c4da904734d') + }, + { + name: 'c_charge_degree_cat', + datatype: 'int64', + label: 'c_charge_degree_cat', + _id: mongoose.Types.ObjectId('647d8bf3ef104c4da904734e') + }, + { + name: 'two_year_recid', + datatype: 'int64', + label: 'two_year_recid', + _id: mongoose.Types.ObjectId('647d8bf3ef104c4da904734f') + } + ], + createdAt: new Date('2023-06-05T07:17:06.368Z'), + updatedAt: new Date('2023-06-05T07:17:07.385Z'), + __v: 0, + numCols: 6, + numRows: 1235 +}; + +casual.define('multipleDatasets', function(count) { + if (typeof count !== 'number') { + count=2; + } + let ar = []; + for (let i=0; i { // set up connection to mongodb - console.debug(`Connecting to ${process.env.DB_URI}`); + // console.debug(`Connecting to ${process.env.DB_URI}`); await mongoose.connect(process.env.DB_URI, {useNewUrlParser: true}); // mock apollo pub sub jest.unstable_mockModule("#lib/apolloPubSub.mjs", () => { diff --git a/ai-verify-portal/.eslintignore b/ai-verify-portal/.eslintignore new file mode 100644 index 000000000..6361e82b7 --- /dev/null +++ b/ai-verify-portal/.eslintignore @@ -0,0 +1,2 @@ +/node_modules +/plugins \ No newline at end of file diff --git a/ai-verify-portal/.eslintrc.json b/ai-verify-portal/.eslintrc.json index 92a80b5d3..ba39a4a54 100644 --- a/ai-verify-portal/.eslintrc.json +++ b/ai-verify-portal/.eslintrc.json @@ -1,15 +1,51 @@ { - "extends": ["next/core-web-vitals", "plugin:@typescript-eslint/recommended"], + "extends": [ + "next/core-web-vitals", + "plugin:@typescript-eslint/recommended", + "prettier" + ], // prettier - ensure eslint lint does not do formatting-related rules "parser": "@typescript-eslint/parser", "plugins": ["@typescript-eslint"], "root": true, "rules": { - "react-hooks/rules-of-hooks": 1, + "react-hooks/rules-of-hooks": "warn", "react-hooks/exhaustive-deps": 0, - "prefer-const": 1, - "@typescript-eslint/no-empty-function": 1, - "@typescript-eslint/ban-ts-comment": 1, - "@typescript-eslint/no-inferrable-types": 1, - "@typescript-eslint/no-unused-vars": ["warn", { "argsIgnorePattern": "^_" }] + // "react/self-closing-comp": "error", TODO: Enable and fix below lint errors in separate PR + "prefer-const": "error", + "@typescript-eslint/no-empty-function": "error", + "@typescript-eslint/ban-ts-comment": "warn", + "@typescript-eslint/no-inferrable-types": "error", + "@typescript-eslint/no-unused-vars": [ + "warn", + { "argsIgnorePattern": "^_", "varsIgnorePattern": "^_" } + ] + // TODO: Enable and fix below lint errors in separate PR + // "import/order": [ + // "warn", + // { + // "groups": [ + // "builtin", + // "external", + // "parent", + // "sibling", + // "index", + // "object", + // "type" + // ], + // "pathGroups": [ + // { + // "pattern": "@/**/**", + // "group": "parent", + // "position": "before" + // } + // ], + // "alphabetize": { "order": "asc" } + // } + // ], + // "no-restricted-imports": ["error", + // { + // "patterns": ["../"] + // } + // ] } } diff --git a/ai-verify-portal/.github/pull_request_template.md b/ai-verify-portal/.github/pull_request_template.md deleted file mode 100644 index 5d6732077..000000000 --- a/ai-verify-portal/.github/pull_request_template.md +++ /dev/null @@ -1,48 +0,0 @@ -# Pull Request Template - -## Description - -[Provide a brief description of the changes or features introduced by this pull request.] - -## Motivation and Context - -[Explain the motivation or the context behind this pull request. Why is it necessary?] 
- -## Type of Change - - - - - - - - - - - - - -## How to Test - -[Provide clear instructions on how to test and verify the changes introduced by this pull request, including any specific unit tests you have created to demonstrate your changes.] - -## Checklist - -Please check all the boxes that apply to this pull request using "x": - -- [ ] I have tested the changes locally and verified that they work as expected. -- [ ] I have added or updated the necessary documentation (README, API docs, etc.). -- [ ] I have added appropriate unit tests or functional tests for the changes made. -- [ ] I have followed the project's coding conventions and style guidelines. -- [ ] I have rebased my branch onto the latest commit of the main branch. -- [ ] I have squashed or reorganized my commits into logical units. -- [ ] I have added any necessary dependencies or packages to the project's build configuration. -- [ ] I have performed a self-review of my own code. - -## Screenshots (if applicable) - -[If the changes involve visual modifications, include screenshots or GIFs that demonstrate the changes.] - -## Additional Notes - -[Add any additional information or context that might be relevant to reviewers.] \ No newline at end of file diff --git a/ai-verify-portal/.github/workflows/pre-merge-checks-portal.yml b/ai-verify-portal/.github/workflows/pre-merge-checks-portal.yml deleted file mode 100644 index 16fa66a61..000000000 --- a/ai-verify-portal/.github/workflows/pre-merge-checks-portal.yml +++ /dev/null @@ -1,164 +0,0 @@ -# Pre-merge Checks (for Nodejs/Typescript projects) -# 1. Unit tests with code coverage (jest) -# 2. Code quality analysis (lint) -# 3. Dependency analysis (vulnerabilities) -# 4. Dependency analysis (undesirable licenses) -# 5. Deploy reports generated from the above to GitHub Pages -name: Pre-Merge Checks - -on: - # Runs on pull request to main - #pull_request: - # branches: [master, main] - - # Run this workflow manually from Actions tab - workflow_dispatch: - -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write - -# Allow one concurrent deployment -concurrency: - group: ${{ github.repository }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - # Single deploy job since we're just deploying - pre-merge-checks: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - timeout-minutes: 15 - steps: - - # Checkout code - - name: Checkout code (portal) - uses: actions/checkout@v3 - with: - repository: ${{ github.repository_owner }}/ai-verify-portal - ref: main - token: ${{ secrets.CHECKOUT_TOKEN }} - path: portal - - - name: Checkout code (shared-library) - uses: actions/checkout@v3 - with: - repository: ${{ github.repository_owner }}/ai-verify-shared-library - ref: main - token: ${{ secrets.CHECKOUT_TOKEN }} - path: shared-library - - # Install dependencies - - name: Setup npm cache/install - uses: actions/setup-node@v3 - with: - node-version: 18 - cache: 'npm' - cache-dependency-path: portal - - - name: Install dependencies for shared-library - working-directory: ${{ github.workspace }}/shared-library - run: | - npm install --omit=dev - npx license-checker --summary --out licenses-found.txt -y - npm install -D - npm run build - - - name: Install dependencies for portal - working-directory: ${{ github.workspace }}/portal - run: | - npm install --omit=dev - npx license-checker --summary --out licenses-found.txt -y - npm install -D - npm i 
-D jest jest-html-reporter jest-json-reporter ts-jest @jest/globals badge-maker - npm i -D eslint eslint-formatter-html @typescript-eslint/eslint-plugin @typescript-eslint/parser - npm link ../shared-library - - # Compile typescript source files - - name: Build portal (next build) - working-directory: ${{ github.workspace }}/portal - run: | - cp .env.development .env - npm run build - - # Unit Tests & Coverage - - name: Unit tests with coverage - if: always() - working-directory: ${{ github.workspace }}/portal - timeout-minutes: 5 - run: | - set +e - sudo timedatectl set-timezone Asia/Singapore - npm run coverage - exit_code_jest=$? - node ci/createBadges.mjs test - node ci/createBadges.mjs coverage - set -e - if [ $exit_code_jest -ne 0 ]; then - echo "jest failed, exiting..." - exit $exit_code_jest - fi - - # eslint - - name: Code quality analysis - lint - if: always() - working-directory: ${{ github.workspace }}/portal - run: | - set +e - npx eslint -f html -o eslint-report.html . - exit_code_lint=$? - npx eslint -f json -o eslint-report.json . - node ci/createBadges.mjs lint - set -e - if [ $exit_code_lint -ne 0 ]; then - echo "lint failed, exiting..." - exit $exit_code_lint - fi - - # npm audit - - name: Dependency analysis - vulnerabilities & licenses - if: always() - working-directory: ${{ github.workspace }}/portal - run: | - set +e - npm audit --json | npx npm-audit-markdown --output npm-audit-report.md - exit_code_audit=$? - npx markdown-to-html-cli --source npm-audit-report.md --output npm-audit-report.html -y - echo -e "License Check Summary for portal\n" > license-report.txt - cat licenses-found.txt >> license-report.txt - echo -e "\nLicense Check Summary for shared-library\n" >> license-report.txt - cat ./ai-verify-shared-library/licenses-found.txt >> license-report.txt - cp license-report.txt licenses-found.txt - node ci/createBadges.mjs dependency - node ci/createBadges.mjs license - set -e - if [ $exit_code_audit -ne 0 ]; then - echo "npm audit failed, exiting..." 
- exit $exit_code_audit - fi - - ### Publish reports to Pages ### - - - name: Prepare artifact - if: always() - run: | - set +e - mv portal/coverage _site - mv portal/*.svg _site/ - mv portal/*.html _site/ - mv portal/*.md _site/ - mv portal/*.txt _site/ - set -e - - - name: Upload artifact - if: always() - uses: actions/upload-pages-artifact@v1 - - - name: Publish artifact to Pages - if: always() - id: deployment - uses: actions/deploy-pages@v1 diff --git a/ai-verify-portal/.prettierignore b/ai-verify-portal/.prettierignore new file mode 100644 index 000000000..e61594134 --- /dev/null +++ b/ai-verify-portal/.prettierignore @@ -0,0 +1,38 @@ +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage +test-report.html +test-results.json + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# local env files +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo + +# plugins +/temp +/plugins \ No newline at end of file diff --git a/ai-verify-portal/.prettierrc.json b/ai-verify-portal/.prettierrc.json new file mode 100644 index 000000000..29a6c36a1 --- /dev/null +++ b/ai-verify-portal/.prettierrc.json @@ -0,0 +1,8 @@ +{ + "trailingComma": "es5", + "printWidth": 80, + "tabWidth": 2, + "semi": true, + "singleQuote": true, + "bracketSameLine": true +} diff --git a/ai-verify-portal/.vscode/extensions.json b/ai-verify-portal/.vscode/extensions.json new file mode 100644 index 000000000..1d7ac851e --- /dev/null +++ b/ai-verify-portal/.vscode/extensions.json @@ -0,0 +1,3 @@ +{ + "recommendations": ["dbaeumer.vscode-eslint", "esbenp.prettier-vscode"] +} diff --git a/ai-verify-portal/.vscode/launch.json b/ai-verify-portal/.vscode/launch.json index 1fb4a5ca6..cbb9f4a4a 100644 --- a/ai-verify-portal/.vscode/launch.json +++ b/ai-verify-portal/.vscode/launch.json @@ -1,28 +1,28 @@ { - "version": "0.2.0", - "configurations": [ - { - "name": "Next.js: debug server-side", - "type": "node-terminal", - "request": "launch", - "command": "npm run dev" - }, - { - "name": "Next.js: debug client-side", - "type": "chrome", - "request": "launch", - "url": "http://localhost:3000" - }, - { - "name": "Next.js: debug full stack", - "type": "node-terminal", - "request": "launch", - "command": "npm run dev", - "serverReadyAction": { - "pattern": "started server on .+, url: (https?://.+)", - "uriFormat": "%s", - "action": "debugWithChrome" - } - } - ] -} \ No newline at end of file + "version": "0.2.0", + "configurations": [ + { + "name": "Next.js: debug server-side", + "type": "node-terminal", + "request": "launch", + "command": "npm run dev" + }, + { + "name": "Next.js: debug client-side", + "type": "chrome", + "request": "launch", + "url": "http://localhost:3000" + }, + { + "name": "Next.js: debug full stack", + "type": "node-terminal", + "request": "launch", + "command": "npm run dev", + "serverReadyAction": { + "pattern": "started server on .+, url: (https?://.+)", + "uriFormat": "%s", + "action": "debugWithChrome" + } + } + ] +} diff --git a/ai-verify-portal/.vscode/settings.json b/ai-verify-portal/.vscode/settings.json deleted file mode 100644 index 22a182e69..000000000 --- a/ai-verify-portal/.vscode/settings.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "editor.insertSpaces": true, - "editor.tabSize": 2, - "editor.detectIndentation": true, - "editor.codeActionsOnSave": { - "source.removeUnusedImports": true - } -} \ No newline at end of file diff --git a/ai-verify-portal/README.md b/ai-verify-portal/README.md 
index 80a26e08e..8b02ea75e 100644 --- a/ai-verify-portal/README.md +++ b/ai-verify-portal/README.md @@ -3,6 +3,7 @@ ## Software Requirements ### Operation System Supported + - Linux ### Prerequisites @@ -14,21 +15,25 @@ ## Installing NodeJS 16.x ### Using Ubuntu + ```sh curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash - sudo apt-get install -y nodejs ``` ### Using Debian, as root + ```sh curl -fsSL https://deb.nodesource.com/setup_16.x | bash - apt-get install -y nodejs ``` + ## Installing and Setting up mongodb Install mongodb. See [MongoDB Installation Guides](https://www.mongodb.com/docs/manual/installation/) Create a new user + ```sh use aiverify db.createUser({user:"aiverify",pwd:"aiverify",roles:["readWrite"]}) @@ -52,24 +57,27 @@ notify-keyspace-events Kh ``` ## Customize the environment variable -To customize the default environment, you can create a **.env.local** file in the project root directory. This will override the **.env** defaults set. +To customize the default environment, you can create a **.env.local** file in the project root directory. This will override the **.env** defaults set. # Running the Application Make sure that mongodb and redis server are running before starting the application. ## Run NodeJS in development mode + ```sh npm run dev ``` ## Run NodeJS in production mode + ```sh npm run start ``` ## Run NodeJS unit tests + ```sh npm run test ``` diff --git a/ai-verify-portal/__mocks__/mockGlobals.ts b/ai-verify-portal/__mocks__/mockGlobals.ts index 64b69dbd9..46d10da9a 100644 --- a/ai-verify-portal/__mocks__/mockGlobals.ts +++ b/ai-verify-portal/__mocks__/mockGlobals.ts @@ -1,8 +1,8 @@ function silentConsoleLogs() { - jest.spyOn(console, 'log').mockImplementation(jest.fn()); - jest.spyOn(console, 'debug').mockImplementation(jest.fn()); - jest.spyOn(console, 'warn').mockImplementation(jest.fn()); - jest.spyOn(console, 'error').mockImplementation(jest.fn()); + jest.spyOn(console, 'log').mockImplementation(jest.fn()); + jest.spyOn(console, 'debug').mockImplementation(jest.fn()); + jest.spyOn(console, 'warn').mockImplementation(jest.fn()); + jest.spyOn(console, 'error').mockImplementation(jest.fn()); } -export { silentConsoleLogs } \ No newline at end of file +export { silentConsoleLogs }; diff --git a/ai-verify-portal/__mocks__/mockProviders.tsx b/ai-verify-portal/__mocks__/mockProviders.tsx index a285de9ac..c22e17f57 100644 --- a/ai-verify-portal/__mocks__/mockProviders.tsx +++ b/ai-verify-portal/__mocks__/mockProviders.tsx @@ -2,18 +2,19 @@ import { MockedProvider, MockedResponse } from '@apollo/client/testing'; import { NotificationsProvider } from 'src/modules/notifications/providers/notificationsContext'; import { PropsWithChildren } from 'react'; - type MockProvidersProps = { - apolloMocks?: MockedResponse, Record>[] | undefined -} + apolloMocks?: + | MockedResponse, Record>[] + | undefined; +}; function MockProviders(props: PropsWithChildren) { const { apolloMocks, children } = props; - return - - {children} - - + return ( + + {children} + + ); } -export { MockProviders } \ No newline at end of file +export { MockProviders }; diff --git a/ai-verify-portal/__mocks__/plugins.ts b/ai-verify-portal/__mocks__/plugins.ts index b254c8172..69b71d46f 100644 --- a/ai-verify-portal/__mocks__/plugins.ts +++ b/ai-verify-portal/__mocks__/plugins.ts @@ -1,2295 +1,2188 @@ -import PluginManagerType from "src/types/pluginManager.interface"; -import { ApiResult } from "../src/modules/plugins/api/plugins"; -import AIFPlugin from "src/types/plugin.interface"; 
-import { DependencyStatusResult } from "src/modules/plugins/api/algorithms"; +import PluginManagerType from 'src/types/pluginManager.interface'; +import { ApiResult } from '../src/modules/plugins/api/plugins'; +import AIFPlugin from 'src/types/plugin.interface'; +import { DependencyStatusResult } from 'src/modules/plugins/api/algorithms'; -const pluginsListResponse: ApiResult = { +const pluginsListResponse: ApiResult = { status: 200, data: { - "plugins": [ + plugins: [ { - "gid": "aiverify.stock.algorithms.partial_dependence_plot", - "version": "0.1.0", - "name": "Partial Dependence Plot", - "author": "Test User", - "description": "Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. They show how predictions partially depend on values of the input variables of interests.", - "algorithms": [ + gid: 'aiverify.stock.algorithms.partial_dependence_plot', + version: '0.1.0', + name: 'Partial Dependence Plot', + author: 'Test User', + description: + 'Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. They show how predictions partially depend on values of the input variables of interests.', + algorithms: [ { - "cid": "partial_dependence_plot", - "name": "Partial Dependence Plot", - "modelType": [ - "classification", - "regression" + cid: 'partial_dependence_plot', + name: 'Partial Dependence Plot', + modelType: ['classification', 'regression'], + version: '0.1.0', + author: 'Test User', + description: + 'Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. They show how predictions partially depend on values of the input variables of interests.', + tags: ['Partial Dependence Plot', 'classification', 'regression'], + requireGroundTruth: false, + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.partial_dependence_plot:partial_dependence_plot', + pluginGID: 'aiverify.stock.algorithms.partial_dependence_plot', + algoPath: + '/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.partial_dependence_plot/algorithms/partial_dependence_plot', + requirements: [ + 'numpy==1.24.1 ; python_version >= "3.10" and python_version < "3.12"', + 'scipy==1.10.0 ; python_version >= "3.10" and python_version < "3.12"', ], - "version": "0.1.0", - "author": "Test User", - "description": "Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. 
They show how predictions partially depend on values of the input variables of interests.", - "tags": [ - "Partial Dependence Plot", - "classification", - "regression" - ], - "requireGroundTruth": false, - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.partial_dependence_plot:partial_dependence_plot", - "pluginGID": "aiverify.stock.algorithms.partial_dependence_plot", - "algoPath": "/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.partial_dependence_plot/algorithms/partial_dependence_plot", - "requirements": [ - "numpy==1.24.1 ; python_version >= \"3.10\" and python_version < \"3.12\"", - "scipy==1.10.0 ; python_version >= \"3.10\" and python_version < \"3.12\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - "required": [ - "target_feature_name", - "percentiles", - "grid_resolution" + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: [ + 'target_feature_name', + 'percentiles', + 'grid_resolution', ], - "properties": { - "target_feature_name": { - "title": "Target Feature Name", - "description": "Target Feature Name (e.g. Interest_Rate)", - "type": "string" + properties: { + target_feature_name: { + title: 'Target Feature Name', + description: 'Target Feature Name (e.g. Interest_Rate)', + type: 'string', + }, + percentiles: { + title: 'Cut-off percentiles', + description: 'Cut-off percentiles (e.g. [0.01, 0.99])', + type: 'array', + minItems: 2, + maxItems: 2, + items: { + type: 'number', + }, }, - "percentiles": { - "title": "Cut-off percentiles", - "description": "Cut-off percentiles (e.g. [0.01, 0.99])", - "type": "array", - "minItems": 2, - "maxItems": 2, - "items": { - "type": "number" - } + grid_resolution: { + title: 'Grid Resolution', + description: 'Grid Resolution (e.g. 25)', + type: 'number', }, - "grid_resolution": { - "title": "Grid Resolution", - "description": "Grid Resolution (e.g. 
25)", - "type": "number" - } - } + }, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "feature_names", - "output_classes", - "results" - ], - "minProperties": 1, - "properties": { - "feature_names": { - "type": "array", - "description": "Array of feature names", - "minItems": 1, - "items": { - "type": "string" - } + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['feature_names', 'output_classes', 'results'], + minProperties: 1, + properties: { + feature_names: { + type: 'array', + description: 'Array of feature names', + minItems: 1, + items: { + type: 'string', + }, }, - "output_classes": { - "type": "array", - "description": "Array of output classes", - "minItems": 1, - "items": { - "type": "string" - } + output_classes: { + type: 'array', + description: 'Array of output classes', + minItems: 1, + items: { + type: 'string', + }, + }, + results: { + description: 'Matrix of feature values (# feature names)', + type: 'array', + minItems: 1, + items: { + description: 'Matrix of PDP values (# output classes)', + type: 'array', + minItems: 1, + items: { + type: 'array', + description: 'Array of values for each PDP', + minItems: 1, + items: { + type: 'number', + }, + }, + }, }, - "results": { - "description": "Matrix of feature values (# feature names)", - "type": "array", - "minItems": 1, - "items": { - "description": "Matrix of PDP values (# output classes)", - "type": "array", - "minItems": 1, - "items": { - "type": "array", - "description": "Array of values for each PDP", - "minItems": 1, - "items": { - "type": "number" - } - } - } - } - } - } - } + }, + }, + }, ], - "isStock": false, - "installedAt": 1678686862514 + isStock: false, + installedAt: 1678686862514, }, { - "gid": "aiverify.stock.decorators", - "name": "AI Verify Stock Decorators", - "version": "1.0.0", - "reportWidgets": [ + gid: 'aiverify.stock.decorators', + name: 'AI Verify Stock Decorators', + version: '1.0.0', + reportWidgets: [ { - "cid": "divider", - "name": "Divider", - "tags": [ - "stock", - "decorator" - ], - "properties": [], - "widgetSize": { - "minW": 1, - "minH": 1, - "maxW": 12, - "maxH": 1 + cid: 'divider', + name: 'Divider', + tags: ['stock', 'decorator'], + properties: [], + widgetSize: { + minW: 1, + minH: 1, + maxW: 12, + maxH: 1, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.decorators:divider", - "version": "1.0.0", - "pluginGID": "aiverify.stock.decorators", - "mdxPath": "aiverify.stock.decorators/widgets/divider.mdx", - "status": "OK" + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.decorators:divider', + version: '1.0.0', + pluginGID: 'aiverify.stock.decorators', + mdxPath: 'aiverify.stock.decorators/widgets/divider.mdx', + status: 'OK', }, { - "cid": "header1", - "name": "Header 1", - "tags": [ - "stock", - "decorator", - "header" - ], - "properties": [ + cid: 'header1', + name: 'Header 1', + tags: ['stock', 'decorator', 'header'], 
+ properties: [ { - "key": "title", - "helper": "Enter the header title", - "default": "" + key: 'title', + helper: 'Enter the header title', + default: '', }, { - "key": "text", - "helper": "Enter the text for paragraph below header", - "default": "" - } + key: 'text', + helper: 'Enter the text for paragraph below header', + default: '', + }, ], - "widgetSize": { - "minW": 1, - "minH": 2, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 1, + minH: 2, + maxW: 12, + maxH: 36, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.decorators:header1", - "version": "1.0.0", - "pluginGID": "aiverify.stock.decorators", - "mdxPath": "aiverify.stock.decorators/widgets/header1.mdx", - "status": "OK" + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.decorators:header1', + version: '1.0.0', + pluginGID: 'aiverify.stock.decorators', + mdxPath: 'aiverify.stock.decorators/widgets/header1.mdx', + status: 'OK', }, { - "cid": "header2", - "name": "Header 2", - "tags": [ - "stock", - "decorator", - "header" - ], - "properties": [ + cid: 'header2', + name: 'Header 2', + tags: ['stock', 'decorator', 'header'], + properties: [ { - "key": "title", - "helper": "Enter the header title", - "default": "" + key: 'title', + helper: 'Enter the header title', + default: '', }, { - "key": "text", - "helper": "Enter the text for paragraph below header", - "default": "" - } + key: 'text', + helper: 'Enter the text for paragraph below header', + default: '', + }, ], - "widgetSize": { - "minW": 1, - "minH": 1, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 1, + minH: 1, + maxW: 12, + maxH: 36, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.decorators:header2", - "version": "1.0.0", - "pluginGID": "aiverify.stock.decorators", - "mdxPath": "aiverify.stock.decorators/widgets/header2.mdx", - "status": "OK" + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.decorators:header2', + version: '1.0.0', + pluginGID: 'aiverify.stock.decorators', + mdxPath: 'aiverify.stock.decorators/widgets/header2.mdx', + status: 'OK', }, { - "cid": "header3", - "name": "Header 3", - "tags": [ - "stock", - "decorator", - "header" - ], - "properties": [ + cid: 'header3', + name: 'Header 3', + tags: ['stock', 'decorator', 'header'], + properties: [ { - "key": "title", - "helper": "Enter the header title", - "default": "" + key: 'title', + helper: 'Enter the header title', + default: '', }, { - "key": "text", - "helper": "Enter the text for paragraph below header", - "default": "" - } + key: 'text', + helper: 'Enter the text for paragraph below header', + default: '', + }, ], - "widgetSize": { - "minW": 1, - "minH": 1, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 1, + minH: 1, + maxW: 12, + maxH: 36, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.decorators:header3", - "version": "1.0.0", - "pluginGID": "aiverify.stock.decorators", - "mdxPath": "aiverify.stock.decorators/widgets/header3.mdx", - "status": "OK" + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.decorators:header3', + version: '1.0.0', + pluginGID: 'aiverify.stock.decorators', + mdxPath: 'aiverify.stock.decorators/widgets/header3.mdx', + status: 'OK', }, { - "cid": "header4", - "name": "Header 4", - "tags": [ - "stock", - "decorator", - "header" - ], - "properties": [ + cid: 'header4', + name: 'Header 4', + tags: ['stock', 'decorator', 'header'], + 
properties: [ { - "key": "title", - "helper": "Enter the header title", - "default": "" + key: 'title', + helper: 'Enter the header title', + default: '', }, { - "key": "text", - "helper": "Enter the text for paragraph below header", - "default": "" - } + key: 'text', + helper: 'Enter the text for paragraph below header', + default: '', + }, ], - "widgetSize": { - "minW": 1, - "minH": 1, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 1, + minH: 1, + maxW: 12, + maxH: 36, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.decorators:header4", - "version": "1.0.0", - "pluginGID": "aiverify.stock.decorators", - "mdxPath": "aiverify.stock.decorators/widgets/header4.mdx", - "status": "OK" + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.decorators:header4', + version: '1.0.0', + pluginGID: 'aiverify.stock.decorators', + mdxPath: 'aiverify.stock.decorators/widgets/header4.mdx', + status: 'OK', }, { - "cid": "header5", - "name": "Header 5", - "tags": [ - "stock", - "decorator", - "header" - ], - "properties": [ + cid: 'header5', + name: 'Header 5', + tags: ['stock', 'decorator', 'header'], + properties: [ { - "key": "title", - "helper": "Enter the header title", - "default": "" + key: 'title', + helper: 'Enter the header title', + default: '', }, { - "key": "text", - "helper": "Enter the text for paragraph below header", - "default": "" - } + key: 'text', + helper: 'Enter the text for paragraph below header', + default: '', + }, ], - "widgetSize": { - "minW": 1, - "minH": 1, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 1, + minH: 1, + maxW: 12, + maxH: 36, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.decorators:header5", - "version": "1.0.0", - "pluginGID": "aiverify.stock.decorators", - "mdxPath": "aiverify.stock.decorators/widgets/header5.mdx", - "status": "OK" + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.decorators:header5', + version: '1.0.0', + pluginGID: 'aiverify.stock.decorators', + mdxPath: 'aiverify.stock.decorators/widgets/header5.mdx', + status: 'OK', }, { - "cid": "header6", - "name": "Header 6", - "tags": [ - "stock", - "decorator", - "header" - ], - "properties": [ + cid: 'header6', + name: 'Header 6', + tags: ['stock', 'decorator', 'header'], + properties: [ { - "key": "title", - "helper": "Enter the header title", - "default": "" + key: 'title', + helper: 'Enter the header title', + default: '', }, { - "key": "text", - "helper": "Enter the text for paragraph below header", - "default": "" - } + key: 'text', + helper: 'Enter the text for paragraph below header', + default: '', + }, ], - "widgetSize": { - "minW": 1, - "minH": 1, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 1, + minH: 1, + maxW: 12, + maxH: 36, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.decorators:header6", - "version": "1.0.0", - "pluginGID": "aiverify.stock.decorators", - "mdxPath": "aiverify.stock.decorators/widgets/header6.mdx", - "status": "OK" - } + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.decorators:header6', + version: '1.0.0', + pluginGID: 'aiverify.stock.decorators', + mdxPath: 'aiverify.stock.decorators/widgets/header6.mdx', + status: 'OK', + }, ], - "isStock": true, - "installedAt": 1679903556691 + isStock: true, + installedAt: 1679903556691, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets", - "name": "Widgets for Fairness 
Metrics Toolbox", - "version": "0.1.0", - "author": "Leong Peck Yoke", - "description": "Stock widgets to display results from Fairness Metrics Toolbox (FMT) algoritm", - "reportWidgets": [ + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets', + name: 'Widgets for Fairness Metrics Toolbox', + version: '0.1.0', + author: 'Leong Peck Yoke', + description: + 'Stock widgets to display results from Fairness Metrics Toolbox (FMT) algoritm', + reportWidgets: [ { - "cid": "false-discovery-rate-chart", - "name": "Chart for False Discovery Rate metric", - "description": "Show bar chart for False Discovery Rate metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - "properties": [], - "widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'false-discovery-rate-chart', + name: 'Chart for False Discovery Rate metric', + description: 'Show bar chart for False Discovery Rate metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:false-discovery-rate-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-discovery-rate-chart.mdx", - "status": "OK" + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:false-discovery-rate-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-discovery-rate-chart.mdx', + status: 'OK', }, { - "cid": "false-negative-rate-chart", - "name": "Chart for False Negative Rate metric", - "description": "Show bar chart for False Negative Rate metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - "properties": [], - "widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'false-negative-rate-chart', + name: 'Chart for False Negative Rate metric', + description: 'Show bar chart for False Negative Rate metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:false-negative-rate-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": 
"aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-negative-rate-chart.mdx", - "status": "OK" + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:false-negative-rate-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-negative-rate-chart.mdx', + status: 'OK', }, { - "cid": "false-omission-rate-chart", - "name": "Chart for False Omission Rate metric", - "description": "Show bar chart for False Omission Rate metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - "properties": [], - "widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'false-omission-rate-chart', + name: 'Chart for False Omission Rate metric', + description: 'Show bar chart for False Omission Rate metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:false-omission-rate-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-omission-rate-chart.mdx", - "status": "OK" + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:false-omission-rate-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-omission-rate-chart.mdx', + status: 'OK', }, { - "cid": "false-positive-rate-chart", - "name": "Chart for False Positive Rate metric", - "description": "Show bar chart for False Positive Rate metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - "properties": [], - "widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'false-positive-rate-chart', + name: 'Chart for False Positive Rate metric', + description: 'Show bar chart for False Positive Rate metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:false-positive-rate-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-positive-rate-chart.mdx", - "status": "OK" + 
mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:false-positive-rate-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/false-positive-rate-chart.mdx', + status: 'OK', }, { - "cid": "introduction", - "name": "Introduction to the FMT Algorithm", - "description": "Widget to provide introduction for the FMT", - "tags": [], - "properties": [], - "widgetSize": { - "minW": 12, - "minH": 9, - "maxW": 12, - "maxH": 10 + cid: 'introduction', + name: 'Introduction to the FMT Algorithm', + description: 'Widget to provide introduction for the FMT', + tags: [], + properties: [], + widgetSize: { + minW: 12, + minH: 9, + maxW: 12, + maxH: 10, }, - "dependencies": [], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:introduction", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/introduction.mdx", - "status": "OK" + dependencies: [], + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:introduction', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/introduction.mdx', + status: 'OK', }, { - "cid": "negative-predictive-value-parity-chart", - "name": "Chart for Negative Predictive Value Parity metric", - "description": "Show bar chart for Negative Predictive Value Parity metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - "properties": [], - "widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'negative-predictive-value-parity-chart', + name: 'Chart for Negative Predictive Value Parity metric', + description: + 'Show bar chart for Negative Predictive Value Parity metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:negative-predictive-value-parity-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/negative-predictive-value-parity-chart.mdx", - "status": "OK" + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:negative-predictive-value-parity-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/negative-predictive-value-parity-chart.mdx', + status: 'OK', }, { - "cid": "positive-predictive-value-parity-chart", - "name": "Chart for Positive Predictive Value Parity metric", - "description": "Show bar chart for Positive Predictive Value Parity metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - 
"properties": [], - "widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'positive-predictive-value-parity-chart', + name: 'Chart for Positive Predictive Value Parity metric', + description: + 'Show bar chart for Positive Predictive Value Parity metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:positive-predictive-value-parity-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/positive-predictive-value-parity-chart.mdx", - "status": "OK" + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:positive-predictive-value-parity-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/positive-predictive-value-parity-chart.mdx', + status: 'OK', }, { - "cid": "true-negative-rate-chart", - "name": "Chart for True Negative Rate metric", - "description": "Show bar chart for True Negative Rate metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - "properties": [], - "widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'true-negative-rate-chart', + name: 'Chart for True Negative Rate metric', + description: 'Show bar chart for True Negative Rate metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:true-negative-rate-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/true-negative-rate-chart.mdx", - "status": "OK" + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:true-negative-rate-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/true-negative-rate-chart.mdx', + status: 'OK', }, { - "cid": "true-positive-rate-chart", - "name": "Chart for True Positive Rate metric", - "description": "Show bar chart for True Positive Rate metric", - "tags": [ - "stock", - "chart", - "fairness" - ], - "properties": [], - 
"widgetSize": { - "minW": 12, - "minH": 12, - "maxW": 12, - "maxH": 36 + cid: 'true-positive-rate-chart', + name: 'Chart for True Positive Rate metric', + description: 'Show bar chart for True Positive Rate metric', + tags: ['stock', 'chart', 'fairness'], + properties: [], + widgetSize: { + minW: 12, + minH: 12, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "version": "0.1.0", - "valid": true + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + version: '0.1.0', + valid: true, }, { - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "valid": true - } + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:true-positive-rate-chart", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/widgets/true-positive-rate-chart.mdx", - "status": "OK" - } + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:true-positive-rate-chart', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/widgets/true-positive-rate-chart.mdx', + status: 'OK', + }, ], - "inputBlocks": [ + inputBlocks: [ { - "cid": "fairness-tree", - "name": "Fairness Tree", - "description": "There are many fairness metrics, and it is impossible to fulfill all of them due to the Impossibility Theorem of Machine Fairness. Therefore, it is important to select and rank the relevant metrics to focus during the resolving of fairness issues (if any). The fairness tree guides the user to make this decision, and this section documents the decision-making process.", - "fullScreen": true, - "type": "InputBlock", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "width": "md", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/inputs/fairness-tree.mdx" - } + cid: 'fairness-tree', + name: 'Fairness Tree', + description: + 'There are many fairness metrics, and it is impossible to fulfill all of them due to the Impossibility Theorem of Machine Fairness. Therefore, it is important to select and rank the relevant metrics to focus during the resolving of fairness issues (if any). 
The fairness tree guides the user to make this decision, and this section documents the decision-making process.', + fullScreen: true, + type: 'InputBlock', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + width: 'md', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/inputs/fairness-tree.mdx', + }, ], - "isStock": true, - "installedAt": 1679903994465 + isStock: true, + installedAt: 1679903994465, }, { - "gid": "aiverify.stock.process-checklist", - "name": "AI Verify Process Checklist", - "version": "0.1.0", - "author": "Leong Peck Yoke", - "description": "Process checklist for AI Verify framework", - "reportWidgets": [ + gid: 'aiverify.stock.process-checklist', + name: 'AI Verify Process Checklist', + version: '0.1.0', + author: 'Leong Peck Yoke', + description: 'Process checklist for AI Verify framework', + reportWidgets: [ { - "cid": "explainability-process-checklist-answers", - "name": "Explainability Process Checklist Answers", - "tags": [ - "stock", - "process-checklist" - ], - "properties": [ + cid: 'explainability-process-checklist-answers', + name: 'Explainability Process Checklist Answers', + tags: ['stock', 'process-checklist'], + properties: [ { - "key": "section", - "helper": "Enter the section to display", - "default": "Explainability" + key: 'section', + helper: 'Enter the section to display', + default: 'Explainability', }, { - "key": "startIndex", - "helper": "Enter the start index of the process check to display", - "default": "0" - } + key: 'startIndex', + helper: 'Enter the start index of the process check to display', + default: '0', + }, ], - "widgetSize": { - "minW": 12, - "minH": 3, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 12, + minH: 3, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.process-checklist:explainability-process-checklist", - "valid": true - } + gid: 'aiverify.stock.process-checklist:explainability-process-checklist', + valid: true, + }, ], - "mockdata": [ + mockdata: [ { - "type": "InputBlock", - "gid": "aiverify.stock.process-checklist:explainability-process-checklist", - "datapath": "explainability-mockdata.json", - "data": { - "elaboration-explainability-2": "Documented as part of company's software development process", - "completed-explainability-2": "Yes" - } - } + type: 'InputBlock', + gid: 'aiverify.stock.process-checklist:explainability-process-checklist', + datapath: 'explainability-mockdata.json', + data: { + 'elaboration-explainability-2': + "Documented as part of company's software development process", + 'completed-explainability-2': 'Yes', + }, + }, ], - "type": "ReportWidget", - "gid": "aiverify.stock.process-checklist:explainability-process-checklist-answers", - "version": "0.1.0", - "pluginGID": "aiverify.stock.process-checklist", - "mdxPath": "aiverify.stock.process-checklist/widgets/explainability-process-checklist-answers.mdx", - "status": "OK" + type: 'ReportWidget', + gid: 'aiverify.stock.process-checklist:explainability-process-checklist-answers', + version: '0.1.0', + pluginGID: 'aiverify.stock.process-checklist', + mdxPath: + 'aiverify.stock.process-checklist/widgets/explainability-process-checklist-answers.mdx', + status: 'OK', }, { - "cid": "fairness-process-checklist-answers", - "name": "Fairness Process Checklist Answers", - "tags": [ - "stock", - "process-checklist" - ], - "properties": [ + cid: 'fairness-process-checklist-answers', + name: 'Fairness 
Process Checklist Answers', + tags: ['stock', 'process-checklist'], + properties: [ { - "key": "section", - "helper": "Enter the section to display", - "default": "Fairness" + key: 'section', + helper: 'Enter the section to display', + default: 'Fairness', }, { - "key": "startIndex", - "helper": "Enter the start index of the process check to display", - "default": "0" - } + key: 'startIndex', + helper: 'Enter the start index of the process check to display', + default: '0', + }, ], - "widgetSize": { - "minW": 12, - "minH": 3, - "maxW": 12, - "maxH": 36 + widgetSize: { + minW: 12, + minH: 3, + maxW: 12, + maxH: 36, }, - "dependencies": [ + dependencies: [ { - "gid": "aiverify.stock.process-checklist:fairness-process-checklist", - "valid": true - } + gid: 'aiverify.stock.process-checklist:fairness-process-checklist', + valid: true, + }, ], - "mockdata": [], - "type": "ReportWidget", - "gid": "aiverify.stock.process-checklist:fairness-process-checklist-answers", - "version": "0.1.0", - "pluginGID": "aiverify.stock.process-checklist", - "mdxPath": "aiverify.stock.process-checklist/widgets/fairness-process-checklist-answers.mdx", - "status": "OK" - } + mockdata: [], + type: 'ReportWidget', + gid: 'aiverify.stock.process-checklist:fairness-process-checklist-answers', + version: '0.1.0', + pluginGID: 'aiverify.stock.process-checklist', + mdxPath: + 'aiverify.stock.process-checklist/widgets/fairness-process-checklist-answers.mdx', + status: 'OK', + }, ], - "inputBlocks": [ + inputBlocks: [ { - "cid": "explainability-process-checklist", - "name": "Explainability Process Checklist", - "description": "Process checklist for Explainability principle", - "group": "AI Verify Process Checklists", - "width": "xl", - "type": "InputBlock", - "gid": "aiverify.stock.process-checklist:explainability-process-checklist", - "version": "0.1.0", - "pluginGID": "aiverify.stock.process-checklist", - "mdxPath": "aiverify.stock.process-checklist/inputs/explainability-process-checklist.mdx" + cid: 'explainability-process-checklist', + name: 'Explainability Process Checklist', + description: 'Process checklist for Explainability principle', + group: 'AI Verify Process Checklists', + width: 'xl', + type: 'InputBlock', + gid: 'aiverify.stock.process-checklist:explainability-process-checklist', + version: '0.1.0', + pluginGID: 'aiverify.stock.process-checklist', + mdxPath: + 'aiverify.stock.process-checklist/inputs/explainability-process-checklist.mdx', }, { - "cid": "fairness-process-checklist", - "name": "Fairness Process Checklist", - "description": "Process checklist for Fairness principle", - "group": "AI Verify Process Checklists", - "width": "xl", - "type": "InputBlock", - "gid": "aiverify.stock.process-checklist:fairness-process-checklist", - "version": "0.1.0", - "pluginGID": "aiverify.stock.process-checklist", - "mdxPath": "aiverify.stock.process-checklist/inputs/fairness-process-checklist.mdx" - } + cid: 'fairness-process-checklist', + name: 'Fairness Process Checklist', + description: 'Process checklist for Fairness principle', + group: 'AI Verify Process Checklists', + width: 'xl', + type: 'InputBlock', + gid: 'aiverify.stock.process-checklist:fairness-process-checklist', + version: '0.1.0', + pluginGID: 'aiverify.stock.process-checklist', + mdxPath: + 'aiverify.stock.process-checklist/inputs/fairness-process-checklist.mdx', + }, ], - "isStock": true, - "installedAt": 1679904178330 + isStock: true, + installedAt: 1679904178330, }, { - "gid": 
"aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification", - "version": "0.1.0", - "name": "fairness metrics toolbox for classification", - "author": "IMDA-T2E", - "description": "The Fairness Metrics Toolbox (FMT) for Classification contains a list of fairness metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. gender, marital status). This plugin is developed for classification models.", - "algorithms": [ + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification', + version: '0.1.0', + name: 'fairness metrics toolbox for classification', + author: 'IMDA-T2E', + description: + 'The Fairness Metrics Toolbox (FMT) for Classification contains a list of fairness metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. gender, marital status). This plugin is developed for classification models.', + algorithms: [ { - "cid": "fairness_metrics_toolbox_for_classification", - "name": "fairness metrics toolbox for classification", - "modelType": [ - "classification" + cid: 'fairness_metrics_toolbox_for_classification', + name: 'fairness metrics toolbox for classification', + modelType: ['classification'], + version: '0.1.0', + author: 'IMDA-T2E', + description: + 'The Fairness Metrics Toolbox (FMT) for Classification contains a list of fairness metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. gender, marital status). This plugin is developed for classification models.', + tags: [ + 'fairness metrics toolbox for classification', + 'classification', ], - "version": "0.1.0", - "author": "IMDA-T2E", - "description": "The Fairness Metrics Toolbox (FMT) for Classification contains a list of fairness metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. gender, marital status). 
This plugin is developed for classification models.", - "tags": [ - "fairness metrics toolbox for classification", - "classification" + requireGroundTruth: true, + requiredFiles: [ + 'AUTHORS.rst', + 'CHANGELOG.md', + 'input.schema.json', + 'LICENSE', + 'output.schema.json', + 'fairness_metrics_toolbox_for_classification.meta.json', + 'fairness_metrics_toolbox_for_classification.py', + 'README.md', + 'requirements.txt', + 'syntax_checker.py', ], - "requireGroundTruth": true, - "requiredFiles": [ - "AUTHORS.rst", - "CHANGELOG.md", - "input.schema.json", - "LICENSE", - "output.schema.json", - "fairness_metrics_toolbox_for_classification.meta.json", - "fairness_metrics_toolbox_for_classification.py", - "README.md", - "requirements.txt", - "syntax_checker.py" + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification:fairness_metrics_toolbox_for_classification', + pluginGID: + 'aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification', + algoPath: + '/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification/algorithms/fairness_metrics_toolbox_for_classification', + requirements: [ + 'joblib==1.2.0 ; python_version >= "3.10" and python_version < "4.0"', + 'numpy==1.23.5 ; python_version >= "3.10" and python_version < "4.0"', + 'scikit-learn==1.2.2 ; python_version >= "3.10" and python_version < "4.0"', + 'scipy==1.9.3 ; python_version >= "3.10" and python_version < "4.0"', + 'threadpoolctl==3.1.0 ; python_version >= "3.10" and python_version < "4.0"', ], - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification:fairness_metrics_toolbox_for_classification", - "pluginGID": "aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification", - "algoPath": "/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification/algorithms/fairness_metrics_toolbox_for_classification", - "requirements": [ - "joblib==1.2.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "numpy==1.23.5 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scikit-learn==1.2.2 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scipy==1.9.3 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "threadpoolctl==3.1.0 ; python_version >= \"3.10\" and python_version < \"4.0\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - "required": [ - "sensitive_feature" - ], - "properties": { - "sensitive_feature": { - "title": "Sensitive Feature Names", - "description": "Array of Sensitive Feature Names (e.g. 
Gender)", - "type": "array", - "items": { - "type": "string" - } - } - } + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: ['sensitive_feature'], + properties: { + sensitive_feature: { + title: 'Sensitive Feature Names', + description: 'Array of Sensitive Feature Names (e.g. Gender)', + type: 'array', + items: { + type: 'string', + }, + }, + }, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "sensitive_feature", - "output_classes", - "results" - ], - "properties": { - "sensitive_feature": { - "description": "Array of sensitive feature names", - "type": "array", - "minItems": 1, - "items": { - "type": "string" - } + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['sensitive_feature', 'output_classes', 'results'], + properties: { + sensitive_feature: { + description: 'Array of sensitive feature names', + type: 'array', + minItems: 1, + items: { + type: 'string', + }, }, - "output_classes": { - "description": "Array of output classes", - "type": "array", - "minItems": 1, - "items": { - "type": [ - "string", - "number", - "integer", - "boolean" - ] - } + output_classes: { + description: 'Array of output classes', + type: 'array', + minItems: 1, + items: { + type: ['string', 'number', 'integer', 'boolean'], + }, }, - "results": { - "description": "Array of metrics by output classes (# output classes)", - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "description": "Dictionary of metric values by group", - "required": [ - "True Positive Rate", - "True Negative Rate", - "Positive Predictive Value Parity", - "Negative Predictive Value Parity", - "False Positive Rate", - "False Negative Rate", - "False Discovery Rate", - "False Omission Rate" + results: { + description: + 'Array of metrics by output classes (# output classes)', + type: 'array', + minItems: 1, + items: { + type: 'object', + description: 'Dictionary of metric values by group', + required: [ + 'True Positive Rate', + 'True Negative Rate', + 'Positive Predictive Value Parity', + 'Negative Predictive Value Parity', + 'False Positive Rate', + 'False Negative Rate', + 'False Discovery Rate', + 'False Omission Rate', ], - "properties": { - "True Positive Rate": { - "$ref": "#/$defs/metric" + properties: { + 'True Positive Rate': { + $ref: '#/$defs/metric', + }, + 'True Negative Rate': { + $ref: '#/$defs/metric', }, - "True Negative Rate": { - "$ref": "#/$defs/metric" + 'Positive Predictive Value Parity': { + $ref: '#/$defs/metric', }, - "Positive Predictive Value Parity": { - "$ref": "#/$defs/metric" + 'Negative Predictive Value Parity': { + $ref: '#/$defs/metric', }, - 
"Negative Predictive Value Parity": { - "$ref": "#/$defs/metric" + 'False Positive Rate': { + $ref: '#/$defs/metric', }, - "False Positive Rate": { - "$ref": "#/$defs/metric" + 'False Negative Rate': { + $ref: '#/$defs/metric', }, - "False Negative Rate": { - "$ref": "#/$defs/metric" + 'False Discovery Rate': { + $ref: '#/$defs/metric', }, - "False Discovery Rate": { - "$ref": "#/$defs/metric" + 'False Omission Rate': { + $ref: '#/$defs/metric', }, - "False Omission Rate": { - "$ref": "#/$defs/metric" - } - } - } - } + }, + }, + }, }, - "$defs": { - "metric": { - "description": "Array of metric values for each group, e.g. [{group:[1,2], metric:0.122},...]", - "type": "array", - "items": { - "type": "object", - "required": [ - "group", - "metric" - ], - "properties": { - "group": { - "type": "array", - "description": "Array of group values, one value for each feature, .e.g group: [1,4,7]" + $defs: { + metric: { + description: + 'Array of metric values for each group, e.g. [{group:[1,2], metric:0.122},...]', + type: 'array', + items: { + type: 'object', + required: ['group', 'metric'], + properties: { + group: { + type: 'array', + description: + 'Array of group values, one value for each feature, .e.g group: [1,4,7]', }, - "metric": { - "type": "number" - } - } + metric: { + type: 'number', + }, + }, }, - "minItems": 2 - } - } - } - } + minItems: 2, + }, + }, + }, + }, ], - "isStock": true, - "installedAt": 1681953154295 + isStock: true, + installedAt: 1681953154295, }, { - "gid": "aiverify.stock.shap-toolbox-widgets", - "version": "1.0.0", - "name": "Widgets for SHAP toolbox", - "reportWidgets": [ + gid: 'aiverify.stock.shap-toolbox-widgets', + version: '1.0.0', + name: 'Widgets for SHAP toolbox', + reportWidgets: [ { - "cid": "global-explainability-chart", - "widgetSize": { - "minW": 12, - "minH": 16, - "maxW": 12, - "maxH": 36 + cid: 'global-explainability-chart', + widgetSize: { + minW: 12, + minH: 16, + maxW: 12, + maxH: 36, }, - "name": "Global Explainability - Overall Feature Importance (Bar Graph)", - "description": "Displays the overall feature importance in a barchart and summarized the results", - "dependencies": [ + name: 'Global Explainability - Overall Feature Importance (Bar Graph)', + description: + 'Displays the overall feature importance in a barchart and summarized the results', + dependencies: [ { - "gid": "aiverify.stock.algorithms.shap_toolbox:shap_toolbox", - "valid": true - } + gid: 'aiverify.stock.algorithms.shap_toolbox:shap_toolbox', + valid: true, + }, ], - "mockdata": [], - "properties": [ + mockdata: [], + properties: [ { - "key": "topNFeatures", - "helper": "Show the top N features in the chart", - "default": "20" - } + key: 'topNFeatures', + helper: 'Show the top N features in the chart', + default: '20', + }, ], - "type": "ReportWidget", - "gid": "aiverify.stock.shap-toolbox-widgets:global-explainability-chart", - "version": "1.0.0", - "pluginGID": "aiverify.stock.shap-toolbox-widgets", - "mdxPath": "aiverify.stock.shap-toolbox-widgets/widgets/global-explainability-chart.mdx", - "status": "OK" - } + type: 'ReportWidget', + gid: 'aiverify.stock.shap-toolbox-widgets:global-explainability-chart', + version: '1.0.0', + pluginGID: 'aiverify.stock.shap-toolbox-widgets', + mdxPath: + 'aiverify.stock.shap-toolbox-widgets/widgets/global-explainability-chart.mdx', + status: 'OK', + }, ], - "isStock": false, - "installedAt": 1682644230122 + isStock: false, + installedAt: 1682644230122, }, ], - "inputBlocks": [ + inputBlocks: [ { - "cid": 
"explainability-process-checklist", - "name": "Explainability Process Checklist", - "description": "Process checklist for Explainability principle", - "group": "AI Verify Process Checklists", - "width": "xl", - "type": "InputBlock", - "gid": "aiverify.stock.process-checklist:explainability-process-checklist", - "version": "0.1.0", - "pluginGID": "aiverify.stock.process-checklist", - "mdxPath": "aiverify.stock.process-checklist/inputs/explainability-process-checklist.mdx" + cid: 'explainability-process-checklist', + name: 'Explainability Process Checklist', + description: 'Process checklist for Explainability principle', + group: 'AI Verify Process Checklists', + width: 'xl', + type: 'InputBlock', + gid: 'aiverify.stock.process-checklist:explainability-process-checklist', + version: '0.1.0', + pluginGID: 'aiverify.stock.process-checklist', + mdxPath: + 'aiverify.stock.process-checklist/inputs/explainability-process-checklist.mdx', }, { - "cid": "fairness-process-checklist", - "name": "Fairness Process Checklist", - "description": "Process checklist for Fairness principle", - "group": "AI Verify Process Checklists", - "width": "xl", - "type": "InputBlock", - "gid": "aiverify.stock.process-checklist:fairness-process-checklist", - "version": "0.1.0", - "pluginGID": "aiverify.stock.process-checklist", - "mdxPath": "aiverify.stock.process-checklist/inputs/fairness-process-checklist.mdx" + cid: 'fairness-process-checklist', + name: 'Fairness Process Checklist', + description: 'Process checklist for Fairness principle', + group: 'AI Verify Process Checklists', + width: 'xl', + type: 'InputBlock', + gid: 'aiverify.stock.process-checklist:fairness-process-checklist', + version: '0.1.0', + pluginGID: 'aiverify.stock.process-checklist', + mdxPath: + 'aiverify.stock.process-checklist/inputs/fairness-process-checklist.mdx', }, { - "cid": "fairness-tree", - "name": "Fairness Tree", - "description": "There are many fairness metrics, and it is impossible to fulfill all of them due to the Impossibility Theorem of Machine Fairness. Therefore, it is important to select and rank the relevant metrics to focus during the resolving of fairness issues (if any). The fairness tree guides the user to make this decision, and this section documents the decision-making process.", - "fullScreen": true, - "type": "InputBlock", - "gid": "aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree", - "version": "0.1.0", - "pluginGID": "aiverify.stock.fairness-metrics-toolbox-widgets", - "width": "md", - "mdxPath": "aiverify.stock.fairness-metrics-toolbox-widgets/inputs/fairness-tree.mdx" + cid: 'fairness-tree', + name: 'Fairness Tree', + description: + 'There are many fairness metrics, and it is impossible to fulfill all of them due to the Impossibility Theorem of Machine Fairness. Therefore, it is important to select and rank the relevant metrics to focus during the resolving of fairness issues (if any). 
The fairness tree guides the user to make this decision, and this section documents the decision-making process.', + fullScreen: true, + type: 'InputBlock', + gid: 'aiverify.stock.fairness-metrics-toolbox-widgets:fairness-tree', + version: '0.1.0', + pluginGID: 'aiverify.stock.fairness-metrics-toolbox-widgets', + width: 'md', + mdxPath: + 'aiverify.stock.fairness-metrics-toolbox-widgets/inputs/fairness-tree.mdx', }, { - "cid": "testForm1", - "name": "Test Form 1", - "description": "Test Form 1 description blah blah", - "type": "InputBlock", - "gid": "aiverify.tests:testForm1", - "version": "1.0.0-alpha", - "pluginGID": "aiverify.tests", - "width": "md", - "mdxPath": "aiverify.tests/inputs/testForm1.mdx" - } + cid: 'testForm1', + name: 'Test Form 1', + description: 'Test Form 1 description blah blah', + type: 'InputBlock', + gid: 'aiverify.tests:testForm1', + version: '1.0.0-alpha', + pluginGID: 'aiverify.tests', + width: 'md', + mdxPath: 'aiverify.tests/inputs/testForm1.mdx', + }, ], - "algorithms": [ + algorithms: [ { - "cid": "adversarial_examples_toolbox", - "name": "adversarial examples toolbox", - "modelType": [ - "classification" - ], - "version": "0.1.0", - "author": "IMDA-T2E", - "description": "My adversarial examples toolbox", - "tags": [ - "adversarial examples toolbox", - "classification" + cid: 'adversarial_examples_toolbox', + name: 'adversarial examples toolbox', + modelType: ['classification'], + version: '0.1.0', + author: 'IMDA-T2E', + description: 'My adversarial examples toolbox', + tags: ['adversarial examples toolbox', 'classification'], + requireGroundTruth: true, + requiredFiles: [ + 'AUTHORS.rst', + 'CHANGELOG.md', + 'input.schema.json', + 'LICENSE', + 'output.schema.json', + 'adversarial_examples_toolbox.meta.json', + 'adversarial_examples_toolbox.py', + 'README.md', + 'requirements.txt', + 'syntax_checker.py', ], - "requireGroundTruth": true, - "requiredFiles": [ - "AUTHORS.rst", - "CHANGELOG.md", - "input.schema.json", - "LICENSE", - "output.schema.json", - "adversarial_examples_toolbox.meta.json", - "adversarial_examples_toolbox.py", - "README.md", - "requirements.txt", - "syntax_checker.py" + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.adversarial_examples_toolbox:adversarial_examples_toolbox', + pluginGID: 'aiverify.stock.algorithms.adversarial_examples_toolbox', + algoPath: + '/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.adversarial_examples_toolbox/algorithms/adversarial_examples_toolbox', + requirements: [ + 'joblib==1.2.0 ; python_version >= "3.10" and python_version < "4.0"', + 'numpy==1.23.5 ; python_version >= "3.10" and python_version < "4.0"', + 'scikit-learn==1.2.2 ; python_version >= "3.10" and python_version < "4.0"', + 'scipy==1.9.3 ; python_version >= "3.10" and python_version < "4.0"', + 'threadpoolctl==3.1.0 ; python_version >= "3.10" and python_version < "4.0"', ], - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.adversarial_examples_toolbox:adversarial_examples_toolbox", - "pluginGID": "aiverify.stock.algorithms.adversarial_examples_toolbox", - "algoPath": "/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.adversarial_examples_toolbox/algorithms/adversarial_examples_toolbox", - "requirements": [ - "joblib==1.2.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "numpy==1.23.5 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scikit-learn==1.2.2 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scipy==1.9.3 ; 
python_version >= \"3.10\" and python_version < \"4.0\"", - "threadpoolctl==3.1.0 ; python_version >= \"3.10\" and python_version < \"4.0\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/ai.verify.stock.algorithms.adversarial-examples-toolbox/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - "required": [], - "properties": {} + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/ai.verify.stock.algorithms.adversarial-examples-toolbox/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: [], + properties: {}, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/ai.verify.stock.algorithms.adversarial-examples-toolbox/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "results" - ], - "minProperties": 1, - "properties": { - "results": { - "description": "Algorithm Output", - "type": "object", - "required": [ - "num_of_adversarial_samples", - "org_accuracy", - "adversarial_accuracy", - "num_of_failed_adversarial_samples" + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/ai.verify.stock.algorithms.adversarial-examples-toolbox/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['results'], + minProperties: 1, + properties: { + results: { + description: 'Algorithm Output', + type: 'object', + required: [ + 'num_of_adversarial_samples', + 'org_accuracy', + 'adversarial_accuracy', + 'num_of_failed_adversarial_samples', ], - "properties": { - "num_of_adversarial_samples": { - "description": "Number of final adversarial samples", - "type": "number" + properties: { + num_of_adversarial_samples: { + description: 'Number of final adversarial samples', + type: 'number', + }, + original_accuracy: { + description: 'Original Accuracy', + type: 'number', }, - "original_accuracy": { - "description": "Original Accuracy", - "type": "number" + adversarial_accuracy: { + description: 'Adversarial accuracy', + type: 'number', }, - "adversarial_accuracy": { - "description": "Adversarial accuracy", - "type": "number" + num_of_failed_adversarial_samples: { + description: + 'Number of samples that failed to generate adversarial samples', + type: 'number', }, - "num_of_failed_adversarial_samples": { - "description": "Number of samples that failed to generate adversarial samples", - "type": "number" - } - } - } - } - } + }, + }, + }, + }, }, { - "cid": "fairness_metrics_toolbox_for_classification", - "name": "fairness metrics toolbox for classification", - "modelType": [ - "classification" + cid: 'fairness_metrics_toolbox_for_classification', + name: 'fairness metrics toolbox for classification', + modelType: ['classification'], + version: '0.1.0', + author: 'IMDA-T2E', + description: + 'The Fairness Metrics Toolbox (FMT) for Classification contains a list of fairness 
metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. gender, marital status). This plugin is developed for classification models.', + tags: ['fairness metrics toolbox for classification', 'classification'], + requireGroundTruth: true, + requiredFiles: [ + 'AUTHORS.rst', + 'CHANGELOG.md', + 'input.schema.json', + 'LICENSE', + 'output.schema.json', + 'fairness_metrics_toolbox_for_classification.meta.json', + 'fairness_metrics_toolbox_for_classification.py', + 'README.md', + 'requirements.txt', + 'syntax_checker.py', ], - "version": "0.1.0", - "author": "IMDA-T2E", - "description": "The Fairness Metrics Toolbox (FMT) for Classification contains a list of fairness metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. gender, marital status). This plugin is developed for classification models.", - "tags": [ - "fairness metrics toolbox for classification", - "classification" + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification:fairness_metrics_toolbox_for_classification', + pluginGID: + 'aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification', + algoPath: + '/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification/algorithms/fairness_metrics_toolbox_for_classification', + requirements: [ + 'joblib==1.2.0 ; python_version >= "3.10" and python_version < "4.0"', + 'numpy==1.23.5 ; python_version >= "3.10" and python_version < "4.0"', + 'scikit-learn==1.2.2 ; python_version >= "3.10" and python_version < "4.0"', + 'scipy==1.9.3 ; python_version >= "3.10" and python_version < "4.0"', + 'threadpoolctl==3.1.0 ; python_version >= "3.10" and python_version < "4.0"', ], - "requireGroundTruth": true, - "requiredFiles": [ - "AUTHORS.rst", - "CHANGELOG.md", - "input.schema.json", - "LICENSE", - "output.schema.json", - "fairness_metrics_toolbox_for_classification.meta.json", - "fairness_metrics_toolbox_for_classification.py", - "README.md", - "requirements.txt", - "syntax_checker.py" - ], - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification:fairness_metrics_toolbox_for_classification", - "pluginGID": "aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification", - "algoPath": "/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification/algorithms/fairness_metrics_toolbox_for_classification", - "requirements": [ - "joblib==1.2.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "numpy==1.23.5 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scikit-learn==1.2.2 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scipy==1.9.3 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "threadpoolctl==3.1.0 ; python_version >= \"3.10\" and python_version < \"4.0\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - 
"required": [ - "sensitive_feature" - ], - "properties": { - "sensitive_feature": { - "title": "Sensitive Feature Names", - "description": "Array of Sensitive Feature Names (e.g. Gender)", - "type": "array", - "items": { - "type": "string" - } - } - } + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: ['sensitive_feature'], + properties: { + sensitive_feature: { + title: 'Sensitive Feature Names', + description: 'Array of Sensitive Feature Names (e.g. Gender)', + type: 'array', + items: { + type: 'string', + }, + }, + }, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "sensitive_feature", - "output_classes", - "results" - ], - "properties": { - "sensitive_feature": { - "description": "Array of sensitive feature names", - "type": "array", - "minItems": 1, - "items": { - "type": "string" - } + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['sensitive_feature', 'output_classes', 'results'], + properties: { + sensitive_feature: { + description: 'Array of sensitive feature names', + type: 'array', + minItems: 1, + items: { + type: 'string', + }, }, - "output_classes": { - "description": "Array of output classes", - "type": "array", - "minItems": 1, - "items": { - "type": [ - "string", - "number", - "integer", - "boolean" - ] - } + output_classes: { + description: 'Array of output classes', + type: 'array', + minItems: 1, + items: { + type: ['string', 'number', 'integer', 'boolean'], + }, }, - "results": { - "description": "Array of metrics by output classes (# output classes)", - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "description": "Dictionary of metric values by group", - "required": [ - "True Positive Rate", - "True Negative Rate", - "Positive Predictive Value Parity", - "Negative Predictive Value Parity", - "False Positive Rate", - "False Negative Rate", - "False Discovery Rate", - "False Omission Rate" + results: { + description: + 'Array of metrics by output classes (# output classes)', + type: 'array', + minItems: 1, + items: { + type: 'object', + description: 'Dictionary of metric values by group', + required: [ + 'True Positive Rate', + 'True Negative Rate', + 'Positive Predictive Value Parity', + 'Negative Predictive Value Parity', + 'False Positive Rate', + 'False Negative Rate', + 'False Discovery Rate', + 'False Omission Rate', ], - "properties": { - "True Positive Rate": { - "$ref": "#/$defs/metric" + properties: { + 'True Positive Rate': { + $ref: '#/$defs/metric', + }, + 'True Negative Rate': { + $ref: '#/$defs/metric', }, - "True Negative Rate": { - "$ref": "#/$defs/metric" + 'Positive Predictive Value 
Parity': { + $ref: '#/$defs/metric', }, - "Positive Predictive Value Parity": { - "$ref": "#/$defs/metric" + 'Negative Predictive Value Parity': { + $ref: '#/$defs/metric', }, - "Negative Predictive Value Parity": { - "$ref": "#/$defs/metric" + 'False Positive Rate': { + $ref: '#/$defs/metric', }, - "False Positive Rate": { - "$ref": "#/$defs/metric" + 'False Negative Rate': { + $ref: '#/$defs/metric', }, - "False Negative Rate": { - "$ref": "#/$defs/metric" + 'False Discovery Rate': { + $ref: '#/$defs/metric', }, - "False Discovery Rate": { - "$ref": "#/$defs/metric" + 'False Omission Rate': { + $ref: '#/$defs/metric', }, - "False Omission Rate": { - "$ref": "#/$defs/metric" - } - } - } - } + }, + }, + }, }, - "$defs": { - "metric": { - "description": "Array of metric values for each group, e.g. [{group:[1,2], metric:0.122},...]", - "type": "array", - "items": { - "type": "object", - "required": [ - "group", - "metric" - ], - "properties": { - "group": { - "type": "array", - "description": "Array of group values, one value for each feature, .e.g group: [1,4,7]" + $defs: { + metric: { + description: + 'Array of metric values for each group, e.g. [{group:[1,2], metric:0.122},...]', + type: 'array', + items: { + type: 'object', + required: ['group', 'metric'], + properties: { + group: { + type: 'array', + description: + 'Array of group values, one value for each feature, .e.g group: [1,4,7]', }, - "metric": { - "type": "number" - } - } + metric: { + type: 'number', + }, + }, }, - "minItems": 2 - } - } - } + minItems: 2, + }, + }, + }, }, { - "cid": "shap_toolbox", - "name": "shap toolbox", - "modelType": [ - "classification", - "regression" - ], - "version": "0.1.0", - "author": "IMDA-T2E", - "description": "SHAP (SHapley Additive exPlanations) is a game theoretic approach to explain the output of any machine learning model. It connects optimal credit allocation with local explanations using the classic Shapley values from game theory and their related extensions (see papers for details and citations).", - "tags": [ - "shap toolbox", - "classification", - "regression" + cid: 'shap_toolbox', + name: 'shap toolbox', + modelType: ['classification', 'regression'], + version: '0.1.0', + author: 'IMDA-T2E', + description: + 'SHAP (SHapley Additive exPlanations) is a game theoretic approach to explain the output of any machine learning model. 
It connects optimal credit allocation with local explanations using the classic Shapley values from game theory and their related extensions (see papers for details and citations).', + tags: ['shap toolbox', 'classification', 'regression'], + requireGroundTruth: true, + requiredFiles: [ + 'AUTHORS.rst', + 'CHANGELOG.md', + 'input.schema.json', + 'LICENSE', + 'output.schema.json', + 'shap_toolbox.meta.json', + 'shap_toolbox.py', + 'README.md', + 'requirements.txt', + 'syntax_checker.py', + 'src', ], - "requireGroundTruth": true, - "requiredFiles": [ - "AUTHORS.rst", - "CHANGELOG.md", - "input.schema.json", - "LICENSE", - "output.schema.json", - "shap_toolbox.meta.json", - "shap_toolbox.py", - "README.md", - "requirements.txt", - "syntax_checker.py", - "src" + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.shap_toolbox:shap_toolbox', + pluginGID: 'aiverify.stock.algorithms.shap_toolbox', + algoPath: + '/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.shap_toolbox/algorithms/shap_toolbox', + requirements: [ + 'cloudpickle==2.2.1 ; python_version >= "3.10" and python_version < "4.0"', + 'colorama==0.4.6 ; python_version >= "3.10" and python_version < "4.0" and platform_system == "Windows"', + 'joblib==1.2.0 ; python_version >= "3.10" and python_version < "4.0"', + 'llvmlite==0.39.1 ; python_version >= "3.10" and python_version < "4.0"', + 'numba==0.56.4 ; python_version >= "3.10" and python_version < "4.0"', + 'numpy==1.23.5 ; python_version >= "3.10" and python_version < "4.0"', + 'packaging==23.0 ; python_version >= "3.10" and python_version < "4.0"', + 'pandas==1.5.3 ; python_version >= "3.10" and python_version < "4.0"', + 'python-dateutil==2.8.2 ; python_version >= "3.10" and python_version < "4.0"', + 'pytz==2023.3 ; python_version >= "3.10" and python_version < "4.0"', + 'scikit-learn==1.2.2 ; python_version >= "3.10" and python_version < "4.0"', + 'scipy==1.9.3 ; python_version >= "3.10" and python_version < "4.0"', + 'setuptools==67.6.1 ; python_version >= "3.10" and python_version < "4.0"', + 'shap==0.41.0 ; python_version >= "3.10" and python_version < "4.0"', + 'six==1.16.0 ; python_version >= "3.10" and python_version < "4.0"', + 'slicer==0.0.7 ; python_version >= "3.10" and python_version < "4.0"', + 'threadpoolctl==3.1.0 ; python_version >= "3.10" and python_version < "4.0"', + 'tqdm==4.65.0 ; python_version >= "3.10" and python_version < "4.0"', ], - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.shap_toolbox:shap_toolbox", - "pluginGID": "aiverify.stock.algorithms.shap_toolbox", - "algoPath": "/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.shap_toolbox/algorithms/shap_toolbox", - "requirements": [ - "cloudpickle==2.2.1 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "colorama==0.4.6 ; python_version >= \"3.10\" and python_version < \"4.0\" and platform_system == \"Windows\"", - "joblib==1.2.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "llvmlite==0.39.1 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "numba==0.56.4 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "numpy==1.23.5 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "packaging==23.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "pandas==1.5.3 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "python-dateutil==2.8.2 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "pytz==2023.3 ; python_version >= \"3.10\" and 
python_version < \"4.0\"", - "scikit-learn==1.2.2 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scipy==1.9.3 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "setuptools==67.6.1 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "shap==0.41.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "six==1.16.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "slicer==0.0.7 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "threadpoolctl==3.1.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "tqdm==4.65.0 ; python_version >= \"3.10\" and python_version < \"4.0\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.shap_toolbox/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - "required": [ - "algo_type", - "explain_type", - "background_path", - "background_samples", - "data_samples" + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.shap_toolbox/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: [ + 'algo_type', + 'explain_type', + 'background_path', + 'background_samples', + 'data_samples', ], - "properties": { - "algo_type": { - "title": "Algorithm Type", - "description": "Algorithm Type - [tree, linear, kernel (default)]", - "type": "string" + properties: { + algo_type: { + title: 'Algorithm Type', + description: 'Algorithm Type - [tree, linear, kernel (default)]', + type: 'string', + }, + explain_type: { + title: 'Explain Type', + description: 'Explain Type - [global, local (default)]', + type: 'string', }, - "explain_type": { - "title": "Explain Type", - "description": "Explain Type - [global, local (default)]", - "type": "string" + background_path: { + title: 'Background data path', + description: 'Background data path', + type: 'string', }, - "background_path": { - "title": "Background data path", - "description": "Background data path", - "type": "string" + background_samples: { + title: 'Background Samples', + description: 'Background Samples (e.g. 25)', + type: 'number', }, - "background_samples": { - "title": "Background Samples", - "description": "Background Samples (e.g. 25)", - "type": "number" + data_samples: { + title: 'Data Samples', + description: 'Data Samples (e.g. 25)', + type: 'number', }, - "data_samples": { - "title": "Data Samples", - "description": "Data Samples (e.g. 
25)", - "type": "number" - } - } + }, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.shap_toolbox/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "feature_names", - "results" - ], - "properties": { - "feature_names": { - "type": "array", - "description": "Array of feature names", - "minItems": 1, - "items": { - "type": "string" - } + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.shap_toolbox/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['feature_names', 'results'], + properties: { + feature_names: { + type: 'array', + description: 'Array of feature names', + minItems: 1, + items: { + type: 'string', + }, }, - "results": { - "description": "Matrix of feature values (# feature names)", - "type": "object", - "required": [ - "num_local_classes", - "local", - "single_explainer_values", - "single_shap_values", - "global_shap_values", - "global_samples", - "num_global_classes", - "global" + results: { + description: 'Matrix of feature values (# feature names)', + type: 'object', + required: [ + 'num_local_classes', + 'local', + 'single_explainer_values', + 'single_shap_values', + 'global_shap_values', + 'global_samples', + 'num_global_classes', + 'global', ], - "properties": { - "num_local_classes": { - "description": "Number of local classes", - "type": "number" + properties: { + num_local_classes: { + description: 'Number of local classes', + type: 'number', }, - "local": { - "description": "# of local classes", - "type": "array", - "minItems": 1, - "items": { - "type": "array", - "minItems": 1, - "items": { - "type": "array", - "description": "class values", - "minItems": 1, - "items": { - "type": "number" - } - } - } + local: { + description: '# of local classes', + type: 'array', + minItems: 1, + items: { + type: 'array', + minItems: 1, + items: { + type: 'array', + description: 'class values', + minItems: 1, + items: { + type: 'number', + }, + }, + }, }, - "single_explainer_values": { - "description": "array of single explainer values", - "type": "array", - "minItems": 1, - "items": { - "type": "number" - } + single_explainer_values: { + description: 'array of single explainer values', + type: 'array', + minItems: 1, + items: { + type: 'number', + }, + }, + single_shap_values: { + description: 'array of single shap values', + type: 'array', + minItems: 1, + items: { + type: 'array', + description: 'class values', + minItems: 1, + items: { + type: 'number', + }, + }, }, - "single_shap_values": { - "description": "array of single shap values", - "type": "array", - "minItems": 1, - "items": { - "type": "array", - "description": "class values", - "minItems": 1, - "items": { - "type": "number" - } - } + global_shap_values: { + description: 'global shap values', + type: 'array', + items: { + type: 'array', + description: + 'Matrix of SHAP values (# samples x # features)', + minItems: 1, + items: { + type: 'array', + description: 'Array of SHAP values for each feature', + minItems: 1, + items: { + type: 'number', + }, + }, + }, }, - "global_shap_values": { - "description": "global shap values", 
- "type": "array", - "items": { - "type": "array", - "description": "Matrix of SHAP values (# samples x # features)", - "minItems": 1, - "items": { - "type": "array", - "description": "Array of SHAP values for each feature", - "minItems": 1, - "items": { - "type": "number" - } - } - } + global_samples: { + description: + 'Matrix of feature values (# samples x # features)', + type: 'array', + items: { + type: 'array', + description: 'Array of sample values for each feature', + minItems: 1, + items: { + type: 'number', + }, + }, }, - "global_samples": { - "description": "Matrix of feature values (# samples x # features)", - "type": "array", - "items": { - "type": "array", - "description": "Array of sample values for each feature", - "minItems": 1, - "items": { - "type": "number" - } - } + num_global_classes: { + description: 'Number of global classes', + type: 'number', }, - "num_global_classes": { - "description": "Number of global classes", - "type": "number" + global: { + description: '# of global classes', + type: 'array', + items: { + type: 'array', + minItems: 1, + items: { + type: 'number', + }, + }, }, - "global": { - "description": "# of global classes", - "type": "array", - "items": { - "type": "array", - "minItems": 1, - "items": { - "type": "number" - } - } - } - } - } - } - } + }, + }, + }, + }, }, { - "cid": "partial_dependence_plot", - "name": "Partial Dependence Plot", - "modelType": [ - "classification", - "regression" - ], - "version": "0.1.0", - "author": "Test User", - "description": "Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. They show how predictions partially depend on values of the input variables of interests.", - "tags": [ - "Partial Dependence Plot", - "classification", - "regression" + cid: 'partial_dependence_plot', + name: 'Partial Dependence Plot', + modelType: ['classification', 'regression'], + version: '0.1.0', + author: 'Test User', + description: + 'Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. 
They show how predictions partially depend on values of the input variables of interests.', + tags: ['Partial Dependence Plot', 'classification', 'regression'], + requireGroundTruth: false, + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.partial_dependence_plot:partial_dependence_plot', + pluginGID: 'aiverify.stock.algorithms.partial_dependence_plot', + algoPath: + '/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.partial_dependence_plot/algorithms/partial_dependence_plot', + requirements: [ + 'numpy==1.24.1 ; python_version >= "3.10" and python_version < "3.12"', + 'scipy==1.10.0 ; python_version >= "3.10" and python_version < "3.12"', ], - "requireGroundTruth": false, - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.partial_dependence_plot:partial_dependence_plot", - "pluginGID": "aiverify.stock.algorithms.partial_dependence_plot", - "algoPath": "/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.partial_dependence_plot/algorithms/partial_dependence_plot", - "requirements": [ - "numpy==1.24.1 ; python_version >= \"3.10\" and python_version < \"3.12\"", - "scipy==1.10.0 ; python_version >= \"3.10\" and python_version < \"3.12\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - "required": [ - "target_feature_name", - "percentiles", - "grid_resolution" - ], - "properties": { - "target_feature_name": { - "title": "Target Feature Name", - "description": "Target Feature Name (e.g. Interest_Rate)", - "type": "string" + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: ['target_feature_name', 'percentiles', 'grid_resolution'], + properties: { + target_feature_name: { + title: 'Target Feature Name', + description: 'Target Feature Name (e.g. Interest_Rate)', + type: 'string', }, - "percentiles": { - "title": "Cut-off percentiles", - "description": "Cut-off percentiles (e.g. [0.01, 0.99])", - "type": "array", - "minItems": 2, - "maxItems": 2, - "items": { - "type": "number" - } + percentiles: { + title: 'Cut-off percentiles', + description: 'Cut-off percentiles (e.g. [0.01, 0.99])', + type: 'array', + minItems: 2, + maxItems: 2, + items: { + type: 'number', + }, + }, + grid_resolution: { + title: 'Grid Resolution', + description: 'Grid Resolution (e.g. 25)', + type: 'number', }, - "grid_resolution": { - "title": "Grid Resolution", - "description": "Grid Resolution (e.g. 
25)", - "type": "number" - } - } + }, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "feature_names", - "output_classes", - "results" - ], - "minProperties": 1, - "properties": { - "feature_names": { - "type": "array", - "description": "Array of feature names", - "minItems": 1, - "items": { - "type": "string" - } + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['feature_names', 'output_classes', 'results'], + minProperties: 1, + properties: { + feature_names: { + type: 'array', + description: 'Array of feature names', + minItems: 1, + items: { + type: 'string', + }, + }, + output_classes: { + type: 'array', + description: 'Array of output classes', + minItems: 1, + items: { + type: 'string', + }, }, - "output_classes": { - "type": "array", - "description": "Array of output classes", - "minItems": 1, - "items": { - "type": "string" - } + results: { + description: 'Matrix of feature values (# feature names)', + type: 'array', + minItems: 1, + items: { + description: 'Matrix of PDP values (# output classes)', + type: 'array', + minItems: 1, + items: { + type: 'array', + description: 'Array of values for each PDP', + minItems: 1, + items: { + type: 'number', + }, + }, + }, }, - "results": { - "description": "Matrix of feature values (# feature names)", - "type": "array", - "minItems": 1, - "items": { - "description": "Matrix of PDP values (# output classes)", - "type": "array", - "minItems": 1, - "items": { - "type": "array", - "description": "Array of values for each PDP", - "minItems": 1, - "items": { - "type": "number" - } - } - } - } - } - } + }, + }, }, { - "cid": "fairness_metrics_toolbox", - "name": "Fairness Metrics Toolbox", - "modelType": [ - "classification" - ], - "version": "0.1.0", - "author": "Kelvin Kok", - "description": "The Fairness Metrics Toolbox (FMT) contains a list of fairness metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. gender, marital status).", - "tags": [ - "Fairness Metrics Toolbox", - "classification" + cid: 'fairness_metrics_toolbox', + name: 'Fairness Metrics Toolbox', + modelType: ['classification'], + version: '0.1.0', + author: 'Kelvin Kok', + description: + 'The Fairness Metrics Toolbox (FMT) contains a list of fairness metrics to measure how resources (e.g. opportunities, food, loan, medical help) are allocated among the demographic groups (e.g. married male, married female) given a set of sensitive feature(s) (e.g. 
gender, marital status).', + tags: ['Fairness Metrics Toolbox', 'classification'], + requireGroundTruth: true, + requiredFiles: [ + 'AUTHORS.rst', + 'CHANGELOG.md', + 'input.schema.json', + 'LICENSE', + 'output.schema.json', + 'fairness_metrics_toolbox.meta.json', + 'fairness_metrics_toolbox.py', + 'README.md', + 'requirements.txt', + 'syntax_checker.py', ], - "requireGroundTruth": true, - "requiredFiles": [ - "AUTHORS.rst", - "CHANGELOG.md", - "input.schema.json", - "LICENSE", - "output.schema.json", - "fairness_metrics_toolbox.meta.json", - "fairness_metrics_toolbox.py", - "README.md", - "requirements.txt", - "syntax_checker.py" + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox', + pluginGID: 'aiverify.stock.algorithms.fairness_metrics_toolbox', + algoPath: + '/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/algorithms/fairness_metrics_toolbox', + requirements: [ + 'joblib==1.2.0 ; python_version >= "3.10" and python_version < "4.0"', + 'numpy==1.24.2 ; python_version >= "3.10" and python_version < "4.0"', + 'scikit-learn==1.2.1 ; python_version >= "3.10" and python_version < "4.0"', + 'scipy==1.9.3 ; python_version >= "3.10" and python_version < "4.0"', + 'threadpoolctl==3.1.0 ; python_version >= "3.10" and python_version < "4.0"', ], - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.fairness_metrics_toolbox:fairness_metrics_toolbox", - "pluginGID": "aiverify.stock.algorithms.fairness_metrics_toolbox", - "algoPath": "/home/amdlahir/projects/ai-verify/ai-verify-portal/plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/algorithms/fairness_metrics_toolbox", - "requirements": [ - "joblib==1.2.0 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "numpy==1.24.2 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scikit-learn==1.2.1 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "scipy==1.9.3 ; python_version >= \"3.10\" and python_version < \"4.0\"", - "threadpoolctl==3.1.0 ; python_version >= \"3.10\" and python_version < \"4.0\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - "required": [ - "sensitive_feature" - ], - "properties": { - "sensitive_feature": { - "title": "Sensitive Feature Names", - "description": "Array of Sensitive Feature Names (e.g. Gender)", - "type": "array", - "items": { - "type": "string" - } - } - } + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: ['sensitive_feature'], + properties: { + sensitive_feature: { + title: 'Sensitive Feature Names', + description: 'Array of Sensitive Feature Names (e.g. 
Gender)', + type: 'array', + items: { + type: 'string', + }, + }, + }, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "sensitive_feature", - "output_classes", - "results" - ], - "properties": { - "sensitive_feature": { - "description": "Array of sensitive feature names", - "type": "array", - "minItems": 1, - "items": { - "type": "string" - } + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/aiverify.stock.algorithms.fairness_metrics_toolbox/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['sensitive_feature', 'output_classes', 'results'], + properties: { + sensitive_feature: { + description: 'Array of sensitive feature names', + type: 'array', + minItems: 1, + items: { + type: 'string', + }, }, - "output_classes": { - "description": "Array of output classes", - "type": "array", - "minItems": 1, - "items": { - "type": [ - "string", - "number", - "integer", - "boolean" - ] - } + output_classes: { + description: 'Array of output classes', + type: 'array', + minItems: 1, + items: { + type: ['string', 'number', 'integer', 'boolean'], + }, }, - "results": { - "description": "Array of metrics by output classes (# output classes)", - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "description": "Dictionary of metric values by group", - "required": [ - "True Positive Rate", - "True Negative Rate", - "Positive Predictive Value Parity", - "Negative Predictive Value Parity", - "False Positive Rate", - "False Negative Rate", - "False Discovery Rate", - "False Omission Rate" + results: { + description: + 'Array of metrics by output classes (# output classes)', + type: 'array', + minItems: 1, + items: { + type: 'object', + description: 'Dictionary of metric values by group', + required: [ + 'True Positive Rate', + 'True Negative Rate', + 'Positive Predictive Value Parity', + 'Negative Predictive Value Parity', + 'False Positive Rate', + 'False Negative Rate', + 'False Discovery Rate', + 'False Omission Rate', ], - "properties": { - "True Positive Rate": { - "$ref": "#/$defs/metric" + properties: { + 'True Positive Rate': { + $ref: '#/$defs/metric', + }, + 'True Negative Rate': { + $ref: '#/$defs/metric', }, - "True Negative Rate": { - "$ref": "#/$defs/metric" + 'Positive Predictive Value Parity': { + $ref: '#/$defs/metric', }, - "Positive Predictive Value Parity": { - "$ref": "#/$defs/metric" + 'Negative Predictive Value Parity': { + $ref: '#/$defs/metric', }, - "Negative Predictive Value Parity": { - "$ref": "#/$defs/metric" + 'False Positive Rate': { + $ref: '#/$defs/metric', }, - "False Positive Rate": { - "$ref": "#/$defs/metric" + 'False Negative Rate': { + $ref: '#/$defs/metric', }, - "False Negative Rate": { - "$ref": "#/$defs/metric" + 'False Discovery Rate': { + $ref: '#/$defs/metric', }, - "False Discovery Rate": { - "$ref": "#/$defs/metric" + 'False Omission Rate': { + $ref: '#/$defs/metric', }, - "False Omission Rate": { - "$ref": "#/$defs/metric" - } - } - } - } + }, + }, + }, }, - "$defs": { - "metric": { - "description": 
"Array of metric values for each group, e.g. [{group:[1,2], metric:0.122},...]", - "type": "array", - "items": { - "type": "object", - "required": [ - "group", - "metric" - ], - "properties": { - "group": { - "type": "array", - "description": "Array of group values, one value for each feature, .e.g group: [1,4,7]" + $defs: { + metric: { + description: + 'Array of metric values for each group, e.g. [{group:[1,2], metric:0.122},...]', + type: 'array', + items: { + type: 'object', + required: ['group', 'metric'], + properties: { + group: { + type: 'array', + description: + 'Array of group values, one value for each feature, .e.g group: [1,4,7]', + }, + metric: { + type: 'number', }, - "metric": { - "type": "number" - } - } + }, }, - "minItems": 2 - } - } - } - } + minItems: 2, + }, + }, + }, + }, ], - "templates": [ + templates: [ { - "cid": "demo-2-mar-2023-template", - "name": "Demo 2 Mar 2023 Template", - "description": "template for demo on 2 Mar 2023", - "type": "Template", - "gid": "046f18f4-9006-480f-9696-ad2ff65523c7:demo-2-mar-2023-template", - "version": "1.0.0", - "pluginGID": "046f18f4-9006-480f-9696-ad2ff65523c7", - "data": { - "pages": [ + cid: 'demo-2-mar-2023-template', + name: 'Demo 2 Mar 2023 Template', + description: 'template for demo on 2 Mar 2023', + type: 'Template', + gid: '046f18f4-9006-480f-9696-ad2ff65523c7:demo-2-mar-2023-template', + version: '1.0.0', + pluginGID: '046f18f4-9006-480f-9696-ad2ff65523c7', + data: { + pages: [ { - "layouts": [ + layouts: [ { - "w": 12, - "h": 2, - "x": 0, - "y": 0, - "i": "1677654688094", - "minW": 1, - "maxW": 12, - "minH": 2, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 2, + x: 0, + y: 0, + i: '1677654688094', + minW: 1, + maxW: 12, + minH: 2, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 15, - "x": 0, - "y": 2, - "i": "1677654738248", - "minW": 12, - "maxW": 12, - "minH": 12, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 15, + x: 0, + y: 2, + i: '1677654738248', + minW: 12, + maxW: 12, + minH: 12, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 17, - "x": 0, - "y": 17, - "i": "1677654744924", - "minW": 12, - "maxW": 12, - "minH": 12, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 17, + x: 0, + y: 17, + i: '1677654744924', + minW: 12, + maxW: 12, + minH: 12, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 1, - "x": 0, - "y": 35, - "i": "_youcantseeme", - "moved": false, - "static": false - } + w: 12, + h: 1, + x: 0, + y: 35, + i: '_youcantseeme', + moved: false, + static: false, + }, ], - "reportWidgets": [ + reportWidgets: [ { - "layoutItemProperties": { - "justifyContent": "center", - "alignItems": "center", - "color": null, - "bgcolor": null + layoutItemProperties: { + justifyContent: 'center', + alignItems: 'center', + color: null, + bgcolor: null, + }, + widgetGID: 'aiverify.stock.decorators:header1', + key: '1677654688094', + properties: { + title: '', + text: '{name}', }, - "widgetGID": "aiverify.stock.decorators:header1", - "key": "1677654688094", - "properties": { - "title": "", - "text": "{name}" - } }, { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "top", - "color": null, - "bgcolor": null + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'top', + color: null, + bgcolor: null, }, - "widgetGID": "aiverify.stock.fairness-metrics-toolbox-widgets:false-discovery-rate-chart", - "key": "1677654738248", - "properties": null + widgetGID: + 
'aiverify.stock.fairness-metrics-toolbox-widgets:false-discovery-rate-chart', + key: '1677654738248', + properties: null, }, { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "top", - "color": null, - "bgcolor": null + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'top', + color: null, + bgcolor: null, }, - "widgetGID": "aiverify.stock.fairness-metrics-toolbox-widgets:false-negative-rate-chart", - "key": "1677654744924", - "properties": null - } - ] + widgetGID: + 'aiverify.stock.fairness-metrics-toolbox-widgets:false-negative-rate-chart', + key: '1677654744924', + properties: null, + }, + ], }, { - "layouts": [ + layouts: [ { - "w": 12, - "h": 35, - "x": 0, - "y": 0, - "i": "1677654763640", - "minW": 12, - "maxW": 12, - "minH": 12, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 35, + x: 0, + y: 0, + i: '1677654763640', + minW: 12, + maxW: 12, + minH: 12, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 1, - "x": 0, - "y": 35, - "i": "_youcantseeme", - "moved": false, - "static": false - } + w: 12, + h: 1, + x: 0, + y: 35, + i: '_youcantseeme', + moved: false, + static: false, + }, ], - "reportWidgets": [ + reportWidgets: [ { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "top", - "color": null, - "bgcolor": null + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'top', + color: null, + bgcolor: null, }, - "widgetGID": "aiverify.stock.fairness-metrics-toolbox-widgets:false-omission-rate-chart", - "key": "1677654763640", - "properties": null - } - ] + widgetGID: + 'aiverify.stock.fairness-metrics-toolbox-widgets:false-omission-rate-chart', + key: '1677654763640', + properties: null, + }, + ], }, { - "layouts": [ + layouts: [ { - "w": 12, - "h": 29, - "x": 0, - "y": 2, - "i": "1677656674885", - "minW": 12, - "maxW": 12, - "minH": 3, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 29, + x: 0, + y: 2, + i: '1677656674885', + minW: 12, + maxW: 12, + minH: 3, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 2, - "x": 0, - "y": 0, - "i": "1677656749854", - "minW": 1, - "maxW": 12, - "minH": 2, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 2, + x: 0, + y: 0, + i: '1677656749854', + minW: 1, + maxW: 12, + minH: 2, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 1, - "x": 0, - "y": 35, - "i": "_youcantseeme", - "moved": false, - "static": false - } + w: 12, + h: 1, + x: 0, + y: 35, + i: '_youcantseeme', + moved: false, + static: false, + }, ], - "reportWidgets": [ + reportWidgets: [ { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "top" + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'top', + }, + widgetGID: + 'aiverify.stock.process-checklist:fairness-process-checklist-answers', + key: '1677656674885', + properties: { + section: 'Fairness', + startIndex: '0', }, - "widgetGID": "aiverify.stock.process-checklist:fairness-process-checklist-answers", - "key": "1677656674885", - "properties": { - "section": "Fairness", - "startIndex": "0" - } }, { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "center" + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'center', + }, + widgetGID: 'aiverify.stock.decorators:header1', + key: '1677656749854', + properties: { + title: '', + text: 'Fairness Checklist', }, - "widgetGID": "aiverify.stock.decorators:header1", - "key": "1677656749854", - "properties": { - "title": "", - "text": "Fairness 
Checklist" - } - } - ] + }, + ], }, { - "layouts": [ + layouts: [ { - "w": 12, - "h": 35, - "x": 0, - "y": 0, - "i": "1677656693173", - "minW": 12, - "maxW": 12, - "minH": 3, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 35, + x: 0, + y: 0, + i: '1677656693173', + minW: 12, + maxW: 12, + minH: 3, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 1, - "x": 0, - "y": 35, - "i": "_youcantseeme", - "moved": false, - "static": false - } + w: 12, + h: 1, + x: 0, + y: 35, + i: '_youcantseeme', + moved: false, + static: false, + }, ], - "reportWidgets": [ + reportWidgets: [ { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "top" + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'top', }, - "widgetGID": "aiverify.stock.process-checklist:fairness-process-checklist-answers", - "key": "1677656693173", - "properties": { - "section": "Fairness", - "startIndex": "3" - } - } - ] + widgetGID: + 'aiverify.stock.process-checklist:fairness-process-checklist-answers', + key: '1677656693173', + properties: { + section: 'Fairness', + startIndex: '3', + }, + }, + ], }, { - "layouts": [ + layouts: [ { - "w": 12, - "h": 14, - "x": 0, - "y": 2, - "i": "1677656725405", - "minW": 12, - "maxW": 12, - "minH": 3, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 14, + x: 0, + y: 2, + i: '1677656725405', + minW: 12, + maxW: 12, + minH: 3, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 2, - "x": 0, - "y": 0, - "i": "1677656776114", - "minW": 1, - "maxW": 12, - "minH": 2, - "maxH": 36, - "moved": false, - "static": false + w: 12, + h: 2, + x: 0, + y: 0, + i: '1677656776114', + minW: 1, + maxW: 12, + minH: 2, + maxH: 36, + moved: false, + static: false, }, { - "w": 12, - "h": 1, - "x": 0, - "y": 35, - "i": "_youcantseeme", - "moved": false, - "static": false - } + w: 12, + h: 1, + x: 0, + y: 35, + i: '_youcantseeme', + moved: false, + static: false, + }, ], - "reportWidgets": [ + reportWidgets: [ { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "top" + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'top', + }, + widgetGID: + 'aiverify.stock.process-checklist:explainability-process-checklist-answers', + key: '1677656725405', + properties: { + section: 'Explainability', + startIndex: '0', }, - "widgetGID": "aiverify.stock.process-checklist:explainability-process-checklist-answers", - "key": "1677656725405", - "properties": { - "section": "Explainability", - "startIndex": "0" - } }, { - "layoutItemProperties": { - "justifyContent": "left", - "alignItems": "center" + layoutItemProperties: { + justifyContent: 'left', + alignItems: 'center', }, - "widgetGID": "aiverify.stock.decorators:header1", - "key": "1677656776114", - "properties": { - "title": "", - "text": "Explainability Checklist" - } - } - ] - } + widgetGID: 'aiverify.stock.decorators:header1', + key: '1677656776114', + properties: { + title: '', + text: 'Explainability Checklist', + }, + }, + ], + }, ], - "globalVars": [], - "fromPlugin": true, - "projectInfo": { - "name": "Demo 2 Mar 2023 Template", - "description": "template for demo on 2 Mar 2023" - } + globalVars: [], + fromPlugin: true, + projectInfo: { + name: 'Demo 2 Mar 2023 Template', + description: 'template for demo on 2 Mar 2023', + }, }, - "id": "64536cac2285de1805dea945" - } + id: '64536cac2285de1805dea945', + }, + ], + stockPlugins: [ + 'aiverify.stock.decorators', + 'aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification', + 
'aiverify.stock.fairness-metrics-toolbox-widgets', + 'aiverify.stock.process-checklist', ], - "stockPlugins": [ - "aiverify.stock.decorators", - "aiverify.stock.algorithms.fairness_metrics_toolbox_for_classification", - "aiverify.stock.fairness-metrics-toolbox-widgets", - "aiverify.stock.process-checklist" - ] - } -} + }, +}; -const emptyListResponse: ApiResult = { +const emptyListResponse: ApiResult = { status: 200, data: { plugins: [], inputBlocks: [], algorithms: [], templates: [], - stockPlugins: [] - } + stockPlugins: [], + }, }; const installPluginResponse: ApiResult = { - "status": 200, - "data": { - "gid": "aiverify.stock.algorithms.partial_dependence_plot", - "version": "0.1.0", - "name": "Partial Dependence Plot", - "author": "Test User", - "description": "Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. They show how predictions partially depend on values of the input variables of interests.", - "isStock": false, - "reportWidgets": [], - "inputBlocks": [], - "templates": [], - "algorithms": [ - { - "cid": "partial_dependence_plot", - "name": "Partial Dependence Plot", - "modelType": [ - "classification", - "regression" - ], - "version": "0.1.0", - "author": "Test User", - "description": "Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. They show how predictions partially depend on values of the input variables of interests.", - "tags": [ - "Partial Dependence Plot", - "classification", - "regression" - ], - "requireGroundTruth": false, - "type": "Algorithm", - "gid": "aiverify.stock.algorithms.partial_dependence_plot:partial_dependence_plot", - "pluginGID": "aiverify.stock.algorithms.partial_dependence_plot", - "algoPath": "/home/amdlahir/imda/projects/ai-verify-portal/plugins/aiverify.stock.algorithms.partial_dependence_plot/algorithms/partial_dependence_plot", - "requirements": [ - "numpy==1.24.1 ; python_version >= \"3.10\" and python_version < \"3.12\"", - "scipy==1.10.0 ; python_version >= \"3.10\" and python_version < \"3.12\"" - ], - "inputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/input.schema.json", - "title": "Algorithm Plugin Input Arguments", - "description": "A schema for algorithm plugin input arguments", - "type": "object", - "required": [ - "target_feature_name", - "percentiles", - "grid_resolution" - ], - "properties": { - "target_feature_name": { - "title": "Target Feature Name", - "description": "Target Feature Name (e.g. Interest_Rate)", - "type": "string" - }, - "percentiles": { - "title": "Cut-off percentiles", - "description": "Cut-off percentiles (e.g. [0.01, 0.99])", - "type": "array", - "minItems": 2, - "maxItems": 2, - "items": { - "type": "number" - } - }, - "grid_resolution": { - "title": "Grid Resolution", - "description": "Grid Resolution (e.g. 25)", - "type": "number" - } - } + status: 200, + data: { + gid: 'aiverify.stock.algorithms.partial_dependence_plot', + version: '0.1.0', + name: 'Partial Dependence Plot', + author: 'Test User', + description: + 'Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. 
They show how predictions partially depend on values of the input variables of interests.', + isStock: false, + reportWidgets: [], + inputBlocks: [], + templates: [], + algorithms: [ + { + cid: 'partial_dependence_plot', + name: 'Partial Dependence Plot', + modelType: ['classification', 'regression'], + version: '0.1.0', + author: 'Test User', + description: + 'Partial dependence plot (PDP) depicts the relationship between a small number of input variable and target. They show how predictions partially depend on values of the input variables of interests.', + tags: ['Partial Dependence Plot', 'classification', 'regression'], + requireGroundTruth: false, + type: 'Algorithm', + gid: 'aiverify.stock.algorithms.partial_dependence_plot:partial_dependence_plot', + pluginGID: 'aiverify.stock.algorithms.partial_dependence_plot', + algoPath: + '/home/amdlahir/imda/projects/ai-verify-portal/plugins/aiverify.stock.algorithms.partial_dependence_plot/algorithms/partial_dependence_plot', + requirements: [ + 'numpy==1.24.1 ; python_version >= "3.10" and python_version < "3.12"', + 'scipy==1.10.0 ; python_version >= "3.10" and python_version < "3.12"', + ], + inputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/input.schema.json', + title: 'Algorithm Plugin Input Arguments', + description: 'A schema for algorithm plugin input arguments', + type: 'object', + required: ['target_feature_name', 'percentiles', 'grid_resolution'], + properties: { + target_feature_name: { + title: 'Target Feature Name', + description: 'Target Feature Name (e.g. Interest_Rate)', + type: 'string', + }, + percentiles: { + title: 'Cut-off percentiles', + description: 'Cut-off percentiles (e.g. [0.01, 0.99])', + type: 'array', + minItems: 2, + maxItems: 2, + items: { + type: 'number', + }, + }, + grid_resolution: { + title: 'Grid Resolution', + description: 'Grid Resolution (e.g. 
25)', + type: 'number', + }, + }, + }, + outputSchema: { + $schema: 'https://json-schema.org/draft/2020-12/schema', + $id: 'https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/output.schema.json', + title: 'Algorithm Plugin Output Arguments', + description: 'A schema for algorithm plugin output arguments', + type: 'object', + required: ['feature_names', 'output_classes', 'results'], + minProperties: 1, + properties: { + feature_names: { + type: 'array', + description: 'Array of feature names', + minItems: 1, + items: { + type: 'string', + }, + }, + output_classes: { + type: 'array', + description: 'Array of output classes', + minItems: 1, + items: { + type: 'string', + }, + }, + results: { + description: 'Matrix of feature values (# feature names)', + type: 'array', + minItems: 1, + items: { + description: 'Matrix of PDP values (# output classes)', + type: 'array', + minItems: 1, + items: { + type: 'array', + description: 'Array of values for each PDP', + minItems: 1, + items: { + type: 'number', + }, }, - "outputSchema": { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://gitlab.com/imda_dsl/t2po/ai-verify/ai-verify-stock-plugins/partial_dependence_plot/output.schema.json", - "title": "Algorithm Plugin Output Arguments", - "description": "A schema for algorithm plugin output arguments", - "type": "object", - "required": [ - "feature_names", - "output_classes", - "results" - ], - "minProperties": 1, - "properties": { - "feature_names": { - "type": "array", - "description": "Array of feature names", - "minItems": 1, - "items": { - "type": "string" - } - }, - "output_classes": { - "type": "array", - "description": "Array of output classes", - "minItems": 1, - "items": { - "type": "string" - } - }, - "results": { - "description": "Matrix of feature values (# feature names)", - "type": "array", - "minItems": 1, - "items": { - "description": "Matrix of PDP values (# output classes)", - "type": "array", - "minItems": 1, - "items": { - "type": "array", - "description": "Array of values for each PDP", - "minItems": 1, - "items": { - "type": "number" - } - } - } - } - } - } - } - ] - } + }, + }, + }, + }, + }, + ], + }, }; -const algoPackageDependencyStatusResponse: ApiResult = { - "status": 200, - "data": [ - {"requirement": "numpy==1.24.1", "result": true, "comment": "Not compatible"}, - {"requirement": "scipy==1.10.0", "result": false, "comment": ""}] -} +const algoPackageDependencyStatusResponse: ApiResult = + { + status: 200, + data: [ + { requirement: 'numpy==1.24.1', result: true, comment: 'Not compatible' }, + { requirement: 'scipy==1.10.0', result: false, comment: '' }, + ], + }; -export { pluginsListResponse, emptyListResponse, installPluginResponse, algoPackageDependencyStatusResponse } \ No newline at end of file +export { + pluginsListResponse, + emptyListResponse, + installPluginResponse, + algoPackageDependencyStatusResponse, +}; diff --git a/ai-verify-portal/__tests__/modules/assets.test.tsx b/ai-verify-portal/__tests__/modules/assets.test.tsx index be49a6edf..b284aff3e 100644 --- a/ai-verify-portal/__tests__/modules/assets.test.tsx +++ b/ai-verify-portal/__tests__/modules/assets.test.tsx @@ -6,87 +6,116 @@ import { silentConsoleLogs } from '__mocks__/mockGlobals'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) +})); describe('Assets', () => { - beforeAll(() => { silentConsoleLogs(); - }) + }); describe('Initial Render', () => { - it('should render the back button', () => { - render(); - 
expect(screen.getByTestId("assets-back-button")); - }) - + render( + + + + ); + expect(screen.getByTestId('assets-back-button')); + }); + it('should navigate to home page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("assets-back-button"); + render( + + + + ); + const button = screen.getByTestId('assets-back-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/home') - }) - + expect(router.push).toHaveBeenCalledWith('/home'); + }); + it('should render the asset folder buttons', () => { - render(); - expect(screen.getByTestId("add-new-dataset-button")).toHaveTextContent("New Dataset"); - expect(screen.getByTestId("add-new-model-button")).toHaveTextContent("New AI Model"); - expect(screen.getByTestId("open-dataset-list-button")).toHaveTextContent("Datasets"); - expect(screen.getByTestId("open-model-list-button")).toHaveTextContent("AI Models"); - }) - + render( + + + + ); + expect(screen.getByTestId('add-new-dataset-button')).toHaveTextContent( + 'New Dataset' + ); + expect(screen.getByTestId('add-new-model-button')).toHaveTextContent( + 'New AI Model' + ); + expect(screen.getByTestId('open-dataset-list-button')).toHaveTextContent( + 'Datasets' + ); + expect(screen.getByTestId('open-model-list-button')).toHaveTextContent( + 'AI Models' + ); + }); + it('should navigate to dataset page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("open-dataset-list-button"); + render( + + + + ); + const button = screen.getByTestId('open-dataset-list-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/datasets') - }) - + expect(router.push).toHaveBeenCalledWith('/assets/datasets'); + }); + it('should navigate to model page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("open-model-list-button"); + render( + + + + ); + const button = screen.getByTestId('open-model-list-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/models') - }) - + expect(router.push).toHaveBeenCalledWith('/assets/models'); + }); + it('should navigate to new dataset page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("add-new-dataset-button"); + render( + + + + ); + const button = screen.getByTestId('add-new-dataset-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newDataset') - }) - + expect(router.push).toHaveBeenCalledWith('/assets/newDataset'); + }); + it('should navigate to new model page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("add-new-model-button"); + render( + + + + ); + const button = screen.getByTestId('add-new-model-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newModel') - }) - - }) - - - - -}) \ No newline at end of file + expect(router.push).toHaveBeenCalledWith('/assets/newModel'); + }); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/datasets.test.tsx 
b/ai-verify-portal/__tests__/modules/datasets.test.tsx index 0c3de5243..62d2dd386 100644 --- a/ai-verify-portal/__tests__/modules/datasets.test.tsx +++ b/ai-verify-portal/__tests__/modules/datasets.test.tsx @@ -6,46 +6,62 @@ import { silentConsoleLogs } from '__mocks__/mockGlobals'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) +})); -jest.mock('src/modules/assets/datasetList', () => () => 'DatasetListComponent') +jest.mock('src/modules/assets/datasetList', () => () => 'DatasetListComponent'); describe('Datasets', () => { - beforeAll(() => { silentConsoleLogs(); }); it('should render the back button', () => { - render(); - expect(screen.getByTestId("datasets-back-button")); - }) + render( + + + + ); + expect(screen.getByTestId('datasets-back-button')); + }); it('should navigate to assets page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("datasets-back-button"); + render( + + + + ); + const button = screen.getByTestId('datasets-back-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets') - }) + expect(router.push).toHaveBeenCalledWith('/assets'); + }); it('should render the add new model button', () => { - render(); - expect(screen.getByTestId("add-new-datasets-button")).toHaveTextContent("New Dataset +"); - }) + render( + + + + ); + expect(screen.getByTestId('add-new-datasets-button')).toHaveTextContent( + 'New Dataset +' + ); + }); it('should navigate to add new dataset page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("add-new-datasets-button"); + render( + + + + ); + const button = screen.getByTestId('add-new-datasets-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newDataset') - }) - -}) \ No newline at end of file + expect(router.push).toHaveBeenCalledWith('/assets/newDataset'); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/modelList.test.tsx b/ai-verify-portal/__tests__/modules/modelList.test.tsx index 0ef645ae6..0e7678f05 100644 --- a/ai-verify-portal/__tests__/modules/modelList.test.tsx +++ b/ai-verify-portal/__tests__/modules/modelList.test.tsx @@ -1,11 +1,11 @@ import { render, screen } from '@testing-library/react'; import ModelListComponent from 'src/modules/assets/modelList'; -import { MockedProvider } from "@apollo/client/testing"; +import { MockedProvider } from '@apollo/client/testing'; import { gql } from '@apollo/client'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) +})); const mocks = [ { @@ -13,73 +13,73 @@ const mocks = [ query: gql` query Query { modelFiles { - id - name - filename - filePath - ctime - size - status - description - serializer - modelFormat - modelType - errorMessages - type + id + name + filename + filePath + ctime + size + status + description + serializer + modelFormat + modelType + errorMessages + type } - }`, + } + `, }, result: { data: { - "modelFiles": [ - { - "__typename": "ModelFile", - "id": "6424f3d19dc349ad75842588", - "name": "tensorflow_tabular_compas_sequential.sav", - "filename": "tensorflow_tabular_compas_sequential.sav", - "filePath": "/home/user/aiverify/uploads/model/tensorflow_tabular_compas_sequential.sav", - "ctime": "2023-03-30T02:28:33.890Z", - "size": null, - "status": "Valid", - "description": null, - "serializer": "tensorflow", - 
"modelFormat": "tensorflow", - "modelType": "Classification", - "errorMessages": null, - "type": "Folder" - }, - { - "__typename": "ModelFile", - "id": "642a564af921a817b2192b8c", - "name": "joblib_scikit_ada_compas.sav", - "filename": "joblib_scikit_ada_compas.sav", - "filePath": "/home/user/aiverify/uploads/model/joblib_scikit_ada_compas.sav", - "ctime": "2023-04-03T04:30:02.414Z", - "size": "561.89 KB", - "status": "Valid", - "description": "", - "serializer": "joblib", - "modelFormat": "sklearn", - "modelType": "Classification", - "errorMessages": "", - "type": "File" - } - ] - } - } - } + modelFiles: [ + { + __typename: 'ModelFile', + id: '6424f3d19dc349ad75842588', + name: 'tensorflow_tabular_compas_sequential.sav', + filename: 'tensorflow_tabular_compas_sequential.sav', + filePath: + '/home/user/aiverify/uploads/model/tensorflow_tabular_compas_sequential.sav', + ctime: '2023-03-30T02:28:33.890Z', + size: null, + status: 'Valid', + description: null, + serializer: 'tensorflow', + modelFormat: 'tensorflow', + modelType: 'Classification', + errorMessages: null, + type: 'Folder', + }, + { + __typename: 'ModelFile', + id: '642a564af921a817b2192b8c', + name: 'joblib_scikit_ada_compas.sav', + filename: 'joblib_scikit_ada_compas.sav', + filePath: + '/home/user/aiverify/uploads/model/joblib_scikit_ada_compas.sav', + ctime: '2023-04-03T04:30:02.414Z', + size: '561.89 KB', + status: 'Valid', + description: '', + serializer: 'joblib', + modelFormat: 'sklearn', + modelType: 'Classification', + errorMessages: '', + type: 'File', + }, + ], + }, + }, + }, ]; - describe('Model List', () => { - it('should render the filters', () => { render( - + ); - expect(screen.getByTestId("model-list-filters")); - }) - -}) \ No newline at end of file + expect(screen.getByTestId('model-list-filters')); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/models.test.tsx b/ai-verify-portal/__tests__/modules/models.test.tsx index 7b0ff180f..776a021d5 100644 --- a/ai-verify-portal/__tests__/modules/models.test.tsx +++ b/ai-verify-portal/__tests__/modules/models.test.tsx @@ -6,46 +6,62 @@ import { silentConsoleLogs } from '__mocks__/mockGlobals'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) +})); -jest.mock('src/modules/assets/modelList', () => () => 'ModelListComponent') +jest.mock('src/modules/assets/modelList', () => () => 'ModelListComponent'); describe('Models', () => { - beforeAll(() => { silentConsoleLogs(); }); it('should render the back button', () => { - render(); - expect(screen.getByTestId("models-back-button")); - }) + render( + + + + ); + expect(screen.getByTestId('models-back-button')); + }); it('should navigate to assets page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("models-back-button"); + render( + + + + ); + const button = screen.getByTestId('models-back-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets') - }) + expect(router.push).toHaveBeenCalledWith('/assets'); + }); it('should render the add new model button', () => { - render(); - expect(screen.getByTestId("add-new-models-button")).toHaveTextContent("New Model +"); - }) + render( + + + + ); + expect(screen.getByTestId('add-new-models-button')).toHaveTextContent( + 'New Model +' + ); + }); it('should navigate to add new model page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); 
const router = useRouter(); - render(); - const button = screen.getByTestId("add-new-models-button"); + render( + + + + ); + const button = screen.getByTestId('add-new-models-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newModel') - }) - -}) \ No newline at end of file + expect(router.push).toHaveBeenCalledWith('/assets/newModel'); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/newDataset.test.tsx b/ai-verify-portal/__tests__/modules/newDataset.test.tsx index 79d5ca41a..c1b7ec403 100644 --- a/ai-verify-portal/__tests__/modules/newDataset.test.tsx +++ b/ai-verify-portal/__tests__/modules/newDataset.test.tsx @@ -4,14 +4,13 @@ import NewDatasetModule from 'src/modules/assets/newDataset'; import { gql } from '@apollo/client'; import { act } from 'react-dom/test-utils'; import axios from 'axios'; -import MockAdapter from "axios-mock-adapter"; +import MockAdapter from 'axios-mock-adapter'; import { MockProviders } from '__mocks__/mockProviders'; import { silentConsoleLogs } from '__mocks__/mockGlobals'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) - +})); const mocks = [ { @@ -19,95 +18,93 @@ const mocks = [ query: gql` subscription validateDatasetUpdated { validateDatasetStatusUpdated { - _id - dataColumns { - name - datatype - label - } - numRows - numCols - status - serializer - dataFormat - errorMessages + _id + dataColumns { + name + datatype + label + } + numRows + numCols + status + serializer + dataFormat + errorMessages } } `, }, result: { data: { - "validateDatasetStatusUpdated": { - "_id": "646da7040da1d597e3ca9b18", - "dataColumns": [ - { - "name": "age_cat_cat", - "datatype": "int64", - "label": "age_cat_cat", - "__typename": "DatasetColumn" - }, - { - "name": "sex_code", - "datatype": "int64", - "label": "sex_code", - "__typename": "DatasetColumn" - }, - { - "name": "race_code", - "datatype": "int64", - "label": "race_code", - "__typename": "DatasetColumn" - }, - { - "name": "priors_count", - "datatype": "int64", - "label": "priors_count", - "__typename": "DatasetColumn" - }, - { - "name": "c_charge_degree_cat", - "datatype": "int64", - "label": "c_charge_degree_cat", - "__typename": "DatasetColumn" - }, - { - "name": "two_year_recid", - "datatype": "int64", - "label": "two_year_recid", - "__typename": "DatasetColumn" - } - ], - "numRows": 1235, - "numCols": 6, - "status": "Valid", - "serializer": "joblib", - "dataFormat": "pandas", - "errorMessages": "", - "__typename": "DatasetStatusUpdate" - } - - - } - } - } + validateDatasetStatusUpdated: { + _id: '646da7040da1d597e3ca9b18', + dataColumns: [ + { + name: 'age_cat_cat', + datatype: 'int64', + label: 'age_cat_cat', + __typename: 'DatasetColumn', + }, + { + name: 'sex_code', + datatype: 'int64', + label: 'sex_code', + __typename: 'DatasetColumn', + }, + { + name: 'race_code', + datatype: 'int64', + label: 'race_code', + __typename: 'DatasetColumn', + }, + { + name: 'priors_count', + datatype: 'int64', + label: 'priors_count', + __typename: 'DatasetColumn', + }, + { + name: 'c_charge_degree_cat', + datatype: 'int64', + label: 'c_charge_degree_cat', + __typename: 'DatasetColumn', + }, + { + name: 'two_year_recid', + datatype: 'int64', + label: 'two_year_recid', + __typename: 'DatasetColumn', + }, + ], + numRows: 1235, + numCols: 6, + status: 'Valid', + serializer: 'joblib', + dataFormat: 'pandas', + errorMessages: '', + __typename: 'DatasetStatusUpdate', + }, + }, + }, + }, ]; const uploadResponse = [ { - "filename": "file1.png", - "name": "file1.png", - "type": "File", - 
"filePath": "/home/uploads/file1.png", - "ctime": "2023-05-24T05:56:20.830Z", - "description": "", - "status": "Pending", - "size": "502.71 KB", - "serializer": "", - "dataFormat": "", - "errorMessages": "", - "_id": "646da7040da1d597e3ca9b18", - "dataColumns": [] - } + filename: 'file1.png', + name: 'file1.png', + type: 'File', + filePath: '/home/uploads/file1.png', + ctime: '2023-05-24T05:56:20.830Z', + description: '', + status: 'Pending', + size: '502.71 KB', + serializer: '', + dataFormat: '', + errorMessages: '', + _id: '646da7040da1d597e3ca9b18', + dataColumns: [], + }, ]; const file1 = new File(['SomeMockFileContent1'], 'file1.png'); @@ -123,7 +120,6 @@ const file10 = new File(['SomeMockFileContent1'], 'file10.png'); const file11 = new File(['SomeMockFileContent1'], 'file11.png'); describe('New Dataset', () => { - let amock: any; beforeAll(() => { @@ -137,100 +133,144 @@ describe('New Dataset', () => { }); describe('Initial Render', () => { - it('should render the back button', () => { - render(); - expect(screen.getByTestId("newdataset-back-button")); - }) - + render( + + + + ); + expect(screen.getByTestId('newdataset-back-button')); + }); + it('should navigate to dataset list page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("newdataset-back-button"); + render( + + + + ); + const button = screen.getByTestId('newdataset-back-button'); act(() => { button.click(); }); - expect(router.push).toHaveBeenCalledWith('/assets/datasets') - }) - + expect(router.push).toHaveBeenCalledWith('/assets/datasets'); + }); + it('should render the upload screen', () => { - render(); + render( + + + + ); expect(screen.getByText('Add New Datasets')).toBeDefined(); expect(screen.getByText('Before uploading...')).toBeDefined(); - expect(screen.getByTestId("upload-file-dropbox")).toHaveTextContent("Drag & Drop or Click to Browse"); - expect(screen.getByTestId("upload-folder-button")).toHaveTextContent("Upload Folder"); + expect(screen.getByTestId('upload-file-dropbox')).toHaveTextContent( + 'Drag & Drop or Click to Browse' + ); + expect(screen.getByTestId('upload-folder-button')).toHaveTextContent( + 'Upload Folder' + ); expect(screen.getByText('Selected Files')).toBeDefined(); - expect(screen.getByTestId("upload-datasets-button")).toHaveAttribute('aria-disabled', 'true'); - }) - - }) + expect(screen.getByTestId('upload-datasets-button')).toHaveAttribute( + 'aria-disabled', + 'true' + ); + }); + }); describe('File Picking', () => { - - it('should enable the upload button when files are picked', async() => { - const { container } = render(); - const button = screen.getByTestId("upload-datasets-button"); + it('should enable the upload button when files are picked', async () => { + const { container } = render( + + + + ); + const button = screen.getByTestId('upload-datasets-button'); expect(button).toHaveAttribute('aria-disabled', 'true'); const file1 = new File(['hello'], 'file1.png'); - Object.defineProperty(file1, 'webkitRelativePath', { value: "" }) - const input = container.querySelector(`input[name="file-dropbox"]`) - Object.defineProperty(input, 'files', {value: [file1] }) + Object.defineProperty(file1, 'webkitRelativePath', { value: '' }); + const input = container.querySelector(`input[name="file-dropbox"]`); + Object.defineProperty(input, 'files', { value: [file1] }); act(() => { - if (input){ - fireEvent.change(input) + if (input) { + 
fireEvent.change(input); } }); await Promise.resolve(); expect(screen.queryByText('file1.png')).toBeTruthy(); expect(button).not.toHaveAttribute('aria-disabled', 'true'); - }) + }); - it('should show error message when more than 10 files are selected', async() => { - const { container } = render(); - const button = screen.getByTestId("upload-datasets-button"); + it('should show error message when more than 10 files are selected', async () => { + const { container } = render( + + + + ); + const button = screen.getByTestId('upload-datasets-button'); expect(button).toHaveAttribute('aria-disabled', 'true'); - const input = container.querySelector(`input[name="file-dropbox"]`) + const input = container.querySelector(`input[name="file-dropbox"]`); Object.defineProperty(input, 'files', { - value: [file1, file2, file3, file4, file5, file6, file7, file8, file9, file10, file11] - }) + value: [ + file1, + file2, + file3, + file4, + file5, + file6, + file7, + file8, + file9, + file10, + file11, + ], + }); act(() => { - if (input){ - fireEvent.change(input) + if (input) { + fireEvent.change(input); } }); await Promise.resolve(); expect(button).toHaveAttribute('aria-disabled', 'true'); expect(screen.queryByText('file1.png')).toBeNull(); expect(screen.getByText('File selection error')).toBeDefined(); - expect(screen.getByText('Maximum 10 files to be uploaded at once. Please select less files.')).toBeDefined(); - }) + expect( + screen.getByText( + 'Maximum 10 files to be uploaded at once. Please select less files.' + ) + ).toBeDefined(); + }); - it('should show error message when more than 10 files in total are selected', async() => { - const { container } = render(); - const button = screen.getByTestId("upload-datasets-button"); + it('should show error message when more than 10 files in total are selected', async () => { + const { container } = render( + + + + ); + const button = screen.getByTestId('upload-datasets-button'); expect(button).toHaveAttribute('aria-disabled', 'true'); - const input = container.querySelector(`input[name="file-dropbox"]`) + const input = container.querySelector(`input[name="file-dropbox"]`); Object.defineProperty(input, 'files', { value: [file1, file2, file3, file4, file5, file6, file7, file8, file9], writable: true, - }) + }); act(() => { - if (input){ - fireEvent.change(input) + if (input) { + fireEvent.change(input); } }); await Promise.resolve(); expect(button).not.toHaveAttribute('aria-disabled', 'true'); - const input2 = container.querySelector(`input[name="file-dropbox"]`) + const input2 = container.querySelector(`input[name="file-dropbox"]`); Object.defineProperty(input2, 'files', { - value: [file10, file11] - }) + value: [file10, file11], + }); act(() => { - if (input2){ - fireEvent.change(input2) + if (input2) { + fireEvent.change(input2); } }); expect(button).not.toHaveAttribute('aria-disabled', 'true'); @@ -238,65 +278,85 @@ describe('New Dataset', () => { button.click(); }); expect(screen.getByText('File selection error')).toBeDefined(); - expect(screen.getByText('Maximum 10 files to be uploaded at once. Please select less files.')).toBeDefined(); - }) + expect( + screen.getByText( + 'Maximum 10 files to be uploaded at once. Please select less files.' 
+ ) + ).toBeDefined(); + }); - it('should show error message if any file selected is more than 4GB', async() => { - const { container } = render(); - const button = screen.getByTestId("upload-datasets-button"); + it('should show error message if any file selected is more than 4GB', async () => { + const { container } = render( + + + + ); + const button = screen.getByTestId('upload-datasets-button'); expect(button).toHaveAttribute('aria-disabled', 'true'); const file = new File([''], 'file.png'); - Object.defineProperty(file, 'size', { value: 4000000001 }) - const input = container.querySelector(`input[name="file-dropbox"]`) + Object.defineProperty(file, 'size', { value: 4000000001 }); + const input = container.querySelector(`input[name="file-dropbox"]`); Object.defineProperty(input, 'files', { - value: [file1, file, file3] - }) + value: [file1, file, file3], + }); act(() => { - if (input){ - fireEvent.change(input) + if (input) { + fireEvent.change(input); } }); await Promise.resolve(); expect(button).toHaveAttribute('aria-disabled', 'true'); expect(screen.queryByText('file.png')).toBeNull(); expect(screen.getByText('File selection error')).toBeDefined(); - expect(screen.getByText('Maximum file size is 4GB. Please upload smaller files.')).toBeDefined(); - }) + expect( + screen.getByText( + 'Maximum file size is 4GB. Please upload smaller files.' + ) + ).toBeDefined(); + }); - it('should unpick file', async() => { - const { container } = render(); - Object.defineProperty(file1, 'webkitRelativePath', { value: "" }) - const input = container.querySelector(`input[name="file-dropbox"]`) - Object.defineProperty(input, 'files', {value: [file1] }) + it('should unpick file', async () => { + const { container } = render( + + + + ); + Object.defineProperty(file1, 'webkitRelativePath', { value: '' }); + const input = container.querySelector(`input[name="file-dropbox"]`); + Object.defineProperty(input, 'files', { value: [file1] }); act(() => { - if (input){ - fireEvent.change(input) + if (input) { + fireEvent.change(input); } }); await Promise.resolve(); expect(screen.queryByText('file1.png')).toBeTruthy(); act(() => { - screen.getByTestId("unpick-file").click(); + screen.getByTestId('unpick-file').click(); }); expect(screen.queryByText('file1.png')).toBeNull(); - }) + }); - it('should display cancel button when files are being uploaded', async() => { - const { container } = render(); - const button = screen.getByTestId("upload-datasets-button"); + it('should display cancel button when files are being uploaded', async () => { + const { container } = render( + + + + ); + const button = screen.getByTestId('upload-datasets-button'); expect(button).toHaveAttribute('aria-disabled', 'true'); - Object.defineProperty(file1, 'webkitRelativePath', { value: "" }) - const input = container.querySelector(`input[name="file-dropbox"]`) - Object.defineProperty(input, 'files', {value: [file1] }) + Object.defineProperty(file1, 'webkitRelativePath', { value: '' }); + const input = container.querySelector(`input[name="file-dropbox"]`); + Object.defineProperty(input, 'files', { value: [file1] }); act(() => { - if (input){ - fireEvent.change(input) + if (input) { + fireEvent.change(input); } }); await Promise.resolve(); expect(screen.queryByText('file1.png')).toBeTruthy(); expect(button).not.toHaveAttribute('aria-disabled', 'true'); - const uploads = [{data: [file1]}]; + const uploads = [{ data: [file1] }]; act(() => { amock.onPost(`/api/upload/data`).reply(200, uploads); button.click(); @@ -305,24 +365,25 @@ describe('New 
Dataset', () => { await waitFor(() => { cancelButton = screen.getByTestId('cancel-button'); expect(cancelButton).toBeDefined(); - }) - - }) - - }) + }); + }); + }); describe('File Validation', () => { - - it('should load validation components upon successful upload', async() => { - const { container } = render(); - const button = screen.getByTestId("upload-datasets-button"); + it('should load validation components upon successful upload', async () => { + const { container } = render( + + + + ); + const button = screen.getByTestId('upload-datasets-button'); expect(button).toHaveAttribute('aria-disabled', 'true'); - Object.defineProperty(file1, 'webkitRelativePath', { value: "" }) - const input = container.querySelector(`input[name="file-dropbox"]`) - Object.defineProperty(input, 'files', {value: [file1] }) + Object.defineProperty(file1, 'webkitRelativePath', { value: '' }); + const input = container.querySelector(`input[name="file-dropbox"]`); + Object.defineProperty(input, 'files', { value: [file1] }); act(() => { - if (input){ - fireEvent.change(input) + if (input) { + fireEvent.change(input); } }); await Promise.resolve(); @@ -338,11 +399,8 @@ describe('New Dataset', () => { expect(screen.queryByText('Status:')).toBeTruthy(); expect(screen.queryByText('Date Uploaded:')).toBeTruthy(); expect(screen.queryByText('Size:')).toBeTruthy(); - expect(screen.getByTestId("cancel-validation-button")).toBeDefined(); - }) - }) - - }) - - -}) \ No newline at end of file + expect(screen.getByTestId('cancel-validation-button')).toBeDefined(); + }); + }); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/newModel.test.tsx b/ai-verify-portal/__tests__/modules/newModel.test.tsx index e84a5d9e3..a35c0b42f 100644 --- a/ai-verify-portal/__tests__/modules/newModel.test.tsx +++ b/ai-verify-portal/__tests__/modules/newModel.test.tsx @@ -7,72 +7,108 @@ import { silentConsoleLogs } from '__mocks__/mockGlobals'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) +})); describe('New Model', () => { - beforeAll(() => { silentConsoleLogs(); }); it('should render the back button', () => { - render(); - expect(screen.getByTestId("newmodel-back-button")); - }) + render( + + + + ); + expect(screen.getByTestId('newmodel-back-button')); + }); it('should navigate to models page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("newmodel-back-button"); + render( + + + + ); + const button = screen.getByTestId('newmodel-back-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/models') - }) + expect(router.push).toHaveBeenCalledWith('/assets/models'); + }); it('should render the upload/ api option buttons', () => { - render(); - expect(screen.getByTestId("new-model-option")).toHaveTextContent("Upload AI Model"); - expect(screen.getByTestId("new-pipeline-option")).toHaveTextContent("Upload Pipeline"); - }) + render( + + + + ); + expect(screen.getByTestId('new-model-option')).toHaveTextContent( + 'Upload AI Model' + ); + expect(screen.getByTestId('new-pipeline-option')).toHaveTextContent( + 'Upload Pipeline' + ); + }); it('should enable next button on upload model option selection and navigate to new model upload page on click', () => { - (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - expect(screen.getByTestId("newmodel-next-button")).toHaveTextContent("Next 
>"); - expect(screen.getByTestId("newmodel-next-button")).toHaveAttribute('aria-disabled', 'true'); - const option = screen.getByTestId("new-model-option"); + render( + + + + ); + expect(screen.getByTestId('newmodel-next-button')).toHaveTextContent( + 'Next >' + ); + expect(screen.getByTestId('newmodel-next-button')).toHaveAttribute( + 'aria-disabled', + 'true' + ); + const option = screen.getByTestId('new-model-option'); act(() => { - option.dispatchEvent(new MouseEvent('click', {bubbles: true})); + option.dispatchEvent(new MouseEvent('click', { bubbles: true })); }); - expect(screen.getByTestId("newmodel-next-button")).not.toHaveAttribute('aria-disabled', 'true'); - const button = screen.getByTestId("newmodel-next-button"); + expect(screen.getByTestId('newmodel-next-button')).not.toHaveAttribute( + 'aria-disabled', + 'true' + ); + const button = screen.getByTestId('newmodel-next-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newModelUpload') - }) + expect(router.push).toHaveBeenCalledWith('/assets/newModelUpload'); + }); it('should enable next button on pipeline model option selection and navigate to new model upload page on click', () => { - (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - expect(screen.getByTestId("newmodel-next-button")).toHaveTextContent("Next >"); - expect(screen.getByTestId("newmodel-next-button")).toHaveAttribute('aria-disabled', 'true'); - const option = screen.getByTestId("new-pipeline-option"); + render( + + + + ); + expect(screen.getByTestId('newmodel-next-button')).toHaveTextContent( + 'Next >' + ); + expect(screen.getByTestId('newmodel-next-button')).toHaveAttribute( + 'aria-disabled', + 'true' + ); + const option = screen.getByTestId('new-pipeline-option'); act(() => { - option.dispatchEvent(new MouseEvent('click', {bubbles: true})); + option.dispatchEvent(new MouseEvent('click', { bubbles: true })); }); - expect(screen.getByTestId("newmodel-next-button")).not.toHaveAttribute('aria-disabled', 'true'); - const button = screen.getByTestId("newmodel-next-button"); + expect(screen.getByTestId('newmodel-next-button')).not.toHaveAttribute( + 'aria-disabled', + 'true' + ); + const button = screen.getByTestId('newmodel-next-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newPipelineUpload') - }) - -}) \ No newline at end of file + expect(router.push).toHaveBeenCalledWith('/assets/newPipelineUpload'); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/newModelUpload.test.tsx b/ai-verify-portal/__tests__/modules/newModelUpload.test.tsx index 83ea34eab..5def2dec6 100644 --- a/ai-verify-portal/__tests__/modules/newModelUpload.test.tsx +++ b/ai-verify-portal/__tests__/modules/newModelUpload.test.tsx @@ -6,64 +6,83 @@ import { MockProviders } from '__mocks__/mockProviders'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) +})); const mocks = [ { request: { query: gql` - subscription validateModelUpdated { - validateModelStatusUpdated { - _id - status - serializer - modelFormat - errorMessages - } + subscription validateModelUpdated { + validateModelStatusUpdated { + _id + status + serializer + modelFormat + errorMessages } + } `, }, result: { data: { - "validateModelStatusUpdated": { - "_id": "642a564af921a817b2192b8c", - "status": "Valid", - "serializer": "joblib", - "modelFormat": "sklearn", - "errorMessages": "", - "__typename": "Status" - } - } - } - } + validateModelStatusUpdated: { + _id: 
'642a564af921a817b2192b8c', + status: 'Valid', + serializer: 'joblib', + modelFormat: 'sklearn', + errorMessages: '', + __typename: 'Status', + }, + }, + }, + }, ]; describe('New Model Upload', () => { - it('should render the back button', () => { - render(); - expect(screen.getByTestId("newmodelupload-back-button")); - }) + render( + + + + ); + expect(screen.getByTestId('newmodelupload-back-button')); + }); it('should navigate to new model page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("newmodelupload-back-button"); + render( + + + + ); + const button = screen.getByTestId('newmodelupload-back-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newModel') - }) + expect(router.push).toHaveBeenCalledWith('/assets/newModel'); + }); it('should render the upload screen', () => { - render(); - expect(screen.getByText('Add New AI Model > Upload Model File')).toBeDefined(); + render( + + + + ); + expect( + screen.getByText('Add New AI Model > Upload Model File') + ).toBeDefined(); expect(screen.getByText('Before uploading...')).toBeDefined(); - expect(screen.getByTestId("upload-file-dropbox")).toHaveTextContent("Drag & Drop or Click to Browse"); - expect(screen.getByTestId("upload-folder-button")).toHaveTextContent("Upload Folder"); + expect(screen.getByTestId('upload-file-dropbox')).toHaveTextContent( + 'Drag & Drop or Click to Browse' + ); + expect(screen.getByTestId('upload-folder-button')).toHaveTextContent( + 'Upload Folder' + ); expect(screen.getByText('Selected Files')).toBeDefined(); - expect(screen.getByTestId("upload-models-button")).toHaveAttribute('aria-disabled', 'true'); - }) - -}) \ No newline at end of file + expect(screen.getByTestId('upload-models-button')).toHaveAttribute( + 'aria-disabled', + 'true' + ); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/newPipelineUpload.test.tsx b/ai-verify-portal/__tests__/modules/newPipelineUpload.test.tsx index 66c014aae..a25d0439b 100644 --- a/ai-verify-portal/__tests__/modules/newPipelineUpload.test.tsx +++ b/ai-verify-portal/__tests__/modules/newPipelineUpload.test.tsx @@ -7,68 +7,84 @@ import { silentConsoleLogs } from '__mocks__/mockGlobals'; jest.mock('next/router', () => ({ useRouter: jest.fn(), -})) +})); const mocks = [ { request: { query: gql` - subscription validateModelUpdated { - validateModelStatusUpdated { - _id - status - serializer - modelFormat - errorMessages - } + subscription validateModelUpdated { + validateModelStatusUpdated { + _id + status + serializer + modelFormat + errorMessages } + } `, }, result: { data: { - "validateModelStatusUpdated": { - "_id": "642a564af921a817b2192b8c", - "status": "Valid", - "serializer": "joblib", - "modelFormat": "sklearn", - "errorMessages": "", - "__typename": "Status" - } - } - } - } + validateModelStatusUpdated: { + _id: '642a564af921a817b2192b8c', + status: 'Valid', + serializer: 'joblib', + modelFormat: 'sklearn', + errorMessages: '', + __typename: 'Status', + }, + }, + }, + }, ]; describe('New Pipeline Upload', () => { - beforeAll(() => { silentConsoleLogs(); }); it('should render the back button', () => { - render(); - expect(screen.getByTestId("newpipelineupload-back-button")); - }) - + render( + + + + ); + expect(screen.getByTestId('newpipelineupload-back-button')); + }); + it('should navigate to new model page on button click', () => { (useRouter as jest.Mock).mockReturnValue({ - push: 
jest.fn(), - }) + push: jest.fn(), + }); const router = useRouter(); - render(); - const button = screen.getByTestId("newpipelineupload-back-button"); + render( + + + + ); + const button = screen.getByTestId('newpipelineupload-back-button'); button.click(); - expect(router.push).toHaveBeenCalledWith('/assets/newModel') - }) + expect(router.push).toHaveBeenCalledWith('/assets/newModel'); + }); it('should render the upload screen', () => { - render(); - expect(screen.getByText('Add New AI Model > Upload Pipeline File')).toBeDefined(); + render( + + + + ); + expect( + screen.getByText('Add New AI Model > Upload Pipeline File') + ).toBeDefined(); expect(screen.getByText('Before uploading...')).toBeDefined(); - expect(screen.getByTestId("upload-file-dropbox")).toHaveTextContent("Drag & Drop or Click to Browse"); + expect(screen.getByTestId('upload-file-dropbox')).toHaveTextContent( + 'Drag & Drop or Click to Browse' + ); expect(screen.getByText('Selected Folders')).toBeDefined(); - expect(screen.getByTestId("upload-models-button")).toHaveAttribute('aria-disabled', 'true'); - }) - - -}) \ No newline at end of file + expect(screen.getByTestId('upload-models-button')).toHaveAttribute( + 'aria-disabled', + 'true' + ); + }); +}); diff --git a/ai-verify-portal/__tests__/modules/plugins.test.tsx b/ai-verify-portal/__tests__/modules/plugins.test.tsx index a34f18c17..407a4bc5a 100644 --- a/ai-verify-portal/__tests__/modules/plugins.test.tsx +++ b/ai-verify-portal/__tests__/modules/plugins.test.tsx @@ -3,81 +3,129 @@ import userEvent from '@testing-library/user-event'; import PluginsModule from 'src/modules/plugins/index'; import * as APIs from 'src/modules/plugins/api/plugins'; import * as algoAPIs from 'src/modules/plugins/api/algorithms'; -import { algoPackageDependencyStatusResponse, emptyListResponse, installPluginResponse, pluginsListResponse } from '__mocks__/plugins'; +import { + algoPackageDependencyStatusResponse, + emptyListResponse, + installPluginResponse, + pluginsListResponse, +} from '__mocks__/plugins'; import { MockProviders } from '__mocks__/mockProviders'; import { silentConsoleLogs } from '__mocks__/mockGlobals'; -jest.mock('src/modules/plugins/api/plugins', () => ({ __esModule: true, ...jest.requireActual('src/modules/plugins/api/plugins')})); -jest.mock('src/modules/plugins/api/algorithms', () => ({ __esModule: true, ...jest.requireActual('src/modules/plugins/api/algorithms')})); +jest.mock('src/modules/plugins/api/plugins', () => ({ + __esModule: true, + ...jest.requireActual('src/modules/plugins/api/plugins'), +})); +jest.mock('src/modules/plugins/api/algorithms', () => ({ + __esModule: true, + ...jest.requireActual('src/modules/plugins/api/algorithms'), +})); describe('Plugins Manager', () => { - const fetchAllPluginsSpy = jest.spyOn(APIs, 'fetchAllPlugins'); const uploadPluginSpy = jest.spyOn(APIs, 'uploadPlugin'); const deletePluginSpy = jest.spyOn(APIs, 'deletePlugin'); - const getPythonPackageDependencyStatusSpy = jest.spyOn(algoAPIs, 'getPythonPackageDependencyStatus'); + const getPythonPackageDependencyStatusSpy = jest.spyOn( + algoAPIs, + 'getPythonPackageDependencyStatus' + ); beforeAll(() => { silentConsoleLogs(); - }) + }); afterEach(() => { fetchAllPluginsSpy.mockReset(); uploadPluginSpy.mockReset(); deletePluginSpy.mockReset(); getPythonPackageDependencyStatusSpy.mockReset(); - }) + }); describe('Initial Render', () => { - it('should fetch all plugins on render and sort by Installed Date (asc)', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); 
- const { container } = render(); + const { container } = render( + + + + ); await screen.findByText(/^Partial Dependence Plot$/i); - const pluginsListNodes = container.querySelectorAll('.pluginList-card'); + const pluginsListNodes = + container.querySelectorAll('.pluginList-card'); const pluginsListElements = Array.from(pluginsListNodes); - const pluginNamesAndInstalledDate:[string, string][] = pluginsListElements.map(el => { - const header = el.querySelector('h3') as Element; - const dateDiv = el.querySelector('.listItem_installedDate') as Element; - return [header.textContent as string, dateDiv.textContent as string]; - }); + const pluginNamesAndInstalledDate: [string, string][] = + pluginsListElements.map((el) => { + const header = el.querySelector('h3') as Element; + const dateDiv = el.querySelector( + '.listItem_installedDate' + ) as Element; + return [header.textContent as string, dateDiv.textContent as string]; + }); expect(fetchAllPluginsSpy).toBeCalledTimes(1); expect(pluginsListElements.length).toEqual(6); - expect(pluginNamesAndInstalledDate[0][0]).toEqual('Partial Dependence Plot'); - expect(pluginNamesAndInstalledDate[1][0]).toEqual('AI Verify Stock Decorators'); - expect(pluginNamesAndInstalledDate[2][0]).toEqual('Widgets for Fairness Metrics Toolbox'); - expect(pluginNamesAndInstalledDate[3][0]).toEqual('AI Verify Process Checklist'); - expect(pluginNamesAndInstalledDate[4][0]).toEqual('fairness metrics toolbox for classification'); - expect(pluginNamesAndInstalledDate[5][0]).toEqual('Widgets for SHAP toolbox'); + expect(pluginNamesAndInstalledDate[0][0]).toEqual( + 'Partial Dependence Plot' + ); + expect(pluginNamesAndInstalledDate[1][0]).toEqual( + 'AI Verify Stock Decorators' + ); + expect(pluginNamesAndInstalledDate[2][0]).toEqual( + 'Widgets for Fairness Metrics Toolbox' + ); + expect(pluginNamesAndInstalledDate[3][0]).toEqual( + 'AI Verify Process Checklist' + ); + expect(pluginNamesAndInstalledDate[4][0]).toEqual( + 'fairness metrics toolbox for classification' + ); + expect(pluginNamesAndInstalledDate[5][0]).toEqual( + 'Widgets for SHAP toolbox' + ); await waitFor(() => { expect(container.querySelector('.listItem__selected')).toBeTruthy(); - }) - const selectedPlugin = container.querySelector('.listItem__selected')?.querySelector('h3') as Element; + }); + const selectedPlugin = container + .querySelector('.listItem__selected') + ?.querySelector('h3') as Element; expect(selectedPlugin.textContent).toBe('Partial Dependence Plot'); }); it('should display details of first plugin on plugins list', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); + const { container } = render( + + + + ); await screen.findByTestId('pluginDetailsPanel'); - const pluginDetailsHeader = container.querySelector('#detailsHead > h2') as HTMLHeadingElement; - expect(pluginDetailsHeader.textContent).toEqual('Partial Dependence Plot'); + const pluginDetailsHeader = container.querySelector( + '#detailsHead > h2' + ) as HTMLHeadingElement; + expect(pluginDetailsHeader.textContent).toEqual( + 'Partial Dependence Plot' + ); }); - + it('should display "no plugins found" when list is empty', async () => { fetchAllPluginsSpy.mockResolvedValue(emptyListResponse); - render(); + render( + + + + ); await screen.findByText(/No plugins found/i); expect(screen.queryByText('No plugins found')).toBeTruthy(); }); - - }) + }); describe('Quick Filters, Sort and Search', () => { - it('should render quick filters, search and sort elements', async () => { 
fetchAllPluginsSpy.mockResolvedValue(emptyListResponse); - render(); + render( + + + + ); expect(screen.getByText('Plugin Manager')).toBeTruthy(); await screen.findByText(/Sort by/i); expect(screen.queryByText('Sort by')).toBeTruthy(); @@ -85,10 +133,14 @@ describe('Plugins Manager', () => { expect(screen.queryByPlaceholderText('Search plugins')).toBeTruthy(); expect(screen.queryByText('Install Plugin')).toBeTruthy(); }); - + it('should show sort by menu when menu clicked', async () => { fetchAllPluginsSpy.mockResolvedValue(emptyListResponse); - const { container } = render(); + const { container } = render( + + + + ); await screen.findByText(/Sort by/i); const sortMenu = container.querySelector('#pluginsSortMenu') as Element; expect(sortMenu).toBeTruthy(); @@ -104,149 +156,270 @@ describe('Plugins Manager', () => { it('should sort by Installed Date (desc) ', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); + const { container } = render( + + + + ); await screen.findByText(/^Partial Dependence Plot$/i); const sortMenu = container.querySelector('#pluginsSortMenu') as Element; await userEvent.click(sortMenu); const sortOption = screen.queryByText('Installed Date (desc)') as Element; await userEvent.click(sortOption); - const pluginsListNodes = container.querySelectorAll('.pluginList-card'); + const pluginsListNodes = + container.querySelectorAll('.pluginList-card'); const pluginsListElements = Array.from(pluginsListNodes); - const pluginNamesAndInstalledDate:[string, string][] = pluginsListElements.map(el => { - const header = el.querySelector('h3') as Element; - const dateDiv = el.querySelector('.listItem_installedDate') as Element; - return [header.textContent as string, dateDiv.textContent as string]; - }); - expect(pluginNamesAndInstalledDate[0][0]).toEqual('Widgets for SHAP toolbox'); - expect(pluginNamesAndInstalledDate[1][0]).toEqual('fairness metrics toolbox for classification') - expect(pluginNamesAndInstalledDate[2][0]).toEqual('AI Verify Process Checklist') - expect(pluginNamesAndInstalledDate[3][0]).toEqual('Widgets for Fairness Metrics Toolbox') - expect(pluginNamesAndInstalledDate[4][0]).toEqual('AI Verify Stock Decorators') - expect(pluginNamesAndInstalledDate[5][0]).toEqual('Partial Dependence Plot') + const pluginNamesAndInstalledDate: [string, string][] = + pluginsListElements.map((el) => { + const header = el.querySelector('h3') as Element; + const dateDiv = el.querySelector( + '.listItem_installedDate' + ) as Element; + return [header.textContent as string, dateDiv.textContent as string]; + }); + expect(pluginNamesAndInstalledDate[0][0]).toEqual( + 'Widgets for SHAP toolbox' + ); + expect(pluginNamesAndInstalledDate[1][0]).toEqual( + 'fairness metrics toolbox for classification' + ); + expect(pluginNamesAndInstalledDate[2][0]).toEqual( + 'AI Verify Process Checklist' + ); + expect(pluginNamesAndInstalledDate[3][0]).toEqual( + 'Widgets for Fairness Metrics Toolbox' + ); + expect(pluginNamesAndInstalledDate[4][0]).toEqual( + 'AI Verify Stock Decorators' + ); + expect(pluginNamesAndInstalledDate[5][0]).toEqual( + 'Partial Dependence Plot' + ); }); it('should sort by Plugin Name (asc) ', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); + const { container } = render( + + + + ); await screen.findByText(/^Partial Dependence Plot$/i); const sortMenu = container.querySelector('#pluginsSortMenu') as Element; await userEvent.click(sortMenu); const sortOption = 
screen.queryByText('Plugin Name (asc)') as Element; await userEvent.click(sortOption); - const pluginsListNodes = container.querySelectorAll('.pluginList-card'); + const pluginsListNodes = + container.querySelectorAll('.pluginList-card'); const pluginsListElements = Array.from(pluginsListNodes); - const pluginNamesAndInstalledDate:[string, string][] = pluginsListElements.map(el => { - const header = el.querySelector('h3') as Element; - const dateDiv = el.querySelector('.listItem_installedDate') as Element; - return [header.textContent as string, dateDiv.textContent as string]; - }); - expect(pluginNamesAndInstalledDate[0][0]).toEqual('AI Verify Process Checklist') - expect(pluginNamesAndInstalledDate[1][0]).toEqual('AI Verify Stock Decorators') - expect(pluginNamesAndInstalledDate[2][0]).toEqual('fairness metrics toolbox for classification') - expect(pluginNamesAndInstalledDate[3][0]).toEqual('Partial Dependence Plot') - expect(pluginNamesAndInstalledDate[4][0]).toEqual('Widgets for Fairness Metrics Toolbox') - expect(pluginNamesAndInstalledDate[5][0]).toEqual('Widgets for SHAP toolbox'); + const pluginNamesAndInstalledDate: [string, string][] = + pluginsListElements.map((el) => { + const header = el.querySelector('h3') as Element; + const dateDiv = el.querySelector( + '.listItem_installedDate' + ) as Element; + return [header.textContent as string, dateDiv.textContent as string]; + }); + expect(pluginNamesAndInstalledDate[0][0]).toEqual( + 'AI Verify Process Checklist' + ); + expect(pluginNamesAndInstalledDate[1][0]).toEqual( + 'AI Verify Stock Decorators' + ); + expect(pluginNamesAndInstalledDate[2][0]).toEqual( + 'fairness metrics toolbox for classification' + ); + expect(pluginNamesAndInstalledDate[3][0]).toEqual( + 'Partial Dependence Plot' + ); + expect(pluginNamesAndInstalledDate[4][0]).toEqual( + 'Widgets for Fairness Metrics Toolbox' + ); + expect(pluginNamesAndInstalledDate[5][0]).toEqual( + 'Widgets for SHAP toolbox' + ); }); it('should sort by Plugin Name (desc) ', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); + const { container } = render( + + + + ); await screen.findByText(/^Partial Dependence Plot$/i); const sortMenu = container.querySelector('#pluginsSortMenu') as Element; await userEvent.click(sortMenu); const sortOption = screen.queryByText('Plugin Name (desc)') as Element; await userEvent.click(sortOption); - const pluginsListNodes = container.querySelectorAll('.pluginList-card'); + const pluginsListNodes = + container.querySelectorAll('.pluginList-card'); const pluginsListElements = Array.from(pluginsListNodes); - const pluginNamesAndInstalledDate:[string, string][] = pluginsListElements.map(el => { - const header = el.querySelector('h3') as Element; - const dateDiv = el.querySelector('.listItem_installedDate') as Element; - return [header.textContent as string, dateDiv.textContent as string]; - }); - expect(pluginNamesAndInstalledDate[0][0]).toEqual('Widgets for SHAP toolbox'); - expect(pluginNamesAndInstalledDate[1][0]).toEqual('Widgets for Fairness Metrics Toolbox'); - expect(pluginNamesAndInstalledDate[2][0]).toEqual('Partial Dependence Plot'); - expect(pluginNamesAndInstalledDate[3][0]).toEqual('fairness metrics toolbox for classification'); - expect(pluginNamesAndInstalledDate[4][0]).toEqual('AI Verify Stock Decorators'); - expect(pluginNamesAndInstalledDate[5][0]).toEqual('AI Verify Process Checklist'); + const pluginNamesAndInstalledDate: [string, string][] = + pluginsListElements.map((el) => { + 
const header = el.querySelector('h3') as Element; + const dateDiv = el.querySelector( + '.listItem_installedDate' + ) as Element; + return [header.textContent as string, dateDiv.textContent as string]; + }); + expect(pluginNamesAndInstalledDate[0][0]).toEqual( + 'Widgets for SHAP toolbox' + ); + expect(pluginNamesAndInstalledDate[1][0]).toEqual( + 'Widgets for Fairness Metrics Toolbox' + ); + expect(pluginNamesAndInstalledDate[2][0]).toEqual( + 'Partial Dependence Plot' + ); + expect(pluginNamesAndInstalledDate[3][0]).toEqual( + 'fairness metrics toolbox for classification' + ); + expect(pluginNamesAndInstalledDate[4][0]).toEqual( + 'AI Verify Stock Decorators' + ); + expect(pluginNamesAndInstalledDate[5][0]).toEqual( + 'AI Verify Process Checklist' + ); }); - + it('should filter plugins by text search - match in plugin name', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); - const searchInput = screen.queryByPlaceholderText('Search plugins') as HTMLInputElement; + const { container } = render( + + + + ); + const searchInput = screen.queryByPlaceholderText( + 'Search plugins' + ) as HTMLInputElement; await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) - expect(await screen.findAllByText(/^AI Verify Process Checklist$/i)).toHaveLength(1); - const pluginsListNodes = container.querySelectorAll('.pluginList-card'); + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); + expect( + await screen.findAllByText(/^AI Verify Process Checklist$/i) + ).toHaveLength(1); + const pluginsListNodes = + container.querySelectorAll('.pluginList-card'); expect(pluginsListNodes.length).toEqual(6); await userEvent.type(searchInput, 'veri'); await waitFor(async () => { - expect(await screen.findAllByText(/^AI Verify Process Checklist$/i)).toHaveLength(2); - }) - expect(screen.queryAllByText(/^AI Verify Stock Decorators$/i)).toHaveLength(1); + expect( + await screen.findAllByText(/^AI Verify Process Checklist$/i) + ).toHaveLength(2); + }); + expect( + screen.queryAllByText(/^AI Verify Stock Decorators$/i) + ).toHaveLength(1); expect(screen.queryByText(/^Partial Dependence Plot$/i)).toBeNull(); - const filteredPluginsListNodes = container.querySelectorAll('.pluginList-card'); + const filteredPluginsListNodes = + container.querySelectorAll('.pluginList-card'); expect(filteredPluginsListNodes).toHaveLength(2); - await userEvent.click(screen.queryByTestId('clearSearchInputIcon') as Element); + await userEvent.click( + screen.queryByTestId('clearSearchInputIcon') as Element + ); await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) - expect(await screen.findAllByText(/^AI Verify Process Checklist$/i)).toHaveLength(1); - const clearSearchPluginsListNodes = container.querySelectorAll('.pluginList-card'); + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); + expect( + await screen.findAllByText(/^AI Verify Process Checklist$/i) + ).toHaveLength(1); + const clearSearchPluginsListNodes = + container.querySelectorAll('.pluginList-card'); expect(clearSearchPluginsListNodes.length).toEqual(6); }); it('should filter plugins by text search - match in widget name', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); - const searchInput = screen.queryByPlaceholderText('Search plugins') as HTMLInputElement; + 
const { container } = render( + + + + ); + const searchInput = screen.queryByPlaceholderText( + 'Search plugins' + ) as HTMLInputElement; await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) - expect(await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i)).toHaveLength(1); - const pluginsListNodes = container.querySelectorAll('.pluginList-card'); + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); + expect( + await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i) + ).toHaveLength(1); + const pluginsListNodes = + container.querySelectorAll('.pluginList-card'); expect(pluginsListNodes.length).toEqual(6); await userEvent.type(searchInput, 'false'); await waitFor(async () => { - expect(await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i)).toHaveLength(2); - }) - const filteredPluginsListNodes = container.querySelectorAll('.pluginList-card'); + expect( + await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i) + ).toHaveLength(2); + }); + const filteredPluginsListNodes = + container.querySelectorAll('.pluginList-card'); expect(filteredPluginsListNodes).toHaveLength(1); }); it('should filter plugins by text search - match in plugin description', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); - const searchInput = screen.queryByPlaceholderText('Search plugins') as HTMLInputElement; + const { container } = render( + + + + ); + const searchInput = screen.queryByPlaceholderText( + 'Search plugins' + ) as HTMLInputElement; await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) - expect(await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i)).toHaveLength(1); - const pluginsListNodes = container.querySelectorAll('.pluginList-card'); + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); + expect( + await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i) + ).toHaveLength(1); + const pluginsListNodes = + container.querySelectorAll('.pluginList-card'); expect(pluginsListNodes.length).toEqual(6); await userEvent.type(searchInput, '(FMT)'); await waitFor(async () => { - expect(await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i)).toHaveLength(2); - }) - expect(await screen.findAllByText(/^Fairness Metrics Toolbox For Classification$/i)).toHaveLength(1); - const filteredPluginsListNodes = container.querySelectorAll('.pluginList-card'); + expect( + await screen.findAllByText(/^Widgets For Fairness Metrics Toolbox$/i) + ).toHaveLength(2); + }); + expect( + await screen.findAllByText( + /^Fairness Metrics Toolbox For Classification$/i + ) + ).toHaveLength(1); + const filteredPluginsListNodes = + container.querySelectorAll('.pluginList-card'); expect(filteredPluginsListNodes).toHaveLength(2); }); //TODO - assert filter - }) + }); describe('Install Plugin', () => { it('should show install plugin confirmation modal dialog when file is selected', async () => { fetchAllPluginsSpy.mockResolvedValue(emptyListResponse); - render(); + render( + + + + ); const fileName = 'mockplugin.zip'; - const pluginFileInput = await screen.findByTestId('pluginFileInput'); + const pluginFileInput = await screen.findByTestId( + 'pluginFileInput' + ); const mockFile = new File(['---'], fileName, { type: 'application/zip' }); await userEvent.upload(pluginFileInput, 
mockFile); - expect((pluginFileInput.files as FileList)).toHaveLength(1); + expect(pluginFileInput.files as FileList).toHaveLength(1); expect((pluginFileInput.files as FileList)[0]).toBe(mockFile); await screen.findByText(fileName); expect(screen.queryByText('Install')).toBeTruthy(); @@ -257,31 +430,47 @@ describe('Plugins Manager', () => { it('should install plugin when "Install" button is clicked and show success message', async () => { fetchAllPluginsSpy.mockResolvedValue(emptyListResponse); uploadPluginSpy.mockResolvedValueOnce(installPluginResponse); - render(); + render( + + + + ); const fileName = 'mockplugin.zip'; - const pluginFileInput = await screen.findByTestId('pluginFileInput'); - const mockFile = new File(['---'], fileName, {type: 'application/zip'}); + const pluginFileInput = await screen.findByTestId( + 'pluginFileInput' + ); + const mockFile = new File(['---'], fileName, { type: 'application/zip' }); await userEvent.upload(pluginFileInput, mockFile); const installBtn = await screen.findByText('Install'); await userEvent.click(installBtn); expect(uploadPluginSpy).toHaveBeenCalledWith(mockFile); expect(uploadPluginSpy).toHaveBeenCalledTimes(1); - expect(screen.queryByText(/Plugin was successfully installed/i)).toBeTruthy(); + expect( + screen.queryByText(/Plugin was successfully installed/i) + ).toBeTruthy(); const okBtn = await screen.findByText(/ok/i); await userEvent.click(okBtn); - expect(screen.queryByText(/Plugin was successfully installed/i)).toBeNull(); + expect( + screen.queryByText(/Plugin was successfully installed/i) + ).toBeNull(); await waitFor(() => expect(fetchAllPluginsSpy).toHaveBeenCalledTimes(2)); }); it('should display error modal dialog if selected file mimetype is not "zip" type', async () => { fetchAllPluginsSpy.mockResolvedValue(emptyListResponse); uploadPluginSpy.mockResolvedValueOnce(installPluginResponse); - render(); + render( + + + + ); const fileName = 'mockplugin.zip'; - const pluginFileInput = await screen.findByTestId('pluginFileInput'); - const mockFile = new File(['---'], fileName, {type: 'image/png'}); + const pluginFileInput = await screen.findByTestId( + 'pluginFileInput' + ); + const mockFile = new File(['---'], fileName, { type: 'image/png' }); await userEvent.upload(pluginFileInput, mockFile, { applyAccept: false }); - expect((pluginFileInput.files as FileList)).toHaveLength(1); + expect(pluginFileInput.files as FileList).toHaveLength(1); expect(screen.queryByText(/Unable to install plugin/i)).toBeTruthy(); expect(uploadPluginSpy).not.toHaveBeenCalled(); }); @@ -289,129 +478,149 @@ describe('Plugins Manager', () => { it('should display error modal dialog if selected file filename extension is not ".zip"', async () => { fetchAllPluginsSpy.mockResolvedValue(emptyListResponse); uploadPluginSpy.mockResolvedValueOnce(installPluginResponse); - render(); + render( + + + + ); const fileName = 'mock.png'; - const pluginFileInput = await screen.findByTestId('pluginFileInput'); - const mockFile = new File(['---'], fileName, {type: 'application/zip'}); + const pluginFileInput = await screen.findByTestId( + 'pluginFileInput' + ); + const mockFile = new File(['---'], fileName, { type: 'application/zip' }); await userEvent.upload(pluginFileInput, mockFile, { applyAccept: false }); - expect((pluginFileInput.files as FileList)).toHaveLength(1); + expect(pluginFileInput.files as FileList).toHaveLength(1); expect(screen.queryByText(/Unable to install plugin/i)).toBeTruthy(); expect(uploadPluginSpy).not.toHaveBeenCalled(); }); }); - describe('Plugins 
List Card and Plugin Details', () => { - it('should display correct information on list card', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - render(); - const aivPChecklistCardHeading = await screen.findByText(/AI Verify Process Checklist/i) as Element; - const aivPChecklistCard = aivPChecklistCardHeading.parentElement as Element; + render( + + + + ); + const aivPChecklistCardHeading = (await screen.findByText( + /AI Verify Process Checklist/i + )) as Element; + const aivPChecklistCard = + aivPChecklistCardHeading.parentElement as Element; expect(aivPChecklistCard).toMatchInlineSnapshot(` -
[inline snapshot markup omitted: diff of the plugin list card HTML for "AI Verify Process Checklist", showing Version: 0.1.0, Installed on: 27/03/2023, 16:02:58, Author: Leong Peck Yoke, widgets: 2, input blocks: 2]
`); }); - + it('should highlight selected plugin and display the plugin details when clicked', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - render(); + render( + + + + ); await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) - const listCardHeading = await screen.findByText(/AI Verify Process Checklist/i) as Element; + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); + const listCardHeading = (await screen.findByText( + /AI Verify Process Checklist/i + )) as Element; const listCard = listCardHeading.parentElement as Element; expect(listCard.classList.contains('listItem__selected')).toBe(false); await userEvent.click(listCard); @@ -420,14 +629,24 @@ describe('Plugins Manager', () => { it('should display tabs and number of components', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); + const { container } = render( + + + + ); await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) - const listCardHeading = await screen.findByText(/AI Verify Process Checklist/i) as Element; + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); + const listCardHeading = (await screen.findByText( + /AI Verify Process Checklist/i + )) as Element; const listCard = listCardHeading.parentElement as Element; await userEvent.click(listCard); - const headings = await screen.findAllByText(/^AI Verify Process Checklist$/) as Element[]; + const headings = (await screen.findAllByText( + /^AI Verify Process Checklist$/ + )) as Element[]; expect(headings).toHaveLength(2); const tabBtns = container.querySelectorAll('.tabBtn'); expect(tabBtns).toHaveLength(2); @@ -437,20 +656,40 @@ describe('Plugins Manager', () => { it('should display plugin details under tabs', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - const { container } = render(); + const { container } = render( + + + + ); await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) - const listCardHeading = await screen.findByText(/AI Verify Process Checklist/i) as Element; + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); + const listCardHeading = (await screen.findByText( + /AI Verify Process Checklist/i + )) as Element; const listCard = listCardHeading.parentElement as Element; await userEvent.click(listCard); - await screen.findAllByText(/^AI Verify Process Checklist$/) as Element[]; + (await screen.findAllByText( + /^AI Verify Process Checklist$/ + )) as Element[]; const tabBtns = container.querySelectorAll('.tabBtn'); - expect(screen.queryByText(/^Explainability Process Checklist Answers$/i)).toBeTruthy(); - expect(screen.queryByText(/^Fairness Process Checklist Answers$/i)).toBeTruthy(); - expect(screen.queryByText(/^Explainability Process Checklist 0.1.0$/i)).toBeNull(); - expect(screen.queryByText(/^Fairness Process Checklist 0.1.0$/i)).toBeNull(); - const wDetailsHeading = screen.queryByText(/^Explainability Process Checklist Answers$/i) as Element; + expect( + screen.queryByText(/^Explainability Process Checklist Answers$/i) + ).toBeTruthy(); + expect( + screen.queryByText(/^Fairness Process Checklist Answers$/i) + ).toBeTruthy(); + expect( + screen.queryByText(/^Explainability Process Checklist 0.1.0$/i) + ).toBeNull(); + expect( + 
screen.queryByText(/^Fairness Process Checklist 0.1.0$/i) + ).toBeNull(); + const wDetailsHeading = screen.queryByText( + /^Explainability Process Checklist Answers$/i + ) as Element; const widgetDetailsCard = wDetailsHeading.parentElement; expect(widgetDetailsCard).toMatchInlineSnapshot(`
{ `); await userEvent.click(tabBtns[1]); - expect(screen.queryByText(/^Explainability Process Checklist Answers$/i)).toBeNull(); - expect(screen.queryByText(/^Fairness Process Checklist Answers$/i)).toBeNull(); - expect(screen.queryByText(/^Explainability Process Checklist 0.1.0$/i)).toBeTruthy(); - expect(screen.queryByText(/^Fairness Process Checklist 0.1.0$/i)).toBeTruthy(); - const ibDetailsHeading = screen.queryByText(/^Explainability Process Checklist 0.1.0$/i) as Element; + expect( + screen.queryByText(/^Explainability Process Checklist Answers$/i) + ).toBeNull(); + expect( + screen.queryByText(/^Fairness Process Checklist Answers$/i) + ).toBeNull(); + expect( + screen.queryByText(/^Explainability Process Checklist 0.1.0$/i) + ).toBeTruthy(); + expect( + screen.queryByText(/^Fairness Process Checklist 0.1.0$/i) + ).toBeTruthy(); + const ibDetailsHeading = screen.queryByText( + /^Explainability Process Checklist 0.1.0$/i + ) as Element; const iblockDetailsCard = ibDetailsHeading.parentElement; expect(iblockDetailsCard).toMatchInlineSnapshot(`
{ it('should display plugin algo environment dependencies statuses', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - getPythonPackageDependencyStatusSpy.mockResolvedValue(algoPackageDependencyStatusResponse) - render(); + getPythonPackageDependencyStatusSpy.mockResolvedValue( + algoPackageDependencyStatusResponse + ); + render( + + + + ); await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); expect(await screen.findByText(/^Required Packages$/i)).toBeTruthy(); }); expect(getPythonPackageDependencyStatusSpy).toHaveBeenCalledWith([ - {"requirement": "numpy==1.24.1"}, - {"requirement": "scipy==1.10.0"} + { requirement: 'numpy==1.24.1' }, + { requirement: 'scipy==1.10.0' }, ]); - const requiredPackageHeading = screen.queryByText(/^Required Packages$/i) as Element; + const requiredPackageHeading = screen.queryByText( + /^Required Packages$/i + ) as Element; const statusContainer = requiredPackageHeading.parentElement as Element; expect(statusContainer).toMatchInlineSnapshot(`
{ }); describe('Delete Plugin', () => { - it('should show delete plugin confirmation modal dialog delete button is clicked', async () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); - render(); + render( + + + + ); await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); const delBtn = screen.queryByText(/^Delete$/i) as Element; await userEvent.click(delBtn); - await screen.findByText(/^Are you sure you want to delete this plugin\?$/i); + await screen.findByText( + /^Are you sure you want to delete this plugin\?$/i + ); expect(await screen.findAllByText(/^Delete$/i)).toHaveLength(2); }); @@ -690,18 +956,26 @@ describe('Plugins Manager', () => { fetchAllPluginsSpy.mockResolvedValue(pluginsListResponse); const GID = 'aiverify.stock.algorithms.partial_dependence_plot'; deletePluginSpy.mockResolvedValueOnce({ status: 200, data: GID }); - render(); + render( + + + + ); await waitFor(async () => { - expect(await screen.findAllByText(/^Partial Dependence Plot$/i)).toHaveLength(3); - }) + expect( + await screen.findAllByText(/^Partial Dependence Plot$/i) + ).toHaveLength(3); + }); const delBtn = screen.queryByText(/^Delete$/i) as Element; await userEvent.click(delBtn); const deleteBtns = await screen.findAllByText(/^Delete$/i); await userEvent.click(deleteBtns[1]); await screen.findByText(/^Plugin was successfully removed$/i); expect(deletePluginSpy).toHaveBeenCalledWith(GID); - expect(screen.queryByText(/^Plugin was successfully removed$/i)).toBeTruthy(); + expect( + screen.queryByText(/^Plugin was successfully removed$/i) + ).toBeTruthy(); await waitFor(() => expect(fetchAllPluginsSpy).toHaveBeenCalledTimes(2)); }); }); -}) \ No newline at end of file +}); diff --git a/ai-verify-portal/ci/createBadges.mjs b/ai-verify-portal/ci/createBadges.mjs index e3bce767c..306ec1766 100644 --- a/ai-verify-portal/ci/createBadges.mjs +++ b/ai-verify-portal/ci/createBadges.mjs @@ -1,184 +1,196 @@ -// Create badge icons for unit tests, coverage, lint, dependencies and licenses check -// Usage: node ci/createBadges.mjs -// where may be test, coverage, lint, dependency or license -// note that createBadges.mjs must be run from the root dir of the project. -import fs from 'fs' -import { makeBadge } from 'badge-maker' - -import { createRequire } from 'module' -const require = createRequire(import.meta.url) - -if (process.argv.length < 3) { - console.log('badgeToCreate arg not provided') - process.exit(-1) -} - -const badgeToCreate = process.argv[2] -let color = 'red' - -// Create coverage badge -if (badgeToCreate == 'coverage') { - const covPct = codeCoverage() - color = 'brightgreen' - if (covPct < 20) - color = 'red' - else if (covPct < 70) - color = '#ffa500' // orange_2 - const covBadge = { - label: 'coverage', - message: covPct.toFixed(0) + '%', - color: color - } - const covSvg = makeBadge(covBadge) - fs.writeFileSync('cov-badge.svg', covSvg) -} - -// Create test results badge -if (badgeToCreate == 'test') { - const {numPassedTests, numFailedTests} = testResults() - color = numPassedTests != 0 && numFailedTests == 0 ? 
'brightgreen' : 'red' - const testBadge = { - label: 'tests', - message: numPassedTests + " passed, " + numFailedTests + " failed", - color: color - } - const testSvg = makeBadge(testBadge) - fs.writeFileSync('test-badge.svg', testSvg) -} - -// Create lint results badge -if (badgeToCreate == 'lint') { - const numProblems = checkLintErrors() - if (numProblems.errors == 0 && numProblems.warnings == 0) - color = 'brightgreen' - else if (numProblems.errors > 0) - color = 'red' - else - color = '#ffa500' // orange_2 - const lintBadge = { - label: 'lint', - message: numProblems.errors + " errors, " + numProblems.warnings + " warnings", - color: color - } - const lintSvg = makeBadge(lintBadge) - fs.writeFileSync('lint-badge.svg', lintSvg) -} - -// Create dependency check badge -if (badgeToCreate == 'dependency') { - const numVulnerabilities = checkDependencies() - color = 'red' - let msg = "error" - if (numVulnerabilities) { - if (numVulnerabilities == 0) - color = 'brightgreen' - else - color = 'red' - msg = numVulnerabilities + " vulnerabilities" - } - const depBadge = { - label: 'dependencies', - message: msg, - color: color - } - const depSvg = makeBadge(depBadge) - fs.writeFileSync('dep-badge.svg', depSvg) -} - -// Create license check badge -if (badgeToCreate == 'license') { - const numCopyleftLic = checkCopyleftLicenses().length - color = numCopyleftLic == 0 ? 'brightgreen' : 'red' - const licBadge = { - label: 'licenses', - message: numCopyleftLic + " copyleft", - color: color - } - const licSvg = makeBadge(licBadge) - fs.writeFileSync('lic-badge.svg', licSvg) -} - - -// Create badge for shields.io -// const covBadge = { -// schemaVersion: 1, -// label: 'coverage', -// message: covPct.toFixed(0) + '%', -// color: color -// } -// console.log("covBadge:", JSON.stringify(covBadge)) -// fs.writeFileSync(process.argv[2], JSON.stringify(covBadge)) - -// fs.writeFile(covBadgeFile, covSvg, err => { -// if (err) { -// console.log(`Error writing ${covBadgeFile}: ${err.message}`) -// process.exit(1) -// } -// fs.writeFile(testBadgeFile, testSvg, err => { -// if (err) { -// console.log(`Error writing ${testBadgeFile}: ${err.message}`) -// process.exit(1) -// } -// console.log(`Created ${covBadgeFile} and ${testBadgeFile}`) -// }) -// }) - -// return number of passed and failed tests. -function testResults() { - // path reference in require() is relative to this js, not cwd - const testSummary = require('../test-results.json') - return { numPassedTests: testSummary.numPassedTests, numFailedTests: testSummary.numFailedTests } -} - -// return code coverage percent. -function codeCoverage() { - // path reference in require() is relative to this js, not cwd - const covSummary = require('../coverage/coverage-summary.json') - - return covSummary.total.statements.pct -} - -// return number of linter errors and warnings -function checkLintErrors() { - // path reference in require() is relative to this js, not cwd - const lintResults = require('../eslint-report.json') - let numErrors = 0 - let numWarnings = 0 - lintResults.forEach((row) => { - numErrors += row.errorCount - numErrors += row.fatalErrorCount - numWarnings += row.warningCount - }) - return { errors : numErrors, warnings : numWarnings } -} - -// return number of vulnerabilities found by dependency checker. 
-function checkDependencies() { - const text = fs.readFileSync('./npm-audit-report.md', { encoding: 'utf8', flag: 'r' }) - const pattern = /Found \*\*(\d+)\*\* vulnerabilities within/ - const matches = text.match(pattern); - if (matches) { - const num = matches[1] // Extract the captured group - //console.log("Dep-check: vulnerabilities found:", num) - return num - } else { - //console.log("Dep-check error: pattern not found.") - return null - } -} - -// return string array of copyleft licenses found -function checkCopyleftLicenses() { - const copyleftLic = [ 'GPL', 'LGPL', 'MPL', 'AGPL', 'EUPL', 'CCDL', - 'EPL', 'CC-BY-SA', 'OSL', 'CPL' ] - const text = fs.readFileSync('./license-report.txt', { encoding: 'utf8', flag: 'r' }) - const foundLic = [] - copyleftLic.forEach((lic) => { - const licRegex = new RegExp(lic, 'i') - if (licRegex.test(text)) { - foundLic.push(lic) - } - }) - return foundLic -} - +// Create badge icons for unit tests, coverage, lint, dependencies and licenses check +// Usage: node ci/createBadges.mjs +// where may be test, coverage, lint, dependency or license +// note that createBadges.mjs must be run from the root dir of the project. +import fs from 'fs'; +import { makeBadge } from 'badge-maker'; + +import { createRequire } from 'module'; +const require = createRequire(import.meta.url); + +if (process.argv.length < 3) { + console.log('badgeToCreate arg not provided'); + process.exit(-1); +} + +const badgeToCreate = process.argv[2]; +let color = 'red'; + +// Create coverage badge +if (badgeToCreate == 'coverage') { + const covPct = codeCoverage(); + color = 'brightgreen'; + if (covPct < 20) color = 'red'; + else if (covPct < 70) color = '#ffa500'; // orange_2 + const covBadge = { + label: 'coverage', + message: covPct.toFixed(0) + '%', + color: color, + }; + const covSvg = makeBadge(covBadge); + fs.writeFileSync('cov-badge.svg', covSvg); +} + +// Create test results badge +if (badgeToCreate == 'test') { + const { numPassedTests, numFailedTests } = testResults(); + color = numPassedTests != 0 && numFailedTests == 0 ? 'brightgreen' : 'red'; + const testBadge = { + label: 'tests', + message: numPassedTests + ' passed, ' + numFailedTests + ' failed', + color: color, + }; + const testSvg = makeBadge(testBadge); + fs.writeFileSync('test-badge.svg', testSvg); +} + +// Create lint results badge +if (badgeToCreate == 'lint') { + const numProblems = checkLintErrors(); + if (numProblems.errors == 0 && numProblems.warnings == 0) + color = 'brightgreen'; + else if (numProblems.errors > 0) color = 'red'; + else color = '#ffa500'; // orange_2 + const lintBadge = { + label: 'lint', + message: + numProblems.errors + ' errors, ' + numProblems.warnings + ' warnings', + color: color, + }; + const lintSvg = makeBadge(lintBadge); + fs.writeFileSync('lint-badge.svg', lintSvg); +} + +// Create dependency check badge +if (badgeToCreate == 'dependency') { + const numVulnerabilities = checkDependencies(); + color = 'red'; + let msg = 'error'; + if (numVulnerabilities) { + if (numVulnerabilities == 0) color = 'brightgreen'; + else color = 'red'; + msg = numVulnerabilities + ' vulnerabilities'; + } + const depBadge = { + label: 'dependencies', + message: msg, + color: color, + }; + const depSvg = makeBadge(depBadge); + fs.writeFileSync('dep-badge.svg', depSvg); +} + +// Create license check badge +if (badgeToCreate == 'license') { + const numCopyleftLic = checkCopyleftLicenses().length; + color = numCopyleftLic == 0 ? 
'brightgreen' : 'red'; + const licBadge = { + label: 'licenses', + message: numCopyleftLic + ' copyleft', + color: color, + }; + const licSvg = makeBadge(licBadge); + fs.writeFileSync('lic-badge.svg', licSvg); +} + +// Create badge for shields.io +// const covBadge = { +// schemaVersion: 1, +// label: 'coverage', +// message: covPct.toFixed(0) + '%', +// color: color +// } +// console.log("covBadge:", JSON.stringify(covBadge)) +// fs.writeFileSync(process.argv[2], JSON.stringify(covBadge)) + +// fs.writeFile(covBadgeFile, covSvg, err => { +// if (err) { +// console.log(`Error writing ${covBadgeFile}: ${err.message}`) +// process.exit(1) +// } +// fs.writeFile(testBadgeFile, testSvg, err => { +// if (err) { +// console.log(`Error writing ${testBadgeFile}: ${err.message}`) +// process.exit(1) +// } +// console.log(`Created ${covBadgeFile} and ${testBadgeFile}`) +// }) +// }) + +// return number of passed and failed tests. +function testResults() { + // path reference in require() is relative to this js, not cwd + const testSummary = require('../test-results.json'); + return { + numPassedTests: testSummary.numPassedTests, + numFailedTests: testSummary.numFailedTests, + }; +} + +// return code coverage percent. +function codeCoverage() { + // path reference in require() is relative to this js, not cwd + const covSummary = require('../coverage/coverage-summary.json'); + + return covSummary.total.statements.pct; +} + +// return number of linter errors and warnings +function checkLintErrors() { + // path reference in require() is relative to this js, not cwd + const lintResults = require('../eslint-report.json'); + let numErrors = 0; + let numWarnings = 0; + lintResults.forEach((row) => { + numErrors += row.errorCount; + numErrors += row.fatalErrorCount; + numWarnings += row.warningCount; + }); + return { errors: numErrors, warnings: numWarnings }; +} + +// return number of vulnerabilities found by dependency checker. 
+function checkDependencies() { + const text = fs.readFileSync('./npm-audit-report.md', { + encoding: 'utf8', + flag: 'r', + }); + const pattern = /Found \*\*(\d+)\*\* vulnerabilities within/; + const matches = text.match(pattern); + if (matches) { + const num = matches[1]; // Extract the captured group + //console.log("Dep-check: vulnerabilities found:", num) + return num; + } else { + //console.log("Dep-check error: pattern not found.") + return null; + } +} + +// return string array of copyleft licenses found +function checkCopyleftLicenses() { + const copyleftLic = [ + 'GPL', + 'LGPL', + 'MPL', + 'AGPL', + 'EUPL', + 'CCDL', + 'EPL', + 'CC-BY-SA', + 'OSL', + 'CPL', + ]; + const text = fs.readFileSync('./license-report.txt', { + encoding: 'utf8', + flag: 'r', + }); + const foundLic = []; + copyleftLic.forEach((lic) => { + const licRegex = new RegExp(lic, 'i'); + if (licRegex.test(text)) { + foundLic.push(lic); + } + }); + return foundLic; +} diff --git a/ai-verify-portal/config/ai-verify.algorithm.schema.json b/ai-verify-portal/config/ai-verify.algorithm.schema.json index 574a26a54..2e1921e33 100644 --- a/ai-verify-portal/config/ai-verify.algorithm.schema.json +++ b/ai-verify-portal/config/ai-verify.algorithm.schema.json @@ -1,72 +1,65 @@ -{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "/ai-verify.algorithm.schema.json", - "title": "Algorithm Metadata Schema", - "description": "AI Verify algorithm metadata schema", - "type": "object", - "properties": { - "cid": { - "description": "Unique identififer for the algorithm within the plugin", - "type": "string", - "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", - "minLength": 1, - "maxLength": 128 - }, - "name": { - "description": "Algorithm name", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "modelType": { - "description": "AI model type", - "type": "array", - "minItems": 1, - "maxItems": 2, - "items": { - "type": "string", - "enum": [ - "classification", - "regression" - ] - } - }, - "version": { - "description": "Version of the algorithm, default to plugin version if not specificed", - "type": "string", - "minLength": 1, - "maxLength": 256 - }, - "author": { - "description": "Algorithm author", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "description": { - "description": "Plugin description", - "type": "string", - "maxLength": 256 - }, - "tags": { - "description": "Tags describing this algorithm", - "type": "array", - "maxItems": 100, - "items": { - "type": "string", - "minLength": 1, - "maxLength": 128 - } - }, - "requireGroundTruth": { - "description": "Does this algorithm requires ground truth?", - "type": "boolean", - "default": true - } - }, - "required": [ - "cid", - "name", - "modelType" - ] -} \ No newline at end of file +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "/ai-verify.algorithm.schema.json", + "title": "Algorithm Metadata Schema", + "description": "AI Verify algorithm metadata schema", + "type": "object", + "properties": { + "cid": { + "description": "Unique identififer for the algorithm within the plugin", + "type": "string", + "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", + "minLength": 1, + "maxLength": 128 + }, + "name": { + "description": "Algorithm name", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "modelType": { + "description": "AI model type", + "type": "array", + "minItems": 1, + "maxItems": 2, + "items": { + "type": "string", + "enum": ["classification", "regression"] + } + }, + "version": { + "description": 
"Version of the algorithm, default to plugin version if not specificed", + "type": "string", + "minLength": 1, + "maxLength": 256 + }, + "author": { + "description": "Algorithm author", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "description": { + "description": "Plugin description", + "type": "string", + "maxLength": 256 + }, + "tags": { + "description": "Tags describing this algorithm", + "type": "array", + "maxItems": 100, + "items": { + "type": "string", + "minLength": 1, + "maxLength": 128 + } + }, + "requireGroundTruth": { + "description": "Does this algorithm requires ground truth?", + "type": "boolean", + "default": true + } + }, + "required": ["cid", "name", "modelType"] +} diff --git a/ai-verify-portal/config/ai-verify.inputBlock.schema.json b/ai-verify-portal/config/ai-verify.inputBlock.schema.json index 1b77e518c..7d32d506d 100644 --- a/ai-verify-portal/config/ai-verify.inputBlock.schema.json +++ b/ai-verify-portal/config/ai-verify.inputBlock.schema.json @@ -1,53 +1,44 @@ -{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "/ai-verify.inputBlock.schema.json", - "title": "Input Block Metadata Schema", - "description": "AI Verify Input Block metadata schema", - "type": "object", - "properties": { - "cid": { - "description": "Unique identififer for the input block within the plugin", - "type": "string", - "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", - "minLength": 1, - "maxLength": 128 - }, - "name": { - "description": "Input Block name", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "description": { - "description": "Input Block description", - "type": "string", - "maxLength": 256 - }, - "group": { - "description": "Input Block group", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "width": { - "description": "Width of Input Block dialog", - "type": "string", - "enum": [ - "xs", - "sm", - "md", - "lg", - "xl" - ], - "default": "md" - }, - "fullScreen": { - "description": "Width of Input Block dialog", - "type": "boolean" - } - }, - "required": [ - "cid", - "name" - ] -} \ No newline at end of file +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "/ai-verify.inputBlock.schema.json", + "title": "Input Block Metadata Schema", + "description": "AI Verify Input Block metadata schema", + "type": "object", + "properties": { + "cid": { + "description": "Unique identififer for the input block within the plugin", + "type": "string", + "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", + "minLength": 1, + "maxLength": 128 + }, + "name": { + "description": "Input Block name", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "description": { + "description": "Input Block description", + "type": "string", + "maxLength": 256 + }, + "group": { + "description": "Input Block group", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "width": { + "description": "Width of Input Block dialog", + "type": "string", + "enum": ["xs", "sm", "md", "lg", "xl"], + "default": "md" + }, + "fullScreen": { + "description": "Width of Input Block dialog", + "type": "boolean" + } + }, + "required": ["cid", "name"] +} diff --git a/ai-verify-portal/config/ai-verify.plugin.schema.json b/ai-verify-portal/config/ai-verify.plugin.schema.json index 800f29e71..67bc905fa 100644 --- a/ai-verify-portal/config/ai-verify.plugin.schema.json +++ b/ai-verify-portal/config/ai-verify.plugin.schema.json @@ -1,48 +1,44 @@ { - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": 
"/ai-verify.plugin.schema.json", - "title": "Plugin Metadata Schema", - "description": "AI Verify plugin metadata schema", - "type": "object", - "properties": { - "gid": { - "description": "Unique global identififer for the plugin", - "type": "string", - "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", - "minLength": 1, + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "/ai-verify.plugin.schema.json", + "title": "Plugin Metadata Schema", + "description": "AI Verify plugin metadata schema", + "type": "object", + "properties": { + "gid": { + "description": "Unique global identififer for the plugin", + "type": "string", + "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", + "minLength": 1, "maxLength": 128 - }, - "version": { - "description": "Version of the plugin", - "type": "string", - "minLength": 1, + }, + "version": { + "description": "Version of the plugin", + "type": "string", + "minLength": 1, "maxLength": 256 - }, - "name": { - "description": "Plugin name", - "type": "string", - "minLength": 1, + }, + "name": { + "description": "Plugin name", + "type": "string", + "minLength": 1, "maxLength": 128 - }, - "author": { - "description": "Plugin author", - "type": "string", + }, + "author": { + "description": "Plugin author", + "type": "string", "maxLength": 128 - }, - "description": { - "description": "Plugin description", - "type": "string", + }, + "description": { + "description": "Plugin description", + "type": "string", "maxLength": 256 - }, - "url": { - "description": "URL of project page", - "type": "string", + }, + "url": { + "description": "URL of project page", + "type": "string", "maxLength": 2048 - } - }, - "required": [ - "gid", - "version", - "name" - ] -} \ No newline at end of file + } + }, + "required": ["gid", "version", "name"] +} diff --git a/ai-verify-portal/config/ai-verify.template.data.json b/ai-verify-portal/config/ai-verify.template.data.json index 301b6fce8..729e1a439 100644 --- a/ai-verify-portal/config/ai-verify.template.data.json +++ b/ai-verify-portal/config/ai-verify.template.data.json @@ -1,81 +1,70 @@ -{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "/ai-verify.template.data.json", - "title": "Template Metadata Data Schema", - "description": "AI Verify Template Data schema", - "type": "object", - "properties": { - "pages": { - "description": "Unique identififer for the template within the plugin", - "type": "array", - "minItems": 1, - "maxItems": 256, - "items": { - "type": "object", - "description": "Array of page layout", - "properties": { - "layouts": true, - "reportWidgets": { - "type": "array", - "minItems": 1, - "maxItems": 256, - "items": { - "type": "object", - "description": "Schema for report widget", - "properties": { - "widgetGID": { - "type": "string", - "minLength": 1, - "maxLength": 256 - }, - "key": { - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "layoutItemProperties": true, - "properties": true - }, - "required": [ - "widgetGID", - "key" - ] - } - } - }, - "required": [ - "layouts", - "reportWidgets" - ] - } - }, - "globalVars": { - "type": "array", - "description": "Array of global globalVars", - "items": { - "type": "object", - "description": "Global variable key value pair", - "properties": { - "key": { - "type": "string", - "description": "Property key", - "minLength": 1, - "maxLength": 128 - }, - "value": { - "type": "string", - "description": "Property value", - "maxLength": 128 - } - }, - "required": [ - "key", - "value" - ] - } - } - }, - "required": [ - "pages" - ] -} \ No newline 
at end of file +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "/ai-verify.template.data.json", + "title": "Template Metadata Data Schema", + "description": "AI Verify Template Data schema", + "type": "object", + "properties": { + "pages": { + "description": "Unique identififer for the template within the plugin", + "type": "array", + "minItems": 1, + "maxItems": 256, + "items": { + "type": "object", + "description": "Array of page layout", + "properties": { + "layouts": true, + "reportWidgets": { + "type": "array", + "minItems": 1, + "maxItems": 256, + "items": { + "type": "object", + "description": "Schema for report widget", + "properties": { + "widgetGID": { + "type": "string", + "minLength": 1, + "maxLength": 256 + }, + "key": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "layoutItemProperties": true, + "properties": true + }, + "required": ["widgetGID", "key"] + } + } + }, + "required": ["layouts", "reportWidgets"] + } + }, + "globalVars": { + "type": "array", + "description": "Array of global globalVars", + "items": { + "type": "object", + "description": "Global variable key value pair", + "properties": { + "key": { + "type": "string", + "description": "Property key", + "minLength": 1, + "maxLength": 128 + }, + "value": { + "type": "string", + "description": "Property value", + "maxLength": 128 + } + }, + "required": ["key", "value"] + } + } + }, + "required": ["pages"] +} diff --git a/ai-verify-portal/config/ai-verify.template.schema.json b/ai-verify-portal/config/ai-verify.template.schema.json index 9dc4c8fd9..423eb1f3b 100644 --- a/ai-verify-portal/config/ai-verify.template.schema.json +++ b/ai-verify-portal/config/ai-verify.template.schema.json @@ -1,36 +1,33 @@ -{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "/ai-verify.template.schema.json", - "title": "Template Metadata Schema", - "description": "AI Verify Template metadata schema", - "type": "object", - "properties": { - "cid": { - "description": "Unique identififer for the template within the plugin", - "type": "string", - "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", - "minLength": 1, - "maxLength": 128 - }, - "name": { - "description": "Template name", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "description": { - "description": "Template description", - "type": "string", - "maxLength": 256 - }, - "author": { - "description": "Template author", - "type": "string", - "maxLength": 128 - } - }, - "required": [ - "cid", - "name" - ] -} \ No newline at end of file +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "/ai-verify.template.schema.json", + "title": "Template Metadata Schema", + "description": "AI Verify Template metadata schema", + "type": "object", + "properties": { + "cid": { + "description": "Unique identififer for the template within the plugin", + "type": "string", + "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", + "minLength": 1, + "maxLength": 128 + }, + "name": { + "description": "Template name", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "description": { + "description": "Template description", + "type": "string", + "maxLength": 256 + }, + "author": { + "description": "Template author", + "type": "string", + "maxLength": 128 + } + }, + "required": ["cid", "name"] +} diff --git a/ai-verify-portal/config/ai-verify.widget.schema.json b/ai-verify-portal/config/ai-verify.widget.schema.json index dfcf67f0c..82120e1aa 100644 --- a/ai-verify-portal/config/ai-verify.widget.schema.json +++ 
b/ai-verify-portal/config/ai-verify.widget.schema.json @@ -1,178 +1,162 @@ -{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "/ai-verify.widget.schema.json", - "title": "Widget Metadata Schema", - "description": "AI Verify Widget metadata schema", - "type": "object", - "properties": { - "cid": { - "description": "Unique identififer for the widget within the plugin", - "type": "string", - "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", - "minLength": 1, - "maxLength": 128 - }, - "name": { - "description": "Widget name", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "description": { - "description": "Widget description", - "type": "string", - "maxLength": 256 - }, - "widgetSize": { - "description": "Describe the widget size in terms of canvas grid units", - "type": "object", - "properties": { - "minW": { - "description": "Minimum widget width", - "type": "integer", - "minimum": 1, - "maximum": 12 - }, - "minH": { - "description": "Minimum widget height", - "type": "integer", - "minimum": 1, - "maximum": 36 - }, - "maxW": { - "description": "Maximum widget width", - "type": "integer", - "minimum": 1, - "maximum": 12 - }, - "maxH": { - "description": "Maximum widget height", - "type": "integer", - "minimum": 1, - "maximum": 36 - } - }, - "required": [ - "minW", - "minH", - "maxW", - "maxH" - ] - }, - "properties": { - "description": "List of widget properties", - "type": "array", - "maxItems": 256, - "items": { - "type": "object", - "properties": { - "key": { - "description": "Property key", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "helper": { - "description": "Helper text for the property", - "type": "string", - "maxLength": 128 - }, - "default": { - "description": "Property default value", - "type": "string", - "maxLength": 128 - } - }, - "required": [ - "key", - "helper" - ] - } - }, - "tags": { - "description": "Widget tags, used for searching", - "type": "array", - "maxItems": 128, - "items": { - "type": "string", - "minLength": 1, - "maxLength": 128 - } - }, - "dependencies": { - "description": "Widget dependencies", - "type": "array", - "maxItems": 256, - "items": { - "type": "object", - "properties": { - "gid": { - "description": "GID of the dependency component plugin. If empty, assume component within same plugin.", - "type": "string", - "maxLength": 128 - }, - "cid": { - "description": "CID of the dependency component", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "version": { - "description": "Version of the component dependency required", - "type": "string", - "minLength": 1, - "maxLength": 256 - } - }, - "required": [ - "cid" - ] - } - }, - "mockdata": { - "description": "Sample data to be fed into the widget in canvas mode", - "type": "array", - "maxItems": 256, - "items": { - "type": "object", - "properties": { - "type": { - "description": "Type of sample data", - "type": "string", - "enum": [ - "Algorithm", - "InputBlock" - ] - }, - "gid": { - "description": "GID of sample data for component dependency. 
If empty, assume GID of same plugin", - "type": "string", - "maxLength": 128 - }, - "cid": { - "description": "CID of sample data for component dependency", - "type": "string", - "minLength": 1, - "maxLength": 128 - }, - "datapath": { - "description": "Path to the file containing sample data", - "type": "string", - "minLength": 1, - "maxLength": 256 - } - }, - "required": [ - "type", - "cid", - "datapath" - ] - } - } - }, - "required": [ - "cid", - "name", - "widgetSize" - ] -} \ No newline at end of file +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "/ai-verify.widget.schema.json", + "title": "Widget Metadata Schema", + "description": "AI Verify Widget metadata schema", + "type": "object", + "properties": { + "cid": { + "description": "Unique identififer for the widget within the plugin", + "type": "string", + "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-._]*$", + "minLength": 1, + "maxLength": 128 + }, + "name": { + "description": "Widget name", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "description": { + "description": "Widget description", + "type": "string", + "maxLength": 256 + }, + "widgetSize": { + "description": "Describe the widget size in terms of canvas grid units", + "type": "object", + "properties": { + "minW": { + "description": "Minimum widget width", + "type": "integer", + "minimum": 1, + "maximum": 12 + }, + "minH": { + "description": "Minimum widget height", + "type": "integer", + "minimum": 1, + "maximum": 36 + }, + "maxW": { + "description": "Maximum widget width", + "type": "integer", + "minimum": 1, + "maximum": 12 + }, + "maxH": { + "description": "Maximum widget height", + "type": "integer", + "minimum": 1, + "maximum": 36 + } + }, + "required": ["minW", "minH", "maxW", "maxH"] + }, + "properties": { + "description": "List of widget properties", + "type": "array", + "maxItems": 256, + "items": { + "type": "object", + "properties": { + "key": { + "description": "Property key", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "helper": { + "description": "Helper text for the property", + "type": "string", + "maxLength": 128 + }, + "default": { + "description": "Property default value", + "type": "string", + "maxLength": 128 + } + }, + "required": ["key", "helper"] + } + }, + "tags": { + "description": "Widget tags, used for searching", + "type": "array", + "maxItems": 128, + "items": { + "type": "string", + "minLength": 1, + "maxLength": 128 + } + }, + "dependencies": { + "description": "Widget dependencies", + "type": "array", + "maxItems": 256, + "items": { + "type": "object", + "properties": { + "gid": { + "description": "GID of the dependency component plugin. If empty, assume component within same plugin.", + "type": "string", + "maxLength": 128 + }, + "cid": { + "description": "CID of the dependency component", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "version": { + "description": "Version of the component dependency required", + "type": "string", + "minLength": 1, + "maxLength": 256 + } + }, + "required": ["cid"] + } + }, + "mockdata": { + "description": "Sample data to be fed into the widget in canvas mode", + "type": "array", + "maxItems": 256, + "items": { + "type": "object", + "properties": { + "type": { + "description": "Type of sample data", + "type": "string", + "enum": ["Algorithm", "InputBlock"] + }, + "gid": { + "description": "GID of sample data for component dependency. 
If empty, assume GID of same plugin", + "type": "string", + "maxLength": 128 + }, + "cid": { + "description": "CID of sample data for component dependency", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "datapath": { + "description": "Path to the file containing sample data", + "type": "string", + "minLength": 1, + "maxLength": 256 + } + }, + "required": ["type", "cid", "datapath"] + } + }, + "dynamicHeight": { + "description": "Whether this widget has dynamic height", + "type": "boolean", + "default": false + } + }, + "required": ["cid", "name", "widgetSize"] +} diff --git a/ai-verify-portal/config/plugin.stock.ts b/ai-verify-portal/config/plugin.stock.ts index e47626a12..e9afb621b 100644 --- a/ai-verify-portal/config/plugin.stock.ts +++ b/ai-verify-portal/config/plugin.stock.ts @@ -1,14 +1,14 @@ -const stockPlugins = [ - "aiverify.stock.accumulated_local_effect", - "aiverify.stock.partial_dependence_plot", - "aiverify.stock.fairness_metrics_toolbox_for_classification", - "aiverify.stock.fairness_metrics_toolbox_for_regression", - "aiverify.stock.shap_toolbox", - "aiverify.stock.robustness_toolbox", - "aiverify.stock.image_corruption_toolbox", - "aiverify.stock.decorators", - "aiverify.stock.process_checklist", - "aiverify.stock.reports", -] - -export default stockPlugins; \ No newline at end of file +const stockPlugins = [ + 'aiverify.stock.accumulated_local_effect', + 'aiverify.stock.partial_dependence_plot', + 'aiverify.stock.fairness_metrics_toolbox_for_classification', + 'aiverify.stock.fairness_metrics_toolbox_for_regression', + 'aiverify.stock.shap_toolbox', + 'aiverify.stock.robustness_toolbox', + 'aiverify.stock.image_corruption_toolbox', + 'aiverify.stock.decorators', + 'aiverify.stock.process_checklist', + 'aiverify.stock.reports', +]; + +export default stockPlugins; diff --git a/ai-verify-portal/jest.config.js b/ai-verify-portal/jest.config.js index 6bad4d1bb..128790c3f 100644 --- a/ai-verify-portal/jest.config.js +++ b/ai-verify-portal/jest.config.js @@ -1,22 +1,17 @@ -/* eslint-disable @typescript-eslint/no-var-requires */ - -const nextJest = require('next/jest') - -const createJestConfig = nextJest({ - // Provide the path to your Next.js app to load next.config.js and .env files in your test environment - dir: './', -}) - -// Add any custom config to be passed to Jest -/** @type {import('jest').Config} */ -const customJestConfig = { - reporters: [ - "default", - [ - "jest-html-reporter", {"pageTitle": "Test Report"}, - ] - ], - testResultsProcessor: "./node_modules/jest-json-reporter", +/* eslint-disable @typescript-eslint/no-var-requires */ + +const nextJest = require('next/jest'); + +const createJestConfig = nextJest({ + // Provide the path to your Next.js app to load next.config.js and .env files in your test environment + dir: './', +}); + +// Add any custom config to be passed to Jest +/** @type {import('jest').Config} */ +const customJestConfig = { + reporters: ['default', ['jest-html-reporter', { pageTitle: 'Test Report' }]], + testResultsProcessor: './node_modules/jest-json-reporter', collectCoverage: true, coverageThreshold: { global: { @@ -26,18 +21,18 @@ const customJestConfig = { statements: 80, }, }, - coverageProvider: "v8", - coverageReporters: ["html", "json-summary", "text"], - // Add more setup options before each test is run - setupFilesAfterEnv: ['/jest.setup.js'], - // if using TypeScript with a baseUrl set to the root directory then you need the below for alias' to work - moduleDirectories: ['node_modules', '/'], - testEnvironment: 
'jest-environment-jsdom', - moduleNameMapper: { - "src/(.*)": "/src/$1", - "server/(.*)": "/server/$1", - }, -} - -// createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async -module.exports = createJestConfig(customJestConfig) \ No newline at end of file + coverageProvider: 'v8', + coverageReporters: ['html', 'json-summary', 'text'], + // Add more setup options before each test is run + setupFilesAfterEnv: ['/jest.setup.js'], + // if using TypeScript with a baseUrl set to the root directory then you need the below for alias' to work + moduleDirectories: ['node_modules', '/'], + testEnvironment: 'jest-environment-jsdom', + moduleNameMapper: { + 'src/(.*)': '/src/$1', + 'server/(.*)': '/server/$1', + }, +}; + +// createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async +module.exports = createJestConfig(customJestConfig); diff --git a/ai-verify-portal/jest.setup.js b/ai-verify-portal/jest.setup.js index efcc85536..957e3cf08 100644 --- a/ai-verify-portal/jest.setup.js +++ b/ai-verify-portal/jest.setup.js @@ -1,3 +1,3 @@ import '@testing-library/jest-dom/extend-expect'; jest.mock('next/router', () => require('next-router-mock')); -global.fetch = jest.fn(); \ No newline at end of file +global.fetch = jest.fn(); diff --git a/ai-verify-portal/next.config.js b/ai-verify-portal/next.config.js index dc07a1a2c..9659c9197 100644 --- a/ai-verify-portal/next.config.js +++ b/ai-verify-portal/next.config.js @@ -8,40 +8,43 @@ module.exports = { destination: '/home', permanent: true, }, - ] + ]; }, rewrites() { return [ { - source: "/api/graphql", + source: '/api/graphql', destination: `${process.env.APIGW_URL}/graphql`, }, { - source: "/api/upload/data", + source: '/api/upload/data', destination: `${process.env.APIGW_URL}/upload/data`, }, { - source: "/api/upload/model", + source: '/api/upload/model', destination: `${process.env.APIGW_URL}/upload/model`, - },{ - source: "/api/report/:projectId", + }, + { + source: '/api/report/:projectId', destination: `${process.env.APIGW_URL}/report/:projectId`, }, { - source: "/api/template/:path", + source: '/api/template/:path', destination: `${process.env.APIGW_URL}/template/:path`, }, { - source: "/api/logs/:projectId", + source: '/api/logs/:projectId', destination: `${process.env.APIGW_URL}/logs/:projectId`, }, { - source: "/api/requirements/client", - destination: process.env.TEST_ENGINE_URL ? `${process.env.TEST_ENGINE_URL}/requirements/client` : 'http://localhost:8080/requirements/client', + source: '/api/requirements/client', + destination: process.env.TEST_ENGINE_URL + ? 
`${process.env.TEST_ENGINE_URL}/requirements/client` + : 'http://localhost:8080/requirements/client', }, ]; }, eslint: { - dirs: ['pages', 'server', 'src', 'plugins'] + dirs: ['pages', 'server', 'src', 'plugins'], }, }; diff --git a/ai-verify-portal/package-lock.json b/ai-verify-portal/package-lock.json index e6cf81d4a..a8f1e7e06 100644 --- a/ai-verify-portal/package-lock.json +++ b/ai-verify-portal/package-lock.json @@ -52,7 +52,7 @@ "redis": "^4.5.0", "remark-gfm": "^3.0.1", "remark-mdx-images": "^2.0.0", - "semver": "^7.3.8", + "semver": "^7.5.2", "swr": "^2.0.0", "uuid": "^9.0.0" }, @@ -75,9 +75,11 @@ "axios-mock-adapter": "^1.21.4", "eslint": "latest", "eslint-config-next": "latest", + "eslint-config-prettier": "^8.8.0", "jest": "^29.2.2", "jest-environment-jsdom": "^29.2.2", "next-router-mock": "^0.9.3", + "prettier": "^2.8.8", "typescript": "latest" } }, @@ -5675,6 +5677,18 @@ } } }, + "node_modules/eslint-config-prettier": { + "version": "8.8.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", + "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", + "dev": true, + "bin": { + "eslint-config-prettier": "bin/cli.js" + }, + "peerDependencies": { + "eslint": ">=7.0.0" + } + }, "node_modules/eslint-import-resolver-node": { "version": "0.3.7", "dev": true, @@ -10558,6 +10572,21 @@ "node": ">= 0.8.0" } }, + "node_modules/prettier": { + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", + "dev": true, + "bin": { + "prettier": "bin-prettier.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, "node_modules/pretty-format": { "version": "27.5.1", "dev": true, @@ -11281,8 +11310,9 @@ } }, "node_modules/semver": { - "version": "7.3.8", - "license": "ISC", + "version": "7.5.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.2.tgz", + "integrity": "sha512-SoftuTROv/cRjCze/scjGyiDtcUyxw1rgYQSZY7XTmtR5hX+dm76iDbTH8TkLPHCQmlbQVSSbNZCPM2hb0knnQ==", "dependencies": { "lru-cache": "^6.0.0" }, diff --git a/ai-verify-portal/package.json b/ai-verify-portal/package.json index 88f6388d4..f8d673f05 100644 --- a/ai-verify-portal/package.json +++ b/ai-verify-portal/package.json @@ -21,7 +21,10 @@ "dev": "next dev", "build": "next build", "start": "next start", - "lint": "eslint -f html -o eslint-report.html", + "lint": "next lint -f html -o eslint-report.html", + "lint-check": "next lint", + "format-check": "prettier --check .", + "format-fix": "prettier --write .", "test": "jest --silent --verbose", "test-cov": "jest --coverage", "post-update": "echo \"codesandbox preview only, need an update\" && yarn upgrade --latest", @@ -73,7 +76,7 @@ "redis": "^4.5.0", "remark-gfm": "^3.0.1", "remark-mdx-images": "^2.0.0", - "semver": "^7.3.8", + "semver": "^7.5.2", "swr": "^2.0.0", "uuid": "^9.0.0" }, @@ -96,9 +99,11 @@ "axios-mock-adapter": "^1.21.4", "eslint": "latest", "eslint-config-next": "latest", + "eslint-config-prettier": "^8.8.0", "jest": "^29.2.2", "jest-environment-jsdom": "^29.2.2", "next-router-mock": "^0.9.3", + "prettier": "^2.8.8", "typescript": "latest" }, "proxy": "http://localhost:4000" diff --git a/ai-verify-portal/pages/_app.tsx b/ai-verify-portal/pages/_app.tsx index 1b1522d30..1ffdca128 100644 --- a/ai-verify-portal/pages/_app.tsx +++ 
b/ai-verify-portal/pages/_app.tsx @@ -16,7 +16,7 @@ import graphqlClient from 'src/lib/graphqlClient'; import { config } from '@fortawesome/fontawesome-svg-core'; import { NotificationsProvider } from 'src/modules/notifications/providers/notificationsContext'; // import '@fortawesome/fontawesome-svg-core/styles.css' -config.autoAddCss = false +config.autoAddCss = false; // Client-side cache, shared for the whole session of the user in the browser. const clientSideEmotionCache = createEmotionCache(); @@ -28,22 +28,25 @@ interface MyAppProps extends AppProps { export default function MyApp(props: MyAppProps) { const { Component, emotionCache = clientSideEmotionCache, pageProps } = props; const client = graphqlClient(false); - + return ( - - - - AI Verify - - - {/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */} - - - - - + + + + AI Verify + + + {/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */} + + + + + diff --git a/ai-verify-portal/pages/_document.tsx b/ai-verify-portal/pages/_document.tsx index dc781ec6d..90df39bc5 100644 --- a/ai-verify-portal/pages/_document.tsx +++ b/ai-verify-portal/pages/_document.tsx @@ -1,18 +1,34 @@ -import Document, { Html, Head, Main, NextScript } from 'next/document'; +import Document, { + Html, + Head, + Main, + NextScript, + DocumentContext, + DocumentInitialProps, +} from 'next/document'; import createEmotionServer from '@emotion/server/create-instance'; import theme from '../src/lib/theme'; import createEmotionCache from '../src/lib/createEmotionCache'; +import { AppProps, AppType } from 'next/app'; +import { EmotionCache } from '@emotion/cache'; -export default class MyDocument extends Document { +interface DocumentProps extends DocumentInitialProps { + emotionStyleTags: React.ReactNode[]; +} + +interface AppPropsWithEmotion extends AppProps { + emotionCache?: EmotionCache; +} + +export default class MyDocument extends Document { render() { return ( - {/* PWA primary color */} - {(this.props as any).emotionStyleTags} + {this.props.emotionStyleTags}
@@ -26,7 +42,7 @@ export default class MyDocument extends Document { // `getInitialProps` belongs to `_document` (instead of `_app`), // it's compatible with static-site generation (SSG). -MyDocument.getInitialProps = async (ctx) => { +MyDocument.getInitialProps = async (ctx: DocumentContext) => { // Resolution order // // On the server: @@ -58,10 +74,14 @@ MyDocument.getInitialProps = async (ctx) => { ctx.renderPage = () => originalRenderPage({ - enhanceApp: (App: any) => - (function EnhanceApp(props) { + enhanceApp: ( + App: React.ComponentType< + React.ComponentProps & AppPropsWithEmotion + > + ) => + function EnhanceApp(props) { return ; - }), + }, }); const initialProps = await Document.getInitialProps(ctx); diff --git a/ai-verify-portal/pages/api/bundler/[gid].ts b/ai-verify-portal/pages/api/bundler/[gid].ts index 5ec144952..c58fcbd54 100644 --- a/ai-verify-portal/pages/api/bundler/[gid].ts +++ b/ai-verify-portal/pages/api/bundler/[gid].ts @@ -1,17 +1,20 @@ -import type { NextApiRequest, NextApiResponse } from 'next' -import { getMDXBundle } from 'server/bundler'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { gid } = req.query; - - const result = await getMDXBundle(gid as string); - if (!result) { - return res.status(400); - } - const {code, frontmatter} = result; - res.status(200).json({code, frontmatter}); - } catch (err) { - res.status(400); - } -} \ No newline at end of file +import type { NextApiRequest, NextApiResponse } from 'next'; +import { getMDXBundle } from 'server/bundler'; + +export default async function handler( + req: NextApiRequest, + res: NextApiResponse +) { + try { + const { gid } = req.query; + + const result = await getMDXBundle(gid as string); + if (!result) { + return res.status(400); + } + const { code, frontmatter } = result; + res.status(200).json({ code, frontmatter }); + } catch (err) { + res.status(400); + } +} diff --git a/ai-verify-portal/pages/api/bundler/summary/[gid].ts b/ai-verify-portal/pages/api/bundler/summary/[gid].ts index 540042e6a..bc4cc0057 100644 --- a/ai-verify-portal/pages/api/bundler/summary/[gid].ts +++ b/ai-verify-portal/pages/api/bundler/summary/[gid].ts @@ -1,17 +1,20 @@ -import type { NextApiRequest, NextApiResponse } from 'next' +import type { NextApiRequest, NextApiResponse } from 'next'; import { getSummaryBundle } from 'server/bundler'; -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { gid } = req.query; +export default async function handler( + req: NextApiRequest, + res: NextApiResponse +) { + try { + const { gid } = req.query; - const result = await getSummaryBundle(gid as string); - if (!result) { - return res.status(400); - } - const {code, frontmatter} = result; - res.status(200).json({code, frontmatter}); - } catch (err) { - res.status(400); - } -} \ No newline at end of file + const result = await getSummaryBundle(gid as string); + if (!result) { + return res.status(400); + } + const { code, frontmatter } = result; + res.status(200).json({ code, frontmatter }); + } catch (err) { + res.status(400); + } +} diff --git a/ai-verify-portal/pages/api/plugins/delete/[gid].tsx b/ai-verify-portal/pages/api/plugins/delete/[gid].tsx index c074d442c..3d07846f0 100644 --- a/ai-verify-portal/pages/api/plugins/delete/[gid].tsx +++ b/ai-verify-portal/pages/api/plugins/delete/[gid].tsx @@ -1,29 +1,32 @@ -import type { NextApiRequest, NextApiResponse } from 'next'; -import { deletePlugin, isStockPlugin } from 
'server/pluginManager'; -import _ from 'lodash'; - -export default async function deletePluginAPI (req: NextApiRequest, res: NextApiResponse) { - const { gid } = req.query; - - console.log("deletePlugin", gid); - - if (!gid || _.isArray(gid)) { - return res.status(400).end(); - } - - if (isStockPlugin(gid as string)) { - return res.status(400).json({ error: "Cannot delete stock plugin" }); - } - - if (req.method === 'DELETE') { - try { - await deletePlugin(gid as string, true); - return res.status(200).end(); - } catch (e) { - console.log("error", e); - return res.status(400).json({ error: e }) - } - } else { - return res.status(405); - } -} \ No newline at end of file +import type { NextApiRequest, NextApiResponse } from 'next'; +import { deletePlugin, isStockPlugin } from 'server/pluginManager'; +import _ from 'lodash'; + +export default async function deletePluginAPI( + req: NextApiRequest, + res: NextApiResponse +) { + const { gid } = req.query; + + console.log('deletePlugin', gid); + + if (!gid || _.isArray(gid)) { + return res.status(400).end(); + } + + if (isStockPlugin(gid as string)) { + return res.status(400).json({ error: 'Cannot delete stock plugin' }); + } + + if (req.method === 'DELETE') { + try { + await deletePlugin(gid as string, true); + return res.status(200).end(); + } catch (e) { + console.log('error', e); + return res.status(400).json({ error: e }); + } + } else { + return res.status(405); + } +} diff --git a/ai-verify-portal/pages/api/plugins/list.ts b/ai-verify-portal/pages/api/plugins/list.ts index 319cc74a3..9d3c5deb3 100644 --- a/ai-verify-portal/pages/api/plugins/list.ts +++ b/ai-verify-portal/pages/api/plugins/list.ts @@ -1,8 +1,11 @@ -import type { NextApiRequest, NextApiResponse } from 'next' - -import { getPlugins } from 'server/pluginManager'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const plugins = await getPlugins(); - res.status(200).json(plugins) -} \ No newline at end of file +import type { NextApiRequest, NextApiResponse } from 'next'; + +import { getPlugins } from 'server/pluginManager'; + +export default async function handler( + req: NextApiRequest, + res: NextApiResponse +) { + const plugins = await getPlugins(); + res.status(200).json(plugins); +} diff --git a/ai-verify-portal/pages/api/plugins/upload.ts b/ai-verify-portal/pages/api/plugins/upload.ts index 0a61da46d..d3a2f369c 100644 --- a/ai-verify-portal/pages/api/plugins/upload.ts +++ b/ai-verify-portal/pages/api/plugins/upload.ts @@ -1,98 +1,106 @@ -// Backend -import type { NextApiRequest, NextApiResponse } from 'next' - -import path from 'node:path'; -import fs from 'node:fs'; -import os from 'node:os'; - -import formidable from 'formidable'; -import { existsSync, mkdirSync } from 'node:fs'; -import _ from 'lodash'; -import AdmZip from 'adm-zip'; - -import { installPlugin } from 'server/pluginManager'; - -// const TEMP_DIR = './temp'; -const TEMP_DIR = os.tmpdir(); - -if (!existsSync(TEMP_DIR)) { - mkdirSync(TEMP_DIR); -} - -export const config = { - api: { - bodyParser: false, - }, -}; - -function deleteTempFiles(files: formidable.Files) { - for (const file of Object.values(files as { [file: string]: formidable.File})) { - fs.rmSync(file.filepath, { force:true }) - } -} - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const form = new formidable.IncomingForm({ - uploadDir: TEMP_DIR, - // keepExtensions: true, - allowEmptyFiles: false, - maxFileSize: 500*1024*1024, // 500mb - }); - form.parse(req, async 
(err, _fields, files) => { - // console.log("uploaded", err, files); - if (err) { - deleteTempFiles(files); - return res.status(500).end() - } - - if (_.isNil(files.myFile)) { - deleteTempFiles(files) - return res.status(400).end(); - } - - let file = files.myFile as any; - // console.log("isSingle", file) - - let tmpdir = path.join(TEMP_DIR, `plugin_${file.newFilename}`); - try { - let tmpPluginPath = tmpdir; - const zip = new AdmZip(file.filepath); - let zipEntries = zip.getEntries(); - let found = false; - for (let entry of zipEntries) { - // console.log("entry", entry.isDirectory, entry.entryName); - // check if any subdir - if (entry.isDirectory && (entry.entryName.endsWith("inputs/") || entry.entryName.endsWith("widgets/") || entry.entryName.endsWith("algorithms/") || entry.entryName.endsWith("templates/"))) { - found = true; - let ret = entry.entryName.match(/^(.+)\/(inputs|widgets|algorithms|templates)\//) - // console.log("ret", ret) - if (ret) { - tmpPluginPath = path.join(tmpPluginPath, ret[1]) - } - break; - } - } - if (!found) { - throw new Error("Invalid plugin") - } - // console.log("tmpPluginPath", tmpPluginPath) - zip.extractAllTo(tmpdir, true); - const newPlugin = await installPlugin(tmpPluginPath); - res.status(200).json(newPlugin); - } catch (error) { - let errMsg: string; - if (error instanceof Error) { - errMsg = error.message; - } else { - errMsg = String(error); - } - res.status(400).json({ error: errMsg }) - } finally { - if (fs.existsSync(tmpdir)) { - fs.rmdirSync(tmpdir, { recursive:true }) - } - deleteTempFiles(files); - } - - }); -} +// Backend +import type { NextApiRequest, NextApiResponse } from 'next'; + +import path from 'node:path'; +import fs from 'node:fs'; +import os from 'node:os'; + +import formidable from 'formidable'; +import { existsSync, mkdirSync } from 'node:fs'; +import _ from 'lodash'; +import AdmZip from 'adm-zip'; + +import { installPlugin } from 'server/pluginManager'; + +const TEMP_DIR = os.tmpdir(); + +if (!existsSync(TEMP_DIR)) { + mkdirSync(TEMP_DIR); +} + +export const config = { + api: { + bodyParser: false, + }, +}; + +function deleteTempFiles(files: formidable.Files) { + for (const file of Object.values( + files as { [file: string]: formidable.File } + )) { + fs.rmSync(file.filepath, { force: true }); + } +} + +export default async function handler( + req: NextApiRequest, + res: NextApiResponse +) { + const form = new formidable.IncomingForm({ + uploadDir: TEMP_DIR, + allowEmptyFiles: false, + maxFileSize: 500 * 1024 * 1024, // 500mb + }); + form.parse(req, async (err, _fields, files) => { + if (err) { + deleteTempFiles(files); + return res.status(500).end(); + } + + if (_.isNil(files.myFile)) { + deleteTempFiles(files); + return res.status(400).end(); + } + + const file = files.myFile as formidable.File; + + const tmpdir = path.join(TEMP_DIR, `plugin_${file.newFilename}`); + try { + let tmpPluginPath = tmpdir; + const zip = new AdmZip(file.filepath); + const zipEntries = zip.getEntries(); + let found = false; + for (const entry of zipEntries) { + // console.log("entry", entry.isDirectory, entry.entryName); + // check if any subdir + if ( + entry.isDirectory && + (entry.entryName.endsWith('inputs/') || + entry.entryName.endsWith('widgets/') || + entry.entryName.endsWith('algorithms/') || + entry.entryName.endsWith('templates/')) + ) { + found = true; + const ret = entry.entryName.match( + /^(.+)\/(inputs|widgets|algorithms|templates)\// + ); + // console.log("ret", ret) + if (ret) { + tmpPluginPath = path.join(tmpPluginPath, 
ret[1]); + } + break; + } + } + if (!found) { + throw new Error('Invalid plugin'); + } + // console.log("tmpPluginPath", tmpPluginPath) + zip.extractAllTo(tmpdir, true); + const newPlugin = await installPlugin(tmpPluginPath); + res.status(200).json(newPlugin); + } catch (error) { + let errMsg: string; + if (error instanceof Error) { + errMsg = error.message; + } else { + errMsg = String(error); + } + res.status(400).json({ error: errMsg }); + } finally { + if (fs.existsSync(tmpdir)) { + fs.rmdirSync(tmpdir, { recursive: true }); + } + deleteTempFiles(files); + } + }); +} diff --git a/ai-verify-portal/pages/assets.tsx b/ai-verify-portal/pages/assets.tsx index 8f64296ee..e18a5f2ac 100644 --- a/ai-verify-portal/pages/assets.tsx +++ b/ai-verify-portal/pages/assets.tsx @@ -1,8 +1,8 @@ -import AssetsModule from 'src/modules/assets' +import AssetsModule from 'src/modules/assets'; /** * Home page */ export default function AssetsPage() { - return () -} \ No newline at end of file + return ; +} diff --git a/ai-verify-portal/pages/assets/datasets.tsx b/ai-verify-portal/pages/assets/datasets.tsx index bfc1af3a9..650391713 100644 --- a/ai-verify-portal/pages/assets/datasets.tsx +++ b/ai-verify-portal/pages/assets/datasets.tsx @@ -1,6 +1,5 @@ import DatasetsModule from 'src/modules/assets/datasets'; - export default function DatasetsPage() { - return () -} \ No newline at end of file + return ; +} diff --git a/ai-verify-portal/pages/assets/models.tsx b/ai-verify-portal/pages/assets/models.tsx index f609b64d2..daef139ee 100644 --- a/ai-verify-portal/pages/assets/models.tsx +++ b/ai-verify-portal/pages/assets/models.tsx @@ -1,6 +1,5 @@ -import ModelsModule from 'src/modules/assets/models' - +import ModelsModule from 'src/modules/assets/models'; export default function ModelsPage() { - return () -} \ No newline at end of file + return ; +} diff --git a/ai-verify-portal/pages/assets/newDataset.tsx b/ai-verify-portal/pages/assets/newDataset.tsx index 79878dcf1..9fda9dcef 100644 --- a/ai-verify-portal/pages/assets/newDataset.tsx +++ b/ai-verify-portal/pages/assets/newDataset.tsx @@ -1,5 +1,5 @@ import NewDatasetModule from 'src/modules/assets/newDataset'; export default function NewDatasetPage() { - return () -} \ No newline at end of file + return ; +} diff --git a/ai-verify-portal/pages/assets/newModel.tsx b/ai-verify-portal/pages/assets/newModel.tsx index 57943d797..470b58ac2 100644 --- a/ai-verify-portal/pages/assets/newModel.tsx +++ b/ai-verify-portal/pages/assets/newModel.tsx @@ -1,6 +1,5 @@ import NewModelModule from 'src/modules/assets/newModel'; - export default function NewModelPage() { - return () -} \ No newline at end of file + return ; +} diff --git a/ai-verify-portal/pages/assets/newModelUpload.tsx b/ai-verify-portal/pages/assets/newModelUpload.tsx index f836a4b5f..f899fcf0d 100644 --- a/ai-verify-portal/pages/assets/newModelUpload.tsx +++ b/ai-verify-portal/pages/assets/newModelUpload.tsx @@ -1,5 +1,5 @@ import NewModelUploadModule from 'src/modules/assets/newModelUpload'; export default function NewModelUploadPage() { - return () -} \ No newline at end of file + return ; +} diff --git a/ai-verify-portal/pages/assets/newPipelineUpload.tsx b/ai-verify-portal/pages/assets/newPipelineUpload.tsx index 7d0475f30..4f2c3e22a 100644 --- a/ai-verify-portal/pages/assets/newPipelineUpload.tsx +++ b/ai-verify-portal/pages/assets/newPipelineUpload.tsx @@ -1,5 +1,5 @@ import NewPipelineUploadModule from 'src/modules/assets/newPipelineUpload'; export default function NewPipelineUploadPage() { - return () -} 
\ No newline at end of file + return ; +} diff --git a/ai-verify-portal/pages/home.tsx b/ai-verify-portal/pages/home.tsx index 01f391370..53a328246 100644 --- a/ai-verify-portal/pages/home.tsx +++ b/ai-verify-portal/pages/home.tsx @@ -1,23 +1,23 @@ -import HomeModule from 'src/modules/home' -import { listProjects } from 'server/lib/projectServiceBackend'; -import Project from 'src/types/project.interface'; - -export async function getServerSideProps() { - const projects = await listProjects(); - return { - props: { - projects: projects, - } - } -} - -type Props = { - projects: Project[] -} - -/** - * Home page - */ -export default function HomePage({projects}: Props) { - return () -} \ No newline at end of file +import HomeModule from 'src/modules/home'; +import { listProjects } from 'server/lib/projectServiceBackend'; +import Project from 'src/types/project.interface'; + +export async function getServerSideProps() { + const projects = await listProjects(); + return { + props: { + projects: projects, + }, + }; +} + +type Props = { + projects: Project[]; +}; + +/** + * Home page + */ +export default function HomePage({ projects }: Props) { + return ; +} diff --git a/ai-verify-portal/pages/index.tsx b/ai-verify-portal/pages/index.tsx index bf87e0f9c..581300866 100644 --- a/ai-verify-portal/pages/index.tsx +++ b/ai-verify-portal/pages/index.tsx @@ -2,7 +2,5 @@ * Not in use currently. User should be redirected to homepage or login page (future). */ export default function Index() { - return ( -
- ); + return
; } diff --git a/ai-verify-portal/pages/plugins.tsx b/ai-verify-portal/pages/plugins.tsx index c212d9ba8..860a708a5 100644 --- a/ai-verify-portal/pages/plugins.tsx +++ b/ai-verify-portal/pages/plugins.tsx @@ -1,8 +1,6 @@ -import React from 'react'; -import PluginsModule from 'src/modules/plugins'; - -export default function PluginsPage() { - return ( - - ) -} \ No newline at end of file +import React from 'react'; +import PluginsModule from 'src/modules/plugins'; + +export default function PluginsPage() { + return ; +} diff --git a/ai-verify-portal/pages/project/[id].tsx b/ai-verify-portal/pages/project/[id].tsx index 3c2ebd002..a59c3289e 100644 --- a/ai-verify-portal/pages/project/[id].tsx +++ b/ai-verify-portal/pages/project/[id].tsx @@ -1,54 +1,67 @@ -import { useRouter } from 'next/router' -import { GetServerSideProps } from 'next' - -import ProjectModule from 'src/modules/project'; -import Project, { ModelAndDatasets } from 'src/types/project.interface'; -import PluginManagerType from 'src/types/pluginManager.interface'; -import { getPlugins } from 'server/pluginManager'; - -import { listProjects, getProject } from 'server/lib/projectServiceBackend'; -import Dataset, { DatasetColumn } from 'src/types/dataset.interface'; -import ModelFile from 'src/types/model.interface'; - -export const getServerSideProps: GetServerSideProps = async ({params}) => { - const id = params!.id as string; - const data = await getProject(id) - const pluginManager = await getPlugins(); - const { __typename, ...modelAndDatasets } = data.modelAndDatasets as ModelAndDatasets & { __typename: string | undefined }; - if (modelAndDatasets) { - const { groundTruthDataset, model, testDataset } = modelAndDatasets; - if (groundTruthDataset) { - const { __typename, ...rest } = groundTruthDataset as Dataset & { __typename: string }; - modelAndDatasets.groundTruthDataset = rest; - } - if (model) { - const { __typename, ...rest } = model as ModelFile & { __typename: string }; - modelAndDatasets.model = rest; - } - if (testDataset) { - const { __typename, ...rest } = testDataset as Dataset & { __typename: string }; - modelAndDatasets.testDataset = rest - } - } - - data.modelAndDatasets = modelAndDatasets; - - return { - props: { - pluginManager, - data - }, - } -} - -type Props = { - data: Project, - pluginManager: PluginManagerType -} - -export default function ProjectUpdatePage({data, pluginManager}: Props) { - const router = useRouter() - const { pid } = router.query - - return () -} \ No newline at end of file +import { GetServerSideProps } from 'next'; +import ProjectModule from 'src/modules/project'; +import Project, { ModelAndDatasets } from 'src/types/project.interface'; +import PluginManagerType from 'src/types/pluginManager.interface'; +import { getPlugins } from 'server/pluginManager'; + +import { getProject } from 'server/lib/projectServiceBackend'; +import Dataset from 'src/types/dataset.interface'; +import ModelFile from 'src/types/model.interface'; + +export const getServerSideProps: GetServerSideProps = async ({ params }) => { + if (!params || !params.id) { + console.log('url parameter required - id'); + return { notFound: true }; + } + + const id = params.id as string; + const data = await getProject(id); + const pluginManager = await getPlugins(); + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { __typename, ...modelAndDatasets } = + data.modelAndDatasets as ModelAndDatasets & { + __typename: string | undefined; + }; + if (modelAndDatasets) { + const { groundTruthDataset, model, 
testDataset } = modelAndDatasets; + if (groundTruthDataset) { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { __typename, ...rest } = groundTruthDataset as Dataset & { + __typename: string; + }; + modelAndDatasets.groundTruthDataset = rest; + } + if (model) { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { __typename, ...rest } = model as ModelFile & { + __typename: string; + }; + modelAndDatasets.model = rest; + } + if (testDataset) { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { __typename, ...rest } = testDataset as Dataset & { + __typename: string; + }; + modelAndDatasets.testDataset = rest; + } + } + + data.modelAndDatasets = modelAndDatasets; + + return { + props: { + pluginManager, + data, + }, + }; +}; + +type Props = { + data: Project; + pluginManager: PluginManagerType; +}; + +export default function ProjectUpdatePage({ data, pluginManager }: Props) { + return ; +} diff --git a/ai-verify-portal/pages/project/create.tsx b/ai-verify-portal/pages/project/create.tsx index 345a0ec21..e0e94a4f9 100644 --- a/ai-verify-portal/pages/project/create.tsx +++ b/ai-verify-portal/pages/project/create.tsx @@ -1,31 +1,35 @@ -import { GetServerSideProps } from 'next' -import ProjectModule from 'src/modules/project'; -import Project from 'src/types/project.interface'; -import PluginManagerType from 'src/types/pluginManager.interface'; -import { getPlugins } from 'server/pluginManager'; - -export const getServerSideProps: GetServerSideProps = async (context) => { - // console.log("static check", pluginManager) - const pluginManager = await getPlugins(); - return { - props: { - pluginManager - }, - } -} - -type Props = { - pluginManager: PluginManagerType -} - -export default function ProjectCreatePage({ pluginManager }: Props) { - const emptyProjectState: Partial = { // TODO: 👈 look into correcting type check here - projectInfo: { - name: "", - }, - pages: [], - inputBlocks: [], - globalVars: [], - } - return () -} \ No newline at end of file +import { GetServerSideProps } from 'next'; +import ProjectModule from 'src/modules/project'; +import Project from 'src/types/project.interface'; +import PluginManagerType from 'src/types/pluginManager.interface'; +import { getPlugins } from 'server/pluginManager'; + +export const getServerSideProps: GetServerSideProps = async () => { + const pluginManager = await getPlugins(); + return { + props: { + pluginManager, + }, + }; +}; + +type Props = { + pluginManager: PluginManagerType; +}; + +export default function ProjectCreatePage({ pluginManager }: Props) { + const emptyProjectState: Partial = { + projectInfo: { + name: '', + }, + pages: [], + inputBlocks: [], + globalVars: [], + }; + return ( + + ); +} diff --git a/ai-verify-portal/pages/projectTemplate/[id].tsx b/ai-verify-portal/pages/projectTemplate/[id].tsx index 386054ca1..b91298ee7 100644 --- a/ai-verify-portal/pages/projectTemplate/[id].tsx +++ b/ai-verify-portal/pages/projectTemplate/[id].tsx @@ -1,33 +1,34 @@ -import { useRouter } from 'next/router' -import { GetServerSideProps } from 'next' - -import ProjectTemplateModule from 'src/modules/projectTemplate'; -import ProjectTemplate from 'src/types/projectTemplate.interface'; -import PluginManagerType from 'src/types/pluginManager.interface'; -import { getPlugins } from 'server/pluginManager'; - -import { getProjectTemplate } from 'server/lib/projectServiceBackend'; - -export const getServerSideProps: GetServerSideProps = async ({params}) => { - const id = params!.id as 
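// Illustrative sketch, not part of the diff: the reworked pages share two moves — a params guard
// that returns Next.js's `notFound` result instead of asserting `params!.id`, and stripping
// Apollo's `__typename` before the object is passed as serializable props. `stripTypename` below
// is a hypothetical helper for illustration; the pages in this patch inline the destructuring.
import { GetServerSideProps } from 'next';

function stripTypename<T extends { __typename?: string }>(
  obj: T
): Omit<T, '__typename'> {
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  const { __typename, ...rest } = obj;
  return rest;
}

export const getServerSideProps: GetServerSideProps = async ({ params }) => {
  if (!params || !params.id) {
    return { notFound: true }; // renders the 404 page instead of throwing on a missing id
  }
  const id = params.id as string;
  // fetch data here and sanitize it with stripTypename(...) before returning it as props
  return { props: { id } };
};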
string; - const data = await getProjectTemplate(id) - const pluginManager = await getPlugins(); - return { - props: { - pluginManager, - data - }, - } -} - -type Props = { - data: ProjectTemplate, - pluginManager: PluginManagerType -} - -export default function ProjectUpdatePage({data, pluginManager}: Props) { - const router = useRouter() - const { pid } = router.query - - return () -} \ No newline at end of file +import { GetServerSideProps } from 'next'; + +import ProjectTemplateModule from 'src/modules/projectTemplate'; +import ProjectTemplate from 'src/types/projectTemplate.interface'; +import PluginManagerType from 'src/types/pluginManager.interface'; +import { getPlugins } from 'server/pluginManager'; + +import { getProjectTemplate } from 'server/lib/projectServiceBackend'; + +export const getServerSideProps: GetServerSideProps = async ({ params }) => { + if (!params || !params.id) { + console.log('url parameter required - id'); + return { notFound: true }; + } + + const id = params.id as string; + const data = await getProjectTemplate(id); + const pluginManager = await getPlugins(); + return { + props: { + pluginManager, + data, + }, + }; +}; + +type Props = { + data: ProjectTemplate; + pluginManager: PluginManagerType; +}; + +export default function ProjectUpdatePage({ data, pluginManager }: Props) { + return ; +} diff --git a/ai-verify-portal/pages/projectTemplate/create.tsx b/ai-verify-portal/pages/projectTemplate/create.tsx index fd93ce062..ddab7a411 100644 --- a/ai-verify-portal/pages/projectTemplate/create.tsx +++ b/ai-verify-portal/pages/projectTemplate/create.tsx @@ -1,32 +1,34 @@ -import { GetServerSideProps } from 'next' -import ProjectTemplateModule from 'src/modules/projectTemplate'; -import ProjectTemplate from 'src/types/projectTemplate.interface'; -import PluginManagerType from 'src/types/pluginManager.interface'; -// import pluginManager from 'server/pluginManager'; -import { getPlugins } from 'server/pluginManager'; - -export const getServerSideProps: GetServerSideProps = async (context) => { - // console.log("static check", pluginManager) - const pluginManager = await getPlugins(); - // console.log("pluginManager", pluginManager) - return { - props: { - pluginManager - }, - } -} - -type Props = { - pluginManager: PluginManagerType -} - -export default function ProjectTemplateCreatePage({ pluginManager }: Props) { - const emptyProjectState: Partial = { //TODO: 👈 look into correcting type here. 
- projectInfo: { - name: "", - }, - pages: [], - globalVars: [], - } - return () -} \ No newline at end of file +import { GetServerSideProps } from 'next'; +import ProjectTemplateModule from 'src/modules/projectTemplate'; +import ProjectTemplate from 'src/types/projectTemplate.interface'; +import PluginManagerType from 'src/types/pluginManager.interface'; +import { getPlugins } from 'server/pluginManager'; + +export const getServerSideProps: GetServerSideProps = async () => { + const pluginManager = await getPlugins(); + return { + props: { + pluginManager, + }, + }; +}; + +type Props = { + pluginManager: PluginManagerType; +}; + +export default function ProjectTemplateCreatePage({ pluginManager }: Props) { + const emptyProjectState: Partial = { + projectInfo: { + name: '', + }, + pages: [], + globalVars: [], + }; + return ( + + ); +} diff --git a/ai-verify-portal/pages/projectTemplates.tsx b/ai-verify-portal/pages/projectTemplates.tsx index 55443bac5..a42ffa070 100644 --- a/ai-verify-portal/pages/projectTemplates.tsx +++ b/ai-verify-portal/pages/projectTemplates.tsx @@ -1,25 +1,20 @@ -import React, { useState } from 'react'; - -import { listProjectTemplates } from 'server/lib/projectServiceBackend'; -import TemplateListModule from 'src/modules/projectTemplate/templateListModule'; -import ProjectTemplate from 'src/types/projectTemplate.interface'; - -export async function getServerSideProps() { - const templates = await listProjectTemplates(); - return { - props: { - templates, - } - } -} - - -type Props = { - templates: ProjectTemplate[] -} - -export default function ProjectTemplateListPage({ templates }: Props) { - return ( - - ) -} \ No newline at end of file +import { listProjectTemplates } from 'server/lib/projectServiceBackend'; +import TemplateListModule from 'src/modules/projectTemplate/templateListModule'; +import ProjectTemplate from 'src/types/projectTemplate.interface'; + +export async function getServerSideProps() { + const templates = await listProjectTemplates(); + return { + props: { + templates, + }, + }; +} + +type Props = { + templates: ProjectTemplate[]; +}; + +export default function ProjectTemplateListPage({ templates }: Props) { + return ; +} diff --git a/ai-verify-portal/pages/reportStatus/[id].tsx b/ai-verify-portal/pages/reportStatus/[id].tsx index d46cfb90a..16043818c 100644 --- a/ai-verify-portal/pages/reportStatus/[id].tsx +++ b/ai-verify-portal/pages/reportStatus/[id].tsx @@ -1,30 +1,29 @@ -import { GetServerSideProps } from 'next'; -import ReportStatusModule from 'src/modules/reportStatus'; -import { Report } from 'src/types/project.interface'; -import { getReport } from 'server/lib/projectServiceBackend'; -import PluginManagerType from 'src/types/pluginManager.interface'; - - -export const getServerSideProps: GetServerSideProps = async ({params}) => { - if (!params || !params.id) { - console.log('url parameter required - id'); - return { notFound: true }; - } - - const id = params.id as string; - const report = await getReport(id); - return { - props: { - report - }, - } -} - -type Props = { - report: Report, - pluginManager: PluginManagerType -} - -export default function ReportStatusPage({report}: Props) { - return () -} \ No newline at end of file +import { GetServerSideProps } from 'next'; +import ReportStatusModule from 'src/modules/reportStatus'; +import { Report } from 'src/types/project.interface'; +import { getReport } from 'server/lib/projectServiceBackend'; +import PluginManagerType from 'src/types/pluginManager.interface'; + +export const 
getServerSideProps: GetServerSideProps = async ({ params }) => { + if (!params || !params.id) { + console.log('url parameter required - id'); + return { notFound: true }; + } + + const id = params.id as string; + const report = await getReport(id); + return { + props: { + report, + }, + }; +}; + +type Props = { + report: Report; + pluginManager: PluginManagerType; +}; + +export default function ReportStatusPage({ report }: Props) { + return ; +} diff --git a/ai-verify-portal/pages/reportStatus/printview/[id].tsx b/ai-verify-portal/pages/reportStatus/printview/[id].tsx index 2ebfc2e9a..f1a1606a7 100644 --- a/ai-verify-portal/pages/reportStatus/printview/[id].tsx +++ b/ai-verify-portal/pages/reportStatus/printview/[id].tsx @@ -1,48 +1,46 @@ -import { useRouter } from 'next/router' -import { GetServerSideProps } from 'next' - -import PrintViewModule from 'src/modules/reportStatus/printView'; -import {Report} from 'src/types/project.interface'; -import { getReport } from 'server/lib/projectServiceBackend'; -import { getMDXBundle } from 'server/bundler'; - -export const getServerSideProps: GetServerSideProps = async ({params}) => { - const id = params!.id as string; - const report = await getReport(id) - let mdxBundleMap = {} as any; - for (let page of report.projectSnapshot.pages) { - for (let widget of page.reportWidgets) { - if (!(widget.widgetGID in mdxBundleMap)) { - try { - const bundle = await getMDXBundle(widget.widgetGID); - if (bundle) - mdxBundleMap[widget.widgetGID] = bundle; - else - mdxBundleMap[widget.widgetGID] = null; - } catch (e) { - console.log("Error getting mdx bundle for", widget.widgetGID); - mdxBundleMap[widget.widgetGID] = null; - } - } - } - } - - return { - props: { - report, - mdxBundleMap, - }, - } -} - -type Props = { - report: Report; - mdxBundleMap: any; -} - -export default function PrintViewPage({report, mdxBundleMap}: Props) { - const router = useRouter() - const { pid } = router.query - - return () -} \ No newline at end of file +import { GetServerSideProps } from 'next'; + +import PrintViewModule from 'src/modules/reportStatus/printView'; +import { Report } from 'src/types/project.interface'; +import { getReport } from 'server/lib/projectServiceBackend'; +import { getMDXBundle } from 'server/bundler'; + +export const getServerSideProps: GetServerSideProps = async ({ params }) => { + if (!params || !params.id) { + console.log('url parameter required - id'); + return { notFound: true }; + } + const id = params.id as string; + const report = await getReport(id); + const mdxBundleMap = {} as any; + for (const page of report.projectSnapshot.pages) { + for (const widget of page.reportWidgets) { + if (!(widget.widgetGID in mdxBundleMap)) { + try { + const bundle = await getMDXBundle(widget.widgetGID); + if (bundle) mdxBundleMap[widget.widgetGID] = bundle; + else mdxBundleMap[widget.widgetGID] = null; + } catch (e) { + console.log('Error getting mdx bundle for', widget.widgetGID); + mdxBundleMap[widget.widgetGID] = null; + } + } + } + } + + return { + props: { + report, + mdxBundleMap, + }, + }; +}; + +type Props = { + report: Report; + mdxBundleMap: any; +}; + +export default function PrintViewPage({ report, mdxBundleMap }: Props) { + return ; +} diff --git a/ai-verify-portal/server/bundler.ts b/ai-verify-portal/server/bundler.ts index 012963ba5..4ea1cd0bf 100644 --- a/ai-verify-portal/server/bundler.ts +++ b/ai-verify-portal/server/bundler.ts @@ -1,60 +1,61 @@ -import path from 'node:path'; -import { bundleMDX } from 'mdx-bundler' -// import {getMDXComponent} 
from 'mdx-bundler/client'; -import { getByGID } from './pluginManager'; -import { pluginPath } from './lib/pluginService'; -import { BaseMDXComponent, InputBlock } from 'src/types/plugin.interface'; -import remarkMdxImages from 'remark-mdx-images'; -import remarkGfm from 'remark-gfm'; - -export async function getMDXBundle(gid: string) { - const widget = await getByGID(gid) as BaseMDXComponent | null; - if (!widget) - return; - // console.log("getMDXBundle", pluginPath, widget.mdxPath) - const result = await bundleMDX({ - cwd: pluginPath, - file: path.join(pluginPath, widget.mdxPath), - // globals: {'MyCheckbox':'MyCheckbox'}, - mdxOptions: options => { - options.remarkPlugins = [...(options.remarkPlugins ?? []), remarkMdxImages, remarkGfm] - return options - }, - esbuildOptions: options => { - options.loader = { - ...options.loader, - '.png': 'dataurl', - } - return options - }, - }) - // const {code, frontmatter} = result; - return { ...result, widget }; -} - -export async function getSummaryBundle(gid: string) { - const widget = await getByGID(gid) as InputBlock | null; - if (!widget) - return; - if (!widget.summaryPath) - return; - // console.log("getMDXBundle", pluginPath, widget.mdxPath) - const result = await bundleMDX({ - cwd: pluginPath, - file: path.join(pluginPath, widget.summaryPath), - // globals: {'MyCheckbox':'MyCheckbox'}, - // mdxOptions: options => { - // options.remarkPlugins = [...(options.remarkPlugins ?? []), remarkMdxImages, remarkGfm] - // return options - // }, - // esbuildOptions: options => { - // options.loader = { - // ...options.loader, - // '.png': 'dataurl', - // } - // return options - // }, - }) - // const {code, frontmatter} = result; - return { ...result, widget }; -} \ No newline at end of file +import path from 'node:path'; +import { bundleMDX } from 'mdx-bundler'; +// import {getMDXComponent} from 'mdx-bundler/client'; +import { getByGID } from './pluginManager'; +import { pluginPath } from './lib/pluginService'; +import { BaseMDXComponent, InputBlock } from 'src/types/plugin.interface'; +import remarkMdxImages from 'remark-mdx-images'; +import remarkGfm from 'remark-gfm'; + +export async function getMDXBundle(gid: string) { + const widget = (await getByGID(gid)) as BaseMDXComponent | null; + if (!widget) return; + // console.log("getMDXBundle", pluginPath, widget.mdxPath) + const result = await bundleMDX({ + cwd: pluginPath, + file: path.join(pluginPath, widget.mdxPath), + // globals: {'MyCheckbox':'MyCheckbox'}, + mdxOptions: (options) => { + options.remarkPlugins = [ + ...(options.remarkPlugins ?? []), + remarkMdxImages, + remarkGfm, + ]; + return options; + }, + esbuildOptions: (options) => { + options.loader = { + ...options.loader, + '.png': 'dataurl', + }; + return options; + }, + }); + // const {code, frontmatter} = result; + return { ...result, widget }; +} + +export async function getSummaryBundle(gid: string) { + const widget = (await getByGID(gid)) as InputBlock | null; + if (!widget) return; + if (!widget.summaryPath) return; + // console.log("getMDXBundle", pluginPath, widget.mdxPath) + const result = await bundleMDX({ + cwd: pluginPath, + file: path.join(pluginPath, widget.summaryPath), + // globals: {'MyCheckbox':'MyCheckbox'}, + // mdxOptions: options => { + // options.remarkPlugins = [...(options.remarkPlugins ?? 
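// Illustrative sketch, not part of the diff: bundleMDX returns `{ code, frontmatter }`, and the
// commented-out `getMDXComponent` import above hints at the client side. A typical consumer of
// the bundle produced by getMDXBundle looks roughly like this; the component name
// `WidgetRenderer` and its prop shape are assumptions, not portal code.
import { useMemo } from 'react';
import { getMDXComponent } from 'mdx-bundler/client';

type WidgetRendererProps = { code: string };

export default function WidgetRenderer({ code }: WidgetRendererProps) {
  // Recompile the bundled MDX into a component only when the code string changes.
  const MDXComponent = useMemo(() => getMDXComponent(code), [code]);
  return <MDXComponent />;
}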
[]), remarkMdxImages, remarkGfm] + // return options + // }, + // esbuildOptions: options => { + // options.loader = { + // ...options.loader, + // '.png': 'dataurl', + // } + // return options + // }, + }); + // const {code, frontmatter} = result; + return { ...result, widget }; +} diff --git a/ai-verify-portal/server/lib/assetServiceBackend.ts b/ai-verify-portal/server/lib/assetServiceBackend.ts index 804585ab4..ff71333da 100644 --- a/ai-verify-portal/server/lib/assetServiceBackend.ts +++ b/ai-verify-portal/server/lib/assetServiceBackend.ts @@ -1,65 +1,63 @@ -import { gql, useQuery, useMutation } from "@apollo/client"; -import graphqlClient from "src/lib/graphqlClient"; +import { gql } from '@apollo/client'; +import graphqlClient from 'src/lib/graphqlClient'; import Dataset from 'src/types/dataset.interface'; import ModelFile from 'src/types/model.interface'; -import _ from 'lodash'; export const GET_DATASETS = gql` - query Query { - datasets { - id - name - ctime - size - status - dataColumns { - id - name - datatype - label - } - serializer - dataFormat - errorMessages - } + query Query { + datasets { + id + name + ctime + size + status + dataColumns { + id + name + datatype + label + } + serializer + dataFormat + errorMessages } -` + } +`; export async function listDatasets(): Promise { - const client = graphqlClient(true) - const { data } = await client.query({ - query: GET_DATASETS - }) + const client = graphqlClient(true); + const { data } = await client.query({ + query: GET_DATASETS, + }); - let datasets = data.datasets as Dataset[]; - - return datasets; -} + const datasets = data.datasets as Dataset[]; + return datasets; +} export const GET_MODELS = gql` - query Query { - modelFiles { - id - name - ctime - size - status - serializer - modelFormat - errorMessages - } + query Query { + modelFiles { + id + name + ctime + size + status + serializer + modelFormat + errorMessages } -` + } +`; export async function listModels(): Promise { - const client = graphqlClient(true) - const { data } = await client.query({ - query: GET_MODELS, - }); + const client = graphqlClient(true); + const { data } = await client.query({ + query: GET_MODELS, + }); - let modelFiles = data.modelFiles as ModelFile[]; - - return modelFiles; -} \ No newline at end of file + const modelFiles = data.modelFiles as ModelFile[]; + + return modelFiles; +} diff --git a/ai-verify-portal/server/lib/pluginService.ts b/ai-verify-portal/server/lib/pluginService.ts index 38b97f526..7c467074f 100644 --- a/ai-verify-portal/server/lib/pluginService.ts +++ b/ai-verify-portal/server/lib/pluginService.ts @@ -1,504 +1,524 @@ -import path from 'node:path'; -import fs, { statSync } from 'node:fs'; -import { exec } from 'node:child_process'; -import { Validator } from 'jsonschema'; -const validator = new Validator(); - -import redisConnect from '../redisClient'; -export const redis = redisConnect(); - -import { createProjectTemplate } from './projectServiceBackend'; - -const PLUGIN_DIR = path.join(process.cwd(), 'plugins'); -export const pluginPath = PLUGIN_DIR; - -const metaSuffix = '.meta.json'; -const pluginMetaFile = "plugin.meta.json"; - -const python_cmd = process.env.PYTHON_EXECUTABLE || "python3"; -const python_script_checker = path.join(process.cwd(), "server/scripts/syntax_checker.py"); - -// read in the schemas -import pluginSchema from 'config/ai-verify.plugin.schema.json'; -import algoSchema from 'config/ai-verify.algorithm.schema.json'; -import inputBlockSchema from 'config/ai-verify.inputBlock.schema.json'; -import 
templateSchema from 'config/ai-verify.template.schema.json'; -import templateDataSchema from 'config/ai-verify.template.data.json'; -import widgetSchema from 'config/ai-verify.widget.schema.json'; - -// const LAST_MODIFIED_KEY = "plugin:lastModified"; -const PLUGIN_SET_PREFIX = "plugin:list"; - -import AIFPlugin, { AIFPluginCache, PluginComponentType } from 'src/types/plugin.interface'; -import { bundleMDX } from 'mdx-bundler'; -import remarkMdxImages from 'remark-mdx-images'; -import remarkGfm from 'remark-gfm'; -import { toErrorWithMessage } from 'src/lib/errorUtils'; - -export function readJSONFromFile(filepath: string, errorReturn: any = {}): any { - try { - return JSON.parse(fs.readFileSync(filepath).toString('utf8')); - } catch (e) { - console.warn("Unable to read JSON from file", filepath); - return errorReturn; - } -} - -export function readRequirementsFile(filepath: string): string[] | undefined { - try { - const text = fs.readFileSync(filepath).toString('utf8'); - let ar = text.split(/\r?\n/).map(e => e.trim()).filter(e => e.length > 0) - return ar; - } catch (e) { - console.warn("Unable to read JSON from file", filepath); - return undefined; - } -} - -export async function deletePluginKeysFromRedis() { - // delete plugin set - let keys = await redis.keys(`${PLUGIN_SET_PREFIX}:*`); - for (const key of keys) - await redis.del(key); - // delete algo keys - keys = await redis.keys("algo:*"); - for (const key of keys) - await redis.del(key); - // delete widget keys - keys = await redis.keys("widget:*"); - for (const key of keys) - await redis.del(key); - // delete input block keys - keys = await redis.keys("inputBlock:*"); - for (const key of keys) - await redis.del(key); - keys = await redis.keys("template:*"); - for (const key of keys) - await redis.del(key); -} - -/** - * - * @param pdir Plugin directory - * @param cached for caching the data - * @returns - * @note assume that already validated - */ -export async function scanPluginDirectory(pdir: string, cached: AIFPluginCache | null = null): Promise { - console.log("scanPluginDirectory", pdir) - // read in the plugin meta file - const pluginMeta = readJSONFromFile(path.join(pdir, pluginMetaFile)) as AIFPlugin; - const pluginKey = `${PLUGIN_SET_PREFIX}:${pluginMeta.gid}`; - // read in report widgets if any - const widgets_subdir = path.join(pdir, "widgets"); - let txn = redis.multi(); - if (fs.existsSync(widgets_subdir)) { - const allfiles = fs.readdirSync(widgets_subdir); - const metaFiles = allfiles.filter(f => f.endsWith(metaSuffix)); - const reportWidgets = []; - for (const metaFile of metaFiles) { - // read widget meta json - const metaPath = path.join(widgets_subdir, metaFile); - const widget = readJSONFromFile(metaPath); - - widget.type = PluginComponentType.ReportWidget; - widget.gid = `${pluginMeta.gid}:${widget.cid}`; // auto set widget gid = : - widget.version = pluginMeta.version; - widget.pluginGID = pluginMeta.gid; - // set the path to the MDX - widget.mdxPath = path.relative(PLUGIN_DIR, path.join(widgets_subdir, `${widget.cid}.mdx`)); - - // auto format gid for dependencies - if (widget.dependencies) { - for (const dep of widget.dependencies) { - dep.gid = (dep.gid && dep.gid.length > 0)?`${dep.gid}:${dep.cid}`:`${pluginMeta.gid}:${dep.cid}`; - } - } - - // read in mock data if any - if (widget.mockdata) { - for (const mock of widget.mockdata) { - mock.gid = (mock.gid && mock.gid.length > 0)?`${mock.gid}:${mock.cid}`:`${pluginMeta.gid}:${mock.cid}`; - const datapath = path.join(widgets_subdir, mock.datapath); - 
const data = readJSONFromFile(datapath, null); - mock.data = data; - } - } - - const key = `widget:${widget.gid}`; - txn = txn.hSet(key, "data", JSON.stringify(widget)) - - reportWidgets.push(widget.cid); - if (cached) { - cached.reportWidgets.push(widget); - } - } - if (reportWidgets.length > 0) { - txn = txn.hSet(pluginKey, "reportWidgets", JSON.stringify(reportWidgets)); - } - } - - const inputs_subdir = path.join(pdir, "inputs"); - if (fs.existsSync(inputs_subdir)) { - const allfiles = fs.readdirSync(inputs_subdir); - const metaFiles = allfiles.filter(f => f.endsWith(metaSuffix)); - const inputBlocks = []; - for (const metaFile of metaFiles) { - // read widget meta json - const metaPath = path.join(inputs_subdir, metaFile); - const widget = readJSONFromFile(metaPath); - - widget.type = PluginComponentType.InputBlock; - widget.gid = `${pluginMeta.gid}:${widget.cid}`; // auto set widget gid = : - widget.version = pluginMeta.version; - widget.pluginGID = pluginMeta.gid; - if (!widget.width) - widget.width = "md"; // set default width to md - // set the path to the MDX - widget.mdxPath = path.relative(PLUGIN_DIR, path.join(inputs_subdir, `${widget.cid}.mdx`)) - widget.summaryPath = path.relative(PLUGIN_DIR, path.join(inputs_subdir, `${widget.cid}.summary.mdx`)) - const key = `inputBlock:${widget.gid}`; - txn = txn.hSet(key, "data", JSON.stringify(widget)); - inputBlocks.push(widget.cid); - if (cached) { - cached.inputBlocks.push(widget); - } - } - if (inputBlocks.length > 0) { - txn = txn.hSet(pluginKey, "inputBlocks", JSON.stringify(inputBlocks)); - } - } - - const templates_subdir = path.join(pdir, "templates"); - if (fs.existsSync(templates_subdir)) { - const allfiles = fs.readdirSync(templates_subdir); - const metaFiles = allfiles.filter(f => f.endsWith(metaSuffix)); - let templates = []; - for (const metaFile of metaFiles) { - // console.log("metaFile", metaFile) - // read widget meta json - const metaPath = path.join(templates_subdir, metaFile); - let template = readJSONFromFile(metaPath); - - template.type = PluginComponentType.Template; - template.gid = `${pluginMeta.gid}:${template.cid}`; // auto set widget gid = : - template.version = pluginMeta.version; - template.pluginGID = pluginMeta.gid; - - const dataPath = path.join(templates_subdir, `${template.cid}.data.json`); - let data = readJSONFromFile(dataPath); - data.fromPlugin = true; - data.projectInfo = { - name: template.name, - description: template.description || undefined, - company: template.author || undefined, - } - - const id = await createProjectTemplate(data); - // data.id = id; - // console.log("id", id); - - // template.data = path.relative(PLUGIN_DIR, path.join(templates_subdir, `${template.cid}.mdx`)) - const key = `template:${template.gid}`; - txn = txn.hSet(key, "data", JSON.stringify(template)); - txn = txn.hSet(key, "data2", JSON.stringify(data)); - txn = txn.hSet(key, "id", id); - // const typedWidget = widget as InputBlock; - // // console.log("typedWidget", typedWidget.type); - // pluginMeta.inputBlocks.push(typedWidget); - templates.push(template.cid); - if (cached) { - cached.templates.push(template); - // cached._mymap[widget.gid] = widget; - } - - } - if (templates.length > 0) { - // pluginMeta.inputBlocks = inputBlocks; - txn = txn.hSet(pluginKey, "templates", JSON.stringify(templates)); - } - } - - const algo_subdir = path.join(pdir, "algorithms"); - if (fs.existsSync(algo_subdir)) { - // console.log("algo_subdir", algo_subdir) - // console.log("version", pluginMeta) - const algodirs = 
fs.readdirSync(algo_subdir); - // console.log("allfiles", algodirs) - let algorithms = []; - for (const cid of algodirs) { - const mydir = path.join(algo_subdir, cid); - // console.log("mydir", mydir) - if (!fs.lstatSync(mydir).isDirectory()) - continue; - const metaPath = path.join(mydir, `${cid}.meta.json`); - let algo = readJSONFromFile(metaPath); - - algo.type = PluginComponentType.Algorithm; - algo.gid = `${pluginMeta.gid}:${algo.cid}`; // auto set widget gid = : - algo.version = pluginMeta.version; - algo.pluginGID = pluginMeta.gid; - algo.algoPath = path.join(algo_subdir, cid) - const key = `algo:${algo.gid}`; - // read requirements.txt - const requirements = readRequirementsFile(path.join(mydir, "requirements.txt")); - // read input and output schema - const inputSchema = readJSONFromFile(path.join(mydir, "input.schema.json"), null); - const outputSchema = readJSONFromFile(path.join(mydir, "output.schema.json"), null) - - txn = txn.hSet(key, "data", JSON.stringify(algo)) - .hSet(key, "requirements", JSON.stringify(requirements)) - .hSet(key, "inputSchema", JSON.stringify(inputSchema)) - .hSet(key, "outputSchema", JSON.stringify(outputSchema)) - - algo.requirements = requirements; - algo.inputSchema = inputSchema; - algo.outputSchema = outputSchema; - - algorithms.push(algo.cid); - if (cached) { - cached.algorithms.push(algo); - } - } - if (algorithms.length > 0) { - txn = txn.hSet(pluginKey, "algorithms", JSON.stringify(algorithms)) - } - } - - txn = txn.hSet(pluginKey, "meta", JSON.stringify(pluginMeta)); - - try { - const { birthtime } = statSync(pdir); - const timestamp = new Date(birthtime).getTime(); - txn = txn.hSet(pluginKey, 'installedAt', timestamp.toString()); - } catch(err) { - console.log(toErrorWithMessage(err)); - } - - txn.exec(); - return pluginMeta; -} // scanPluginDirectory - -function validatePythonScript (scriptPath: string): Promise { - return new Promise((resolve, reject) => { - if (!fs.existsSync(scriptPath)) - return reject("Script does not exists"); - - try { - exec(`${python_cmd} ${python_script_checker} "${scriptPath}"`, (error, stdout, stderr) => { - if (error) - return reject("Invalid python script"); - else - return resolve() - }) - } catch (e) { - return reject(e); - } - - }) -} - -async function validateMDX (scriptPath: string): Promise { - const result = await bundleMDX({ - // cwd: pluginPath, - file: scriptPath, - // globals: {'MyCheckbox':'MyCheckbox'}, - // globals: {'ai-verify-shared-library/charts':'ai-verify-shared-library/charts'}, - mdxOptions: options => { - options.remarkPlugins = [...(options.remarkPlugins ?? 
[]), remarkMdxImages, remarkGfm] - return options - }, - esbuildOptions: options => { - options.loader = { - ...options.loader, - '.png': 'dataurl', - } - options.external = [ - "ai-verify-shared-library/*" - ] - return options - }, - }) - // console.log("validateMDX result", result) - return; -} - -async function validateSummary (scriptPath: string): Promise { - const result = await bundleMDX({ - // cwd: pluginPath, - file: scriptPath, - }) - // console.log("validateMDX result", result) -} - -export function validatePluginDirectory(pdir: string): Promise { - // console.log("validatePluginDirectory", pdir) - return new Promise(async (resolve, reject) => { - // read in the plugin meta file - const pluginMeta = readJSONFromFile(path.join(pdir, pluginMetaFile)); - let res = validator.validate(pluginMeta, pluginSchema); - if (!res.valid) { - // console.error("Invalid plugin schema"); - return reject("Invalid plugin schema"); - } - - const pluginKey = `${PLUGIN_SET_PREFIX}:${pluginMeta.gid}`; - const widgets_subdir = path.join(pdir, "widgets"); - if (fs.existsSync(widgets_subdir)) { - const allfiles = fs.readdirSync(widgets_subdir); - const metaFiles = allfiles.filter(f => f.endsWith(metaSuffix)); - // pluginMeta.reportWidgets = []; - for (const metaFile of metaFiles) { - // console.log("metaFile", metaFile) - // read widget meta json - const metaPath = path.join(widgets_subdir, metaFile); - let widget = readJSONFromFile(metaPath); - const res = validator.validate(widget, widgetSchema); - if (!res.valid) { - return reject("Invalid widget schema: " + res.errors); - } - - // validate mdx - const mdxPath = path.join(widgets_subdir, `${widget.cid}.mdx`); - try { - await validateMDX(mdxPath); - } catch (e) { - console.log("widget MDX error", e); - return reject(`widget ${widget.cid} MDX is invalid`); - } - - // read in mock data if any - if (widget.mockdata) { - for (let mock of widget.mockdata) { - const datapath = path.join(widgets_subdir, mock.datapath); - const relative = path.relative(pdir, datapath); - if (relative.startsWith(".")) { - return reject("Invalid data path"); - } - // console.log("paths", pdir, datapath) - // console.log("relative", relative) - const data = readJSONFromFile(datapath, null); - if (!data) { - return reject("Unable to read sample data"); - } - } - } - } - } - - const inputs_subdir = path.join(pdir, "inputs"); - if (fs.existsSync(inputs_subdir)) { - const allfiles = fs.readdirSync(inputs_subdir); - const metaFiles = allfiles.filter(f => f.endsWith(metaSuffix)); - for (const metaFile of metaFiles) { - // console.log("metaFile", metaFile) - // read widget meta json - const metaPath = path.join(inputs_subdir, metaFile); - let widget = readJSONFromFile(metaPath); - const res = validator.validate(widget, inputBlockSchema); - if (!res.valid) { - return reject("Invalid input block schema: " + res.errors); - } - - // validate mdx - const mdxPath = path.join(inputs_subdir, `${widget.cid}.mdx`); - try { - await validateMDX(mdxPath); - } catch (e) { - console.log("input block mdx error", e) - return reject(`Input block ${widget.cid} MDX is invalid`); - } - - const summaryPath = path.join(inputs_subdir, `${widget.cid}.summary.mdx`); - try { - await validateSummary(summaryPath); - } catch (e) { - console.log("input block summary mdx error", e) - return reject(`Input block ${widget.cid} summary MDX is invalid`); - } - - - } - } - - const template_subdir = path.join(pdir, "templates"); - if (fs.existsSync(template_subdir)) { - const allfiles = fs.readdirSync(template_subdir); - const 
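// Illustrative sketch, not part of the diff: validatePluginDirectory resolves with the plugin gid
// once every meta file, MDX file, python script and schema in the folder checks out, and rejects
// with a message otherwise. The installPlugin helper called by the upload route is not shown in
// this patch, but a validate-then-scan sequence consistent with these functions could look like
// this; copying the folder into `plugins/<gid>` is an assumed step.
async function installFromFolder(tmpPluginPath: string) {
  const gid = await validatePluginDirectory(tmpPluginPath); // rejects if any component is invalid
  // ... copy tmpPluginPath into the plugins directory under `gid` (assumed step) ...
  return scanPluginDirectory(path.join(pluginPath, gid)); // registers the components in redis
}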
metaFiles = allfiles.filter(f => f.endsWith(metaSuffix)); - for (const metaFile of metaFiles) { - // console.log("metaFile", metaFile) - // read template meta json - const metaPath = path.join(template_subdir, metaFile); - let template = readJSONFromFile(metaPath); - const res = validator.validate(template, templateSchema); - if (!res.valid) { - return reject("Invalid template schema"); - } - const dataPath = path.join(template_subdir, `${template.cid}.data.json`); - let data = readJSONFromFile(dataPath); - const res2 = validator.validate(data, templateDataSchema as any); - if (!res2.valid) { - console.log("data validation failed", res2.errors) - return reject("Invalid template data schema"); - } - - } - } - - const algo_subdir = path.join(pdir, "algorithms"); - if (fs.existsSync(algo_subdir)) { - // console.log("check", python_script_checker, fs.existsSync(python_script_checker)) - // console.log("algo_subdir", algo_subdir) - // console.log("version", pluginMeta) - const algodirs = fs.readdirSync(algo_subdir); - // console.log("allfiles", algodirs) - for (const cid of algodirs) { - const mydir = path.join(algo_subdir, cid); - // console.log("mydir", mydir) - if (!fs.lstatSync(mydir).isDirectory()) - continue; - - // read algorithm meta - const metaPath = path.join(mydir, `${cid}.meta.json`); - let algo = readJSONFromFile(metaPath); - let res = validator.validate(algo, algoSchema); - if (!res.valid) { - return reject("Invalid algorithm schema"); - } - - // validate algo script - const scriptPath = path.join(mydir, `${algo.cid}.py`) - // console.log("scriptPath", scriptPath) - try { - await validatePythonScript(scriptPath); - } catch(e) { - return reject(`Algorithm ${algo.cid} python script is invalid`); - } - - // read requirements.txt - const requirements = readRequirementsFile(path.join(mydir, "requirements.txt")); - if (!requirements) { - return reject("Unable to read requirements file"); - } - // read input and output schema - const inputSchema = readJSONFromFile(path.join(mydir, "input.schema.json"), null); - if (!inputSchema) { - return reject("Unable to read input.schema.json"); - } - const outputSchema = readJSONFromFile(path.join(mydir, "output.schema.json"), null) - if (!outputSchema) { - return reject("Unable to read output.schema.json"); - } - - } - // console.log("check algos", pluginMeta.algorithms) - } - // cached._mymap[pluginMeta.gid] = pluginMeta; - // return pluginMeta; - - resolve(pluginMeta.gid); - }) - -} // validatePluginDirectory - - +import path from 'node:path'; +import fs, { statSync } from 'node:fs'; +import { exec } from 'node:child_process'; +import { Validator } from 'jsonschema'; +const validator = new Validator(); + +import redisConnect from '../redisClient'; +export const redis = redisConnect(); + +import { createProjectTemplate } from './projectServiceBackend'; + +const PLUGIN_DIR = path.join(process.cwd(), 'plugins'); +export const pluginPath = PLUGIN_DIR; + +const metaSuffix = '.meta.json'; +const pluginMetaFile = 'plugin.meta.json'; + +const python_cmd = process.env.PYTHON_EXECUTABLE || 'python3'; +const python_script_checker = path.join( + process.cwd(), + 'server/scripts/syntax_checker.py' +); + +// read in the schemas +import pluginSchema from 'config/ai-verify.plugin.schema.json'; +import algoSchema from 'config/ai-verify.algorithm.schema.json'; +import inputBlockSchema from 'config/ai-verify.inputBlock.schema.json'; +import templateSchema from 'config/ai-verify.template.schema.json'; +import templateDataSchema from 
'config/ai-verify.template.data.json'; +import widgetSchema from 'config/ai-verify.widget.schema.json'; + +const PLUGIN_SET_PREFIX = 'plugin:list'; + +import AIFPlugin, { + AIFPluginCache, + PluginComponentType, +} from 'src/types/plugin.interface'; +import { bundleMDX } from 'mdx-bundler'; +import remarkMdxImages from 'remark-mdx-images'; +import remarkGfm from 'remark-gfm'; +import { toErrorWithMessage } from 'src/lib/errorUtils'; + +export function readJSONFromFile(filepath: string, errorReturn: any = {}): any { + try { + return JSON.parse(fs.readFileSync(filepath).toString('utf8')); + } catch (e) { + console.warn('Unable to read JSON from file', filepath); + return errorReturn; + } +} + +export function readRequirementsFile(filepath: string): string[] | undefined { + try { + const text = fs.readFileSync(filepath).toString('utf8'); + const ar = text + .split(/\r?\n/) + .map((e) => e.trim()) + .filter((e) => e.length > 0); + return ar; + } catch (e) { + console.warn('Unable to read JSON from file', filepath); + return undefined; + } +} + +export async function deletePluginKeysFromRedis() { + // delete plugin set + let keys = await redis.keys(`${PLUGIN_SET_PREFIX}:*`); + for (const key of keys) await redis.del(key); + // delete algo keys + keys = await redis.keys('algo:*'); + for (const key of keys) await redis.del(key); + // delete widget keys + keys = await redis.keys('widget:*'); + for (const key of keys) await redis.del(key); + // delete input block keys + keys = await redis.keys('inputBlock:*'); + for (const key of keys) await redis.del(key); + keys = await redis.keys('template:*'); + for (const key of keys) await redis.del(key); +} + +/** + * + * @param pdir Plugin directory + * @param cached for caching the data + * @returns + * @note assume that already validated + */ +export async function scanPluginDirectory( + pdir: string, + cached: AIFPluginCache | null = null +): Promise { + console.log('scanPluginDirectory', pdir); + // read in the plugin meta file + const pluginMeta = readJSONFromFile( + path.join(pdir, pluginMetaFile) + ) as AIFPlugin; + const pluginKey = `${PLUGIN_SET_PREFIX}:${pluginMeta.gid}`; + // read in report widgets if any + const widgets_subdir = path.join(pdir, 'widgets'); + let txn = redis.multi(); + if (fs.existsSync(widgets_subdir)) { + const allfiles = fs.readdirSync(widgets_subdir); + const metaFiles = allfiles.filter((f) => f.endsWith(metaSuffix)); + const reportWidgets = []; + for (const metaFile of metaFiles) { + // read widget meta json + const metaPath = path.join(widgets_subdir, metaFile); + const widget = readJSONFromFile(metaPath); + + widget.type = PluginComponentType.ReportWidget; + widget.gid = `${pluginMeta.gid}:${widget.cid}`; // auto set widget gid = : + widget.version = pluginMeta.version; + widget.pluginGID = pluginMeta.gid; + // set the path to the MDX + widget.mdxPath = path.relative( + PLUGIN_DIR, + path.join(widgets_subdir, `${widget.cid}.mdx`) + ); + + // auto format gid for dependencies + if (widget.dependencies) { + for (const dep of widget.dependencies) { + dep.gid = + dep.gid && dep.gid.length > 0 + ? `${dep.gid}:${dep.cid}` + : `${pluginMeta.gid}:${dep.cid}`; + } + } + + // read in mock data if any + if (widget.mockdata) { + for (const mock of widget.mockdata) { + mock.gid = + mock.gid && mock.gid.length > 0 + ? 
`${mock.gid}:${mock.cid}` + : `${pluginMeta.gid}:${mock.cid}`; + const datapath = path.join(widgets_subdir, mock.datapath); + const data = readJSONFromFile(datapath, null); + mock.data = data; + } + } + + const key = `widget:${widget.gid}`; + txn = txn.hSet(key, 'data', JSON.stringify(widget)); + + reportWidgets.push(widget.cid); + if (cached) { + cached.reportWidgets.push(widget); + } + } + if (reportWidgets.length > 0) { + txn = txn.hSet(pluginKey, 'reportWidgets', JSON.stringify(reportWidgets)); + } + } + + const inputs_subdir = path.join(pdir, 'inputs'); + if (fs.existsSync(inputs_subdir)) { + const allfiles = fs.readdirSync(inputs_subdir); + const metaFiles = allfiles.filter((f) => f.endsWith(metaSuffix)); + const inputBlocks = []; + for (const metaFile of metaFiles) { + // read widget meta json + const metaPath = path.join(inputs_subdir, metaFile); + const widget = readJSONFromFile(metaPath); + + widget.type = PluginComponentType.InputBlock; + widget.gid = `${pluginMeta.gid}:${widget.cid}`; // auto set widget gid = : + widget.version = pluginMeta.version; + widget.pluginGID = pluginMeta.gid; + if (!widget.width) widget.width = 'md'; // set default width to md + // set the path to the MDX + widget.mdxPath = path.relative( + PLUGIN_DIR, + path.join(inputs_subdir, `${widget.cid}.mdx`) + ); + widget.summaryPath = path.relative( + PLUGIN_DIR, + path.join(inputs_subdir, `${widget.cid}.summary.mdx`) + ); + const key = `inputBlock:${widget.gid}`; + txn = txn.hSet(key, 'data', JSON.stringify(widget)); + inputBlocks.push(widget.cid); + if (cached) { + cached.inputBlocks.push(widget); + } + } + if (inputBlocks.length > 0) { + txn = txn.hSet(pluginKey, 'inputBlocks', JSON.stringify(inputBlocks)); + } + } + + const templates_subdir = path.join(pdir, 'templates'); + if (fs.existsSync(templates_subdir)) { + const allfiles = fs.readdirSync(templates_subdir); + const metaFiles = allfiles.filter((f) => f.endsWith(metaSuffix)); + const templates = []; + for (const metaFile of metaFiles) { + // console.log("metaFile", metaFile) + // read widget meta json + const metaPath = path.join(templates_subdir, metaFile); + const template = readJSONFromFile(metaPath); + + template.type = PluginComponentType.Template; + template.gid = `${pluginMeta.gid}:${template.cid}`; // auto set widget gid = : + template.version = pluginMeta.version; + template.pluginGID = pluginMeta.gid; + + const dataPath = path.join(templates_subdir, `${template.cid}.data.json`); + const data = readJSONFromFile(dataPath); + data.fromPlugin = true; + data.projectInfo = { + name: template.name, + description: template.description || undefined, + company: template.author || undefined, + }; + + const id = await createProjectTemplate(data); + // data.id = id; + // console.log("id", id); + + // template.data = path.relative(PLUGIN_DIR, path.join(templates_subdir, `${template.cid}.mdx`)) + const key = `template:${template.gid}`; + txn = txn.hSet(key, 'data', JSON.stringify(template)); + txn = txn.hSet(key, 'data2', JSON.stringify(data)); + txn = txn.hSet(key, 'id', id); + // const typedWidget = widget as InputBlock; + // // console.log("typedWidget", typedWidget.type); + // pluginMeta.inputBlocks.push(typedWidget); + templates.push(template.cid); + if (cached) { + cached.templates.push(template); + // cached._mymap[widget.gid] = widget; + } + } + if (templates.length > 0) { + // pluginMeta.inputBlocks = inputBlocks; + txn = txn.hSet(pluginKey, 'templates', JSON.stringify(templates)); + } + } + + const algo_subdir = path.join(pdir, 'algorithms'); + 
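// Illustrative sketch, not part of the diff: scanPluginDirectory writes one hash per plugin under
// `plugin:list:<pluginGID>` (fields: meta, reportWidgets, inputBlocks, templates, algorithms,
// installedAt) and one hash per component under `widget:<gid>`, `inputBlock:<gid>`,
// `template:<gid>` and `algo:<gid>`, each with a JSON-encoded `data` field. Reading a single
// widget back could therefore look like this; it assumes the `redis` client exported near the
// top of this file, and `gid` is a placeholder value.
async function readWidget(gid: string) {
  const raw = await redis.hGet(`widget:${gid}`, 'data');
  return raw ? JSON.parse(raw) : null; // null when the widget was never scanned in
}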
if (fs.existsSync(algo_subdir)) { + // console.log("algo_subdir", algo_subdir) + // console.log("version", pluginMeta) + const algodirs = fs.readdirSync(algo_subdir); + // console.log("allfiles", algodirs) + const algorithms = []; + for (const cid of algodirs) { + const mydir = path.join(algo_subdir, cid); + // console.log("mydir", mydir) + if (!fs.lstatSync(mydir).isDirectory()) continue; + const metaPath = path.join(mydir, `${cid}.meta.json`); + const algo = readJSONFromFile(metaPath); + + algo.type = PluginComponentType.Algorithm; + algo.gid = `${pluginMeta.gid}:${algo.cid}`; // auto set widget gid = : + algo.version = pluginMeta.version; + algo.pluginGID = pluginMeta.gid; + algo.algoPath = path.join(algo_subdir, cid); + const key = `algo:${algo.gid}`; + // read requirements.txt + const requirements = readRequirementsFile( + path.join(mydir, 'requirements.txt') + ); + // read input and output schema + const inputSchema = readJSONFromFile( + path.join(mydir, 'input.schema.json'), + null + ); + const outputSchema = readJSONFromFile( + path.join(mydir, 'output.schema.json'), + null + ); + + txn = txn + .hSet(key, 'data', JSON.stringify(algo)) + .hSet(key, 'requirements', JSON.stringify(requirements)) + .hSet(key, 'inputSchema', JSON.stringify(inputSchema)) + .hSet(key, 'outputSchema', JSON.stringify(outputSchema)); + + algo.requirements = requirements; + algo.inputSchema = inputSchema; + algo.outputSchema = outputSchema; + + algorithms.push(algo.cid); + if (cached) { + cached.algorithms.push(algo); + } + } + if (algorithms.length > 0) { + txn = txn.hSet(pluginKey, 'algorithms', JSON.stringify(algorithms)); + } + } + + txn = txn.hSet(pluginKey, 'meta', JSON.stringify(pluginMeta)); + + try { + const { birthtime } = statSync(pdir); + const timestamp = new Date(birthtime).getTime(); + txn = txn.hSet(pluginKey, 'installedAt', timestamp.toString()); + } catch (err) { + console.log(toErrorWithMessage(err)); + } + + txn.exec(); + return pluginMeta; +} // scanPluginDirectory + +function validatePythonScript(scriptPath: string): Promise { + return new Promise((resolve, reject) => { + if (!fs.existsSync(scriptPath)) return reject('Script does not exists'); + + try { + exec( + `${python_cmd} ${python_script_checker} "${scriptPath}"`, + (error) => { + if (error) return reject('Invalid python script'); + else return resolve(); + } + ); + } catch (e) { + return reject(e); + } + }); +} + +async function validateMDX(scriptPath: string): Promise { + await bundleMDX({ + // cwd: pluginPath, + file: scriptPath, + // globals: {'MyCheckbox':'MyCheckbox'}, + // globals: {'ai-verify-shared-library/charts':'ai-verify-shared-library/charts'}, + mdxOptions: (options) => { + options.remarkPlugins = [ + ...(options.remarkPlugins ?? 
[]), + remarkMdxImages, + remarkGfm, + ]; + return options; + }, + esbuildOptions: (options) => { + options.loader = { + ...options.loader, + '.png': 'dataurl', + }; + options.external = ['ai-verify-shared-library/*']; + return options; + }, + }); + // console.log("validateMDX result", result) + return; +} + +async function validateSummary(scriptPath: string): Promise { + await bundleMDX({ + file: scriptPath, + }); +} + +export function validatePluginDirectory(pdir: string): Promise { + return new Promise(async (resolve, reject) => { + // read in the plugin meta file + const pluginMeta = readJSONFromFile(path.join(pdir, pluginMetaFile)); + const res = validator.validate(pluginMeta, pluginSchema); + if (!res.valid) { + return reject('Invalid plugin schema'); + } + + const widgets_subdir = path.join(pdir, 'widgets'); + if (fs.existsSync(widgets_subdir)) { + const allfiles = fs.readdirSync(widgets_subdir); + const metaFiles = allfiles.filter((f) => f.endsWith(metaSuffix)); + // pluginMeta.reportWidgets = []; + for (const metaFile of metaFiles) { + // read widget meta json + const metaPath = path.join(widgets_subdir, metaFile); + const widget = readJSONFromFile(metaPath); + const res = validator.validate(widget, widgetSchema); + if (!res.valid) { + return reject('Invalid widget schema: ' + res.errors); + } + + // validate mdx + const mdxPath = path.join(widgets_subdir, `${widget.cid}.mdx`); + try { + await validateMDX(mdxPath); + } catch (e) { + console.log('widget MDX error', e); + return reject(`widget ${widget.cid} MDX is invalid`); + } + + // read in mock data if any + if (widget.mockdata) { + for (const mock of widget.mockdata) { + const datapath = path.join(widgets_subdir, mock.datapath); + const relative = path.relative(pdir, datapath); + if (relative.startsWith('.')) { + return reject('Invalid data path'); + } + // console.log("paths", pdir, datapath) + // console.log("relative", relative) + const data = readJSONFromFile(datapath, null); + if (!data) { + return reject('Unable to read sample data'); + } + } + } + } + } + + const inputs_subdir = path.join(pdir, 'inputs'); + if (fs.existsSync(inputs_subdir)) { + const allfiles = fs.readdirSync(inputs_subdir); + const metaFiles = allfiles.filter((f) => f.endsWith(metaSuffix)); + for (const metaFile of metaFiles) { + // console.log("metaFile", metaFile) + // read widget meta json + const metaPath = path.join(inputs_subdir, metaFile); + const widget = readJSONFromFile(metaPath); + const res = validator.validate(widget, inputBlockSchema); + if (!res.valid) { + return reject('Invalid input block schema: ' + res.errors); + } + + // validate mdx + const mdxPath = path.join(inputs_subdir, `${widget.cid}.mdx`); + try { + await validateMDX(mdxPath); + } catch (e) { + console.log('input block mdx error', e); + return reject(`Input block ${widget.cid} MDX is invalid`); + } + + const summaryPath = path.join( + inputs_subdir, + `${widget.cid}.summary.mdx` + ); + try { + await validateSummary(summaryPath); + } catch (e) { + console.log('input block summary mdx error', e); + return reject(`Input block ${widget.cid} summary MDX is invalid`); + } + } + } + + const template_subdir = path.join(pdir, 'templates'); + if (fs.existsSync(template_subdir)) { + const allfiles = fs.readdirSync(template_subdir); + const metaFiles = allfiles.filter((f) => f.endsWith(metaSuffix)); + for (const metaFile of metaFiles) { + // read template meta json + const metaPath = path.join(template_subdir, metaFile); + const template = readJSONFromFile(metaPath); + const res = 
validator.validate(template, templateSchema); + if (!res.valid) { + return reject('Invalid template schema'); + } + const dataPath = path.join( + template_subdir, + `${template.cid}.data.json` + ); + const data = readJSONFromFile(dataPath); + const res2 = validator.validate(data, templateDataSchema as any); + if (!res2.valid) { + console.log('data validation failed', res2.errors); + return reject('Invalid template data schema'); + } + } + } + + const algo_subdir = path.join(pdir, 'algorithms'); + if (fs.existsSync(algo_subdir)) { + const algodirs = fs.readdirSync(algo_subdir); + for (const cid of algodirs) { + const mydir = path.join(algo_subdir, cid); + if (!fs.lstatSync(mydir).isDirectory()) continue; + + // read algorithm meta + const metaPath = path.join(mydir, `${cid}.meta.json`); + const algo = readJSONFromFile(metaPath); + const res = validator.validate(algo, algoSchema); + if (!res.valid) { + return reject('Invalid algorithm schema'); + } + + // validate algo script + const scriptPath = path.join(mydir, `${algo.cid}.py`); + try { + await validatePythonScript(scriptPath); + } catch (e) { + return reject(`Algorithm ${algo.cid} python script is invalid`); + } + + // read requirements.txt + const requirements = readRequirementsFile( + path.join(mydir, 'requirements.txt') + ); + if (!requirements) { + return reject('Unable to read requirements file'); + } + // read input and output schema + const inputSchema = readJSONFromFile( + path.join(mydir, 'input.schema.json'), + null + ); + if (!inputSchema) { + return reject('Unable to read input.schema.json'); + } + const outputSchema = readJSONFromFile( + path.join(mydir, 'output.schema.json'), + null + ); + if (!outputSchema) { + return reject('Unable to read output.schema.json'); + } + } + } + + resolve(pluginMeta.gid); + }); +} diff --git a/ai-verify-portal/server/lib/projectServiceBackend.ts b/ai-verify-portal/server/lib/projectServiceBackend.ts index 7d56c5388..cc1d04a90 100644 --- a/ai-verify-portal/server/lib/projectServiceBackend.ts +++ b/ai-verify-portal/server/lib/projectServiceBackend.ts @@ -1,438 +1,437 @@ -import { gql } from "@apollo/client"; -import graphqlClient from "src/lib/graphqlClient"; - -import Project, { Report } from 'src/types/project.interface'; -import { getByGID } from '../pluginManager'; -import { ReportWidget } from "src/types/plugin.interface"; -import _ from 'lodash'; -import ProjectTemplate from "src/types/projectTemplate.interface"; - -export const GET_PROJECTS = gql` - query Query { - projects { - id - template { - id - projectInfo { - name - description - company - } - } - createdAt - updatedAt - projectInfo { - name - description - company - } - report { - status - tests { - algorithmGID - } - } - modelAndDatasets { - groundTruthColumn - model { - id - filename - filePath - modelType - name - status - } - testDataset { - id - filename - filePath - name - status - } - groundTruthDataset { - id - filename - filePath - name - status - } - } - } - } -` - -export const GET_PROJECT = gql` - query Query($id: ObjectID!) 
{ - project(id: $id) { - id - projectInfo { - name - description - reportTitle - company - } - globalVars { - key - value - } - inputBlockData - testInformationData { - algorithmGID - testArguments - } - report { - status - } - pages { - layouts { - h - i - isBounded - isDraggable - isResizable - maxH - maxW - minH - minW - resizeHandles - static - w - x - y - } - reportWidgets { - widgetGID - key - layoutItemProperties { - justifyContent - alignItems - textAlign - color - bgcolor - } - properties - } - } - modelAndDatasets { - groundTruthColumn - model { - id - filename - filePath - modelType - name - status - ctime - } - testDataset { - id - filename - filePath - dataColumns { label, name, datatype } - name - status - ctime - } - groundTruthDataset { - id - filename - filePath - dataColumns { label, name, datatype } - name - status - ctime - } - } - } - } -` - -async function populateProject (project: Project|ProjectTemplate): Promise { - if (project.pages) { - for (let page of project.pages) { - for (let item of page.reportWidgets) { - // console.log("item", item) - item.widget = await getByGID(item.widgetGID) as ReportWidget; - // console.log("-> widget", item.widget) - } - } - } - return project; -} - -export async function listProjects(): Promise { - const client = graphqlClient(true) - const { data } = await client.query({ - query: GET_PROJECTS, - }); - // console.log("data", data.projects) - let projects = data.projects as Project[]; - for (let proj of projects) { - await populateProject(proj); - } - // let projects = (data.projects as Project[]).map(async proj => { await populateProject(proj); return proj }) - // let projects = data.projects as Project[]; - // console.log("projects", projects) - return projects; -} - -export async function getProject(id: string): Promise { - console.log("getProject") - const client = graphqlClient(true) - try { - const { data } = await client.query({ - query: GET_PROJECT, - variables: { - id, - } - }); - return await populateProject(_.cloneDeep(data.project)) as Project; - } catch (err) { - console.error("getProject error", JSON.stringify(err, null, 2)); - return Promise.reject(err); - } - // return data.project; -} - -export const GET_REPORT = gql` - query Query($projectID: ObjectID!) 
{ - report(projectID: $projectID) { - projectID - status - timeStart - timeTaken - totalTestTimeTaken - inputBlockData - projectSnapshot { - projectInfo { - name - description - company - } - globalVars { - key - value - } - inputBlockData - testInformationData { - algorithmGID - testArguments - } - modelAndDatasets { - testDataset { - filename - name - size - description - type - dataFormat - } - model { - name - filename - description - size - type - modelType - modelFormat - } - groundTruthColumn - groundTruthDataset { - filename - name - size - description - type - dataFormat - } - } - pages { - layouts { - h - i - isBounded - isDraggable - isResizable - maxH - maxW - minH - minW - resizeHandles - static - w - x - y - } - reportWidgets { - widgetGID - key - layoutItemProperties { - justifyContent - alignItems - textAlign - color - bgcolor - } - properties - } - } - } - tests { - algorithmGID - algorithm { - gid - name - description - } - testArguments - status - progress - timeStart - timeTaken - output - errorMessages { - id - description - } - } - } - } -` - -export async function getReport(projectID: string): Promise { - const client = graphqlClient(true) - const { data } = await client.query({ - query: GET_REPORT, - variables: { - projectID, - } - }); - return data.report; -} - -export const GET_PROJECT_TEMPLATES = gql` - query Query { - projectTemplates { - id - fromPlugin - createdAt - updatedAt - projectInfo { - name - description - company - } - } - } -` - -export const GET_PROJECT_TEMPLATE = gql` - query Query($id: ObjectID!) { - projectTemplate(id: $id) { - id - fromPlugin - projectInfo { - name - description - company - } - globalVars { - key - value - } - pages { - layouts { - h - i - isBounded - isDraggable - isResizable - maxH - maxW - minH - minW - resizeHandles - static - w - x - y - } - reportWidgets { - widgetGID - key - layoutItemProperties { - justifyContent - alignItems - textAlign - color - bgcolor - } - properties - } - } - } - } -` -export async function listProjectTemplates(): Promise { - const client = graphqlClient(true) - const { data } = await client.query({ - query: GET_PROJECT_TEMPLATES, - }); - // console.log("data", data.projects) - let templates = data.projectTemplates as ProjectTemplate[]; - for (let template of templates) { - await populateProject(template); - } - return templates; -} - -export async function getProjectTemplate(id: string): Promise { - const client = graphqlClient(true) - const { data } = await client.query({ - query: GET_PROJECT_TEMPLATE, - variables: { - id, - } - }); - return await populateProject(_.cloneDeep(data.projectTemplate)); - // return data.project; -} - -export const CREATE_PROJECT_TEMPLATE = gql` -mutation Mutation($projectTemplate: ProjectTemplateInput!) { - createProjectTemplate(projectTemplate: $projectTemplate) { - id - } -} -` -export async function createProjectTemplate(projectTemplate: ProjectTemplate): Promise { - // console.log("createProjectTemplate", projectTemplate) - const client = graphqlClient(true) - const { data } = await client.mutate({ - mutation: CREATE_PROJECT_TEMPLATE, - variables: { - projectTemplate, - } - }); - return data.createProjectTemplate.id; - // return data.project; -} - -export const DELETE_PROJECT_TEMPLATE = gql` -mutation Mutation($id: ObjectID!) 
{ - deleteProjectTemplate(id: $id) -} -` - -export async function deleteProjectTemplate(id: string): Promise { - // console.log("createProjectTemplate", projectTemplate) - const client = graphqlClient(true) - const { data } = await client.mutate({ - mutation: DELETE_PROJECT_TEMPLATE, - variables: { - id, - } - }); - return data; - // return data.project; -} +import { gql } from '@apollo/client'; +import graphqlClient from 'src/lib/graphqlClient'; + +import Project, { Report } from 'src/types/project.interface'; +import { getByGID } from '../pluginManager'; +import { ReportWidget } from 'src/types/plugin.interface'; +import _ from 'lodash'; +import ProjectTemplate from 'src/types/projectTemplate.interface'; + +export const GET_PROJECTS = gql` + query Query { + projects { + id + template { + id + projectInfo { + name + description + company + } + } + createdAt + updatedAt + projectInfo { + name + description + company + } + report { + status + tests { + algorithmGID + } + } + modelAndDatasets { + groundTruthColumn + model { + id + filename + filePath + modelType + name + status + } + testDataset { + id + filename + filePath + name + status + } + groundTruthDataset { + id + filename + filePath + name + status + } + } + } + } +`; + +export const GET_PROJECT = gql` + query Query($id: ObjectID!) { + project(id: $id) { + id + projectInfo { + name + description + reportTitle + company + } + globalVars { + key + value + } + inputBlockData + testInformationData { + algorithmGID + testArguments + } + report { + status + } + pages { + layouts { + h + i + isBounded + isDraggable + isResizable + maxH + maxW + minH + minW + resizeHandles + static + w + x + y + } + reportWidgets { + widgetGID + key + layoutItemProperties { + justifyContent + alignItems + textAlign + color + bgcolor + } + properties + } + } + modelAndDatasets { + groundTruthColumn + model { + id + filename + filePath + modelType + name + status + ctime + } + testDataset { + id + filename + filePath + dataColumns { + label + name + datatype + } + name + status + ctime + } + groundTruthDataset { + id + filename + filePath + dataColumns { + label + name + datatype + } + name + status + ctime + } + } + } + } +`; + +async function populateProject( + project: Project | ProjectTemplate +): Promise { + if (project.pages) { + for (const page of project.pages) { + for (const item of page.reportWidgets) { + item.widget = (await getByGID(item.widgetGID)) as ReportWidget; + } + } + } + return project; +} + +export async function listProjects(): Promise { + const client = graphqlClient(true); + const { data } = await client.query({ + query: GET_PROJECTS, + }); + const projects = data.projects as Project[]; + for (const proj of projects) { + await populateProject(proj); + } + return projects; +} + +export async function getProject(id: string): Promise { + console.log('getProject'); + const client = graphqlClient(true); + try { + const { data } = await client.query({ + query: GET_PROJECT, + variables: { + id, + }, + }); + return (await populateProject(_.cloneDeep(data.project))) as Project; + } catch (err) { + console.error('getProject error', JSON.stringify(err, null, 2)); + return Promise.reject(err); + } +} + +export const GET_REPORT = gql` + query Query($projectID: ObjectID!) 
{ + report(projectID: $projectID) { + projectID + status + timeStart + timeTaken + totalTestTimeTaken + inputBlockData + projectSnapshot { + projectInfo { + name + description + company + } + globalVars { + key + value + } + inputBlockData + testInformationData { + algorithmGID + testArguments + } + modelAndDatasets { + testDataset { + filename + name + size + description + type + dataFormat + } + model { + name + filename + description + size + type + modelType + modelFormat + } + groundTruthColumn + groundTruthDataset { + filename + name + size + description + type + dataFormat + } + } + pages { + layouts { + h + i + isBounded + isDraggable + isResizable + maxH + maxW + minH + minW + resizeHandles + static + w + x + y + } + reportWidgets { + widgetGID + key + layoutItemProperties { + justifyContent + alignItems + textAlign + color + bgcolor + } + properties + } + } + } + tests { + algorithmGID + algorithm { + gid + name + description + } + testArguments + status + progress + timeStart + timeTaken + output + errorMessages { + id + description + } + } + } + } +`; + +export async function getReport(projectID: string): Promise { + const client = graphqlClient(true); + const { data } = await client.query({ + query: GET_REPORT, + variables: { + projectID, + }, + }); + return data.report; +} + +export const GET_PROJECT_TEMPLATES = gql` + query Query { + projectTemplates { + id + fromPlugin + createdAt + updatedAt + projectInfo { + name + description + company + } + } + } +`; + +export const GET_PROJECT_TEMPLATE = gql` + query Query($id: ObjectID!) { + projectTemplate(id: $id) { + id + fromPlugin + projectInfo { + name + description + company + } + globalVars { + key + value + } + pages { + layouts { + h + i + isBounded + isDraggable + isResizable + maxH + maxW + minH + minW + resizeHandles + static + w + x + y + } + reportWidgets { + widgetGID + key + layoutItemProperties { + justifyContent + alignItems + textAlign + color + bgcolor + } + properties + } + } + } + } +`; +export async function listProjectTemplates(): Promise { + const client = graphqlClient(true); + const { data } = await client.query({ + query: GET_PROJECT_TEMPLATES, + }); + const templates = data.projectTemplates as ProjectTemplate[]; + for (const template of templates) { + await populateProject(template); + } + return templates; +} + +export async function getProjectTemplate(id: string): Promise { + const client = graphqlClient(true); + const { data } = await client.query({ + query: GET_PROJECT_TEMPLATE, + variables: { + id, + }, + }); + return await populateProject(_.cloneDeep(data.projectTemplate)); +} + +export const CREATE_PROJECT_TEMPLATE = gql` + mutation Mutation($projectTemplate: ProjectTemplateInput!) { + createProjectTemplate(projectTemplate: $projectTemplate) { + id + } + } +`; +export async function createProjectTemplate( + projectTemplate: ProjectTemplate +): Promise { + const client = graphqlClient(true); + const { data } = await client.mutate({ + mutation: CREATE_PROJECT_TEMPLATE, + variables: { + projectTemplate, + }, + }); + return data.createProjectTemplate.id; +} + +export const DELETE_PROJECT_TEMPLATE = gql` + mutation Mutation($id: ObjectID!) 
{ + deleteProjectTemplate(id: $id) + } +`; + +export async function deleteProjectTemplate(id: string): Promise { + const client = graphqlClient(true); + const { data } = await client.mutate({ + mutation: DELETE_PROJECT_TEMPLATE, + variables: { + id, + }, + }); + return data; +} diff --git a/ai-verify-portal/server/pluginManager.ts b/ai-verify-portal/server/pluginManager.ts index fb1fb31fa..af56c3205 100644 --- a/ai-verify-portal/server/pluginManager.ts +++ b/ai-verify-portal/server/pluginManager.ts @@ -1,308 +1,320 @@ -/** - * Service layer for plugin manager - */ -import path from 'node:path'; -import fs from 'node:fs'; - -import AIFPlugin, { ReportWidget, ReportWidgetStatus, InputBlock, Algorithm, ProjectTemplateComponent, BasePluginComponent } from 'src/types/plugin.interface'; -import PluginManagerType from 'src/types/pluginManager.interface'; -import stockPlugins from 'config/plugin.stock'; -import { redis, deletePluginKeysFromRedis, pluginPath, scanPluginDirectory, validatePluginDirectory } from './lib/pluginService'; -import { deleteProjectTemplate } from './lib/projectServiceBackend'; - -import moment from 'moment'; -import samver from 'semver'; -import ProjectTemplate from 'src/types/projectTemplate.interface'; -import { toErrorWithMessage } from 'src/lib/errorUtils'; - -// let cached = global.plugins; -const LAST_MODIFIED_KEY = 'plugin:lastModified'; -const PLUGIN_SET_PREFIX = 'plugin:list'; - -/** - * Scan the plugins directory and retrieve list of plugins and widgets. - * No validation here. Assume that plugins already validated and well-behaved. - */ -async function scanPluginDirectories() { - console.log('scanPluginDirectories') - const pluginsDir = fs.readdirSync(pluginPath); - await deletePluginKeysFromRedis(); - const cached = { - reportWidgets: [] as ReportWidget[], - inputBlocks: [] as InputBlock[], - algorithms: [] as Algorithm[], - templates: [] as ProjectTemplateComponent[], - } - for (const dir of pluginsDir) { - const pdir = path.join(pluginPath, dir); - try { - await validatePluginDirectory(pdir); - await scanPluginDirectory(pdir, cached) - } catch (e) { - console.log('Error reading plugin:', e); - } - } - - // Check widget dependencies - /* - for (let widget of cached.reportWidgets) { - let status = ReportWidgetStatus.OK; - if (widget.dependencies) { - for (let dep of widget.dependencies) { - // let comp = getByGID(dep.gid); - let comp = cached._mymap[dep.gid] || null - // console.log('dep', dep, comp, comp?.type) - dep.valid = !!comp; - if (!dep.valid) - status = ReportWidgetStatus.MissingDependencies; - } - } - widget.status = status; - const key = `widget:${widget.gid}`; - await redis.hSet(key, 'data', JSON.stringify(widget)) - } - */ - - return cached; -} - -export async function deletePlugin(gid: string, publish = true) { - // check if gid exists - const rediskey = `plugin:list:${gid}`; - const alldata = await redis.hGetAll(rediskey); - const keys = Object.keys(alldata); - if (keys.length == 0) { - throw new Error('Invalid gid ' + rediskey); - } - - if (keys.includes('inputBlocks')) { - const inputBlocks = JSON.parse(alldata['inputBlocks']) - for (const ib of inputBlocks) { - await redis.del(`inputBlock:${gid}:${ib}`) - } - } - if (keys.includes('reportWidgets')) { - const reportWidgets = JSON.parse(alldata['reportWidgets']) - for (const ib of reportWidgets) { - await redis.del(`widget:${gid}:${ib}`) - } - } - if (keys.includes('templates')) { - const templates = JSON.parse(alldata['templates']) - for (const template of templates) { - const key = 
`template:${gid}:${template}`; - const id = await redis.hGet(key, 'id'); - await redis.del(key) - if (id) - deleteProjectTemplate(id); - } - } - if (keys.includes('algorithms')) { - const algorithms = JSON.parse(alldata['algorithms']) - for (const ib of algorithms) { - const key = `algo:${gid}:${ib}` - if (publish) - await redis.publish('algo.delete', key) - await redis.del(key); - } - } - - await redis.del(rediskey); - - const pdir = path.join(pluginPath, gid); - fs.rmSync(pdir, { recursive: true }) -} - -export async function installPlugin(tempDir: string) { - const gid = await validatePluginDirectory(tempDir); - const pdir = path.join(pluginPath, gid); - - // validate path - const relative = path.relative(pluginPath, pdir); - if (relative.startsWith('.')) { - throw ('Invalid GID'); - } - // if pdir already exists, remove old dir - let isUpdate = false; - if (fs.existsSync(pdir)) { - isUpdate = true; - await deletePlugin(gid, false); - } - // kwk-v0.4.1 - change rename to cp. The plugins folder need to - // be mapped to a mounted volume for test engine to access the uploaded - // plugin files. As plugins folder now is a different filesystem then - // the temp dir, need to change rename to cp and rm as rename doesn't - // work across filesystems. - fs.cpSync(tempDir, pdir, { recursive: true }); - fs.rmSync(tempDir, { recursive: true, force: true }); - - const cached = { - reportWidgets: [] as ReportWidget[], - inputBlocks: [] as InputBlock[], - algorithms: [] as Algorithm[], - templates: [] as ProjectTemplateComponent[], - } // clear the cached - - const plugin = await scanPluginDirectory(pdir, cached); - plugin.isStock = stockPlugins.includes(gid); - if (cached.reportWidgets) - plugin.reportWidgets = cached.reportWidgets; - if (cached.inputBlocks) - plugin.inputBlocks = cached.inputBlocks; - if (cached.templates) - plugin.templates = cached.templates; - if (cached.algorithms) { - plugin.algorithms = cached.algorithms; - // publish install msg to redis - for (const algo of plugin.algorithms) { - redis.publish(isUpdate ? 
'algo.update' : 'algo.install', `algo:${algo.gid}`) - } - } - - return plugin; -} - -export async function getByGID(gid: string): Promise { - const key = await redis.keys(`*:${gid}`); - if (key.length == 0) - return null; - const data = await redis.hGet(key[0], 'data'); - if (!data) - return null; - const obj = JSON.parse(data); - switch (obj.type) { - case 'Algorithm': - obj.requirements = JSON.parse((await redis.hGet(key[0], 'requirements')) || '[]'); - obj.inputSchema = JSON.parse((await redis.hGet(key[0], 'inputSchema')) || '{}'); - obj.outputSchema = JSON.parse((await redis.hGet(key[0], 'outputSchema')) || '{}'); - return obj as Algorithm; - case 'InputBlock': - return obj as InputBlock; - case 'Template': - obj.data = JSON.parse((await redis.hGet(key[0], 'data2')) || '{}'); - obj.id = (await redis.hGet(key[0], 'id')) || null; - return obj as ProjectTemplate; - case 'ReportWidget': - return obj as ReportWidget; - default: - return null; - } -} - -async function checkPluginDir() { - if (!fs.existsSync(pluginPath)) { - await deletePluginKeysFromRedis(); - await redis.del(LAST_MODIFIED_KEY); - fs.mkdirSync(pluginPath); - return; - } - const mtime = await redis.hGet(LAST_MODIFIED_KEY, 'mtime'); - if (!mtime) { - await redis.hSet(LAST_MODIFIED_KEY, 'mtime', moment().toISOString()); - try { - await scanPluginDirectories(); - } catch (e) { - console.error('Scan plugins dir error', e); - } - - } -} - -export async function getPlugins(populate = true): Promise { - // check whether plugins directory exists and populate the redis registries if empty - await checkPluginDir(); - - const keys = await redis.keys(`${PLUGIN_SET_PREFIX}:*`); - const plugins: AIFPlugin[] = []; - const inputBlocks: InputBlock[] = []; - const algorithms: Algorithm[] = []; - const reportWidgets: ReportWidget[] = []; - const templates: ProjectTemplate[] = []; - const _mymap: any = {}; - - for (const rediskey of keys) { - const gid = rediskey.slice(PLUGIN_SET_PREFIX.length + 1); - let plugin = {} as any; - if (populate) { - const allKV = await redis.hGetAll(rediskey); - plugin = JSON.parse(allKV['meta']); - - const getComponents = async (key: string) => { - if (!allKV[key]) - return; - const obj = JSON.parse(allKV[key]); - plugin[key] = []; - for (const cid of (obj as string[])) { - const comp = await getByGID(`${gid}:${cid}`); - if (comp) { - if (key !== 'templates') - _mymap[(comp as BasePluginComponent).gid] = plugin.version; - plugin[key].push(comp); - if (key === 'inputBlocks') - inputBlocks.push(comp as InputBlock); - else if (key === 'algorithms') - algorithms.push(comp as Algorithm); - else if (key === 'reportWidgets') - reportWidgets.push(comp as ReportWidget); - else if (key === 'templates') - templates.push(comp as ProjectTemplate); - } - } - } - - await Promise.all([ - getComponents('reportWidgets'), - getComponents('inputBlocks'), - getComponents('algorithms'), - getComponents('templates') - ]) - - plugin.isStock = stockPlugins.includes(gid); - try { - plugin.installedAt = parseInt(allKV['installedAt']); - } catch (err) { - console.log(toErrorWithMessage(err)); - } - plugins.push(plugin); - } else { - const meta = await redis.hGet(rediskey, 'meta'); - if (meta) - plugins.push(JSON.parse(meta)); - } - } - - // Check widget dependencies - for (const widget of reportWidgets) { - let status = ReportWidgetStatus.OK; - if (widget.dependencies) { - for (const dep of widget.dependencies) { - dep.valid = false; - if (_mymap[dep.gid]) { - if (dep.version) - dep.valid = samver.satisfies(_mymap[dep.gid], dep.version) - else 
- dep.valid = true; - } - if (!dep.valid) { - status = ReportWidgetStatus.MissingDependencies; - } - } - } - widget.status = status; - } - - plugins.sort((a, b) => a.installedAt < b.installedAt ? -1 : 1); - - return { - plugins, - inputBlocks, - algorithms, - templates, - stockPlugins, - } -} - - -export function isStockPlugin(gid: string) { - return stockPlugins.includes(gid); -} +/** + * Service layer for plugin manager + */ +import path from 'node:path'; +import fs from 'node:fs'; + +import AIFPlugin, { + ReportWidget, + ReportWidgetStatus, + InputBlock, + Algorithm, + ProjectTemplateComponent, + BasePluginComponent, +} from 'src/types/plugin.interface'; +import PluginManagerType from 'src/types/pluginManager.interface'; +import stockPlugins from 'config/plugin.stock'; +import { + redis, + deletePluginKeysFromRedis, + pluginPath, + scanPluginDirectory, + validatePluginDirectory, +} from './lib/pluginService'; +import { deleteProjectTemplate } from './lib/projectServiceBackend'; + +import moment from 'moment'; +import samver from 'semver'; +import ProjectTemplate from 'src/types/projectTemplate.interface'; +import { toErrorWithMessage } from 'src/lib/errorUtils'; + +// let cached = global.plugins; +const LAST_MODIFIED_KEY = 'plugin:lastModified'; +const PLUGIN_SET_PREFIX = 'plugin:list'; + +/** + * Scan the plugins directory and retrieve list of plugins and widgets. + * No validation here. Assume that plugins already validated and well-behaved. + */ +async function scanPluginDirectories() { + console.log('scanPluginDirectories'); + const pluginsDir = fs.readdirSync(pluginPath); + await deletePluginKeysFromRedis(); + const cached = { + reportWidgets: [] as ReportWidget[], + inputBlocks: [] as InputBlock[], + algorithms: [] as Algorithm[], + templates: [] as ProjectTemplateComponent[], + }; + for (const dir of pluginsDir) { + const pdir = path.join(pluginPath, dir); + try { + await validatePluginDirectory(pdir); + await scanPluginDirectory(pdir, cached); + } catch (e) { + console.log('Error reading plugin:', e); + } + } + + // Check widget dependencies + /* + for (let widget of cached.reportWidgets) { + let status = ReportWidgetStatus.OK; + if (widget.dependencies) { + for (let dep of widget.dependencies) { + // let comp = getByGID(dep.gid); + let comp = cached._mymap[dep.gid] || null + // console.log('dep', dep, comp, comp?.type) + dep.valid = !!comp; + if (!dep.valid) + status = ReportWidgetStatus.MissingDependencies; + } + } + widget.status = status; + const key = `widget:${widget.gid}`; + await redis.hSet(key, 'data', JSON.stringify(widget)) + } + */ + + return cached; +} + +export async function deletePlugin(gid: string, publish = true) { + // check if gid exists + const rediskey = `plugin:list:${gid}`; + const alldata = await redis.hGetAll(rediskey); + const keys = Object.keys(alldata); + if (keys.length == 0) { + throw new Error('Invalid gid ' + rediskey); + } + + if (keys.includes('inputBlocks')) { + const inputBlocks = JSON.parse(alldata['inputBlocks']); + for (const ib of inputBlocks) { + await redis.del(`inputBlock:${gid}:${ib}`); + } + } + if (keys.includes('reportWidgets')) { + const reportWidgets = JSON.parse(alldata['reportWidgets']); + for (const ib of reportWidgets) { + await redis.del(`widget:${gid}:${ib}`); + } + } + if (keys.includes('templates')) { + const templates = JSON.parse(alldata['templates']); + for (const template of templates) { + const key = `template:${gid}:${template}`; + const id = await redis.hGet(key, 'id'); + await redis.del(key); + if (id) 
deleteProjectTemplate(id);
+    }
+  }
+  if (keys.includes('algorithms')) {
+    const algorithms = JSON.parse(alldata['algorithms']);
+    for (const ib of algorithms) {
+      const key = `algo:${gid}:${ib}`;
+      if (publish) await redis.publish('algo.delete', key);
+      await redis.del(key);
+    }
+  }
+
+  await redis.del(rediskey);
+
+  const pdir = path.join(pluginPath, gid);
+  fs.rmSync(pdir, { recursive: true });
+}
+
+export async function installPlugin(tempDir: string) {
+  const gid = await validatePluginDirectory(tempDir);
+  const pdir = path.join(pluginPath, gid);
+
+  // validate path
+  const relative = path.relative(pluginPath, pdir);
+  if (relative.startsWith('.')) {
+    throw 'Invalid GID';
+  }
+  // if pdir already exists, remove the old dir
+  let isUpdate = false;
+  if (fs.existsSync(pdir)) {
+    isUpdate = true;
+    await deletePlugin(gid, false);
+  }
+  // kwk-v0.4.1 - changed rename to cp. The plugins folder needs to
+  // be mapped to a mounted volume for the test engine to access the uploaded
+  // plugin files. As the plugins folder is now on a different filesystem than
+  // the temp dir, rename is replaced with cp and rm, since rename doesn't
+  // work across filesystems.
+  fs.cpSync(tempDir, pdir, { recursive: true });
+  fs.rmSync(tempDir, { recursive: true, force: true });
+
+  const cached = {
+    reportWidgets: [] as ReportWidget[],
+    inputBlocks: [] as InputBlock[],
+    algorithms: [] as Algorithm[],
+    templates: [] as ProjectTemplateComponent[],
+  }; // clear the cache
+
+  const plugin = await scanPluginDirectory(pdir, cached);
+  plugin.isStock = stockPlugins.includes(gid);
+  if (cached.reportWidgets) plugin.reportWidgets = cached.reportWidgets;
+  if (cached.inputBlocks) plugin.inputBlocks = cached.inputBlocks;
+  if (cached.templates) plugin.templates = cached.templates;
+  if (cached.algorithms) {
+    plugin.algorithms = cached.algorithms;
+    // publish install message to redis
+    for (const algo of plugin.algorithms) {
+      redis.publish(
+        isUpdate ?
'algo.update' : 'algo.install', + `algo:${algo.gid}` + ); + } + } + + return plugin; +} + +export async function getByGID( + gid: string +): Promise< + AIFPlugin | InputBlock | ReportWidget | Algorithm | ProjectTemplate | null +> { + const key = await redis.keys(`*:${gid}`); + if (key.length == 0) return null; + const data = await redis.hGet(key[0], 'data'); + if (!data) return null; + const obj = JSON.parse(data); + switch (obj.type) { + case 'Algorithm': + obj.requirements = JSON.parse( + (await redis.hGet(key[0], 'requirements')) || '[]' + ); + obj.inputSchema = JSON.parse( + (await redis.hGet(key[0], 'inputSchema')) || '{}' + ); + obj.outputSchema = JSON.parse( + (await redis.hGet(key[0], 'outputSchema')) || '{}' + ); + return obj as Algorithm; + case 'InputBlock': + return obj as InputBlock; + case 'Template': + obj.data = JSON.parse((await redis.hGet(key[0], 'data2')) || '{}'); + obj.id = (await redis.hGet(key[0], 'id')) || null; + return obj as ProjectTemplate; + case 'ReportWidget': + return obj as ReportWidget; + default: + return null; + } +} + +async function checkPluginDir() { + if (!fs.existsSync(pluginPath)) { + await deletePluginKeysFromRedis(); + await redis.del(LAST_MODIFIED_KEY); + fs.mkdirSync(pluginPath); + return; + } + const mtime = await redis.hGet(LAST_MODIFIED_KEY, 'mtime'); + if (!mtime) { + await redis.hSet(LAST_MODIFIED_KEY, 'mtime', moment().toISOString()); + try { + await scanPluginDirectories(); + } catch (e) { + console.error('Scan plugins dir error', e); + } + } +} + +export async function getPlugins(populate = true): Promise { + // check whether plugins directory exists and populate the redis registries if empty + await checkPluginDir(); + + const keys = await redis.keys(`${PLUGIN_SET_PREFIX}:*`); + const plugins: AIFPlugin[] = []; + const inputBlocks: InputBlock[] = []; + const algorithms: Algorithm[] = []; + const reportWidgets: ReportWidget[] = []; + const templates: ProjectTemplate[] = []; + const _mymap: any = {}; + + for (const rediskey of keys) { + const gid = rediskey.slice(PLUGIN_SET_PREFIX.length + 1); + let plugin = {} as any; + if (populate) { + const allKV = await redis.hGetAll(rediskey); + plugin = JSON.parse(allKV['meta']); + + const getComponents = async (key: string) => { + if (!allKV[key]) return; + const obj = JSON.parse(allKV[key]); + plugin[key] = []; + for (const cid of obj as string[]) { + const comp = await getByGID(`${gid}:${cid}`); + if (comp) { + if (key !== 'templates') + _mymap[(comp as BasePluginComponent).gid] = plugin.version; + plugin[key].push(comp); + if (key === 'inputBlocks') inputBlocks.push(comp as InputBlock); + else if (key === 'algorithms') algorithms.push(comp as Algorithm); + else if (key === 'reportWidgets') + reportWidgets.push(comp as ReportWidget); + else if (key === 'templates') + templates.push(comp as ProjectTemplate); + } + } + }; + + await Promise.all([ + getComponents('reportWidgets'), + getComponents('inputBlocks'), + getComponents('algorithms'), + getComponents('templates'), + ]); + + plugin.isStock = stockPlugins.includes(gid); + try { + plugin.installedAt = parseInt(allKV['installedAt']); + } catch (err) { + console.log(toErrorWithMessage(err)); + } + plugins.push(plugin); + } else { + const meta = await redis.hGet(rediskey, 'meta'); + if (meta) plugins.push(JSON.parse(meta)); + } + } + + // Check widget dependencies + for (const widget of reportWidgets) { + let status = ReportWidgetStatus.OK; + if (widget.dependencies) { + for (const dep of widget.dependencies) { + dep.valid = false; + if 
(_mymap[dep.gid]) { + if (dep.version) + dep.valid = samver.satisfies(_mymap[dep.gid], dep.version); + else dep.valid = true; + } + if (!dep.valid) { + status = ReportWidgetStatus.MissingDependencies; + } + } + } + widget.status = status; + } + + plugins.sort((a, b) => (a.installedAt < b.installedAt ? -1 : 1)); + + return { + plugins, + inputBlocks, + algorithms, + templates, + stockPlugins, + }; +} + +export function isStockPlugin(gid: string) { + return stockPlugins.includes(gid); +} diff --git a/ai-verify-portal/server/redisClient.ts b/ai-verify-portal/server/redisClient.ts index 3b4f53a2e..eca6790be 100644 --- a/ai-verify-portal/server/redisClient.ts +++ b/ai-verify-portal/server/redisClient.ts @@ -1,26 +1,26 @@ -import { createClient } from 'redis'; - -const REDIS_URI = process.env.REDIS_URI -if (!REDIS_URI) { - throw new Error( - 'Please define the REDIS_URI environment variable inside .env.local' - ) -} - -const redisConnect = () => { - const redis = createClient({ - url: REDIS_URI - }); - redis.on('error', (err) => { - console.error('redis connection error:', err); - }); - redis.on('ready', () => { - // console.info('Message queue is ready') - }) - // if (NODE_ENV !== 'test') - redis.connect(); - return redis; -} -// const redis = connect(); - -export default redisConnect \ No newline at end of file +import { createClient } from 'redis'; + +const REDIS_URI = process.env.REDIS_URI; +if (!REDIS_URI) { + throw new Error( + 'Please define the REDIS_URI environment variable inside .env.local' + ); +} + +const redisConnect = () => { + const redis = createClient({ + url: REDIS_URI, + }); + redis.on('error', (err) => { + console.error('redis connection error:', err); + }); + redis.on('ready', () => { + // console.info('Message queue is ready') + }); + // if (NODE_ENV !== 'test') + redis.connect(); + return redis; +}; +// const redis = connect(); + +export default redisConnect; diff --git a/ai-verify-portal/src/@types/index.d.ts b/ai-verify-portal/src/@types/index.d.ts index 83b3fc0c1..6e95e7c99 100644 --- a/ai-verify-portal/src/@types/index.d.ts +++ b/ai-verify-portal/src/@types/index.d.ts @@ -1,8 +1,8 @@ -/* eslint-disable no-var */ - -declare global { - var mongoose: any; - var plugins: any; -} - -export {}; \ No newline at end of file +/* eslint-disable no-var */ + +declare global { + var mongoose: any; + var plugins: any; +} + +export {}; diff --git a/ai-verify-portal/src/components/HtmlTooltip.tsx b/ai-verify-portal/src/components/HtmlTooltip.tsx index d5a30b0e8..c7b915bff 100644 --- a/ai-verify-portal/src/components/HtmlTooltip.tsx +++ b/ai-verify-portal/src/components/HtmlTooltip.tsx @@ -1,20 +1,20 @@ -import * as React from 'react'; -import { styled } from '@mui/material/styles'; -import Tooltip, { TooltipProps, tooltipClasses } from '@mui/material/Tooltip'; - -const HtmlTooltip = styled(({ className, ...props }: TooltipProps) => ( - -))(({ theme }) => ({ - [`& .${tooltipClasses.tooltip}`]: { - // backgroundColor: '#f5f5f9', - backgroundColor: theme.palette.secondary.main, - // color: 'rgba(0, 0, 0, 0.87)', - color: theme.palette.secondary.contrastText, - // maxWidth: 800, - // width: 500, - fontSize: theme.typography.pxToRem(12), - border: '1px solid #dadde9', - }, -})); - -export default HtmlTooltip; +import * as React from 'react'; +import { styled } from '@mui/material/styles'; +import Tooltip, { TooltipProps, tooltipClasses } from '@mui/material/Tooltip'; + +const HtmlTooltip = styled(({ className, ...props }: TooltipProps) => ( + +))(({ theme }) => ({ + [`& 
.${tooltipClasses.tooltip}`]: { + // backgroundColor: '#f5f5f9', + backgroundColor: theme.palette.secondary.main, + // color: 'rgba(0, 0, 0, 0.87)', + color: theme.palette.secondary.contrastText, + // maxWidth: 800, + // width: 500, + fontSize: theme.typography.pxToRem(12), + border: '1px solid #dadde9', + }, +})); + +export default HtmlTooltip; diff --git a/ai-verify-portal/src/components/Link.tsx b/ai-verify-portal/src/components/Link.tsx index 1baf5f7d6..f1fbc3a28 100644 --- a/ai-verify-portal/src/components/Link.tsx +++ b/ai-verify-portal/src/components/Link.tsx @@ -10,31 +10,35 @@ const Anchor = styled('a')({}); interface NextLinkComposedProps extends Omit, 'href'>, - Omit { + Omit< + NextLinkProps, + 'href' | 'as' | 'onClick' | 'onMouseEnter' | 'onTouchStart' + > { to: NextLinkProps['href']; linkAs?: NextLinkProps['as']; } -export const NextLinkComposed = React.forwardRef( - function NextLinkComposed(props, ref) { - const { to, linkAs, replace, scroll, shallow, prefetch, locale, ...other } = props; +export const NextLinkComposed = React.forwardRef< + HTMLAnchorElement, + NextLinkComposedProps +>(function NextLinkComposed(props, ref) { + const { to, linkAs, replace, scroll, shallow, prefetch, locale, ...other } = + props; - return ( - - - - ); - }, -); + return ( + + + + ); +}); export type LinkProps = { activeClassName?: string; @@ -47,7 +51,10 @@ export type LinkProps = { // A styled version of the Next.js Link component: // https://nextjs.org/docs/api-reference/next/link -const Link = React.forwardRef(function Link(props, ref) { +const Link = React.forwardRef(function Link( + props, + ref +) { const { activeClassName = 'active', as, @@ -58,7 +65,6 @@ const Link = React.forwardRef(function Link(props, noLinkStyle, prefetch, replace, - role, // Link don't have roles. scroll, shallow, ...other @@ -71,7 +77,8 @@ const Link = React.forwardRef(function Link(props, }); const isExternal = - typeof href === 'string' && (href.indexOf('http') === 0 || href.indexOf('mailto:') === 0); + typeof href === 'string' && + (href.indexOf('http') === 0 || href.indexOf('mailto:') === 0); if (isExternal) { if (noLinkStyle) { @@ -82,10 +89,25 @@ const Link = React.forwardRef(function Link(props, } const linkAs = linkAsProp || as; - const nextjsProps = { to: href, linkAs, replace, scroll, shallow, prefetch, locale }; + const nextjsProps = { + to: href, + linkAs, + replace, + scroll, + shallow, + prefetch, + locale, + }; if (noLinkStyle) { - return ; + return ( + + ); } return ( diff --git a/ai-verify-portal/src/components/NeedHelp.tsx b/ai-verify-portal/src/components/NeedHelp.tsx index d6b029a2b..74b7fd6d9 100644 --- a/ai-verify-portal/src/components/NeedHelp.tsx +++ b/ai-verify-portal/src/components/NeedHelp.tsx @@ -1,22 +1,24 @@ -import IconButton from '@mui/material/IconButton'; -import InfoOutlinedIcon from '@mui/icons-material/InfoOutlined'; -import Box from '@mui/material/Box'; -import Typography from '@mui/material/Typography'; - -/** - * Generic Need Help? component - */ - -type Props = { - onClick?: React.MouseEventHandler, - testid?: string, -} - -export default function NeedHelp({ onClick, testid }: Props) { - return ( - - Need Help? - - - ) -} \ No newline at end of file +import IconButton from '@mui/material/IconButton'; +import InfoOutlinedIcon from '@mui/icons-material/InfoOutlined'; +import Box from '@mui/material/Box'; +import Typography from '@mui/material/Typography'; + +/** + * Generic Need Help? 
component + */ + +type Props = { + onClick?: React.MouseEventHandler; + testid?: string; +}; + +export default function NeedHelp({ onClick, testid }: Props) { + return ( + + Need Help? + + + + + ); +} diff --git a/ai-verify-portal/src/components/StyledMuiTooltip.tsx b/ai-verify-portal/src/components/StyledMuiTooltip.tsx index f90f3c6cc..f1d6c522d 100644 --- a/ai-verify-portal/src/components/StyledMuiTooltip.tsx +++ b/ai-verify-portal/src/components/StyledMuiTooltip.tsx @@ -1,18 +1,18 @@ import { styled } from '@mui/material/styles'; import Tooltip, { TooltipProps } from '@mui/material/Tooltip'; -type StyledTooltipProps = TooltipProps +type StyledTooltipProps = TooltipProps; const StyledTooltip = styled((props: StyledTooltipProps) => { - const { className, ...restOfProps } = props; - return + const { className, ...restOfProps } = props; + return ; })(() => ({ '& .MuiTooltip-tooltip': { background: '#702F8A', '&.MuiTooltip-tooltipPlacementTop': { marginTop: '-10px', }, - } + }, })); -export { StyledTooltip } +export { StyledTooltip }; diff --git a/ai-verify-portal/src/components/alertBox/index.tsx b/ai-verify-portal/src/components/alertBox/index.tsx index 97336df43..71bd78baf 100644 --- a/ai-verify-portal/src/components/alertBox/index.tsx +++ b/ai-verify-portal/src/components/alertBox/index.tsx @@ -10,16 +10,16 @@ enum AlertBoxSize { SMALL = 's', MEDIUM = 'm', LARGE = 'l', - AUTO = 'auto' + AUTO = 'auto', } enum AlertBoxFixedPositions { - CENTER = 'center' + CENTER = 'center', } -type DraggableAbsolutionPositon = {x: number, y: number} +type DraggableAbsolutionPositon = { x: number; y: number }; -type CSSAbsolutePosition = Pick +type CSSAbsolutePosition = Pick; type AlertBoxProps = { id?: string; @@ -33,22 +33,22 @@ type AlertBoxProps = { enableModalOverlay?: boolean; renderInPortal?: boolean; onCloseIconClick?: () => void; -} +}; type AlertBoxHeaderProps = { - heading?: string, - isDragHandle?: boolean, -} + heading?: string; + isDragHandle?: boolean; +}; type AlertBoxBodyProps = { bodyStyles?: React.CSSProperties; hasFooter?: boolean; -} +}; type AlertBoxFooterProps = { footerStyles?: React.CSSProperties; hasFooter?: boolean; -} +}; const portalDivId = 'aivModal'; @@ -69,8 +69,11 @@ function AlertBox(props: PropsWithChildren) { } = props; const sizeModifier = `alertBox_${size}`; - const positionModifier = defaultPosition || draggable ? 'absolute_pos' : `fixed_${fixedPosition}`; - const modalModfier = enableModalOverlay ? 'with_modalOverlay' : 'without_modalOverlay'; + const positionModifier = + defaultPosition || draggable ? 'absolute_pos' : `fixed_${fixedPosition}`; + const modalModfier = enableModalOverlay + ? 'with_modalOverlay' + : 'without_modalOverlay'; const inlineStyles = { ...containerStyles }; const dragHandleClassName = dragHandle || 'alertbox-dragHandle'; @@ -80,17 +83,44 @@ function AlertBox(props: PropsWithChildren) { } if (draggable) { - return <> - {enableModalOverlay ?
: null} - -
+ {enableModalOverlay ?
: null} + +
+ + {children} +
+
+ + ); + } + + if (enableModalOverlay && renderInPortal) { + return ( + + {enableModalOverlay ?
: null} +
@@ -101,14 +131,15 @@ function AlertBox(props: PropsWithChildren) { /> {children}
- - + + ); } - if (enableModalOverlay && renderInPortal) { - return + return ( + <> {enableModalOverlay ?
: null} -
) { /> {children}
- - } - - return <> - {enableModalOverlay ?
: null} -
- - {children} -
- + + ); } function Header(props: PropsWithChildren) { const { heading, isDragHandle = false, children } = props; - return
-
{heading}
- {children} -
+ isDragHandle ? 'alertbox-dragHandle' : null, + isDragHandle ? styles.header_dragHandle : null + )}> +
{heading}
+ {children} +
+ ); } function Body(props: PropsWithChildren) { const { bodyStyles, children, hasFooter = false } = props; - return
- {children} -
+ return ( +
+ {children} +
+ ); } function Footer(props: PropsWithChildren) { const { footerStyles, children } = props; - return
+ return ( +
{children} -
+
+  );
 }
 
 AlertBox.Body = Body;
 AlertBox.Header = Header;
 AlertBox.Footer = Footer;
 
-export { AlertBox, AlertBoxSize, AlertBoxFixedPositions }
-export type { DraggableAbsolutionPositon }
\ No newline at end of file
+export { AlertBox, AlertBoxSize, AlertBoxFixedPositions };
+export type { DraggableAbsolutionPositon };
diff --git a/ai-verify-portal/src/components/alertBox/readme.md b/ai-verify-portal/src/components/alertBox/readme.md
index 42360ed46..55880c5d3 100644
--- a/ai-verify-portal/src/components/alertBox/readme.md
+++ b/ai-verify-portal/src/components/alertBox/readme.md
@@ -2,25 +2,24 @@
 
 ![Alert Popup / Dialog](./img/alert-popup.png)
 
-This is an alert box component which has building blocks for making dialog boxes
-* A display component (consume props and has no state).
-* Compound component consisting of `Alert`, `Header`, `Body`, `Footer` sub-components.
-* Uses 3rd-party `react draggable` library for draggable feature
-* `Header`, `Body`, `Footer` are optional. Use them to leverage baked-in margins, paddings and layout properties.
-* Consumer populates Header, Body and Footer with children components.
-
-The box has 3 fixed sizes - `AlertBoxSize.SMALL`, `AlertBoxSize.MEDIUM`, `AlertBoxSize.LARGE`
+This is an alert box component which has building blocks for making dialog boxes.
+- A display component (consumes props and has no state).
+- Compound component consisting of `Alert`, `Header`, `Body`, `Footer` sub-components.
+- Uses the 3rd-party `react-draggable` library for the draggable feature.
+- `Header`, `Body`, `Footer` are optional. Use them to leverage baked-in margins, paddings and layout properties.
+- Consumer populates Header, Body and Footer with children components.
+The box has 3 fixed sizes - `AlertBoxSize.SMALL`, `AlertBoxSize.MEDIUM`, `AlertBoxSize.LARGE`
 
 ## Props
 
-| Prop | Value | Description |
-| :--- | :---- | :---- |
-| size | enum AlertBoxSize | (optional) default - medium.
-| draggable | boolean | (optional) default - false. Box is can be dragged by mouse if true.
-| fixedPosition | enum AlertBoxFixedPositions | (optional) Currently, only supports `AlertBoxFixedPositions.CENTER`. This will float the box on center of screen. If not set, box will be relatively positioned to parent and other screen elements. This prop will be ignored if `draggable` or `defaultPosition` is set.
-| more props yet to document - todo... | |
+| Prop                                 | Value                       | Description                                                                                                                                                                                                                                                                |
+| :----------------------------------- | :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| size                                 | enum AlertBoxSize           | (optional) default - medium.                                                                                                                                                                                                                                               |
+| draggable                            | boolean                     | (optional) default - false. Box can be dragged with the mouse if true.                                                                                                                                                                                                     |
+| fixedPosition                        | enum AlertBoxFixedPositions | (optional) Currently, only supports `AlertBoxFixedPositions.CENTER`. This will float the box on center of screen. If not set, box will be relatively positioned to parent and other screen elements. This prop will be ignored if `draggable` or `defaultPosition` is set. |
+| more props yet to document - todo... |                             |                                                                                                                                                                                                                                                                            |
 
 ### Usage
 
@@ -45,7 +44,7 @@ function Example() {
   }, [])
 
   return
- {showAlertBox ? + {showAlertBox ? } -``` \ No newline at end of file +``` diff --git a/ai-verify-portal/src/components/alertBox/styles/alertBox.module.css b/ai-verify-portal/src/components/alertBox/styles/alertBox.module.css index e4ebe001e..2e865ec81 100644 --- a/ai-verify-portal/src/components/alertBox/styles/alertBox.module.css +++ b/ai-verify-portal/src/components/alertBox/styles/alertBox.module.css @@ -14,12 +14,12 @@ z-index: 1002; background-color: white; box-sizing: border-box; - box-shadow: 0px 0px 10px 0px rgba(0,0,0,0.1); + box-shadow: 0px 0px 10px 0px rgba(0, 0, 0, 0.1); border-radius: 5px; } .alertBox.with_modalOverlay { - box-shadow: 0px 0px 10px 4px rgba(0,0,0,0.2); + box-shadow: 0px 0px 10px 4px rgba(0, 0, 0, 0.2); } .fixed_center { @@ -62,11 +62,11 @@ } .header_dragHandle { - cursor: move + cursor: move; } .header_dragHandle:active { - cursor: move + cursor: move; } .alertBoxBody { @@ -79,12 +79,12 @@ .alertBoxBody::-webkit-scrollbar { width: 8px; - background-color: rgb(224, 224, 224); + background-color: rgb(224, 224, 224); border-radius: 5px; } .alertBoxBody::-webkit-scrollbar-thumb { - background-color: rgb(190, 189, 189); + background-color: rgb(190, 189, 189); border-radius: 5px; } @@ -100,7 +100,6 @@ margin-right: 5px; } - /* Extra Small modifier */ .alertBox.alertBox_xs { @@ -130,7 +129,6 @@ height: 45px; } - /* Small modifier */ .alertBox.alertBox_s { @@ -160,8 +158,6 @@ height: 45px; } - - /* Medium modifier */ .alertBox.alertBox_m { width: 600px; @@ -190,8 +186,6 @@ height: 45px; } - - /* Large modifier */ .alertBox.alertBox_l { width: 900px; @@ -220,7 +214,6 @@ height: 55px; } - /* Auto modifier */ .alertBox.alertBox_auto { width: 550px; @@ -248,5 +241,3 @@ .alertBox_auto .alertBoxFooter { height: 45px; } - - diff --git a/ai-verify-portal/src/components/baseMuiAccordionSummary.tsx b/ai-verify-portal/src/components/baseMuiAccordionSummary.tsx index 395fa8238..2fccfe338 100644 --- a/ai-verify-portal/src/components/baseMuiAccordionSummary.tsx +++ b/ai-verify-portal/src/components/baseMuiAccordionSummary.tsx @@ -1,32 +1,41 @@ import React from 'react'; import { styled } from '@mui/material/styles'; -import MuiAccordionSummary, { AccordionSummaryProps } from '@mui/material/AccordionSummary'; +import MuiAccordionSummary, { + AccordionSummaryProps, +} from '@mui/material/AccordionSummary'; import ExpandMoreIcon from '@mui/icons-material/ExpandMore'; import styles from './styles/muiaccordion.module.css'; - /* .MuiAccordionSummary-content cannot be css selected and styled using normal css. So we use a styled component to achieve it. Leverage this component to bake-in flex container. */ type BaseMuiAccordionSummaryProps = AccordionSummaryProps & { - expandiconstyles?: React.CSSProperties -} + expandiconstyles?: React.CSSProperties; +}; -const BaseMuiAccordionSummary = styled((props: BaseMuiAccordionSummaryProps) => { - const { expandiconstyles, children } = props; - return ( - } - {...props}> -
{children}
-
-)})(() => ({ +const BaseMuiAccordionSummary = styled( + (props: BaseMuiAccordionSummaryProps) => { + const { expandiconstyles, children } = props; + return ( + + } + {...props}> +
{children}
+
+ ); + } +)(() => ({ '& .MuiAccordionSummary-content': { margin: '5px', marginLeft: '10px', }, })); -export { BaseMuiAccordionSummary }; \ No newline at end of file +export { BaseMuiAccordionSummary }; diff --git a/ai-verify-portal/src/components/clientOnlyPortal.ts b/ai-verify-portal/src/components/clientOnlyPortal.ts index 72659d637..510f3947e 100644 --- a/ai-verify-portal/src/components/clientOnlyPortal.ts +++ b/ai-verify-portal/src/components/clientOnlyPortal.ts @@ -2,20 +2,20 @@ import { useRef, useEffect, useState, PropsWithChildren } from 'react'; import { createPortal } from 'react-dom'; type ClientOnlyPortalProps = { - selector: string -} + selector: string; +}; function ClientOnlyPortal(props: PropsWithChildren) { const { children, selector } = props; - const ref = useRef() - const [mounted, setMounted] = useState(false) + const ref = useRef(); + const [mounted, setMounted] = useState(false); useEffect(() => { - ref.current = document.querySelector(selector) - setMounted(true) - }, [selector]) + ref.current = document.querySelector(selector); + setMounted(true); + }, [selector]); - return mounted && ref.current ? createPortal(children, ref.current) : null + return mounted && ref.current ? createPortal(children, ref.current) : null; } -export { ClientOnlyPortal } \ No newline at end of file +export { ClientOnlyPortal }; diff --git a/ai-verify-portal/src/components/codeBlock.tsx b/ai-verify-portal/src/components/codeBlock.tsx index ec97e79c4..1a865f8a1 100644 --- a/ai-verify-portal/src/components/codeBlock.tsx +++ b/ai-verify-portal/src/components/codeBlock.tsx @@ -1,21 +1,21 @@ -import * as React from 'react'; - -import OutlinedInput from '@mui/material/OutlinedInput'; - -type Props = { - text?: string, -} - -export default function CodeBlock({ text }: Props) { - return ( - - ) -} \ No newline at end of file +import * as React from 'react'; + +import OutlinedInput from '@mui/material/OutlinedInput'; + +type Props = { + text?: string; +}; + +export default function CodeBlock({ text }: Props) { + return ( + + ); +} diff --git a/ai-verify-portal/src/components/confirmationDialog.tsx b/ai-verify-portal/src/components/confirmationDialog.tsx index ec72a9b4b..c0ef16931 100644 --- a/ai-verify-portal/src/components/confirmationDialog.tsx +++ b/ai-verify-portal/src/components/confirmationDialog.tsx @@ -1,60 +1,76 @@ - -import { AlertBox, AlertBoxFixedPositions, AlertBoxSize } from './alertBox'; - - -type CustomDialogProps = { - size?: AlertBoxSize - primaryBtnText?: string - showOKBtn?: boolean - title: string - message?: string - children?: React.ReactElement - onClose: (confirm: boolean) => void -} - -function ConfirmationDialog(props: CustomDialogProps) { - const { - size = AlertBoxSize.SMALL, - primaryBtnText = 'Proceed', - showOKBtn = false, - title, - children, - message, - onClose, - } = props; - - function handleCancel() { - if (onClose) - onClose(false); - } - - function handleConfirm() { - if (onClose) - onClose(true); - } - - return ( - - - -
{message}
-
{children}
-
- -
- {showOKBtn ? : -
- - -
} -
-
-
- ) -} - -export default ConfirmationDialog; \ No newline at end of file +import { AlertBox, AlertBoxFixedPositions, AlertBoxSize } from './alertBox'; + +type CustomDialogProps = { + size?: AlertBoxSize; + primaryBtnText?: string; + showOKBtn?: boolean; + title: string; + message?: string; + children?: React.ReactElement; + onClose: (confirm: boolean) => void; +}; + +function ConfirmationDialog(props: CustomDialogProps) { + const { + size = AlertBoxSize.SMALL, + primaryBtnText = 'Proceed', + showOKBtn = false, + title, + children, + message, + onClose, + } = props; + + function handleCancel() { + if (onClose) onClose(false); + } + + function handleConfirm() { + if (onClose) onClose(true); + } + + return ( + + + +
{message}
+
{children}
+
+ +
+ {showOKBtn ? ( + + ) : ( +
+ + +
+ )} +
+
+
+ ); +} + +export default ConfirmationDialog; diff --git a/ai-verify-portal/src/components/icon/iconNames.ts b/ai-verify-portal/src/components/icon/iconNames.ts index 58e40ca1d..fff1c9f38 100644 --- a/ai-verify-portal/src/components/icon/iconNames.ts +++ b/ai-verify-portal/src/components/icon/iconNames.ts @@ -3,4 +3,4 @@ export enum IconName { CHEVRON_LEFT, TRASH, PIPELINE, -} \ No newline at end of file +} diff --git a/ai-verify-portal/src/components/icon/index.tsx b/ai-verify-portal/src/components/icon/index.tsx index 8d90ff627..08be53cad 100644 --- a/ai-verify-portal/src/components/icon/index.tsx +++ b/ai-verify-portal/src/components/icon/index.tsx @@ -3,46 +3,43 @@ import { IconName } from './iconNames'; import { ChevronLeft, PdfSvg, Trash, Pipeline } from './svg'; type IconProps = { - name: IconName - style?: React.CSSProperties - color?: string - size?: number - onClick?: () => void -} + name: IconName; + style?: React.CSSProperties; + color?: string; + size?: number; + onClick?: () => void; +}; function Icon(props: PropsWithChildren) { - const { - name, - style, - color='#484747', - size = 35, - onClick - } = props; + const { name, style, color = '#484747', size = 35, onClick } = props; function SvgComponent() { switch (name) { case IconName.PDF: - return + return ; case IconName.CHEVRON_LEFT: - return + return ; case IconName.TRASH: - return + return ; case IconName.PIPELINE: - return + return ; default: - return null + return null; } } - return
- -
+ +
+ ); } -export { Icon }; \ No newline at end of file +export { Icon }; diff --git a/ai-verify-portal/src/components/icon/svg/chevronLeft.tsx b/ai-verify-portal/src/components/icon/svg/chevronLeft.tsx index e30d82366..8de4c3402 100644 --- a/ai-verify-portal/src/components/icon/svg/chevronLeft.tsx +++ b/ai-verify-portal/src/components/icon/svg/chevronLeft.tsx @@ -2,13 +2,18 @@ import { SvgIconProps } from './svgIconProps'; export function ChevronLeft(props: SvgIconProps) { const { size, color } = props; - return - - -} \ No newline at end of file + return ( + + + + ); +} diff --git a/ai-verify-portal/src/components/icon/svg/index.ts b/ai-verify-portal/src/components/icon/svg/index.ts index f0f65dcc5..2a70ba936 100644 --- a/ai-verify-portal/src/components/icon/svg/index.ts +++ b/ai-verify-portal/src/components/icon/svg/index.ts @@ -3,4 +3,4 @@ import { ChevronLeft } from './chevronLeft'; import { Trash } from './trash'; import { Pipeline } from './pipeline'; -export { PdfSvg, ChevronLeft, Trash, Pipeline }; \ No newline at end of file +export { PdfSvg, ChevronLeft, Trash, Pipeline }; diff --git a/ai-verify-portal/src/components/icon/svg/pdf.tsx b/ai-verify-portal/src/components/icon/svg/pdf.tsx index c6f0bbae0..08a596d76 100644 --- a/ai-verify-portal/src/components/icon/svg/pdf.tsx +++ b/ai-verify-portal/src/components/icon/svg/pdf.tsx @@ -2,22 +2,24 @@ import { SvgIconProps } from './svgIconProps'; export function PdfSvg(props: SvgIconProps) { const { size, color } = props; - return - - - - - - - - - - - -} \ No newline at end of file + return ( + + + + + + + + + + + + + ); +} diff --git a/ai-verify-portal/src/components/icon/svg/pipeline.tsx b/ai-verify-portal/src/components/icon/svg/pipeline.tsx index 5f06820de..7f693a34b 100644 --- a/ai-verify-portal/src/components/icon/svg/pipeline.tsx +++ b/ai-verify-portal/src/components/icon/svg/pipeline.tsx @@ -2,13 +2,15 @@ import { SvgIconProps } from './svgIconProps'; export function Pipeline(props: SvgIconProps) { const { size, color } = props; - return - - -} \ No newline at end of file + return ( + + + + ); +} diff --git a/ai-verify-portal/src/components/icon/svg/svgIconProps.ts b/ai-verify-portal/src/components/icon/svg/svgIconProps.ts index 8b75087f9..08ea01165 100644 --- a/ai-verify-portal/src/components/icon/svg/svgIconProps.ts +++ b/ai-verify-portal/src/components/icon/svg/svgIconProps.ts @@ -1,4 +1,4 @@ export type SvgIconProps = { - size: number, - color: string -} \ No newline at end of file + size: number; + color: string; +}; diff --git a/ai-verify-portal/src/components/icon/svg/trash.tsx b/ai-verify-portal/src/components/icon/svg/trash.tsx index d48e67894..07cdf02c2 100644 --- a/ai-verify-portal/src/components/icon/svg/trash.tsx +++ b/ai-verify-portal/src/components/icon/svg/trash.tsx @@ -2,13 +2,15 @@ import { SvgIconProps } from './svgIconProps'; export function Trash(props: SvgIconProps) { const { size, color } = props; - return - - -} \ No newline at end of file + return ( + + + + ); +} diff --git a/ai-verify-portal/src/components/iconButton/index.tsx b/ai-verify-portal/src/components/iconButton/index.tsx index f98122029..4b701350a 100644 --- a/ai-verify-portal/src/components/iconButton/index.tsx +++ b/ai-verify-portal/src/components/iconButton/index.tsx @@ -4,21 +4,20 @@ import { PropsWithChildren, FC } from 'react'; import clsx from 'clsx'; import styles from './styles/iconButton.module.css'; - type IconProps = { - style?: React.CSSProperties -} + style?: React.CSSProperties; +}; type IconButtonProps = { - iconComponent?: FC - 
rounded?: boolean - iconComponentStyle?: React.CSSProperties - iconFontSize?: number - noOutline?: boolean - disabled?: boolean - style?: React.CSSProperties, - onClick?: () => void, -} + iconComponent?: FC; + rounded?: boolean; + iconComponentStyle?: React.CSSProperties; + iconFontSize?: number; + noOutline?: boolean; + disabled?: boolean; + style?: React.CSSProperties; + onClick?: () => void; +}; function IconButton(props: PropsWithChildren) { const { @@ -39,25 +38,31 @@ function IconButton(props: PropsWithChildren) { } } - return + return ( + + ); } -export { IconButton }; \ No newline at end of file +export { IconButton }; diff --git a/ai-verify-portal/src/components/iconButton/styles/iconButton.module.css b/ai-verify-portal/src/components/iconButton/styles/iconButton.module.css index 49496084a..08f6c1798 100644 --- a/ai-verify-portal/src/components/iconButton/styles/iconButton.module.css +++ b/ai-verify-portal/src/components/iconButton/styles/iconButton.module.css @@ -9,7 +9,7 @@ .iconBtn:hover { transition-property: 'background-color'; transition-timing-function: ease-in-out; - transition-duration: 0.2s; + transition-duration: 0.2s; background-color: #d6d6d6; } @@ -34,7 +34,7 @@ .iconBtn__noOutline:hover { transition-property: 'background-color'; transition-timing-function: ease-in-out; - transition-duration: 0.2s; + transition-duration: 0.2s; background-color: #f1f1f1; } diff --git a/ai-verify-portal/src/components/listMenu/index.tsx b/ai-verify-portal/src/components/listMenu/index.tsx index 7085d2e39..d8aebfde8 100644 --- a/ai-verify-portal/src/components/listMenu/index.tsx +++ b/ai-verify-portal/src/components/listMenu/index.tsx @@ -6,54 +6,46 @@ type ListMenuProps = { containerStyles?: React.CSSProperties; onMouseEnter?: (e: React.MouseEvent) => void; onMouseLeave?: (e: React.MouseEvent) => void; -} +}; type ListMenuItemProps = { id: string; displayText: string; style?: React.CSSProperties; onClick: (id: string) => void; -} +}; function ListMenuItem(props: PropsWithChildren) { - const { - id, - style, - displayText, - children, - onClick - } = props; + const { id, style, displayText, children, onClick } = props; function handleOnClick(id: string) { return () => { if (onClick && typeof onClick === 'function') { onClick(id); } - } + }; } - return
-
-
{displayText}
-
{children}
+ return ( +
+
+
{displayText}
+
{children}
+
-
+ ); } function ListMenu(props: PropsWithChildren) { - const { - containerStyles, - children, - onMouseEnter, - onMouseLeave, - } = props; - + const { containerStyles, children, onMouseEnter, onMouseLeave } = props; + function handleMouseEnter(e: React.MouseEvent) { e.stopPropagation(); if (onMouseEnter && typeof onMouseEnter === 'function') { - onMouseEnter(e) + onMouseEnter(e); } } @@ -64,12 +56,15 @@ function ListMenu(props: PropsWithChildren) { } } - return
-    {children}
-
+  return (
+      {children}
+
+ ); } -export { ListMenu, ListMenuItem }; \ No newline at end of file +export { ListMenu, ListMenuItem }; diff --git a/ai-verify-portal/src/components/listMenu/styles/listMenu.module.css b/ai-verify-portal/src/components/listMenu/styles/listMenu.module.css index fe607016f..710f88481 100644 --- a/ai-verify-portal/src/components/listMenu/styles/listMenu.module.css +++ b/ai-verify-portal/src/components/listMenu/styles/listMenu.module.css @@ -1,5 +1,5 @@ .listMenuContainer { - box-shadow: 0px 0px 8px 0px rgba(0,0,0,0.2); + box-shadow: 0px 0px 8px 0px rgba(0, 0, 0, 0.2); min-width: 190px; background: var(--color-white); border-radius: 3px; @@ -22,4 +22,4 @@ .menuItemContent { display: flex; -} \ No newline at end of file +} diff --git a/ai-verify-portal/src/components/mySelect.tsx b/ai-verify-portal/src/components/mySelect.tsx index 6f1d95bac..5fdbf2ab3 100644 --- a/ai-verify-portal/src/components/mySelect.tsx +++ b/ai-verify-portal/src/components/mySelect.tsx @@ -1,48 +1,60 @@ import * as React from 'react'; -import OutlinedInput from '@mui/material/OutlinedInput'; import FormControl from '@mui/material/FormControl'; import Typography from '@mui/material/Typography'; import FormHelperText from '@mui/material/FormHelperText'; import MenuItem from '@mui/material/MenuItem'; -import Select from '@mui/material/Select'; +import Select, { SelectProps } from '@mui/material/Select'; +import { SxProps } from '@mui/material'; type Props = { - title?: string, - description?: string, - id?: string, - inputProps?: any, - errorText?: string|null, - FormControlProps?: any, - items: any, - isDisabled?: boolean, -} + title?: string; + description?: string; + id?: string; + inputProps?: SelectProps; + errorText?: string | null; + FormControlProps?: { sx?: SxProps }; + items: string[]; + isDisabled?: boolean; +}; -function MySelect ({ title, description, id, inputProps={}, FormControlProps={}, errorText, items={}, isDisabled}: Props) { +function MySelect({ + title, + description, + id, + inputProps = {}, + FormControlProps = {}, + errorText, + items = [], + isDisabled, +}: Props) { return ( - - {title && {title}} - {description && {description}} - - {items && items.map((item: string) => { + disabled={isDisabled}> + {items && + items.map((item: string) => { return ( - {item} - ) - })} - + + {item} + + ); + })} + {errorText && {errorText}} - ) + ); } -export default MySelect; \ No newline at end of file +export default MySelect; diff --git a/ai-verify-portal/src/components/myTextField.tsx b/ai-verify-portal/src/components/myTextField.tsx index 9170af721..c2b4051a9 100644 --- a/ai-verify-portal/src/components/myTextField.tsx +++ b/ai-verify-portal/src/components/myTextField.tsx @@ -1,36 +1,44 @@ -import OutlinedInput from '@mui/material/OutlinedInput'; -import FormControl from '@mui/material/FormControl'; -import Typography from '@mui/material/Typography'; -import FormHelperText from '@mui/material/FormHelperText'; - -type Props = { - title?: string, - description?: string, - id?: string, - inputProps?: any, - errorText?: string|null, - FormControlProps?: any, -} - -function MyTextField ({ title, description, id, inputProps={}, FormControlProps={}, errorText }: Props) { - return ( - - {title && {title}} - {description && {description}} - - {errorText && {errorText}} - - ) -} - -export default MyTextField; +import OutlinedInput from '@mui/material/OutlinedInput'; +import FormControl from '@mui/material/FormControl'; +import Typography from '@mui/material/Typography'; +import FormHelperText from 
'@mui/material/FormHelperText'; + +type Props = { + title?: string; + description?: string; + id?: string; + inputProps?: any; + errorText?: string | null; + FormControlProps?: any; +}; + +function MyTextField({ + title, + description, + id, + inputProps = {}, + FormControlProps = {}, + errorText, +}: Props) { + return ( + + {title && {title}} + {description && ( + + {description} + + )} + + {errorText && {errorText}} + + ); +} + +export default MyTextField; diff --git a/ai-verify-portal/src/components/pageLeverlErrorAlert.tsx b/ai-verify-portal/src/components/pageLeverlErrorAlert.tsx index 92dd9bf2e..301a6300f 100644 --- a/ai-verify-portal/src/components/pageLeverlErrorAlert.tsx +++ b/ai-verify-portal/src/components/pageLeverlErrorAlert.tsx @@ -1,30 +1,35 @@ -import { AlertType, StandardAlert } from "src/components/standardAlerts"; +import { AlertType, StandardAlert } from 'src/components/standardAlerts'; type PageLevelErrorAlertProps = { - error: Error, - headingText: string, - content: string, -} + error: Error; + headingText: string; + content: string; +}; function PageLevelErrorAlert(props: PageLevelErrorAlertProps) { const { error, headingText, content } = props; - return
- -
-
{content}
- {error ? -
(Err: {error.message})
- : null} -
-
-
+ return ( +
+ +
+
{content}
+ {error ? ( +
+ (Err: {error.message}) +
+ ) : null} +
+
+
+ ); } -export { PageLevelErrorAlert } \ No newline at end of file +export { PageLevelErrorAlert }; diff --git a/ai-verify-portal/src/components/standardAlerts/index.tsx b/ai-verify-portal/src/components/standardAlerts/index.tsx index aa673298d..81e2c853f 100644 --- a/ai-verify-portal/src/components/standardAlerts/index.tsx +++ b/ai-verify-portal/src/components/standardAlerts/index.tsx @@ -7,26 +7,26 @@ import React, { PropsWithChildren } from 'react'; import clsx from 'clsx'; enum AlertType { - ERROR='error', - WARNING='warning', - INFO='info', - SUCCESS='success' + ERROR = 'error', + WARNING = 'warning', + INFO = 'info', + SUCCESS = 'success', } type StandardAlertProps = { - alertType: AlertType, - disableCloseIcon?: boolean, - headingText?: string, - style?: React.CSSProperties, - iconStyle?: React.CSSProperties, - headingStyle?: React.CSSProperties, - onCloseIconClick?: () => void, -} + alertType: AlertType; + disableCloseIcon?: boolean; + headingText?: string; + style?: React.CSSProperties; + iconStyle?: React.CSSProperties; + headingStyle?: React.CSSProperties; + onCloseIconClick?: () => void; +}; type AlertIconProps = { - type: AlertType, - style?: React.CSSProperties -} + type: AlertType; + style?: React.CSSProperties; +}; function AlertIcon(props: AlertIconProps) { const { type, style } = props; @@ -34,13 +34,33 @@ function AlertIcon(props: AlertIconProps) { switch (type) { case AlertType.ERROR: - return + return ( + + ); case AlertType.WARNING: - return + return ( + + ); case AlertType.INFO: - return + return ( + + ); case AlertType.SUCCESS: - return + return ( + + ); default: return null; } @@ -55,7 +75,7 @@ function StandardAlert(props: PropsWithChildren) { iconStyle, headingStyle, onCloseIconClick, - children + children, } = props; const customAlertBoxStyles: React.CSSProperties = { @@ -67,23 +87,29 @@ function StandardAlert(props: PropsWithChildren) { height: 'auto', ...style, }; - const customIconStyles: React.CSSProperties = disableCloseIcon ? { display: 'none' } : { color: 'lightGray' }; + const customIconStyles: React.CSSProperties = disableCloseIcon + ? { display: 'none' } + : { color: 'lightGray' }; const modifierClass = `type_${alertType}`; - return + return ( +
- +
-
{headingText}
+
+ {headingText} +
{children}
-
+
+ ); } -export { StandardAlert, AlertType } \ No newline at end of file +export { StandardAlert, AlertType }; diff --git a/ai-verify-portal/src/components/standardAlerts/readme.md b/ai-verify-portal/src/components/standardAlerts/readme.md index d81c07cd5..a6aee6364 100644 --- a/ai-verify-portal/src/components/standardAlerts/readme.md +++ b/ai-verify-portal/src/components/standardAlerts/readme.md @@ -5,24 +5,24 @@ ![Info Alert](./img/alert-info.png) ![Success Alert](./img/alert-success.png) -This is a basic alert message component. -* A display component (consume props and has no state). -* Composed using `AlertBox` component. +This is a basic alert message component. + +- A display component (consume props and has no state). +- Composed using `AlertBox` component. The width takes up 100% of the parent block elemet width. If you want to specify width, the recommended approach is to wrap the component in a `
` which has a width css property set. Another approach is to use the `style` prop. The height is automatic based on content. - ## Props -| Prop | Value | Description | -| :--- | :----: | :----: | -| alertType | enum AlertType | (Required) Sets the type of alert - error, warning, info, success -| disableCloseIcon | boolean | (optional) default - false. Hides the 'X' close icon if tru -| headingText | string | (optional) The bold text heading -| style | CSS Properties Object | (optional) CSS styles to override the alert container CSS properties -| onCloseIconClick | function | (optional) function that executes when close icon is clicked +| Prop | Value | Description | +| :--------------- | :-------------------: | :------------------------------------------------------------------: | +| alertType | enum AlertType | (Required) Sets the type of alert - error, warning, info, success | +| disableCloseIcon | boolean | (optional) default - false. Hides the 'X' close icon if tru | +| headingText | string | (optional) The bold text heading | +| style | CSS Properties Object | (optional) CSS styles to override the alert container CSS properties | +| onCloseIconClick | function | (optional) function that executes when close icon is clicked | ### Usage @@ -42,7 +42,7 @@ function Example() { }, []) return
- {showAlert ? + {showAlert ? } -``` \ No newline at end of file +``` diff --git a/ai-verify-portal/src/components/standardAlerts/styles/standardAlerts.module.css b/ai-verify-portal/src/components/standardAlerts/styles/standardAlerts.module.css index 42db13122..d0eb9245d 100644 --- a/ai-verify-portal/src/components/standardAlerts/styles/standardAlerts.module.css +++ b/ai-verify-portal/src/components/standardAlerts/styles/standardAlerts.module.css @@ -49,4 +49,4 @@ .icon_warning { color: var(--color-alert-warning); -} \ No newline at end of file +} diff --git a/ai-verify-portal/src/components/styledButton.tsx b/ai-verify-portal/src/components/styledButton.tsx deleted file mode 100644 index 110b24337..000000000 --- a/ai-verify-portal/src/components/styledButton.tsx +++ /dev/null @@ -1,16 +0,0 @@ -import * as React from 'react'; -import { styled } from '@mui/material/styles'; -import Button, { ButtonProps } from '@mui/material/Button'; - -const StyledButton = styled(Button)(({ theme }) => ({ - padding: '18px', - fontSize: '18px', - fontWeight: 700, - textTransform: 'none', - backgroundColor: '#4B0965', - '&:contained': { - backgroundColor: 'red', - } -})) as typeof Button; - -export default StyledButton; \ No newline at end of file diff --git a/ai-verify-portal/src/components/styles/muiaccordion.module.css b/ai-verify-portal/src/components/styles/muiaccordion.module.css index ab592b521..d3107ea2e 100644 --- a/ai-verify-portal/src/components/styles/muiaccordion.module.css +++ b/ai-verify-portal/src/components/styles/muiaccordion.module.css @@ -1,5 +1,5 @@ .expandIcon { - color: #FFFFFF; + color: #ffffff; font-size: 1.1rem; } @@ -8,4 +8,4 @@ align-items: center; width: 100%; font-size: 16px; -} \ No newline at end of file +} diff --git a/ai-verify-portal/src/components/textArea/index.tsx b/ai-verify-portal/src/components/textArea/index.tsx index c7d16b10a..0e0209340 100644 --- a/ai-verify-portal/src/components/textArea/index.tsx +++ b/ai-verify-portal/src/components/textArea/index.tsx @@ -2,15 +2,15 @@ import React, { ChangeEventHandler } from 'react'; import styles from './styles/textArea.module.css'; type TextInputProps = { - name: string, - label?: string - placeholder?: string - error?: string - value?: string - maxLength?: number - labelSibling?: React.ReactElement - onChange?: ChangeEventHandler -} + name: string; + label?: string; + placeholder?: string; + error?: string; + value?: string; + maxLength?: number; + labelSibling?: React.ReactElement; + onChange?: ChangeEventHandler; +}; function TextArea(props: TextInputProps) { const { @@ -21,23 +21,29 @@ function TextArea(props: TextInputProps) { maxLength, value, labelSibling, - onChange } = props; + onChange, + } = props; - return
-
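The component changes above consistently apply one formatting and typing convention across the portal: type members delimited with semicolons, `any` props replaced with concrete types, and multi-line JSX returns wrapped in parentheses. The sketch below illustrates that target style under stated assumptions; the `Badge` component, its props, and its styling are hypothetical examples for illustration only and are not part of this changeset.

```tsx
// Illustrative sketch only; the component and its props are hypothetical.
import React from 'react';

// Semicolon-delimited members with concrete types instead of `any`.
type BadgeProps = {
  label: string;
  color?: string;
  onClick?: () => void;
};

function Badge(props: BadgeProps) {
  const { label, color = '#702f8a', onClick } = props;

  // Multi-line JSX is wrapped in parentheses rather than trailing the
  // `return` keyword, matching the formatting applied throughout this diff.
  return (
    <button
      type="button"
      style={{ backgroundColor: color, color: '#fff', border: 'none' }}
      onClick={onClick}>
      {label}
    </button>
  );
}

export { Badge };
```

A file written in this shape should come out unchanged from a Prettier-style formatter, which appears consistent with the mechanical reformatting seen across these components.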