diff --git a/.clang-format b/.clang-format
index 15012747148..2b438bf57cc 100644
--- a/.clang-format
+++ b/.clang-format
@@ -32,11 +32,14 @@ ColumnLimit: 100
ConstructorInitializerIndentWidth: 8
ContinuationIndentWidth: 8
ForEachMacros:
+ - 'ARRAY_FOR_EACH'
+ - 'ARRAY_FOR_EACH_PTR'
- 'FOR_EACH'
- 'FOR_EACH_FIXED_ARG'
- 'FOR_EACH_IDX'
- 'FOR_EACH_IDX_FIXED_ARG'
- 'FOR_EACH_NONEMPTY_TERM'
+ - 'FOR_EACH_FIXED_ARG_NONEMPTY_TERM'
- 'RB_FOR_EACH'
- 'RB_FOR_EACH_CONTAINER'
- 'SYS_DLIST_FOR_EACH_CONTAINER'
@@ -66,8 +69,16 @@ ForEachMacros:
- 'Z_GENLIST_FOR_EACH_NODE'
- 'Z_GENLIST_FOR_EACH_NODE_SAFE'
- 'STRUCT_SECTION_FOREACH'
+ - 'STRUCT_SECTION_FOREACH_ALTERNATE'
- 'TYPE_SECTION_FOREACH'
- 'K_SPINLOCK'
+ - 'COAP_RESOURCE_FOREACH'
+ - 'COAP_SERVICE_FOREACH'
+ - 'COAP_SERVICE_FOREACH_RESOURCE'
+ - 'HTTP_RESOURCE_FOREACH'
+ - 'HTTP_SERVER_CONTENT_TYPE_FOREACH'
+ - 'HTTP_SERVICE_FOREACH'
+ - 'HTTP_SERVICE_FOREACH_RESOURCE'
IfMacros:
- 'CHECKIF'
# Disabled for now, see bug https://github.com/zephyrproject-rtos/zephyr/issues/48520
@@ -82,8 +93,10 @@ IncludeCategories:
- Regex: '.*'
Priority: 3
IndentCaseLabels: false
+IndentGotoLabels: false
IndentWidth: 8
InsertBraces: true
+SpaceBeforeInheritanceColon: False
SpaceBeforeParens: ControlStatementsExceptControlMacros
SortIncludes: Never
UseTab: ForContinuationAndIndentation
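
Worth noting for reviewers: with `ARRAY_FOR_EACH`/`ARRAY_FOR_EACH_PTR` added to `ForEachMacros`, clang-format now lays these macros out like `for` statements rather than function calls, `InsertBraces` supplies any missing braces, and the new `IndentGotoLabels: false` keeps goto labels flush left. A minimal C sketch of the resulting style (illustrative only, not part of the diff; `ARRAY_FOR_EACH` comes from `<zephyr/sys/util.h>`):

```c
#include <zephyr/sys/util.h>

static int vals[8];

int reset_vals(void)
{
	/* Formatted as a control statement because ARRAY_FOR_EACH is listed
	 * in ForEachMacros; braces are enforced by InsertBraces.
	 */
	ARRAY_FOR_EACH(vals, i) {
		if (vals[i] < 0) {
			goto out;
		}
		vals[i] = 0;
	}
/* IndentGotoLabels: false leaves the label at column 0 */
out:
	return 0;
}
```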
diff --git a/.codechecker.yml b/.codechecker.yml
new file mode 100644
index 00000000000..d00e66ca4f7
--- /dev/null
+++ b/.codechecker.yml
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (c) 2024, Basalte bv
+
+analyzer:
+ # Start by disabling all
+ - --disable-all
+
+ # Enable the sensitive profile
+ - --enable=sensitive
+
+ # Disable unused cases
+ - --disable=boost
+ - --disable=mpi
+
+ # Many identifiers in Zephyr start with _
+ - --disable=clang-diagnostic-reserved-identifier
+ - --disable=clang-diagnostic-reserved-macro-identifier
+
+ # Cleanup
+ - --clean
diff --git a/.github/ISSUE_TEMPLATE/001_bug_report.md b/.github/ISSUE_TEMPLATE/001_bug_report.md
index 57f8652b65b..a3d0677184b 100644
--- a/.github/ISSUE_TEMPLATE/001_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/001_bug_report.md
@@ -6,8 +6,8 @@ labels: bug
assignees: ''
---
-
-**Notes (delete this)**
+
**Describe the bug**
+
**To Reproduce**
+
**Expected behavior**
+
**Impact**
+
**Logs and console output**
+
**Environment (please complete the following information):**
+
- OS: (e.g. Linux, MacOS, Windows)
- Toolchain (e.g. Zephyr SDK, ...)
- Commit SHA or Version used
**Additional context**
+
diff --git a/.github/ISSUE_TEMPLATE/002_enhancement.md b/.github/ISSUE_TEMPLATE/002_enhancement.md
index 615db8fec2e..36dd018047d 100644
--- a/.github/ISSUE_TEMPLATE/002_enhancement.md
+++ b/.github/ISSUE_TEMPLATE/002_enhancement.md
@@ -8,13 +8,21 @@ assignees: ''
---
**Is your enhancement proposal related to a problem? Please describe.**
+
**Describe the solution you'd like**
+
**Describe alternatives you've considered**
+
**Additional context**
+
diff --git a/.github/ISSUE_TEMPLATE/003_rfc-proposal.md b/.github/ISSUE_TEMPLATE/003_rfc-proposal.md
index fbb1fc8a1e1..9cea2d2dac2 100644
--- a/.github/ISSUE_TEMPLATE/003_rfc-proposal.md
+++ b/.github/ISSUE_TEMPLATE/003_rfc-proposal.md
@@ -9,43 +9,52 @@ assignees: ''
## Introduction
+
### Problem description
-
+
### Proposed change
-
+
## Detailed RFC
-
+
### Proposed change (Detailed)
-
+
### Dependencies
-
+
### Concerns and Unresolved Questions
-
+
## Alternatives
-
+
diff --git a/.github/ISSUE_TEMPLATE/004_feature_request.md b/.github/ISSUE_TEMPLATE/004_feature_request.md
index 3ffc06790ef..63aa536c8f5 100644
--- a/.github/ISSUE_TEMPLATE/004_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/004_feature_request.md
@@ -8,13 +8,21 @@ assignees: ''
---
**Is your feature request related to a problem? Please describe.**
+
**Describe the solution you'd like**
+
**Describe alternatives you've considered**
+
**Additional context**
+
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
index 02a5582ae87..42354f627cf 100644
--- a/.github/SECURITY.md
+++ b/.github/SECURITY.md
@@ -11,9 +11,9 @@ updates:
At this time, with the latest release of v3.6, the supported
versions are:
- - v2.7: Current LTS
- - v3.5: Prior release
- - v3.6: Current release
+ - v3.7: Current LTS
+ - v3.6: Prior release
+ - v2.7: Prior LTS
## Reporting process
diff --git a/.github/workflows/backport_issue_check.yml b/.github/workflows/backport_issue_check.yml
index 95175ecf1bb..ecaaf352827 100644
--- a/.github/workflows/backport_issue_check.yml
+++ b/.github/workflows/backport_issue_check.yml
@@ -2,12 +2,20 @@ name: Backport Issue Check
on:
pull_request_target:
+ types:
+ - edited
+ - opened
+ - reopened
+ - synchronize
branches:
- v*-branch
jobs:
backport:
name: Backport Issue Check
+ concurrency:
+ group: backport-issue-check-${{ github.ref }}
+ cancel-in-progress: true
runs-on: ubuntu-22.04
if: github.repository == 'zephyrproject-rtos/zephyr'
diff --git a/.github/workflows/bsim-tests-publish.yaml b/.github/workflows/bsim-tests-publish.yaml
index aad33a22339..72608b36b24 100644
--- a/.github/workflows/bsim-tests-publish.yaml
+++ b/.github/workflows/bsim-tests-publish.yaml
@@ -13,7 +13,7 @@ jobs:
steps:
- name: Download artifacts
- uses: dawidd6/action-download-artifact@v3
+ uses: dawidd6/action-download-artifact@v6
with:
run_id: ${{ github.event.workflow_run.id }}
diff --git a/.github/workflows/bsim-tests.yaml b/.github/workflows/bsim-tests.yaml
index 52d384e92fd..0c50ca5d6fd 100644
--- a/.github/workflows/bsim-tests.yaml
+++ b/.github/workflows/bsim-tests.yaml
@@ -8,6 +8,8 @@ on:
- "west.yml"
- "subsys/bluetooth/**"
- "tests/bsim/**"
+ - "boards/nordic/nrf5*/*dt*"
+ - "dts/*/nordic/**"
- "tests/bluetooth/common/testlib/**"
- "samples/bluetooth/**"
- "boards/posix/**"
@@ -34,18 +36,13 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
- image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.11.20240324
+ image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
options: '--entrypoint /bin/bash'
env:
ZEPHYR_TOOLCHAIN_VARIANT: zephyr
BSIM_OUT_PATH: /opt/bsim/
BSIM_COMPONENTS_PATH: /opt/bsim/components
EDTT_PATH: ../tools/edtt
- bsim_bt_52_test_results_file: ./bsim_bt/52_bsim_results.xml
- bsim_bt_53_test_results_file: ./bsim_bt/53_bsim_results.xml
- bsim_bt_53split_test_results_file: ./bsim_bt/53_bsim_split_results.xml
- bsim_net_52_test_results_file: ./bsim_net/52_bsim_results.xml
- bsim_uart_test_results_file: ./bsim_uart/uart_bsim_results.xml
steps:
- name: Apply container owner mismatch workaround
run: |
@@ -90,7 +87,7 @@ jobs:
echo "ZEPHYR_SDK_INSTALL_DIR=/opt/toolchains/zephyr-sdk-$( cat SDK_VERSION )" >> $GITHUB_ENV
- name: Check common triggering files
- uses: tj-actions/changed-files@v44
+ uses: tj-actions/changed-files@v45
id: check-common-files
with:
files: |
@@ -103,9 +100,11 @@ jobs:
include/zephyr/arch/posix/
scripts/native_simulator/
tests/bsim/*
+ boards/nordic/nrf5*/*dt*
+ dts/*/nordic/
- name: Check if Bluetooth files changed
- uses: tj-actions/changed-files@v44
+ uses: tj-actions/changed-files@v45
id: check-bluetooth-files
with:
files: |
@@ -114,7 +113,7 @@ jobs:
subsys/bluetooth/
- name: Check if Networking files changed
- uses: tj-actions/changed-files@v44
+ uses: tj-actions/changed-files@v45
id: check-networking-files
with:
files: |
@@ -127,7 +126,7 @@ jobs:
include/zephyr/net/ieee802154*
- name: Check if UART files changed
- uses: tj-actions/changed-files@v44
+ uses: tj-actions/changed-files@v45
id: check-uart-files
with:
files: |
@@ -137,10 +136,10 @@ jobs:
- name: Update BabbleSim to manifest revision
if: >
- steps.check-bluetooth-files.outputs.any_changed == 'true'
- || steps.check-networking-files.outputs.any_changed == 'true'
- || steps.check-uart-files.outputs.any_changed == 'true'
- || steps.check-common-files.outputs.any_changed == 'true'
+ steps.check-bluetooth-files.outputs.any_modified == 'true'
+ || steps.check-networking-files.outputs.any_modified == 'true'
+ || steps.check-uart-files.outputs.any_modified == 'true'
+ || steps.check-common-files.outputs.any_modified == 'true'
run: |
export BSIM_VERSION=$( west list bsim -f {revision} )
echo "Manifest points to bsim sha $BSIM_VERSION"
@@ -151,61 +150,41 @@ jobs:
make everything -s -j 8
- name: Run Bluetooth Tests with BSIM
- if: steps.check-bluetooth-files.outputs.any_changed == 'true' || steps.check-common-files.outputs.any_changed == 'true'
+ if: steps.check-bluetooth-files.outputs.any_modified == 'true' || steps.check-common-files.outputs.any_modified == 'true'
run: |
- export ZEPHYR_BASE=${PWD}
- # Build and run the BT tests for nrf52_bsim:
- nice tests/bsim/bluetooth/compile.sh
- RESULTS_FILE=${ZEPHYR_BASE}/${bsim_bt_52_test_results_file} \
- TESTS_FILE=tests/bsim/bluetooth/tests.nrf52bsim.txt tests/bsim/run_parallel.sh
- # Build and run the BT controller tests also for the nrf5340bsim/nrf5340/cpunet
- nice tests/bsim/bluetooth/compile.nrf5340bsim_nrf5340_cpunet.sh
- BOARD=nrf5340bsim/nrf5340/cpunet \
- RESULTS_FILE=${ZEPHYR_BASE}/${bsim_bt_53_test_results_file} \
- TESTS_FILE=tests/bsim/bluetooth/tests.nrf5340bsim_nrf5340_cpunet.txt \
- tests/bsim/run_parallel.sh
- # Build and run the nrf5340 split stack tests set
- nice tests/bsim/bluetooth/compile.nrf5340bsim_nrf5340_cpuapp.sh
- BOARD=nrf5340bsim/nrf5340/cpuapp \
- RESULTS_FILE=${ZEPHYR_BASE}/${bsim_bt_53split_test_results_file} \
- TESTS_FILE=tests/bsim/bluetooth/tests.nrf5340bsim_nrf5340_cpuapp.txt \
- tests/bsim/run_parallel.sh
+ tests/bsim/ci.bt.sh
- name: Run Networking Tests with BSIM
- if: steps.check-networking-files.outputs.any_changed == 'true' || steps.check-common-files.outputs.any_changed == 'true'
+ if: steps.check-networking-files.outputs.any_modified == 'true' || steps.check-common-files.outputs.any_modified == 'true'
run: |
- export ZEPHYR_BASE=${PWD}
- WORK_DIR=${ZEPHYR_BASE}/bsim_net nice tests/bsim/net/compile.sh
- RESULTS_FILE=${ZEPHYR_BASE}/${bsim_net_52_test_results_file} \
- SEARCH_PATH=tests/bsim/net/ tests/bsim/run_parallel.sh
+ tests/bsim/ci.net.sh
- name: Run UART Tests with BSIM
- if: steps.check-uart-files.outputs.any_changed == 'true' || steps.check-common-files.outputs.any_changed == 'true'
+ if: steps.check-uart-files.outputs.any_modified == 'true' || steps.check-common-files.outputs.any_modified == 'true'
run: |
- echo "UART: Single device tests"
- ./scripts/twister -T tests/drivers/uart/ --force-color --inline-logs -v -M -p nrf52_bsim \
- --fixture gpio_loopback -- -uart0_loopback
- echo "UART: Multi device tests"
- export ZEPHYR_BASE=${PWD}
- WORK_DIR=${ZEPHYR_BASE}/bsim_uart nice tests/bsim/drivers/uart/compile.sh
- RESULTS_FILE=${ZEPHYR_BASE}/${bsim_uart_test_results_file} \
- SEARCH_PATH=tests/bsim/drivers/uart/ tests/bsim/run_parallel.sh
-
- - name: Upload Test Results
+ tests/bsim/ci.uart.sh
+
+ - name: Merge Test Results
+ run: |
+ pip3 install junitparser junit2html
+ junitparser merge --glob "./bsim_*/*bsim_results.*.xml" "./twister-out/twister.xml" junit.xml
+ junit2html junit.xml junit.html
+
+ - name: Upload Unit Test Results in HTML
if: always()
uses: actions/upload-artifact@v4
with:
- name: bsim-test-results
+ name: HTML Unit Test Results
+ if-no-files-found: ignore
path: |
- ./bsim_bt/52_bsim_results.xml
- ./bsim_bt/53_bsim_results.xml
- ./bsim_bt/53_bsim_split_results.xml
- ./bsim_net/52_bsim_results.xml
- ./bsim_uart/uart_bsim_results.xml
- ./twister-out/twister.xml
- ./twister-out/twister.json
- ${{ github.event_path }}
- if-no-files-found: warn
+ junit.html
+
+ - name: Publish Unit Test Results
+ uses: EnricoMi/publish-unit-test-result-action@v2
+ with:
+ check_name: Bsim Test Results
+ files: "junit.xml"
+ comment_mode: off
- name: Upload Event Details
if: always()
diff --git a/.github/workflows/clang.yaml b/.github/workflows/clang.yaml
index 77a36a0c931..e88141a9e23 100644
--- a/.github/workflows/clang.yaml
+++ b/.github/workflows/clang.yaml
@@ -12,7 +12,7 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
- image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.11.20240324
+ image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false
diff --git a/.github/workflows/codecov.yaml b/.github/workflows/codecov.yaml
index 19c50aba9b4..aa81056cdfd 100644
--- a/.github/workflows/codecov.yaml
+++ b/.github/workflows/codecov.yaml
@@ -14,7 +14,7 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
- image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.11.20240324
+ image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false
diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
index 09c0767cd19..c8ffedcb77b 100644
--- a/.github/workflows/compliance.yml
+++ b/.github/workflows/compliance.yml
@@ -1,6 +1,12 @@
name: Compliance Checks
-on: pull_request
+on:
+ pull_request:
+ types:
+ - edited
+ - opened
+ - reopened
+ - synchronize
jobs:
check_compliance:
@@ -32,7 +38,7 @@ jobs:
run: |
pip3 install setuptools
pip3 install wheel
- pip3 install python-magic lxml junitparser gitlint pylint pykwalify yamllint
+ pip3 install python-magic lxml junitparser gitlint pylint pykwalify yamllint clang-format unidiff sphinx-lint
pip3 install west
- name: west setup
@@ -52,6 +58,14 @@ jobs:
west config manifest.group-filter -- +ci,-optional
west update -o=--depth=1 -n 2>&1 1> west.update.log || west update -o=--depth=1 -n 2>&1 1> west.update2.log
+ - name: Check for PR description
+ if: ${{ github.event.pull_request.body == '' }}
+ continue-on-error: true
+ id: pr_description
+ run: |
+ echo "Pull request description cannot be empty."
+ exit 1
+
- name: Run Compliance Tests
continue-on-error: true
id: compliance
@@ -80,19 +94,33 @@ jobs:
exit 1;
fi
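+          # Checks listed in "warns" are reported as warning annotations and do not fail the job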
+ warns=("ClangFormat")
files=($(./scripts/ci/check_compliance.py -l))
+
for file in "${files[@]}"; do
f="${file}.txt"
if [[ -s $f ]]; then
- errors=$(cat $f)
- errors="${errors//'%'/'%25'}"
- errors="${errors//$'\n'/'%0A'}"
- errors="${errors//$'\r'/'%0D'}"
- echo "::error file=${f}::$errors"
- exit=1
+ results=$(cat $f)
+ results="${results//'%'/'%25'}"
+ results="${results//$'\n'/'%0A'}"
+ results="${results//$'\r'/'%0D'}"
+
+ if [[ "${warns[@]}" =~ "${file}" ]]; then
+ echo "::warning file=${f}::$results"
+ else
+ echo "::error file=${f}::$results"
+ exit=1
+ fi
fi
done
if [ "${exit}" == "1" ]; then
+ echo "Compliance error, check for error messages in the \"Run Compliance Tests\" step"
+ echo "You can run this step locally with the ./scripts/ci/check_compliance.py script."
+ exit 1;
+ fi
+
+ if [ "${{ steps.pr_description.outcome }}" == "failure" ]; then
+ echo "PR description cannot be empty"
exit 1;
fi
diff --git a/.github/workflows/devicetree_checks.yml b/.github/workflows/devicetree_checks.yml
index d0bf180c0f3..060a5d95973 100644
--- a/.github/workflows/devicetree_checks.yml
+++ b/.github/workflows/devicetree_checks.yml
@@ -28,11 +28,6 @@ jobs:
matrix:
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04, macos-14, windows-2022]
- exclude:
- - os: macos-14
- python-version: 3.6
- - os: windows-2022
- python-version: 3.6
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/doc-build.yml b/.github/workflows/doc-build.yml
index e8d53405257..1872a824604 100644
--- a/.github/workflows/doc-build.yml
+++ b/.github/workflows/doc-build.yml
@@ -17,7 +17,10 @@ env:
# The latest CMake available directly with apt is 3.18, but we need >=3.20
# so we fetch that through pip.
CMAKE_VERSION: 3.20.5
- DOXYGEN_VERSION: 1.9.6
+ DOXYGEN_VERSION: 1.12.0
+ # Job count is set to 2 less than the vCPU count of 16 because the total available RAM is 32GiB
+ # and each sphinx-build process may use more than 2GiB of RAM.
+ JOB_COUNT: 14
jobs:
doc-file-check:
@@ -26,7 +29,7 @@ jobs:
if: >
github.repository_owner == 'zephyrproject-rtos'
outputs:
- file_check: ${{ steps.check-doc-files.outputs.any_changed }}
+ file_check: ${{ steps.check-doc-files.outputs.any_modified }}
steps:
- name: checkout
uses: actions/checkout@v4
@@ -34,7 +37,7 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- name: Check if Documentation related files changed
- uses: tj-actions/changed-files@v44
+ uses: tj-actions/changed-files@v45
id: check-doc-files
with:
files: |
@@ -50,6 +53,8 @@ jobs:
scripts/dts/
doc/requirements.txt
.github/workflows/doc-build.yml
+ scripts/pylib/pytest-twister-harness/src/twister_harness/device/device_adapter.py
+ scripts/pylib/pytest-twister-harness/src/twister_harness/helpers/shell.py
doc-build-html:
name: "Documentation Build (HTML)"
@@ -57,20 +62,13 @@ jobs:
if: >
github.repository_owner == 'zephyrproject-rtos' &&
( needs.doc-file-check.outputs.file_check == 'true' || github.event_name != 'pull_request' )
- runs-on:
- group: zephyr-runner-v2-linux-x64-4xlarge
+ runs-on: ubuntu-22.04
timeout-minutes: 90
concurrency:
group: doc-build-html-${{ github.ref }}
cancel-in-progress: true
steps:
- - name: Print cloud service information
- run: |
- echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
- echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
- echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"
-
- name: install-pkgs
run: |
sudo apt-get update
@@ -130,7 +128,11 @@ jobs:
else
DOC_TARGET="html"
fi
- DOC_TAG=${DOC_TAG} SPHINXOPTS_EXTRA="-q -t publish" make -C doc ${DOC_TARGET}
+
+ DOC_TAG=${DOC_TAG} \
+ SPHINXOPTS="-j ${JOB_COUNT} -W --keep-going -T" \
+ SPHINXOPTS_EXTRA="-q -t publish" \
+ make -C doc ${DOC_TARGET}
# API documentation coverage
python3 -m coverxygen --xml-dir doc/_build/html/doxygen/xml/ --src-dir include/ --output doc-coverage.info
@@ -140,9 +142,9 @@ jobs:
- name: compress-docs
run: |
- tar cfJ html-output.tar.xz --directory=doc/_build html
- tar cfJ api-output.tar.xz --directory=doc/_build html/doxygen/html
- tar cfJ api-coverage.tar.xz coverage-report
+ tar --use-compress-program="xz -T0" -cf html-output.tar.xz --directory=doc/_build html
+ tar --use-compress-program="xz -T0" -cf api-output.tar.xz --directory=doc/_build html/doxygen/html
+ tar --use-compress-program="xz -T0" -cf api-coverage.tar.xz coverage-report
- name: upload-build
uses: actions/upload-artifact@v4
@@ -183,8 +185,7 @@ jobs:
if: |
github.event_name != 'pull_request' &&
github.repository_owner == 'zephyrproject-rtos'
- runs-on:
- group: zephyr-runner-v2-linux-x64-4xlarge
+ runs-on: ubuntu-22.04
container: texlive/texlive:latest
timeout-minutes: 120
concurrency:
@@ -196,19 +197,13 @@ jobs:
run: |
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- - name: Print cloud service information
- run: |
- echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
- echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
- echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"
-
- name: checkout
uses: actions/checkout@v4
- name: install-pkgs
run: |
apt-get update
- apt-get install -y python3-pip python3-venv ninja-build doxygen graphviz librsvg2-bin
+ apt-get install -y python3-pip python3-venv ninja-build doxygen graphviz librsvg2-bin imagemagick
- name: cache-pip
uses: actions/cache@v4
@@ -243,7 +238,10 @@ jobs:
DOC_TAG="development"
fi
- DOC_TAG=${DOC_TAG} SPHINXOPTS="-q -j auto" LATEXMKOPTS="-quiet -halt-on-error" make -C doc pdf
+ DOC_TAG=${DOC_TAG} \
+ SPHINXOPTS="-q -j ${JOB_COUNT}" \
+ LATEXMKOPTS="-quiet -halt-on-error" \
+ make -C doc pdf
- name: upload-build
if: always()
diff --git a/.github/workflows/doc-publish-pr.yml b/.github/workflows/doc-publish-pr.yml
index e35c6d65120..b202b0a790a 100644
--- a/.github/workflows/doc-publish-pr.yml
+++ b/.github/workflows/doc-publish-pr.yml
@@ -21,7 +21,7 @@ jobs:
steps:
- name: Download artifacts
- uses: dawidd6/action-download-artifact@v3
+ uses: dawidd6/action-download-artifact@v6
with:
workflow: doc-build.yml
run_id: ${{ github.event.workflow_run.id }}
diff --git a/.github/workflows/doc-publish.yml b/.github/workflows/doc-publish.yml
index 869cfceaad2..51c451c75ea 100644
--- a/.github/workflows/doc-publish.yml
+++ b/.github/workflows/doc-publish.yml
@@ -24,7 +24,7 @@ jobs:
steps:
- name: Download artifacts
- uses: dawidd6/action-download-artifact@v3
+ uses: dawidd6/action-download-artifact@v6
with:
workflow: doc-build.yml
run_id: ${{ github.event.workflow_run.id }}
diff --git a/.github/workflows/errno.yml b/.github/workflows/errno.yml
index 593fe916deb..b1f7e6f4e62 100644
--- a/.github/workflows/errno.yml
+++ b/.github/workflows/errno.yml
@@ -10,7 +10,7 @@ jobs:
check-errno:
runs-on: ubuntu-22.04
container:
- image: ghcr.io/zephyrproject-rtos/ci:v0.26.11
+ image: ghcr.io/zephyrproject-rtos/ci:v0.26.13
steps:
- name: Apply container owner mismatch workaround
diff --git a/.github/workflows/footprint-tracking.yml b/.github/workflows/footprint-tracking.yml
index ede9f52602e..08b858af477 100644
--- a/.github/workflows/footprint-tracking.yml
+++ b/.github/workflows/footprint-tracking.yml
@@ -26,7 +26,7 @@ jobs:
group: zephyr-runner-v2-linux-x64-4xlarge
if: github.repository_owner == 'zephyrproject-rtos'
container:
- image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.11.20240324
+ image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false
diff --git a/.github/workflows/hello_world_multiplatform.yaml b/.github/workflows/hello_world_multiplatform.yaml
index bcd11ee3ecc..08fd42d7866 100644
--- a/.github/workflows/hello_world_multiplatform.yaml
+++ b/.github/workflows/hello_world_multiplatform.yaml
@@ -26,7 +26,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- os: [ubuntu-22.04, macos-12, macos-14, windows-2022]
+ os: [ubuntu-22.04, ubuntu-24.04, macos-13, macos-14, windows-2022]
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
diff --git a/.github/workflows/manifest.yml b/.github/workflows/manifest.yml
index 040ec957e39..76b80f66504 100644
--- a/.github/workflows/manifest.yml
+++ b/.github/workflows/manifest.yml
@@ -26,12 +26,13 @@ jobs:
west init -l . || true
- name: Manifest
- uses: zephyrproject-rtos/action-manifest@v1.2.2
+ uses: zephyrproject-rtos/action-manifest@v1.3.1
with:
github-token: ${{ secrets.ZB_GITHUB_TOKEN }}
manifest-path: 'west.yml'
checkout-path: 'zephyrproject/zephyr'
use-tree-checkout: 'true'
+ check-impostor-commits: 'true'
label-prefix: 'manifest-'
verbosity-level: '1'
labels: 'manifest'
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index ccecd17df80..002485b9d80 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -21,7 +21,7 @@ jobs:
echo "TRIMMED_VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT
- name: REUSE Compliance Check
- uses: fsfe/reuse-action@v1
+ uses: fsfe/reuse-action@v4
with:
args: spdx -o zephyr-${{ steps.get_version.outputs.VERSION }}.spdx
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
new file mode 100644
index 00000000000..e2325da0a4a
--- /dev/null
+++ b/.github/workflows/scorecards.yml
@@ -0,0 +1,61 @@
+# This workflow uses actions that are not certified by GitHub. They are provided
+# by a third-party and are governed by separate terms of service, privacy
+# policy, and support documentation.
+
+name: Scorecards supply-chain security
+on:
+ # For Branch-Protection check. Only the default branch is supported. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+ branch_protection_rule:
+ # To guarantee Maintained check is occasionally updated. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+ schedule:
+ - cron: '43 7 * * 6'
+ push:
+ branches:
+ - main
+
+permissions: read-all
+
+jobs:
+ analysis:
+ name: Scorecard analysis
+ runs-on: ubuntu-latest
+ permissions:
+ # Needed for Code scanning upload
+ security-events: write
+ # Needed for GitHub OIDC token if publish_results is true
+ id-token: write
+
+ steps:
+ - name: "Checkout code"
+ uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ with:
+ persist-credentials: false
+
+ - name: "Run analysis"
+ uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
+ with:
+ results_file: results.sarif
+ results_format: sarif
+ # Publish results to OpenSSF REST API for easy access by consumers.
+ # - Allows the repository to include the Scorecard badge.
+ # - See https://github.com/ossf/scorecard-action#publishing-results.
+ publish_results: true
+
+ # Upload the results as artifacts (optional). Commenting out will disable
+ # uploads of run results in SARIF format to the repository Actions tab.
+ # https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
+ - name: "Upload artifact"
+ uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
+ with:
+ name: SARIF file
+ path: results.sarif
+ retention-days: 5
+
+ # Upload the results to GitHub's code scanning dashboard (optional).
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard
+ - name: "Upload to code-scanning"
+ uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15
+ with:
+ sarif_file: results.sarif
diff --git a/.github/workflows/stale_issue.yml b/.github/workflows/stale_issue.yml
index 8dc31370125..f71e485ff8e 100644
--- a/.github/workflows/stale_issue.yml
+++ b/.github/workflows/stale_issue.yml
@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-22.04
if: github.repository == 'zephyrproject-rtos/zephyr'
steps:
- - uses: actions/stale@v8
+ - uses: actions/stale@v9
with:
stale-pr-message: 'This pull request has been marked as stale because it has been open (more
than) 60 days with no activity. Remove the stale label or add a comment saying that you
diff --git a/.github/workflows/twister.yaml b/.github/workflows/twister.yaml
index 40f9fe04c2d..cc3c9e3ffdf 100644
--- a/.github/workflows/twister.yaml
+++ b/.github/workflows/twister.yaml
@@ -25,7 +25,7 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
- image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.11.20240324
+ image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
options: '--entrypoint /bin/bash'
outputs:
subset: ${{ steps.output-services.outputs.subset }}
@@ -129,7 +129,7 @@ jobs:
needs: twister-build-prep
if: needs.twister-build-prep.outputs.size != 0
container:
- image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.11.20240324
+ image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false
@@ -310,7 +310,7 @@ jobs:
if: success() || failure()
steps:
- # Needed for opensearch and upload script
+ # Needed for elasticsearch and upload script
- if: github.event_name == 'push' || github.event_name == 'schedule'
name: Checkout
uses: actions/checkout@v4
@@ -324,7 +324,7 @@ jobs:
path: artifacts
- if: github.event_name == 'push' || github.event_name == 'schedule'
- name: Upload to opensearch
+ name: Upload to elasticsearch
run: |
pip3 install elasticsearch
# set run date on upload to get consistent and unified data across the matrix.
diff --git a/.github/workflows/twister_tests_blackbox.yml b/.github/workflows/twister_tests_blackbox.yml
index edec21e28d3..8845c7eddd4 100644
--- a/.github/workflows/twister_tests_blackbox.yml
+++ b/.github/workflows/twister_tests_blackbox.yml
@@ -24,7 +24,7 @@ jobs:
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04]
container:
- image: ghcr.io/zephyrproject-rtos/ci:v0.26.11
+ image: ghcr.io/zephyrproject-rtos/ci:v0.26.13
steps:
- name: Apply Container Owner Mismatch Workaround
@@ -43,6 +43,8 @@ jobs:
echo "$HOME/.local/bin" >> $GITHUB_PATH
west init -l . || true
+ # we do not depend on any HALs, tools or bootloaders, so save some time and space...
+ west config manifest.group-filter -- -hal,-tools,-bootloader
west config --global update.narrow true
west update --path-cache /github/cache/zephyrproject 2>&1 1> west.update.log || west update --path-cache /github/cache/zephyrproject 2>&1 1> west.update.log || ( rm -rf ../modules ../bootloader ../tools && west update --path-cache /github/cache/zephyrproject)
west forall -c 'git reset --hard HEAD'
diff --git a/.github/workflows/west_cmds.yml b/.github/workflows/west_cmds.yml
index f3de00d82bb..d910bf15a29 100644
--- a/.github/workflows/west_cmds.yml
+++ b/.github/workflows/west_cmds.yml
@@ -31,11 +31,6 @@ jobs:
matrix:
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04, macos-14, windows-2022]
- exclude:
- - os: macos-14
- python-version: 3.6
- - os: windows-2022
- python-version: 3.6
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.gitignore b/.gitignore
index 15636eaaeac..266b95a424a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,8 +7,10 @@
*.swp
*.swo
*~
-.\#*
+
+# Emacs
\#*\#
+
build*/
!doc/build/
!scripts/build
@@ -27,6 +29,8 @@ outdir
outdir-*
scripts/basic/fixdep
scripts/gen_idt/gen_idt
+coverage-report
+doc-coverage.info
doc/_build
doc/doxygen
doc/xml
@@ -39,6 +43,7 @@ sanity-out*
twister-out*
bsim_out
bsim_bt_out
+myresults.xml
tests/RunResults.xml
scripts/grub
doc/reference/kconfig/*.rst
@@ -52,6 +57,7 @@ venv
.venv
.DS_Store
.clangd
+new.info
# CI output
compliance.xml
@@ -70,6 +76,7 @@ tags
BinaryFiles.txt
BoardYml.txt
Checkpatch.txt
+ClangFormat.txt
DevicetreeBindings.txt
GitDiffCheck.txt
Gitlint.txt
@@ -84,4 +91,5 @@ MaintainersFormat.txt
ModulesMaintainers.txt
Nits.txt
Pylint.txt
+SphinxLint.txt
YAMLLint.txt
diff --git a/.mailmap b/.mailmap
index bbf2b53318f..050d05cce9f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -61,6 +61,7 @@ Lixin Guo
Łukasz Mazur
Manuel Argüelles
Manuel Argüelles
+Manuel Argüelles
Marc Herbert <46978960+marc-hb@users.noreply.github.com>
Marin Jurjević
Mariusz Ryndzionek
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f87a7d8553d..952acccbba8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -109,6 +109,10 @@ add_library(zephyr_interface INTERFACE)
# flags that come with zephyr_interface.
zephyr_library_named(zephyr)
+if(CONFIG_LEGACY_GENERATED_INCLUDE_PATH)
+ zephyr_include_directories(${PROJECT_BINARY_DIR}/include/generated/zephyr)
+endif()
+
zephyr_include_directories(
include
${PROJECT_BINARY_DIR}/include/generated
@@ -188,6 +192,7 @@ get_property(OPTIMIZE_FOR_NO_OPTIMIZATIONS_FLAG TARGET compiler PROPERTY no_opti
get_property(OPTIMIZE_FOR_DEBUG_FLAG TARGET compiler PROPERTY optimization_debug)
get_property(OPTIMIZE_FOR_SPEED_FLAG TARGET compiler PROPERTY optimization_speed)
get_property(OPTIMIZE_FOR_SIZE_FLAG TARGET compiler PROPERTY optimization_size)
+get_property(OPTIMIZE_FOR_SIZE_AGGRESSIVE_FLAG TARGET compiler PROPERTY optimization_size_aggressive)
# From kconfig choice, pick the actual OPTIMIZATION_FLAG to use.
# Kconfig choice ensures only one of these CONFIG_*_OPTIMIZATIONS is set.
@@ -199,6 +204,8 @@ elseif(CONFIG_SPEED_OPTIMIZATIONS)
set(OPTIMIZATION_FLAG ${OPTIMIZE_FOR_SPEED_FLAG})
elseif(CONFIG_SIZE_OPTIMIZATIONS)
set(OPTIMIZATION_FLAG ${OPTIMIZE_FOR_SIZE_FLAG}) # Default in kconfig
+elseif(CONFIG_SIZE_OPTIMIZATIONS_AGGRESSIVE)
+ set(OPTIMIZATION_FLAG ${OPTIMIZE_FOR_SIZE_AGGRESSIVE_FLAG})
else()
message(FATAL_ERROR
"Unreachable code. Expected optimization level to have been chosen. See Kconfig.zephyr")
@@ -353,8 +360,21 @@ zephyr_compile_options(
$<$:-D_ASMLANGUAGE>
)
-# @Intent: Set fundamental linker specific flags
-toolchain_ld_base()
+find_package(Deprecated COMPONENTS toolchain_ld_base)
+
+if(DEFINED TOOLCHAIN_LD_FLAGS)
+ zephyr_ld_options(${TOOLCHAIN_LD_FLAGS})
+endif()
+
+zephyr_link_libraries(PROPERTY base)
+
+zephyr_link_libraries_ifndef(CONFIG_LINKER_USE_RELAX PROPERTY no_relax)
+
+zephyr_link_libraries_ifdef(CONFIG_LINKER_USE_RELAX PROPERTY relax)
+
+# Sort the common symbols and each input section by alignment
+# in descending order to minimize padding between these symbols.
+zephyr_link_libraries_ifdef(CONFIG_LINKER_SORT_BY_ALIGNMENT PROPERTY sort_alignment)
toolchain_ld_force_undefined_symbols(
_OffsetAbsSyms
@@ -362,13 +382,37 @@ toolchain_ld_force_undefined_symbols(
)
if(NOT CONFIG_NATIVE_BUILD)
- # @Intent: Set linker specific flags for bare metal target
- toolchain_ld_baremetal()
+ find_package(Deprecated COMPONENTS toolchain_ld_baremetal)
+
+ zephyr_link_libraries(PROPERTY baremetal)
+
+ # Note that some architectures will skip this flag if set to error, even
+ # though the compiler flag check passes (e.g. ARC and Xtensa). So warning
+ # should be the default for now.
+ #
+ # Skip this for native applications as Zephyr only provides
+ # additions to the host toolchain linker script. The relocation
+ # sections (.rel*) require us to override those provided
+ # by the host toolchain. As we can't account for every possible
+ # combination of compiler and linker on all machines used
+ # for development, it is better to turn this off.
+ #
+ # CONFIG_LINKER_ORPHAN_SECTION_PLACE places orphan sections without
+ # any warnings or errors, which is the linker's default behavior,
+ # so there is no need to explicitly set a linker flag for it.
+ if(CONFIG_LINKER_ORPHAN_SECTION_WARN)
+ zephyr_link_libraries(PROPERTY orphan_warning)
+ elseif(CONFIG_LINKER_ORPHAN_SECTION_ERROR)
+ zephyr_link_libraries(PROPERTY orphan_error)
+ endif()
endif()
-if(CONFIG_CPP AND NOT CONFIG_MINIMAL_LIBCPP AND NOT CONFIG_NATIVE_LIBRARY)
- # @Intent: Set linker specific flags for C++
- toolchain_ld_cpp()
+if(CONFIG_CPP)
+ if(NOT CONFIG_MINIMAL_LIBCPP AND NOT CONFIG_NATIVE_LIBRARY)
+ find_package(Deprecated COMPONENTS toolchain_ld_cpp)
+ endif()
+
+ zephyr_link_libraries(PROPERTY cpp_base)
endif()
# @Intent: Add the basic toolchain warning flags
@@ -541,9 +585,9 @@ if(ZEPHYR_GIT_INDEX)
endif()
add_custom_command(
- OUTPUT ${PROJECT_BINARY_DIR}/include/generated/version.h
+ OUTPUT ${PROJECT_BINARY_DIR}/include/generated/zephyr/version.h
COMMAND ${CMAKE_COMMAND} -DZEPHYR_BASE=${ZEPHYR_BASE}
- -DOUT_FILE=${PROJECT_BINARY_DIR}/include/generated/version.h
+ -DOUT_FILE=${PROJECT_BINARY_DIR}/include/generated/zephyr/version.h
-DVERSION_TYPE=KERNEL
-DVERSION_FILE=${ZEPHYR_BASE}/VERSION
-DKERNEL_VERSION_CUSTOMIZATION="$"
@@ -552,13 +596,13 @@ add_custom_command(
DEPENDS ${ZEPHYR_BASE}/VERSION ${git_dependency}
COMMAND_EXPAND_LISTS
)
-add_custom_target(version_h DEPENDS ${PROJECT_BINARY_DIR}/include/generated/version.h)
+add_custom_target(version_h DEPENDS ${PROJECT_BINARY_DIR}/include/generated/zephyr/version.h)
if(EXISTS ${APPLICATION_SOURCE_DIR}/VERSION)
add_custom_command(
- OUTPUT ${PROJECT_BINARY_DIR}/include/generated/app_version.h
+ OUTPUT ${PROJECT_BINARY_DIR}/include/generated/zephyr/app_version.h
COMMAND ${CMAKE_COMMAND} -DZEPHYR_BASE=${ZEPHYR_BASE}
- -DOUT_FILE=${PROJECT_BINARY_DIR}/include/generated/app_version.h
+ -DOUT_FILE=${PROJECT_BINARY_DIR}/include/generated/zephyr/app_version.h
-DVERSION_TYPE=APP
-DVERSION_FILE=${APPLICATION_SOURCE_DIR}/VERSION
-DAPP_VERSION_CUSTOMIZATION="$"
@@ -567,7 +611,9 @@ if(EXISTS ${APPLICATION_SOURCE_DIR}/VERSION)
DEPENDS ${APPLICATION_SOURCE_DIR}/VERSION ${git_dependency}
COMMAND_EXPAND_LISTS
)
- add_custom_target(app_version_h DEPENDS ${PROJECT_BINARY_DIR}/include/generated/app_version.h)
+ add_custom_target(
+ app_version_h
+ DEPENDS ${PROJECT_BINARY_DIR}/include/generated/zephyr/app_version.h)
add_dependencies(zephyr_interface app_version_h)
endif()
@@ -622,8 +668,8 @@ set(ZEPHYR_CURRENT_CMAKE_DIR)
get_property(LIBC_LINK_LIBRARIES TARGET zephyr_interface PROPERTY LIBC_LINK_LIBRARIES)
zephyr_link_libraries(${LIBC_LINK_LIBRARIES})
-set(syscall_list_h ${CMAKE_CURRENT_BINARY_DIR}/include/generated/syscall_list.h)
-set(edk_syscall_list_h ${CMAKE_CURRENT_BINARY_DIR}/edk/include/generated/syscall_list.h)
+set(syscall_list_h ${CMAKE_CURRENT_BINARY_DIR}/include/generated/zephyr/syscall_list.h)
+set(edk_syscall_list_h ${CMAKE_CURRENT_BINARY_DIR}/edk/include/generated/zephyr/syscall_list.h)
set(syscalls_json ${CMAKE_CURRENT_BINARY_DIR}/misc/generated/syscalls.json)
set(struct_tags_json ${CMAKE_CURRENT_BINARY_DIR}/misc/generated/struct_tags.json)
@@ -761,7 +807,7 @@ add_custom_target(${SYSCALL_LIST_H_TARGET} DEPENDS ${syscall_list_h} ${picolibc_
set_property(TARGET ${SYSCALL_LIST_H_TARGET}
APPEND PROPERTY
ADDITIONAL_CLEAN_FILES
- ${CMAKE_CURRENT_BINARY_DIR}/include/generated/syscalls
+ ${CMAKE_CURRENT_BINARY_DIR}/include/generated/zephyr/syscalls
)
add_custom_target(${PARSE_SYSCALLS_TARGET}
@@ -781,19 +827,30 @@ if(CONFIG_TIMEOUT_64BIT)
set(SYSCALL_SPLIT_TIMEOUT_ARG --split-type k_timeout_t --split-type k_ticks_t)
endif()
-add_custom_command(OUTPUT include/generated/syscall_dispatch.c ${syscall_list_h}
- # Also, some files are written to include/generated/syscalls/
+# percepio/TraceRecorder/kernelports/Zephyr/scripts/tz_parse_syscalls.py hardcodes the path
+# to `syscall_list.h`, so make a copy of the generated file to keep percepio building
+if(CONFIG_LEGACY_GENERATED_INCLUDE_PATH)
+ set(LEGACY_SYSCALL_LIST_H_ARGS
+ ${CMAKE_COMMAND} -E copy
+ ${syscall_list_h}
+ ${CMAKE_CURRENT_BINARY_DIR}/include/generated/syscall_list.h)
+endif()
+
+add_custom_command(OUTPUT include/generated/zephyr/syscall_dispatch.c ${syscall_list_h}
+ # Also, some files are written to include/generated/zephyr/syscalls/
COMMAND
${PYTHON_EXECUTABLE}
${ZEPHYR_BASE}/scripts/build/gen_syscalls.py
--json-file ${syscalls_json} # Read this file
- --base-output include/generated/syscalls # Write to this dir
- --syscall-dispatch include/generated/syscall_dispatch.c # Write this file
- --syscall-export-llext include/generated/syscall_export_llext.c
+ --base-output include/generated/zephyr/syscalls # Write to this dir
+ --syscall-dispatch include/generated/zephyr/syscall_dispatch.c # Write this file
+ --syscall-export-llext include/generated/zephyr/syscall_export_llext.c
--syscall-list ${syscall_list_h}
$<$:--gen-mrsh-files>
${SYSCALL_LONG_REGISTERS_ARG}
${SYSCALL_SPLIT_TIMEOUT_ARG}
+ COMMAND
+ ${LEGACY_SYSCALL_LIST_H_ARGS}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${PARSE_SYSCALLS_TARGET}
)
@@ -801,7 +858,7 @@ add_custom_command(OUTPUT include/generated/syscall_dispatch.c ${syscall_list_h}
# This is passed into all calls to the gen_kobject_list.py script.
set(gen_kobject_list_include_args --include-subsystem-list ${struct_tags_json})
-set(DRV_VALIDATION ${PROJECT_BINARY_DIR}/include/generated/driver-validation.h)
+set(DRV_VALIDATION ${PROJECT_BINARY_DIR}/include/generated/zephyr/driver-validation.h)
add_custom_command(
OUTPUT ${DRV_VALIDATION}
COMMAND
@@ -834,7 +891,7 @@ add_dependencies(zephyr_generated_headers
set(OFFSETS_LIB offsets)
set(OFFSETS_C_PATH ${ARCH_DIR}/${ARCH}/core/offsets/offsets.c)
-set(OFFSETS_H_PATH ${PROJECT_BINARY_DIR}/include/generated/offsets.h)
+set(OFFSETS_H_PATH ${PROJECT_BINARY_DIR}/include/generated/zephyr/offsets.h)
add_library( ${OFFSETS_LIB} OBJECT ${OFFSETS_C_PATH})
target_include_directories(${OFFSETS_LIB} PRIVATE
@@ -1197,7 +1254,7 @@ if(CONFIG_USERSPACE)
PUBLIC $
)
- set(KOBJECT_LINKER_HEADER_DATA "${PROJECT_BINARY_DIR}/include/generated/linker-kobject-prebuilt-data.h")
+ set(KOBJECT_LINKER_HEADER_DATA "${PROJECT_BINARY_DIR}/include/generated/zephyr/linker-kobject-prebuilt-data.h")
add_custom_command(
OUTPUT ${KOBJECT_LINKER_HEADER_DATA}
@@ -1205,7 +1262,7 @@ if(CONFIG_USERSPACE)
${PYTHON_EXECUTABLE}
${ZEPHYR_BASE}/scripts/build/gen_kobject_placeholders.py
--object $
- --outdir ${PROJECT_BINARY_DIR}/include/generated
+ --outdir ${PROJECT_BINARY_DIR}/include/generated/zephyr
--datapct ${CONFIG_KOBJECT_DATA_AREA_RESERVE_EXTRA_PERCENT}
--rodata ${CONFIG_KOBJECT_RODATA_AREA_EXTRA_BYTES}
$<$:--verbose>
@@ -1297,6 +1354,20 @@ if(CONFIG_GEN_ISR_TABLES)
set_property(GLOBAL APPEND PROPERTY GENERATED_KERNEL_SOURCE_FILES isr_tables.c)
endif()
+if(CONFIG_SYMTAB)
+ add_custom_command(
+ OUTPUT symtab.c
+ COMMAND
+ ${PYTHON_EXECUTABLE}
+ ${ZEPHYR_BASE}/scripts/build/gen_symtab.py
+ -k $
+ -o symtab.c
+ DEPENDS ${ZEPHYR_LINK_STAGE_EXECUTABLE}
+ COMMAND_EXPAND_LISTS
+ )
+ set_property(GLOBAL APPEND PROPERTY GENERATED_KERNEL_SOURCE_FILES symtab.c)
+endif()
+
if(CONFIG_USERSPACE)
set(KOBJECT_HASH_LIST kobject_hash.gperf)
set(KOBJECT_HASH_OUTPUT_SRC_PRE kobject_hash_preprocessed.c)
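
The generated `symtab.c` backs Zephyr's symbol table support, which lets runtime code translate instruction addresses into symbol names. A hedged usage sketch (assuming the `<zephyr/debug/symtab.h>` API; verify the exact signature against the tree):

```c
#include <stdint.h>
#include <zephyr/sys/printk.h>
#include <zephyr/debug/symtab.h>

void report_caller(void)
{
	uint32_t offset;
	uintptr_t addr = (uintptr_t)__builtin_return_address(0);

	/* Map a code address back to the nearest symbol name. */
	const char *name = symtab_find_symbol_name(addr, &offset);

	printk("called from %s+0x%x\n", name, offset);
}
```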
@@ -1679,9 +1750,8 @@ if(CONFIG_BUILD_OUTPUT_BIN AND CONFIG_BUILD_OUTPUT_UF2)
set(BYPRODUCT_KERNEL_UF2_NAME "${PROJECT_BINARY_DIR}/${KERNEL_UF2_NAME}" CACHE FILEPATH "Kernel uf2 file" FORCE)
endif()
+set(KERNEL_META_PATH ${PROJECT_BINARY_DIR}/${KERNEL_META_NAME} CACHE INTERNAL "")
if(CONFIG_BUILD_OUTPUT_META)
- set(KERNEL_META_PATH ${PROJECT_BINARY_DIR}/${KERNEL_META_NAME} CACHE INTERNAL "")
-
list(APPEND
post_build_commands
COMMAND ${PYTHON_EXECUTABLE} ${ZEPHYR_BASE}/scripts/zephyr_module.py
@@ -1695,6 +1765,9 @@ if(CONFIG_BUILD_OUTPUT_META)
post_build_byproducts
${KERNEL_META_PATH}
)
+else(CONFIG_BUILD_OUTPUT_META)
+ # Prevent spdx from using invalid data
+ file(REMOVE ${KERNEL_META_PATH})
endif()
# Cleanup intermediate files
@@ -1855,6 +1928,20 @@ if(CONFIG_BUILD_OUTPUT_INFO_HEADER)
)
endif()
+if (CONFIG_LLEXT AND CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID)
+ # slidgen must be the first post-build command to be executed
+ # on the Zephyr ELF to ensure that all other commands, such as
+ # binary file generation, are operating on a prepared ELF.
+ list(PREPEND
+ post_build_commands
+ COMMAND ${PYTHON_EXECUTABLE}
+ ${ZEPHYR_BASE}/scripts/build/llext_prepare_exptab.py
+ --elf-file ${PROJECT_BINARY_DIR}/${KERNEL_ELF_NAME}
+ --slid-listing ${PROJECT_BINARY_DIR}/slid_listing.txt
+ )
+
+endif()
+
if(NOT CMAKE_C_COMPILER_ID STREQUAL "ARMClang")
set(check_init_priorities_input
$,${BYPRODUCT_KERNEL_EXE_NAME},${BYPRODUCT_KERNEL_ELF_NAME}>
@@ -1941,22 +2028,39 @@ elseif(CONFIG_LOG_MIPI_SYST_USE_CATALOG)
endif()
if(LOG_DICT_DB_NAME_ARG)
- if (NOT CONFIG_LOG_DICTIONARY_DB_TARGET)
- set(LOG_DICT_DB_ALL_TARGET ALL)
- endif()
- add_custom_command(
- OUTPUT ${LOG_DICT_DB_NAME}
- COMMAND
+ set(log_dict_gen_command
${PYTHON_EXECUTABLE}
${ZEPHYR_BASE}/scripts/logging/dictionary/database_gen.py
${KERNEL_ELF_NAME}
${LOG_DICT_DB_NAME_ARG}=${LOG_DICT_DB_NAME}
- --build-header ${PROJECT_BINARY_DIR}/include/generated/version.h
- WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
- COMMENT "Generating logging dictionary database: ${LOG_DICT_DB_NAME}"
- DEPENDS ${logical_target_for_zephyr_elf}
+ --build-header ${PROJECT_BINARY_DIR}/include/generated/zephyr/version.h
)
- add_custom_target(log_dict_db_gen ${LOG_DICT_DB_ALL_TARGET} DEPENDS ${LOG_DICT_DB_NAME})
+
+ if (NOT CONFIG_LOG_DICTIONARY_DB_TARGET)
+ # If not using a separate target for generating the logging dictionary
+ # database, add the generation to the post-build commands to make sure
+ # the database is actually generated.
+ list(APPEND
+ post_build_commands
+ COMMAND ${CMAKE_COMMAND} -E echo "Generating logging dictionary database: ${LOG_DICT_DB_NAME}"
+ COMMAND ${log_dict_gen_command}
+ )
+ list(APPEND
+ post_build_byproducts
+ ${LOG_DICT_DB_NAME}
+ )
+ else()
+ # Separate build target for generating the logging dictionary database.
+ # This target must be invoked explicitly to generate the database.
+ add_custom_command(
+ OUTPUT ${LOG_DICT_DB_NAME}
+ COMMAND ${log_dict_gen_command}
+ WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
+ COMMENT "Generating logging dictionary database: ${LOG_DICT_DB_NAME}"
+ DEPENDS ${logical_target_for_zephyr_elf}
+ )
+ add_custom_target(log_dict_db_gen DEPENDS ${LOG_DICT_DB_NAME})
+ endif()
endif()
# Add post_build_commands to post-process the final .elf file produced by
@@ -2069,12 +2173,15 @@ endif()
set(llext_edk_file ${PROJECT_BINARY_DIR}/${CONFIG_LLEXT_EDK_NAME}.tar.xz)
# TODO maybe generate flags for C CXX ASM
+zephyr_get_compile_definitions_for_lang(C zephyr_defs)
zephyr_get_compile_options_for_lang(C zephyr_flags)
# Filter out non LLEXT and LLEXT_EDK flags - and add required ones
-llext_filter_zephyr_flags(LLEXT_REMOVE_FLAGS ${zephyr_flags} llext_edk_cflags)
-llext_filter_zephyr_flags(LLEXT_EDK_REMOVE_FLAGS ${llext_edk_cflags} llext_edk_cflags)
+llext_filter_zephyr_flags(LLEXT_REMOVE_FLAGS ${zephyr_flags} llext_filt_flags)
+llext_filter_zephyr_flags(LLEXT_EDK_REMOVE_FLAGS ${llext_filt_flags} llext_filt_flags)
+set(llext_edk_cflags ${zephyr_defs} -DLL_EXTENSION_BUILD)
+list(APPEND llext_edk_cflags ${llext_filt_flags})
list(APPEND llext_edk_cflags ${LLEXT_APPEND_FLAGS})
list(APPEND llext_edk_cflags ${LLEXT_EDK_APPEND_FLAGS})
@@ -2082,13 +2189,13 @@ add_custom_command(
OUTPUT ${llext_edk_file}
# Regenerate syscalls in case CONFIG_LLEXT_EDK_USERSPACE_ONLY
COMMAND ${CMAKE_COMMAND}
- -E make_directory edk/include/generated
+ -E make_directory edk/include/generated/zephyr
COMMAND
${PYTHON_EXECUTABLE}
${ZEPHYR_BASE}/scripts/build/gen_syscalls.py
--json-file ${syscalls_json} # Read this file
- --base-output edk/include/generated/syscalls # Write to this dir
- --syscall-dispatch edk/include/generated/syscall_dispatch.c # Write this file
+ --base-output edk/include/generated/zephyr/syscalls # Write to this dir
+ --syscall-dispatch edk/include/generated/zephyr/syscall_dispatch.c # Write this file
--syscall-list ${edk_syscall_list_h}
$<$:--userspace-only>
${SYSCALL_LONG_REGISTERS_ARG}
@@ -2096,10 +2203,9 @@ add_custom_command(
COMMAND ${CMAKE_COMMAND}
-DPROJECT_BINARY_DIR=${PROJECT_BINARY_DIR}
-DAPPLICATION_SOURCE_DIR=${APPLICATION_SOURCE_DIR}
- -DINTERFACE_INCLUDE_DIRECTORIES="$,:>"
+ -DINTERFACE_INCLUDE_DIRECTORIES="$"
-Dllext_edk_file=${llext_edk_file}
- -DAUTOCONF_H=${AUTOCONF_H}
- -Dllext_cflags="${llext_edk_cflags}"
+ -Dllext_edk_cflags="${llext_edk_cflags}"
-Dllext_edk_name=${CONFIG_LLEXT_EDK_NAME}
-DWEST_TOPDIR=${WEST_TOPDIR}
-DZEPHYR_BASE=${ZEPHYR_BASE}
diff --git a/CODEOWNERS b/CODEOWNERS
index 84faf677d91..8ba6e3c5b78 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -27,8 +27,6 @@
/soc/arm/infineon_xmc/ @parthitce
/soc/arm/silabs_exx32/efm32pg1b/ @rdmeneze
/soc/arm/silabs_exx32/efr32mg21/ @l-alfred
-/soc/arm/st_stm32/ @erwango
-/soc/arm/st_stm32/*/power.c @FRASTM
/soc/arm/st_stm32/stm32mp1/ @arnopo
/soc/arm/st_stm32/stm32h7/*stm32h735* @benediktibk
/soc/arm/st_stm32/stm32l4/*stm32l451* @benediktibk
@@ -44,6 +42,7 @@
/soc/riscv/riscv-privileged/gd32vf103/ @soburi
/soc/starfive/jh71xx/ @pfarwsi
/soc/riscv/riscv-privileged/niosv/ @sweeaun
+/boards/adafruit/feather_nrf52840/ @jacobw
/boards/ene/ @ene-steven
/boards/arm/96b_argonkey/ @avisconti
/boards/arm/96b_avenger96/ @Mani-Sadhasivam
@@ -56,8 +55,6 @@
/boards/arm/acn52832/ @sven-hm
/boards/arm/arduino_mkrzero/ @soburi
/boards/arm/bbc_microbit_v2/ @LingaoM
-/boards/arm/bl5340_dvk/ @lairdjm
-/boards/arm/bl65*/ @lairdjm
/boards/arm/blackpill_f401ce/ @coderkalyan
/boards/arm/blackpill_f411ce/ @coderkalyan
/boards/arm/bt*10/ @greg-leach
@@ -68,7 +65,6 @@
/boards/arm/cy8ckit_062s4/ @DaWei8823
/boards/arm/cy8ckit_062_wifi_bt/ @ifyall @npal-cy
/boards/arm/cy8cproto_062_4343w/ @ifyall @npal-cy
-/boards/arm/disco_l475_iot1/ @erwango
/boards/arm/efm32pg_stk3401a/ @rdmeneze
/boards/arm/faze/ @mbittan @simonguinot
/boards/arm/frdm*/ @mmahadevan108 @dleach02
@@ -78,7 +74,6 @@
/boards/arm/ip_k66f/ @parthitce @lmajewski
/boards/arm/legend/ @mbittan @simonguinot
/boards/arm/lpcxpresso*/ @mmahadevan108 @dleach02
-/boards/arm/mg100/ @rerickson1
/boards/arm/mimx8mm_evk/ @Mani-Sadhasivam
/boards/arm/mimx8mm_phyboard_polis @pefech
/boards/arm/mimxrt*/ @mmahadevan108 @dleach02
@@ -86,10 +81,8 @@
/boards/arm/msp_exp432p401r_launchxl/ @Mani-Sadhasivam
/boards/arm/npcx7m6fb_evb/ @MulinChao @ChiHuaL
/boards/arm/nrf*/ @carlescufi @lemrey
-/boards/arm/nucleo*/ @erwango @ABOSTM @FRASTM
/boards/arm/nucleo_f401re/ @idlethread
/boards/arm/nuvoton_pfm_m487/ @ssekar15
-/boards/arm/pinnacle_100_dvk/ @rerickson1
/boards/arm/qemu_cortex_a9/ @ibirnbaum
/boards/arm/qemu_cortex_r*/ @stephanosio
/boards/arm/qemu_cortex_m*/ @ioannisg @stephanosio
@@ -107,14 +100,13 @@
/boards/arm/sensortile_box/ @avisconti
/boards/arm/steval_fcu001v1/ @Navin-Sankar
/boards/arm/stm32l1_disco/ @karlp
-/boards/arm/stm32*_disco/ @erwango @ABOSTM @FRASTM
/boards/arm/stm32h735g_disco/ @benediktibk
/boards/arm/stm32f3_disco/ @ydamigos
-/boards/arm/stm32*_eval/ @erwango @ABOSTM @FRASTM
/boards/arm/rcar_*/ @aaillet
/boards/arm/ubx_bmd345eval_nrf52840/ @Navin-Sankar @brec-u-blox
/boards/arm/nrf5340_audio_dk_nrf5340 @koffes @alexsven @erikrobstad @rick1082 @gWacey
/boards/arm/stm32_min_dev/ @sidcha
+/boards/ezurio/* @rerickson1
/boards/riscv/rv32m1_vega/ @dleach02
/boards/riscv/adp_xc7k_ae350/ @cwshu @kevinwang821020 @jimmyzhe
/boards/riscv/longan_nano/ @soburi
@@ -152,9 +144,7 @@
/drivers/*/*sam4l* @nandojve
/drivers/*/*cc13xx_cc26xx* @bwitherspoon
/drivers/*/*gd32* @nandojve
-/drivers/*/*litex* @mateusz-holenko @kgugala @pgielda
/drivers/*/*mcux* @mmahadevan108 @dleach02
-/drivers/*/*stm32* @erwango @ABOSTM @FRASTM
/drivers/*/*native_posix* @aescolar @daor-oti
/drivers/*/*lpc11u6x* @mbittan @simonguinot
/drivers/*/*npcx* @MulinChao @ChiHuaL
@@ -199,6 +189,7 @@
/drivers/dai/intel/ssp/ @kv2019i @marcinszkudlinski @abonislawski
/drivers/dai/intel/dmic/ @marcinszkudlinski @abonislawski
/drivers/dai/intel/alh/ @abonislawski
+/drivers/dma/dma_dw_axi.c @pbalsundar
/drivers/dma/*dw* @tbursztyka
/drivers/dma/*dw_common* @abonislawski
/drivers/dma/*sam0* @Sizurka
@@ -212,7 +203,6 @@
/drivers/entropy/*b91* @andy-liu-telink
/drivers/entropy/*bt_hci* @JordanYates
/drivers/entropy/*rv32m1* @dleach02
-/drivers/entropy/*litex* @mateusz-holenko @kgugala @pgielda
/drivers/ethernet/*dwmac* @npitre
/drivers/ethernet/*stm32* @Nukersson @lochej
/drivers/ethernet/*w5500* @parthitce
@@ -221,6 +211,8 @@
/drivers/ethernet/*adin2111* @GeorgeCGV
/drivers/ethernet/*oa_tc6* @lmajewski
/drivers/ethernet/*lan865x* @lmajewski
+/drivers/ethernet/dwc_xgmac @Smale-12048867
+/drivers/ethernet/dwc_xgmac/dwc_xgmac @Smale-12048867
/drivers/ethernet/phy/ @rlubos @tbursztyka @arvinf @jukkar
/drivers/ethernet/phy/*adin2111* @GeorgeCGV
/drivers/mdio/*adin2111* @GeorgeCGV
@@ -234,7 +226,6 @@
/drivers/gpio/*b91* @andy-liu-telink
/drivers/gpio/*lmp90xxx* @henrikbrixandersen
/drivers/gpio/*nct38xx* @MulinChao @ChiHuaL
-/drivers/gpio/*stm32* @erwango
/drivers/gpio/*eos_s3* @fkokosinski @kgugala
/drivers/gpio/*rcar* @aaillet
/drivers/gpio/*esp32* @sylvioalves
@@ -258,13 +249,12 @@
/drivers/i2c/i2c_test.c @mbolivar-ampere
/drivers/i2c/*rcar* @aaillet
/drivers/i2c/*kb1200* @ene-steven
-/drivers/i2s/*litex* @mateusz-holenko @kgugala @pgielda
/drivers/i2s/i2s_ll_stm32* @avisconti
/drivers/i2s/*nrfx* @anangl
/drivers/i3c/i3c_cdns.c @XenuIsWatching
/drivers/ieee802154/ @rlubos @tbursztyka @jukkar @fgrandel
/drivers/ieee802154/*b91* @andy-liu-telink
-/drivers/ieee802154/ieee802154_nrf5* @jciupis
+/drivers/ieee802154/ieee802154_nrf5* @ankuns
/drivers/ieee802154/ieee802154_rf2xx* @tbursztyka @nandojve
/drivers/ieee802154/ieee802154_cc13xx* @bwitherspoon @cfriedt @vaishnavachath
/drivers/interrupt_controller/ @dcpleung @nashif
@@ -334,13 +324,13 @@
/drivers/sensor/qdec_stm32/ @valeriosetti
/drivers/sensor/rpi_pico_temp/ @soburi
/drivers/sensor/st*/ @avisconti
+/drivers/sensor/veaa_x_3/ @jeppenodgaard @MaureenHelm
/drivers/sensor/ene_tack_kb1200/ @ene-steven
/drivers/serial/*b91* @andy-liu-telink
/drivers/serial/uart_altera_jtag.c @nashif @gohshunjing
/drivers/serial/uart_altera.c @gohshunjing
/drivers/serial/*ns16550* @dcpleung @nashif @gdengi
/drivers/serial/*nrfx* @anangl
-/drivers/serial/uart_liteuart.c @mateusz-holenko @kgugala @pgielda
/drivers/serial/Kconfig.mcux_iuart @Mani-Sadhasivam
/drivers/serial/uart_mcux_iuart.c @Mani-Sadhasivam
/drivers/serial/Kconfig.rtt @carlescufi @pkral78
@@ -360,7 +350,6 @@
/drivers/serial/uart_ite_it8xxx2.c @GTLin08
/drivers/serial/*intel_lw* @shilinte
/drivers/serial/*kb1200* @ene-steven
-/drivers/disk/sdmmc_sdhc.h @JunYangNXP
/drivers/disk/sdmmc_stm32.c @anthonybrandon
/drivers/ptp_clock/ @tbursztyka @jukkar
/drivers/spi/*b91* @andy-liu-telink
@@ -377,13 +366,11 @@
/drivers/timer/*xlnx_psttc* @wjliang @stephanosio
/drivers/timer/*cc13xx_cc26xx_rtc* @vanti
/drivers/timer/*cavs* @dcpleung
-/drivers/timer/*stm32_lptim* @FRASTM
/drivers/timer/*leon_gptimer* @julius-barendt
/drivers/timer/*mips_cp0* @frantony
/drivers/timer/*rcar_cmt* @aaillet
/drivers/timer/*esp32_sys* @uLipe
/drivers/timer/*sam0_rtc* @bendiscz
-/drivers/timer/*arcv2* @ruuddw
/drivers/timer/*xtensa* @dcpleung
/drivers/timer/*rv32m1_lptmr* @mbolivar
/drivers/timer/*nrf_rtc* @anangl
@@ -411,7 +398,6 @@
/drivers/wifi/eswifi/ @loicpoulain @nandojve
/drivers/wifi/winc1500/ @kludentwo
/drivers/virtualization/ @tbursztyka
-/dts/arc/ @abrodkin @ruuddw @iriszzw @evgeniy-paltsev
/dts/arm/acsip/ @NorthernDean
/dts/arm/aspeed/ @aspeeddylan
/dts/arm/atmel/ @galak @nandojve
@@ -427,7 +413,6 @@
/dts/arm64/renesas/ @lorc @xakep-amatop
/dts/arm/quicklogic/ @fkokosinski @kgugala
/dts/arm/seeed_studio/ @str4t0m
-/dts/arm/st/ @erwango
/dts/arm/st/h7/*stm32h735* @benediktibk
/dts/arm/st/l4/*stm32l451* @benediktibk
/dts/arm/ti/cc13?2* @bwitherspoon
@@ -450,7 +435,6 @@
/dts/riscv/ite/ @ite
/dts/riscv/microchip/microchip-miv.dtsi @galak
/dts/riscv/openisa/rv32m1* @dleach02
-/dts/riscv/riscv32-litex-vexriscv.dtsi @mateusz-holenko @kgugala @pgielda
/dts/riscv/starfive/ @rajnesh-kanwal @pfarwsi
/dts/riscv/andes/andes_v5* @cwshu @kevinwang821020 @jimmyzhe
/dts/riscv/niosv/ @sweeaun
@@ -482,11 +466,8 @@
/dts/bindings/*/nxp*s32* @manuargue
/dts/bindings/*/openisa* @dleach02
/dts/bindings/*/raspberrypi*pico* @yonsch
-/dts/bindings/*/st* @erwango
/dts/bindings/sensor/ams* @alexanderwachter
/dts/bindings/*/sifive* @mateusz-holenko @kgugala @pgielda
-/dts/bindings/*/litex* @mateusz-holenko @kgugala @pgielda
-/dts/bindings/*/vexriscv* @mateusz-holenko @kgugala @pgielda
/dts/bindings/*/andes* @cwshu @kevinwang821020 @jimmyzhe
/dts/bindings/*/neorv32* @henrikbrixandersen
/dts/bindings/*/*lan91c111* @sgrrzhf
diff --git a/Kconfig.constants b/Kconfig.constants
new file mode 100644
index 00000000000..980b1da193a
--- /dev/null
+++ b/Kconfig.constants
@@ -0,0 +1,19 @@
+# Constant variables to be used across Kconfig options
+
+# Copyright (c) 2024 basalte bv
+# SPDX-License-Identifier: Apache-2.0
+
+INT8_MIN := -128
+INT16_MIN := -32768
+INT32_MIN := -2147483648
+INT64_MIN := -9223372036854775808
+
+INT8_MAX := 127
+INT16_MAX := 32767
+INT32_MAX := 2147483647
+INT64_MAX := 9223372036854775807
+
+UINT8_MAX := 255
+UINT16_MAX := 65535
+UINT32_MAX := 4294967295
+UINT64_MAX := 18446744073709551615
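
Because `Kconfig.zephyr` sources this file before everything else (see the next diff), any Kconfig option parsed afterwards can expand these preprocessor variables with `$(...)`. A hypothetical usage sketch (the option name is invented for illustration):

```
config MY_DRIVER_RETRIES
	int "Number of transfer retries"
	default 3
	range 0 $(UINT8_MAX)
```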
diff --git a/Kconfig.zephyr b/Kconfig.zephyr
index 904999f9f66..f97819896d9 100644
--- a/Kconfig.zephyr
+++ b/Kconfig.zephyr
@@ -5,6 +5,8 @@
# Copyright (c) 2023 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
+source "Kconfig.constants"
+
osource "${APPLICATION_SOURCE_DIR}/VERSION"
# Include Kconfig.defconfig files first so that they can override defaults and
@@ -258,6 +260,20 @@ config LINKER_USE_PINNED_SECTION
Requires that pinned sections exist in the architecture, SoC,
board or custom linker script.
+config LINKER_USE_ONDEMAND_SECTION
+ bool "Use Evictable Linker Section"
+ depends on DEMAND_MAPPING
+ depends on !LINKER_USE_PINNED_SECTION
+ depends on !ARCH_MAPS_ALL_RAM
+ help
+ If enabled, the symbols which may be evicted from memory
+ will be put into a linker section reserved for on-demand symbols.
+ During boot, the corresponding memory will be mapped as paged out.
+ This is conceptually the opposite of CONFIG_LINKER_USE_PINNED_SECTION.
+
+ Requires that on-demand sections exist in the architecture, SoC,
+ board or custom linker script.
+
config LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
bool "Generic sections are present at boot" if DEMAND_PAGING && LINKER_USE_PINNED_SECTION
default y
@@ -273,7 +289,7 @@ config LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
config LINKER_LAST_SECTION_ID
bool "Last section identifier"
- default y
+ default y if !ARM64
depends on ARM || ARM64 || RISCV
help
If enabled, the last section will contain an identifier.
@@ -478,6 +494,7 @@ choice COMPILER_OPTIMIZATIONS
prompt "Optimization level"
default NO_OPTIMIZATIONS if COVERAGE
default DEBUG_OPTIMIZATIONS if DEBUG
+ default SIZE_OPTIMIZATIONS_AGGRESSIVE if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "llvm"
default SIZE_OPTIMIZATIONS
help
Note that these flags shall only control the compiler
@@ -490,6 +507,12 @@ config SIZE_OPTIMIZATIONS
Compiler optimizations will be set to -Os independently of other
options.
+config SIZE_OPTIMIZATIONS_AGGRESSIVE
+ bool "Aggressively optimize for size"
+ help
+ Compiler optimizations will be set to -Oz independently of other
+ options.
+
config SPEED_OPTIMIZATIONS
bool "Optimize for speed"
help
@@ -940,6 +963,8 @@ config DEPRECATED
help
Symbol that must be selected by a feature or module if it is
considered to be deprecated.
+ When adding this to an option, remember to follow the instructions in
+ https://docs.zephyrproject.org/latest/develop/api/api_lifecycle.html#deprecated
config WARN_DEPRECATED
bool
@@ -1065,3 +1090,20 @@ config BOOTLOADER_BOSSA_ADAFRUIT_UF2
endchoice
endmenu
+
+menu "Compatibility"
+
+config LEGACY_GENERATED_INCLUDE_PATH
+ bool "Legacy include path for generated headers"
+ default y
+ help
+ Allow applications and libraries to use the Zephyr legacy include
+ path for the generated headers, which does not use the `zephyr/` prefix.
+
+ The preferred way to include the `version.h` header is now to use
+ <zephyr/version.h>. This Kconfig is currently enabled by default so that
+ user applications won't immediately fail to compile.
+
+ This Kconfig will be deprecated and eventually removed in future releases.
+
+endmenu
diff --git a/MAINTAINERS.yml b/MAINTAINERS.yml
index c24eb2e290c..cc7cb9a1411 100644
--- a/MAINTAINERS.yml
+++ b/MAINTAINERS.yml
@@ -119,6 +119,8 @@ ACPI:
- lib/acpi/
- include/zephyr/acpi/
- tests/lib/acpi/
+ - dts/bindings/acpi/
+ - include/zephyr/dt-bindings/acpi/
labels:
- "area: ACPI"
tests:
@@ -128,14 +130,17 @@ ARC arch:
status: maintained
maintainers:
- ruuddw
+ - evgeniy-paltsev
collaborators:
- abrodkin
- - evgeniy-paltsev
files:
- arch/arc/
- include/zephyr/arch/arc/
+ - drivers/timer/*arcv2*
+ - drivers/interrupt_controller/*arcv2*
- tests/arch/arc/
- - dts/arc/synopsys/
+ - dts/arc/
+ - dts/bindings/arc/
- doc/hardware/arch/arc-support-status.rst
labels:
- "area: ARC"
@@ -199,6 +204,8 @@ ARM Platforms:
- soc/arm/designstart/
- soc/arm/fvp_aemv8*/
- dts/arm/armv*.dtsi
+ - dts/bindings/arm/arm*.yaml
+ - drivers/interrupt_controller/intc_gic*
labels:
- "platform: ARM"
@@ -237,10 +244,11 @@ MIPS arch:
- arch.mips
Ambiq Platforms:
- status: odd fixes
+ status: maintained
+ maintainers:
+ - AlessandroLuo
collaborators:
- aaronyegx
- - AlessandroLuo
- RichardSWheatley
files:
- soc/ambiq/
@@ -293,47 +301,6 @@ Binary Descriptors:
tests:
- bindesc
-Bluetooth:
- status: maintained
- maintainers:
- - jhedberg
- collaborators:
- - hermabe
- - Vudentz
- - Thalley
- - asbjornsabo
- - sjanc
- files:
- - doc/connectivity/bluetooth/
- - include/zephyr/bluetooth/
- - samples/bluetooth/
- - subsys/bluetooth/
- - subsys/bluetooth/common/
- - tests/bluetooth/
- - tests/bsim/bluetooth/
- files-exclude:
- - include/zephyr/bluetooth/mesh/
- - subsys/bluetooth/controller/
- - subsys/bluetooth/host/
- - subsys/bluetooth/mesh/
- - samples/bluetooth/mesh/
- - subsys/bluetooth/audio/
- - include/zephyr/bluetooth/audio/
- - tests/bsim/bluetooth/audio/
- - tests/bsim/bluetooth/host/
- - tests/bsim/bluetooth/ll/
- - tests/bluetooth/controller/
- - tests/bluetooth/host*/
- - tests/bluetooth/mesh_*/
- - tests/bluetooth/mesh/
- - tests/bluetooth/audio/
- - tests/bsim/bluetooth/mesh/
- - tests/bluetooth/shell/audio*
- labels:
- - "area: Bluetooth"
- tests:
- - bluetooth
-
Bluetooth HCI:
status: maintained
maintainers:
@@ -350,6 +317,8 @@ Bluetooth HCI:
- include/zephyr/drivers/bluetooth/
- drivers/bluetooth/
- samples/bluetooth/hci_*/
+ - tests/bsim/bluetooth/hci_uart/
+ - dts/bindings/bluetooth/
labels:
- "area: Bluetooth Host"
- "area: Bluetooth"
@@ -369,7 +338,16 @@ Bluetooth controller:
- wopu-ot
- erbr-ot
files:
+ - doc/connectivity/bluetooth/bluetooth-ctlr-arch.rst
+ - doc/connectivity/bluetooth/img/ctlr*
+ - doc/connectivity/bluetooth/api/controller.rst
+ - include/zephyr/bluetooth/controller.h
+ - subsys/bluetooth/common/
- subsys/bluetooth/controller/
+ - subsys/bluetooth/crypto/
+ - subsys/bluetooth/shell/ll.c
+ - subsys/bluetooth/shell/ll.h
+ - subsys/bluetooth/shell/ticker.c
- tests/bluetooth/controller/
- tests/bsim/bluetooth/ll/
labels:
@@ -390,11 +368,60 @@ Bluetooth Host:
- sjanc
- theob-pro
files:
+ - doc/connectivity/bluetooth/
+ - include/zephyr/bluetooth/
+ - samples/bluetooth/
+ - subsys/bluetooth/common/
+ - subsys/bluetooth/crypto/
- subsys/bluetooth/host/
+ - subsys/bluetooth/lib/
- subsys/bluetooth/services/
- subsys/bluetooth/shell/
- - tests/bluetooth/host*/
- - tests/bsim/bluetooth/host/
+ - subsys/bluetooth/CMakeLists.txt
+ - subsys/bluetooth/Kconfig*
+ - tests/bluetooth/
+ - tests/bsim/bluetooth/
+ files-exclude:
+ - subsys/bluetooth/host/classic/
+ - include/zephyr/bluetooth/audio/
+ - include/zephyr/bluetooth/classic/
+ - include/zephyr/bluetooth/mesh/
+ - include/zephyr/bluetooth/iso.h
+ - include/zephyr/bluetooth/controller.h
+ - include/zephyr/bluetooth/mesh.h
+ - doc/connectivity/bluetooth/bluetooth-ctlr-arch.rst
+ - doc/connectivity/bluetooth/autopts/
+ - doc/connectivity/bluetooth/img/ctlr*
+ - doc/connectivity/bluetooth/api/audio/
+ - doc/connectivity/bluetooth/api/mesh/
+ - doc/connectivity/bluetooth/api/shell/iso.rst
+ - doc/connectivity/bluetooth/api/controller.rst
+ - samples/bluetooth/bap*/
+ - samples/bluetooth/cap*/
+ - samples/bluetooth/hap*/
+ - samples/bluetooth/hci_*/
+ - samples/bluetooth/pbp*/
+ - samples/bluetooth/tmap*/
+ - samples/bluetooth/iso_*/
+ - samples/bluetooth/mesh*/
+ - subsys/bluetooth/shell/bredr.c
+ - subsys/bluetooth/shell/iso.c
+ - subsys/bluetooth/shell/ll.c
+ - subsys/bluetooth/shell/ll.h
+ - subsys/bluetooth/shell/ticker.c
+ - subsys/bluetooth/Kconfig.iso
+ - subsys/bluetooth/host/iso.c
+ - subsys/bluetooth/host/iso_internal.h
+ - tests/bluetooth/audio/
+ - tests/bluetooth/controller/
+ - tests/bluetooth/mesh*/
+ - tests/bluetooth/qualification/
+ - tests/bluetooth/tester/
+ - tests/bsim/bluetooth/audio/
+ - tests/bsim/bluetooth/audio_samples/
+ - tests/bsim/bluetooth/hci_uart/
+ - tests/bsim/bluetooth/ll/
+ - tests/bsim/bluetooth/mesh/
labels:
- "area: Bluetooth Host"
- "area: Bluetooth"
@@ -411,12 +438,15 @@ Bluetooth Mesh:
- akredalen
- HaavardRei
- omkar3141
+ - KyraLengfeld
files:
- - subsys/bluetooth/mesh/
+ - doc/connectivity/bluetooth/api/mesh/
- include/zephyr/bluetooth/mesh/
+ - include/zephyr/bluetooth/mesh.h
+ - samples/bluetooth/mesh*/
+ - subsys/bluetooth/mesh/
- tests/bluetooth/mesh*/
- tests/bsim/bluetooth/mesh/
- - samples/bluetooth/mesh/
labels:
- "area: Bluetooth Mesh"
- "area: Bluetooth"
@@ -437,19 +467,23 @@ Bluetooth Audio:
- kruithofa
- larsgk
- pin-zephyr
+ - niym-ot
+ - jthm-ot
files:
- subsys/bluetooth/audio/
- include/zephyr/bluetooth/audio/
- tests/bluetooth/audio/
- tests/bsim/bluetooth/audio/
+ - tests/bsim/bluetooth/audio_samples/
- tests/bluetooth/shell/audio.conf
- tests/bluetooth/tester/overlay-le-audio.conf
+ - tests/bluetooth/tester/src/audio/
- doc/connectivity/bluetooth/api/audio/
- - samples/bluetooth/broadcast_audio*/
+ - samples/bluetooth/bap*/
+ - samples/bluetooth/cap*/
- samples/bluetooth/hap*/
- - samples/bluetooth/public_broadcast*/
+ - samples/bluetooth/pbp*/
- samples/bluetooth/tmap*/
- - samples/bluetooth/unicast_audio*/
labels:
- "area: Bluetooth Audio"
- "area: Bluetooth"
@@ -464,10 +498,50 @@ Bluetooth Classic:
- jhedberg
- sjanc
files:
+ - subsys/bluetooth/common/
- subsys/bluetooth/host/classic/
+ - subsys/bluetooth/shell/bredr.c
- include/zephyr/bluetooth/classic/
labels:
- "area: Bluetooth Classic"
+ - "area: Bluetooth"
+ tests:
+ - bluetooth
+
+Bluetooth ISO:
+ status: maintained
+ maintainers:
+ - Thalley
+ collaborators:
+ - jhedberg
+ files:
+ - include/zephyr/bluetooth/iso.h
+ - doc/connectivity/bluetooth/api/shell/iso.rst
+ - samples/bluetooth/iso_*/
+ - subsys/bluetooth/shell/iso.c
+ - subsys/bluetooth/Kconfig.iso
+ - subsys/bluetooth/host/iso.c
+ - subsys/bluetooth/host/iso_internal.h
+ labels:
+ - "area: Bluetooth ISO"
+ - "area: Bluetooth"
+ tests:
+ - bluetooth
+
+Bluetooth Qualification:
+ status: maintained
+ maintainers:
+ - sjanc
+ collaborators:
+ - Thalley
+ - jhedberg
+ files:
+ - doc/connectivity/bluetooth/autopts/
+ - tests/bluetooth/qualification/
+ - tests/bluetooth/tester/
+ labels:
+ - "area: Bluetooth Qualification"
+ - "area: Bluetooth"
tests:
- bluetooth
@@ -590,6 +664,22 @@ CMSIS API layer:
- portability.cmsis_rtos_v1
- portability.cmsis_rtos_v2
+DAP:
+ status: maintained
+ maintainers:
+ - jfischer-no
+ collaborators:
+ - maxd-nordic
+ files:
+ - include/zephyr/drivers/swdp.h
+ - drivers/dp/
+ - subsys/dap/
+ - samples/subsys/dap/
+ description: >-
+ Debug Access Port controller
+ labels:
+ - "area: dap"
+
DSP subsystem:
status: maintained
maintainers:
@@ -646,6 +736,7 @@ Coding Guidelines:
- nashif
- carlescufi
- jfischer-no
+ - simhein
files:
- .checkpatch.conf
- .clang-format
@@ -668,8 +759,8 @@ Coding Guidelines:
- "area: Coding Guidelines"
Common Architecture Interface:
- status: odd fixes
- collaborators:
+ status: maintained
+ maintainers:
- dcpleung
- nashif
files:
@@ -713,6 +804,32 @@ Debug:
tests:
- debug
+"Debug: Profiling: Perf":
+ status: odd fixes
+ files:
+ - doc/services/profiling/perf.rst
+ - samples/subsys/profiling/perf/
+ - scripts/profiling/stackcollapse.py
+ - subsys/profiling/perf/
+ labels:
+ - "area: Profiling / Perf"
+ tests:
+ - debug.profiling.perf
+
+"Debug: Symtab":
+ status: maintained
+ maintainers:
+ - ycsin
+ files:
+ - include/zephyr/debug/symtab.h
+ - subsys/debug/symtab/
+ - tests/subsys/debug/symtab/
+ - scripts/build/gen_symtab.py
+ labels:
+ - "area: Symtab"
+ tests:
+ - debug.symtab
+
Demand Paging:
status: maintained
maintainers:
@@ -731,8 +848,8 @@ Device Driver Model:
status: maintained
maintainers:
- gmarull
- collaborators:
- tbursztyka
+ collaborators:
- dcpleung
- nashif
files:
@@ -763,11 +880,12 @@ DFU:
- dfu
Devicetree:
- status: maintained
- maintainers:
- - galak
+ status: odd fixes
collaborators:
- decsny
+ - galak
+ files-regex:
+ - dts/bindings/.*zephyr.*
files:
- scripts/dts/
- dts/common/
@@ -775,7 +893,11 @@ Devicetree:
- doc/build/dts/
- include/zephyr/devicetree/
- scripts/kconfig/kconfigfunctions.py
+ - doc/build/kconfig/preprocessor-functions.rst
- include/zephyr/devicetree.h
+ - include/zephyr/dt-bindings/dt-util.h
+ - dts/binding-template.yaml
+ - dts/bindings/base/
files-exclude:
- dts/common/nordic/
labels:
@@ -783,19 +905,6 @@ Devicetree:
tests:
- libraries.devicetree
-Devicetree Bindings:
- status: maintained
- maintainers:
- - galak
- collaborators:
- - decsny
- files:
- - dts/bindings/
- - include/zephyr/dt-bindings/
- - dts/binding-template.yaml
- labels:
- - "area: Devicetree Binding"
-
Disk:
status: maintained
maintainers:
@@ -812,6 +921,9 @@ Disk:
- tests/subsys/sd/
- tests/drivers/disk/
- include/zephyr/sd/
+ - dts/bindings/sd/
+ - dts/bindings/mmc/
+ - dts/bindings/disk/
labels:
- "area: Disk Access"
tests:
@@ -825,15 +937,20 @@ Display drivers:
files:
- drivers/display/
- dts/bindings/display/
+ - include/zephyr/dt-bindings/display/
- include/zephyr/drivers/display.h
- include/zephyr/display/
- include/zephyr/drivers/display.h
- subsys/fb/
- samples/subsys/display/
+ - tests/subsys/display/
- doc/hardware/peripherals/display/
- tests/drivers/*/display/
labels:
- "area: Display"
+ tests:
+ - display.cfb
+ - drivers.display
Documentation:
status: maintained
@@ -852,18 +969,19 @@ Documentation:
- doc/project/
- doc/releases/
- doc/security/
+ - doc/safety/
- README.rst
- doc/substitutions.txt
- doc/images/Zephyr-Kite-in-tree.png
- doc/index-tex.rst
- doc/index.rst
- - doc/known-warnings.txt
- doc/templates/sample.tmpl
- doc/templates/board.tmpl
- boards/index.rst
files-exclude:
- doc/releases/migration-guide-*
- doc/releases/release-notes-*
+ - doc/develop/test/
labels:
- "area: Documentation"
@@ -871,10 +989,10 @@ Documentation Infrastructure:
status: maintained
maintainers:
- gmarull
+ - kartben
collaborators:
- carlescufi
- nashif
- - kartben
files:
- doc/_*/
- doc/CMakeLists.txt
@@ -887,8 +1005,8 @@ Documentation Infrastructure:
Release Notes:
status: maintained
maintainers:
- - nashif
- - aescolar
+ - dkalowsk
+ - mmahadevan108
collaborators:
- kartben
files:
@@ -912,6 +1030,7 @@ Release Notes:
- doc/hardware/peripherals/adc.rst
- tests/drivers/build_all/adc/
- include/zephyr/dt-bindings/adc/
+ - dts/bindings/adc/
labels:
- "area: ADC"
tests:
@@ -922,7 +1041,6 @@ Release Notes:
collaborators:
- lyakh
- lgirdwood
- - marc-hb
- kv2019i
files:
- drivers/audio/
@@ -988,6 +1106,7 @@ Release Notes:
- samples/modules/canopennode/
- samples/net/sockets/can/
- samples/subsys/canbus/
+ - scripts/west_commands/runners/canopen_program.py
- subsys/canbus/
- subsys/net/l2/canbus/
- tests/drivers/build_all/can/
@@ -1005,7 +1124,7 @@ Release Notes:
maintainers:
- rriveramcrus
collaborators:
- - GRobertZieba
+ - RobertZ2011
files:
- drivers/charger/
- dts/bindings/charger/
@@ -1026,6 +1145,7 @@ Release Notes:
files:
- drivers/clock_control/
- dts/bindings/clock/
+ - include/zephyr/dt-bindings/clock/
- include/zephyr/drivers/clock_control.h
- include/zephyr/dt-bindings/clock/
- tests/drivers/clock_control/
@@ -1089,6 +1209,7 @@ Release Notes:
- include/zephyr/crypto/
- samples/drivers/crypto/
- tests/crypto/
+ - doc/services/crypto/
labels:
- "area: Crypto / RNG"
tests:
@@ -1101,6 +1222,8 @@ Release Notes:
files:
- drivers/dac/
- include/zephyr/drivers/dac.h
+ - dts/bindings/dac/
+ - include/zephyr/dt-bindings/dac/
- tests/drivers/dac/
- samples/drivers/dac/
- doc/hardware/peripherals/dac.rst
@@ -1124,6 +1247,8 @@ Release Notes:
- drivers/dai/
- doc/hardware/peripherals/audio/dai.rst
- include/zephyr/drivers/dai.h
+ - include/zephyr/dt-bindings/dai/
+ - dts/bindings/dai/
labels:
- "area: DAI"
@@ -1148,6 +1273,8 @@ Release Notes:
- drivers/dma/
- tests/drivers/dma/
- include/zephyr/drivers/dma/
+ - dts/bindings/dma/
+ - include/zephyr/dt-bindings/dma/
- doc/hardware/peripherals/dma.rst
- include/zephyr/drivers/dma.h
- include/zephyr/dt-bindings/dma/
@@ -1184,7 +1311,7 @@ Release Notes:
- samples/drivers/eeprom/
- tests/drivers/eeprom/
- tests/drivers/*/eeprom/
- - doc/hardware/peripherals/eeprom.rst
+ - doc/hardware/peripherals/eeprom/
labels:
- "area: EEPROM"
tests:
@@ -1194,6 +1321,8 @@ Release Notes:
status: maintained
maintainers:
- ceolin
+ collaborators:
+ - tomi-font
files:
- drivers/entropy/
- include/zephyr/drivers/entropy.h
@@ -1231,6 +1360,7 @@ Release Notes:
collaborators:
- decsny
- lmajewski
+ - pdgendt
files:
- drivers/ethernet/
- include/zephyr/dt-bindings/ethernet/
@@ -1250,6 +1380,7 @@ Release Notes:
files:
- drivers/flash/
- dts/bindings/flash_controller/
+ - include/zephyr/dt-bindings/flash_controller/
- include/zephyr/drivers/flash.h
- samples/drivers/flash_shell/
- samples/drivers/soc_flash_nrf/
@@ -1305,6 +1436,7 @@ Release Notes:
files:
- doc/hardware/peripherals/gpio.rst
- drivers/gpio/
+ - dts/bindings/gpio/
- include/zephyr/drivers/gpio/
- include/zephyr/drivers/gpio.h
- include/zephyr/dt-bindings/gpio/
@@ -1318,7 +1450,7 @@ Release Notes:
"Drivers: GNSS":
status: maintained
maintainers:
- - bjarki-trackunit
+ - bjarki-andreasen
collaborators:
- tomi-font
- fabiobaltieri
@@ -1327,6 +1459,8 @@ Release Notes:
- drivers/gnss/
- include/zephyr/drivers/gnss.h
- include/zephyr/drivers/gnss/
+ - dts/bindings/gnss/
+ - include/zephyr/dt-bindings/gnss/
- tests/drivers/build_all/gnss/
- tests/drivers/gnss/
labels:
@@ -1334,6 +1468,22 @@ Release Notes:
tests:
- drivers.gnss
+"Drivers: Haptics":
+ status: maintained
+ maintainers:
+ - rriveramcrus
+ files:
+ - drivers/haptics/
+ - dts/bindings/haptics/
+ - include/zephyr/drivers/haptics.h
+ - doc/hardware/peripherals/haptics.rst
+ - tests/drivers/build_all/haptics/
+ - samples/drivers/haptics/
+ labels:
+ - "area: Haptics"
+ tests:
+ - drivers.haptics
+
"Drivers: HW Info":
status: maintained
maintainers:
@@ -1418,7 +1568,7 @@ Release Notes:
- fgrandel
collaborators:
- rlubos
- - jciupis
+ - ankuns
- cfriedt
- jukkar
files:
@@ -1452,6 +1602,8 @@ Release Notes:
- drivers/memc/
- samples/drivers/memc/
- tests/drivers/memc/
+ - include/zephyr/dt-bindings/memory-controller/
+ - dts/bindings/memory-controllers/
labels:
- "area: MEMC"
tests:
@@ -1467,6 +1619,7 @@ Release Notes:
- drivers/mdio/
- include/zephyr/drivers/mdio.h
- tests/drivers/build_all/mdio/
+ - dts/bindings/mdio/
labels:
- "area: MDIO"
tests:
@@ -1487,6 +1640,26 @@ Release Notes:
tests:
- drivers.mipi_dsi
+"Drivers: MSPI":
+ status: maintained
+ maintainers:
+ - swift-tk
+ files:
+ - drivers/mspi/
+ - drivers/memc/*mspi*
+ - drivers/flash/*mspi*
+ - include/zephyr/drivers/mspi.h
+ - include/zephyr/drivers/mspi/
+ - samples/drivers/mspi/
+ - tests/drivers/mspi/
+ - doc/hardware/peripherals/mspi.rst
+ - dts/bindings/mspi/
+ - dts/bindings/mtd/mspi*
+ labels:
+ - "area: MSPI"
+ tests:
+ - drivers.mspi
+
"Drivers: Reset":
status: odd fixes
collaborators:
@@ -1494,6 +1667,8 @@ Release Notes:
files:
- drivers/reset/
- include/zephyr/drivers/reset.h
+ - dts/bindings/reset/
+ - include/zephyr/dt-bindings/reset/
"Interrupt Handling":
status: odd fixes
@@ -1559,10 +1734,12 @@ Release Notes:
- drivers/led/
- include/zephyr/drivers/led/
- include/zephyr/drivers/led.h
- - samples/drivers/led_*/
+ - samples/drivers/led/
- tests/drivers/led/
- doc/hardware/peripherals/led.rst
- tests/drivers/build_all/led/
+ - dts/bindings/led/
+ - include/zephyr/dt-bindings/led/
labels:
- "area: LED"
tests:
@@ -1571,9 +1748,9 @@ Release Notes:
"Drivers: LED Strip":
status: maintained
maintainers:
- - mbolivar-ampere
- simonguinot
collaborators:
+ - mbolivar-ampere
- soburi
- thedjnK
files:
@@ -1619,10 +1796,10 @@ Release Notes:
"Drivers: Regulators":
status: maintained
maintainers:
- - aasinclair
+ - gmarull
collaborators:
- danieldegrasse
- - gmarull
+ - aasinclair
files:
- drivers/regulator/
- include/zephyr/drivers/regulator/
@@ -1631,6 +1808,7 @@ Release Notes:
- tests/drivers/regulator/
- tests/drivers/build_all/regulator/
- doc/hardware/peripherals/regulators.rst
+ - dts/bindings/regulator/
labels:
- "area: Regulators"
tests:
@@ -1646,6 +1824,7 @@ Release Notes:
- include/zephyr/drivers/retained_mem.h
- tests/drivers/retained_mem/
- doc/hardware/peripherals/retained_mem.rst
+ - dts/bindings/retained_mem/
labels:
- "area: Retained Memory"
tests:
@@ -1654,7 +1833,7 @@ Release Notes:
"Drivers: RTC":
status: maintained
maintainers:
- - bjarki-trackunit
+ - bjarki-andreasen
files:
- drivers/rtc/
- include/zephyr/drivers/rtc/
@@ -1662,6 +1841,7 @@ Release Notes:
- doc/hardware/peripherals/rtc.rst
- include/zephyr/drivers/rtc.h
- tests/drivers/build_all/rtc/
+ - dts/bindings/rtc/
labels:
- "area: RTC"
tests:
@@ -1675,6 +1855,8 @@ Release Notes:
- drivers/pcie/
- include/zephyr/drivers/pcie/
- doc/hardware/peripherals/pcie.rst
+ - dts/bindings/pcie/
+ - include/zephyr/dt-bindings/pcie/
labels:
- "area: PCI"
@@ -1689,6 +1871,7 @@ Release Notes:
- include/zephyr/drivers/peci.h
- samples/drivers/peci/
- doc/hardware/peripherals/peci.rst
+ - dts/bindings/peci/
labels:
- "area: PECI"
tests:
@@ -1746,6 +1929,7 @@ Release Notes:
- include/zephyr/drivers/pm_cpu_ops/
- include/zephyr/drivers/pm_cpu_ops.h
- include/zephyr/arch/arm64/arm-smccc.h
+ - dts/bindings/pm_cpu_ops/
labels:
- "area: PM CPU ops"
@@ -1758,6 +1942,7 @@ Release Notes:
files:
- drivers/pwm/
- dts/bindings/pwm/
+ - include/zephyr/dt-bindings/pwm/
- tests/drivers/pwm/
- include/zephyr/*/pwms.h
- doc/hardware/peripherals/pwm.rst
@@ -1814,6 +1999,7 @@ Release Notes:
- yperess
- tristan-google
- ubieda
+ - jeppenodgaard
files:
- drivers/sensor/
- include/zephyr/drivers/sensor.h
@@ -1823,7 +2009,7 @@ Release Notes:
- dts/bindings/sensor/
- include/zephyr/drivers/sensor/
- include/zephyr/dt-bindings/sensor/
- - doc/hardware/peripherals/sensor.rst
+ - doc/hardware/peripherals/sensor/
- tests/drivers/build_all/sensor/
labels:
- "area: Sensors"
@@ -1856,12 +2042,29 @@ Release Notes:
- drivers/spi/
- include/zephyr/drivers/spi.h
- tests/drivers/spi/
+ - dts/bindings/spi/
+ - include/zephyr/dt-bindings/spi/
- doc/hardware/peripherals/spi.rst
labels:
- "area: SPI"
tests:
- drivers.spi
+"Drivers: Stepper":
+ status: maintained
+ maintainers:
+ - jilaypandya
+ files:
+ - drivers/stepper/
+ - include/zephyr/drivers/stepper.h
+ - dts/bindings/stepper/
+ - doc/hardware/peripherals/stepper.rst
+ - tests/drivers/build_all/stepper/
+ labels:
+ - "area: Stepper"
+ tests:
+ - drivers.stepper
+
"Drivers: System timer":
status: maintained
maintainers:
@@ -1871,6 +2074,8 @@ Release Notes:
files:
- drivers/timer/
- include/zephyr/drivers/timer/
+ - dts/bindings/timer/
+ - include/zephyr/dt-bindings/timer/
labels:
- "area: Timer"
@@ -1878,12 +2083,15 @@ Release Notes:
status: odd fixes
collaborators:
- loicpoulain
+ - josuah
files:
- drivers/video/
- include/zephyr/drivers/video.h
- include/zephyr/drivers/video-controls.h
- doc/hardware/peripherals/video.rst
- tests/drivers/*/video/
+ - dts/bindings/video/
+ - samples/drivers/video/
labels:
- "area: Video"
tests:
@@ -1936,6 +2144,7 @@ Release Notes:
- krish2718
files:
- drivers/wifi/
+ - dts/bindings/wifi/
labels:
- "area: Wi-Fi"
@@ -1951,6 +2160,29 @@ Release Notes:
labels:
- "area: Wi-Fi"
+"Drivers: Wi-Fi as nRF Wi-Fi":
+ status: maintained
+ maintainers:
+ - krish2718
+ - jukkar
+ collaborators:
+ - sachinthegreen
+ files:
+ - drivers/wifi/nrfwifi/
+ - dts/bindings/wifi/nordic,nrf70.yaml
+ - dts/bindings/wifi/nordic,nrf70-qspi.yaml
+ - dts/bindings/wifi/nordic,nrf70-spi.yaml
+ - dts/bindings/wifi/nordic,nrf70-coex.yaml
+ - dts/bindings/wifi/nordic,nrf7002-qspi.yaml
+ - dts/bindings/wifi/nordic,nrf7002-spi.yaml
+ - dts/bindings/wifi/nordic,nrf7000-qspi.yaml
+ - dts/bindings/wifi/nordic,nrf7000-spi.yaml
+ - dts/bindings/wifi/nordic,nrf7001-qspi.yaml
+ - dts/bindings/wifi/nordic,nrf7001-spi.yaml
+ - boards/shields/nrf7002ek/
+ labels:
+ - "area: Wi-Fi"
+
"Drivers: Memory Management":
status: maintained
maintainers:
@@ -1974,6 +2206,7 @@ Release Notes:
files:
- drivers/mipi_dbi/
- dts/bindings/mipi-dbi/
+ - include/zephyr/dt-bindings/mipi_dbi/
labels:
- "area: Display Controller"
@@ -2021,6 +2254,7 @@ Xen Platform:
- arch/arm64/core/xen/
- soc/xen/
- boards/xen/
+ - dts/bindings/xen/
labels:
- "area: Xen Platform"
@@ -2036,6 +2270,7 @@ Filesystems:
- samples/subsys/fs/
- subsys/fs/
- tests/subsys/fs/
+ - dts/bindings/fs/
labels:
- "area: File System"
tests:
@@ -2077,9 +2312,11 @@ Google Platforms:
maintainers:
- fabiobaltieri
- keith-zephyr
+ collaborators:
+ - duda-patryk
files:
- boards/google/
- - samples/boards/google_*/
+ - samples/boards/google/
Hash Utilities:
status: maintained
@@ -2134,6 +2371,8 @@ IPC:
- subsys/ipc/
- tests/subsys/ipc/
- doc/services/ipc/
+ - dts/bindings/ipc/
+ - include/zephyr/dt-bindings/ipc_service/
description: >-
Inter-Processor Communication
labels:
@@ -2300,21 +2539,22 @@ Memory Management:
- tests/lib/mem_blocks/
- doc/services/mem_mgmt/
- include/zephyr/mem_mgmt/mem_attr.h
+ - include/zephyr/dt-bindings/memory-attr/
- tests/lib/mem_blocks_stats/
- tests/drivers/mm/
tests:
- mem_mgmt
-Laird Connectivity platforms:
+Ezurio platforms:
status: maintained
maintainers:
- rerickson1
collaborators:
- greg-leach
files:
- - boards/lairdconnect/
+ - boards/ezurio/
labels:
- - "platform: Laird Connectivity"
+ - "platform: Ezurio"
Linker Scripts:
status: maintained
@@ -2381,6 +2621,8 @@ LoRa and LoRaWAN:
- include/zephyr/lorawan/
- subsys/lorawan/
- samples/subsys/lorawan/
+ - include/zephyr/dt-bindings/lora/
+ - dts/bindings/lora/
- doc/connectivity/lora_lorawan/index.rst
labels:
- "area: LoRa"
@@ -2402,25 +2644,6 @@ MAINTAINERS file:
description: >-
Zephyr Maintainers File
-Mbed TLS:
- status: maintained
- maintainers:
- - d3zd3z
- - ceolin
- collaborators:
- - ithinuel
- files:
- - tests/crypto/mbedtls/
- - doc/services/crypto/
- - tests/benchmarks/mbedtls/
- labels:
- - "area: Crypto / RNG"
- description: >-
- Mbed TLS module implementing the PSA Crypto API and TLS.
- tests:
- - benchmark.crypto.mbedtls
- - crypto.mbedtls
-
MCU Manager:
status: maintained
maintainers:
@@ -2457,7 +2680,7 @@ Modbus:
Modem:
status: maintained
maintainers:
- - bjarki-trackunit
+ - bjarki-andreasen
collaborators:
- tomi-font
files:
@@ -2492,8 +2715,6 @@ hawkBit:
status: maintained
maintainers:
- maass-hamburg
- collaborators:
- - ycsin
files:
- subsys/mgmt/hawkbit/
- include/zephyr/mgmt/hawkbit.h
@@ -2566,9 +2787,11 @@ Networking:
files-exclude:
- doc/connectivity/networking/api/gptp.rst
- doc/connectivity/networking/api/ieee802154.rst
+ - doc/connectivity/networking/api/ptp.rst
- doc/connectivity/networking/api/wifi.rst
- include/zephyr/net/gptp.h
- include/zephyr/net/ieee802154*.h
+ - include/zephyr/net/ptp.h
- include/zephyr/net/wifi*.h
- include/zephyr/net/buf.h
- include/zephyr/net/dhcpv4*.h
@@ -2577,13 +2800,13 @@ Networking:
- samples/net/lwm2m_client/
- samples/net/wifi/
- samples/net/dhcpv4_client/
- - subsys/net/buf*.c
- subsys/net/l2/ethernet/gptp/
- subsys/net/l2/ieee802154/
- subsys/net/l2/wifi/
- subsys/net/lib/coap/
- subsys/net/lib/config/ieee802154*
- subsys/net/lib/lwm2m/
+ - subsys/net/lib/ptp/
- subsys/net/lib/tls_credentials/
- subsys/net/lib/dhcpv4/
- tests/net/dhcpv4/
@@ -2609,7 +2832,7 @@ Networking:
tests:
- net.socket
-"Networking: Buffers":
+"Networking Buffers":
status: maintained
maintainers:
- jhedberg
@@ -2618,13 +2841,15 @@ Networking:
- tbursztyka
- jukkar
files:
+ - doc/services/net_buf/
- include/zephyr/net/buf.h
- - subsys/net/buf*.c
- - tests/net/buf/
+ - include/zephyr/net_buf.h
+ - lib/net_buf/
+ - tests/lib/net_buf/
labels:
- "area: Networking Buffers"
tests:
- - net.buf
+ - libraries.net_buf
"Networking: Connection Manager":
status: maintained
@@ -2739,6 +2964,20 @@ Networking:
tests:
- net.mqtt_sn
+"Networking: PTP":
+ status: maintained
+ maintainers:
+ - awojasinski
+ files:
+ - doc/connectivity/networking/api/ptp.rst
+ - include/zephyr/net/ptp.h
+ - subsys/net/lib/ptp/
+ - samples/net/ptp/
+ labels:
+ - "area: Networking"
+ tests:
+ - sample.net.ptp
+
"Networking: Native IEEE 802.15.4":
status: maintained
maintainers:
@@ -2839,7 +3078,13 @@ Open AMP:
- carlocaione
files:
- lib/open-amp/
-
+ - samples/subsys/ipc/openamp/
+ - samples/subsys/ipc/openamp_rsc_table/
+ - samples/subsys/ipc/rpmsg_service/
+ labels:
+ - "area: Open AMP"
+ tests:
+ - sample.ipc.openamp
POSIX API layer:
status: maintained
@@ -2876,6 +3121,8 @@ Power management:
- tests/subsys/pm/
- doc/services/pm/
- drivers/power_domain/
+ - dts/bindings/power/
+ - include/zephyr/dt-bindings/power/
labels:
- "area: Power Management"
tests:
@@ -2904,6 +3151,8 @@ RISCV arch:
- ycsin
files:
- arch/riscv/
+ - boards/enjoydigital/litex_vexriscv/
+ - boards/lowrisc/opentitan_earlgrey/
- boards/qemu/riscv*/
- boards/sifive/
- boards/sparkfun/red_v_things_plus/
@@ -2961,10 +3210,12 @@ Sensor Subsystem:
- doc/services/sensing/
- subsys/sensing/
- samples/subsys/sensing/
+ - tests/subsys/sensing/
labels:
- "area: Sensor Subsystem"
tests:
- sample.sensing
+ - sensing.api
Stats:
status: odd fixes
@@ -2981,7 +3232,6 @@ Twister:
collaborators:
- PerMac
- hakehuang
- - gopiotr
- golowanow
- gchwier
- LukaszMrugala
@@ -3077,11 +3327,13 @@ State machine framework:
- sambhurst
collaborators:
- keith-zephyr
+ - glenn-andrews
files:
- doc/services/smf/
- include/zephyr/smf.h
- lib/smf/
- tests/lib/smf/
+ - samples/subsys/smf/
labels:
- "area: State Machine Framework"
tests:
@@ -3092,20 +3344,25 @@ ADI Platforms:
maintainers:
- MaureenHelm
collaborators:
+ - ozersa
+ - ttmut
- galak
- microbuilder
files:
- boards/adi/
- - drivers/*/max*
+ - boards/shields/pmod_acl/
+ - drivers/*/*max*
- drivers/*/*max*/
- drivers/dac/dac_ltc*
- drivers/ethernet/eth_adin*
- drivers/mdio/mdio_adin*
- drivers/regulator/regulator_adp5360*
- drivers/sensor/adi/
+ - dts/arm/adi/
- dts/bindings/*/adi,*
- dts/bindings/*/lltc,*
- dts/bindings/*/maxim,*
+ - soc/adi/
labels:
- "platform: ADI"
@@ -3119,11 +3376,11 @@ Broadcom Platforms:
GD32 Platforms:
status: maintained
maintainers:
- - cameled
- nandojve
collaborators:
- gmarull
- soburi
+ - cameled
files:
- boards/gd/
- drivers/*/*gd32*
@@ -3141,9 +3398,9 @@ Synopsys Platforms:
status: maintained
maintainers:
- ruuddw
+ - evgeniy-paltsev
collaborators:
- abrodkin
- - evgeniy-paltsev
files:
- soc/snps/
- boards/snps/
@@ -3153,6 +3410,8 @@ Synopsys Platforms:
- scripts/west_commands/tests/test_mdb.py
- scripts/west_commands/runners/nsim.py
- cmake/emu/nsim.cmake
+ - drivers/serial/uart_hostlink.c
+ - drivers/serial/Kconfig.hostlink
labels:
- "platform: Synopsys"
@@ -3195,6 +3454,19 @@ Nuvoton Numicro Numaker Platforms:
labels:
- "platform: Nuvoton Numicro Numaker"
+Nuvoton NPCM Platforms:
+ status: maintained
+ maintainers:
+ - maxdog988
+ - warp5tw
+ - jc849
+ files:
+ - soc/nuvoton/npcm/
+ - boards/nuvoton/npcm*/
+ - dts/arm/nuvoton/
+ labels:
+ - "platform: Nuvoton NPCM"
+
Raspberry Pi Pico Platforms:
status: maintained
maintainers:
@@ -3214,8 +3486,13 @@ Raspberry Pi Pico Platforms:
labels:
- "platform: Raspberry Pi Pico"
-SiLabs Platforms:
- status: odd fixes
+Silabs Platforms:
+ status: maintained
+ maintainers:
+ - jhedberg
+ collaborators:
+ - jerome-pouiller
+ - asmellby
files:
- soc/silabs/
- boards/silabs/
@@ -3223,7 +3500,28 @@ SiLabs Platforms:
- dts/bindings/*/silabs*
- drivers/*/*_gecko*
labels:
- - "platform: SiLabs"
+ - "platform: Silabs"
+
+Silabs SiM3U Platforms:
+ status: maintained
+ maintainers:
+ - rettichschnidi
+ collaborators:
+ - M1cha
+ - asmellby
+ - jerome-pouiller
+ - jhedberg
+ files:
+ - boards/silabs/dev_kits/sim3u1xx_dk/
+ - drivers/*/*_si32*
+ - drivers/*/Kconfig.si32
+ - dts/arm/silabs/sim3u*
+ - dts/bindings/*/*silabs,si32*
+ - soc/silabs/silabs_sim3/
+ labels:
+ - "platform: Silabs SiM3U"
+ description: >-
+ SiM3U SoCs, dts files, and related drivers. Boards based on SiM3U SoCs.
Intel Platforms (X86):
status: maintained
@@ -3256,7 +3554,6 @@ Intel Platforms (Xtensa):
- andyross
- lyakh
- lgirdwood
- - marc-hb
- kv2019i
- ceolin
- tmleman
@@ -3269,7 +3566,7 @@ Intel Platforms (Xtensa):
- soc/intel/intel_adsp/
- dts/xtensa/intel/
- tests/boards/intel_adsp/
- - samples/boards/intel_adsp/
+ - samples/boards/intel/adsp/
- dts/bindings/*/intel,adsp*
- scripts/west_commands/runners/intel_adsp.py
labels:
@@ -3312,12 +3609,16 @@ NXP Drivers:
status: maintained
maintainers:
- dleach02
- collaborators:
- mmahadevan108
+ collaborators:
- danieldegrasse
- decsny
- manuargue
- dbaluta
+ - MarkWangChinese
+ files-regex:
+ - ^drivers/.*nxp.*
+ - ^drivers/.*mcux.*
files:
- drivers/*/*imx*
- drivers/*/*lpc*.c
@@ -3329,6 +3630,8 @@ NXP Drivers:
- drivers/misc/*/nxp*
- include/zephyr/dt-bindings/*/*nxp*
- include/zephyr/dt-bindings/*/*mcux*
+ - include/zephyr/dt-bindings/inputmux/
+ - include/zephyr/dt-bindings/rdc/
- include/zephyr/drivers/*/*nxp*
- include/zephyr/drivers/*/*mcux*
- arch/arm/core/mpu/nxp_mpu.c
@@ -3343,8 +3646,8 @@ NXP Platforms (MCU):
status: maintained
maintainers:
- dleach02
- collaborators:
- mmahadevan108
+ collaborators:
- danieldegrasse
- DerekSnell
- yvanderv
@@ -3389,7 +3692,7 @@ NXP Platforms (S32):
- drivers/misc/*nxp_s32*/
- dts/bindings/*/nxp,s32*
- dts/arm/nxp/*s32*
- - samples/boards/nxp_s32/
+ - samples/boards/nxp/s32/
- include/zephyr/dt-bindings/*/nxp-s32*
- include/zephyr/dt-bindings/*/nxp_s32*
- include/zephyr/drivers/*/*nxp_s32*
@@ -3449,6 +3752,20 @@ Microchip MEC Platforms:
labels:
- "platform: Microchip MEC"
+Microchip RISC-V Platforms:
+ status: maintained
+ maintainers:
+ - fkokosinski
+ - kgugala
+ - tgorochowik
+ files:
+ - boards/microchip/m2gl025_miv/
+ - boards/microchip/mpfs_icicle/
+ - dts/riscv/microchip/
+ - soc/microchip/miv/
+ labels:
+ - "platform: Microchip RISC-V"
+
Microchip SAM Platforms:
status: maintained
maintainers:
@@ -3482,7 +3799,7 @@ nRF Platforms:
- drivers/*/*nrf*.c
- drivers/*/*nordic*/
- soc/nordic/
- - samples/boards/nrf/
+ - samples/boards/nordic/
- dts/*/nordic/
- dts/bindings/*/nordic,*
- tests/drivers/*/*nrf*/
@@ -3511,6 +3828,8 @@ Renesas SmartBond Platforms:
- ioannis-karachalios
- andrzej-kaczmarek
- blauret
+ collaborators:
+ - ydamigos
files:
- boards/renesas/da14*/
- drivers/*/*smartbond*
@@ -3528,15 +3847,20 @@ Renesas RA Platforms:
status: maintained
maintainers:
- soburi
+ - KhiemNguyenT
+ collaborators:
+ - duynguyenxa
+ - thaoluonguw
files:
- boards/arduino/uno_r4/
+ - boards/renesas/*ra*/
- drivers/*/*renesas_ra*
- drivers/pinctrl/renesas/ra/
- dts/arm/renesas/ra/
- dts/bindings/*/*renesas,ra*
- soc/renesas/ra/
labels:
- - "platforms: Renesas RA"
+ - "platform: Renesas RA"
description: >-
Renesas RA SOCs, dts files, and related drivers. Boards based
on Renesas RA SoCs.
@@ -3586,10 +3910,11 @@ STM32 Platforms:
maintainers:
- erwango
collaborators:
- - ajarmouni-st
- FRASTM
- gautierg-st
- GeorgeCGV
+ - marwaiehm-st
+ - mathieuchopstm
files:
- boards/st/
- drivers/*/*stm32*.c
@@ -3599,7 +3924,7 @@ STM32 Platforms:
- dts/arm/st/
- dts/bindings/*/*stm32*
- soc/st/stm32/
- - samples/boards/stm32/
+ - samples/boards/st/
labels:
- "platform: STM32"
description: >-
@@ -3614,6 +3939,8 @@ Espressif Platforms:
- LucasTambor
- marekmatej
- uLipe
+ - raffarost
+ - wmrsouza
files:
- drivers/*/*esp32*.c
- boards/espressif/
@@ -3621,7 +3948,7 @@ Espressif Platforms:
- dts/xtensa/espressif/
- dts/riscv/espressif/
- dts/bindings/*/*esp32*
- - samples/boards/esp32*/
+ - samples/boards/espressif/
- tests/boards/espressif_esp32/
- drivers/*/*esp32*/
labels:
@@ -3643,7 +3970,7 @@ ITE Platforms:
- drivers/sensor/ite/
- drivers/*/*it8xxx2*.c
- drivers/*/*_ite_*
- - dts/bindings/*/*ite*
+ - dts/bindings/*/ite*
- dts/riscv/ite/
- soc/ite/
labels:
@@ -3731,6 +4058,28 @@ Infineon Platforms:
Infineon SOCs, dts files and related drivers. Infineon Proto, Pioneer, Eval and Relax
boards.
+LiteX Platforms:
+ status: maintained
+ maintainers:
+ - tgorochowik
+ - kgugala
+ - fkokosinski
+ collaborators:
+ - mateusz-holenko
+ - maass-hamburg
+ files:
+ - boards/enjoydigital/litex_vexriscv/
+ - drivers/*/*litex*
+ - drivers/*/Kconfig.litex
+ - dts/bindings/*/litex*
+ - dts/riscv/riscv32-litex-vexriscv.dtsi
+ - include/zephyr/drivers/*/*litex*
+ - samples/boards/enjoydigital/litex/
+ - samples/drivers/*litex/
+ - soc/litex/
+ labels:
+ - "platform: LiteX"
+
Panasonic Platforms:
status: maintained
maintainers:
@@ -3746,6 +4095,7 @@ RTIO:
- teburd
collaborators:
- yperess
+ - ubieda
files:
- samples/subsys/rtio/
- include/zephyr/rtio/
@@ -3836,23 +4186,6 @@ TDK Sensors:
tests:
- sample.drivers.misc.timeaware_gpio
-TF-M Integration:
- status: maintained
- maintainers:
- - d3zd3z
- collaborators:
- - Vge0rge
- - ithinuel
- files:
- - samples/tfm_integration/
- - modules/trusted-firmware-m/
- - doc/services/tfm/
- labels:
- - "area: TF-M"
- tests:
- - tfm
-
-
"Toolchain Integration":
status: maintained
maintainers:
@@ -3877,6 +4210,7 @@ TF-M Integration:
files:
- cmake/*/arcmwdt/
- include/zephyr/toolchain/mwdt.h
+ - include/zephyr/linker/linker-tool-mwdt.h
- lib/libc/arcmwdt/*
labels:
- "area: Toolchains"
@@ -3931,6 +4265,7 @@ USB:
files:
- drivers/usb/
- dts/bindings/usb/
+ - include/zephyr/dt-bindings/usb/
- include/zephyr/*/usb/
- include/zephyr/usb/
- samples/subsys/usb/
@@ -3939,7 +4274,6 @@ USB:
- tests/drivers/usb/
- tests/drivers/udc/
- doc/connectivity/usb/
- - scripts/generate_usb_vif/
labels:
- "area: USB"
tests:
@@ -3955,12 +4289,14 @@ USB-C:
files:
- drivers/usb_c/
- dts/bindings/usb-c/
+ - include/zephyr/dt-bindings/usb-c/
- include/zephyr/*/usb_c/
- include/zephyr/usb_c/
- samples/subsys/usb_c/
- subsys/usb/usb_c/
- doc/connectivity/usb/pd/
- doc/hardware/peripherals/usbc_vbus.rst
+ - scripts/generate_usb_vif/
labels:
- "area: USB-C"
tests:
@@ -4012,12 +4348,12 @@ VFS:
- filesystem
West:
- status: maintained
- maintainers:
- - mbolivar-ampere
+ status: odd fixes
collaborators:
+ - mbolivar-ampere
- carlescufi
- swinslow
+ - pdgendt
files:
- scripts/west-commands.yml
- scripts/west_commands/
@@ -4160,7 +4496,7 @@ West:
files:
- modules/cmsis/
labels:
- - "area: ARM"
+ - "area: CMSIS-Core"
"West project: cmsis-dsp":
status: maintained
@@ -4204,6 +4540,17 @@ West:
labels:
- "area: Storage"
+"West project: hal_adi":
+ status: maintained
+ maintainers:
+ - MaureenHelm
+ collaborators:
+ - ozersa
+ - ttmut
+ files: []
+ labels:
+ - "platform: ADI"
+
"West project: hal_altera":
status: odd fixes
collaborators:
@@ -4267,7 +4614,7 @@ West:
- drivers/misc/ethos_u/
- modules/hal_ethos_u/
labels:
- - "area: ARM"
+ - "platform: ARM"
"West project: hal_gigadevice":
status: maintained
@@ -4315,6 +4662,11 @@ West:
collaborators:
- hubertmis
- nordic-krch
+ - krish2718
+ - sachinthegreen
+ - udaynordic
+ - rajb9
+ - srkanordic
files:
- modules/hal_nordic/
labels:
@@ -4333,8 +4685,8 @@ West:
status: maintained
maintainers:
- dleach02
- collaborators:
- mmahadevan108
+ collaborators:
- danieldegrasse
- manuargue
- PetervdPerk-NXP
@@ -4372,6 +4724,10 @@ West:
collaborators:
- blauret
- andrzej-kaczmarek
+ - ydamigos
+ - soburi
+ - duynguyenxa
+ - thaoluonguw
files: []
labels:
- "platform: Renesas"
@@ -4386,15 +4742,20 @@ West:
- "platform: Raspberry Pi Pico"
"West project: hal_silabs":
- status: odd fixes
+ status: maintained
+ maintainers:
+ - jhedberg
collaborators:
+ - jerome-pouiller
+ - asmellby
- sateeshkotapati
- yonsch
- mnkp
+ - rettichschnidi
files:
- modules/Kconfig.silabs
labels:
- - "platform: SiLabs"
+ - "platform: Silabs"
"West project: hal_st":
status: maintained
@@ -4411,9 +4772,8 @@ West:
- erwango
collaborators:
- FRASTM
- - ABOSTM
- gautierg-st
- - Desvauxm-st
+ - marwaiehm-st
files:
- modules/Kconfig.stm32
labels:
@@ -4516,15 +4876,17 @@ West:
files:
- modules/lvgl/
- tests/lib/gui/lvgl/
+ - include/zephyr/dt-bindings/lvgl/
labels:
- "area: LVGL"
"West project: lz4":
status: odd fixes
collaborators:
- - Navin-Sankar
+ - parthitce
files:
- modules/lz4/
+ - samples/modules/compression/lz4/
labels:
- "area: Compression"
@@ -4535,10 +4897,17 @@ West:
- ceolin
collaborators:
- ithinuel
+ - valeriosetti
+ - tomi-font
files:
- modules/mbedtls/
+ - tests/crypto/mbedtls/
+ - tests/benchmarks/mbedtls/
labels:
- - "area: Crypto / RNG"
+ - "area: mbedTLS / PSA Crypto"
+ tests:
+ - benchmark.crypto.mbedtls
+ - crypto.mbedtls
"West project: mcuboot":
status: maintained
@@ -4549,7 +4918,7 @@ West:
- nordicjm
files:
- modules/Kconfig.mcuboot
- - tests/boot/test_mcuboot/
+ - tests/boot/
labels:
- "area: MCUBoot"
@@ -4658,7 +5027,6 @@ West:
- nashif
- lyakh
- lgirdwood
- - marc-hb
files:
- modules/Kconfig.sof
labels:
@@ -4702,10 +5070,16 @@ West:
collaborators:
- Vge0rge
- ithinuel
+ - valeriosetti
+ - tomi-font
files:
- modules/trusted-firmware-m/
+ - samples/tfm_integration/
+ - doc/services/tfm/
labels:
- "area: TF-M"
+ tests:
+ - trusted-firmware-m
"West project: tf-m-tests":
status: maintained
@@ -4833,6 +5207,7 @@ Continuous Integration:
files:
- .github/
- scripts/ci/
+ - scripts/make_bugs_pickle.py
- .checkpatch.conf
- scripts/gitlint/
- scripts/set_assignees.py
@@ -4854,7 +5229,7 @@ Test Framework (Ztest):
- tests/unit/util/
- tests/subsys/testsuite/
- samples/subsys/testsuite/
- - doc/develop/test/ztest.rst
+ - doc/develop/test/
labels:
- "area: Testsuite"
tests:
@@ -4888,6 +5263,8 @@ Random:
status: maintained
maintainers:
- ceolin
+ collaborators:
+ - tomi-font
files:
- subsys/random/
- include/zephyr/random/
@@ -4896,7 +5273,7 @@ Random:
# This area is to be converted to a subarea
Testing with Renode:
- status: maintained
+ status: odd fixes
collaborators:
- mateusz-holenko
- fkokosinski
@@ -4936,6 +5313,6 @@ zbus:
- subsys/llext/
- doc/services/llext/
labels:
- - "area: Linkable Loadable Extensions"
+ - "area: llext"
tests:
- llext
diff --git a/README.rst b/README.rst
index 45a3998194d..2b44d5ed4a5 100644
--- a/README.rst
+++ b/README.rst
@@ -10,12 +10,15 @@
-
-
-
+
+
+
+
+
+
+
+
+
The Zephyr Project is a scalable real-time operating system (RTOS) supporting
diff --git a/SDK_VERSION b/SDK_VERSION
index e35e56114f4..74aaa3f38cf 100644
--- a/SDK_VERSION
+++ b/SDK_VERSION
@@ -1 +1 @@
-0.16.5-1
+0.16.8
diff --git a/VERSION b/VERSION
index 11cab989b0b..83bbbb1d942 100644
--- a/VERSION
+++ b/VERSION
@@ -1,5 +1,5 @@
VERSION_MAJOR = 3
-VERSION_MINOR = 6
+VERSION_MINOR = 7
PATCHLEVEL = 99
VERSION_TWEAK = 0
EXTRAVERSION =
diff --git a/arch/Kconfig b/arch/Kconfig
index 03432871bd0..da35a1b2751 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -24,6 +24,7 @@ config ARC
imply XIP
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_SUPPORTS_ROM_START
+ select ARCH_HAS_DIRECTED_IPIS
help
ARC architecture
@@ -31,6 +32,7 @@ config ARM
bool
select ARCH_IS_SET
select ARCH_SUPPORTS_COREDUMP if CPU_CORTEX_M
+ select ARCH_SUPPORTS_COREDUMP_THREADS if CPU_CORTEX_M
# FIXME: current state of the code for all ARM requires this, but
# is really only necessary for Cortex-M with ARM MPU!
select GEN_PRIV_STACKS
@@ -50,6 +52,9 @@ config ARM64
select USE_SWITCH_SUPPORTED
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select BARRIER_OPERATIONS_ARCH
+ select ARCH_HAS_DIRECTED_IPIS
+ select ARCH_HAS_DEMAND_PAGING
+ select ARCH_HAS_DEMAND_MAPPING
help
ARM64 (AArch64) architecture
@@ -92,6 +97,7 @@ config X86
&& !SOC_HAS_TIMING_FUNCTIONS
select ARCH_HAS_STACK_CANARIES_TLS
select ARCH_SUPPORTS_MEM_MAPPED_STACKS if X86_MMU && !DEMAND_PAGING
+ select ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET if USERSPACE
help
x86 architecture
@@ -108,15 +114,17 @@ config RISCV
bool
select ARCH_IS_SET
select ARCH_SUPPORTS_COREDUMP
- select ARCH_SUPPORTS_ROM_START if !SOC_SERIES_ESP32C3
+ select ARCH_SUPPORTS_ROM_START if !SOC_FAMILY_ESPRESSIF_ESP32
+ select ARCH_SUPPORTS_EMPTY_IRQ_SPURIOUS
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_HAS_THREAD_LOCAL_STORAGE
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select USE_SWITCH_SUPPORTED
select USE_SWITCH
select SCHED_IPI_SUPPORTED if SMP
+ select ARCH_HAS_DIRECTED_IPIS
select BARRIER_OPERATIONS_BUILTIN
- imply XIP
+ select ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET if USERSPACE
help
RISCV architecture
@@ -129,6 +137,9 @@ config XTENSA
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_HAS_TIMING_FUNCTIONS
select ARCH_MEM_DOMAIN_DATA if USERSPACE
+ select ARCH_HAS_DIRECTED_IPIS
+ select THREAD_STACK_INFO
+ select ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET if USERSPACE
help
Xtensa architecture
@@ -139,6 +150,7 @@ config ARCH_POSIX
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
select ARCH_HAS_CUSTOM_BUSY_WAIT
select ARCH_HAS_THREAD_ABORT
+ select ARCH_HAS_THREAD_NAME_HOOK
select NATIVE_BUILD
select HAS_COVERAGE_SUPPORT
select BARRIER_OPERATIONS_BUILTIN
@@ -174,8 +186,9 @@ config BIG_ENDIAN
Little-endian architecture is the default and should leave this option
unselected. This option is selected by arch/$ARCH/Kconfig,
soc/**/Kconfig, or boards/**/Kconfig and the user should generally avoid
- modifying it. The option is used to select linker script OUTPUT_FORMAT
- and command line option for gen_isr_tables.py.
+ modifying it. The option is used to select linker script OUTPUT_FORMAT,
+ the toolchain flags (TOOLCHAIN_C_FLAGS, TOOLCHAIN_LD_FLAGS), and command
+ line option for gen_isr_tables.py.
config LITTLE_ENDIAN
# Hidden Kconfig option representing the default little-endian architecture
@@ -209,7 +222,7 @@ config SRAM_BASE_ADDRESS
hex "SRAM Base Address"
default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_SRAM))
help
- The SRAM base address. The default value comes from from
+ The SRAM base address. The default value comes from
/chosen/zephyr,sram in devicetree. The user should generally avoid
changing it via menuconfig or in configuration files.
@@ -400,6 +413,28 @@ config NOCACHE_MEMORY
transfers when cache coherence issues are not optimal or can not
be solved using cache maintenance operations.
+config FRAME_POINTER
+ bool "Compile the kernel with frame pointers"
+ select OVERRIDE_FRAME_POINTER_DEFAULT
+ help
+ Select Y here to gain precise stack traces at the expense of slightly
+ increased size and decreased speed.
+
+config ARCH_STACKWALK
+ bool "Compile the stack walking function"
+ default y
+ depends on ARCH_HAS_STACKWALK
+ help
+ Select Y here to compile the `arch_stack_walk()` function
+
+config ARCH_STACKWALK_MAX_FRAMES
+ int "Max depth for stack walk function"
+ default 8
+ depends on ARCH_STACKWALK
+ help
+ Depending on implementation, this can place a hard limit on the depths of the stack
+ for the stack walk function to examine.
+
menu "Interrupt Configuration"
config ISR_TABLES_LOCAL_DECLARATION_SUPPORTED
@@ -577,6 +612,14 @@ config SIMPLIFIED_EXCEPTION_CODES
down to the generic K_ERR_CPU_EXCEPTION, which makes testing code
much more portable.
+config EMPTY_IRQ_SPURIOUS
+ bool "Create empty spurious interrupt handler"
+ depends on ARCH_SUPPORTS_EMPTY_IRQ_SPURIOUS
+ help
+ This option changes body of spurious interrupt handler. When enabled,
+ handler contains only an infinite while loop, when disabled, handler
+ contains the whole Zephyr fault handling procedure.
+
endmenu # Interrupt configuration
config INIT_ARCH_HW_AT_BOOT
@@ -633,12 +676,18 @@ config ARCH_HAS_NESTED_EXCEPTION_DETECTION
config ARCH_SUPPORTS_COREDUMP
bool
+config ARCH_SUPPORTS_COREDUMP_THREADS
+ bool
+
config ARCH_SUPPORTS_ARCH_HW_INIT
bool
config ARCH_SUPPORTS_ROM_START
bool
+config ARCH_SUPPORTS_EMPTY_IRQ_SPURIOUS
+ bool
+
config ARCH_HAS_EXTRA_EXCEPTION_INFO
bool
@@ -668,6 +717,11 @@ config ARCH_SUPPORTS_MEM_MAPPED_STACKS
help
Select when the architecture supports memory mapped stacks.
+config ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET
+ bool
+ help
+ Select when the architecture implements arch_thread_priv_stack_space_get().
+
#
# Other architecture related options
#
@@ -733,6 +787,13 @@ config ARCH_HAS_DEMAND_PAGING
This hidden configuration should be selected by the architecture if
demand paging is supported.
+config ARCH_HAS_DEMAND_MAPPING
+ bool
+ help
+ This hidden configuration should be selected by the architecture if
+ demand paging is supported and arch_mem_map() supports
+ K_MEM_MAP_UNPAGED.
+
config ARCH_HAS_RESERVED_PAGE_FRAMES
bool
help
@@ -741,6 +802,13 @@ config ARCH_HAS_RESERVED_PAGE_FRAMES
memory mappings. The architecture will need to implement
arch_reserved_pages_update().
+config ARCH_HAS_DIRECTED_IPIS
+ bool
+ help
+ This hidden configuration should be selected by the architecture if
+ it has an implementation for arch_sched_directed_ipi() which allows
+ for IPIs to be directed to specific CPUs.
+
config CPU_HAS_DCACHE
bool
help
@@ -776,7 +844,7 @@ config ARCH_MAPS_ALL_RAM
virtual addresses elsewhere, this is limited to only management of the
virtual address space. The kernel's page frame ontology will not consider
this mapping at all; non-kernel pages will be considered free (unless marked
- as reserved) and Z_PAGE_FRAME_MAPPED will not be set.
+ as reserved) and K_MEM_PAGE_FRAME_MAPPED will not be set.
config DCLS
bool "Processor is configured in DCLS mode"
@@ -1041,9 +1109,28 @@ config TOOLCHAIN_HAS_BUILTIN_FFS
help
Hidden option to signal that toolchain has __builtin_ffs*().
-config ARCH_CPU_IDLE_CUSTOM
- bool "Custom arch_cpu_idle implementation"
- default n
+config ARCH_HAS_CUSTOM_CPU_IDLE
+ bool
help
This options allows applications to override the default arch idle implementation with
a custom one.
+
+config ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
+ bool
+ help
+ This options allows applications to override the default arch idle implementation with
+ a custom one.
+
+config ARCH_HAS_CUSTOM_SWAP_TO_MAIN
+ bool
+ help
+ It's possible that an architecture port cannot use _Swap() to swap to
+ the _main() thread, but instead must do something custom. It must
+ enable this option in that case.
+
+config ARCH_HAS_CUSTOM_BUSY_WAIT
+ bool
+ help
+ It's possible that an architecture port cannot or does not want to use
+ the provided k_busy_wait(), but instead must do something custom. It must
+ enable this option in that case.
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 8a3bd5b1b3c..a27b09659ac 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -18,6 +18,7 @@ config CPU_ARCEM
config CPU_ARCHS
bool
select ATOMIC_OPERATIONS_BUILTIN
+ select BARRIER_OPERATIONS_BUILTIN
help
This option signifies the use of an ARC HS CPU
@@ -373,7 +374,9 @@ config ARC_EXCEPTION_STACK_SIZE
endmenu
config ARC_EARLY_SOC_INIT
- bool "Make early stage SoC-specific initialization"
+ bool "Make early stage SoC-specific initialization [DEPRECATED]"
+ select SOC_RESET_HOOK
+ select DEPRECATED
help
Call SoC per-core setup code on early stage initialization
(before C runtime initialization). Setup code is called in form of
diff --git a/arch/arc/core/cache.c b/arch/arc/core/cache.c
index 8c2aab29fed..2688d5b03eb 100644
--- a/arch/arc/core/cache.c
+++ b/arch/arc/core/cache.c
@@ -227,4 +227,8 @@ static int init_dcache(void)
return 0;
}
-SYS_INIT(init_dcache, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
+
+void arch_cache_init(void)
+{
+ init_dcache();
+}
diff --git a/arch/arc/core/cpu_idle.S b/arch/arc/core/cpu_idle.S
index a5996a2ca4b..e0eaf4af2a1 100644
--- a/arch/arc/core/cpu_idle.S
+++ b/arch/arc/core/cpu_idle.S
@@ -26,6 +26,7 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
.align 4
.word 0
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
/*
* @brief Put the CPU in low-power mode
*
@@ -48,7 +49,9 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
sleep r1
j_s [blink]
nop
+#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
/*
* @brief Put the CPU in low-power mode, entered with IRQs locked
*
@@ -56,6 +59,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
*
* void arch_cpu_atomic_idle(unsigned int key)
*/
+
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
@@ -70,3 +74,4 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
sleep r1
j_s.d [blink]
seti r0
+#endif
diff --git a/arch/arc/core/fatal.c b/arch/arc/core/fatal.c
index f193c0b09f1..512d1cc442c 100644
--- a/arch/arc/core/fatal.c
+++ b/arch/arc/core/fatal.c
@@ -23,7 +23,7 @@
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_EXCEPTION_DEBUG
-static void dump_arc_esf(const z_arch_esf_t *esf)
+static void dump_arc_esf(const struct arch_esf *esf)
{
ARC_EXCEPTION_DUMP(" r0: 0x%" PRIxPTR " r1: 0x%" PRIxPTR " r2: 0x%" PRIxPTR
" r3: 0x%" PRIxPTR "", esf->r0, esf->r1, esf->r2, esf->r3);
@@ -42,7 +42,7 @@ static void dump_arc_esf(const z_arch_esf_t *esf)
}
#endif
-void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
if (esf != NULL) {
diff --git a/arch/arc/core/fault.c b/arch/arc/core/fault.c
index 763ed7a2c73..6f9da3cd1e0 100644
--- a/arch/arc/core/fault.c
+++ b/arch/arc/core/fault.c
@@ -346,7 +346,7 @@ static void dump_exception_info(uint32_t vector, uint32_t cause, uint32_t parame
* invokes the user provided routine k_sys_fatal_error_handler() which is
* responsible for implementing the error handling policy.
*/
-void _Fault(z_arch_esf_t *esf, uint32_t old_sp)
+void _Fault(struct arch_esf *esf, uint32_t old_sp)
{
uint32_t vector, cause, parameter;
uint32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
diff --git a/arch/arc/core/irq_offload.c b/arch/arc/core/irq_offload.c
index b658b8e353d..d1a3f900ca3 100644
--- a/arch/arc/core/irq_offload.c
+++ b/arch/arc/core/irq_offload.c
@@ -54,7 +54,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
}
/* need to be executed on every core in the system */
-int arc_irq_offload_init(void)
+void arch_irq_offload_init(void)
{
IRQ_CONNECT(IRQ_OFFLOAD_LINE, IRQ_OFFLOAD_PRIO, arc_irq_offload_handler, NULL, 0);
@@ -64,8 +64,4 @@ int arc_irq_offload_init(void)
* with generic irq_enable() but via z_arc_v2_irq_unit_int_enable().
*/
z_arc_v2_irq_unit_int_enable(IRQ_OFFLOAD_LINE);
-
- return 0;
}
-
-SYS_INIT(arc_irq_offload_init, POST_KERNEL, 0);
diff --git a/arch/arc/core/isr_wrapper.S b/arch/arc/core/isr_wrapper.S
index 3471e4d7349..4b486ed926d 100644
--- a/arch/arc/core/isr_wrapper.S
+++ b/arch/arc/core/isr_wrapper.S
@@ -26,7 +26,7 @@ GTEXT(_isr_wrapper)
GTEXT(_isr_demux)
#if defined(CONFIG_PM)
-GTEXT(z_pm_save_idle_exit)
+GTEXT(pm_system_resume)
#endif
/*
@@ -253,7 +253,7 @@ rirq_path:
st 0, [r1, _kernel_offset_to_idle] /* zero idle duration */
PUSHR blink
- jl z_pm_save_idle_exit
+ jl pm_system_resume
POPR blink
_skip_pm_save_idle_exit:
diff --git a/arch/arc/core/mpu/Kconfig b/arch/arc/core/mpu/Kconfig
index 7078d88d534..2f288de25c7 100644
--- a/arch/arc/core/mpu/Kconfig
+++ b/arch/arc/core/mpu/Kconfig
@@ -35,5 +35,7 @@ config ARC_MPU
select GEN_PRIV_STACKS if !(ARC_MPU_VER = 4 || ARC_MPU_VER = 8)
select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT if !(ARC_MPU_VER = 4 || ARC_MPU_VER = 8)
select MPU_REQUIRES_NON_OVERLAPPING_REGIONS if (ARC_MPU_VER = 4 || ARC_MPU_VER = 8)
+ select ARCH_MEM_DOMAIN_SUPPORTS_ISOLATED_STACKS
+ select MEM_DOMAIN_ISOLATED_STACKS
help
Target has ARC MPU
diff --git a/arch/arc/core/mpu/arc_mpu_common_internal.h b/arch/arc/core/mpu/arc_mpu_common_internal.h
index a9ff5518b2f..15758ed15f6 100644
--- a/arch/arc/core/mpu/arc_mpu_common_internal.h
+++ b/arch/arc/core/mpu/arc_mpu_common_internal.h
@@ -238,7 +238,7 @@ int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write)
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
-static int arc_mpu_init(void)
+void arc_mpu_init(void)
{
uint32_t num_regions = get_num_regions();
@@ -246,7 +246,6 @@ static int arc_mpu_init(void)
if (mpu_config.num_regions > num_regions) {
__ASSERT(0, "Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
- return -EINVAL;
}
/* Disable MPU */
@@ -278,10 +277,7 @@ static int arc_mpu_init(void)
/* Enable MPU */
arc_core_mpu_enable();
-
- return 0;
}
-SYS_INIT(arc_mpu_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_COMMON_INTERNAL_H_ */
diff --git a/arch/arc/core/mpu/arc_mpu_v2_internal.h b/arch/arc/core/mpu/arc_mpu_v2_internal.h
index f59b1e9184c..a088df32d9b 100644
--- a/arch/arc/core/mpu/arc_mpu_v2_internal.h
+++ b/arch/arc/core/mpu/arc_mpu_v2_internal.h
@@ -118,7 +118,7 @@ static inline bool _is_enabled_region(uint32_t r_index)
}
/**
- * This internal function check if the given buffer in in the region
+ * This internal function check if the given buffer is in the region
*/
static inline bool _is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{
diff --git a/arch/arc/core/mpu/arc_mpu_v4_internal.h b/arch/arc/core/mpu/arc_mpu_v4_internal.h
index 3bf6fca6c43..1323f1ebda2 100644
--- a/arch/arc/core/mpu/arc_mpu_v4_internal.h
+++ b/arch/arc/core/mpu/arc_mpu_v4_internal.h
@@ -814,7 +814,7 @@ int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write)
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
-static int arc_mpu_init(void)
+void arc_mpu_init(void)
{
uint32_t num_regions;
uint32_t i;
@@ -826,7 +826,7 @@ static int arc_mpu_init(void)
__ASSERT(0,
"Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
- return -EINVAL;
+ return;
}
static_regions_num = 0U;
@@ -851,7 +851,7 @@ static int arc_mpu_init(void)
MPU_DYNAMIC_REGION_AREAS_NUM) {
LOG_ERR("not enough dynamic regions %d",
dynamic_regions_num);
- return -EINVAL;
+ return;
}
dyn_reg_info[dynamic_regions_num].index = i;
@@ -886,10 +886,8 @@ static int arc_mpu_init(void)
/* Enable MPU */
arc_core_mpu_enable();
- return 0;
+ return;
}
-SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
- CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_ */
diff --git a/arch/arc/core/mpu/arc_mpu_v6_internal.h b/arch/arc/core/mpu/arc_mpu_v6_internal.h
index 1dbd50bdc84..3ee88d08320 100644
--- a/arch/arc/core/mpu/arc_mpu_v6_internal.h
+++ b/arch/arc/core/mpu/arc_mpu_v6_internal.h
@@ -156,7 +156,7 @@ static inline bool _is_enabled_region(uint32_t r_index)
}
/**
- * This internal function check if the given buffer in in the region
+ * This internal function check if the given buffer is in the region
*/
static inline bool _is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{
diff --git a/arch/arc/core/prep_c.c b/arch/arc/core/prep_c.c
index 0e4975cd3fc..bf3ab454a2c 100644
--- a/arch/arc/core/prep_c.c
+++ b/arch/arc/core/prep_c.c
@@ -23,6 +23,8 @@
#include
#include
#include
+#include
+#include
/* XXX - keep for future use in full-featured cache APIs */
#if 0
@@ -113,6 +115,9 @@ static void dev_state_zero(void)
#endif
extern FUNC_NORETURN void z_cstart(void);
+extern void arc_mpu_init(void);
+extern void arc_secureshield_init(void);
+
/**
* @brief Prepare to and run C code
*
@@ -121,6 +126,10 @@ extern FUNC_NORETURN void z_cstart(void);
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
+
#ifdef CONFIG_ISA_ARCV3
arc_cluster_scm_enable();
#endif
@@ -130,6 +139,15 @@ void z_prep_c(void)
dev_state_zero();
#endif
z_data_copy();
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
+#endif
+#ifdef CONFIG_ARC_MPU
+ arc_mpu_init();
+#endif
+#ifdef CONFIG_ARC_SECURE_FIRMWARE
+ arc_secureshield_init();
+#endif
z_cstart();
CODE_UNREACHABLE;
}
diff --git a/arch/arc/core/reset.S b/arch/arc/core/reset.S
index a2b038d387e..63fa6438ce4 100644
--- a/arch/arc/core/reset.S
+++ b/arch/arc/core/reset.S
@@ -16,8 +16,9 @@
#include
#include
#include
-#ifdef CONFIG_ARC_EARLY_SOC_INIT
- #include
+
+#if defined(CONFIG_SOC_RESET_HOOK)
+GTEXT(soc_reset_hook)
#endif
GDATA(z_interrupt_stacks)
@@ -112,8 +113,8 @@ done_icache_invalidate:
done_dcache_invalidate:
-#ifdef CONFIG_ARC_EARLY_SOC_INIT
- soc_early_asm_init_percpu
+#ifdef CONFIG_SOC_RESET_HOOK
+ bl soc_reset_hook
#endif
_dsp_extension_probe
diff --git a/arch/arc/core/secureshield/arc_sjli.c b/arch/arc/core/secureshield/arc_sjli.c
index 22deebc2718..e5b865445ee 100644
--- a/arch/arc/core/secureshield/arc_sjli.c
+++ b/arch/arc/core/secureshield/arc_sjli.c
@@ -48,7 +48,7 @@ static void sjli_table_init(void)
/*
* @brief initialization of secureshield related functions.
*/
-static int arc_secureshield_init(void)
+void arc_secureshield_init(void)
{
sjli_table_init();
@@ -60,9 +60,4 @@ static int arc_secureshield_init(void)
*
*/
__asm__ volatile("sflag 0x20");
-
- return 0;
}
-
-SYS_INIT(arc_secureshield_init, PRE_KERNEL_1,
- CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
diff --git a/arch/arc/core/smp.c b/arch/arc/core/smp.c
index 9f8ee38a4a1..e8463b7b53b 100644
--- a/arch/arc/core/smp.c
+++ b/arch/arc/core/smp.c
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -130,21 +131,27 @@ static void sched_ipi_handler(const void *unused)
z_sched_ipi();
}
-/* arch implementation of sched_ipi */
-void arch_sched_ipi(void)
+void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
- uint32_t i;
+ unsigned int i;
+ unsigned int num_cpus = arch_num_cpus();
- /* broadcast sched_ipi request to other cores
+ /* Send sched_ipi request to other cores
 * if the target is the current core, hardware will ignore it
*/
- unsigned int num_cpus = arch_num_cpus();
for (i = 0U; i < num_cpus; i++) {
- z_arc_connect_ici_generate(i);
+ if ((cpu_bitmap & BIT(i)) != 0) {
+ z_arc_connect_ici_generate(i);
+ }
}
}
+void arch_sched_broadcast_ipi(void)
+{
+ arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
+}
+
int arch_smp_init(void)
{
struct arc_connect_bcr bcr;
@@ -188,5 +195,4 @@ int arch_smp_init(void)
return 0;
}
-SYS_INIT(arch_smp_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
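
A rough sketch of how the directed/broadcast IPI split above is meant to be used; the arch_* IPI functions are kernel-internal, and the caller shown here (notify_cpu) is hypothetical:

```c
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>

/* Hypothetical scheduler-side caller: poke one core instead of all. */
void notify_cpu(unsigned int target_cpu)
{
	if (target_cpu < arch_num_cpus()) {
		/* one-bit mask: only the targeted core gets the IPI */
		arch_sched_directed_ipi(BIT(target_cpu));
	} else {
		/* equivalent to arch_sched_directed_ipi(IPI_ALL_CPUS_MASK) */
		arch_sched_broadcast_ipi();
	}
}
```
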
diff --git a/arch/arc/include/arc_irq_offload.h b/arch/arc/include/arc_irq_offload.h
index c38d0a24b68..243b4655652 100644
--- a/arch/arc/include/arc_irq_offload.h
+++ b/arch/arc/include/arc_irq_offload.h
@@ -9,11 +9,9 @@
#ifdef CONFIG_IRQ_OFFLOAD
-int arc_irq_offload_init(const struct device *unused);
-
static inline void arc_irq_offload_init_smp(void)
{
- arc_irq_offload_init(NULL);
+ arch_irq_offload_init();
}
#else
diff --git a/arch/arc/include/kernel_arch_data.h b/arch/arc/include/kernel_arch_data.h
index efe2bd7d1c6..b0dc733446b 100644
--- a/arch/arc/include/kernel_arch_data.h
+++ b/arch/arc/include/kernel_arch_data.h
@@ -36,7 +36,7 @@ extern "C" {
#endif
#ifdef CONFIG_ARC_HAS_SECURE
-struct _irq_stack_frame {
+struct arch_esf {
#ifdef CONFIG_ARC_HAS_ZOL
uintptr_t lp_end;
uintptr_t lp_start;
@@ -72,7 +72,7 @@ struct _irq_stack_frame {
uintptr_t status32;
};
#else
-struct _irq_stack_frame {
+struct arch_esf {
uintptr_t r0;
uintptr_t r1;
uintptr_t r2;
@@ -108,7 +108,7 @@ struct _irq_stack_frame {
};
#endif
-typedef struct _irq_stack_frame _isf_t;
+typedef struct arch_esf _isf_t;
diff --git a/arch/arc/include/kernel_arch_func.h b/arch/arc/include/kernel_arch_func.h
index 1c46423cb4f..ca382a274f4 100644
--- a/arch/arc/include/kernel_arch_func.h
+++ b/arch/arc/include/kernel_arch_func.h
@@ -62,9 +62,7 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
void *p2, void *p3, uint32_t stack, uint32_t size,
struct k_thread *thread);
-extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
-
-extern void arch_sched_ipi(void);
+extern void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf);
extern void z_arc_switch(void *switch_to, void **switched_from);
diff --git a/arch/arc/include/offsets_short_arch.h b/arch/arc/include/offsets_short_arch.h
index 5bf2c23fc3a..f461112ae79 100644
--- a/arch/arc/include/offsets_short_arch.h
+++ b/arch/arc/include/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
/* kernel */
diff --git a/arch/arm/CMakeLists.txt b/arch/arm/CMakeLists.txt
index 5aa25f20396..48c78e8d88c 100644
--- a/arch/arm/CMakeLists.txt
+++ b/arch/arm/CMakeLists.txt
@@ -1,5 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
-set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT elf32-littlearm)
+if(CONFIG_BIG_ENDIAN)
+ set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT elf32-bigarm)
+else()
+ set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT elf32-littlearm)
+endif()
add_subdirectory(core)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index bf68ec6faae..c28cf8d29f9 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,7 +35,7 @@ config ARM_CUSTOM_INTERRUPT_CONTROLLER
assumes responsibility for handling the NVIC.
config ROMSTART_RELOCATION_ROM
- bool
+ bool "Relocate rom_start region"
default n
help
Relocates the rom_start region containing the boot-vector data and
@@ -66,7 +66,7 @@ config ROMSTART_RELOCATION_ROM
if ROMSTART_RELOCATION_ROM
config ROMSTART_REGION_ADDRESS
- hex
+ hex "Base address of the rom_start region"
default 0x00000000
help
Start address of the rom_start region.
@@ -85,7 +85,7 @@ if ROMSTART_RELOCATION_ROM
$(dt_nodelabel_reg_addr_hex,ocram_s_sys)
config ROMSTART_REGION_SIZE
- hex
+ hex "Size of the rom_start region"
default 1
help
Size of the rom_start region in KB.
diff --git a/arch/arm/core/Kconfig b/arch/arm/core/Kconfig
index e446eb25c1d..2573ca2cac6 100644
--- a/arch/arm/core/Kconfig
+++ b/arch/arm/core/Kconfig
@@ -60,7 +60,7 @@ config CPU_AARCH32_CORTEX_A
select USE_SWITCH_SUPPORTED
# GDBSTUB has not yet been tested on Cortex M or R SoCs
select ARCH_HAS_GDBSTUB
- # GDB on ARM needs the etxra registers
+ # GDB on ARM needs the extra registers
select EXTRA_EXCEPTION_INFO if GDBSTUB
help
This option signifies the use of a CPU of the Cortex-A family.
@@ -166,11 +166,14 @@ config RUNTIME_NMI
needed, enable this option and attach it via z_arm_nmi_set_handler().
config PLATFORM_SPECIFIC_INIT
- bool "Platform (SOC) specific startup hook"
+ bool "Platform (SOC) specific startup hook [DEPRECATED]"
+ select DEPRECATED
help
The platform specific initialization code (z_arm_platform_init) is
executed at the beginning of the startup code (__start).
+ This option is deprecated, use SOC_RESET_HOOK instead.
+
config FAULT_DUMP
int "Fault dump level"
default 2
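
For reference, migrating a SoC off the deprecated option amounts to enabling SOC_RESET_HOOK and renaming the hook; a minimal sketch, assuming the SoC previously defined z_arm_platform_init():

```c
#include <zephyr/platform/hooks.h>

/* Called from the reset vector before the C runtime is initialized:
 * .bss/.data are not set up yet, so keep this minimal and stateless.
 */
void soc_reset_hook(void)
{
	/* early SoC setup formerly done in z_arm_platform_init() */
}
```
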
diff --git a/arch/arm/core/cortex_a_r/Kconfig b/arch/arm/core/cortex_a_r/Kconfig
index 3ec57cc408e..4095a277c61 100644
--- a/arch/arm/core/cortex_a_r/Kconfig
+++ b/arch/arm/core/cortex_a_r/Kconfig
@@ -131,6 +131,7 @@ config AARCH32_ARMV8_R
bool
select ATOMIC_OPERATIONS_BUILTIN
select SCHED_IPI_SUPPORTED if SMP
+ select ARCH_HAS_DIRECTED_IPIS
help
This option signifies the use of an ARMv8-R AArch32 processor
implementation.
diff --git a/arch/arm/core/cortex_a_r/boot.h b/arch/arm/core/cortex_a_r/boot.h
index 7eeba8b6ada..89306798ce4 100644
--- a/arch/arm/core/cortex_a_r/boot.h
+++ b/arch/arm/core/cortex_a_r/boot.h
@@ -26,5 +26,6 @@ extern void __start(void);
#define BOOT_PARAM_UDF_SP_OFFSET 16
#define BOOT_PARAM_SVC_SP_OFFSET 20
#define BOOT_PARAM_SYS_SP_OFFSET 24
+#define BOOT_PARAM_VOTING_OFFSET 28
#endif /* _BOOT_H_ */
diff --git a/arch/arm/core/cortex_a_r/cache.c b/arch/arm/core/cortex_a_r/cache.c
index 533666f0469..31cf27a08e6 100644
--- a/arch/arm/core/cortex_a_r/cache.c
+++ b/arch/arm/core/cortex_a_r/cache.c
@@ -217,3 +217,7 @@ int arch_icache_flush_and_invd_range(void *start_addr, size_t size)
}
#endif
+
+void arch_cache_init(void)
+{
+}
diff --git a/arch/arm/core/cortex_a_r/cpu_idle.S b/arch/arm/core/cortex_a_r/cpu_idle.S
index 5c6ef3f12ed..044c0038239 100644
--- a/arch/arm/core/cortex_a_r/cpu_idle.S
+++ b/arch/arm/core/cortex_a_r/cpu_idle.S
@@ -49,6 +49,7 @@ _skip_\@:
#endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */
.endm
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
@@ -68,6 +69,9 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
bx lr
+#endif
+
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
@@ -93,3 +97,4 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
_irq_disabled:
bx lr
+#endif
diff --git a/arch/arm/core/cortex_a_r/fault.c b/arch/arm/core/cortex_a_r/fault.c
index a39efeb96e0..daf1d2345ca 100644
--- a/arch/arm/core/cortex_a_r/fault.c
+++ b/arch/arm/core/cortex_a_r/fault.c
@@ -147,8 +147,9 @@ bool z_arm_fault_undef_instruction_fp(void)
* the FP was already enabled then this was an actual undefined
* instruction.
*/
- if (__get_FPEXC() & FPEXC_EN)
+ if (__get_FPEXC() & FPEXC_EN) {
return true;
+ }
__set_FPEXC(FPEXC_EN);
@@ -162,8 +163,9 @@ bool z_arm_fault_undef_instruction_fp(void)
struct __fpu_sf *spill_esf =
(struct __fpu_sf *)_current_cpu->fp_ctx;
- if (spill_esf == NULL)
+ if (spill_esf == NULL) {
return false;
+ }
_current_cpu->fp_ctx = NULL;
@@ -206,7 +208,7 @@ bool z_arm_fault_undef_instruction_fp(void)
*
* @return Returns true if the fault is fatal
*/
-bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
+bool z_arm_fault_undef_instruction(struct arch_esf *esf)
{
#if defined(CONFIG_FPU_SHARING)
/*
@@ -243,7 +245,7 @@ bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
*
* @return Returns true if the fault is fatal
*/
-bool z_arm_fault_prefetch(z_arch_esf_t *esf)
+bool z_arm_fault_prefetch(struct arch_esf *esf)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;
@@ -299,7 +301,7 @@ static const struct z_exc_handle exceptions[] = {
*
* @return true if error is recoverable, otherwise return false.
*/
-static bool memory_fault_recoverable(z_arch_esf_t *esf)
+static bool memory_fault_recoverable(struct arch_esf *esf)
{
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
/* Mask out instruction mode */
@@ -321,7 +323,7 @@ static bool memory_fault_recoverable(z_arch_esf_t *esf)
*
* @return Returns true if the fault is fatal
*/
-bool z_arm_fault_data(z_arch_esf_t *esf)
+bool z_arm_fault_data(struct arch_esf *esf)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;
diff --git a/arch/arm/core/cortex_a_r/irq_manage.c b/arch/arm/core/cortex_a_r/irq_manage.c
index a381fad2a48..48c9ede3327 100644
--- a/arch/arm/core/cortex_a_r/irq_manage.c
+++ b/arch/arm/core/cortex_a_r/irq_manage.c
@@ -71,7 +71,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
}
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
-void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
/**
*
@@ -98,7 +98,7 @@ void _arch_isr_direct_pm(void)
if (_kernel.idle) {
_kernel.idle = 0;
- z_pm_save_idle_exit();
+ pm_system_resume();
}
irq_unlock(key);
diff --git a/arch/arm/core/cortex_a_r/isr_wrapper.S b/arch/arm/core/cortex_a_r/isr_wrapper.S
index 0cd30e0a343..3dd678c4030 100644
--- a/arch/arm/core/cortex_a_r/isr_wrapper.S
+++ b/arch/arm/core/cortex_a_r/isr_wrapper.S
@@ -156,7 +156,7 @@ _vfp_not_enabled:
* idle, this ensures that the calculation and programming of the
* device for the next timer deadline is not interrupted. For
* non-tickless idle, this ensures that the clearing of the kernel idle
- * state is not interrupted. In each case, z_pm_save_idle_exit
+ * state is not interrupted. In each case, pm_system_resume
* is called with interrupts disabled.
*/
@@ -170,7 +170,7 @@ _vfp_not_enabled:
movs r1, #0
/* clear kernel idle state */
str r1, [r2, #_kernel_offset_to_idle]
- bl z_pm_save_idle_exit
+ bl pm_system_resume
_idle_state_cleared:
#endif /* CONFIG_PM */
@@ -189,7 +189,7 @@ _idle_state_cleared:
*
* Note that interrupts are disabled up to this point on the ARM
* architecture variants other than the Cortex-M. It is also important
- * to note that that most interrupt controllers require that the nested
+ * to note that most interrupt controllers require that the nested
* interrupts are handled after the active interrupt is acknowledged;
 * this is done through the `get_active` interrupt controller
* interface function.
@@ -269,7 +269,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
* idle, this ensures that the calculation and programming of the
* device for the next timer deadline is not interrupted. For
* non-tickless idle, this ensures that the clearing of the kernel idle
- * state is not interrupted. In each case, z_pm_save_idle_exit
+ * state is not interrupted. In each case, pm_system_resume
* is called with interrupts disabled.
*/
@@ -283,7 +283,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
movs r1, #0
/* clear kernel idle state */
str r1, [r2, #_kernel_offset_to_idle]
- bl z_pm_save_idle_exit
+ bl pm_system_resume
_idle_state_cleared:
#endif /* CONFIG_PM */
@@ -339,6 +339,15 @@ z_arm_cortex_ar_irq_done:
str r0, [r2, #___cpu_t_nested_OFFSET]
/* Do not context switch if exiting a nested interrupt */
cmp r0, #0
+ /* Note that this function is only called from `z_arm_svc`,
+ * while handling irq_offload, with the following modes set:
+ * ```
+ * if (cpu interrupts are nested)
+ * mode=MODE_SYS
+ * else
+ * mode=MODE_IRQ
+ * ```
+ */
bhi __EXIT_INT
/* retrieve pointer to the current thread */
diff --git a/arch/arm/core/cortex_a_r/macro_priv.inc b/arch/arm/core/cortex_a_r/macro_priv.inc
index e02433692f9..aafa87ca9b6 100644
--- a/arch/arm/core/cortex_a_r/macro_priv.inc
+++ b/arch/arm/core/cortex_a_r/macro_priv.inc
@@ -18,6 +18,27 @@
ubfx \rreg0, \rreg0, #0, #24
.endm
+/*
+ * Get CPU logic id by looking up cpu_node_list
+ * returns
+ * reg0: MPID
+ * reg1: logic id (0 ~ CONFIG_MP_MAX_NUM_CPUS - 1)
+ * clobbers: reg0, reg1, reg2, reg3
+ */
+.macro get_cpu_logic_id reg0, reg1, reg2, reg3
+ get_cpu_id \reg0
+ ldr \reg3, =cpu_node_list
+ mov \reg1, #0
+1: ldr \reg2, [\reg3, \reg1, lsl #2]
+ cmp \reg2, \reg0
+ beq 2f
+ add \reg1, \reg1, #1
+ cmp \reg1, #CONFIG_MP_MAX_NUM_CPUS
+ bne 1b
+ b .
+2:
+.endm
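
A C rendering of what get_cpu_logic_id computes, assuming the cpu_node_list[] table exported from cortex_a_r/smp.c; an MPID missing from the table makes the macro spin forever (the `b .`), so the table must cover every booting core:

```c
#include <stdint.h>

extern const uint32_t cpu_node_list[];

static inline unsigned int cpu_logic_id(uint32_t mpid)
{
	for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		if (cpu_node_list[i] == mpid) {
			return i;	/* static "logic" id for this MPID */
		}
	}
	for (;;) {
		/* unknown MPID: hang, mirroring the `b .` in the macro */
	}
}
```
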
+
.macro get_cpu rreg0
/*
* Get CPU pointer.
@@ -33,8 +54,7 @@
*/
srsdb sp!, #MODE_SYS
cps #MODE_SYS
- stmdb sp, {r0-r3, r12, lr}^
- sub sp, #24
+ push {r0-r3, r12, lr}
/* TODO: EXTRA_EXCEPTION_INFO */
mov r0, sp
diff --git a/arch/arm/core/cortex_a_r/prep_c.c b/arch/arm/core/cortex_a_r/prep_c.c
index e510d06ee95..74d0855a620 100644
--- a/arch/arm/core/cortex_a_r/prep_c.c
+++ b/arch/arm/core/cortex_a_r/prep_c.c
@@ -21,6 +21,8 @@
#include
#include
#include
+#include
+#include
#if defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
#include
@@ -147,6 +149,9 @@ extern FUNC_NORETURN void z_cstart(void);
*/
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
/* Initialize tpidruro with our struct _cpu instance address */
write_tpidruro((uintptr_t)&_kernel.cpus[0]);
@@ -160,6 +165,9 @@ void z_prep_c(void)
z_arm_init_stacks();
#endif
z_arm_interrupt_init();
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
+#endif
#ifdef CONFIG_ARM_MPU
z_arm_mpu_init();
z_arm_configure_static_mpu_regions();
diff --git a/arch/arm/core/cortex_a_r/reset.S b/arch/arm/core/cortex_a_r/reset.S
index 0b107fbf596..efb04d249ec 100644
--- a/arch/arm/core/cortex_a_r/reset.S
+++ b/arch/arm/core/cortex_a_r/reset.S
@@ -30,8 +30,8 @@ GDATA(z_arm_sys_stack)
GDATA(z_arm_fiq_stack)
GDATA(z_arm_abort_stack)
GDATA(z_arm_undef_stack)
-#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
-GTEXT(z_arm_platform_init)
+#if defined(CONFIG_SOC_RESET_HOOK)
+GTEXT(soc_reset_hook)
#endif
/**
@@ -200,23 +200,62 @@ EL1_Reset_Handler:
#endif /* CONFIG_DCLS */
ldr r0, =arm_cpu_boot_params
+
#if CONFIG_MP_MAX_NUM_CPUS > 1
- get_cpu_id r1
+ /*
+	 * This code uses voting locks, like arch/arm64/core/reset.S, to determine the primary CPU.
+ */
- ldrex r2, [r0, #BOOT_PARAM_MPID_OFFSET]
- cmp r2, #-1
- bne 1f
- strex r3, r1, [r0, #BOOT_PARAM_MPID_OFFSET]
- cmp r3, #0
+ /*
+	 * Get the "logic" id defined statically by cpu_node_list for voting-lock self-identification.
+ * It is worth noting that this is NOT the final logic id (arch_curr_cpu()->id)
+ */
+ get_cpu_logic_id r1, r2, r3, r4 // r1: MPID, r2: logic id
+
+ add r4, r0, #BOOT_PARAM_VOTING_OFFSET
+
+ /* signal our desire to vote */
+ mov r5, #1
+ strb r5, [r4, r2]
+ ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
+ cmn r3, #1
+ beq 1f
+
+ /* some core already won, release */
+ mov r7, #0
+ strb r7, [r4, r2]
+ b _secondary_core
+
+ /* suggest current core then release */
+1: str r1, [r0, #BOOT_PARAM_MPID_OFFSET]
+ strb r7, [r4, r2]
+ dmb
+
+	/* then wait until every other core is done voting */
+ mov r5, #0
+2: ldrb r3, [r4, r5]
+ tst r3, #255
+ /* wait */
+ bne 2b
+ add r5, r5, #1
+ cmp r5, #CONFIG_MP_MAX_NUM_CPUS
+ bne 2b
+
+ /* check if current core won */
+ dmb
+ ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
+ cmp r3, r1
beq _primary_core
+	/* fall through to _secondary_core */
-1:
- dmb ld
+ /* loop until our turn comes */
+_secondary_core:
+ dmb
ldr r2, [r0, #BOOT_PARAM_MPID_OFFSET]
cmp r1, r2
- bne 1b
+ bne _secondary_core
- /* we can now move on */
+ /* we can now load our stack pointer values and move on */
ldr r4, =arch_secondary_cpu_init
ldr r5, [r0, #BOOT_PARAM_FIQ_SP_OFFSET]
ldr r6, [r0, #BOOT_PARAM_IRQ_SP_OFFSET]
@@ -266,9 +305,9 @@ _primary_core:
msr CPSR_c, #(MODE_SYS | I_BIT | F_BIT)
mov sp, r10
-#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
+#if defined(CONFIG_SOC_RESET_HOOK)
/* Execute platform-specific initialisation if applicable */
- bl z_arm_platform_init
+ bl soc_reset_hook
#endif
#if defined(CONFIG_WDOG_INIT)
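
The voting sequence above is easier to follow in pseudocode; this sketch mirrors the arm64 scheme it cites, with the dmb barriers and load/store ordering details omitted, and it assumes the struct boot_params layout from cortex_a_r/smp.c:

```c
#include <stdbool.h>
#include <stdint.h>

/* Pseudocode only: real code needs the dmb barriers shown in the asm. */
bool vote_for_primary(volatile struct boot_params *p, uint32_t mpid,
		      unsigned int logic_id)
{
	p->voting[logic_id] = 1;		/* signal our desire to vote */
	if (p->mpid == (uint32_t)-1) {
		p->mpid = mpid;			/* suggest current core */
	}
	p->voting[logic_id] = 0;		/* release */

	/* wait until every other core is done voting */
	for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		while (p->voting[i] != 0) {
		}
	}

	return p->mpid == mpid;			/* did current core win? */
}
```
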
diff --git a/arch/arm/core/cortex_a_r/smp.c b/arch/arm/core/cortex_a_r/smp.c
index 9e06730f913..6579cb4adde 100644
--- a/arch/arm/core/cortex_a_r/smp.c
+++ b/arch/arm/core/cortex_a_r/smp.c
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
#include "boot.h"
#include "zephyr/cache.h"
#include "zephyr/kernel/thread_stack.h"
@@ -50,6 +51,7 @@ struct boot_params {
char *udf_sp;
char *svc_sp;
char *sys_sp;
+ uint8_t voting[CONFIG_MP_MAX_NUM_CPUS];
arch_cpustart_t fn;
void *arg;
int cpu_num;
@@ -63,6 +65,7 @@ BUILD_ASSERT(offsetof(struct boot_params, abt_sp) == BOOT_PARAM_ABT_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, udf_sp) == BOOT_PARAM_UDF_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, svc_sp) == BOOT_PARAM_SVC_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, sys_sp) == BOOT_PARAM_SYS_SP_OFFSET);
+BUILD_ASSERT(offsetof(struct boot_params, voting) == BOOT_PARAM_VOTING_OFFSET);
volatile struct boot_params arm_cpu_boot_params = {
.mpid = -1,
@@ -74,7 +77,7 @@ volatile struct boot_params arm_cpu_boot_params = {
.sys_sp = (char *)(z_arm_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE),
};
-static const uint32_t cpu_node_list[] = {
+const uint32_t cpu_node_list[] = {
DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))};
 /* cpu_map saves the mapping of core id to mpid */
@@ -210,7 +213,7 @@ void arch_secondary_cpu_init(void)
#ifdef CONFIG_SMP
-static void broadcast_ipi(unsigned int ipi)
+static void send_ipi(unsigned int ipi, uint32_t cpu_bitmap)
{
uint32_t mpidr = MPIDR_TO_CORE(GET_MPIDR());
@@ -220,6 +223,10 @@ static void broadcast_ipi(unsigned int ipi)
unsigned int num_cpus = arch_num_cpus();
for (int i = 0; i < num_cpus; i++) {
+ if ((cpu_bitmap & BIT(i)) == 0) {
+ continue;
+ }
+
uint32_t target_mpidr = cpu_map[i];
uint8_t aff0;
@@ -239,10 +246,14 @@ void sched_ipi_handler(const void *unused)
z_sched_ipi();
}
-/* arch implementation of sched_ipi */
-void arch_sched_ipi(void)
+void arch_sched_broadcast_ipi(void)
{
- broadcast_ipi(SGI_SCHED_IPI);
+ send_ipi(SGI_SCHED_IPI, IPI_ALL_CPUS_MASK);
+}
+
+void arch_sched_directed_ipi(uint32_t cpu_bitmap)
+{
+ send_ipi(SGI_SCHED_IPI, cpu_bitmap);
}
int arch_smp_init(void)
@@ -259,6 +270,4 @@ int arch_smp_init(void)
return 0;
}
-SYS_INIT(arch_smp_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
-
#endif
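
The same bitmap filtering, shown as a standalone sketch; cpu_map[], MPIDR_AFFLVL() and gic_raise_sgi() are the symbols this file already uses, and the SGI target-list encoding is simplified here:

```c
static void send_ipi_sketch(unsigned int ipi, uint32_t cpu_bitmap)
{
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		if ((cpu_bitmap & BIT(i)) == 0) {
			continue;	/* core i is not a target */
		}

		uint32_t target_mpidr = cpu_map[i];
		uint8_t aff0 = MPIDR_AFFLVL(target_mpidr, 0);

		/* raise the SGI for the one core named by the bitmap bit */
		gic_raise_sgi(ipi, target_mpidr, BIT(aff0));
	}
}
```
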
diff --git a/arch/arm/core/cortex_a_r/thread.c b/arch/arm/core/cortex_a_r/thread.c
index d5ccab76877..b3bd91ce5c1 100644
--- a/arch/arm/core/cortex_a_r/thread.c
+++ b/arch/arm/core/cortex_a_r/thread.c
@@ -95,6 +95,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
iframe->a4 = (uint32_t)p3;
iframe->xpsr = A_BIT | MODE_SYS;
+#if defined(CONFIG_BIG_ENDIAN)
+ iframe->xpsr |= E_BIT;
+#endif /* CONFIG_BIG_ENDIAN */
+
#if defined(CONFIG_COMPILER_ISA_THUMB2)
iframe->xpsr |= T_BIT;
#endif /* CONFIG_COMPILER_ISA_THUMB2 */
diff --git a/arch/arm/core/cortex_a_r/vector_table.S b/arch/arm/core/cortex_a_r/vector_table.S
index 8c1060e6122..e74b6a41c8d 100644
--- a/arch/arm/core/cortex_a_r/vector_table.S
+++ b/arch/arm/core/cortex_a_r/vector_table.S
@@ -41,6 +41,11 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
GTEXT(z_arm_cortex_ar_exit_exc)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_cortex_ar_exit_exc)
+ /* Note:
+ * This function is expected to be *always* called with
+ * processor mode set to MODE_SYS.
+ */
+
/* decrement exception depth */
get_cpu r2
ldrb r1, [r2, #_cpu_offset_to_exc_depth]
@@ -51,7 +56,6 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_cortex_ar_exit_exc)
* Restore r0-r3, r12, lr, lr_und and spsr_und from the exception stack
* and return to the current thread.
*/
- ldmia sp, {r0-r3, r12, lr}^
- add sp, #24
+ pop {r0-r3, r12, lr}
rfeia sp!
#endif
diff --git a/arch/arm/core/cortex_m/Kconfig b/arch/arm/core/cortex_m/Kconfig
index e2a0fbb42da..c018574429a 100644
--- a/arch/arm/core/cortex_m/Kconfig
+++ b/arch/arm/core/cortex_m/Kconfig
@@ -73,6 +73,17 @@ config CPU_CORTEX_M55
help
This option signifies the use of a Cortex-M55 CPU
+config CPU_CORTEX_M85
+ bool
+ select CPU_CORTEX_M
+ select ARMV8_1_M_MAINLINE
+ select ARMV8_M_SE if CPU_HAS_TEE
+ select ARMV7_M_ARMV8_M_FP if CPU_HAS_FPU
+ select CPU_HAS_DCACHE
+ select CPU_HAS_ICACHE
+ help
+ This option signifies the use of a Cortex-M85 CPU
+
config CPU_CORTEX_M7
bool
select CPU_CORTEX_M
@@ -319,7 +330,7 @@ config ZERO_LATENCY_IRQS
config ZERO_LATENCY_LEVELS
int "Number of interrupt priority levels reserved for zero latency"
depends on ZERO_LATENCY_IRQS
- range 1 255
+ range 1 $(UINT8_MAX)
help
The amount of interrupt priority levels reserved for zero latency
interrupts. Increase this value to reserve more than one priority
diff --git a/arch/arm/core/cortex_m/cache.c b/arch/arm/core/cortex_m/cache.c
index df746eb0474..a56996c8931 100644
--- a/arch/arm/core/cortex_m/cache.c
+++ b/arch/arm/core/cortex_m/cache.c
@@ -110,3 +110,7 @@ int arch_icache_flush_and_invd_range(void *start_addr, size_t size)
{
return -ENOTSUP;
}
+
+void arch_cache_init(void)
+{
+}
diff --git a/arch/arm/core/cortex_m/coredump.c b/arch/arm/core/cortex_m/coredump.c
index 2b4a86a1bb9..c688c91d981 100644
--- a/arch/arm/core/cortex_m/coredump.c
+++ b/arch/arm/core/cortex_m/coredump.c
@@ -41,7 +41,7 @@ struct arm_arch_block {
*/
static struct arm_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
{
struct coredump_arch_hdr_t hdr = {
.id = COREDUMP_ARCH_HDR_ID,
diff --git a/arch/arm/core/cortex_m/cpu_idle.c b/arch/arm/core/cortex_m/cpu_idle.c
index 4df091fbbd6..5f373a88c9d 100644
--- a/arch/arm/core/cortex_m/cpu_idle.c
+++ b/arch/arm/core/cortex_m/cpu_idle.c
@@ -53,6 +53,7 @@ void z_arm_cpu_idle_init(void)
} while (false)
#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
#if defined(CONFIG_TRACING)
@@ -96,7 +97,9 @@ void arch_cpu_idle(void)
__enable_irq();
__ISB();
}
+#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
#if defined(CONFIG_TRACING)
@@ -135,3 +138,4 @@ void arch_cpu_atomic_idle(unsigned int key)
__enable_irq();
#endif
}
+#endif
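
What the new guards buy: an SoC selecting ARCH_HAS_CUSTOM_CPU_IDLE can now link its own arch_cpu_idle() without a duplicate-symbol clash. A hedged sketch of such an override, using standard CMSIS intrinsics:

```c
void arch_cpu_idle(void)
{
#if defined(CONFIG_TRACING)
	sys_trace_idle();
#endif
	/* a vendor-specific low-power entry could replace plain WFI here */
	__WFI();
	__enable_irq();
	__ISB();
}
```
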
diff --git a/arch/arm/core/cortex_m/debug.c b/arch/arm/core/cortex_m/debug.c
index 8d83cd07f2f..61fb6814535 100644
--- a/arch/arm/core/cortex_m/debug.c
+++ b/arch/arm/core/cortex_m/debug.c
@@ -58,7 +58,7 @@ BUILD_ASSERT(!(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE &
(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1)),
"the size of the partition must be power of 2");
-static int z_arm_debug_enable_null_pointer_detection(void)
+int z_arm_debug_enable_null_pointer_detection(void)
{
z_arm_dwt_init();
@@ -118,7 +118,4 @@ static int z_arm_debug_enable_null_pointer_detection(void)
return 0;
}
-SYS_INIT(z_arm_debug_enable_null_pointer_detection, PRE_KERNEL_1,
- CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
-
#endif /* CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT */
diff --git a/arch/arm/core/cortex_m/fault.c b/arch/arm/core/cortex_m/fault.c
index 5090381fa31..4cc01f87129 100644
--- a/arch/arm/core/cortex_m/fault.c
+++ b/arch/arm/core/cortex_m/fault.c
@@ -146,7 +146,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
*/
#if (CONFIG_FAULT_DUMP == 1)
-static void fault_show(const z_arch_esf_t *esf, int fault)
+static void fault_show(const struct arch_esf *esf, int fault)
{
PR_EXC("Fault! EXC #%d", fault);
@@ -165,7 +165,7 @@ static void fault_show(const z_arch_esf_t *esf, int fault)
*
* For Dump level 0, no information needs to be generated.
*/
-static void fault_show(const z_arch_esf_t *esf, int fault)
+static void fault_show(const struct arch_esf *esf, int fault)
{
(void)esf;
(void)fault;
@@ -185,7 +185,7 @@ static const struct z_exc_handle exceptions[] = {
*
* @return true if error is recoverable, otherwise return false.
*/
-static bool memory_fault_recoverable(z_arch_esf_t *esf, bool synchronous)
+static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
{
#ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@@ -228,7 +228,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
*
* @return error code to identify the fatal error reason
*/
-static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
+static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault,
bool *recoverable)
{
uint32_t reason = K_ERR_ARM_MEM_GENERIC;
@@ -387,7 +387,7 @@ static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
* @return error code to identify the fatal error reason.
*
*/
-static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
+static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
{
uint32_t reason = K_ERR_ARM_BUS_GENERIC;
@@ -549,7 +549,7 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
*
* @return error code to identify the fatal error reason
*/
-static uint32_t usage_fault(const z_arch_esf_t *esf)
+static uint32_t usage_fault(const struct arch_esf *esf)
{
uint32_t reason = K_ERR_ARM_USAGE_GENERIC;
@@ -612,7 +612,7 @@ static uint32_t usage_fault(const z_arch_esf_t *esf)
*
* @return error code to identify the fatal error reason
*/
-static uint32_t secure_fault(const z_arch_esf_t *esf)
+static uint32_t secure_fault(const struct arch_esf *esf)
{
uint32_t reason = K_ERR_ARM_SECURE_GENERIC;
@@ -661,7 +661,7 @@ static uint32_t secure_fault(const z_arch_esf_t *esf)
* See z_arm_fault_dump() for example.
*
*/
-static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
+static void debug_monitor(struct arch_esf *esf, bool *recoverable)
{
*recoverable = false;
@@ -687,7 +687,7 @@ static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
-static inline bool z_arm_is_synchronous_svc(z_arch_esf_t *esf)
+static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
{
uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
/* SVC is a 16-bit instruction. On a synchronous SVC
@@ -762,7 +762,7 @@ static inline bool z_arm_is_pc_valid(uintptr_t pc)
*
* @return error code to identify the fatal error reason
*/
-static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
+static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;
@@ -829,7 +829,7 @@ static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
* See z_arm_fault_dump() for example.
*
*/
-static void reserved_exception(const z_arch_esf_t *esf, int fault)
+static void reserved_exception(const struct arch_esf *esf, int fault)
{
ARG_UNUSED(esf);
@@ -839,7 +839,7 @@ static void reserved_exception(const z_arch_esf_t *esf, int fault)
}
/* Handler function for ARM fault conditions. */
-static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
+static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;
@@ -893,7 +893,7 @@ static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
*
* @param secure_esf Pointer to the secure stack frame.
*/
-static void secure_stack_dump(const z_arch_esf_t *secure_esf)
+static void secure_stack_dump(const struct arch_esf *secure_esf)
{
/*
* In case a Non-Secure exception interrupted the Secure
@@ -918,7 +918,7 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
* Non-Secure exception entry.
*/
top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
- secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
+ secure_esf = (const struct arch_esf *)top_of_sec_stack;
sec_ret_addr = secure_esf->basic.pc;
} else {
/* Exception during Non-Secure function call.
@@ -947,11 +947,11 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
*
* @return ESF pointer on success, otherwise return NULL
*/
-static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
+static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
bool *nested_exc)
{
bool alternative_state_exc = false;
- z_arch_esf_t *ptr_esf = NULL;
+ struct arch_esf *ptr_esf = NULL;
*nested_exc = false;
@@ -979,14 +979,14 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
alternative_state_exc = true;
/* Dump the Secure stack before handling the actual fault. */
- z_arch_esf_t *secure_esf;
+ struct arch_esf *secure_esf;
if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
/* Secure stack pointed by PSP */
- secure_esf = (z_arch_esf_t *)psp;
+ secure_esf = (struct arch_esf *)psp;
} else {
/* Secure stack pointed by MSP */
- secure_esf = (z_arch_esf_t *)msp;
+ secure_esf = (struct arch_esf *)msp;
*nested_exc = true;
}
@@ -997,9 +997,9 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 * and supply it to the fault handling function.
*/
if (exc_return & EXC_RETURN_MODE_THREAD) {
- ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
+ ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
} else {
- ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
+ ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
}
}
#elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
@@ -1024,10 +1024,10 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
/* Non-Secure stack frame on PSP */
- ptr_esf = (z_arch_esf_t *)psp;
+ ptr_esf = (struct arch_esf *)psp;
} else {
/* Non-Secure stack frame on MSP */
- ptr_esf = (z_arch_esf_t *)msp;
+ ptr_esf = (struct arch_esf *)msp;
}
} else {
/* Exception entry occurred in Non-Secure stack. */
@@ -1046,11 +1046,11 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
if (!alternative_state_exc) {
if (exc_return & EXC_RETURN_MODE_THREAD) {
/* Returning to thread mode */
- ptr_esf = (z_arch_esf_t *)psp;
+ ptr_esf = (struct arch_esf *)psp;
} else {
/* Returning to handler mode */
- ptr_esf = (z_arch_esf_t *)msp;
+ ptr_esf = (struct arch_esf *)msp;
*nested_exc = true;
}
}
@@ -1095,12 +1095,12 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
uint32_t reason = K_ERR_CPU_EXCEPTION;
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
bool recoverable, nested_exc;
- z_arch_esf_t *esf;
+ struct arch_esf *esf;
/* Create a stack-ed copy of the ESF to be used during
* the fault handling process.
*/
- z_arch_esf_t esf_copy;
+ struct arch_esf esf_copy;
/* Force unlock interrupts */
arch_irq_unlock(0);
@@ -1123,13 +1123,13 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
/* Copy ESF */
#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
- memcpy(&esf_copy, esf, sizeof(z_arch_esf_t));
+ memcpy(&esf_copy, esf, sizeof(struct arch_esf));
ARG_UNUSED(callee_regs);
#else
/* the extra exception info is not present in the original esf
* so we only copy the fields before those.
*/
- memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
+ memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
esf_copy.extra_info = (struct __extra_esf_info) {
.callee = callee_regs,
.exc_return = exc_return,
@@ -1192,5 +1192,7 @@ void z_arm_fault_init(void)
#endif /* CONFIG_BUILTIN_STACK_GUARD */
#ifdef CONFIG_TRAP_UNALIGNED_ACCESS
SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
+#else
+ SCB->CCR &= ~SCB_CCR_UNALIGN_TRP_Msk;
#endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
}
diff --git a/arch/arm/core/cortex_m/irq_manage.c b/arch/arm/core/cortex_m/irq_manage.c
index 3940d5246d4..cc62386e8ac 100644
--- a/arch/arm/core/cortex_m/irq_manage.c
+++ b/arch/arm/core/cortex_m/irq_manage.c
@@ -94,7 +94,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
#endif /* !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) */
-void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
/**
*
@@ -122,7 +122,7 @@ void _arch_isr_direct_pm(void)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* Lock all interrupts. irq_lock() will on this CPU only disable those
* lower than BASEPRI, which is not what we want. See comments in
- * arch/arm/core/isr_wrapper.S
+ * arch/arm/core/cortex_m/isr_wrapper.c
*/
__asm__ volatile("cpsid i" : : : "memory");
#else
@@ -131,7 +131,7 @@ void _arch_isr_direct_pm(void)
if (_kernel.idle) {
_kernel.idle = 0;
- z_pm_save_idle_exit();
+ pm_system_resume();
}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
diff --git a/arch/arm/core/cortex_m/isr_wrapper.c b/arch/arm/core/cortex_m/isr_wrapper.c
index 6e6016508c6..46aac160c3b 100644
--- a/arch/arm/core/cortex_m/isr_wrapper.c
+++ b/arch/arm/core/cortex_m/isr_wrapper.c
@@ -42,7 +42,7 @@ void _isr_wrapper(void)
* idle, this ensures that the calculation and programming of the
* device for the next timer deadline is not interrupted. For
* non-tickless idle, this ensures that the clearing of the kernel idle
- * state is not interrupted. In each case, z_pm_save_idle_exit
+ * state is not interrupted. In each case, pm_system_resume
* is called with interrupts disabled.
*/
@@ -59,7 +59,7 @@ void _isr_wrapper(void)
if (_kernel.idle != 0) {
/* clear kernel idle state */
_kernel.idle = 0;
- z_pm_save_idle_exit();
+ pm_system_resume();
}
/* re-enable interrupts */
__enable_irq();
diff --git a/arch/arm/core/cortex_m/pm_s2ram.S b/arch/arm/core/cortex_m/pm_s2ram.S
index 1e5bca04fe2..f9c82b4069b 100644
--- a/arch/arm/core/cortex_m/pm_s2ram.S
+++ b/arch/arm/core/cortex_m/pm_s2ram.S
@@ -27,6 +27,11 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
* r0: address of the system_off function
*/
push {r4-r12, lr}
+
+ /* Move system_off to protected register. */
+ mov r4, r0
+
+ /* Store CPU context */
ldr r1, =_cpu_context
mrs r2, msp
@@ -71,7 +76,7 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
* Call the system_off function passed as parameter. This should never
* return.
*/
- blx r0
+ blx r4
/*
* The system_off function returns here only when the powering off was
@@ -81,9 +86,10 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
/*
* Reset the marking of suspend to RAM, return is ignored.
*/
- push {r0}
bl pm_s2ram_mark_check_and_clear
- pop {r0}
+
+ /* Move system_off back to r0 as return value */
+ mov r0, r4
pop {r4-r12, lr}
bx lr
@@ -93,11 +99,14 @@ GTEXT(arch_pm_s2ram_resume)
SECTION_FUNC(TEXT, arch_pm_s2ram_resume)
/*
* Check if reset occurred after suspending to RAM.
+ * Store LR to ensure we can continue boot when we are not suspended
+ * to RAM. In addition to LR, R0 is pushed too, to ensure "SP mod 8 = 0",
+	 * as required by rule 6.2.1.2 of the AAPCS32.
*/
- push {lr}
+ push {r0, lr}
bl pm_s2ram_mark_check_and_clear
cmp r0, #0x1
- pop {lr}
+ pop {r0, lr}
beq resume
bx lr
diff --git a/arch/arm/core/cortex_m/prep_c.c b/arch/arm/core/cortex_m/prep_c.c
index 422d45b57e1..10f78c44a25 100644
--- a/arch/arm/core/cortex_m/prep_c.c
+++ b/arch/arm/core/cortex_m/prep_c.c
@@ -20,6 +20,8 @@
#include
#include
#include
+#include
+#include
#if defined(__GNUC__)
/*
@@ -181,6 +183,10 @@ extern FUNC_NORETURN void z_cstart(void);
*/
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
+
relocate_vector_table();
#if defined(CONFIG_CPU_HAS_FPU)
z_arm_floating_point_init();
@@ -193,6 +199,13 @@ void z_prep_c(void)
#else
z_arm_interrupt_init();
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
+#endif
+
+#ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT
+ z_arm_debug_enable_null_pointer_detection();
+#endif
z_cstart();
CODE_UNREACHABLE;
}
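
The newly honored prep hook runs at the very top of z_prep_c(), before the vector table is relocated and before arch_cache_init(); a minimal sketch, assuming CONFIG_SOC_PREP_HOOK=y:

```c
#include <zephyr/platform/hooks.h>

void soc_prep_hook(void)
{
	/* runs before kernel data structures exist: suitable for, e.g.,
	 * remapping memory or switching clocks, but no kernel APIs yet
	 */
}
```
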
diff --git a/arch/arm/core/cortex_m/reset.S b/arch/arm/core/cortex_m/reset.S
index 332f1a60c10..bc75ccfceaa 100644
--- a/arch/arm/core/cortex_m/reset.S
+++ b/arch/arm/core/cortex_m/reset.S
@@ -24,8 +24,8 @@ GDATA(z_interrupt_stacks)
#if defined(CONFIG_DEBUG_THREAD_INFO)
GDATA(z_sys_post_kernel)
#endif
-#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
-GTEXT(z_arm_platform_init)
+#if defined(CONFIG_SOC_RESET_HOOK)
+GTEXT(soc_reset_hook)
#endif
#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
GTEXT(z_arm_init_arch_hw_at_boot)
@@ -93,8 +93,8 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
bl arch_pm_s2ram_resume
#endif /* CONFIG_PM_S2RAM */
-#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
- bl z_arm_platform_init
+#if defined(CONFIG_SOC_RESET_HOOK)
+ bl soc_reset_hook
#endif
#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
diff --git a/arch/arm/core/cortex_m/swap.c b/arch/arm/core/cortex_m/swap.c
index b60f6acd675..027fb47a01f 100644
--- a/arch/arm/core/cortex_m/swap.c
+++ b/arch/arm/core/cortex_m/swap.c
@@ -96,11 +96,15 @@ uintptr_t z_arm_pendsv_c(uintptr_t exc_ret)
/* restore mode */
IF_ENABLED(CONFIG_USERSPACE, ({
- CONTROL_Type ctrl = {.w = __get_CONTROL()};
- /* exit privileged state when returing to thread mode. */
- ctrl.b.nPRIV = 0;
- __set_CONTROL(ctrl.w | current->arch.mode);
- }));
+ CONTROL_Type ctrl = {.w = __get_CONTROL()};
+ /* exit privileged state when returning to thread mode. */
+ ctrl.b.nPRIV = 0;
+	/* __set_CONTROL inserts an ISB which may not be necessary here
+ * (stack pointer may not be touched), but it's recommended to avoid
+ * executing pre-fetched instructions with the previous privilege.
+ */
+ __set_CONTROL(ctrl.w | current->arch.mode);
+ }));
return exc_ret;
}
diff --git a/arch/arm/core/cortex_m/thread.c b/arch/arm/core/cortex_m/thread.c
index e9ab7292ab5..fa500032d3c 100644
--- a/arch/arm/core/cortex_m/thread.c
+++ b/arch/arm/core/cortex_m/thread.c
@@ -588,7 +588,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
"bx r4\n" /* We don’t intend to return, so there is no need to link. */
: "+r" (_main)
: "r" (stack_ptr)
- : "r0", "r1", "r2", "r3", "r4");
+ : "r0", "r1", "r2", "r3", "r4", "ip", "lr");
CODE_UNREACHABLE;
}
@@ -659,7 +659,7 @@ FUNC_NORETURN void z_arm_switch_to_main_no_multithreading(
#ifdef CONFIG_BUILTIN_STACK_GUARD
, [_psplim]"r" (psplim)
#endif
- : "r0", "r1", "r2", "r3"
+ : "r0", "r1", "r2", "ip", "lr"
);
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
diff --git a/arch/arm/core/elf.c b/arch/arm/core/elf.c
index 108c72728d5..5fdc5471a6b 100644
--- a/arch/arm/core/elf.c
+++ b/arch/arm/core/elf.c
@@ -12,6 +12,34 @@
LOG_MODULE_REGISTER(elf, CONFIG_LLEXT_LOG_LEVEL);
+#define R_ARM_NONE 0
+#define R_ARM_PC24 1
+#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
+#define R_ARM_COPY 20
+#define R_ARM_GLOB_DAT 21
+#define R_ARM_JUMP_SLOT 22
+#define R_ARM_RELATIVE 23
+#define R_ARM_CALL 28
+#define R_ARM_JUMP24 29
+#define R_ARM_TARGET1 38
+#define R_ARM_V4BX 40
+#define R_ARM_PREL31 42
+#define R_ARM_MOVW_ABS_NC 43
+#define R_ARM_MOVT_ABS 44
+#define R_ARM_MOVW_PREL_NC 45
+#define R_ARM_MOVT_PREL 46
+#define R_ARM_ALU_PC_G0_NC 57
+#define R_ARM_ALU_PC_G1_NC 59
+#define R_ARM_LDR_PC_G2 63
+
+#define R_ARM_THM_CALL 10
+#define R_ARM_THM_JUMP24 30
+#define R_ARM_THM_MOVW_ABS_NC 47
+#define R_ARM_THM_MOVT_ABS 48
+#define R_ARM_THM_MOVW_PREL_NC 49
+#define R_ARM_THM_MOVT_PREL 50
+
#define OPCODE2ARMMEM(x) ((uint32_t)(x))
#define OPCODE2THM16MEM(x) ((uint16_t)(x))
#define MEM2ARMOPCODE(x) OPCODE2ARMMEM(x)
diff --git a/arch/arm/core/fatal.c b/arch/arm/core/fatal.c
index 4364d48d45d..4532e238f05 100644
--- a/arch/arm/core/fatal.c
+++ b/arch/arm/core/fatal.c
@@ -18,7 +18,7 @@
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_EXCEPTION_DEBUG
-static void esf_dump(const z_arch_esf_t *esf)
+static void esf_dump(const struct arch_esf *esf)
{
LOG_ERR("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x",
esf->basic.a1, esf->basic.a2, esf->basic.a3);
@@ -66,7 +66,7 @@ static void esf_dump(const z_arch_esf_t *esf)
}
#endif /* CONFIG_EXCEPTION_DEBUG */
-void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
if (esf != NULL) {
@@ -102,7 +102,7 @@ void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
* @param esf exception frame
* @param callee_regs Callee-saved registers (R4-R11)
*/
-void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs)
+void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs)
{
#if !(defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
ARG_UNUSED(callee_regs);
@@ -130,9 +130,9 @@ void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs)
#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
z_arm_fatal_error(reason, esf);
#else
- z_arch_esf_t esf_copy;
+ struct arch_esf esf_copy;
- memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
+ memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* extra exception info is collected in callee_reg param
* on CONFIG_ARMV7_M_ARMV8_M_MAINLINE
@@ -156,7 +156,7 @@ void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs)
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
uint32_t *ssf_contents = ssf_ptr;
- z_arch_esf_t oops_esf = { 0 };
+ struct arch_esf oops_esf = { 0 };
/* TODO: Copy the rest of the register set out of ssf_ptr */
oops_esf.basic.pc = ssf_contents[3];
diff --git a/arch/arm/core/gdbstub.c b/arch/arm/core/gdbstub.c
index 5386cfa619f..60d16b78c31 100644
--- a/arch/arm/core/gdbstub.c
+++ b/arch/arm/core/gdbstub.c
@@ -42,7 +42,7 @@ static int is_bkpt(unsigned int exc_cause)
}
 /* Wrapper function to save and restore execution context */
-void z_gdb_entry(z_arch_esf_t *esf, unsigned int exc_cause)
+void z_gdb_entry(struct arch_esf *esf, unsigned int exc_cause)
{
/* Disable the hardware breakpoint in case it was set */
__asm__ volatile("mcr p14, 0, %0, c0, c0, 5" ::"r"(0x0) :);
diff --git a/arch/arm/core/irq_offload.c b/arch/arm/core/irq_offload.c
index 5dc1feccf7a..65349de331b 100644
--- a/arch/arm/core/irq_offload.c
+++ b/arch/arm/core/irq_offload.c
@@ -42,3 +42,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
offload_routine = NULL;
k_sched_unlock();
}
+
+void arch_irq_offload_init(void)
+{
+}
diff --git a/arch/arm/core/mpu/arm_mpu.c b/arch/arm/core/mpu/arm_mpu.c
index fe5d86c822d..3f9cf095754 100644
--- a/arch/arm/core/mpu/arm_mpu.c
+++ b/arch/arm/core/mpu/arm_mpu.c
@@ -54,6 +54,7 @@ static uint8_t static_regions_num;
#elif defined(CONFIG_CPU_CORTEX_M23) || \
defined(CONFIG_CPU_CORTEX_M33) || \
defined(CONFIG_CPU_CORTEX_M55) || \
+ defined(CONFIG_CPU_CORTEX_M85) || \
defined(CONFIG_AARCH32_ARMV8_R)
#include "arm_mpu_v8_internal.h"
#else
diff --git a/arch/arm/core/mpu/arm_mpu_v8_internal.h b/arch/arm/core/mpu/arm_mpu_v8_internal.h
index 66a00a452a7..51ab093d17c 100644
--- a/arch/arm/core/mpu/arm_mpu_v8_internal.h
+++ b/arch/arm/core/mpu/arm_mpu_v8_internal.h
@@ -31,7 +31,7 @@ struct dynamic_region_info {
*/
static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
#if defined(CONFIG_CPU_CORTEX_M23) || defined(CONFIG_CPU_CORTEX_M33) || \
- defined(CONFIG_CPU_CORTEX_M55)
+ defined(CONFIG_CPU_CORTEX_M55) || defined(CONFIG_CPU_CORTEX_M85)
static inline void mpu_set_mair0(uint32_t mair0)
{
MPU->MAIR0 = mair0;
diff --git a/arch/arm/include/cortex_a_r/exception.h b/arch/arm/include/cortex_a_r/exception.h
index 7519016176c..6daa9c106ee 100644
--- a/arch/arm/include/cortex_a_r/exception.h
+++ b/arch/arm/include/cortex_a_r/exception.h
@@ -38,7 +38,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void)
return (arch_curr_cpu()->nested != 0U);
}
-static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf)
{
return (arch_curr_cpu()->arch.exc_depth > 1U) ? (true) : (false);
}
@@ -48,7 +48,7 @@ static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
* This function is used by privileged code to determine if the thread
* associated with the stack frame is in user mode.
*/
-static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf)
{
return ((esf->basic.xpsr & CPSR_M_Msk) == CPSR_M_USR);
}
diff --git a/arch/arm/include/cortex_a_r/kernel_arch_func.h b/arch/arm/include/cortex_a_r/kernel_arch_func.h
index 88f631ff4b4..3486d7d4d4e 100644
--- a/arch/arm/include/cortex_a_r/kernel_arch_func.h
+++ b/arch/arm/include/cortex_a_r/kernel_arch_func.h
@@ -59,7 +59,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
uint32_t stack_end,
uint32_t stack_start);
-extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
#endif /* _ASMLANGUAGE */
diff --git a/arch/arm/include/cortex_m/exception.h b/arch/arm/include/cortex_m/exception.h
index bf86abd77c7..89bdd4b83e9 100644
--- a/arch/arm/include/cortex_m/exception.h
+++ b/arch/arm/include/cortex_m/exception.h
@@ -68,7 +68,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void)
* @return true if execution state was in handler mode, before
* the current exception occurred, otherwise false.
*/
-static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf)
{
return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
}
@@ -80,7 +80,7 @@ static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
* @param esf the exception stack frame (unused)
* @return true if the current thread was in unprivileged mode
*/
-static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf)
{
return z_arm_thread_is_in_user_mode();
}
diff --git a/arch/arm/include/cortex_m/kernel_arch_func.h b/arch/arm/include/cortex_m/kernel_arch_func.h
index 77619c9d6c4..132c056c910 100644
--- a/arch/arm/include/cortex_m/kernel_arch_func.h
+++ b/arch/arm/include/cortex_m/kernel_arch_func.h
@@ -76,7 +76,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
uint32_t stack_end,
uint32_t stack_start);
-extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
#endif /* _ASMLANGUAGE */
diff --git a/arch/arm/include/kernel_arch_data.h b/arch/arm/include/kernel_arch_data.h
index 5ad19db8f84..9b4ca04f66c 100644
--- a/arch/arm/include/kernel_arch_data.h
+++ b/arch/arm/include/kernel_arch_data.h
@@ -42,7 +42,7 @@
extern "C" {
#endif
-typedef struct __esf _esf_t;
+typedef struct arch_esf _esf_t;
typedef struct __basic_sf _basic_sf_t;
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
typedef struct __fpu_sf _fpu_sf_t;
diff --git a/arch/arm/include/offsets_short_arch.h b/arch/arm/include/offsets_short_arch.h
index 4ceb1fc3f7a..ea6af4db92d 100644
--- a/arch/arm/include/offsets_short_arch.h
+++ b/arch/arm/include/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
/* kernel */
diff --git a/arch/arm64/core/CMakeLists.txt b/arch/arm64/core/CMakeLists.txt
index 05e4be8c0ea..9112e55f302 100644
--- a/arch/arm64/core/CMakeLists.txt
+++ b/arch/arm64/core/CMakeLists.txt
@@ -4,10 +4,12 @@ zephyr_library()
zephyr_library_sources(
cpu_idle.S
+ early_mem_funcs.S
fatal.c
irq_init.c
irq_manage.c
prep_c.c
+ reboot.c
reset.S
reset.c
switch.S
@@ -27,6 +29,7 @@ if(${SRAM_LENGTH} GREATER 11 OR ${KERNEL_VM_LENGTH} GREATER 11)
zephyr_cc_option(-mcmodel=large)
endif()
+zephyr_library_sources_ifdef(CONFIG_LLEXT elf.c)
zephyr_library_sources_ifdef(CONFIG_FPU_SHARING fpu.c fpu.S)
zephyr_library_sources_ifdef(CONFIG_ARM_MMU mmu.c mmu.S)
zephyr_library_sources_ifdef(CONFIG_ARM_MPU cortex_r/arm_mpu.c)
@@ -43,7 +46,7 @@ if ((CONFIG_MP_MAX_NUM_CPUS GREATER 1) OR (CONFIG_SMP))
endif ()
zephyr_cc_option_ifdef(CONFIG_USERSPACE -mno-outline-atomics)
-zephyr_cc_option_ifdef(CONFIG_ARM64_ENABLE_FRAME_POINTER -mno-omit-leaf-frame-pointer)
+zephyr_cc_option_ifdef(CONFIG_FRAME_POINTER -mno-omit-leaf-frame-pointer)
# GCC may generate ldp/stp instructions with the Advanced SIMD Qn registers for
# consecutive 32-byte loads and stores. Saving and restoring the Advanced SIMD
diff --git a/arch/arm64/core/Kconfig b/arch/arm64/core/Kconfig
index 8f09d49a04c..29f2cb6105b 100644
--- a/arch/arm64/core/Kconfig
+++ b/arch/arm64/core/Kconfig
@@ -145,13 +145,30 @@ config ARM64_SAFE_EXCEPTION_STACK
config ARM64_ENABLE_FRAME_POINTER
bool
- default y
depends on OVERRIDE_FRAME_POINTER_DEFAULT && !OMIT_FRAME_POINTER
+ depends on !FRAME_POINTER
+ select DEPRECATED
help
+ Deprecated. Use CONFIG_FRAME_POINTER instead.
Hidden option to simplify access to OVERRIDE_FRAME_POINTER_DEFAULT
and OMIT_FRAME_POINTER. It is automatically enabled when the frame
pointer unwinding is enabled.
+config ARM64_EXCEPTION_STACK_TRACE
+ bool
+ default y
+ depends on FRAME_POINTER
+ help
+ Internal config to enable runtime stack traces on fatal exceptions.
+
+config ARCH_HAS_STACKWALK
+ bool
+ default y
+ depends on FRAME_POINTER
+ help
+ Internal config to indicate that the arch_stack_walk() API is implemented
+ and it can be enabled.
+
config ARM64_SAFE_EXCEPTION_STACK_SIZE
int "The stack size of the safe exception stack"
default 4096
diff --git a/arch/arm64/core/coredump.c b/arch/arm64/core/coredump.c
index 399cf85e3d0..0176b61612e 100644
--- a/arch/arm64/core/coredump.c
+++ b/arch/arm64/core/coredump.c
@@ -13,7 +13,7 @@
#define ARCH_HDR_VER 1
/* Structure to store the architecture registers passed arch_coredump_info_dump
- * As callee saved registers are not provided in z_arch_esf_t structure in Zephyr
+ * As callee-saved registers are not provided in the struct arch_esf in Zephyr
* we just need 22 registers.
*/
struct arm64_arch_block {
@@ -50,7 +50,7 @@ struct arm64_arch_block {
*/
static struct arm64_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
{
/* Target architecture information header */
/* Information just relevant to the python parser */
@@ -69,7 +69,7 @@ void arch_coredump_info_dump(const z_arch_esf_t *esf)
/*
* Copies the thread registers to a memory block that will be printed out
- * The thread registers are already provided by structure z_arch_esf_t
+ * The thread registers are already provided by struct arch_esf
*/
arch_blk.r.x0 = esf->x0;
arch_blk.r.x1 = esf->x1;
diff --git a/arch/arm64/core/cpu_idle.S b/arch/arm64/core/cpu_idle.S
index e01881ed58d..7681e212054 100644
--- a/arch/arm64/core/cpu_idle.S
+++ b/arch/arm64/core/cpu_idle.S
@@ -13,7 +13,7 @@
#include
_ASM_FILE_PROLOGUE
-
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
GTEXT(arch_cpu_idle)
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
@@ -25,7 +25,9 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
wfi
msr daifclr, #(DAIFCLR_IRQ_BIT)
ret
+#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
GTEXT(arch_cpu_atomic_idle)
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
@@ -41,3 +43,5 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
msr daifclr, #(DAIFCLR_IRQ_BIT)
_irq_disabled:
ret
+
+#endif
diff --git a/arch/arm64/core/early_mem_funcs.S b/arch/arm64/core/early_mem_funcs.S
new file mode 100644
index 00000000000..383cdec7901
--- /dev/null
+++ b/arch/arm64/core/early_mem_funcs.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) BayLibre SAS
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+
+_ASM_FILE_PROLOGUE
+
+/*
+ * These simple memset and memcpy alternatives are necessary as the optimized
+ * ones depend on the MMU to be active (see commit c5b898743a20).
+ *
+ * Furthermore, we can't implement those in C as the compiler is just too
+ * smart for its own good and replaces our simple loops into direct calls
+ * to memset or memcpy on its own.
+ */
+
+/* void z_early_memset(void *dst, int c, size_t n) */
+GTEXT(z_early_memset)
+SECTION_FUNC(TEXT, z_early_memset)
+
+ /* is dst pointer 8-bytes aligned? */
+ tst x0, #0x7
+ b.ne 2f
+
+ /* at least 8 bytes to set? */
+ cmp x2, #8
+ b.lo 2f
+
+ /* spread the byte value across whole 64 bits */
+ and x8, x1, #0xff
+ mov x9, #0x0101010101010101
+ mul x8, x8, x9
+
+1: /* 8 bytes at a time */
+ sub x2, x2, #8
+ cmp x2, #7
+ str x8, [x0], #8
+ b.hi 1b
+
+2: /* at least one byte to set? */
+ cbz x2, 4f
+
+3: /* one byte at a time */
+ subs x2, x2, #1
+ strb w8, [x0], #1
+ b.ne 3b
+
+4: ret
+
+/* void z_early_memcpy(void *dst, const void *src, size_t n) */
+GTEXT(z_early_memcpy)
+SECTION_FUNC(TEXT, z_early_memcpy)
+
+ /* are dst and src pointers 8-bytes aligned? */
+ orr x8, x1, x0
+ tst x8, #0x7
+ b.ne 2f
+
+ /* at least 8 bytes to copy? */
+ cmp x2, #8
+ b.lo 2f
+
+1: /* 8 bytes at a time */
+ ldr x8, [x1], #8
+ sub x2, x2, #8
+ cmp x2, #7
+ str x8, [x0], #8
+ b.hi 1b
+
+2: /* at least one byte to copy? */
+ cbz x2, 4f
+
+3: /* one byte at a time */
+ ldrb w8, [x1], #1
+ subs x2, x2, #1
+ strb w8, [x0], #1
+ b.ne 3b
+
+4: ret
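
The comment about the compiler being too smart refers to loop idiom recognition; a naive C replacement such as this sketch is legally rewritten by GCC and Clang into a call to memcpy() itself, which defeats an MMU-independent copy routine:

```c
#include <stddef.h>

void early_memcpy_naive(void *dst, const void *src, size_t n)
{
	char *d = dst;
	const char *s = src;

	while (n--) {
		*d++ = *s++;	/* may be pattern-matched into memcpy() */
	}
}
```
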
diff --git a/arch/arm64/core/elf.c b/arch/arm64/core/elf.c
new file mode 100644
index 00000000000..66e9f21fc06
--- /dev/null
+++ b/arch/arm64/core/elf.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2024 BayLibre SAS
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+LOG_MODULE_REGISTER(elf, CONFIG_LLEXT_LOG_LEVEL);
+
+#define R_ARM_NONE 0
+#define R_AARCH64_NONE 256
+
+/* Static data relocations */
+#define R_AARCH64_ABS64 257
+#define R_AARCH64_ABS32 258
+#define R_AARCH64_ABS16 259
+#define R_AARCH64_PREL64 260
+#define R_AARCH64_PREL32 261
+#define R_AARCH64_PREL16 262
+
+/* Static relocations */
+#define R_AARCH64_MOVW_UABS_G0 263
+#define R_AARCH64_MOVW_UABS_G0_NC 264
+#define R_AARCH64_MOVW_UABS_G1 265
+#define R_AARCH64_MOVW_UABS_G1_NC 266
+#define R_AARCH64_MOVW_UABS_G2 267
+#define R_AARCH64_MOVW_UABS_G2_NC 268
+#define R_AARCH64_MOVW_UABS_G3 269
+#define R_AARCH64_MOVW_SABS_G0 270
+#define R_AARCH64_MOVW_SABS_G1 271
+#define R_AARCH64_MOVW_SABS_G2 272
+#define R_AARCH64_MOVW_PREL_G0 287
+#define R_AARCH64_MOVW_PREL_G0_NC 288
+#define R_AARCH64_MOVW_PREL_G1 289
+#define R_AARCH64_MOVW_PREL_G1_NC 290
+#define R_AARCH64_MOVW_PREL_G2 291
+#define R_AARCH64_MOVW_PREL_G2_NC 292
+#define R_AARCH64_MOVW_PREL_G3 293
+
+#define R_AARCH64_LD_PREL_LO19 273
+#define R_AARCH64_ADR_PREL_LO21 274
+#define R_AARCH64_ADR_PREL_PG_HI21 275
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276
+#define R_AARCH64_ADD_ABS_LO12_NC 277
+#define R_AARCH64_LDST8_ABS_LO12_NC 278
+#define R_AARCH64_TSTBR14 279
+#define R_AARCH64_CONDBR19 280
+#define R_AARCH64_JUMP26 282
+#define R_AARCH64_CALL26 283
+#define R_AARCH64_LDST16_ABS_LO12_NC 284
+#define R_AARCH64_LDST32_ABS_LO12_NC 285
+#define R_AARCH64_LDST64_ABS_LO12_NC 286
+#define R_AARCH64_LDST128_ABS_LO12_NC 299
+
+/* Masks for immediate values */
+#define AARCH64_MASK_IMM12 BIT_MASK(12)
+#define AARCH64_MASK_IMM14 BIT_MASK(14)
+#define AARCH64_MASK_IMM16 BIT_MASK(16)
+#define AARCH64_MASK_IMM19 BIT_MASK(19)
+#define AARCH64_MASK_IMM26 BIT_MASK(26)
+
+/* MOV instruction helper symbols */
+#define AARCH64_MASK_MOV_OPCODE BIT_MASK(8)
+#define AARCH64_SHIFT_MOV_OPCODE (23)
+#define AARCH64_SHIFT_MOV_IMM16 (5)
+#define AARCH64_OPCODE_MOVN (0b00100101)
+#define AARCH64_OPCODE_MOVZ (0b10100101)
+
+/* ADR instruction helper symbols */
+#define AARCH64_MASK_ADR_IMMLO BIT_MASK(2)
+#define AARCH64_MASK_ADR_IMMHI BIT_MASK(19)
+#define AARCH64_SHIFT_ADR_IMMLO (29)
+#define AARCH64_SHIFT_ADR_IMMHI (5)
+#define AARCH64_ADR_IMMLO_BITS (2)
+
+#define AARCH64_PAGE(expr) ((expr) & ~0xFFF)
+
+enum aarch64_reloc_type {
+ AARCH64_RELOC_TYPE_NONE,
+ AARCH64_RELOC_TYPE_ABS,
+ AARCH64_RELOC_TYPE_PREL,
+ AARCH64_RELOC_TYPE_PAGE,
+};
+
+/**
+ * @brief Function computing a relocation (X in AArch64 ELF).
+ *
+ * @param[in] reloc_type Type of relocation operation.
+ * @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
+ * @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
+ * @param[in] addend Addend from RELA relocation.
+ *
+ * @return Result of the relocation operation (X in AArch64 ELF)
+ */
+static uint64_t reloc(enum aarch64_reloc_type reloc_type, uintptr_t loc, uintptr_t sym_base_addr,
+ int64_t addend)
+{
+ switch (reloc_type) {
+ case AARCH64_RELOC_TYPE_ABS:
+ return sym_base_addr + addend;
+ case AARCH64_RELOC_TYPE_PREL:
+ return sym_base_addr + addend - loc;
+ case AARCH64_RELOC_TYPE_PAGE:
+ return AARCH64_PAGE(sym_base_addr + addend) - AARCH64_PAGE(loc);
+ case AARCH64_RELOC_TYPE_NONE:
+ return 0;
+ }
+
+ CODE_UNREACHABLE;
+}
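+
+/*
+ * Example with illustrative values (not from a real build): for an
+ * instruction at P = 0x10000 referencing a symbol at S = 0x12340 with
+ * addend A = 0, AARCH64_RELOC_TYPE_PREL yields X = S + A - P = 0x2340,
+ * while AARCH64_RELOC_TYPE_PAGE yields X = PAGE(S + A) - PAGE(P) = 0x2000.
+ */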
+
+/**
+ * @brief Handler for static data relocations.
+ *
+ * @param[in] rel Relocation data provided by ELF
+ * @param[in] reloc_type Type of relocation operation.
+ * @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
+ * @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
+ *
+ * @retval -ERANGE Relocation value overflow
+ * @retval 0 Successful relocation
+ */
+static int data_reloc_handler(elf_rela_t *rel, elf_word reloc_type, uintptr_t loc,
+ uintptr_t sym_base_addr)
+{
+ int64_t x;
+
+ switch (reloc_type) {
+ case R_AARCH64_ABS64:
+ *(int64_t *)loc = reloc(AARCH64_RELOC_TYPE_ABS, loc, sym_base_addr, rel->r_addend);
+ break;
+
+ case R_AARCH64_ABS32:
+ x = reloc(AARCH64_RELOC_TYPE_ABS, loc, sym_base_addr, rel->r_addend);
+ if (x < 0 || x > UINT32_MAX) {
+ return -ERANGE;
+ }
+ *(uint32_t *)loc = (uint32_t)x;
+ break;
+
+ case R_AARCH64_ABS16:
+ x = reloc(AARCH64_RELOC_TYPE_ABS, loc, sym_base_addr, rel->r_addend);
+ if (x < 0 || x > UINT16_MAX) {
+ return -ERANGE;
+ }
+ *(uint16_t *)loc = (uint16_t)x;
+ break;
+
+ case R_AARCH64_PREL64:
+ *(int64_t *)loc = reloc(AARCH64_RELOC_TYPE_PREL, loc, sym_base_addr, rel->r_addend);
+ break;
+
+ case R_AARCH64_PREL32:
+ x = reloc(AARCH64_RELOC_TYPE_PREL, loc, sym_base_addr, rel->r_addend);
+ if (x < INT32_MIN || x > INT32_MAX) {
+ return -ERANGE;
+ }
+ *(int32_t *)loc = (int32_t)x;
+ break;
+
+ case R_AARCH64_PREL16:
+ x = reloc(AARCH64_RELOC_TYPE_PREL, loc, sym_base_addr, rel->r_addend);
+ if (x < INT16_MIN || x > INT16_MAX) {
+ return -ERANGE;
+ }
+ *(int16_t *)loc = (int16_t)x;
+ break;
+
+ default:
+ CODE_UNREACHABLE;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Handler for relocations using MOV* instructions.
+ *
+ * @param[in] rel Relocation data provided by ELF
+ * @param[in] reloc_type Type of relocation operation.
+ * @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
+ * @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
+ *
+ * @retval -ERANGE Relocation value overflow
+ * @retval 0 Successful relocation
+ */
+static int movw_reloc_handler(elf_rela_t *rel, elf_word reloc_type, uintptr_t loc,
+ uintptr_t sym_base_addr)
+{
+ int64_t x;
+ uint32_t imm;
+ int lsb = 0; /* LSB of X to be used */
+ bool is_movnz = false;
+ enum aarch64_reloc_type type = AARCH64_RELOC_TYPE_ABS;
+ uint32_t opcode = sys_le32_to_cpu(*(uint32_t *)loc);
+
+ switch (reloc_type) {
+ case R_AARCH64_MOVW_SABS_G0:
+ is_movnz = true;
+ case R_AARCH64_MOVW_UABS_G0_NC:
+ case R_AARCH64_MOVW_UABS_G0:
+ break;
+
+ case R_AARCH64_MOVW_SABS_G1:
+ is_movnz = true;
+ case R_AARCH64_MOVW_UABS_G1_NC:
+ case R_AARCH64_MOVW_UABS_G1:
+ lsb = 16;
+ break;
+
+ case R_AARCH64_MOVW_SABS_G2:
+ is_movnz = true;
+ case R_AARCH64_MOVW_UABS_G2_NC:
+ case R_AARCH64_MOVW_UABS_G2:
+ lsb = 32;
+ break;
+
+ case R_AARCH64_MOVW_UABS_G3:
+ lsb = 48;
+ break;
+
+ case R_AARCH64_MOVW_PREL_G0:
+ is_movnz = true;
+ case R_AARCH64_MOVW_PREL_G0_NC:
+ type = AARCH64_RELOC_TYPE_PREL;
+ break;
+
+ case R_AARCH64_MOVW_PREL_G1:
+ is_movnz = true;
+ case R_AARCH64_MOVW_PREL_G1_NC:
+ type = AARCH64_RELOC_TYPE_PREL;
+ lsb = 16;
+ break;
+
+ case R_AARCH64_MOVW_PREL_G2:
+ is_movnz = true;
+ case R_AARCH64_MOVW_PREL_G2_NC:
+ type = AARCH64_RELOC_TYPE_PREL;
+ lsb = 32;
+ break;
+
+ case R_AARCH64_MOVW_PREL_G3:
+ is_movnz = true;
+ type = AARCH64_RELOC_TYPE_PREL;
+ lsb = 48;
+ break;
+
+ default:
+ CODE_UNREACHABLE;
+ }
+
+ x = reloc(type, loc, sym_base_addr, rel->r_addend);
+ imm = x >> lsb;
+
+ /* Manipulate opcode for signed relocations. The result depends on the sign of X. */
+ if (is_movnz) {
+ opcode &= ~(AARCH64_MASK_MOV_OPCODE << AARCH64_SHIFT_MOV_OPCODE);
+
+ if (x >= 0) {
+ opcode |= (AARCH64_OPCODE_MOVZ << AARCH64_SHIFT_MOV_OPCODE);
+ } else {
+ opcode |= (AARCH64_OPCODE_MOVN << AARCH64_SHIFT_MOV_OPCODE);
+ /* MOVN encodes the bitwise inverse of its immediate. */
+ imm = ~imm;
+ }
+ }
+
+ opcode &= ~(AARCH64_MASK_IMM16 << AARCH64_SHIFT_MOV_IMM16);
+ opcode |= (imm & AARCH64_MASK_IMM16) << AARCH64_SHIFT_MOV_IMM16;
+
+ *(uint32_t *)loc = sys_cpu_to_le32(opcode);
+
+ if (imm > UINT16_MAX) {
+ return -ERANGE;
+ }
+
+ return 0;
+}
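+
+/*
+ * Illustrative sketch of what the G0..G3 group relocations encode: a
+ * 64-bit absolute address is typically materialized as
+ *
+ *     movz x0, #:abs_g3:sym        // R_AARCH64_MOVW_UABS_G3, bits [63:48]
+ *     movk x0, #:abs_g2_nc:sym     // R_AARCH64_MOVW_UABS_G2_NC, bits [47:32]
+ *     movk x0, #:abs_g1_nc:sym     // R_AARCH64_MOVW_UABS_G1_NC, bits [31:16]
+ *     movk x0, #:abs_g0_nc:sym     // R_AARCH64_MOVW_UABS_G0_NC, bits [15:0]
+ *
+ * and the handler above patches one 16-bit slice (X >> lsb) into the
+ * imm16 field of each instruction.
+ */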
+
+/**
+ * @brief Handler for static relocations except those related to MOV* instructions.
+ *
+ * @param[in] rel Relocation data provided by ELF
+ * @param[in] reloc_type Type of relocation operation.
+ * @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
+ * @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
+ *
+ * @retval -ERANGE Relocation value overflow
+ * @retval 0 Successful relocation
+ */
+static int imm_reloc_handler(elf_rela_t *rel, elf_word reloc_type, uintptr_t loc,
+ uintptr_t sym_base_addr)
+{
+ int lsb = 2; /* LSB of X to be used */
+ int len; /* bit length of immediate value */
+ int shift = 10; /* shift of the immediate in instruction encoding */
+ uint64_t imm;
+ uint32_t bitmask = AARCH64_MASK_IMM12;
+ int64_t x;
+ bool is_adr = false;
+ enum aarch64_reloc_type type = AARCH64_RELOC_TYPE_ABS;
+ uint32_t opcode = sys_le32_to_cpu(*(uint32_t *)loc);
+
+ switch (reloc_type) {
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ case R_AARCH64_LDST8_ABS_LO12_NC:
+ lsb = 0;
+ len = 12;
+ break;
+
+ case R_AARCH64_LDST16_ABS_LO12_NC:
+ lsb = 1;
+ len = 11;
+ break;
+
+ case R_AARCH64_LDST32_ABS_LO12_NC:
+ len = 10;
+ break;
+
+ case R_AARCH64_LDST64_ABS_LO12_NC:
+ lsb = 3;
+ len = 9;
+ break;
+
+ case R_AARCH64_LDST128_ABS_LO12_NC:
+ lsb = 4;
+ len = 8;
+ break;
+
+ case R_AARCH64_LD_PREL_LO19:
+ case R_AARCH64_CONDBR19:
+ type = AARCH64_RELOC_TYPE_PREL;
+ bitmask = AARCH64_MASK_IMM19;
+ shift = 5;
+ len = 19;
+ break;
+
+ case R_AARCH64_ADR_PREL_LO21:
+ type = AARCH64_RELOC_TYPE_PREL;
+ is_adr = true;
+ lsb = 0;
+ len = 21;
+ break;
+
+ case R_AARCH64_TSTBR14:
+ type = AARCH64_RELOC_TYPE_PREL;
+ bitmask = AARCH64_MASK_IMM14;
+ shift = 5;
+ len = 14;
+ break;
+
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ type = AARCH64_RELOC_TYPE_PAGE;
+ is_adr = true;
+ lsb = 12;
+ len = 21;
+ break;
+
+ case R_AARCH64_CALL26:
+ case R_AARCH64_JUMP26:
+ type = AARCH64_RELOC_TYPE_PREL;
+ bitmask = AARCH64_MASK_IMM26;
+ shift = 0;
+ len = 26;
+ break;
+
+ default:
+ CODE_UNREACHABLE;
+ }
+
+ x = reloc(type, loc, sym_base_addr, rel->r_addend);
+ x >>= lsb;
+
+ imm = x & BIT_MASK(len);
+
+ /* ADR instruction has immediate value split into two fields. */
+ if (is_adr) {
+ uint32_t immlo, immhi;
+
+ immlo = (imm & AARCH64_MASK_ADR_IMMLO) << AARCH64_SHIFT_ADR_IMMLO;
+ imm >>= AARCH64_ADR_IMMLO_BITS;
+ immhi = (imm & AARCH64_MASK_ADR_IMMHI) << AARCH64_SHIFT_ADR_IMMHI;
+ imm = immlo | immhi;
+
+ shift = 0;
+ bitmask = ((AARCH64_MASK_ADR_IMMLO << AARCH64_SHIFT_ADR_IMMLO) |
+ (AARCH64_MASK_ADR_IMMHI << AARCH64_SHIFT_ADR_IMMHI));
+ }
+
+ opcode &= ~(bitmask << shift);
+ opcode |= (imm & bitmask) << shift;
+
+ *(uint32_t *)loc = sys_cpu_to_le32(opcode);
+
+ /* Shift the field's sign bit and all bits above it down to bit 0. */
+ x = (int64_t)(x & ~BIT_MASK(len - 1)) >> (len - 1);
+
+ /* If the value fits in the field, X is now all zeroes (0) or all ones
+ * (-1), so X + 1 must be 0 or 1. Any other value indicates that the
+ * relocation overflowed.
+ */
+ if ((uint64_t)(x + 1) >= 2) {
+ return -ERANGE;
+ }
+
+ return 0;
+}
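+
+/*
+ * Illustrative example: the common ADRP/ADD pair
+ *
+ *     adrp x0, sym                 // R_AARCH64_ADR_PREL_PG_HI21
+ *     add  x0, x0, #:lo12:sym      // R_AARCH64_ADD_ABS_LO12_NC
+ *
+ * is resolved above in two steps: the ADRP immediate gets the page delta
+ * (PAGE(S + A) - PAGE(P)) >> 12 split across the immlo/immhi fields, and
+ * the ADD immediate gets the low 12 bits of S + A with no range check
+ * (hence the _NC suffix).
+ */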
+
+/**
+ * @brief Architecture-specific function for relocating partially linked (static) ELF files
+ *
+ * ELF files contain a series of relocations described in a section. These relocation
+ * instructions are architecture specific and each architecture supporting extensions
+ * must implement this.
+ *
+ * The relocation codes for arm64 are well documented:
+ * https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#relocation
+ *
+ * @param[in] rel Relocation data provided by ELF
+ * @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF)
+ * @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF)
+ * @param[in] sym_name Name of symbol referenced by relocation
+ * @param[in] load_bias `.text` load address
+ * @retval 0 Success
+ * @retval -ENOTSUP Unsupported relocation
+ * @retval -ENOEXEC Invalid relocation
+ */
+int arch_elf_relocate(elf_rela_t *rel, uintptr_t loc, uintptr_t sym_base_addr, const char *sym_name,
+ uintptr_t load_bias)
+{
+ int ret = 0;
+ bool overflow_check = true;
+ elf_word reloc_type = ELF_R_TYPE(rel->r_info);
+
+ switch (reloc_type) {
+ case R_ARM_NONE:
+ case R_AARCH64_NONE:
+ overflow_check = false;
+ break;
+
+ case R_AARCH64_ABS64:
+ case R_AARCH64_PREL64:
+ overflow_check = false;
+ case R_AARCH64_ABS16:
+ case R_AARCH64_ABS32:
+ case R_AARCH64_PREL16:
+ case R_AARCH64_PREL32:
+ ret = data_reloc_handler(rel, reloc_type, loc, sym_base_addr);
+ break;
+
+ case R_AARCH64_MOVW_UABS_G0_NC:
+ case R_AARCH64_MOVW_UABS_G1_NC:
+ case R_AARCH64_MOVW_UABS_G2_NC:
+ case R_AARCH64_MOVW_UABS_G3:
+ case R_AARCH64_MOVW_PREL_G0_NC:
+ case R_AARCH64_MOVW_PREL_G1_NC:
+ case R_AARCH64_MOVW_PREL_G2_NC:
+ case R_AARCH64_MOVW_PREL_G3:
+ overflow_check = false;
+ case R_AARCH64_MOVW_UABS_G0:
+ case R_AARCH64_MOVW_UABS_G1:
+ case R_AARCH64_MOVW_UABS_G2:
+ case R_AARCH64_MOVW_SABS_G0:
+ case R_AARCH64_MOVW_SABS_G1:
+ case R_AARCH64_MOVW_SABS_G2:
+ case R_AARCH64_MOVW_PREL_G0:
+ case R_AARCH64_MOVW_PREL_G1:
+ case R_AARCH64_MOVW_PREL_G2:
+ ret = movw_reloc_handler(rel, reloc_type, loc, sym_base_addr);
+ break;
+
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ case R_AARCH64_LDST8_ABS_LO12_NC:
+ case R_AARCH64_LDST16_ABS_LO12_NC:
+ case R_AARCH64_LDST32_ABS_LO12_NC:
+ case R_AARCH64_LDST64_ABS_LO12_NC:
+ case R_AARCH64_LDST128_ABS_LO12_NC:
+ overflow_check = false;
+ case R_AARCH64_LD_PREL_LO19:
+ case R_AARCH64_ADR_PREL_LO21:
+ case R_AARCH64_TSTBR14:
+ case R_AARCH64_CONDBR19:
+ ret = imm_reloc_handler(rel, reloc_type, loc, sym_base_addr);
+ break;
+
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ overflow_check = false;
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ ret = imm_reloc_handler(rel, reloc_type, loc, sym_base_addr);
+ break;
+
+ case R_AARCH64_CALL26:
+ case R_AARCH64_JUMP26:
+ ret = imm_reloc_handler(rel, reloc_type, loc, sym_base_addr);
+ /* TODO Handle case when address exceeds +/- 128MB */
+ break;
+
+ default:
+ LOG_ERR("unknown relocation: %llu\n", reloc_type);
+ return -ENOEXEC;
+ }
+
+ if (overflow_check && ret == -ERANGE) {
+ LOG_ERR("sym '%s': relocation out of range (%#lx -> %#lx)\n", sym_name, loc,
+ sym_base_addr);
+ return -ENOEXEC;
+ }
+
+ return 0;
+}
diff --git a/arch/arm64/core/fatal.c b/arch/arm64/core/fatal.c
index 84ff767508e..7955b6f7d6d 100644
--- a/arch/arm64/core/fatal.c
+++ b/arch/arm64/core/fatal.c
@@ -13,6 +13,7 @@
* exceptions
*/
+#include <zephyr/debug/symtab.h>
#include
#include
#include
@@ -20,6 +21,8 @@
#include
#include
+#include "paging.h"
+
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
@@ -180,7 +183,7 @@ static void dump_esr(uint64_t esr, bool *dump_far)
LOG_ERR(" ISS: 0x%llx", GET_ESR_ISS(esr));
}
-static void esf_dump(const z_arch_esf_t *esf)
+static void esf_dump(const struct arch_esf *esf)
{
LOG_ERR("x0: 0x%016llx x1: 0x%016llx", esf->x0, esf->x1);
LOG_ERR("x2: 0x%016llx x3: 0x%016llx", esf->x2, esf->x3);
@@ -193,9 +196,13 @@ static void esf_dump(const z_arch_esf_t *esf)
LOG_ERR("x16: 0x%016llx x17: 0x%016llx", esf->x16, esf->x17);
LOG_ERR("x18: 0x%016llx lr: 0x%016llx", esf->x18, esf->lr);
}
+#endif /* CONFIG_EXCEPTION_DEBUG */
-#ifdef CONFIG_ARM64_ENABLE_FRAME_POINTER
-static void esf_unwind(const z_arch_esf_t *esf)
+#ifdef CONFIG_ARCH_STACKWALK
+typedef bool (*arm64_stacktrace_cb)(void *cookie, unsigned long addr, void *fp);
+
+static void walk_stackframe(arm64_stacktrace_cb cb, void *cookie, const struct arch_esf *esf,
+ int max_frames)
{
/*
* For GCC:
@@ -217,25 +224,64 @@ static void esf_unwind(const z_arch_esf_t *esf)
* + +-----------------+
*/
- uint64_t *fp = (uint64_t *) esf->fp;
- unsigned int count = 0;
+ uint64_t *fp;
uint64_t lr;
- LOG_ERR("");
- while (fp != NULL) {
+ if (esf != NULL) {
+ fp = (uint64_t *) esf->fp;
+ } else {
+ return;
+ }
+
+ for (int i = 0; (fp != NULL) && (i < max_frames); i++) {
lr = fp[1];
- LOG_ERR("backtrace %2d: fp: 0x%016llx lr: 0x%016llx",
- count++, (uint64_t) fp, lr);
+ if (!cb(cookie, lr, fp)) {
+ break;
+ }
fp = (uint64_t *) fp[0];
}
- LOG_ERR("");
}
-#endif
-#endif /* CONFIG_EXCEPTION_DEBUG */
+void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
+ const struct k_thread *thread, const struct arch_esf *esf)
+{
+ ARG_UNUSED(thread);
+
+ walk_stackframe((arm64_stacktrace_cb)callback_fn, cookie, esf,
+ CONFIG_ARCH_STACKWALK_MAX_FRAMES);
+}
+#endif /* CONFIG_ARCH_STACKWALK */
+
+#ifdef CONFIG_EXCEPTION_STACK_TRACE
+static bool print_trace_address(void *arg, unsigned long lr, void *fp)
+{
+ int *i = arg;
+#ifdef CONFIG_SYMTAB
+ uint32_t offset = 0;
+ const char *name = symtab_find_symbol_name(lr, &offset);
+
+ LOG_ERR(" %d: fp: 0x%016llx lr: 0x%016lx [%s+0x%x]", (*i)++, (uint64_t)fp, lr, name,
+ offset);
+#else
+ LOG_ERR(" %d: fp: 0x%016llx lr: 0x%016lx", (*i)++, (uint64_t)fp, lr);
+#endif /* CONFIG_SYMTAB */
+
+ return true;
+}
+
+static void esf_unwind(const struct arch_esf *esf)
+{
+ int i = 0;
+
+ LOG_ERR("");
+ LOG_ERR("call trace:");
+ walk_stackframe(print_trace_address, &i, esf, CONFIG_ARCH_STACKWALK_MAX_FRAMES);
+ LOG_ERR("");
+}
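+
+/*
+ * Example output (symbol names and addresses are illustrative only):
+ *
+ *   call trace:
+ *    0: fp: 0x00000000401ffb30 lr: 0x00000000400012f4 [k_panic+0x14]
+ *    1: fp: 0x00000000401ffb50 lr: 0x0000000040000abc [main+0x3c]
+ */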
+#endif /* CONFIG_EXCEPTION_STACK_TRACE */
#ifdef CONFIG_ARM64_STACK_PROTECTION
-static bool z_arm64_stack_corruption_check(z_arch_esf_t *esf, uint64_t esr, uint64_t far)
+static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, uint64_t far)
{
uint64_t sp, sp_limit, guard_start;
/* 0x25 means data abort from current EL */
@@ -275,11 +321,12 @@ static bool z_arm64_stack_corruption_check(z_arch_esf_t *esf, uint64_t esr, uint
}
#endif
-static bool is_recoverable(z_arch_esf_t *esf, uint64_t esr, uint64_t far,
+static bool is_recoverable(struct arch_esf *esf, uint64_t esr, uint64_t far,
uint64_t elr)
{
- if (!esf)
+ if (!esf) {
return false;
+ }
#ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@@ -297,7 +344,7 @@ static bool is_recoverable(z_arch_esf_t *esf, uint64_t esr, uint64_t far,
return false;
}
-void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
+void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf)
{
uint64_t esr = 0;
uint64_t elr = 0;
@@ -328,6 +375,12 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
}
#endif
+ if (IS_ENABLED(CONFIG_DEMAND_PAGING) &&
+ reason != K_ERR_STACK_CHK_FAIL &&
+ z_arm64_do_demand_paging(esf, esr, far)) {
+ return;
+ }
+
if (GET_EL(el) != MODE_EL0) {
#ifdef CONFIG_EXCEPTION_DEBUG
bool dump_far = false;
@@ -336,8 +389,9 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
dump_esr(esr, &dump_far);
- if (dump_far)
+ if (dump_far) {
LOG_ERR("FAR_ELn: 0x%016llx", far);
+ }
LOG_ERR("TPIDRRO: 0x%016llx", read_tpidrro_el0());
#endif /* CONFIG_EXCEPTION_DEBUG */
@@ -354,9 +408,9 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
esf_dump(esf);
}
-#ifdef CONFIG_ARM64_ENABLE_FRAME_POINTER
+#ifdef CONFIG_EXCEPTION_STACK_TRACE
esf_unwind(esf);
-#endif /* CONFIG_ARM64_ENABLE_FRAME_POINTER */
+#endif /* CONFIG_EXCEPTION_STACK_TRACE */
#endif /* CONFIG_EXCEPTION_DEBUG */
z_fatal_error(reason, esf);
@@ -370,7 +424,7 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
*
* @param esf exception frame
*/
-void z_arm64_do_kernel_oops(z_arch_esf_t *esf)
+void z_arm64_do_kernel_oops(struct arch_esf *esf)
{
/* x8 holds the exception reason */
unsigned int reason = esf->x8;
diff --git a/arch/arm64/core/fpu.c b/arch/arm64/core/fpu.c
index 0133eed2dca..a585165b943 100644
--- a/arch/arm64/core/fpu.c
+++ b/arch/arm64/core/fpu.c
@@ -159,7 +159,7 @@ void z_arm64_fpu_enter_exc(void)
* simulate them and leave the FPU access disabled. This also avoids the
* need for disabling interrupts in syscalls and IRQ handlers as well.
*/
-static bool simulate_str_q_insn(z_arch_esf_t *esf)
+static bool simulate_str_q_insn(struct arch_esf *esf)
{
/*
* Support only the "FP in exception" cases for now.
@@ -221,7 +221,7 @@ static bool simulate_str_q_insn(z_arch_esf_t *esf)
* don't get interrupted that is. To ensure that we mask interrupts to
* the triggering exception context.
*/
-void z_arm64_fpu_trap(z_arch_esf_t *esf)
+void z_arm64_fpu_trap(struct arch_esf *esf)
{
__ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled");
diff --git a/arch/arm64/core/irq_manage.c b/arch/arm64/core/irq_manage.c
index 4e96ce77bfa..6344d1e3696 100644
--- a/arch/arm64/core/irq_manage.c
+++ b/arch/arm64/core/irq_manage.c
@@ -18,7 +18,7 @@
#include
#include
-void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf);
+void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf);
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
/*
diff --git a/arch/arm64/core/irq_offload.c b/arch/arm64/core/irq_offload.c
index 1d5e3c829b8..4488e26849c 100644
--- a/arch/arm64/core/irq_offload.c
+++ b/arch/arm64/core/irq_offload.c
@@ -23,3 +23,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
: [svid] "i" (_SVC_CALL_IRQ_OFFLOAD),
"r" (x0), "r" (x1));
}
+
+void arch_irq_offload_init(void)
+{
+}
diff --git a/arch/arm64/core/mmu.c b/arch/arm64/core/mmu.c
index 2260d22c101..a914916d605 100644
--- a/arch/arm64/core/mmu.c
+++ b/arch/arm64/core/mmu.c
@@ -11,6 +11,7 @@
#include
#include
#include
+#include <zephyr/kernel/mm/demand_paging.h>
#include
#include
#include
@@ -21,16 +22,22 @@
#include
#include
#include
+#include <mmu.h>
#include "mmu.h"
+#include "paging.h"
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES]
__aligned(Ln_XLAT_NUM_ENTRIES * sizeof(uint64_t));
-static uint16_t xlat_use_count[CONFIG_MAX_XLAT_TABLES];
+static int xlat_use_count[CONFIG_MAX_XLAT_TABLES];
static struct k_spinlock xlat_lock;
+/* Usage count value range */
+#define XLAT_PTE_COUNT_MASK GENMASK(15, 0)
+#define XLAT_REF_COUNT_UNIT BIT(16)
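+
+/*
+ * Example: a table referenced by two translation trees (the kernel's and
+ * one memory domain's) and holding 5 live PTEs has a usage count of
+ * 2 * XLAT_REF_COUNT_UNIT + 5: references are counted in the high bits,
+ * populated PTEs in the low 16 bits.
+ */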
+
/* Returns a reference to a free table */
static uint64_t *new_table(void)
{
@@ -39,9 +46,9 @@ static uint64_t *new_table(void)
/* Look for a free table. */
for (i = 0U; i < CONFIG_MAX_XLAT_TABLES; i++) {
- if (xlat_use_count[i] == 0U) {
+ if (xlat_use_count[i] == 0) {
table = &xlat_tables[i * Ln_XLAT_NUM_ENTRIES];
- xlat_use_count[i] = 1U;
+ xlat_use_count[i] = XLAT_REF_COUNT_UNIT;
MMU_DEBUG("allocating table [%d]%p\n", i, table);
return table;
}
@@ -59,33 +66,88 @@ static inline unsigned int table_index(uint64_t *pte)
return i;
}
-/* Makes a table free for reuse. */
-static void free_table(uint64_t *table)
+/* Adjusts usage count and returns current count. */
+static int table_usage(uint64_t *table, int adjustment)
{
unsigned int i = table_index(table);
+ int prev_count = xlat_use_count[i];
+ int new_count = prev_count + adjustment;
- MMU_DEBUG("freeing table [%d]%p\n", i, table);
- __ASSERT(xlat_use_count[i] == 1U, "table still in use");
- xlat_use_count[i] = 0U;
+ /* be selective here to avoid flooding the debug log */
+ if ((IS_ENABLED(DUMP_PTE) && adjustment != 0) || new_count == 0) {
+ MMU_DEBUG("table [%d]%p: usage %#x -> %#x\n", i, table, prev_count, new_count);
+ }
+
+ __ASSERT(new_count >= 0,
+ "table use count underflow");
+ __ASSERT(new_count == 0 || new_count >= XLAT_REF_COUNT_UNIT,
+ "table in use with no reference to it");
+ __ASSERT((new_count & XLAT_PTE_COUNT_MASK) <= Ln_XLAT_NUM_ENTRIES,
+ "table PTE count overflow");
+
+ xlat_use_count[i] = new_count;
+ return new_count;
}
-/* Adjusts usage count and returns current count. */
-static int table_usage(uint64_t *table, int adjustment)
+static inline void inc_table_ref(uint64_t *table)
{
- unsigned int i = table_index(table);
+ table_usage(table, XLAT_REF_COUNT_UNIT);
+}
- xlat_use_count[i] += adjustment;
- __ASSERT(xlat_use_count[i] > 0, "usage count underflow");
- return xlat_use_count[i];
+static inline void dec_table_ref(uint64_t *table)
+{
+ int ref_unit = XLAT_REF_COUNT_UNIT;
+
+ table_usage(table, -ref_unit);
}
static inline bool is_table_unused(uint64_t *table)
{
- return table_usage(table, 0) == 1;
+ return (table_usage(table, 0) & XLAT_PTE_COUNT_MASK) == 0;
+}
+
+static inline bool is_table_single_referenced(uint64_t *table)
+{
+ return table_usage(table, 0) < (2 * XLAT_REF_COUNT_UNIT);
+}
+
+#ifdef CONFIG_TEST
+/* Hooks to let test code peek at table states */
+
+int arm64_mmu_nb_free_tables(void)
+{
+ int count = 0;
+
+ for (int i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) {
+ if (xlat_use_count[i] == 0) {
+ count++;
+ }
+ }
+
+ return count;
}
+int arm64_mmu_tables_total_usage(void)
+{
+ int count = 0;
+
+ for (int i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) {
+ count += xlat_use_count[i];
+ }
+
+ return count;
+}
+
+#endif /* CONFIG_TEST */
+
static inline bool is_free_desc(uint64_t desc)
{
+ return desc == 0;
+}
+
+static inline bool is_inval_desc(uint64_t desc)
+{
+ /* invalid descriptors aren't necessarily free */
return (desc & PTE_DESC_TYPE_MASK) == PTE_INVALID_DESC;
}
@@ -102,15 +164,15 @@ static inline bool is_block_desc(uint64_t desc)
static inline uint64_t *pte_desc_table(uint64_t desc)
{
- uint64_t address = desc & GENMASK(47, PAGE_SIZE_SHIFT);
+ uint64_t address = desc & PTE_PHYSADDR_MASK;
+ /* tables use a 1:1 physical:virtual mapping */
return (uint64_t *)address;
}
static inline bool is_desc_block_aligned(uint64_t desc, unsigned int level_size)
{
- uint64_t mask = GENMASK(47, PAGE_SIZE_SHIFT);
- bool aligned = !((desc & mask) & (level_size - 1));
+ bool aligned = (desc & PTE_PHYSADDR_MASK & (level_size - 1)) == 0;
if (!aligned) {
MMU_DEBUG("misaligned desc 0x%016llx for block size 0x%x\n",
@@ -123,7 +185,7 @@ static inline bool is_desc_block_aligned(uint64_t desc, unsigned int level_size)
static inline bool is_desc_superset(uint64_t desc1, uint64_t desc2,
unsigned int level)
{
- uint64_t mask = DESC_ATTRS_MASK | GENMASK(47, LEVEL_TO_VA_SIZE_SHIFT(level));
+ uint64_t mask = DESC_ATTRS_MASK | GENMASK64(47, LEVEL_TO_VA_SIZE_SHIFT(level));
return (desc1 & mask) == (desc2 & mask);
}
@@ -139,6 +201,8 @@ static void debug_show_pte(uint64_t *pte, unsigned int level)
return;
}
+ MMU_DEBUG("0x%016llx ", *pte);
+
if (is_table_desc(*pte, level)) {
uint64_t *table = pte_desc_table(*pte);
@@ -148,8 +212,10 @@ static void debug_show_pte(uint64_t *pte, unsigned int level)
if (is_block_desc(*pte)) {
MMU_DEBUG("[Block] ");
- } else {
+ } else if (!is_inval_desc(*pte)) {
MMU_DEBUG("[Page] ");
+ } else {
+ MMU_DEBUG("[paged-out] ");
}
uint8_t mem_type = (*pte >> 2) & MT_TYPE_MASK;
@@ -161,6 +227,7 @@ static void debug_show_pte(uint64_t *pte, unsigned int level)
MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_ELx) ? "-ELx" : "-ELh");
MMU_DEBUG((*pte & PTE_BLOCK_DESC_PXN) ? "-PXN" : "-PX");
MMU_DEBUG((*pte & PTE_BLOCK_DESC_UXN) ? "-UXN" : "-UX");
+ MMU_DEBUG((*pte & PTE_SW_WRITABLE) ? "-WRITABLE" : "");
MMU_DEBUG("\n");
}
#else
@@ -176,8 +243,15 @@ static void set_pte_table_desc(uint64_t *pte, uint64_t *table, unsigned int leve
static void set_pte_block_desc(uint64_t *pte, uint64_t desc, unsigned int level)
{
- if (desc) {
- desc |= (level == XLAT_LAST_LEVEL) ? PTE_PAGE_DESC : PTE_BLOCK_DESC;
+ if (level != XLAT_LAST_LEVEL) {
+ desc |= PTE_BLOCK_DESC;
+ } else if (!IS_ENABLED(CONFIG_DEMAND_PAGING) || (desc & PTE_BLOCK_DESC_AF) != 0) {
+ desc |= PTE_PAGE_DESC;
+ } else {
+ /*
+ * Demand paging configured and AF unset: leave the descriptor
+ * type as "invalid", as done in arch_mem_page_out().
+ */
}
*pte = desc;
debug_show_pte(pte, level);
@@ -225,20 +299,17 @@ static uint64_t *expand_to_table(uint64_t *pte, unsigned int level)
/* Link the new table in place of the pte it replaces */
set_pte_table_desc(pte, table, level);
- table_usage(table, 1);
return table;
}
-static int set_mapping(struct arm_mmu_ptables *ptables,
- uintptr_t virt, size_t size,
+static int set_mapping(uint64_t *top_table, uintptr_t virt, size_t size,
uint64_t desc, bool may_overwrite)
{
- uint64_t *pte, *ptes[XLAT_LAST_LEVEL + 1];
+ uint64_t *table = top_table;
+ uint64_t *pte;
uint64_t level_size;
- uint64_t *table = ptables->base_xlat_table;
unsigned int level = BASE_XLAT_LEVEL;
- int ret = 0;
while (size) {
__ASSERT(level <= XLAT_LAST_LEVEL,
@@ -246,7 +317,6 @@ static int set_mapping(struct arm_mmu_ptables *ptables,
/* Locate PTE for given virtual address and page table level */
pte = &table[XLAT_TABLE_VA_IDX(virt, level)];
- ptes[level] = pte;
if (is_table_desc(*pte, level)) {
/* Move to the next translation table level */
@@ -260,8 +330,7 @@ static int set_mapping(struct arm_mmu_ptables *ptables,
LOG_ERR("entry already in use: "
"level %d pte %p *pte 0x%016llx",
level, pte, *pte);
- ret = -EBUSY;
- break;
+ return -EBUSY;
}
level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level);
@@ -280,8 +349,7 @@ static int set_mapping(struct arm_mmu_ptables *ptables,
/* Range doesn't fit, create subtable */
table = expand_to_table(pte, level);
if (!table) {
- ret = -ENOMEM;
- break;
+ return -ENOMEM;
}
level++;
continue;
@@ -291,32 +359,57 @@ static int set_mapping(struct arm_mmu_ptables *ptables,
if (is_free_desc(*pte)) {
table_usage(pte, 1);
}
- if (!desc) {
- table_usage(pte, -1);
- }
- /* Create (or erase) block/page descriptor */
+ /* Create block/page descriptor */
set_pte_block_desc(pte, desc, level);
- /* recursively free unused tables if any */
- while (level != BASE_XLAT_LEVEL &&
- is_table_unused(pte)) {
- free_table(pte);
- pte = ptes[--level];
- set_pte_block_desc(pte, 0, level);
- table_usage(pte, -1);
- }
-
move_on:
virt += level_size;
- desc += desc ? level_size : 0;
+ desc += level_size;
size -= level_size;
/* Range is mapped, start again for next range */
- table = ptables->base_xlat_table;
+ table = top_table;
level = BASE_XLAT_LEVEL;
}
- return ret;
+ return 0;
+}
+
+static void del_mapping(uint64_t *table, uintptr_t virt, size_t size,
+ unsigned int level)
+{
+ size_t step, level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level);
+ uint64_t *pte, *subtable;
+
+ for ( ; size; virt += step, size -= step) {
+ step = level_size - (virt & (level_size - 1));
+ if (step > size) {
+ step = size;
+ }
+ pte = &table[XLAT_TABLE_VA_IDX(virt, level)];
+
+ if (is_free_desc(*pte)) {
+ continue;
+ }
+
+ if (step != level_size && is_block_desc(*pte)) {
+ /* need to split this block mapping */
+ expand_to_table(pte, level);
+ }
+
+ if (is_table_desc(*pte, level)) {
+ subtable = pte_desc_table(*pte);
+ del_mapping(subtable, virt, step, level + 1);
+ if (!is_table_unused(subtable)) {
+ continue;
+ }
+ dec_table_ref(subtable);
+ }
+
+ /* free this entry */
+ *pte = 0;
+ table_usage(pte, -1);
+ }
}
#ifdef CONFIG_USERSPACE
@@ -324,7 +417,7 @@ static int set_mapping(struct arm_mmu_ptables *ptables,
static uint64_t *dup_table(uint64_t *src_table, unsigned int level)
{
uint64_t *dst_table = new_table();
- int i;
+ int i, usage_count = 0;
if (!dst_table) {
return NULL;
@@ -347,13 +440,14 @@ static uint64_t *dup_table(uint64_t *src_table, unsigned int level)
}
dst_table[i] = src_table[i];
- if (is_table_desc(src_table[i], level)) {
- table_usage(pte_desc_table(src_table[i]), 1);
+ if (is_table_desc(dst_table[i], level)) {
+ inc_table_ref(pte_desc_table(dst_table[i]));
}
if (!is_free_desc(dst_table[i])) {
- table_usage(dst_table, 1);
+ usage_count++;
}
}
+ table_usage(dst_table, usage_count);
return dst_table;
}
@@ -388,8 +482,7 @@ static int privatize_table(uint64_t *dst_table, uint64_t *src_table,
return -ENOMEM;
}
set_pte_table_desc(&dst_table[i], dst_subtable, level);
- table_usage(dst_subtable, 1);
- table_usage(src_subtable, -1);
+ dec_table_ref(src_subtable);
}
ret = privatize_table(dst_subtable, src_subtable,
@@ -433,18 +526,23 @@ static int privatize_page_range(struct arm_mmu_ptables *dst_pt,
static void discard_table(uint64_t *table, unsigned int level)
{
unsigned int i;
+ int free_count = 0;
for (i = 0U; i < Ln_XLAT_NUM_ENTRIES; i++) {
if (is_table_desc(table[i], level)) {
- table_usage(pte_desc_table(table[i]), -1);
- discard_table(pte_desc_table(table[i]), level + 1);
+ uint64_t *subtable = pte_desc_table(table[i]);
+
+ if (is_table_single_referenced(subtable)) {
+ discard_table(subtable, level + 1);
+ }
+ dec_table_ref(subtable);
}
if (!is_free_desc(table[i])) {
table[i] = 0U;
- table_usage(table, -1);
+ free_count++;
}
}
- free_table(table);
+ table_usage(table, -free_count);
}
static int globalize_table(uint64_t *dst_table, uint64_t *src_table,
@@ -466,6 +564,20 @@ static int globalize_table(uint64_t *dst_table, uint64_t *src_table,
continue;
}
+ if (is_free_desc(src_table[i]) &&
+ is_table_desc(dst_table[i], level)) {
+ uint64_t *subtable = pte_desc_table(dst_table[i]);
+
+ del_mapping(subtable, virt, step, level + 1);
+ if (is_table_unused(subtable)) {
+ /* unreference the empty table */
+ dst_table[i] = 0;
+ table_usage(dst_table, -1);
+ dec_table_ref(subtable);
+ }
+ continue;
+ }
+
if (step != level_size) {
/* boundary falls in the middle of this pte */
__ASSERT(is_table_desc(src_table[i], level),
@@ -497,15 +609,15 @@ static int globalize_table(uint64_t *dst_table, uint64_t *src_table,
table_usage(dst_table, -1);
}
if (is_table_desc(src_table[i], level)) {
- table_usage(pte_desc_table(src_table[i]), 1);
+ inc_table_ref(pte_desc_table(src_table[i]));
}
dst_table[i] = src_table[i];
debug_show_pte(&dst_table[i], level);
if (old_table) {
/* we can discard the whole branch */
- table_usage(old_table, -1);
discard_table(old_table, level + 1);
+ dec_table_ref(old_table);
}
}
@@ -563,6 +675,8 @@ static uint64_t get_region_desc(uint32_t attrs)
/* AP bits for Data access permission */
desc |= (attrs & MT_RW) ? PTE_BLOCK_DESC_AP_RW : PTE_BLOCK_DESC_AP_RO;
+ desc |= (IS_ENABLED(CONFIG_DEMAND_PAGING) && (attrs & MT_RW)) ?
+ PTE_SW_WRITABLE : 0;
/* Mirror permissions to EL0 */
desc |= (attrs & MT_RW_AP_ELx) ?
@@ -570,6 +684,11 @@ static uint64_t get_region_desc(uint32_t attrs)
/* the access flag */
desc |= PTE_BLOCK_DESC_AF;
+ if (IS_ENABLED(CONFIG_DEMAND_PAGING) && (attrs & MT_PAGED_OUT) != 0) {
+ /* set it up for demand paging like arch_mem_page_out() */
+ desc &= ~PTE_BLOCK_DESC_AF;
+ desc |= PTE_BLOCK_DESC_AP_RO;
+ }
/* memory attribute index field */
mem_type = MT_TYPE(attrs);
@@ -592,17 +711,20 @@ static uint64_t get_region_desc(uint32_t attrs)
case MT_NORMAL_NC:
case MT_NORMAL:
/* Make Normal RW memory as execute never */
- if ((attrs & MT_RW) || (attrs & MT_P_EXECUTE_NEVER))
+ if ((attrs & MT_RW) || (attrs & MT_P_EXECUTE_NEVER)) {
desc |= PTE_BLOCK_DESC_PXN;
+ }
if (((attrs & MT_RW) && (attrs & MT_RW_AP_ELx)) ||
- (attrs & MT_U_EXECUTE_NEVER))
+ (attrs & MT_U_EXECUTE_NEVER)) {
desc |= PTE_BLOCK_DESC_UXN;
+ }
- if (mem_type == MT_NORMAL)
+ if (mem_type == MT_NORMAL) {
desc |= PTE_BLOCK_DESC_INNER_SHARE;
- else
+ } else {
desc |= PTE_BLOCK_DESC_OUTER_SHARE;
+ }
}
/* non-Global bit */
@@ -625,7 +747,7 @@ static int __add_map(struct arm_mmu_ptables *ptables, const char *name,
__ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
"address/size are not page aligned\n");
desc |= phys;
- return set_mapping(ptables, virt, size, desc, may_overwrite);
+ return set_mapping(ptables->base_xlat_table, virt, size, desc, may_overwrite);
}
static int add_map(struct arm_mmu_ptables *ptables, const char *name,
@@ -640,20 +762,18 @@ static int add_map(struct arm_mmu_ptables *ptables, const char *name,
return ret;
}
-static int remove_map(struct arm_mmu_ptables *ptables, const char *name,
- uintptr_t virt, size_t size)
+static void remove_map(struct arm_mmu_ptables *ptables, const char *name,
+ uintptr_t virt, size_t size)
{
k_spinlock_key_t key;
- int ret;
MMU_DEBUG("unmmap [%s]: virt %lx size %lx\n", name, virt, size);
__ASSERT(((virt | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
"address/size are not page aligned\n");
key = k_spin_lock(&xlat_lock);
- ret = set_mapping(ptables, virt, size, 0, true);
+ del_mapping(ptables->base_xlat_table, virt, size, BASE_XLAT_LEVEL);
k_spin_unlock(&xlat_lock, key);
- return ret;
}
static void invalidate_tlb_all(void)
@@ -663,6 +783,12 @@ static void invalidate_tlb_all(void)
: : : "memory");
}
+static inline void invalidate_tlb_page(uintptr_t virt)
+{
+ /* to be refined */
+ invalidate_tlb_all();
+}
+
/* zephyr execution regions with appropriate attributes */
struct arm_mmu_flat_range {
@@ -752,8 +878,9 @@ static void setup_page_tables(struct arm_mmu_ptables *ptables)
uintptr_t max_va = 0, max_pa = 0;
MMU_DEBUG("xlat tables:\n");
- for (index = 0U; index < CONFIG_MAX_XLAT_TABLES; index++)
+ for (index = 0U; index < CONFIG_MAX_XLAT_TABLES; index++) {
MMU_DEBUG("%d: %p\n", index, xlat_tables + index * Ln_XLAT_NUM_ENTRIES);
+ }
for (index = 0U; index < mmu_config.num_regions; index++) {
region = &mmu_config.mmu_regions[index];
@@ -892,7 +1019,7 @@ void z_arm64_mm_init(bool is_primary_core)
enable_mmu_el1(&kernel_ptables, flags);
}
-static void sync_domains(uintptr_t virt, size_t size)
+static void sync_domains(uintptr_t virt, size_t size, const char *name)
{
#ifdef CONFIG_USERSPACE
sys_snode_t *node;
@@ -906,7 +1033,7 @@ static void sync_domains(uintptr_t virt, size_t size)
domain = CONTAINER_OF(node, struct arch_mem_domain, node);
domain_ptables = &domain->ptables;
ret = globalize_page_range(domain_ptables, &kernel_ptables,
- virt, size, "generic");
+ virt, size, name);
if (ret) {
LOG_ERR("globalize_page_range() returned %d", ret);
}
@@ -975,6 +1102,10 @@ static int __arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flag
entry_flags |= MT_RW_AP_ELx;
}
+ if (IS_ENABLED(CONFIG_DEMAND_PAGING) && (flags & K_MEM_MAP_UNPAGED) != 0) {
+ entry_flags |= MT_PAGED_OUT;
+ }
+
return add_map(ptables, "generic", phys, (uintptr_t)virt, size, entry_flags);
}
@@ -988,7 +1119,7 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
} else {
uint32_t mem_flags = flags & K_MEM_CACHE_MASK;
- sync_domains((uintptr_t)virt, size);
+ sync_domains((uintptr_t)virt, size, "mem_map");
invalidate_tlb_all();
switch (mem_flags) {
@@ -1005,14 +1136,9 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
void arch_mem_unmap(void *addr, size_t size)
{
- int ret = remove_map(&kernel_ptables, "generic", (uintptr_t)addr, size);
-
- if (ret) {
- LOG_ERR("remove_map() returned %d", ret);
- } else {
- sync_domains((uintptr_t)addr, size);
- invalidate_tlb_all();
- }
+ remove_map(&kernel_ptables, "generic", (uintptr_t)addr, size);
+ sync_domains((uintptr_t)addr, size, "mem_unmap");
+ invalidate_tlb_all();
}
int arch_page_phys_get(void *virt, uintptr_t *phys)
@@ -1031,7 +1157,7 @@ int arch_page_phys_get(void *virt, uintptr_t *phys)
}
if (phys) {
- *phys = par & GENMASK(47, 12);
+ *phys = par & GENMASK64(47, 12);
}
return 0;
}
@@ -1230,6 +1356,7 @@ static void z_arm64_swap_ptables(struct k_thread *incoming)
return; /* Already the right tables */
}
+ MMU_DEBUG("TTBR0 switch from %#llx to %#llx\n", curr_ttbr0, new_ttbr0);
z_arm64_set_ttbr0(new_ttbr0);
if (get_asid(curr_ttbr0) == get_asid(new_ttbr0)) {
@@ -1241,8 +1368,9 @@ void z_arm64_thread_mem_domains_init(struct k_thread *incoming)
{
struct arm_mmu_ptables *ptables;
- if ((incoming->base.user_options & K_USER) == 0)
+ if ((incoming->base.user_options & K_USER) == 0) {
return;
+ }
ptables = incoming->arch.ptables;
@@ -1258,3 +1386,311 @@ void z_arm64_swap_mem_domains(struct k_thread *incoming)
}
#endif /* CONFIG_USERSPACE */
+
+#ifdef CONFIG_DEMAND_PAGING
+
+static uint64_t *get_pte_location(struct arm_mmu_ptables *ptables,
+ uintptr_t virt)
+{
+ uint64_t *pte;
+ uint64_t *table = ptables->base_xlat_table;
+ unsigned int level = BASE_XLAT_LEVEL;
+
+ for (;;) {
+ pte = &table[XLAT_TABLE_VA_IDX(virt, level)];
+ if (level == XLAT_LAST_LEVEL) {
+ return pte;
+ }
+
+ if (is_table_desc(*pte, level)) {
+ level++;
+ table = pte_desc_table(*pte);
+ continue;
+ }
+
+ /* anything else is unexpected */
+ return NULL;
+ }
+}
+
+void arch_mem_page_out(void *addr, uintptr_t location)
+{
+ uintptr_t virt = (uintptr_t)addr;
+ uint64_t *pte = get_pte_location(&kernel_ptables, virt);
+ uint64_t desc;
+
+ __ASSERT(pte != NULL, "");
+ desc = *pte;
+
+ /* mark the entry invalid to the hardware */
+ desc &= ~PTE_DESC_TYPE_MASK;
+ desc |= PTE_INVALID_DESC;
+
+ /* store the location token in place of the physical address */
+ __ASSERT((location & ~PTE_PHYSADDR_MASK) == 0, "");
+ desc &= ~PTE_PHYSADDR_MASK;
+ desc |= location;
+
+ /*
+ * The location token may be 0. Make sure the whole descriptor
+ * doesn't end up being zero as this would be seen as a free entry.
+ */
+ desc |= PTE_BLOCK_DESC_AP_RO;
+
+ *pte = desc;
+ MMU_DEBUG("page_out: virt=%#lx location=%#lx\n", virt, location);
+ debug_show_pte(pte, XLAT_LAST_LEVEL);
+
+ sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "page_out");
+ invalidate_tlb_page(virt);
+}
+
+void arch_mem_page_in(void *addr, uintptr_t phys)
+{
+ uintptr_t virt = (uintptr_t)addr;
+ uint64_t *pte = get_pte_location(&kernel_ptables, virt);
+ uint64_t desc;
+
+ __ASSERT((phys & ~PTE_PHYSADDR_MASK) == 0, "");
+
+ __ASSERT(pte != NULL, "");
+ desc = *pte;
+ __ASSERT(!is_free_desc(desc), "");
+
+ /* mark the entry valid again to the hardware */
+ desc &= ~PTE_DESC_TYPE_MASK;
+ desc |= PTE_PAGE_DESC;
+
+ /* store the physical address */
+ desc &= ~PTE_PHYSADDR_MASK;
+ desc |= phys;
+
+ /* mark as clean */
+ desc |= PTE_BLOCK_DESC_AP_RO;
+
+ /* and make it initially inaccessible to track unaccessed pages */
+ desc &= ~PTE_BLOCK_DESC_AF;
+
+ *pte = desc;
+ MMU_DEBUG("page_in: virt=%#lx phys=%#lx\n", virt, phys);
+ debug_show_pte(pte, XLAT_LAST_LEVEL);
+
+ sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "page_in");
+ invalidate_tlb_page(virt);
+}
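+
+/*
+ * Summary of the PTE states used for demand paging (as set up by
+ * arch_mem_page_out()/arch_mem_page_in() above and consumed by the
+ * fault handler below):
+ *
+ *   invalid type, location token stored    page is paged out
+ *   valid, AF clear, AP_RO set             paged in, not accessed yet
+ *   valid, AF set, AP_RO set               accessed, clean
+ *   valid, AF set, AP_RO clear             accessed, dirty (requires PTE_SW_WRITABLE)
+ */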
+
+enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location)
+{
+ uintptr_t virt = (uintptr_t)addr;
+ uint64_t *pte = get_pte_location(&kernel_ptables, virt);
+ uint64_t desc;
+ enum arch_page_location status;
+
+ if (!pte) {
+ return ARCH_PAGE_LOCATION_BAD;
+ }
+ desc = *pte;
+ if (is_free_desc(desc)) {
+ return ARCH_PAGE_LOCATION_BAD;
+ }
+
+ switch (desc & PTE_DESC_TYPE_MASK) {
+ case PTE_PAGE_DESC:
+ status = ARCH_PAGE_LOCATION_PAGED_IN;
+ break;
+ case PTE_INVALID_DESC:
+ status = ARCH_PAGE_LOCATION_PAGED_OUT;
+ break;
+ default:
+ return ARCH_PAGE_LOCATION_BAD;
+ }
+
+ *location = desc & PTE_PHYSADDR_MASK;
+ return status;
+}
+
+uintptr_t arch_page_info_get(void *addr, uintptr_t *phys, bool clear_accessed)
+{
+ uintptr_t virt = (uintptr_t)addr;
+ uint64_t *pte = get_pte_location(&kernel_ptables, virt);
+ uint64_t desc;
+ uintptr_t status = 0;
+
+ if (!pte) {
+ return ARCH_DATA_PAGE_NOT_MAPPED;
+ }
+ desc = *pte;
+ if (is_free_desc(desc)) {
+ return ARCH_DATA_PAGE_NOT_MAPPED;
+ }
+
+ switch (desc & PTE_DESC_TYPE_MASK) {
+ case PTE_PAGE_DESC:
+ status |= ARCH_DATA_PAGE_LOADED;
+ break;
+ case PTE_INVALID_DESC:
+ /* page not loaded */
+ break;
+ default:
+ return ARCH_DATA_PAGE_NOT_MAPPED;
+ }
+
+ if (phys) {
+ *phys = desc & PTE_PHYSADDR_MASK;
+ }
+
+ if ((status & ARCH_DATA_PAGE_LOADED) == 0) {
+ return status;
+ }
+
+ if ((desc & PTE_BLOCK_DESC_AF) != 0) {
+ status |= ARCH_DATA_PAGE_ACCESSED;
+ }
+
+ if ((desc & PTE_BLOCK_DESC_AP_RO) == 0) {
+ status |= ARCH_DATA_PAGE_DIRTY;
+ }
+
+ if (clear_accessed) {
+ desc &= ~PTE_BLOCK_DESC_AF;
+ *pte = desc;
+ MMU_DEBUG("page_info: virt=%#lx (clearing AF)\n", virt);
+ debug_show_pte(pte, XLAT_LAST_LEVEL);
+ sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "unaccessible");
+ invalidate_tlb_page(virt);
+ }
+
+ return status;
+}
+
+#define MT_SCRATCH (MT_NORMAL | MT_P_RW_U_NA | MT_DEFAULT_SECURE_STATE)
+
+void arch_mem_scratch(uintptr_t phys)
+{
+ uintptr_t virt = (uintptr_t)K_MEM_SCRATCH_PAGE;
+ size_t size = CONFIG_MMU_PAGE_SIZE;
+ int ret = add_map(&kernel_ptables, "scratch", phys, virt, size, MT_SCRATCH);
+
+ if (ret) {
+ LOG_ERR("add_map() returned %d", ret);
+ } else {
+ sync_domains(virt, size, "scratch");
+ invalidate_tlb_page(virt);
+ }
+}
+
+static bool do_mem_page_fault(struct arch_esf *esf, uintptr_t virt)
+{
+ /*
+ * The k_mem_page_fault() code expects to be called with IRQs enabled
+ * if the fault happened in a context where IRQs were enabled.
+ */
+ if (arch_irq_unlocked(esf->spsr)) {
+ enable_irq();
+ }
+
+ bool ok = k_mem_page_fault((void *)virt);
+
+ disable_irq();
+ return ok;
+}
+
+/* Called from the fault handler. Returns true if the fault is resolved. */
+bool z_arm64_do_demand_paging(struct arch_esf *esf, uint64_t esr, uint64_t far)
+{
+ uintptr_t virt = far;
+ uint64_t *pte, desc;
+ uintptr_t phys;
+
+ /* filter relevant exceptions */
+ switch (GET_ESR_EC(esr)) {
+ case 0x21: /* insn abort from current EL */
+ case 0x25: /* data abort from current EL */
+ break;
+ default:
+ return false;
+ }
+
+ /* make sure the fault happened in the expected range */
+ if (!IN_RANGE(virt,
+ (uintptr_t)K_MEM_VIRT_RAM_START,
+ ((uintptr_t)K_MEM_VIRT_RAM_END - 1))) {
+ return false;
+ }
+
+ virt = ROUND_DOWN(virt, CONFIG_MMU_PAGE_SIZE);
+
+ pte = get_pte_location(&kernel_ptables, virt);
+ if (!pte) {
+ /* page mapping doesn't exist, let the core code do its thing */
+ return do_mem_page_fault(esf, virt);
+ }
+ desc = *pte;
+ if ((desc & PTE_DESC_TYPE_MASK) != PTE_PAGE_DESC) {
+ /* page is not loaded/mapped */
+ return do_mem_page_fault(esf, virt);
+ }
+
+ /*
+ * From this point, we expect only 2 cases:
+ *
+ * 1) the Access Flag was not set so we set it marking the page
+ * as accessed;
+ *
+ * 2) the page was read-only and a write occurred so we clear the
+ * RO flag marking the page dirty.
+ *
+ * We bail out on anything else.
+ *
+ * Fault status codes for Data aborts (DFSC):
+ * 0b0010LL Access flag fault
+ * 0b0011LL Permission fault
+ */
+ uint32_t dfsc = GET_ESR_ISS(esr) & GENMASK(5, 0);
+ bool write = (GET_ESR_ISS(esr) & BIT(6)) != 0; /* WnR */
+
+ if (dfsc == (0b001000 | XLAT_LAST_LEVEL) &&
+ (desc & PTE_BLOCK_DESC_AF) == 0) {
+ /* page is being accessed: set the access flag */
+ desc |= PTE_BLOCK_DESC_AF;
+ if (write) {
+ if ((desc & PTE_SW_WRITABLE) == 0) {
+ /* we don't actually have write permission */
+ return false;
+ }
+ /*
+ * Let's avoid another fault immediately after
+ * returning by making the page read-write right away
+ * effectively marking it "dirty" as well.
+ */
+ desc &= ~PTE_BLOCK_DESC_AP_RO;
+ }
+ *pte = desc;
+ sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "accessed");
+ /* no TLB inval needed after setting AF */
+
+ /* tell the eviction algorithm about it */
+ phys = desc & PTE_PHYSADDR_MASK;
+ k_mem_paging_eviction_accessed(phys);
+ return true;
+ }
+
+ if (dfsc == (0b001100 | XLAT_LAST_LEVEL) && write &&
+ (desc & PTE_BLOCK_DESC_AP_RO) != 0 &&
+ (desc & PTE_SW_WRITABLE) != 0) {
+ /* make it "dirty" i.e. read-write */
+ desc &= ~PTE_BLOCK_DESC_AP_RO;
+ *pte = desc;
+ sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "dirtied");
+ invalidate_tlb_page(virt);
+
+ /* this also counts as an access refresh */
+ phys = desc & PTE_PHYSADDR_MASK;
+ k_mem_paging_eviction_accessed(phys);
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_DEMAND_PAGING */
diff --git a/arch/arm64/core/mmu.h b/arch/arm64/core/mmu.h
index fa43a1fc9f6..47be42db635 100644
--- a/arch/arm64/core/mmu.h
+++ b/arch/arm64/core/mmu.h
@@ -93,3 +93,88 @@
#define DESC_ATTRS_LOWER_MASK GENMASK(11, 2)
#define DESC_ATTRS_MASK (DESC_ATTRS_UPPER_MASK | DESC_ATTRS_LOWER_MASK)
+
+/*
+ * PTE descriptor can be Block descriptor or Table descriptor
+ * or Page descriptor.
+ */
+#define PTE_DESC_TYPE_MASK 3ULL
+#define PTE_BLOCK_DESC 1ULL
+#define PTE_TABLE_DESC 3ULL
+#define PTE_PAGE_DESC 3ULL
+#define PTE_INVALID_DESC 0ULL
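+
+/*
+ * Note on why PTE_TABLE_DESC and PTE_PAGE_DESC share the value 3: in the
+ * AArch64 VMSA, descriptor bits [1:0] encode 0b00/0b10 = invalid,
+ * 0b01 = block (not valid at the last level), and 0b11 = table at any
+ * level above the last but page at the last level, so the same encoding
+ * is disambiguated by the translation level it appears at.
+ */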
+
+/*
+ * Block and Page descriptor attributes fields
+ */
+#define PTE_BLOCK_DESC_MEMTYPE(x) ((x) << 2)
+#define PTE_BLOCK_DESC_NS (1ULL << 5)
+#define PTE_BLOCK_DESC_AP_ELx (1ULL << 6)
+#define PTE_BLOCK_DESC_AP_EL_HIGHER (0ULL << 6)
+#define PTE_BLOCK_DESC_AP_RO (1ULL << 7)
+#define PTE_BLOCK_DESC_AP_RW (0ULL << 7)
+#define PTE_BLOCK_DESC_NON_SHARE (0ULL << 8)
+#define PTE_BLOCK_DESC_OUTER_SHARE (2ULL << 8)
+#define PTE_BLOCK_DESC_INNER_SHARE (3ULL << 8)
+#define PTE_BLOCK_DESC_AF (1ULL << 10)
+#define PTE_BLOCK_DESC_NG (1ULL << 11)
+#define PTE_BLOCK_DESC_PXN (1ULL << 53)
+#define PTE_BLOCK_DESC_UXN (1ULL << 54)
+
+/*
+ * Descriptor physical address field bits
+ */
+#define PTE_PHYSADDR_MASK GENMASK64(47, PAGE_SIZE_SHIFT)
+
+/*
+ * Descriptor bits 58 to 55 are defined as "Reserved for Software Use".
+ *
+ * When using demand paging, RW memory is marked RO to trap the first write
+ * for dirty page tracking. Bit 55 indicates if memory is actually writable.
+ */
+#define PTE_SW_WRITABLE (1ULL << 55)
+
+/*
+ * TCR definitions.
+ */
+#define TCR_EL1_IPS_SHIFT 32U
+#define TCR_EL2_PS_SHIFT 16U
+#define TCR_EL3_PS_SHIFT 16U
+
+#define TCR_T0SZ_SHIFT 0U
+#define TCR_T0SZ(x) ((64 - (x)) << TCR_T0SZ_SHIFT)
+
+#define TCR_IRGN_NC (0ULL << 8)
+#define TCR_IRGN_WBWA (1ULL << 8)
+#define TCR_IRGN_WT (2ULL << 8)
+#define TCR_IRGN_WBNWA (3ULL << 8)
+#define TCR_IRGN_MASK (3ULL << 8)
+#define TCR_ORGN_NC (0ULL << 10)
+#define TCR_ORGN_WBWA (1ULL << 10)
+#define TCR_ORGN_WT (2ULL << 10)
+#define TCR_ORGN_WBNWA (3ULL << 10)
+#define TCR_ORGN_MASK (3ULL << 10)
+#define TCR_SHARED_NON (0ULL << 12)
+#define TCR_SHARED_OUTER (2ULL << 12)
+#define TCR_SHARED_INNER (3ULL << 12)
+#define TCR_TG0_4K (0ULL << 14)
+#define TCR_TG0_64K (1ULL << 14)
+#define TCR_TG0_16K (2ULL << 14)
+#define TCR_EPD1_DISABLE (1ULL << 23)
+#define TCR_TG1_16K (1ULL << 30)
+#define TCR_TG1_4K (2ULL << 30)
+#define TCR_TG1_64K (3ULL << 30)
+
+#define TCR_PS_BITS_4GB 0x0ULL
+#define TCR_PS_BITS_64GB 0x1ULL
+#define TCR_PS_BITS_1TB 0x2ULL
+#define TCR_PS_BITS_4TB 0x3ULL
+#define TCR_PS_BITS_16TB 0x4ULL
+#define TCR_PS_BITS_256TB 0x5ULL
+
+/*
+ * ARM guarantees at least 8 ASID bits.
+ * We may have more available, but do not make use of them for the time being.
+ */
+#define VM_ASID_BITS 8
+#define TTBR_ASID_SHIFT 48
diff --git a/arch/arm64/core/offsets/offsets.c b/arch/arm64/core/offsets/offsets.c
index 4268692c498..772f0df3a8d 100644
--- a/arch/arm64/core/offsets/offsets.c
+++ b/arch/arm64/core/offsets/offsets.c
@@ -40,7 +40,7 @@ GEN_NAMED_OFFSET_SYM(_callee_saved_t, x27, x27_x28);
GEN_NAMED_OFFSET_SYM(_callee_saved_t, x29, x29_sp_el0);
GEN_NAMED_OFFSET_SYM(_callee_saved_t, sp_elx, sp_elx_lr);
-#ifdef CONFIG_ARM64_ENABLE_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
GEN_NAMED_OFFSET_SYM(_esf_t, fp, fp);
#endif
diff --git a/arch/arm64/core/paging.h b/arch/arm64/core/paging.h
new file mode 100644
index 00000000000..85e3ef7d33f
--- /dev/null
+++ b/arch/arm64/core/paging.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2024 BayLibre SAS
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef Z_ARM64_PAGING_H
+#define Z_ARM64_PAGING_H
+
+bool z_arm64_do_demand_paging(struct arch_esf *esf, uint64_t esr, uint64_t far);
+
+#endif /* Z_ARM64_PAGING_H */
diff --git a/arch/arm64/core/prep_c.c b/arch/arm64/core/prep_c.c
index bfd3b7c1eaa..11c7d2b112b 100644
--- a/arch/arm64/core/prep_c.c
+++ b/arch/arm64/core/prep_c.c
@@ -16,34 +16,13 @@
#include
#include
+#include <zephyr/cache.h>
+#include <zephyr/platform/hooks.h>
extern void z_arm64_mm_init(bool is_primary_core);
__weak void z_arm64_mm_init(bool is_primary_core) { }
-/*
- * These simple memset/memcpy alternatives are necessary as the optimized
- * ones depend on the MMU to be active (see commit c5b898743a20).
- */
-void z_early_memset(void *dst, int c, size_t n)
-{
- uint8_t *d = dst;
-
- while (n--) {
- *d++ = c;
- }
-}
-
-void z_early_memcpy(void *dst, const void *src, size_t n)
-{
- uint8_t *d = dst;
- const uint8_t *s = src;
-
- while (n--) {
- *d++ = *s++;
- }
-}
-
/**
*
* @brief Prepare to and run C code
@@ -53,6 +32,10 @@ void z_early_memcpy(void *dst, const void *src, size_t n)
*/
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
+
/* Initialize tpidrro_el0 with our struct _cpu instance address */
write_tpidrro_el0((uintptr_t)&_kernel.cpus[0]);
@@ -75,6 +58,9 @@ extern FUNC_NORETURN void arch_secondary_cpu_init(void);
void z_arm64_secondary_prep_c(void)
{
arch_secondary_cpu_init();
+#if defined(CONFIG_ARCH_CACHE)
+ arch_cache_init();
+#endif
CODE_UNREACHABLE;
}
diff --git a/arch/arm64/core/reboot.c b/arch/arm64/core/reboot.c
new file mode 100644
index 00000000000..064b44f93bf
--- /dev/null
+++ b/arch/arm64/core/reboot.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2024 NXP
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <zephyr/kernel.h>
+#include <zephyr/drivers/pm_cpu_ops.h>
+#include <zephyr/logging/log.h>
+#include <zephyr/sys/reboot.h>
+
+LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
+
+#ifdef CONFIG_PM_CPU_OPS_PSCI
+void __weak sys_arch_reboot(int type)
+{
+ unsigned char reset_type;
+
+ if (type == SYS_REBOOT_COLD) {
+ reset_type = SYS_COLD_RESET;
+ } else if (type == SYS_REBOOT_WARM) {
+ reset_type = SYS_WARM_RESET;
+ } else {
+ LOG_ERR("Invalid reboot type");
+ return;
+ }
+ pm_system_reset(reset_type);
+}
+#else
+void __weak sys_arch_reboot(int type)
+{
+ LOG_WRN("%s is not implemented", __func__);
+ ARG_UNUSED(type);
+}
+#endif
diff --git a/arch/arm64/core/reset.S b/arch/arm64/core/reset.S
index 5e406bea132..a01139ad700 100644
--- a/arch/arm64/core/reset.S
+++ b/arch/arm64/core/reset.S
@@ -7,7 +7,7 @@
#include
#include
#include
-#include
+#include
#include "boot.h"
#include "macro_priv.inc"
diff --git a/arch/arm64/core/smp.c b/arch/arm64/core/smp.c
index 8777c400766..bbb7f963431 100644
--- a/arch/arm64/core/smp.c
+++ b/arch/arm64/core/smp.c
@@ -16,6 +16,7 @@
#include
#include
#include
+#include <ipi.h>
#include
#include
#include
@@ -180,7 +181,7 @@ void arch_secondary_cpu_init(int cpu_num)
#ifdef CONFIG_SMP
-static void broadcast_ipi(unsigned int ipi)
+static void send_ipi(unsigned int ipi, uint32_t cpu_bitmap)
{
uint64_t mpidr = MPIDR_TO_CORE(GET_MPIDR());
@@ -190,6 +191,10 @@ static void broadcast_ipi(unsigned int ipi)
unsigned int num_cpus = arch_num_cpus();
for (int i = 0; i < num_cpus; i++) {
+ if ((cpu_bitmap & BIT(i)) == 0) {
+ continue;
+ }
+
uint64_t target_mpidr = cpu_map[i];
uint8_t aff0;
@@ -209,10 +214,14 @@ void sched_ipi_handler(const void *unused)
z_sched_ipi();
}
-/* arch implementation of sched_ipi */
-void arch_sched_ipi(void)
+void arch_sched_broadcast_ipi(void)
+{
+ send_ipi(SGI_SCHED_IPI, IPI_ALL_CPUS_MASK);
+}
+
+void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
- broadcast_ipi(SGI_SCHED_IPI);
+ send_ipi(SGI_SCHED_IPI, cpu_bitmap);
}
#ifdef CONFIG_USERSPACE
@@ -232,7 +241,7 @@ void mem_cfg_ipi_handler(const void *unused)
void z_arm64_mem_cfg_ipi(void)
{
- broadcast_ipi(SGI_MMCFG_IPI);
+ send_ipi(SGI_MMCFG_IPI, IPI_ALL_CPUS_MASK);
}
#endif
@@ -302,6 +311,5 @@ int arch_smp_init(void)
return 0;
}
-SYS_INIT(arch_smp_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
diff --git a/arch/arm64/core/thread.c b/arch/arm64/core/thread.c
index a0269501c19..18f49945eda 100644
--- a/arch/arm64/core/thread.c
+++ b/arch/arm64/core/thread.c
@@ -87,7 +87,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
void *p1, void *p2, void *p3)
{
extern void z_arm64_exit_exc(void);
- z_arch_esf_t *pInitCtx;
+ struct arch_esf *pInitCtx;
/*
* Clean the thread->arch to avoid unexpected behavior because the
@@ -102,7 +102,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* dropping into EL0.
*/
- pInitCtx = Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr);
+ pInitCtx = Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr);
pInitCtx->x0 = (uint64_t)entry;
pInitCtx->x1 = (uint64_t)p1;
diff --git a/arch/arm64/core/vector_table.S b/arch/arm64/core/vector_table.S
index 1a1b649d4f2..632304b7029 100644
--- a/arch/arm64/core/vector_table.S
+++ b/arch/arm64/core/vector_table.S
@@ -10,7 +10,7 @@
#include
#include
-#include
+#include
#include
#include
#include
@@ -72,7 +72,7 @@ _ASM_FILE_PROLOGUE
.endif
#endif
-#ifdef CONFIG_ARM64_ENABLE_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
str x29, [sp, ___esf_t_fp_OFFSET]
#endif
@@ -339,7 +339,7 @@ SECTION_FUNC(TEXT, z_arm64_exit_exc)
ldp x16, x17, [sp, ___esf_t_x16_x17_OFFSET]
ldp x18, lr, [sp, ___esf_t_x18_lr_OFFSET]
-#ifdef CONFIG_ARM64_ENABLE_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
ldr x29, [sp, ___esf_t_fp_OFFSET]
#endif
diff --git a/arch/arm64/core/xen/CMakeLists.txt b/arch/arm64/core/xen/CMakeLists.txt
index b0b573b7b9b..cbedb1204e1 100644
--- a/arch/arm64/core/xen/CMakeLists.txt
+++ b/arch/arm64/core/xen/CMakeLists.txt
@@ -4,8 +4,5 @@
# Needed to separate definitions in common Xen headers
zephyr_compile_options($<$<COMPILE_LANGUAGE:ASM>:-D__ASSEMBLY__>)
-# Xen interface version used in headers for correct definition
-zephyr_compile_options(-D__XEN_INTERFACE_VERSION__=0x00040e00)
-
zephyr_library_sources(hypercall.S)
zephyr_library_sources(enlighten.c)
diff --git a/arch/arm64/core/xen/Kconfig b/arch/arm64/core/xen/Kconfig
index a4bb0be77e9..f860f9f1d4f 100644
--- a/arch/arm64/core/xen/Kconfig
+++ b/arch/arm64/core/xen/Kconfig
@@ -25,3 +25,11 @@ config XEN_DOM0LESS
help
Configures Zephyr as DomU, that can be started on Dom0less
setup.
+
+config XEN_INTERFACE_VERSION
+ hex "Xen interface version"
+ default 0x00040e00
+ depends on XEN
+ help
+ Version of the Xen interface that Zephyr uses to communicate with
+ the hypervisor.
diff --git a/arch/arm64/core/xen/enlighten.c b/arch/arm64/core/xen/enlighten.c
index 164947a09ff..91bf014b762 100644
--- a/arch/arm64/core/xen/enlighten.c
+++ b/arch/arm64/core/xen/enlighten.c
@@ -42,7 +42,7 @@ static int xen_map_shared_info(const shared_info_t *shared_page)
return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
}
-static int xen_enlighten_init(void)
+int xen_enlighten_init(void)
{
int ret = 0;
shared_info_t *info = (shared_info_t *) shared_info_buf;
@@ -66,5 +66,3 @@ static int xen_enlighten_init(void)
return 0;
}
-
-SYS_INIT(xen_enlighten_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
diff --git a/arch/arm64/include/kernel_arch_data.h b/arch/arm64/include/kernel_arch_data.h
index ec781fc902d..8b607c1dbf4 100644
--- a/arch/arm64/include/kernel_arch_data.h
+++ b/arch/arm64/include/kernel_arch_data.h
@@ -36,7 +36,7 @@
extern "C" {
#endif
-typedef struct __esf _esf_t;
+typedef struct arch_esf _esf_t;
typedef struct __basic_sf _basic_sf_t;
#ifdef __cplusplus
diff --git a/arch/arm64/include/kernel_arch_func.h b/arch/arm64/include/kernel_arch_func.h
index a5c3d59d87a..d2c346be1f0 100644
--- a/arch/arm64/include/kernel_arch_func.h
+++ b/arch/arm64/include/kernel_arch_func.h
@@ -28,8 +28,13 @@ extern "C" {
#ifndef _ASMLANGUAGE
+extern void xen_enlighten_init(void);
+
static ALWAYS_INLINE void arch_kernel_init(void)
{
+#ifdef CONFIG_XEN
+ xen_enlighten_init();
+#endif
}
static inline void arch_switch(void *switch_to, void **switched_from)
@@ -43,7 +48,7 @@ static inline void arch_switch(void *switch_to, void **switched_from)
z_arm64_context_switch(new, old);
}
-extern void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf);
+extern void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf);
extern void z_arm64_set_ttbr0(uint64_t ttbr0);
extern void z_arm64_mem_cfg_ipi(void);
diff --git a/arch/arm64/include/offsets_short_arch.h b/arch/arm64/include/offsets_short_arch.h
index abd93bba7ba..11dd5f64256 100644
--- a/arch/arm64/include/offsets_short_arch.h
+++ b/arch/arm64/include/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_ARM64_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARM64_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
#define _thread_offset_to_exception_depth \
(___thread_t_arch_OFFSET + ___thread_arch_t_exception_depth_OFFSET)
diff --git a/arch/common/CMakeLists.txt b/arch/common/CMakeLists.txt
index 78fc6396ed6..48685151ab1 100644
--- a/arch/common/CMakeLists.txt
+++ b/arch/common/CMakeLists.txt
@@ -17,11 +17,18 @@ if(CONFIG_GEN_ISR_TABLES)
)
endif()
+zephyr_library_sources_ifdef(
+ CONFIG_ISR_TABLE_SHELL
+ isr_tables_shell.c
+)
+
zephyr_library_sources_ifdef(
CONFIG_MULTI_LEVEL_INTERRUPTS
multilevel_irq.c
)
+zephyr_library_sources_ifdef(CONFIG_LEGACY_MULTI_LEVEL_TABLE_GENERATION multilevel_irq_legacy.c)
+
zephyr_library_sources_ifdef(CONFIG_SHARED_INTERRUPTS shared_irq.c)
if(NOT CONFIG_ARCH_HAS_TIMING_FUNCTIONS AND
diff --git a/arch/common/Kconfig b/arch/common/Kconfig
index aabc599a4d5..2dcf2f5791b 100644
--- a/arch/common/Kconfig
+++ b/arch/common/Kconfig
@@ -15,3 +15,25 @@ config SEMIHOST
https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc
This option is compatible with hardware and with QEMU, through the
(automatic) use of the -semihosting-config switch when invoking it.
+
+config LEGACY_MULTI_LEVEL_TABLE_GENERATION
+ bool "Auto generates the multi-level interrupt LUT (deprecated)"
+ default y
+ select DEPRECATED
+ depends on MULTI_LEVEL_INTERRUPTS
+ depends on !PLIC
+ depends on !NXP_IRQSTEER
+ depends on !RV32M1_INTMUX
+ depends on !CAVS_ICTL
+ depends on !DW_ICTL_ACE
+ depends on !DW_ICTL
+ help
+ A makeshift Kconfig option to keep generating the multi-level interrupt
+ LUT the legacy way, using DT macros.
+
+config ISR_TABLE_SHELL
+ bool "Shell command to dump the ISR tables"
+ depends on GEN_SW_ISR_TABLE
+ depends on SHELL
+ help
+ This option enables a shell command to dump the ISR tables.
diff --git a/arch/common/isr_tables.c b/arch/common/isr_tables.c
index 050597b7b1d..183f80738fe 100644
--- a/arch/common/isr_tables.c
+++ b/arch/common/isr_tables.c
@@ -15,11 +15,11 @@
struct int_list_header {
uint32_t table_size;
uint32_t offset;
-#if IS_ENABLED(CONFIG_ISR_TABLES_LOCAL_DECLARATION)
+#if defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION)
uint32_t swi_table_entry_size;
uint32_t shared_isr_table_entry_size;
uint32_t shared_isr_client_num_offset;
-#endif /* IS_ENABLED(CONFIG_ISR_TABLES_LOCAL_DECLARATION) */
+#endif /* defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION) */
};
/* These values are not included in the resulting binary, but instead form the
@@ -29,13 +29,13 @@ struct int_list_header {
Z_GENERIC_SECTION(.irq_info) __used struct int_list_header _iheader = {
.table_size = IRQ_TABLE_SIZE,
.offset = CONFIG_GEN_IRQ_START_VECTOR,
-#if IS_ENABLED(CONFIG_ISR_TABLES_LOCAL_DECLARATION)
+#if defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION)
.swi_table_entry_size = sizeof(struct _isr_table_entry),
-#if IS_ENABLED(CONFIG_SHARED_INTERRUPTS)
+#if defined(CONFIG_SHARED_INTERRUPTS)
.shared_isr_table_entry_size = sizeof(struct z_shared_isr_table_entry),
.shared_isr_client_num_offset = offsetof(struct z_shared_isr_table_entry, client_num),
-#endif /* IS_ENABLED(CONFIG_SHARED_INTERRUPTS) */
-#endif /* IS_ENABLED(CONFIG_ISR_TABLES_LOCAL_DECLARATION) */
+#endif /* defined(CONFIG_SHARED_INTERRUPTS) */
+#endif /* defined(CONFIG_ISR_TABLES_LOCAL_DECLARATION) */
};
/* These are placeholder tables. They will be replaced by the real tables
@@ -90,7 +90,7 @@ uintptr_t __irq_vector_table _irq_vector_table[IRQ_TABLE_SIZE] = {
#ifdef CONFIG_GEN_SW_ISR_TABLE
struct _isr_table_entry __sw_isr_table _sw_isr_table[IRQ_TABLE_SIZE] = {
[0 ...(IRQ_TABLE_SIZE - 1)] = {(const void *)0x42,
- (void *)&z_irq_spurious},
+ &z_irq_spurious},
};
#endif
diff --git a/arch/common/isr_tables_shell.c b/arch/common/isr_tables_shell.c
new file mode 100644
index 00000000000..232878f2b29
--- /dev/null
+++ b/arch/common/isr_tables_shell.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2024 Meta Platforms.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+#include
+
+static void dump_isr_table_entry(const struct shell *sh, int idx, struct _isr_table_entry *entry)
+{
+
+ if ((entry->isr == z_irq_spurious) || (entry->isr == NULL)) {
+ return;
+ }
+#ifdef CONFIG_SYMTAB
+ const char *name = symtab_find_symbol_name((uintptr_t)entry->isr, NULL);
+
+ shell_print(sh, "%4d: %s(%p)", idx, name, entry->arg);
+#else
+ shell_print(sh, "%4d: %p(%p)", idx, entry->isr, entry->arg);
+#endif /* CONFIG_SYMTAB */
+}
+
+static int cmd_sw_isr_table(const struct shell *sh, size_t argc, char **argv)
+{
+ shell_print(sh, "_sw_isr_table[%d]\n", IRQ_TABLE_SIZE);
+
+ for (int idx = 0; idx < IRQ_TABLE_SIZE; idx++) {
+ dump_isr_table_entry(sh, idx, &_sw_isr_table[idx]);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_SHARED_INTERRUPTS
+static int cmd_shared_sw_isr_table(const struct shell *sh, size_t argc, char **argv)
+{
+ shell_print(sh, "z_shared_sw_isr_table[%d][%d]\n", IRQ_TABLE_SIZE,
+ CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS);
+
+ for (int idx = 0; idx < IRQ_TABLE_SIZE; idx++) {
+ for (int c = 0; c < z_shared_sw_isr_table[idx].client_num; c++) {
+ dump_isr_table_entry(sh, idx, &z_shared_sw_isr_table[idx].clients[c]);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_SHARED_INTERRUPTS */
+
+SHELL_STATIC_SUBCMD_SET_CREATE(isr_table_cmds,
+ SHELL_CMD_ARG(sw_isr_table, NULL,
+ "Dump _sw_isr_table.\n"
+ "Usage: isr_table sw_isr_table",
+ cmd_sw_isr_table, 1, 0),
+#ifdef CONFIG_SHARED_INTERRUPTS
+ SHELL_CMD_ARG(shared_sw_isr_table, NULL,
+ "Dump z_shared_sw_isr_table.\n"
+ "Usage: isr_table shared_sw_isr_table",
+ cmd_shared_sw_isr_table, 1, 0),
+#endif /* CONFIG_SHARED_INTERRUPTS */
+ SHELL_SUBCMD_SET_END);
+
+SHELL_CMD_ARG_REGISTER(isr_table, &isr_table_cmds, "ISR tables shell command",
+ NULL, 0, 0);
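
For reference, a hypothetical session with the new command on a CONFIG_SYMTAB-enabled build might look like the following; the table size, indices, handler names and argument pointers are all illustrative, and the format follows the shell_print() calls above (`%4d: %s(%p)` per entry):

    uart:~$ isr_table sw_isr_table
    _sw_isr_table[64]

      10: timer_isr(0x80001234)
      25: uart_isr(0x80004567)
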
diff --git a/arch/common/multilevel_irq.c b/arch/common/multilevel_irq.c
index 53f8e03a4d8..55bd3b277f0 100644
--- a/arch/common/multilevel_irq.c
+++ b/arch/common/multilevel_irq.c
@@ -11,172 +11,85 @@
#include
#include
-BUILD_ASSERT((CONFIG_NUM_2ND_LEVEL_AGGREGATORS * CONFIG_MAX_IRQ_PER_AGGREGATOR) <=
- BIT(CONFIG_2ND_LEVEL_INTERRUPT_BITS),
+BUILD_ASSERT(CONFIG_MAX_IRQ_PER_AGGREGATOR < BIT(CONFIG_2ND_LEVEL_INTERRUPT_BITS),
"L2 bits not enough to cover the number of L2 IRQs");
-
-/*
- * Insert code if the node_id is an interrupt controller
- */
-#define Z_IF_DT_IS_INTC(node_id, code) \
- IF_ENABLED(DT_NODE_HAS_PROP(node_id, interrupt_controller), (code))
-
-/*
- * Expands to node_id if its IRQN is equal to `_irq`, nothing otherwise
- * This only works for `_irq` between 0 & 4095, see `IS_EQ`
- */
-#define Z_IF_DT_INTC_IRQN_EQ(node_id, _irq) IF_ENABLED(IS_EQ(DT_IRQ(node_id, irq), _irq), (node_id))
-
-/*
- * Expands to node_id if it's an interrupt controller & its IRQN is `irq`, or nothing otherwise
- */
-#define Z_DT_INTC_GET_IRQN(node_id, _irq) \
- Z_IF_DT_IS_INTC(node_id, Z_IF_DT_INTC_IRQN_EQ(node_id, _irq))
-
-/**
- * Loop through child of "/soc" and get root interrupt controllers with `_irq` as IRQN,
- * this assumes only one device has the IRQN
- * @param _irq irq number
- * @return node_id(s) that has the `_irq` number, or empty if none of them has the `_irq`
- */
-#define INTC_DT_IRQN_GET(_irq) \
- DT_FOREACH_CHILD_STATUS_OKAY_VARGS(DT_PATH(soc), Z_DT_INTC_GET_IRQN, _irq)
-
-/* If can't find any matching interrupt controller, fills with `NULL` */
-#define INTC_DEVICE_INIT(node_id) .dev = DEVICE_DT_GET_OR_NULL(node_id),
-
-#define INIT_IRQ_PARENT_OFFSET(d, i, o) { \
- INTC_DEVICE_INIT(d) \
- .irq = i, \
- .offset = o, \
-}
-
-#define IRQ_INDEX_TO_OFFSET(i, base) (base + i * CONFIG_MAX_IRQ_PER_AGGREGATOR)
-
-#define CAT_2ND_LVL_LIST(i, base) \
- INIT_IRQ_PARENT_OFFSET(INTC_DT_IRQN_GET(CONFIG_2ND_LVL_INTR_0##i##_OFFSET), \
- CONFIG_2ND_LVL_INTR_0##i##_OFFSET, IRQ_INDEX_TO_OFFSET(i, base))
-const struct _irq_parent_entry _lvl2_irq_list[CONFIG_NUM_2ND_LEVEL_AGGREGATORS]
- = { LISTIFY(CONFIG_NUM_2ND_LEVEL_AGGREGATORS, CAT_2ND_LVL_LIST, (,),
- CONFIG_2ND_LVL_ISR_TBL_OFFSET) };
-
#ifdef CONFIG_3RD_LEVEL_INTERRUPTS
-
-BUILD_ASSERT((CONFIG_NUM_3RD_LEVEL_AGGREGATORS * CONFIG_MAX_IRQ_PER_AGGREGATOR) <=
- BIT(CONFIG_3RD_LEVEL_INTERRUPT_BITS),
+BUILD_ASSERT(CONFIG_MAX_IRQ_PER_AGGREGATOR < BIT(CONFIG_3RD_LEVEL_INTERRUPT_BITS),
"L3 bits not enough to cover the number of L3 IRQs");
-
-#define CAT_3RD_LVL_LIST(i, base) \
- INIT_IRQ_PARENT_OFFSET(INTC_DT_IRQN_GET(CONFIG_3RD_LVL_INTR_0##i##_OFFSET), \
- CONFIG_3RD_LVL_INTR_0##i##_OFFSET, IRQ_INDEX_TO_OFFSET(i, base))
-
-const struct _irq_parent_entry _lvl3_irq_list[CONFIG_NUM_3RD_LEVEL_AGGREGATORS]
- = { LISTIFY(CONFIG_NUM_3RD_LEVEL_AGGREGATORS, CAT_3RD_LVL_LIST, (,),
- CONFIG_3RD_LVL_ISR_TBL_OFFSET) };
-
#endif /* CONFIG_3RD_LEVEL_INTERRUPTS */
-static const struct _irq_parent_entry *get_parent_entry(unsigned int parent_irq,
- const struct _irq_parent_entry list[],
- unsigned int length)
+/**
+ * @brief Get the aggregator that's responsible for the given irq
+ *
+ * @param irq IRQ number to query
+ *
+ * @return Aggregator entry, or NULL if the IRQ is level 1 or no matching aggregator is found.
+ */
+static const struct _irq_parent_entry *get_intc_entry_for_irq(unsigned int irq)
{
- unsigned int i;
- const struct _irq_parent_entry *entry = NULL;
+ const unsigned int level = irq_get_level(irq);
- for (i = 0U; i < length; ++i) {
- if (list[i].irq == parent_irq) {
- entry = &list[i];
- break;
- }
+ /* 1st level aggregator is not registered */
+ if (level == 1) {
+ return NULL;
}
- __ASSERT(i != length, "Invalid argument: %i", parent_irq);
+ const unsigned int intc_irq = irq_get_intc_irq(irq);
+
+ /* Find an aggregator entry that matches the level & intc_irq */
+ STRUCT_SECTION_FOREACH_ALTERNATE(intc_table, _irq_parent_entry, intc) {
+ if ((intc->level == level) && (intc->irq == intc_irq)) {
+ return intc;
+ }
+ }
- return entry;
+ return NULL;
}
const struct device *z_get_sw_isr_device_from_irq(unsigned int irq)
{
- const struct device *dev = NULL;
- unsigned int level, parent_irq;
- const struct _irq_parent_entry *entry = NULL;
-
- level = irq_get_level(irq);
+ const struct _irq_parent_entry *intc = get_intc_entry_for_irq(irq);
- if (level == 2U) {
- parent_irq = irq_parent_level_2(irq);
- entry = get_parent_entry(parent_irq,
- _lvl2_irq_list,
- CONFIG_NUM_2ND_LEVEL_AGGREGATORS);
- }
-#ifdef CONFIG_3RD_LEVEL_INTERRUPTS
- else if (level == 3U) {
- parent_irq = irq_parent_level_3(irq);
- entry = get_parent_entry(parent_irq,
- _lvl3_irq_list,
- CONFIG_NUM_3RD_LEVEL_AGGREGATORS);
- }
-#endif /* CONFIG_3RD_LEVEL_INTERRUPTS */
- dev = entry != NULL ? entry->dev : NULL;
+ __ASSERT(intc != NULL, "can't find an aggregator to handle irq(%X)", irq);
- return dev;
+ return intc != NULL ? intc->dev : NULL;
}
unsigned int z_get_sw_isr_irq_from_device(const struct device *dev)
{
- for (size_t i = 0U; i < CONFIG_NUM_2ND_LEVEL_AGGREGATORS; ++i) {
- if (_lvl2_irq_list[i].dev == dev) {
- return _lvl2_irq_list[i].irq;
+ /* Get the IRQN for the aggregator */
+ STRUCT_SECTION_FOREACH_ALTERNATE(intc_table, _irq_parent_entry, intc) {
+ if (intc->dev == dev) {
+ return intc->irq;
}
}
-#ifdef CONFIG_3RD_LEVEL_INTERRUPTS
- for (size_t i = 0U; i < CONFIG_NUM_3RD_LEVEL_AGGREGATORS; ++i) {
- if (_lvl3_irq_list[i].dev == dev) {
- return _lvl3_irq_list[i].irq;
- }
- }
-#endif /* CONFIG_3RD_LEVEL_INTERRUPTS */
+ __ASSERT(false, "dev(%p) not found", dev);
return 0;
}
unsigned int z_get_sw_isr_table_idx(unsigned int irq)
{
- unsigned int table_idx, level, parent_irq, local_irq, parent_offset;
- const struct _irq_parent_entry *entry = NULL;
-
- level = irq_get_level(irq);
+ unsigned int table_idx, local_irq;
+ const struct _irq_parent_entry *intc = get_intc_entry_for_irq(irq);
+ const unsigned int level = irq_get_level(irq);
- if (level == 2U) {
- local_irq = irq_from_level_2(irq);
+ if (intc != NULL) {
+ local_irq = irq_from_level(irq, level);
__ASSERT_NO_MSG(local_irq < CONFIG_MAX_IRQ_PER_AGGREGATOR);
- parent_irq = irq_parent_level_2(irq);
- entry = get_parent_entry(parent_irq,
- _lvl2_irq_list,
- CONFIG_NUM_2ND_LEVEL_AGGREGATORS);
- parent_offset = entry != NULL ? entry->offset : 0U;
- table_idx = parent_offset + local_irq;
- }
-#ifdef CONFIG_3RD_LEVEL_INTERRUPTS
- else if (level == 3U) {
- local_irq = irq_from_level_3(irq);
- __ASSERT_NO_MSG(local_irq < CONFIG_MAX_IRQ_PER_AGGREGATOR);
- parent_irq = irq_parent_level_3(irq);
- entry = get_parent_entry(parent_irq,
- _lvl3_irq_list,
- CONFIG_NUM_3RD_LEVEL_AGGREGATORS);
- parent_offset = entry != NULL ? entry->offset : 0U;
- table_idx = parent_offset + local_irq;
- }
-#endif /* CONFIG_3RD_LEVEL_INTERRUPTS */
- else {
+
+ table_idx = intc->offset + local_irq;
+ } else {
+ /* irq level must be 1 if no intc entry */
+ __ASSERT(level == 1, "can't find an aggregator to handle irq(%X)", irq);
table_idx = irq;
}
table_idx -= CONFIG_GEN_IRQ_START_VECTOR;
- __ASSERT_NO_MSG(table_idx < IRQ_TABLE_SIZE);
+ __ASSERT(table_idx < IRQ_TABLE_SIZE, "table_idx(%d) < IRQ_TABLE_SIZE(%d)", table_idx,
+ IRQ_TABLE_SIZE);
return table_idx;
}
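
The rewritten lookup no longer walks the fixed `_lvl2_irq_list`/`_lvl3_irq_list` arrays; aggregator entries now live in the iterable `intc_table` section and are matched by (level, parent IRQN). A minimal sketch of how a 2nd-level aggregator driver could register itself, using the `IRQ_PARENT_ENTRY_DEFINE()` macro exactly as the legacy shim below invokes it; the node label `my_intc`, the parent IRQN 10 and the table offset are illustrative assumptions:

#include <zephyr/device.h>
#include <zephyr/sw_isr_table.h>

/* Sketch only: places a level-2 aggregator entry in the intc_table iterable
 * section so get_intc_entry_for_irq() can match it by (level, parent IRQN).
 */
IRQ_PARENT_ENTRY_DEFINE(my_intc_l2,                            /* entry name */
                        DEVICE_DT_GET(DT_NODELABEL(my_intc)),  /* aggregator device */
                        10,                                    /* parent (level-1) IRQN */
                        CONFIG_2ND_LVL_ISR_TBL_OFFSET,         /* base offset in _sw_isr_table */
                        2);                                    /* interrupt level */
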
diff --git a/arch/common/multilevel_irq_legacy.c b/arch/common/multilevel_irq_legacy.c
new file mode 100644
index 00000000000..dd4fe68b5ac
--- /dev/null
+++ b/arch/common/multilevel_irq_legacy.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Intel Corporation.
+ * Copyright (c) 2024 Meta.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+
+/**
+ * @file
+ * @brief This file houses the deprecated, legacy macro-generated multi-level interrupt lookup
+ * table code, compiled when `CONFIG_LEGACY_MULTI_LEVEL_TABLE_GENERATION` is enabled.
+ */
+
+/*
+ * Insert code if the node_id is an interrupt controller
+ */
+#define Z_IF_DT_IS_INTC(node_id, code) \
+ IF_ENABLED(DT_NODE_HAS_PROP(node_id, interrupt_controller), (code))
+
+/*
+ * Expands to node_id if its IRQN is equal to `_irq`, nothing otherwise
+ * This only works for `_irq` between 0 & 4095, see `IS_EQ`
+ */
+#define Z_IF_DT_INTC_IRQN_EQ(node_id, _irq) IF_ENABLED(IS_EQ(DT_IRQ(node_id, irq), _irq), (node_id))
+
+/*
+ * Expands to node_id if it's an interrupt controller & its IRQN is `irq`, or nothing otherwise
+ */
+#define Z_DT_INTC_GET_IRQN(node_id, _irq) \
+ Z_IF_DT_IS_INTC(node_id, Z_IF_DT_INTC_IRQN_EQ(node_id, _irq))
+
+/**
+ * Loop through the children of "/soc" and get the root interrupt controllers whose IRQN
+ * is `_irq`; this assumes that only one device has that IRQN
+ * @param _irq irq number
+ * @return node_id(s) whose IRQN is `_irq`, or nothing if no node matches
+ */
+#define INTC_DT_IRQN_GET(_irq) \
+ DT_FOREACH_CHILD_STATUS_OKAY_VARGS(DT_PATH(soc), Z_DT_INTC_GET_IRQN, _irq)
+
+#define INIT_IRQ_PARENT_OFFSET_2ND(n, d, i, o) \
+ IRQ_PARENT_ENTRY_DEFINE(intc_l2_##n, DEVICE_DT_GET_OR_NULL(d), i, o, 2)
+
+#define IRQ_INDEX_TO_OFFSET(i, base) (base + i * CONFIG_MAX_IRQ_PER_AGGREGATOR)
+
+#define CAT_2ND_LVL_LIST(i, base) \
+ INIT_IRQ_PARENT_OFFSET_2ND(i, INTC_DT_IRQN_GET(CONFIG_2ND_LVL_INTR_0##i##_OFFSET), \
+ CONFIG_2ND_LVL_INTR_0##i##_OFFSET, \
+ IRQ_INDEX_TO_OFFSET(i, base))
+
+LISTIFY(CONFIG_NUM_2ND_LEVEL_AGGREGATORS, CAT_2ND_LVL_LIST, (;), CONFIG_2ND_LVL_ISR_TBL_OFFSET);
+
+#ifdef CONFIG_3RD_LEVEL_INTERRUPTS
+
+BUILD_ASSERT((CONFIG_NUM_3RD_LEVEL_AGGREGATORS * CONFIG_MAX_IRQ_PER_AGGREGATOR) <=
+ BIT(CONFIG_3RD_LEVEL_INTERRUPT_BITS),
+ "L3 bits not enough to cover the number of L3 IRQs");
+
+#define INIT_IRQ_PARENT_OFFSET_3RD(n, d, i, o) \
+ IRQ_PARENT_ENTRY_DEFINE(intc_l3_##n, DEVICE_DT_GET_OR_NULL(d), i, o, 3)
+
+#define CAT_3RD_LVL_LIST(i, base) \
+ INIT_IRQ_PARENT_OFFSET_3RD(i, INTC_DT_IRQN_GET(CONFIG_3RD_LVL_INTR_0##i##_OFFSET), \
+ CONFIG_3RD_LVL_INTR_0##i##_OFFSET, \
+ IRQ_INDEX_TO_OFFSET(i, base))
+
+LISTIFY(CONFIG_NUM_3RD_LEVEL_AGGREGATORS, CAT_3RD_LVL_LIST, (;), CONFIG_3RD_LVL_ISR_TBL_OFFSET);
+
+#endif /* CONFIG_3RD_LEVEL_INTERRUPTS */
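
To make the generator concrete: for `i = 0`, and assuming the /soc scan resolves the aggregator to some `node_id`, `CAT_2ND_LVL_LIST(0, CONFIG_2ND_LVL_ISR_TBL_OFFSET)` expands to roughly the following (a sketch, not literal preprocessor output):

IRQ_PARENT_ENTRY_DEFINE(intc_l2_0,
                        DEVICE_DT_GET_OR_NULL(node_id),
                        CONFIG_2ND_LVL_INTR_00_OFFSET,
                        CONFIG_2ND_LVL_ISR_TBL_OFFSET + 0 * CONFIG_MAX_IRQ_PER_AGGREGATOR,
                        2);
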
diff --git a/arch/common/nocache.ld b/arch/common/nocache.ld
index a4e500e8b17..24e81cdefcf 100644
--- a/arch/common/nocache.ld
+++ b/arch/common/nocache.ld
@@ -7,6 +7,12 @@
/* Copied from linker.ld */
+#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_nocache_ram), okay)
+#define NOCACHE_REGION LINKER_DT_NODE_REGION_NAME_TOKEN(DT_CHOSEN(zephyr_nocache_ram))
+#else
+#define NOCACHE_REGION RAMABLE_REGION
+#endif
+
/* Non-cached region of RAM */
SECTION_DATA_PROLOGUE(_NOCACHE_SECTION_NAME,(NOLOAD),)
{
@@ -27,5 +33,5 @@ SECTION_DATA_PROLOGUE(_NOCACHE_SECTION_NAME,(NOLOAD),)
MPU_ALIGN(_nocache_ram_size);
#endif
_nocache_ram_end = .;
-} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
+} GROUP_DATA_LINK_IN(NOCACHE_REGION, NOCACHE_REGION)
_nocache_ram_size = _nocache_ram_end - _nocache_ram_start;
diff --git a/arch/common/shared_irq.c b/arch/common/shared_irq.c
index b4226ba7205..56c8d0cbc7d 100644
--- a/arch/common/shared_irq.c
+++ b/arch/common/shared_irq.c
@@ -92,8 +92,8 @@ void z_isr_install(unsigned int irq, void (*routine)(const void *),
for (i = 0; i < shared_entry->client_num; i++) {
client = &shared_entry->clients[i];
- __ASSERT(client->isr != routine && client->arg != param,
- "trying to register duplicate ISR/arg pair");
+ __ASSERT((client->isr == routine && client->arg == param) == false,
+ "ISR/arg combination is already registered");
}
shared_entry->clients[shared_entry->client_num].isr = routine;
diff --git a/arch/mips/core/cpu_idle.c b/arch/mips/core/cpu_idle.c
index d91a6b3ce4b..fd6621284a5 100644
--- a/arch/mips/core/cpu_idle.c
+++ b/arch/mips/core/cpu_idle.c
@@ -19,12 +19,16 @@ static ALWAYS_INLINE void mips_idle(unsigned int key)
__asm__ volatile("wait");
}
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
mips_idle(1);
}
+#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
mips_idle(key);
}
+#endif
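
With these guards in place (the same pattern is applied to nios2 and riscv below), an SoC that selects CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE can supply its own implementation instead. A minimal sketch of such an override; `my_soc_power_down()` is a hypothetical SoC-specific hook, not a Zephyr API:

#include <zephyr/irq.h>

extern void my_soc_power_down(void); /* hypothetical SoC-specific hook */

void arch_cpu_idle(void)
{
	/* Enter the SoC low-power state, then re-enable interrupts as the
	 * arch_cpu_idle() contract requires (the unlock key is arch-specific;
	 * 1 matches the mips_idle(1) call above).
	 */
	my_soc_power_down();
	irq_unlock(1);
}
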
diff --git a/arch/mips/core/fatal.c b/arch/mips/core/fatal.c
index 16011241666..a53e5bb0f5e 100644
--- a/arch/mips/core/fatal.c
+++ b/arch/mips/core/fatal.c
@@ -9,7 +9,7 @@
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf)
+ const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
if (esf != NULL) {
@@ -84,7 +84,7 @@ static char *cause_str(unsigned long cause)
}
}
-void _Fault(z_arch_esf_t *esf)
+void _Fault(struct arch_esf *esf)
{
unsigned long cause;
diff --git a/arch/mips/core/irq_offload.c b/arch/mips/core/irq_offload.c
index 2a62ae071e0..5c85ada4d61 100644
--- a/arch/mips/core/irq_offload.c
+++ b/arch/mips/core/irq_offload.c
@@ -48,3 +48,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
irq_unlock(key);
}
+
+void arch_irq_offload_init(void)
+{
+}
diff --git a/arch/mips/core/isr.S b/arch/mips/core/isr.S
index 44babb2149b..86d05d19833 100644
--- a/arch/mips/core/isr.S
+++ b/arch/mips/core/isr.S
@@ -14,7 +14,7 @@
#include
#include
-#define ESF_O(FIELD) __z_arch_esf_t_##FIELD##_OFFSET
+#define ESF_O(FIELD) __struct_arch_esf_##FIELD##_OFFSET
#define THREAD_O(FIELD) _thread_offset_to_##FIELD
/* Convenience macros for loading/storing register states. */
@@ -58,12 +58,12 @@
op v1, ESF_O(v1)(sp) ;
#define STORE_CALLER_SAVED() \
- addi sp, sp, -__z_arch_esf_t_SIZEOF ;\
+ addi sp, sp, -__struct_arch_esf_SIZEOF ;\
DO_CALLER_SAVED(OP_STOREREG) ;
#define LOAD_CALLER_SAVED() \
DO_CALLER_SAVED(OP_LOADREG) ;\
- addi sp, sp, __z_arch_esf_t_SIZEOF ;
+ addi sp, sp, __struct_arch_esf_SIZEOF ;
/* imports */
GTEXT(_Fault)
diff --git a/arch/mips/core/offsets/offsets.c b/arch/mips/core/offsets/offsets.c
index 24b477e9558..c70ce3c39fc 100644
--- a/arch/mips/core/offsets/offsets.c
+++ b/arch/mips/core/offsets/offsets.c
@@ -23,32 +23,32 @@ GEN_OFFSET_SYM(_callee_saved_t, s6);
GEN_OFFSET_SYM(_callee_saved_t, s7);
GEN_OFFSET_SYM(_callee_saved_t, s8);
-GEN_OFFSET_SYM(z_arch_esf_t, ra);
-GEN_OFFSET_SYM(z_arch_esf_t, gp);
-GEN_OFFSET_SYM(z_arch_esf_t, t0);
-GEN_OFFSET_SYM(z_arch_esf_t, t1);
-GEN_OFFSET_SYM(z_arch_esf_t, t2);
-GEN_OFFSET_SYM(z_arch_esf_t, t3);
-GEN_OFFSET_SYM(z_arch_esf_t, t4);
-GEN_OFFSET_SYM(z_arch_esf_t, t5);
-GEN_OFFSET_SYM(z_arch_esf_t, t6);
-GEN_OFFSET_SYM(z_arch_esf_t, t7);
-GEN_OFFSET_SYM(z_arch_esf_t, t8);
-GEN_OFFSET_SYM(z_arch_esf_t, t9);
-GEN_OFFSET_SYM(z_arch_esf_t, a0);
-GEN_OFFSET_SYM(z_arch_esf_t, a1);
-GEN_OFFSET_SYM(z_arch_esf_t, a2);
-GEN_OFFSET_SYM(z_arch_esf_t, a3);
-GEN_OFFSET_SYM(z_arch_esf_t, v0);
-GEN_OFFSET_SYM(z_arch_esf_t, v1);
-GEN_OFFSET_SYM(z_arch_esf_t, at);
-GEN_OFFSET_SYM(z_arch_esf_t, epc);
-GEN_OFFSET_SYM(z_arch_esf_t, badvaddr);
-GEN_OFFSET_SYM(z_arch_esf_t, hi);
-GEN_OFFSET_SYM(z_arch_esf_t, lo);
-GEN_OFFSET_SYM(z_arch_esf_t, status);
-GEN_OFFSET_SYM(z_arch_esf_t, cause);
+GEN_OFFSET_STRUCT(arch_esf, ra);
+GEN_OFFSET_STRUCT(arch_esf, gp);
+GEN_OFFSET_STRUCT(arch_esf, t0);
+GEN_OFFSET_STRUCT(arch_esf, t1);
+GEN_OFFSET_STRUCT(arch_esf, t2);
+GEN_OFFSET_STRUCT(arch_esf, t3);
+GEN_OFFSET_STRUCT(arch_esf, t4);
+GEN_OFFSET_STRUCT(arch_esf, t5);
+GEN_OFFSET_STRUCT(arch_esf, t6);
+GEN_OFFSET_STRUCT(arch_esf, t7);
+GEN_OFFSET_STRUCT(arch_esf, t8);
+GEN_OFFSET_STRUCT(arch_esf, t9);
+GEN_OFFSET_STRUCT(arch_esf, a0);
+GEN_OFFSET_STRUCT(arch_esf, a1);
+GEN_OFFSET_STRUCT(arch_esf, a2);
+GEN_OFFSET_STRUCT(arch_esf, a3);
+GEN_OFFSET_STRUCT(arch_esf, v0);
+GEN_OFFSET_STRUCT(arch_esf, v1);
+GEN_OFFSET_STRUCT(arch_esf, at);
+GEN_OFFSET_STRUCT(arch_esf, epc);
+GEN_OFFSET_STRUCT(arch_esf, badvaddr);
+GEN_OFFSET_STRUCT(arch_esf, hi);
+GEN_OFFSET_STRUCT(arch_esf, lo);
+GEN_OFFSET_STRUCT(arch_esf, status);
+GEN_OFFSET_STRUCT(arch_esf, cause);
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t)));
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, STACK_ROUND_UP(sizeof(struct arch_esf)));
GEN_ABS_SYM_END
diff --git a/arch/mips/core/prep_c.c b/arch/mips/core/prep_c.c
index 19673273b8a..0247f90df62 100644
--- a/arch/mips/core/prep_c.c
+++ b/arch/mips/core/prep_c.c
@@ -11,6 +11,8 @@
#include
#include
+#include
+#include
static void interrupt_init(void)
{
@@ -44,9 +46,15 @@ static void interrupt_init(void)
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
z_bss_zero();
interrupt_init();
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
+#endif
z_cstart();
CODE_UNREACHABLE;
diff --git a/arch/mips/core/thread.c b/arch/mips/core/thread.c
index e551674d521..7966ff462f5 100644
--- a/arch/mips/core/thread.c
+++ b/arch/mips/core/thread.c
@@ -19,11 +19,11 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
char *stack_ptr, k_thread_entry_t entry,
void *p1, void *p2, void *p3)
{
- struct __esf *stack_init;
+ struct arch_esf *stack_init;
/* Initial stack frame for thread */
- stack_init = (struct __esf *)Z_STACK_PTR_ALIGN(
- Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr)
+ stack_init = (struct arch_esf *)Z_STACK_PTR_ALIGN(
+ Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr)
);
/* Setup the initial stack frame */
diff --git a/arch/mips/include/kernel_arch_func.h b/arch/mips/include/kernel_arch_func.h
index ad89f75dd7f..b01cc1a4c65 100644
--- a/arch/mips/include/kernel_arch_func.h
+++ b/arch/mips/include/kernel_arch_func.h
@@ -35,7 +35,7 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
}
FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf);
+ const struct arch_esf *esf);
static inline bool arch_is_in_isr(void)
{
diff --git a/arch/mips/include/offsets_short_arch.h b/arch/mips/include/offsets_short_arch.h
index bd64deef114..8440f0ff701 100644
--- a/arch/mips/include/offsets_short_arch.h
+++ b/arch/mips/include/offsets_short_arch.h
@@ -9,7 +9,7 @@
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
#define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
diff --git a/arch/nios2/core/cpu_idle.c b/arch/nios2/core/cpu_idle.c
index ecdea13f5e4..b201ecfa84e 100644
--- a/arch/nios2/core/cpu_idle.c
+++ b/arch/nios2/core/cpu_idle.c
@@ -7,6 +7,7 @@
#include
#include
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
/* Do nothing but unconditionally unlock interrupts and return to the
@@ -14,7 +15,9 @@ void arch_cpu_idle(void)
*/
irq_unlock(NIOS2_STATUS_PIE_MSK);
}
+#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
/* Do nothing but restore IRQ state. This CPU does not have any
@@ -22,3 +25,4 @@ void arch_cpu_atomic_idle(unsigned int key)
*/
irq_unlock(key);
}
+#endif
diff --git a/arch/nios2/core/exception.S b/arch/nios2/core/exception.S
index 6b003262bb0..ab2d3463dd4 100644
--- a/arch/nios2/core/exception.S
+++ b/arch/nios2/core/exception.S
@@ -35,35 +35,35 @@ GTEXT(_offload_routine)
*/
SECTION_FUNC(exception.entry, _exception)
/* Reserve thread stack space for saving context */
- subi sp, sp, __z_arch_esf_t_SIZEOF
+ subi sp, sp, __struct_arch_esf_SIZEOF
/* Preserve all caller-saved registers onto the thread's stack */
- stw ra, __z_arch_esf_t_ra_OFFSET(sp)
- stw r1, __z_arch_esf_t_r1_OFFSET(sp)
- stw r2, __z_arch_esf_t_r2_OFFSET(sp)
- stw r3, __z_arch_esf_t_r3_OFFSET(sp)
- stw r4, __z_arch_esf_t_r4_OFFSET(sp)
- stw r5, __z_arch_esf_t_r5_OFFSET(sp)
- stw r6, __z_arch_esf_t_r6_OFFSET(sp)
- stw r7, __z_arch_esf_t_r7_OFFSET(sp)
- stw r8, __z_arch_esf_t_r8_OFFSET(sp)
- stw r9, __z_arch_esf_t_r9_OFFSET(sp)
- stw r10, __z_arch_esf_t_r10_OFFSET(sp)
- stw r11, __z_arch_esf_t_r11_OFFSET(sp)
- stw r12, __z_arch_esf_t_r12_OFFSET(sp)
- stw r13, __z_arch_esf_t_r13_OFFSET(sp)
- stw r14, __z_arch_esf_t_r14_OFFSET(sp)
- stw r15, __z_arch_esf_t_r15_OFFSET(sp)
+ stw ra, __struct_arch_esf_ra_OFFSET(sp)
+ stw r1, __struct_arch_esf_r1_OFFSET(sp)
+ stw r2, __struct_arch_esf_r2_OFFSET(sp)
+ stw r3, __struct_arch_esf_r3_OFFSET(sp)
+ stw r4, __struct_arch_esf_r4_OFFSET(sp)
+ stw r5, __struct_arch_esf_r5_OFFSET(sp)
+ stw r6, __struct_arch_esf_r6_OFFSET(sp)
+ stw r7, __struct_arch_esf_r7_OFFSET(sp)
+ stw r8, __struct_arch_esf_r8_OFFSET(sp)
+ stw r9, __struct_arch_esf_r9_OFFSET(sp)
+ stw r10, __struct_arch_esf_r10_OFFSET(sp)
+ stw r11, __struct_arch_esf_r11_OFFSET(sp)
+ stw r12, __struct_arch_esf_r12_OFFSET(sp)
+ stw r13, __struct_arch_esf_r13_OFFSET(sp)
+ stw r14, __struct_arch_esf_r14_OFFSET(sp)
+ stw r15, __struct_arch_esf_r15_OFFSET(sp)
/* Store value of estatus control register */
rdctl et, estatus
- stw et, __z_arch_esf_t_estatus_OFFSET(sp)
+ stw et, __struct_arch_esf_estatus_OFFSET(sp)
/* ea-4 is the address of the instruction when the exception happened,
* put this in the stack frame as well
*/
addi r15, ea, -4
- stw r15, __z_arch_esf_t_instr_OFFSET(sp)
+ stw r15, __struct_arch_esf_instr_OFFSET(sp)
/* Figure out whether we are here because of an interrupt or an
* exception. If an interrupt, switch stacks and enter IRQ handling
@@ -157,7 +157,7 @@ not_interrupt:
*
* We earlier put ea - 4 in the stack frame, replace it with just ea
*/
- stw ea, __z_arch_esf_t_instr_OFFSET(sp)
+ stw ea, __struct_arch_esf_instr_OFFSET(sp)
#ifdef CONFIG_IRQ_OFFLOAD
/* Check the contents of _offload_routine. If non-NULL, jump into
@@ -193,35 +193,35 @@ _exception_exit:
* and return to the interrupted context */
/* Return address from the exception */
- ldw ea, __z_arch_esf_t_instr_OFFSET(sp)
+ ldw ea, __struct_arch_esf_instr_OFFSET(sp)
/* Restore estatus
* XXX is this right??? */
- ldw r5, __z_arch_esf_t_estatus_OFFSET(sp)
+ ldw r5, __struct_arch_esf_estatus_OFFSET(sp)
wrctl estatus, r5
/* Restore caller-saved registers */
- ldw ra, __z_arch_esf_t_ra_OFFSET(sp)
- ldw r1, __z_arch_esf_t_r1_OFFSET(sp)
- ldw r2, __z_arch_esf_t_r2_OFFSET(sp)
- ldw r3, __z_arch_esf_t_r3_OFFSET(sp)
- ldw r4, __z_arch_esf_t_r4_OFFSET(sp)
- ldw r5, __z_arch_esf_t_r5_OFFSET(sp)
- ldw r6, __z_arch_esf_t_r6_OFFSET(sp)
- ldw r7, __z_arch_esf_t_r7_OFFSET(sp)
- ldw r8, __z_arch_esf_t_r8_OFFSET(sp)
- ldw r9, __z_arch_esf_t_r9_OFFSET(sp)
- ldw r10, __z_arch_esf_t_r10_OFFSET(sp)
- ldw r11, __z_arch_esf_t_r11_OFFSET(sp)
- ldw r12, __z_arch_esf_t_r12_OFFSET(sp)
- ldw r13, __z_arch_esf_t_r13_OFFSET(sp)
- ldw r14, __z_arch_esf_t_r14_OFFSET(sp)
- ldw r15, __z_arch_esf_t_r15_OFFSET(sp)
+ ldw ra, __struct_arch_esf_ra_OFFSET(sp)
+ ldw r1, __struct_arch_esf_r1_OFFSET(sp)
+ ldw r2, __struct_arch_esf_r2_OFFSET(sp)
+ ldw r3, __struct_arch_esf_r3_OFFSET(sp)
+ ldw r4, __struct_arch_esf_r4_OFFSET(sp)
+ ldw r5, __struct_arch_esf_r5_OFFSET(sp)
+ ldw r6, __struct_arch_esf_r6_OFFSET(sp)
+ ldw r7, __struct_arch_esf_r7_OFFSET(sp)
+ ldw r8, __struct_arch_esf_r8_OFFSET(sp)
+ ldw r9, __struct_arch_esf_r9_OFFSET(sp)
+ ldw r10, __struct_arch_esf_r10_OFFSET(sp)
+ ldw r11, __struct_arch_esf_r11_OFFSET(sp)
+ ldw r12, __struct_arch_esf_r12_OFFSET(sp)
+ ldw r13, __struct_arch_esf_r13_OFFSET(sp)
+ ldw r14, __struct_arch_esf_r14_OFFSET(sp)
+ ldw r15, __struct_arch_esf_r15_OFFSET(sp)
/* Put the stack pointer back where it was when we entered
* exception state
*/
- addi sp, sp, __z_arch_esf_t_SIZEOF
+ addi sp, sp, __struct_arch_esf_SIZEOF
/* All done, copy estatus into status and transfer to ea */
eret
diff --git a/arch/nios2/core/fatal.c b/arch/nios2/core/fatal.c
index ac64b5bc309..b531bb41e17 100644
--- a/arch/nios2/core/fatal.c
+++ b/arch/nios2/core/fatal.c
@@ -12,7 +12,7 @@
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf)
+ const struct arch_esf *esf)
{
#if CONFIG_EXCEPTION_DEBUG
if (esf != NULL) {
@@ -102,7 +102,7 @@ static char *cause_str(uint32_t cause_code)
}
#endif
-FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
+FUNC_NORETURN void _Fault(const struct arch_esf *esf)
{
#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
/* Unfortunately, completely unavailable on Nios II/e cores */
diff --git a/arch/nios2/core/irq_offload.c b/arch/nios2/core/irq_offload.c
index d33882f9f02..0c918896be9 100644
--- a/arch/nios2/core/irq_offload.c
+++ b/arch/nios2/core/irq_offload.c
@@ -41,3 +41,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
irq_unlock(key);
}
+
+void arch_irq_offload_init(void)
+{
+}
diff --git a/arch/nios2/core/offsets/offsets.c b/arch/nios2/core/offsets/offsets.c
index 8f3b3f748c1..9d381d87446 100644
--- a/arch/nios2/core/offsets/offsets.c
+++ b/arch/nios2/core/offsets/offsets.c
@@ -44,24 +44,24 @@ GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, key);
GEN_OFFSET_SYM(_callee_saved_t, retval);
-GEN_OFFSET_SYM(z_arch_esf_t, ra);
-GEN_OFFSET_SYM(z_arch_esf_t, r1);
-GEN_OFFSET_SYM(z_arch_esf_t, r2);
-GEN_OFFSET_SYM(z_arch_esf_t, r3);
-GEN_OFFSET_SYM(z_arch_esf_t, r4);
-GEN_OFFSET_SYM(z_arch_esf_t, r5);
-GEN_OFFSET_SYM(z_arch_esf_t, r6);
-GEN_OFFSET_SYM(z_arch_esf_t, r7);
-GEN_OFFSET_SYM(z_arch_esf_t, r8);
-GEN_OFFSET_SYM(z_arch_esf_t, r9);
-GEN_OFFSET_SYM(z_arch_esf_t, r10);
-GEN_OFFSET_SYM(z_arch_esf_t, r11);
-GEN_OFFSET_SYM(z_arch_esf_t, r12);
-GEN_OFFSET_SYM(z_arch_esf_t, r13);
-GEN_OFFSET_SYM(z_arch_esf_t, r14);
-GEN_OFFSET_SYM(z_arch_esf_t, r15);
-GEN_OFFSET_SYM(z_arch_esf_t, estatus);
-GEN_OFFSET_SYM(z_arch_esf_t, instr);
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
+GEN_OFFSET_STRUCT(arch_esf, ra);
+GEN_OFFSET_STRUCT(arch_esf, r1);
+GEN_OFFSET_STRUCT(arch_esf, r2);
+GEN_OFFSET_STRUCT(arch_esf, r3);
+GEN_OFFSET_STRUCT(arch_esf, r4);
+GEN_OFFSET_STRUCT(arch_esf, r5);
+GEN_OFFSET_STRUCT(arch_esf, r6);
+GEN_OFFSET_STRUCT(arch_esf, r7);
+GEN_OFFSET_STRUCT(arch_esf, r8);
+GEN_OFFSET_STRUCT(arch_esf, r9);
+GEN_OFFSET_STRUCT(arch_esf, r10);
+GEN_OFFSET_STRUCT(arch_esf, r11);
+GEN_OFFSET_STRUCT(arch_esf, r12);
+GEN_OFFSET_STRUCT(arch_esf, r13);
+GEN_OFFSET_STRUCT(arch_esf, r14);
+GEN_OFFSET_STRUCT(arch_esf, r15);
+GEN_OFFSET_STRUCT(arch_esf, estatus);
+GEN_OFFSET_STRUCT(arch_esf, instr);
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));
GEN_ABS_SYM_END
diff --git a/arch/nios2/core/prep_c.c b/arch/nios2/core/prep_c.c
index 74a3454af48..c5996205956 100644
--- a/arch/nios2/core/prep_c.c
+++ b/arch/nios2/core/prep_c.c
@@ -21,6 +21,8 @@
#include
#include
#include
+#include
+#include
/**
* @brief Prepare to and run C code
@@ -30,6 +32,10 @@
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
+
z_bss_zero();
z_data_copy();
/* In most XIP scenarios we copy the exception code into RAM, so need
@@ -44,6 +50,9 @@ void z_prep_c(void)
*/
z_nios2_dcache_flush_all();
#endif
+#endif
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
#endif
z_cstart();
CODE_UNREACHABLE;
diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h
index 2f2030c1c73..2df268a1c62 100644
--- a/arch/nios2/include/kernel_arch_func.h
+++ b/arch/nios2/include/kernel_arch_func.h
@@ -39,7 +39,7 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
}
FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf);
+ const struct arch_esf *esf);
static inline bool arch_is_in_isr(void)
{
diff --git a/arch/nios2/include/offsets_short_arch.h b/arch/nios2/include/offsets_short_arch.h
index b3f60972c3b..3b961e1fcb9 100644
--- a/arch/nios2/include/offsets_short_arch.h
+++ b/arch/nios2/include/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_NIOS2_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_NIOS2_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
/* kernel */
diff --git a/arch/posix/core/CMakeLists.txt b/arch/posix/core/CMakeLists.txt
index 12ec5261635..8c46147bc0a 100644
--- a/arch/posix/core/CMakeLists.txt
+++ b/arch/posix/core/CMakeLists.txt
@@ -21,10 +21,12 @@ endif()
if(CONFIG_NATIVE_APPLICATION)
zephyr_include_directories(
nsi_compat/
+ ${ZEPHYR_BASE}/scripts/native_simulator/common/src/include/
)
zephyr_library_sources(
- posix_core.c
+ posix_core_nsi.c
nsi_compat/nsi_compat.c
+ ${ZEPHYR_BASE}/scripts/native_simulator/common/src/nct.c
${ZEPHYR_BASE}/scripts/native_simulator/common/src/nce.c
${ZEPHYR_BASE}/scripts/native_simulator/common/src/nsi_host_trampolines.c
)
diff --git a/arch/posix/core/irq.c b/arch/posix/core/irq.c
index 11d99e782b1..a1d3568c154 100644
--- a/arch/posix/core/irq.c
+++ b/arch/posix/core/irq.c
@@ -14,6 +14,10 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
posix_irq_offload(routine, parameter);
}
+
+void arch_irq_offload_init(void)
+{
+}
#endif
void arch_irq_enable(unsigned int irq)
diff --git a/arch/posix/core/nsi_compat/nce_if.h b/arch/posix/core/nsi_compat/nce_if.h
deleted file mode 100644
index be5772a2b73..00000000000
--- a/arch/posix/core/nsi_compat/nce_if.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2023 Nordic Semiconductor ASA
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-#ifndef NSI_COMMON_SRC_INCL_NCE_IF_H
-#define NSI_COMMON_SRC_INCL_NCE_IF_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Native simulator CPU start/stop emulation module interface */
-
-void *nce_init(void);
-void nce_terminate(void *this);
-void nce_boot_cpu(void *this, void (*start_routine)(void));
-void nce_halt_cpu(void *this);
-void nce_wake_cpu(void *this);
-int nce_is_cpu_running(void *this);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* NSI_COMMON_SRC_INCL_NCE_IF_H */
diff --git a/arch/posix/core/nsi_compat/nsi_host_trampolines.h b/arch/posix/core/nsi_compat/nsi_host_trampolines.h
deleted file mode 100644
index f0a2e06c1ca..00000000000
--- a/arch/posix/core/nsi_compat/nsi_host_trampolines.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2023 Nordic Semiconductor ASA
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Note: This is a provisional header which exists to allow
- * old POSIX arch based boards (i.e. native_posix) to provide access
- * to the host C library as if the native simulator trampolines
- * existed.
- *
- * Boards based on the native simulator do NOT use this file
- */
-
-#ifndef ARCH_POSIX_CORE_NSI_COMPAT_NSI_HOST_TRAMPOLINES_H
-#define ARCH_POSIX_CORE_NSI_COMPAT_NSI_HOST_TRAMPOLINES_H
-
-#include "../scripts/native_simulator/common/src/include/nsi_host_trampolines.h"
-
-#endif /* ARCH_POSIX_CORE_NSI_COMPAT_NSI_HOST_TRAMPOLINES_H */
diff --git a/arch/posix/core/nsi_compat/nsi_safe_call.h b/arch/posix/core/nsi_compat/nsi_safe_call.h
deleted file mode 100644
index 6227cb187ae..00000000000
--- a/arch/posix/core/nsi_compat/nsi_safe_call.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2023 Nordic Semiconductor ASA
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#ifndef ARCH_POSIX_CORE_NSI_SAFE_CALLL_H
-#define ARCH_POSIX_CORE_NSI_SAFE_CALLL_H
-
-#include "nsi_tracing.h"
-#include "posix_arch_internal.h"
-
-#define NSI_SAFE_CALL PC_SAFE_CALL
-
-#endif /* ARCH_POSIX_CORE_NSI_SAFE_CALLL_H */
diff --git a/arch/posix/core/nsi_compat/nsi_tracing.h b/arch/posix/core/nsi_compat/nsi_tracing.h
deleted file mode 100644
index 854873bf3c8..00000000000
--- a/arch/posix/core/nsi_compat/nsi_tracing.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2023 Nordic Semiconductor ASA
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#ifndef ARCH_POSIX_CORE_NSI_TRACING_H
-#define ARCH_POSIX_CORE_NSI_TRACING_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void nsi_print_error_and_exit(const char *format, ...);
-void nsi_print_warning(const char *format, ...);
-void nsi_print_trace(const char *format, ...);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ARCH_POSIX_CORE_NSI_TRACING_H */
diff --git a/arch/posix/core/posix_core.c b/arch/posix/core/posix_core.c
deleted file mode 100644
index 89310f11d94..00000000000
--- a/arch/posix/core/posix_core.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright (c) 2017 Oticon A/S
- * Copyright (c) 2023 Nordic Semiconductor ASA
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-/**
- * Here is where things actually happen for the POSIX arch
- *
- * We isolate all functions here, to ensure they can be compiled as
- * independently as possible to the remainder of Zephyr to avoid name clashes
- * as Zephyr does provide functions with the same names as the POSIX threads
- * functions
- */
-/**
- * Principle of operation:
- *
- * The Zephyr OS and its app run as a set of native pthreads.
- * The Zephyr OS only sees one of this thread executing at a time.
- * Which is running is controlled using {cond|mtx}_threads and
- * currently_allowed_thread.
- *
- * The main part of the execution of each thread will occur in a fully
- * synchronous and deterministic manner, and only when commanded by the Zephyr
- * kernel.
- * But the creation of a thread will spawn a new pthread whose start
- * is asynchronous to the rest, until synchronized in posix_wait_until_allowed()
- * below.
- * Similarly aborting and canceling threads execute a tail in a quite
- * asynchronous manner.
- *
- * This implementation is meant to be portable in between POSIX systems.
- * A table (threads_table) is used to abstract the native pthreads.
- * And index in this table is used to identify threads in the IF to the kernel.
- *
- */
-
-#include
-#include
-#include
-#include
-
-#include "posix_core.h"
-#include "posix_arch_internal.h"
-
-#define PREFIX "POSIX arch core: "
-#define ERPREFIX PREFIX"error on "
-#define NO_MEM_ERR PREFIX"Can't allocate memory\n"
-
-#define PC_ENABLE_CANCEL 0 /* See Note.c1 */
-#define PC_ALLOC_CHUNK_SIZE 64
-#define PC_REUSE_ABORTED_ENTRIES 0
-/* tests/kernel/threads/scheduling/schedule_api fails when setting
- * PC_REUSE_ABORTED_ENTRIES => don't set it by now
- */
-
-static int threads_table_size;
-struct threads_table_el {
- enum {NOTUSED = 0, USED, ABORTING, ABORTED, FAILED} state;
- bool running; /* Is this the currently running thread */
- pthread_t thread; /* Actual pthread_t as returned by native kernel */
- int thead_cnt; /* For debugging: Unique, consecutive, thread number */
- /* Pointer to the status kept in the Zephyr thread stack */
- posix_thread_status_t *t_status;
-};
-
-static struct threads_table_el *threads_table;
-
-static int thread_create_count; /* For debugging. Thread creation counter */
-
-/*
- * Conditional variable to block/awake all threads during swaps()
- * (we only need 1 mutex and 1 cond variable for all threads)
- */
-static pthread_cond_t cond_threads = PTHREAD_COND_INITIALIZER;
-/* Mutex for the conditional variable posix_core_cond_threads */
-static pthread_mutex_t mtx_threads = PTHREAD_MUTEX_INITIALIZER;
-/* Token which tells which process is allowed to run now */
-static int currently_allowed_thread;
-
-static bool terminate; /* Are we terminating the program == cleaning up */
-
-static void posix_wait_until_allowed(int this_th_nbr);
-static void *posix_thread_starter(void *arg);
-static void posix_preexit_cleanup(void);
-extern void posix_arch_thread_entry(void *pa_thread_status);
-
-/**
- * Helper function, run by a thread is being aborted
- */
-static void abort_tail(int this_th_nbr)
-{
- PC_DEBUG("Thread [%i] %i: %s: Aborting (exiting) (rel mut)\n",
- threads_table[this_th_nbr].thead_cnt,
- this_th_nbr,
- __func__);
-
- threads_table[this_th_nbr].running = false;
- threads_table[this_th_nbr].state = ABORTED;
- posix_preexit_cleanup();
- pthread_exit(NULL);
-}
-
-/**
- * Helper function to block this thread until it is allowed again
- * (somebody calls posix_let_run() with this thread number
- *
- * Note that we go out of this function (the while loop below)
- * with the mutex locked by this particular thread.
- * In normal circumstances, the mutex is only unlocked internally in
- * pthread_cond_wait() while waiting for cond_threads to be signaled
- */
-static void posix_wait_until_allowed(int this_th_nbr)
-{
- threads_table[this_th_nbr].running = false;
-
- PC_DEBUG("Thread [%i] %i: %s: Waiting to be allowed to run (rel mut)\n",
- threads_table[this_th_nbr].thead_cnt,
- this_th_nbr,
- __func__);
-
- while (this_th_nbr != currently_allowed_thread) {
- pthread_cond_wait(&cond_threads, &mtx_threads);
-
- if (threads_table &&
- (threads_table[this_th_nbr].state == ABORTING)) {
- abort_tail(this_th_nbr);
- }
- }
-
- threads_table[this_th_nbr].running = true;
-
- PC_DEBUG("Thread [%i] %i: %s(): I'm allowed to run! (hav mut)\n",
- threads_table[this_th_nbr].thead_cnt,
- this_th_nbr,
- __func__);
-}
-
-
-/**
- * Helper function to let the thread run
- * Note: posix_let_run() can only be called with the mutex locked
- */
-static void posix_let_run(int next_allowed_th)
-{
- PC_DEBUG("%s: We let thread [%i] %i run\n",
- __func__,
- threads_table[next_allowed_th].thead_cnt,
- next_allowed_th);
-
-
- currently_allowed_thread = next_allowed_th;
-
- /*
- * We let all threads know one is able to run now (it may even be us
- * again if fancied)
- * Note that as we hold the mutex, they are going to be blocked until
- * we reach our own posix_wait_until_allowed() while loop
- */
- PC_SAFE_CALL(pthread_cond_broadcast(&cond_threads));
-}
-
-
-static void posix_preexit_cleanup(void)
-{
- /*
- * Release the mutex so the next allowed thread can run
- */
- PC_SAFE_CALL(pthread_mutex_unlock(&mtx_threads));
-
- /* We detach ourselves so nobody needs to join to us */
- pthread_detach(pthread_self());
-}
-
-
-/**
- * Let the ready thread run and block this thread until it is allowed again
- *
- * called from arch_swap() which does the picking from the kernel structures
- */
-void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
-{
- posix_let_run(next_allowed_thread_nbr);
-
- if (threads_table[this_th_nbr].state == ABORTING) {
- PC_DEBUG("Thread [%i] %i: %s: Aborting curr.\n",
- threads_table[this_th_nbr].thead_cnt,
- this_th_nbr,
- __func__);
- abort_tail(this_th_nbr);
- } else {
- posix_wait_until_allowed(this_th_nbr);
- }
-}
-
-/**
- * Let the ready thread (main) run, and exit this thread (init)
- *
- * Called from arch_switch_to_main_thread() which does the picking from the
- * kernel structures
- *
- * Note that we could have just done a swap(), but that would have left the
- * init thread lingering. Instead here we exit the init thread after enabling
- * the new one
- */
-void posix_main_thread_start(int next_allowed_thread_nbr)
-{
- posix_let_run(next_allowed_thread_nbr);
- PC_DEBUG("%s: Init thread dying now (rel mut)\n",
- __func__);
- posix_preexit_cleanup();
- pthread_exit(NULL);
-}
-
-/**
- * Handler called when any thread is cancelled or exits
- */
-static void posix_cleanup_handler(void *arg)
-{
- /*
- * If we are not terminating, this is just an aborted thread,
- * and the mutex was already released
- * Otherwise, release the mutex so other threads which may be
- * caught waiting for it could terminate
- */
-
- if (!terminate) {
- return;
- }
-
-#if POSIX_ARCH_DEBUG_PRINTS
- posix_thread_status_t *ptr = (posix_thread_status_t *) arg;
-
- PC_DEBUG("Thread %i: %s: Canceling (rel mut)\n",
- ptr->thread_idx,
- __func__);
-#endif
-
-
- PC_SAFE_CALL(pthread_mutex_unlock(&mtx_threads));
-
- /* We detach ourselves so nobody needs to join to us */
- pthread_detach(pthread_self());
-}
-
-/**
- * Helper function to start a Zephyr thread as a POSIX thread:
- * It will block the thread until a arch_swap() is called for it
- *
- * Spawned from posix_new_thread() below
- */
-static void *posix_thread_starter(void *arg)
-{
- int thread_idx = (intptr_t)arg;
-
- PC_DEBUG("Thread [%i] %i: %s: Starting\n",
- threads_table[thread_idx].thead_cnt,
- thread_idx,
- __func__);
-
- /*
- * We block until all other running threads reach the while loop
- * in posix_wait_until_allowed() and they release the mutex
- */
- PC_SAFE_CALL(pthread_mutex_lock(&mtx_threads));
-
- /*
- * The program may have been finished before this thread ever got to run
- */
- /* LCOV_EXCL_START */ /* See Note1 */
- if (!threads_table) {
- posix_cleanup_handler(arg);
- pthread_exit(NULL);
- }
- /* LCOV_EXCL_STOP */
-
- pthread_cleanup_push(posix_cleanup_handler, arg);
-
- PC_DEBUG("Thread [%i] %i: %s: After start mutex (hav mut)\n",
- threads_table[thread_idx].thead_cnt,
- thread_idx,
- __func__);
-
- /*
- * The thread would try to execute immediately, so we block it
- * until allowed
- */
- posix_wait_until_allowed(thread_idx);
-
- posix_thread_status_t *ptr = threads_table[thread_idx].t_status;
-
- posix_arch_thread_entry(ptr);
-
- /*
- * We only reach this point if the thread actually returns which should
- * not happen. But we handle it gracefully just in case
- */
- /* LCOV_EXCL_START */
- posix_print_trace(PREFIX"Thread [%i] %i [%lu] ended!?!\n",
- threads_table[thread_idx].thead_cnt,
- thread_idx,
- pthread_self());
-
-
- threads_table[thread_idx].running = false;
- threads_table[thread_idx].state = FAILED;
-
- pthread_cleanup_pop(1);
-
- return NULL;
- /* LCOV_EXCL_STOP */
-}
-
-/**
- * Return the first free entry index in the threads table
- */
-static int ttable_get_empty_slot(void)
-{
-
- for (int i = 0; i < threads_table_size; i++) {
- if ((threads_table[i].state == NOTUSED)
- || (PC_REUSE_ABORTED_ENTRIES
- && (threads_table[i].state == ABORTED))) {
- return i;
- }
- }
-
- /*
- * else, we run out table without finding an index
- * => we expand the table
- */
-
- threads_table = realloc(threads_table,
- (threads_table_size + PC_ALLOC_CHUNK_SIZE)
- * sizeof(struct threads_table_el));
- if (threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
- posix_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
- }
-
- /* Clear new piece of table */
- (void)memset(&threads_table[threads_table_size], 0,
- PC_ALLOC_CHUNK_SIZE * sizeof(struct threads_table_el));
-
- threads_table_size += PC_ALLOC_CHUNK_SIZE;
-
- /* The first newly created entry is good: */
- return threads_table_size - PC_ALLOC_CHUNK_SIZE;
-}
-
-/**
- * Called from arch_new_thread(),
- * Create a new POSIX thread for the new Zephyr thread.
- * arch_new_thread() picks from the kernel structures what it is that we need
- * to call with what parameters
- */
-int posix_new_thread(void *ptr)
-{
- int t_slot;
-
- t_slot = ttable_get_empty_slot();
- threads_table[t_slot].state = USED;
- threads_table[t_slot].running = false;
- threads_table[t_slot].thead_cnt = thread_create_count++;
- threads_table[t_slot].t_status = ptr;
-
- /*
- * Note: If you are here due to a valgrind reported memory leak in
- * pthread_create() please use the provided valgrind.supp suppression file.
- */
- PC_SAFE_CALL(pthread_create(&threads_table[t_slot].thread,
- NULL,
- posix_thread_starter,
- (void *)(intptr_t)t_slot));
-
- PC_DEBUG("%s created thread [%i] %i [%lu]\n",
- __func__,
- threads_table[t_slot].thead_cnt,
- t_slot,
- threads_table[t_slot].thread);
-
- return t_slot;
-}
-
-/*
- * Initialize the posix architecture
- *
- * Prepare whatever needs to be prepared to be able to start threads
- */
-void posix_arch_init(void)
-{
- thread_create_count = 0;
-
- currently_allowed_thread = -1;
-
- threads_table = calloc(PC_ALLOC_CHUNK_SIZE,
- sizeof(struct threads_table_el));
- if (threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
- posix_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
- }
-
- threads_table_size = PC_ALLOC_CHUNK_SIZE;
-
-
- PC_SAFE_CALL(pthread_mutex_lock(&mtx_threads));
-}
-
-/*
- * Free any allocated memory by the posix core and clean up.
- * Note that this function cannot be called from a SW thread
- * (the CPU is assumed halted. Otherwise we will cancel ourselves)
- *
- * This function cannot guarantee the threads will be cancelled before the HW
- * thread exists. The only way to do that, would be to wait for each of them in
- * a join (without detaching them, but that could lead to locks in some
- * convoluted cases. As a call to this function can come from an ASSERT or other
- * error termination, we better do not assume things are working fine.
- * => we prefer the supposed memory leak report from valgrind, and ensure we
- * will not hang
- */
-void posix_arch_clean_up(void)
-{
-
- if (!threads_table) { /* LCOV_EXCL_BR_LINE */
- return; /* LCOV_EXCL_LINE */
- }
-
- terminate = true;
-
-#if (PC_ENABLE_CANCEL)
- for (int i = 0; i < threads_table_size; i++) {
- if (threads_table[i].state != USED) {
- continue;
- }
-
- /* LCOV_EXCL_START */
- if (pthread_cancel(threads_table[i].thread)) {
- posix_print_warning(
- PREFIX"cleanup: could not stop thread %i\n",
- i);
- }
- /* LCOV_EXCL_STOP */
- }
-#endif
-
- free(threads_table);
- threads_table = NULL;
-}
-
-void posix_abort_thread(int thread_idx)
-{
- if (thread_idx == currently_allowed_thread) {
- PC_DEBUG("Thread [%i] %i: %s Marked myself "
- "as aborting\n",
- threads_table[thread_idx].thead_cnt,
- thread_idx,
- __func__);
- } else {
- if (threads_table[thread_idx].state != USED) { /* LCOV_EXCL_BR_LINE */
- /* The thread may have been already aborted before */
- return; /* LCOV_EXCL_LINE */
- }
-
- PC_DEBUG("Aborting not scheduled thread [%i] %i\n",
- threads_table[thread_idx].thead_cnt,
- thread_idx);
- }
-
- threads_table[thread_idx].state = ABORTING;
- /*
- * Note: the native thread will linger in RAM until it catches the
- * mutex or awakes on the condition.
- * Note that even if we would pthread_cancel() the thread here, that
- * would be the case, but with a pthread_cancel() the mutex state would
- * be uncontrolled
- */
-
-}
-
-int posix_arch_get_unique_thread_id(int thread_idx)
-{
- return threads_table[thread_idx].thead_cnt;
-}
-
-/*
- * Notes about coverage:
- *
- * Note1:
- *
- * This condition will only be triggered in very unlikely cases
- * (once every few full regression runs).
- * It is therefore excluded from the coverage report to avoid confusing
- * developers.
- *
- * Background: This arch creates a pthread as soon as the Zephyr kernel creates
- * a Zephyr thread. A pthread creation is an asynchronous process handled by the
- * host kernel.
- *
- * This architecture normally keeps only 1 thread executing at a time.
- * But part of the pre-initialization during creation of a new thread
- * and some cleanup at the tail of the thread termination are executed
- * in parallel to other threads.
- * That is, the execution of those code paths is a bit indeterministic.
- *
- * Only when the Zephyr kernel attempts to swap to a new thread does this
- * architecture need to wait until its pthread is ready and initialized
- * (has reached posix_wait_until_allowed())
- *
- * In some cases (tests) threads are created which are never actually needed
- * (typically the idle thread). That means the test may finish before this
- * thread's underlying pthread has reached posix_wait_until_allowed().
- *
- * In this unlikely cases the initialization or cleanup of the thread follows
- * non-typical code paths.
- * This code paths are there to ensure things work always, no matter
- * the load of the host. Without them, very rare & mysterious segfault crashes
- * would occur.
- * But as they are very atypical and only triggered with some host loads,
- * they will be covered in the coverage reports only rarely.
- *
- * Note2:
- *
- * Some other code will never or only very rarely trigger and is therefore
- * excluded with LCOV_EXCL_LINE
- *
- *
- * Notes about (memory) cleanup:
- *
- * Note.c1:
- *
- * In some very rare cases in very loaded machines, a race in the glibc pthread_cancel()
- * seems to be triggered.
- * In this, the cancelled thread cleanup overtakes the pthread_cancel() code, and frees the
- * pthread structure before pthread_cancel() has finished, resulting in a dereference into already
- * free'd memory, and therefore a segfault.
- * Calling pthread_cancel() during cleanup is not required beyond preventing a valgrind
- * memory leak report (all threads will be canceled immediately on exit).
- * Therefore we do not do this, to avoid this very rare crashes.
- */
diff --git a/arch/posix/core/posix_core_nsi.c b/arch/posix/core/posix_core_nsi.c
index 496ce12a3bf..39ed2de0ea6 100644
--- a/arch/posix/core/posix_core_nsi.c
+++ b/arch/posix/core/posix_core_nsi.c
@@ -57,3 +57,8 @@ int posix_arch_get_unique_thread_id(int thread_idx)
{
return nct_get_unique_thread_id(te_state, thread_idx);
}
+
+int posix_arch_thread_name_set(int thread_idx, const char *str)
+{
+ return nct_thread_name_set(te_state, thread_idx, str);
+}
diff --git a/arch/posix/core/swap.c b/arch/posix/core/swap.c
index 67ca4c5bb9b..d4ec5e50b5e 100644
--- a/arch/posix/core/swap.c
+++ b/arch/posix/core/swap.c
@@ -112,7 +112,7 @@ void posix_irq_check_idle_exit(void)
{
if (_kernel.idle) {
_kernel.idle = 0;
- z_pm_save_idle_exit();
+ pm_system_resume();
}
}
#endif
diff --git a/arch/posix/core/thread.c b/arch/posix/core/thread.c
index cc7fc355554..4e443e2283b 100644
--- a/arch/posix/core/thread.c
+++ b/arch/posix/core/thread.c
@@ -13,6 +13,7 @@
* architecture
*/
+#include
#include
#include
#include
@@ -54,6 +55,40 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
thread_status->thread_idx = posix_new_thread((void *)thread_status);
}
+int arch_thread_name_set(struct k_thread *thread, const char *str)
+{
+#define MAX_HOST_THREAD_NAME 16
+
+ int ret;
+ int thread_index;
+ posix_thread_status_t *thread_status;
+ char th_name[MAX_HOST_THREAD_NAME];
+
+ thread_status = thread->callee_saved.thread_status;
+ if (!thread_status) {
+ return -EAGAIN;
+ }
+
+ thread_index = thread_status->thread_idx;
+
+ if (!str) {
+ return -EAGAIN;
+ }
+
+ snprintf(th_name, MAX_HOST_THREAD_NAME,
+ #if (CONFIG_NATIVE_SIMULATOR_NUMBER_MCUS > 1)
+ STRINGIFY(CONFIG_NATIVE_SIMULATOR_MCU_N) ":"
+ #endif
+ "%s", str);
+
+ ret = posix_arch_thread_name_set(thread_index, th_name);
+ if (ret) {
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
void posix_arch_thread_entry(void *pa_thread_status)
{
posix_thread_status_t *ptr = pa_thread_status;
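
For applications nothing changes: with CONFIG_THREAD_NAME enabled, k_thread_name_set() is expected to reach this new hook, which propagates the name (truncated to the 16-character host limit, per MAX_HOST_THREAD_NAME above) to the underlying native thread so host tools such as gdb or ps can display it. A small sketch; `my_thread` is a hypothetical thread object created elsewhere:

#include <zephyr/kernel.h>

extern struct k_thread my_thread; /* hypothetical thread created elsewhere */

void label_my_thread(void)
{
	/* Forwarded to arch_thread_name_set(); truncated by the snprintf() above */
	(void)k_thread_name_set(&my_thread, "sensor_poll");
}
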
diff --git a/arch/posix/include/offsets_short_arch.h b/arch/posix/include/offsets_short_arch.h
index b33414b3f6e..5281d68e092 100644
--- a/arch/posix/include/offsets_short_arch.h
+++ b/arch/posix/include/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_POSIX_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_POSIX_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
/* kernel */
diff --git a/arch/posix/include/posix_core.h b/arch/posix/include/posix_core.h
index c16d17afbd9..983f80a3abd 100644
--- a/arch/posix/include/posix_core.h
+++ b/arch/posix/include/posix_core.h
@@ -47,6 +47,7 @@ void posix_main_thread_start(int next_allowed_thread_nbr);
int posix_new_thread(void *payload);
void posix_abort_thread(int thread_idx);
int posix_arch_get_unique_thread_id(int thread_idx);
+int posix_arch_thread_name_set(int thread_idx, const char *str);
#ifndef POSIX_ARCH_DEBUG_PRINTS
#define POSIX_ARCH_DEBUG_PRINTS 0
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 1c4d547b29c..575c9871c13 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -30,7 +30,6 @@ config RISCV_GP
config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
bool "Do not use mret outside a trap handler context"
depends on MULTITHREADING
- depends on !RISCV_PMP
help
Use mret instruction only when in a trap handler.
This is for RISC-V implementations that require every mret to be
@@ -38,30 +37,14 @@ config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
and most people should say n here to minimize context switching
overhead.
-config RISCV_ENABLE_FRAME_POINTER
- bool
- default y
- depends on OVERRIDE_FRAME_POINTER_DEFAULT && !OMIT_FRAME_POINTER
- help
- Hidden option to simplify access to OVERRIDE_FRAME_POINTER_DEFAULT
- and OMIT_FRAME_POINTER. It is automatically enabled when the frame
- pointer unwinding is enabled.
-
-config RISCV_EXCEPTION_STACK_TRACE
- bool
- default y
- depends on EXCEPTION_STACK_TRACE
- imply THREAD_STACK_INFO
- help
- Internal config to enable runtime stack traces on fatal exceptions.
-
menu "RISCV Processor Options"
config INCLUDE_RESET_VECTOR
- bool "Include Reset vector"
+ bool "Jumps to __initialize directly"
help
- Include the reset vector stub, which initializes the stack and
- prepares for running C code.
+ Select 'y' here to use the Zephyr-provided default implementation, which
+ jumps to `__initialize` directly. Otherwise an SoC needs to provide its
+ own custom `__reset` routine.
config RISCV_PRIVILEGED
bool
@@ -98,7 +81,7 @@ config RISCV_SOC_HAS_ISR_STACKING
guarded by !_ASMLANGUAGE. The ESF should be defined to account for
the hardware stacked registers in the proper order as they are
saved on the stack by the hardware, and the registers saved by the
- software macros. The structure must be called '__esf'.
+ software macros. The structure must be called 'struct arch_esf'.
config RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING
bool
@@ -255,6 +238,14 @@ config RISCV_HART_MASK
i.e. 128, 129, ..(0x80, 8x81, ..), this can be configured to 63 (0x7f)
such that we can extract the bits that start from 0.
+config EXTRA_EXCEPTION_INFO
+ bool "Collect extra exception info"
+ depends on EXCEPTION_DEBUG
+ help
+ This option enables the collection of extra information, such as
+ register state, when a fault occurs. This information can be useful
+ to collect for post-mortem analysis and debugging of issues.
+
config RISCV_PMP
bool "RISC-V PMP Support"
select THREAD_STACK_INFO
@@ -379,6 +370,7 @@ config ARCH_IRQ_VECTOR_TABLE_ALIGN
config RISCV_TRAP_HANDLER_ALIGNMENT
int "Alignment of RISC-V trap handler in bytes"
+ default 64 if RISCV_HAS_CLIC
default 4
help
This value configures the alignment of RISC-V trap handling
@@ -392,6 +384,14 @@ config GEN_IRQ_VECTOR_TABLE
config ARCH_HAS_SINGLE_THREAD_SUPPORT
default y if !SMP
+config ARCH_HAS_STACKWALK
+ bool
+ default y
+ imply THREAD_STACK_INFO
+ help
+ Internal config to indicate that the arch_stack_walk() API is implemented
+ and can be enabled.
+
rsource "Kconfig.isa"
endmenu
diff --git a/arch/riscv/core/CMakeLists.txt b/arch/riscv/core/CMakeLists.txt
index 7ffcffd65c8..3e97d36d7f8 100644
--- a/arch/riscv/core/CMakeLists.txt
+++ b/arch/riscv/core/CMakeLists.txt
@@ -6,7 +6,6 @@ zephyr_library_sources(
cpu_idle.c
fatal.c
irq_manage.c
- isr.S
prep_c.c
reboot.c
reset.S
@@ -21,9 +20,10 @@ endif ()
zephyr_library_sources_ifdef(CONFIG_FPU_SHARING fpu.c fpu.S)
zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
+zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr.S)
zephyr_library_sources_ifdef(CONFIG_RISCV_PMP pmp.c pmp.S)
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE tls.c)
zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)
-zephyr_library_sources_ifdef(CONFIG_RISCV_EXCEPTION_STACK_TRACE stacktrace.c)
+zephyr_library_sources_ifdef(CONFIG_ARCH_STACKWALK stacktrace.c)
zephyr_linker_sources(ROM_START SORT_KEY 0x0vectors vector_table.ld)
diff --git a/arch/riscv/core/coredump.c b/arch/riscv/core/coredump.c
index f232816433a..70d7a9976d4 100644
--- a/arch/riscv/core/coredump.c
+++ b/arch/riscv/core/coredump.c
@@ -67,7 +67,7 @@ struct riscv_arch_block {
*/
static struct riscv_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
{
struct coredump_arch_hdr_t hdr = {
.id = COREDUMP_ARCH_HDR_ID,
diff --git a/arch/riscv/core/cpu_idle.c b/arch/riscv/core/cpu_idle.c
index 1d47680cef2..413b911a53e 100644
--- a/arch/riscv/core/cpu_idle.c
+++ b/arch/riscv/core/cpu_idle.c
@@ -7,16 +7,20 @@
#include
#include
-void __weak arch_cpu_idle(void)
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
+void arch_cpu_idle(void)
{
sys_trace_idle();
__asm__ volatile("wfi");
irq_unlock(MSTATUS_IEN);
}
+#endif
-void __weak arch_cpu_atomic_idle(unsigned int key)
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
+void arch_cpu_atomic_idle(unsigned int key)
{
sys_trace_idle();
__asm__ volatile("wfi");
irq_unlock(key);
}
+#endif
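Replacing the `__weak` idle handlers with `CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE` / `CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE` guards makes the override an explicit Kconfig opt-in rather than a link-time accident. A sketch of an SoC-level override under that assumption; `soc_enter_low_power()` is a hypothetical vendor hook:

```c
/* Sketch: compiled only when the SoC selects ARCH_HAS_CUSTOM_CPU_IDLE. */
#include <zephyr/kernel.h>
#include <zephyr/tracing/tracing.h>

extern void soc_enter_low_power(void); /* hypothetical vendor hook */

void arch_cpu_idle(void)
{
	sys_trace_idle();
	soc_enter_low_power();   /* vendor-specific sleep sequence */
	irq_unlock(MSTATUS_IEN); /* leave with interrupts enabled, as the default does */
}
```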
diff --git a/arch/riscv/core/fatal.c b/arch/riscv/core/fatal.c
index 8efffd37371..d5cbd2f4dc1 100644
--- a/arch/riscv/core/fatal.c
+++ b/arch/riscv/core/fatal.c
@@ -29,15 +29,15 @@ static const struct z_exc_handle exceptions[] = {
#endif
/* Stack trace function */
-void z_riscv_unwind_stack(const z_arch_esf_t *esf);
+void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf);
-uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf)
+uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf)
{
/*
* Kernel stack pointer prior this exception i.e. before
* storing the exception stack frame.
*/
- uintptr_t sp = (uintptr_t)esf + sizeof(z_arch_esf_t);
+ uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);
#ifdef CONFIG_USERSPACE
if ((esf->mstatus & MSTATUS_MPP) == PRV_U) {
@@ -52,15 +52,55 @@ uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf)
return sp;
}
+const char *z_riscv_mcause_str(unsigned long cause)
+{
+ static const char *const mcause_str[17] = {
+ [0] = "Instruction address misaligned",
+ [1] = "Instruction Access fault",
+ [2] = "Illegal instruction",
+ [3] = "Breakpoint",
+ [4] = "Load address misaligned",
+ [5] = "Load access fault",
+ [6] = "Store/AMO address misaligned",
+ [7] = "Store/AMO access fault",
+ [8] = "Environment call from U-mode",
+ [9] = "Environment call from S-mode",
+ [10] = "unknown",
+ [11] = "Environment call from M-mode",
+ [12] = "Instruction page fault",
+ [13] = "Load page fault",
+ [14] = "unknown",
+ [15] = "Store/AMO page fault",
+ [16] = "unknown",
+ };
+
+ return mcause_str[MIN(cause, ARRAY_SIZE(mcause_str) - 1)];
+}
+
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf)
+ const struct arch_esf *esf)
{
z_riscv_fatal_error_csf(reason, esf, NULL);
}
-FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const z_arch_esf_t *esf,
+FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
const _callee_saved_t *csf)
{
+ unsigned long mcause;
+
+ __asm__ volatile("csrr %0, mcause" : "=r" (mcause));
+
+ mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;
+ LOG_ERR("");
+ LOG_ERR(" mcause: %ld, %s", mcause, z_riscv_mcause_str(mcause));
+
+#ifndef CONFIG_SOC_OPENISA_RV32M1
+ unsigned long mtval;
+
+ __asm__ volatile("csrr %0, mtval" : "=r" (mtval));
+ LOG_ERR(" mtval: %lx", mtval);
+#endif /* CONFIG_SOC_OPENISA_RV32M1 */
+
#ifdef CONFIG_EXCEPTION_DEBUG
if (esf != NULL) {
LOG_ERR(" a0: " PR_REG " t0: " PR_REG, esf->a0, esf->t0);
@@ -98,60 +138,24 @@ FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const z_arch_esf
#endif /* CONFIG_RISCV_ISA_RV32E */
LOG_ERR("");
}
+#endif /* CONFIG_EXCEPTION_DEBUG */
- if (IS_ENABLED(CONFIG_RISCV_EXCEPTION_STACK_TRACE)) {
- z_riscv_unwind_stack(esf);
- }
+#ifdef CONFIG_EXCEPTION_STACK_TRACE
+ z_riscv_unwind_stack(esf, csf);
+#endif /* CONFIG_EXCEPTION_STACK_TRACE */
-#endif /* CONFIG_EXCEPTION_DEBUG */
z_fatal_error(reason, esf);
CODE_UNREACHABLE;
}
-static char *cause_str(unsigned long cause)
-{
- switch (cause) {
- case 0:
- return "Instruction address misaligned";
- case 1:
- return "Instruction Access fault";
- case 2:
- return "Illegal instruction";
- case 3:
- return "Breakpoint";
- case 4:
- return "Load address misaligned";
- case 5:
- return "Load access fault";
- case 6:
- return "Store/AMO address misaligned";
- case 7:
- return "Store/AMO access fault";
- case 8:
- return "Environment call from U-mode";
- case 9:
- return "Environment call from S-mode";
- case 11:
- return "Environment call from M-mode";
- case 12:
- return "Instruction page fault";
- case 13:
- return "Load page fault";
- case 15:
- return "Store/AMO page fault";
- default:
- return "unknown";
- }
-}
-
-static bool bad_stack_pointer(z_arch_esf_t *esf)
+static bool bad_stack_pointer(struct arch_esf *esf)
{
#ifdef CONFIG_PMP_STACK_GUARD
/*
* Check if the kernel stack pointer prior this exception (before
* storing the exception stack frame) was in the stack guard area.
*/
- uintptr_t sp = (uintptr_t)esf + sizeof(z_arch_esf_t);
+ uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);
#ifdef CONFIG_USERSPACE
if (_current->arch.priv_stack_start != 0 &&
@@ -189,7 +193,7 @@ static bool bad_stack_pointer(z_arch_esf_t *esf)
return false;
}
-void _Fault(z_arch_esf_t *esf)
+void _Fault(struct arch_esf *esf)
{
#ifdef CONFIG_USERSPACE
/*
@@ -207,25 +211,16 @@ void _Fault(z_arch_esf_t *esf)
}
#endif /* CONFIG_USERSPACE */
- unsigned long mcause;
-
- __asm__ volatile("csrr %0, mcause" : "=r" (mcause));
-
-#ifndef CONFIG_SOC_OPENISA_RV32M1
- unsigned long mtval;
- __asm__ volatile("csrr %0, mtval" : "=r" (mtval));
-#endif
-
- mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;
- LOG_ERR("");
- LOG_ERR(" mcause: %ld, %s", mcause, cause_str(mcause));
-#ifndef CONFIG_SOC_OPENISA_RV32M1
- LOG_ERR(" mtval: %lx", mtval);
-#endif
-
unsigned int reason = K_ERR_CPU_EXCEPTION;
if (bad_stack_pointer(esf)) {
+#ifdef CONFIG_PMP_STACK_GUARD
+ /*
+ * Remove the thread's PMP setting to prevent triggering a stack
+ * overflow error again due to the previous configuration.
+ */
+ z_riscv_pmp_stackguard_disable();
+#endif /* CONFIG_PMP_STACK_GUARD */
reason = K_ERR_STACK_CHK_FAIL;
}
@@ -241,7 +236,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
void z_impl_user_fault(unsigned int reason)
{
- z_arch_esf_t *oops_esf = _current->syscall_frame;
+ struct arch_esf *oops_esf = _current->syscall_frame;
if (((_current->base.user_options & K_USER) != 0) &&
reason != K_ERR_STACK_CHK_FAIL) {
@@ -255,6 +250,6 @@ static void z_vrfy_user_fault(unsigned int reason)
z_impl_user_fault(reason);
}
-#include
+#include
#endif /* CONFIG_USERSPACE */
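Replacing the `switch`-based `cause_str()` with the flat `mcause_str[]` table plus a `MIN()` clamp keeps the mapping data-driven and guarantees that any out-of-range cause resolves to the final "unknown" slot instead of reading past the array. A small usage sketch:

```c
const char *z_riscv_mcause_str(unsigned long cause);

void demo(void)
{
	const char *a = z_riscv_mcause_str(2);  /* "Illegal instruction" */
	const char *b = z_riscv_mcause_str(42); /* clamped -> "unknown"  */

	(void)a;
	(void)b;
}
```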
diff --git a/arch/riscv/core/fpu.S b/arch/riscv/core/fpu.S
index 2708d11fec1..981bd576cb7 100644
--- a/arch/riscv/core/fpu.S
+++ b/arch/riscv/core/fpu.S
@@ -7,7 +7,7 @@
#include
#include
-#include
+#include
#ifdef CONFIG_CPU_HAS_FPU_DOUBLE_PRECISION
#define LOAD fld
diff --git a/arch/riscv/core/fpu.c b/arch/riscv/core/fpu.c
index da5d07b3146..318e97e0002 100644
--- a/arch/riscv/core/fpu.c
+++ b/arch/riscv/core/fpu.c
@@ -204,7 +204,7 @@ void z_riscv_fpu_enter_exc(void)
* Note that the exception depth count was not incremented before this call
* as no further exceptions are expected before returning to normal mode.
*/
-void z_riscv_fpu_trap(z_arch_esf_t *esf)
+void z_riscv_fpu_trap(struct arch_esf *esf)
{
__ASSERT((esf->mstatus & MSTATUS_FS) == 0 &&
(csr_read(mstatus) & MSTATUS_FS) == 0,
@@ -293,7 +293,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
* This is called on every exception exit except for z_riscv_fpu_trap().
* In that case the exception level of interest is 1 (soon to be 0).
*/
-void z_riscv_fpu_exit_exc(z_arch_esf_t *esf)
+void z_riscv_fpu_exit_exc(struct arch_esf *esf)
{
if (fpu_access_allowed(1)) {
esf->mstatus &= ~MSTATUS_FS;
diff --git a/arch/riscv/core/irq_manage.c b/arch/riscv/core/irq_manage.c
index 358b07534be..8ba7b615b42 100644
--- a/arch/riscv/core/irq_manage.c
+++ b/arch/riscv/core/irq_manage.c
@@ -19,6 +19,12 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
FUNC_NORETURN void z_irq_spurious(const void *unused)
{
+#ifdef CONFIG_EMPTY_IRQ_SPURIOUS
+ while (1) {
+ }
+
+ CODE_UNREACHABLE;
+#else
unsigned long mcause;
ARG_UNUSED(unused);
@@ -37,6 +43,7 @@ FUNC_NORETURN void z_irq_spurious(const void *unused)
}
#endif
z_riscv_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
+#endif /* CONFIG_EMPTY_IRQ_SPURIOUS */
}
#ifdef CONFIG_DYNAMIC_INTERRUPTS
diff --git a/arch/riscv/core/irq_offload.c b/arch/riscv/core/irq_offload.c
index d325bc2dcd6..73d4b04c2a1 100644
--- a/arch/riscv/core/irq_offload.c
+++ b/arch/riscv/core/irq_offload.c
@@ -11,3 +11,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
arch_syscall_invoke2((uintptr_t)routine, (uintptr_t)parameter, RV_ECALL_IRQ_OFFLOAD);
}
+
+void arch_irq_offload_init(void)
+{
+}
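The empty `arch_irq_offload_init()` stub satisfies the new generic init hook for IRQ offloading; RISC-V needs no setup because offloading rides on the ECALL path. For reference, a typical test-side use of the mechanism:

```c
/* Run a routine in interrupt context from thread context. */
#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>

static void offloaded(const void *param)
{
	ARG_UNUSED(param);
	/* executes as if invoked from an ISR */
}

void run_in_irq_context(void)
{
	irq_offload(offloaded, NULL);
}
```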
diff --git a/arch/riscv/core/isr.S b/arch/riscv/core/isr.S
index e9a3d523127..7e885da93a4 100644
--- a/arch/riscv/core/isr.S
+++ b/arch/riscv/core/isr.S
@@ -24,22 +24,22 @@
/* Convenience macro for loading/storing register states. */
#define DO_CALLER_SAVED(op) \
- RV_E( op t0, __z_arch_esf_t_t0_OFFSET(sp) );\
- RV_E( op t1, __z_arch_esf_t_t1_OFFSET(sp) );\
- RV_E( op t2, __z_arch_esf_t_t2_OFFSET(sp) );\
- RV_I( op t3, __z_arch_esf_t_t3_OFFSET(sp) );\
- RV_I( op t4, __z_arch_esf_t_t4_OFFSET(sp) );\
- RV_I( op t5, __z_arch_esf_t_t5_OFFSET(sp) );\
- RV_I( op t6, __z_arch_esf_t_t6_OFFSET(sp) );\
- RV_E( op a0, __z_arch_esf_t_a0_OFFSET(sp) );\
- RV_E( op a1, __z_arch_esf_t_a1_OFFSET(sp) );\
- RV_E( op a2, __z_arch_esf_t_a2_OFFSET(sp) );\
- RV_E( op a3, __z_arch_esf_t_a3_OFFSET(sp) );\
- RV_E( op a4, __z_arch_esf_t_a4_OFFSET(sp) );\
- RV_E( op a5, __z_arch_esf_t_a5_OFFSET(sp) );\
- RV_I( op a6, __z_arch_esf_t_a6_OFFSET(sp) );\
- RV_I( op a7, __z_arch_esf_t_a7_OFFSET(sp) );\
- RV_E( op ra, __z_arch_esf_t_ra_OFFSET(sp) )
+ RV_E( op t0, __struct_arch_esf_t0_OFFSET(sp) );\
+ RV_E( op t1, __struct_arch_esf_t1_OFFSET(sp) );\
+ RV_E( op t2, __struct_arch_esf_t2_OFFSET(sp) );\
+ RV_I( op t3, __struct_arch_esf_t3_OFFSET(sp) );\
+ RV_I( op t4, __struct_arch_esf_t4_OFFSET(sp) );\
+ RV_I( op t5, __struct_arch_esf_t5_OFFSET(sp) );\
+ RV_I( op t6, __struct_arch_esf_t6_OFFSET(sp) );\
+ RV_E( op a0, __struct_arch_esf_a0_OFFSET(sp) );\
+ RV_E( op a1, __struct_arch_esf_a1_OFFSET(sp) );\
+ RV_E( op a2, __struct_arch_esf_a2_OFFSET(sp) );\
+ RV_E( op a3, __struct_arch_esf_a3_OFFSET(sp) );\
+ RV_E( op a4, __struct_arch_esf_a4_OFFSET(sp) );\
+ RV_E( op a5, __struct_arch_esf_a5_OFFSET(sp) );\
+ RV_I( op a6, __struct_arch_esf_a6_OFFSET(sp) );\
+ RV_I( op a7, __struct_arch_esf_a7_OFFSET(sp) );\
+ RV_E( op ra, __struct_arch_esf_ra_OFFSET(sp) )
#ifdef CONFIG_EXCEPTION_DEBUG
/* Convenience macro for storing callee saved register [s0 - s11] states. */
@@ -157,7 +157,7 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
/* Save user stack value. Coming from user space, we know this
* can't overflow the privileged stack. The esf will be allocated
* later but it is safe to store our saved user sp here. */
- sr t0, (-__z_arch_esf_t_SIZEOF + __z_arch_esf_t_sp_OFFSET)(sp)
+ sr t0, (-__struct_arch_esf_SIZEOF + __struct_arch_esf_sp_OFFSET)(sp)
/* Make sure tls pointer is sane */
lr t0, ___cpu_t_current_OFFSET(s0)
@@ -180,21 +180,21 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
SOC_ISR_SW_STACKING
#else
/* Save caller-saved registers on current thread stack. */
- addi sp, sp, -__z_arch_esf_t_SIZEOF
+ addi sp, sp, -__struct_arch_esf_SIZEOF
DO_CALLER_SAVED(sr) ;
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
/* Save s0 in the esf and load it with &_current_cpu. */
- sr s0, __z_arch_esf_t_s0_OFFSET(sp)
+ sr s0, __struct_arch_esf_s0_OFFSET(sp)
get_current_cpu s0
/* Save MEPC register */
csrr t0, mepc
- sr t0, __z_arch_esf_t_mepc_OFFSET(sp)
+ sr t0, __struct_arch_esf_mepc_OFFSET(sp)
/* Save MSTATUS register */
csrr t2, mstatus
- sr t2, __z_arch_esf_t_mstatus_OFFSET(sp)
+ sr t2, __struct_arch_esf_mstatus_OFFSET(sp)
#if defined(CONFIG_FPU_SHARING)
/* determine if FPU access was disabled */
@@ -301,7 +301,7 @@ no_fp: /* increment _current->arch.exception_depth */
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
/* Handle context saving at SOC level. */
- addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
+ addi a0, sp, __struct_arch_esf_soc_context_OFFSET
jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
@@ -347,11 +347,26 @@ no_fp: /* increment _current->arch.exception_depth */
*/
li t1, RISCV_EXC_ECALLU
beq t0, t1, is_user_syscall
+
+#ifdef CONFIG_PMP_STACK_GUARD
+ /*
+ * Determine if we come from user space. If so, reconfigure the PMP for
+ * kernel mode stack guard.
+ */
+ csrr t0, mstatus
+ li t1, MSTATUS_MPP
+ and t0, t0, t1
+ bnez t0, 1f
+ lr a0, ___cpu_t_current_OFFSET(s0)
+ call z_riscv_pmp_stackguard_enable
+1:
+#endif /* CONFIG_PMP_STACK_GUARD */
+
#endif /* CONFIG_USERSPACE */
/*
* Call _Fault to handle exception.
- * Stack pointer is pointing to a z_arch_esf_t structure, pass it
+ * Stack pointer is pointing to a struct arch_esf structure, pass it
* to _Fault (via register a0).
* If _Fault shall return, set return address to
* no_reschedule to restore stack.
@@ -370,9 +385,9 @@ is_kernel_syscall:
* It's safe to always increment by 4, even with compressed
* instructions, because the ecall instruction is always 4 bytes.
*/
- lr t0, __z_arch_esf_t_mepc_OFFSET(sp)
+ lr t0, __struct_arch_esf_mepc_OFFSET(sp)
addi t0, t0, 4
- sr t0, __z_arch_esf_t_mepc_OFFSET(sp)
+ sr t0, __struct_arch_esf_mepc_OFFSET(sp)
#ifdef CONFIG_PMP_STACK_GUARD
/* Re-activate PMP for m-mode */
@@ -383,7 +398,7 @@ is_kernel_syscall:
#endif
/* Determine what to do. Operation code is in t0. */
- lr t0, __z_arch_esf_t_t0_OFFSET(sp)
+ lr t0, __struct_arch_esf_t0_OFFSET(sp)
.if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif
beqz t0, do_fault
@@ -396,8 +411,24 @@ is_kernel_syscall:
#ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
li t1, RV_ECALL_SCHEDULE
bne t0, t1, skip_schedule
- lr a0, __z_arch_esf_t_a0_OFFSET(sp)
- lr a1, __z_arch_esf_t_a1_OFFSET(sp)
+ lr a0, __struct_arch_esf_a0_OFFSET(sp)
+ lr a1, __struct_arch_esf_a1_OFFSET(sp)
+
+#ifdef CONFIG_FPU_SHARING
+ /*
+ * When an ECALL is used for a context-switch, the current thread has
+ * been updated to the next thread.
+ * Add the exception_depth back to the previous thread.
+ */
+ lb t1, _thread_offset_to_exception_depth(a0)
+ add t1, t1, -1
+ sb t1, _thread_offset_to_exception_depth(a0)
+
+ lb t1, _thread_offset_to_exception_depth(a1)
+ add t1, t1, 1
+ sb t1, _thread_offset_to_exception_depth(a1)
+#endif
+
j reschedule
skip_schedule:
#endif
@@ -408,7 +439,7 @@ skip_schedule:
do_fault:
/* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve reason in a0, esf in A1. */
- lr a0, __z_arch_esf_t_a0_OFFSET(sp)
+ lr a0, __struct_arch_esf_a0_OFFSET(sp)
1: mv a1, sp
#ifdef CONFIG_EXCEPTION_DEBUG
@@ -419,10 +450,15 @@ do_fault:
STORE_CALLEE_SAVED() ;
mv a2, sp
+#ifdef CONFIG_EXTRA_EXCEPTION_INFO
+ /* Store csf's addr into esf (a1 still holds the pointer to the esf at this point) */
+ sr a2, __struct_arch_esf_csf_OFFSET(a1)
+#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
+
tail z_riscv_fatal_error_csf
#else
tail z_riscv_fatal_error
-#endif
+#endif /* CONFIG_EXCEPTION_DEBUG */
#if defined(CONFIG_IRQ_OFFLOAD)
do_irq_offload:
@@ -431,8 +467,8 @@ do_irq_offload:
* Routine pointer is in saved a0, argument in saved a1
* so we load them with a1/a0 (reversed).
*/
- lr a1, __z_arch_esf_t_a0_OFFSET(sp)
- lr a0, __z_arch_esf_t_a1_OFFSET(sp)
+ lr a1, __struct_arch_esf_a0_OFFSET(sp)
+ lr a0, __struct_arch_esf_a1_OFFSET(sp)
/* Increment _current_cpu->nested */
lw t1, ___cpu_t_nested_OFFSET(s0)
@@ -474,18 +510,18 @@ is_user_syscall:
* Same as for is_kernel_syscall: increment saved MEPC by 4 to
* prevent triggering the same ecall again upon exiting the ISR.
*/
- lr t1, __z_arch_esf_t_mepc_OFFSET(sp)
+ lr t1, __struct_arch_esf_mepc_OFFSET(sp)
addi t1, t1, 4
- sr t1, __z_arch_esf_t_mepc_OFFSET(sp)
+ sr t1, __struct_arch_esf_mepc_OFFSET(sp)
/* Restore argument registers from user stack */
- lr a0, __z_arch_esf_t_a0_OFFSET(sp)
- lr a1, __z_arch_esf_t_a1_OFFSET(sp)
- lr a2, __z_arch_esf_t_a2_OFFSET(sp)
- lr a3, __z_arch_esf_t_a3_OFFSET(sp)
- lr a4, __z_arch_esf_t_a4_OFFSET(sp)
- lr a5, __z_arch_esf_t_a5_OFFSET(sp)
- lr t0, __z_arch_esf_t_t0_OFFSET(sp)
+ lr a0, __struct_arch_esf_a0_OFFSET(sp)
+ lr a1, __struct_arch_esf_a1_OFFSET(sp)
+ lr a2, __struct_arch_esf_a2_OFFSET(sp)
+ lr a3, __struct_arch_esf_a3_OFFSET(sp)
+ lr a4, __struct_arch_esf_a4_OFFSET(sp)
+ lr a5, __struct_arch_esf_a5_OFFSET(sp)
+ lr t0, __struct_arch_esf_t0_OFFSET(sp)
#if defined(CONFIG_RISCV_ISA_RV32E)
/* Stack alignment for RV32E is 4 bytes */
addi sp, sp, -4
@@ -519,7 +555,7 @@ valid_syscall_id:
#endif /* CONFIG_RISCV_ISA_RV32E */
/* Update a0 (return value) on the stack */
- sr a0, __z_arch_esf_t_a0_OFFSET(sp)
+ sr a0, __struct_arch_esf_a0_OFFSET(sp)
/* Disable IRQs again before leaving */
csrc mstatus, MSTATUS_IEN
@@ -534,7 +570,7 @@ is_interrupt:
* If we came from userspace then we need to reconfigure the
* PMP for kernel mode stack guard.
*/
- lr t0, __z_arch_esf_t_mstatus_OFFSET(sp)
+ lr t0, __struct_arch_esf_mstatus_OFFSET(sp)
li t1, MSTATUS_MPP
and t0, t0, t1
bnez t0, 1f
@@ -665,7 +701,7 @@ no_reschedule:
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
/* Restore context at SOC level */
- addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
+ addi a0, sp, __struct_arch_esf_soc_context_OFFSET
jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
@@ -683,8 +719,8 @@ fp_trap_exit:
#endif
/* Restore MEPC and MSTATUS registers */
- lr t0, __z_arch_esf_t_mepc_OFFSET(sp)
- lr t2, __z_arch_esf_t_mstatus_OFFSET(sp)
+ lr t0, __struct_arch_esf_mepc_OFFSET(sp)
+ lr t2, __struct_arch_esf_mstatus_OFFSET(sp)
csrw mepc, t0
csrw mstatus, t2
@@ -711,7 +747,7 @@ fp_trap_exit:
sb t1, %tprel_lo(is_user_mode)(t0)
/* preserve stack pointer for next exception entry */
- add t0, sp, __z_arch_esf_t_SIZEOF
+ add t0, sp, __struct_arch_esf_SIZEOF
sr t0, _curr_cpu_arch_user_exc_sp(s0)
j 2f
@@ -720,13 +756,13 @@ fp_trap_exit:
* We are returning to kernel mode. Store the stack pointer to
* be re-loaded further down.
*/
- addi t0, sp, __z_arch_esf_t_SIZEOF
- sr t0, __z_arch_esf_t_sp_OFFSET(sp)
+ addi t0, sp, __struct_arch_esf_SIZEOF
+ sr t0, __struct_arch_esf_sp_OFFSET(sp)
2:
#endif
/* Restore s0 (it is no longer ours) */
- lr s0, __z_arch_esf_t_s0_OFFSET(sp)
+ lr s0, __struct_arch_esf_s0_OFFSET(sp)
#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
SOC_ISR_SW_UNSTACKING
@@ -736,10 +772,10 @@ fp_trap_exit:
#ifdef CONFIG_USERSPACE
/* retrieve saved stack pointer */
- lr sp, __z_arch_esf_t_sp_OFFSET(sp)
+ lr sp, __struct_arch_esf_sp_OFFSET(sp)
#else
/* remove esf from the stack */
- addi sp, sp, __z_arch_esf_t_SIZEOF
+ addi sp, sp, __struct_arch_esf_SIZEOF
#endif
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
diff --git a/arch/riscv/core/offsets/offsets.c b/arch/riscv/core/offsets/offsets.c
index 9bc9306c2e9..99eba096824 100644
--- a/arch/riscv/core/offsets/offsets.c
+++ b/arch/riscv/core/offsets/offsets.c
@@ -13,6 +13,7 @@
* structures.
*/
+#include
#include
#include
#include
@@ -88,43 +89,47 @@ GEN_OFFSET_SYM(_thread_arch_t, exception_depth);
#endif /* CONFIG_FPU_SHARING */
/* esf member offsets */
-GEN_OFFSET_SYM(z_arch_esf_t, ra);
-GEN_OFFSET_SYM(z_arch_esf_t, t0);
-GEN_OFFSET_SYM(z_arch_esf_t, t1);
-GEN_OFFSET_SYM(z_arch_esf_t, t2);
-GEN_OFFSET_SYM(z_arch_esf_t, a0);
-GEN_OFFSET_SYM(z_arch_esf_t, a1);
-GEN_OFFSET_SYM(z_arch_esf_t, a2);
-GEN_OFFSET_SYM(z_arch_esf_t, a3);
-GEN_OFFSET_SYM(z_arch_esf_t, a4);
-GEN_OFFSET_SYM(z_arch_esf_t, a5);
+GEN_OFFSET_STRUCT(arch_esf, ra);
+GEN_OFFSET_STRUCT(arch_esf, t0);
+GEN_OFFSET_STRUCT(arch_esf, t1);
+GEN_OFFSET_STRUCT(arch_esf, t2);
+GEN_OFFSET_STRUCT(arch_esf, a0);
+GEN_OFFSET_STRUCT(arch_esf, a1);
+GEN_OFFSET_STRUCT(arch_esf, a2);
+GEN_OFFSET_STRUCT(arch_esf, a3);
+GEN_OFFSET_STRUCT(arch_esf, a4);
+GEN_OFFSET_STRUCT(arch_esf, a5);
#if !defined(CONFIG_RISCV_ISA_RV32E)
-GEN_OFFSET_SYM(z_arch_esf_t, t3);
-GEN_OFFSET_SYM(z_arch_esf_t, t4);
-GEN_OFFSET_SYM(z_arch_esf_t, t5);
-GEN_OFFSET_SYM(z_arch_esf_t, t6);
-GEN_OFFSET_SYM(z_arch_esf_t, a6);
-GEN_OFFSET_SYM(z_arch_esf_t, a7);
+GEN_OFFSET_STRUCT(arch_esf, t3);
+GEN_OFFSET_STRUCT(arch_esf, t4);
+GEN_OFFSET_STRUCT(arch_esf, t5);
+GEN_OFFSET_STRUCT(arch_esf, t6);
+GEN_OFFSET_STRUCT(arch_esf, a6);
+GEN_OFFSET_STRUCT(arch_esf, a7);
#endif /* !CONFIG_RISCV_ISA_RV32E */
-GEN_OFFSET_SYM(z_arch_esf_t, mepc);
-GEN_OFFSET_SYM(z_arch_esf_t, mstatus);
+GEN_OFFSET_STRUCT(arch_esf, mepc);
+GEN_OFFSET_STRUCT(arch_esf, mstatus);
-GEN_OFFSET_SYM(z_arch_esf_t, s0);
+GEN_OFFSET_STRUCT(arch_esf, s0);
#ifdef CONFIG_USERSPACE
-GEN_OFFSET_SYM(z_arch_esf_t, sp);
+GEN_OFFSET_STRUCT(arch_esf, sp);
#endif
+#ifdef CONFIG_EXTRA_EXCEPTION_INFO
+GEN_OFFSET_STRUCT(arch_esf, csf);
+#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
+
#if defined(CONFIG_RISCV_SOC_CONTEXT_SAVE)
-GEN_OFFSET_SYM(z_arch_esf_t, soc_context);
+GEN_OFFSET_STRUCT(arch_esf, soc_context);
#endif
#if defined(CONFIG_RISCV_SOC_OFFSETS)
GEN_SOC_OFFSET_SYMS();
#endif
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));
#ifdef CONFIG_EXCEPTION_DEBUG
GEN_ABSOLUTE_SYM(__callee_saved_t_SIZEOF, ROUND_UP(sizeof(_callee_saved_t), ARCH_STACK_PTR_ALIGN));
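The `GEN_OFFSET_SYM(z_arch_esf_t, ...)` to `GEN_OFFSET_STRUCT(arch_esf, ...)` rename follows the typedef removal: symbols are now derived from the struct tag, yielding the `__struct_arch_esf_*_OFFSET` names consumed by the assembly above. A simplified sketch of the generation pattern (the real macros live in `gen_offset.h` and are wrapped in a dummy function by `GEN_ABS_SYM_BEGIN`/`GEN_ABS_SYM_END`):

```c
#include <stddef.h>

/* Emit an assembler-visible absolute symbol carrying `value`. */
#define GEN_ABSOLUTE_SYM(name, value) \
	__asm__(".globl\t" #name "\n\t.equ\t" #name ",%c0" :: "n"(value))

#define GEN_OFFSET_STRUCT(S, M) \
	GEN_ABSOLUTE_SYM(__struct_##S##_##M##_OFFSET, offsetof(struct S, M))

struct arch_esf {
	unsigned long mepc; /* stand-in member for the sketch */
};

void offsets_syms(void) /* extended asm operands require a function body */
{
	GEN_OFFSET_STRUCT(arch_esf, mepc); /* -> __struct_arch_esf_mepc_OFFSET */
}
```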
diff --git a/arch/riscv/core/pmp.c b/arch/riscv/core/pmp.c
index 50b5dd58f5d..e41eb8d4bb0 100644
--- a/arch/riscv/core/pmp.c
+++ b/arch/riscv/core/pmp.c
@@ -204,6 +204,34 @@ static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
return ok;
}
+static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
+ unsigned long *pmp_addr, unsigned long *pmp_cfg,
+ unsigned int index_limit)
+{
+ /*
+ * We'll be using MPRV. Make a fallback entry with everything
+ * accessible as if no PMP entries were matched which is otherwise
+ * the default behavior for m-mode without MPRV.
+ */
+ bool ok = set_pmp_entry(index_p, PMP_R | PMP_W | PMP_X,
+ 0, 0, pmp_addr, pmp_cfg, index_limit);
+
+#ifdef CONFIG_QEMU_TARGET
+ if (ok) {
+ /*
+ * Workaround: The above produced 0x1fffffff which is correct.
+ * But there is a QEMU bug that prevents it from interpreting
+ * this value correctly. Hardcode the special case used by
+ * QEMU to bypass this bug for now. The QEMU fix is here:
+ * https://lists.gnu.org/archive/html/qemu-devel/2022-04/msg00961.html
+ */
+ pmp_addr[*index_p - 1] = -1L;
+ }
+#endif
+
+ return ok;
+}
+
/**
* @brief Write a range of PMP entries to corresponding PMP registers
*
@@ -320,8 +348,8 @@ static unsigned int global_pmp_end_index;
*/
void z_riscv_pmp_init(void)
{
- unsigned long pmp_addr[4];
- unsigned long pmp_cfg[1];
+ unsigned long pmp_addr[5];
+ unsigned long pmp_cfg[2];
unsigned int index = 0;
/* The read-only area is always there for every mode */
@@ -351,10 +379,28 @@ void z_riscv_pmp_init(void)
(uintptr_t)z_interrupt_stacks[_current_cpu->id],
Z_RISCV_STACK_GUARD_SIZE,
pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-#endif
+ /*
+ * This early, the kernel init code uses the IRQ stack and we want to
+ * safeguard it as soon as possible. But we need a temporary default
+ * "catch all" PMP entry for MPRV to work. Later on, this entry will
+ * be set for each thread by z_riscv_pmp_stackguard_prepare().
+ */
+ set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+ /* Write those entries to PMP regs. */
write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+ /* Activate our non-locked PMP entries for m-mode */
+ csr_set(mstatus, MSTATUS_MPRV);
+
+ /* And forget about that last entry as we won't need it later */
+ index--;
+#else
+ /* Write those entries to PMP regs. */
+ write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+#endif
+
#ifdef CONFIG_SMP
#ifdef CONFIG_PMP_STACK_GUARD
/*
@@ -373,6 +419,7 @@ void z_riscv_pmp_init(void)
}
#endif
+ __ASSERT(index <= PMPCFG_STRIDE, "provision for one global word only");
global_pmp_cfg[0] = pmp_cfg[0];
global_pmp_last_addr = pmp_addr[index - 1];
global_pmp_end_index = index;
@@ -429,24 +476,7 @@ void z_riscv_pmp_stackguard_prepare(struct k_thread *thread)
set_pmp_entry(&index, PMP_NONE,
stack_bottom, Z_RISCV_STACK_GUARD_SIZE,
PMP_M_MODE(thread));
-
- /*
- * We'll be using MPRV. Make a fallback entry with everything
- * accessible as if no PMP entries were matched which is otherwise
- * the default behavior for m-mode without MPRV.
- */
- set_pmp_entry(&index, PMP_R | PMP_W | PMP_X,
- 0, 0, PMP_M_MODE(thread));
-#ifdef CONFIG_QEMU_TARGET
- /*
- * Workaround: The above produced 0x1fffffff which is correct.
- * But there is a QEMU bug that prevents it from interpreting this
- * value correctly. Hardcode the special case used by QEMU to
- * bypass this bug for now. The QEMU fix is here:
- * https://lists.gnu.org/archive/html/qemu-devel/2022-04/msg00961.html
- */
- thread->arch.m_mode_pmpaddr_regs[index-1] = -1L;
-#endif
+ set_pmp_mprv_catchall(&index, PMP_M_MODE(thread));
/* remember how many entries we use */
thread->arch.m_mode_pmp_end_index = index;
@@ -481,6 +511,37 @@ void z_riscv_pmp_stackguard_enable(struct k_thread *thread)
csr_set(mstatus, MSTATUS_MPRV);
}
+/**
+ * @brief Remove the PMP stack guard configuration from the actual PMP registers
+ */
+void z_riscv_pmp_stackguard_disable(void)
+{
+ unsigned long pmp_addr[PMP_M_MODE_SLOTS];
+ unsigned long pmp_cfg[PMP_M_MODE_SLOTS / sizeof(unsigned long)];
+ unsigned int index = global_pmp_end_index;
+
+ /* Retrieve the pmpaddr value matching the last global PMP slot. */
+ pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr;
+
+ /* Disable (non-locked) PMP entries for m-mode while we update them. */
+ csr_clear(mstatus, MSTATUS_MPRV);
+
+ /*
+ * Set a temporary default "catch all" PMP entry for MPRV to work,
+ * except for the global locked entries.
+ */
+ set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+ /* Write "catch all" entry and clear unlocked entries to PMP regs. */
+ write_pmp_entries(global_pmp_end_index, index,
+ true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+ if (PMP_DEBUG_DUMP) {
+ dump_pmp_regs("catch all register dump");
+ }
+}
+
#endif /* CONFIG_PMP_STACK_GUARD */
#ifdef CONFIG_USERSPACE
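The catch-all entry that `set_pmp_mprv_catchall()` centralizes is a whole-address-space NAPOT mapping. A sketch showing where the `0x1fffffff` in the QEMU workaround comes from (RV32 assumed; `napot_encode()` is illustrative, not a Zephyr API):

```c
#include <stdint.h>

/* NAPOT encoding: pmpaddr = (base >> 2) | ((size / 2 - 1) >> 2),
 * valid when `base` is aligned to the power-of-two `size`. */
static inline unsigned long napot_encode(unsigned long base, uint64_t size)
{
	return (base >> 2) | (unsigned long)((size / 2 - 1) >> 2);
}

/* napot_encode(0, 1ULL << 32) == 0x1fffffff: the whole 32-bit address
 * space, which the QEMU workaround then replaces with -1L. */
```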
diff --git a/arch/riscv/core/prep_c.c b/arch/riscv/core/prep_c.c
index b0fdd3a0569..e74a570cb66 100644
--- a/arch/riscv/core/prep_c.c
+++ b/arch/riscv/core/prep_c.c
@@ -19,6 +19,8 @@
#include
#include
#include
+#include
+#include
#if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
void soc_interrupt_init(void);
@@ -33,10 +35,17 @@ void soc_interrupt_init(void);
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
+
z_bss_zero();
z_data_copy();
#if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
soc_interrupt_init();
+#endif
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
#endif
z_cstart();
CODE_UNREACHABLE;
diff --git a/arch/riscv/core/reset.S b/arch/riscv/core/reset.S
index e9424e7a8e2..af96cb9757e 100644
--- a/arch/riscv/core/reset.S
+++ b/arch/riscv/core/reset.S
@@ -8,7 +8,7 @@
#include
#include
#include
-#include
+#include
#include "asm_macros.inc"
/* exports */
@@ -62,7 +62,8 @@ boot_first_core:
#ifdef CONFIG_INIT_STACKS
/* Pre-populate all bytes in z_interrupt_stacks with 0xAA */
la t0, z_interrupt_stacks
- li t1, __z_interrupt_stack_SIZEOF
+ /* Total size of all cores' IRQ stacks */
+ li t1, __z_interrupt_all_stacks_SIZEOF
add t1, t1, t0
/* Populate z_interrupt_stacks with 0xaaaaaaaa */
@@ -71,7 +72,7 @@ aa_loop:
sw t2, 0x00(t0)
addi t0, t0, 4
blt t0, t1, aa_loop
-#endif
+#endif /* CONFIG_INIT_STACKS */
/*
* Initially, setup stack pointer to
diff --git a/arch/riscv/core/smp.c b/arch/riscv/core/smp.c
index 68147f8880a..4ef287c4a7a 100644
--- a/arch/riscv/core/smp.c
+++ b/arch/riscv/core/smp.c
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -86,14 +87,15 @@ static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS];
#define IPI_SCHED 0
#define IPI_FPU_FLUSH 1
-void arch_sched_ipi(void)
+void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
unsigned int key = arch_irq_lock();
unsigned int id = _current_cpu->id;
unsigned int num_cpus = arch_num_cpus();
for (unsigned int i = 0; i < num_cpus; i++) {
- if (i != id && _kernel.cpus[i].arch.online) {
+ if ((i != id) && _kernel.cpus[i].arch.online &&
+ ((cpu_bitmap & BIT(i)) != 0)) {
atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED);
MSIP(_kernel.cpus[i].arch.hartid) = 1;
}
@@ -102,6 +104,11 @@ void arch_sched_ipi(void)
arch_irq_unlock(key);
}
+void arch_sched_broadcast_ipi(void)
+{
+ arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
+}
+
#ifdef CONFIG_FPU_SHARING
void arch_flush_fpu_ipi(unsigned int cpu)
{
@@ -165,5 +172,4 @@ int arch_smp_init(void)
return 0;
}
-SYS_INIT(arch_smp_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_SMP */
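Splitting `arch_sched_ipi()` into directed and broadcast variants lets the scheduler interrupt only the harts that actually need to reschedule. A usage sketch (prototypes shown inline for clarity; they normally come from the kernel-internal IPI header):

```c
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>

void arch_sched_directed_ipi(uint32_t cpu_bitmap); /* from the IPI header */
void arch_sched_broadcast_ipi(void);

void poke_cpus(void)
{
	arch_sched_directed_ipi(BIT(1) | BIT(2)); /* only CPUs 1 and 2 */
	arch_sched_broadcast_ipi();               /* all online CPUs but self */
}
```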
diff --git a/arch/riscv/core/stacktrace.c b/arch/riscv/core/stacktrace.c
index a85dcfbd82d..361e152f00c 100644
--- a/arch/riscv/core/stacktrace.c
+++ b/arch/riscv/core/stacktrace.c
@@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
+#include
#include
#include
#include
@@ -11,51 +12,80 @@
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
-uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf);
+uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf);
-#if __riscv_xlen == 32
- #define PR_REG "%08" PRIxPTR
-#elif __riscv_xlen == 64
- #define PR_REG "%016" PRIxPTR
-#endif
+typedef bool (*riscv_stacktrace_cb)(void *cookie, unsigned long addr, unsigned long sfp);
-#define MAX_STACK_FRAMES 8
+#define MAX_STACK_FRAMES CONFIG_ARCH_STACKWALK_MAX_FRAMES
struct stackframe {
uintptr_t fp;
uintptr_t ra;
};
-static bool in_stack_bound(uintptr_t addr)
+typedef bool (*stack_verify_fn)(uintptr_t, const struct k_thread *const, const struct arch_esf *);
+
+static inline bool in_irq_stack_bound(uintptr_t addr, uint8_t cpu_id)
+{
+ uintptr_t start, end;
+
+ start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
+ end = start + CONFIG_ISR_STACK_SIZE;
+
+ return (addr >= start) && (addr < end);
+}
+
+static inline bool in_kernel_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
{
#ifdef CONFIG_THREAD_STACK_INFO
uintptr_t start, end;
- if (_current == NULL || arch_is_in_isr()) {
- /* We were servicing an interrupt */
- int cpu_id;
+ start = thread->stack_info.start;
+ end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);
-#ifdef CONFIG_SMP
- cpu_id = arch_curr_cpu()->id;
+ return (addr >= start) && (addr < end);
#else
- cpu_id = 0;
+ ARG_UNUSED(addr);
+ ARG_UNUSED(thread);
+ /* Return false as we can't check if the addr is in the thread stack without stack info */
+ return false;
#endif
+}
- start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
- end = start + CONFIG_ISR_STACK_SIZE;
#ifdef CONFIG_USERSPACE
- /* TODO: handle user threads */
-#endif
+static inline bool in_user_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
+{
+ uintptr_t start, end;
+
+ /* See: zephyr/include/zephyr/arch/riscv/arch.h */
+ if (IS_ENABLED(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)) {
+ start = thread->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE;
} else {
- start = _current->stack_info.start;
- end = Z_STACK_PTR_ALIGN(_current->stack_info.start + _current->stack_info.size);
+ start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
}
+ end = Z_STACK_PTR_ALIGN(thread->arch.priv_stack_start + K_KERNEL_STACK_RESERVED +
+ CONFIG_PRIVILEGED_STACK_SIZE);
return (addr >= start) && (addr < end);
-#else
- ARG_UNUSED(addr);
- return true;
-#endif /* CONFIG_THREAD_STACK_INFO */
+}
+#endif /* CONFIG_USERSPACE */
+
+static bool in_stack_bound(uintptr_t addr, const struct k_thread *const thread,
+ const struct arch_esf *esf)
+{
+ ARG_UNUSED(esf);
+
+ if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
+ return false;
+ }
+
+#ifdef CONFIG_USERSPACE
+ if ((thread->base.user_options & K_USER) != 0) {
+ return in_user_thread_stack_bound(addr, thread);
+ }
+#endif /* CONFIG_USERSPACE */
+
+ return in_kernel_thread_stack_bound(addr, thread);
}
static inline bool in_text_region(uintptr_t addr)
@@ -65,62 +95,192 @@ static inline bool in_text_region(uintptr_t addr)
return (addr >= (uintptr_t)&__text_region_start) && (addr < (uintptr_t)&__text_region_end);
}
-#ifdef CONFIG_RISCV_ENABLE_FRAME_POINTER
-void z_riscv_unwind_stack(const z_arch_esf_t *esf)
+#ifdef CONFIG_FRAME_POINTER
+static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
+ const struct arch_esf *esf, stack_verify_fn vrfy,
+ const _callee_saved_t *csf)
{
- uintptr_t fp = esf->s0;
+ uintptr_t fp, last_fp = 0;
uintptr_t ra;
struct stackframe *frame;
- if (esf == NULL) {
- return;
+ if (esf != NULL) {
+ /* Unwind the provided exception stack frame */
+ fp = esf->s0;
+ ra = esf->mepc;
+ } else if ((csf == NULL) || (csf == &_current->callee_saved)) {
+ /* Unwind current thread (default case when nothing is provided) */
+ fp = (uintptr_t)__builtin_frame_address(0);
+ ra = (uintptr_t)walk_stackframe;
+ } else {
+ /* Unwind the provided thread */
+ fp = csf->s0;
+ ra = csf->ra;
}
- LOG_ERR("call trace:");
+ for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy(fp, thread, esf) && (fp > last_fp); i++) {
+ if (in_text_region(ra) && !cb(cookie, ra, fp)) {
+ break;
+ }
+ last_fp = fp;
- for (int i = 0; (i < MAX_STACK_FRAMES) && (fp != 0U) && in_stack_bound(fp);) {
+ /* Unwind to the previous frame */
frame = (struct stackframe *)fp - 1;
- ra = frame->ra;
- if (in_text_region(ra)) {
- LOG_ERR(" %2d: fp: " PR_REG " ra: " PR_REG, i, fp, ra);
- /*
- * Increment the iterator only if `ra` is within the text region to get the
- * most out of it
+
+ if ((i == 0) && (esf != NULL)) {
+ /* Print `esf->ra` if we are at the top of the stack */
+ if (in_text_region(esf->ra) && !cb(cookie, esf->ra, fp)) {
+ break;
+ }
+ /**
+ * For the first stack frame, the `ra` is not stored in the frame if the
+ * preempted function doesn't call any other function; in that case we observe:
+ *
+ * .-------------.
+ * frame[0]->fp ---> | frame[0] fp |
+ * :-------------:
+ * frame[0]->ra ---> | frame[1] fp |
+ * | frame[1] ra |
+ * :~~~~~~~~~~~~~:
+ * | frame[N] fp |
+ *
+ * Instead of:
+ *
+ * .-------------.
+ * frame[0]->fp ---> | frame[0] fp |
+ * frame[0]->ra ---> | frame[1] ra |
+ * :-------------:
+ * | frame[1] fp |
+ * | frame[1] ra |
+ * :~~~~~~~~~~~~~:
+ * | frame[N] fp |
+ *
+ * Check if `frame->ra` actually points to a `fp`, and adjust accordingly
*/
- i++;
+ if (vrfy(frame->ra, thread, esf)) {
+ fp = frame->ra;
+ frame = (struct stackframe *)fp;
+ }
}
+
fp = frame->fp;
+ ra = frame->ra;
}
-
- LOG_ERR("");
}
-#else /* !CONFIG_RISCV_ENABLE_FRAME_POINTER */
-void z_riscv_unwind_stack(const z_arch_esf_t *esf)
+#else /* !CONFIG_FRAME_POINTER */
+register uintptr_t current_stack_pointer __asm__("sp");
+static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
+ const struct arch_esf *esf, stack_verify_fn vrfy,
+ const _callee_saved_t *csf)
{
- uintptr_t sp = z_riscv_get_sp_before_exc(esf);
+ uintptr_t sp;
uintptr_t ra;
- uintptr_t *ksp = (uintptr_t *)sp;
+ uintptr_t *ksp, last_ksp = 0;
- if (esf == NULL) {
- return;
+ if (esf != NULL) {
+ /* Unwind the provided exception stack frame */
+ sp = z_riscv_get_sp_before_exc(esf);
+ ra = esf->mepc;
+ } else if ((csf == NULL) || (csf == &_current->callee_saved)) {
+ /* Unwind current thread (default case when nothing is provided) */
+ sp = current_stack_pointer;
+ ra = (uintptr_t)walk_stackframe;
+ } else {
+ /* Unwind the provided thread */
+ sp = csf->sp;
+ ra = csf->ra;
}
- LOG_ERR("call trace:");
-
- for (int i = 0;
- (i < MAX_STACK_FRAMES) && ((uintptr_t)ksp != 0U) && in_stack_bound((uintptr_t)ksp);
- ksp++) {
- ra = *ksp;
+ ksp = (uintptr_t *)sp;
+ for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy((uintptr_t)ksp, thread, esf) &&
+ ((uintptr_t)ksp > last_ksp);) {
if (in_text_region(ra)) {
- LOG_ERR(" %2d: sp: " PR_REG " ra: " PR_REG, i, (uintptr_t)ksp, ra);
+ if (!cb(cookie, ra, POINTER_TO_UINT(ksp))) {
+ break;
+ }
/*
* Increment the iterator only if `ra` is within the text region to get the
* most out of it
*/
i++;
}
+ last_ksp = (uintptr_t)ksp;
+ /* Unwind to the previous frame */
+ ra = ((struct arch_esf *)ksp++)->ra;
+ }
+}
+#endif /* CONFIG_FRAME_POINTER */
+
+void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
+ const struct k_thread *thread, const struct arch_esf *esf)
+{
+ if (thread == NULL) {
+ /* In case `thread` is NULL, default to `_current` and try to unwind */
+ thread = _current;
+ }
+
+ walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound,
+ &thread->callee_saved);
+}
+
+#ifdef CONFIG_EXCEPTION_STACK_TRACE
+static bool in_fatal_stack_bound(uintptr_t addr, const struct k_thread *const thread,
+ const struct arch_esf *esf)
+{
+ if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
+ return false;
+ }
+
+ if ((thread == NULL) || arch_is_in_isr()) {
+ /* We were servicing an interrupt */
+ uint8_t cpu_id = IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0U;
+
+ return in_irq_stack_bound(addr, cpu_id);
}
+ return in_stack_bound(addr, thread, esf);
+}
+
+#if __riscv_xlen == 32
+#define PR_REG "%08" PRIxPTR
+#elif __riscv_xlen == 64
+#define PR_REG "%016" PRIxPTR
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+#define SFP "fp"
+#else
+#define SFP "sp"
+#endif /* CONFIG_FRAME_POINTER */
+
+#ifdef CONFIG_SYMTAB
+#define LOG_STACK_TRACE(idx, sfp, ra, name, offset) \
+ LOG_ERR(" %2d: " SFP ": " PR_REG " ra: " PR_REG " [%s+0x%x]", idx, sfp, ra, name, \
+ offset)
+#else
+#define LOG_STACK_TRACE(idx, sfp, ra, name, offset) \
+ LOG_ERR(" %2d: " SFP ": " PR_REG " ra: " PR_REG, idx, sfp, ra)
+#endif /* CONFIG_SYMTAB */
+
+static bool print_trace_address(void *arg, unsigned long ra, unsigned long sfp)
+{
+ int *i = arg;
+#ifdef CONFIG_SYMTAB
+ uint32_t offset = 0;
+ const char *name = symtab_find_symbol_name(ra, &offset);
+#endif /* CONFIG_SYMTAB */
+
+ LOG_STACK_TRACE((*i)++, sfp, ra, name, offset);
+
+ return true;
+}
+
+void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf)
+{
+ int i = 0;
+
+ LOG_ERR("call trace:");
+ walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
LOG_ERR("");
}
-#endif /* CONFIG_RISCV_ENABLE_FRAME_POINTER */
+#endif /* CONFIG_EXCEPTION_STACK_TRACE */
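With the unwinder generalized, `arch_stack_walk()` becomes a reusable primitive instead of a fatal-error-only printer. A sketch of a consumer that captures the current thread's return addresses; returning `false` from the callback stops the walk:

```c
#include <zephyr/kernel.h>

struct trace_buf {
	unsigned long addr[16];
	int n;
};

static bool collect_cb(void *cookie, unsigned long addr)
{
	struct trace_buf *buf = cookie;

	buf->addr[buf->n++] = addr;
	return buf->n < (int)ARRAY_SIZE(buf->addr); /* false stops the walk */
}

void capture_current_trace(void)
{
	struct trace_buf buf = { .n = 0 };

	/* NULL thread defaults to _current; NULL esf = no fault context */
	arch_stack_walk(collect_cb, &buf, NULL, NULL);
}
```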
diff --git a/arch/riscv/core/thread.c b/arch/riscv/core/thread.c
index de739f02895..59adbc42e46 100644
--- a/arch/riscv/core/thread.c
+++ b/arch/riscv/core/thread.c
@@ -23,15 +23,15 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
void *p1, void *p2, void *p3)
{
extern void z_riscv_thread_start(void);
- struct __esf *stack_init;
+ struct arch_esf *stack_init;
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
#endif
/* Initial stack frame for thread */
- stack_init = (struct __esf *)Z_STACK_PTR_ALIGN(
- Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr)
+ stack_init = (struct arch_esf *)Z_STACK_PTR_ALIGN(
+ Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr)
);
/* Setup the initial stack frame */
@@ -144,6 +144,12 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
K_KERNEL_STACK_RESERVED +
CONFIG_PRIVILEGED_STACK_SIZE);
+#ifdef CONFIG_INIT_STACKS
+ /* Initialize the privileged stack */
+ (void)memset((void *)_current->arch.priv_stack_start, 0xaa,
+ Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE));
+#endif /* CONFIG_INIT_STACKS */
+
top_of_user_stack = Z_STACK_PTR_ALIGN(
_current->stack_info.start +
_current->stack_info.size -
@@ -189,6 +195,18 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
CODE_UNREACHABLE;
}
+int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
+ size_t *unused_ptr)
+{
+ if ((thread->base.user_options & K_USER) != K_USER) {
+ return -EINVAL;
+ }
+
+ *stack_size = Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE);
+
+ return z_stack_space_get((void *)thread->arch.priv_stack_start, *stack_size, unused_ptr);
+}
+
#endif /* CONFIG_USERSPACE */
#ifndef CONFIG_MULTITHREADING
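The new `arch_thread_priv_stack_space_get()` mirrors the user-stack query so tooling can report privileged-stack headroom as well, relying on the `0xaa` fill added above under `CONFIG_INIT_STACKS`. A usage sketch (prototype shown inline; it normally comes from the arch interface headers):

```c
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

int arch_thread_priv_stack_space_get(const struct k_thread *thread,
				     size_t *stack_size, size_t *unused_ptr);

void log_priv_stack_headroom(const struct k_thread *t)
{
	size_t size, unused;

	if (arch_thread_priv_stack_space_get(t, &size, &unused) == 0) {
		printk("priv stack: %zu of %zu bytes unused\n", unused, size);
	} /* returns -EINVAL for kernel-only threads (no K_USER option) */
}
```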
diff --git a/arch/riscv/include/kernel_arch_func.h b/arch/riscv/include/kernel_arch_func.h
index bdfc0527b95..c5ed6ff3f7f 100644
--- a/arch/riscv/include/kernel_arch_func.h
+++ b/arch/riscv/include/kernel_arch_func.h
@@ -71,9 +71,9 @@ arch_switch(void *switch_to, void **switched_from)
/* Thin wrapper around z_riscv_fatal_error_csf */
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf);
+ const struct arch_esf *esf);
-FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const z_arch_esf_t *esf,
+FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
const _callee_saved_t *csf);
static inline bool arch_is_in_isr(void)
diff --git a/arch/riscv/include/offsets_short_arch.h b/arch/riscv/include/offsets_short_arch.h
index 3d3a878f16e..27c01a77461 100644
--- a/arch/riscv/include/offsets_short_arch.h
+++ b/arch/riscv/include/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
#define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
diff --git a/arch/riscv/include/pmp.h b/arch/riscv/include/pmp.h
index 20877f14fd2..ca4f37f3a2a 100644
--- a/arch/riscv/include/pmp.h
+++ b/arch/riscv/include/pmp.h
@@ -10,6 +10,7 @@
void z_riscv_pmp_init(void);
void z_riscv_pmp_stackguard_prepare(struct k_thread *thread);
void z_riscv_pmp_stackguard_enable(struct k_thread *thread);
+void z_riscv_pmp_stackguard_disable(void);
void z_riscv_pmp_usermode_init(struct k_thread *thread);
void z_riscv_pmp_usermode_prepare(struct k_thread *thread);
void z_riscv_pmp_usermode_enable(struct k_thread *thread);
diff --git a/arch/sparc/core/fatal.c b/arch/sparc/core/fatal.c
index 55100606b92..40fd9d16792 100644
--- a/arch/sparc/core/fatal.c
+++ b/arch/sparc/core/fatal.c
@@ -122,7 +122,7 @@ static const struct {
{ .tt = 0x0A, .desc = "tag_overflow", },
};
-static void print_trap_type(const z_arch_esf_t *esf)
+static void print_trap_type(const struct arch_esf *esf)
{
const int tt = (esf->tbr & TBR_TT) >> TBR_TT_BIT;
const char *desc = "unknown";
@@ -142,7 +142,7 @@ static void print_trap_type(const z_arch_esf_t *esf)
LOG_ERR("tt = 0x%02X, %s", tt, desc);
}
-static void print_integer_registers(const z_arch_esf_t *esf)
+static void print_integer_registers(const struct arch_esf *esf)
{
const struct savearea *flushed = (struct savearea *) esf->out[6];
@@ -159,7 +159,7 @@ static void print_integer_registers(const z_arch_esf_t *esf)
}
}
-static void print_special_registers(const z_arch_esf_t *esf)
+static void print_special_registers(const struct arch_esf *esf)
{
LOG_ERR(
"psr: %08x wim: %08x tbr: %08x y: %08x",
@@ -168,7 +168,7 @@ static void print_special_registers(const z_arch_esf_t *esf)
LOG_ERR(" pc: %08x npc: %08x", esf->pc, esf->npc);
}
-static void print_backtrace(const z_arch_esf_t *esf)
+static void print_backtrace(const struct arch_esf *esf)
{
const int MAX_LOGLINES = 40;
const struct savearea *s = (struct savearea *) esf->out[6];
@@ -190,7 +190,7 @@ static void print_backtrace(const z_arch_esf_t *esf)
}
}
-static void print_all(const z_arch_esf_t *esf)
+static void print_all(const struct arch_esf *esf)
{
LOG_ERR("");
print_trap_type(esf);
@@ -205,7 +205,7 @@ static void print_all(const z_arch_esf_t *esf)
#endif /* CONFIG_EXCEPTION_DEBUG */
FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf)
+ const struct arch_esf *esf)
{
#if CONFIG_EXCEPTION_DEBUG
if (esf != NULL) {
diff --git a/arch/sparc/core/fault_trap.S b/arch/sparc/core/fault_trap.S
index c1a8977ba23..53b3d9f0b98 100644
--- a/arch/sparc/core/fault_trap.S
+++ b/arch/sparc/core/fault_trap.S
@@ -72,7 +72,7 @@ SECTION_FUNC(TEXT, __sparc_trap_except_reason)
mov %l5, %g3
/* Allocate an ABI stack frame and exception stack frame */
- sub %fp, 96 + __z_arch_esf_t_SIZEOF, %sp
+ sub %fp, 96 + __struct_arch_esf_SIZEOF, %sp
/*
* %fp: %sp of interrupted task
* %sp: %sp of interrupted task - ABI_frame - esf
@@ -81,19 +81,19 @@ SECTION_FUNC(TEXT, __sparc_trap_except_reason)
mov %l7, %o0
/* Fill in the content of the exception stack frame */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
- std %i0, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x00]
- std %i2, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x08]
- std %i4, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x10]
- std %i6, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x18]
- std %g0, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x00]
- std %g2, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x08]
- std %g4, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x10]
- std %g6, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x18]
+ std %i0, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x00]
+ std %i2, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x08]
+ std %i4, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x10]
+ std %i6, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x18]
+ std %g0, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x00]
+ std %g2, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x08]
+ std %g4, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x10]
+ std %g6, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x18]
#endif
- std %l0, [%sp + 96 + __z_arch_esf_t_psr_OFFSET] /* psr pc */
- std %l2, [%sp + 96 + __z_arch_esf_t_npc_OFFSET] /* npc wim */
+ std %l0, [%sp + 96 + __struct_arch_esf_psr_OFFSET] /* psr pc */
+ std %l2, [%sp + 96 + __struct_arch_esf_npc_OFFSET] /* npc wim */
rd %y, %l7
- std %l6, [%sp + 96 + __z_arch_esf_t_tbr_OFFSET] /* tbr y */
+ std %l6, [%sp + 96 + __struct_arch_esf_tbr_OFFSET] /* tbr y */
/* Enable traps, raise PIL to mask all maskable interrupts. */
or %l0, PSR_PIL, %o2
diff --git a/arch/sparc/core/irq_offload.c b/arch/sparc/core/irq_offload.c
index cd5acc08ba2..f36e957084b 100644
--- a/arch/sparc/core/irq_offload.c
+++ b/arch/sparc/core/irq_offload.c
@@ -39,3 +39,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
irq_unlock(key);
}
+
+void arch_irq_offload_init(void)
+{
+}
diff --git a/arch/sparc/core/offsets/offsets.c b/arch/sparc/core/offsets/offsets.c
index 3796117ac09..023ef7452c4 100644
--- a/arch/sparc/core/offsets/offsets.c
+++ b/arch/sparc/core/offsets/offsets.c
@@ -31,11 +31,11 @@ GEN_OFFSET_SYM(_callee_saved_t, i6);
GEN_OFFSET_SYM(_callee_saved_t, o6);
/* esf member offsets */
-GEN_OFFSET_SYM(z_arch_esf_t, out);
-GEN_OFFSET_SYM(z_arch_esf_t, global);
-GEN_OFFSET_SYM(z_arch_esf_t, npc);
-GEN_OFFSET_SYM(z_arch_esf_t, psr);
-GEN_OFFSET_SYM(z_arch_esf_t, tbr);
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t)));
+GEN_OFFSET_STRUCT(arch_esf, out);
+GEN_OFFSET_STRUCT(arch_esf, global);
+GEN_OFFSET_STRUCT(arch_esf, npc);
+GEN_OFFSET_STRUCT(arch_esf, psr);
+GEN_OFFSET_STRUCT(arch_esf, tbr);
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));
GEN_ABS_SYM_END
diff --git a/arch/sparc/core/prep_c.c b/arch/sparc/core/prep_c.c
index 9ad3955a190..5b4a440a63c 100644
--- a/arch/sparc/core/prep_c.c
+++ b/arch/sparc/core/prep_c.c
@@ -10,6 +10,8 @@
*/
#include
+#include
+#include
/**
* @brief Prepare to and run C code
@@ -19,7 +21,13 @@
void z_prep_c(void)
{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
z_data_copy();
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
+#endif
z_cstart();
CODE_UNREACHABLE;
}
diff --git a/arch/sparc/include/kernel_arch_func.h b/arch/sparc/include/kernel_arch_func.h
index 41f48ccc44a..8b79b130ad6 100644
--- a/arch/sparc/include/kernel_arch_func.h
+++ b/arch/sparc/include/kernel_arch_func.h
@@ -43,7 +43,7 @@ static inline void arch_switch(void *switch_to, void **switched_from)
}
FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf);
+ const struct arch_esf *esf);
static inline bool arch_is_in_isr(void)
{
diff --git a/arch/sparc/include/offsets_short_arch.h b/arch/sparc/include/offsets_short_arch.h
index c53f2b3705b..0f9272f332b 100644
--- a/arch/sparc/include/offsets_short_arch.h
+++ b/arch/sparc/include/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_SPARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_SPARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
-#include
+#include
#define _thread_offset_to_y \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_y_OFFSET)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 487d6e94bd9..52ef70a0d13 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -205,7 +205,7 @@ config MAX_IRQ_LINES
config IRQ_OFFLOAD_VECTOR
int "IDT vector to use for IRQ offload"
default 33
- range 32 255
+ range 32 $(UINT8_MAX)
depends on IRQ_OFFLOAD
config PIC_DISABLE
diff --git a/arch/x86/core/Kconfig.ia32 b/arch/x86/core/Kconfig.ia32
index d789c87eb80..75a3c2bbb7c 100644
--- a/arch/x86/core/Kconfig.ia32
+++ b/arch/x86/core/Kconfig.ia32
@@ -163,12 +163,15 @@ config X86_DYNAMIC_IRQ_STUBS
endmenu
-config X86_EXCEPTION_STACK_TRACE
+config ARCH_HAS_STACKWALK
bool
default y
- depends on EXCEPTION_STACK_TRACE
+ select DEBUG_INFO
+ select THREAD_STACK_INFO
+ depends on !OMIT_FRAME_POINTER
help
- Internal config to enable runtime stack traces on fatal exceptions.
+ Internal config to indicate that the arch_stack_walk() API is implemented
+ and can be enabled.
config X86_USE_THREAD_LOCAL_STORAGE
bool
diff --git a/arch/x86/core/Kconfig.intel64 b/arch/x86/core/Kconfig.intel64
index 2e6e7ebd00a..7b5359a7ecd 100644
--- a/arch/x86/core/Kconfig.intel64
+++ b/arch/x86/core/Kconfig.intel64
@@ -29,24 +29,27 @@ config X86_EXCEPTION_STACK_SIZE
support limited call-tree depth and must fit into the low core,
so they are typically smaller than the ISR stacks.
-config X86_EXCEPTION_STACK_TRACE
+config ARCH_HAS_STACKWALK
bool
default y
- depends on EXCEPTION_STACK_TRACE
+ select DEBUG_INFO
+ select THREAD_STACK_INFO
+ depends on !OMIT_FRAME_POINTER
depends on NO_OPTIMIZATIONS
help
- Internal config to enable runtime stack traces on fatal exceptions.
+ Internal config to indicate that the arch_stack_walk() API is implemented
+ and can be enabled.
config SCHED_IPI_VECTOR
int "IDT vector to use for scheduler IPI"
default 34
- range 33 255
+ range 33 $(UINT8_MAX)
depends on SMP
config TLB_IPI_VECTOR
int "IDT vector to use for TLB shootdown IPI"
default 35
- range 33 255
+ range 33 $(UINT8_MAX)
depends on SMP
# We should really only have to provide one of the following two values,
diff --git a/arch/x86/core/cache.c b/arch/x86/core/cache.c
index e80cb6d1dbf..476632115c1 100644
--- a/arch/x86/core/cache.c
+++ b/arch/x86/core/cache.c
@@ -119,3 +119,7 @@ int arch_dcache_flush_and_invd_range(void *start_addr, size_t size)
{
return arch_dcache_flush_range(start_addr, size);
}
+
+void arch_cache_init(void)
+{
+}
diff --git a/arch/x86/core/common.S b/arch/x86/core/common.S
index 1f390df42fb..07c0de8c8dc 100644
--- a/arch/x86/core/common.S
+++ b/arch/x86/core/common.S
@@ -22,9 +22,35 @@
*/
cmpl $MULTIBOOT_EAX_MAGIC, %eax
+
+#ifndef CONFIG_DYNAMIC_BOOTARGS
je 1f
xorl %ebx, %ebx
1:
+#else
+ movl $multiboot_cmdline, %edi
+ je setup_copy_cmdline
+ xorl %ebx, %ebx
+ jmp end_cmdline
+
+setup_copy_cmdline:
+ testl $MULTIBOOT_INFO_FLAGS_CMDLINE, __multiboot_info_t_flags_OFFSET(%ebx)
+ jz end_cmdline
+
+ movl $multiboot_cmdline + CONFIG_BOOTARGS_ARGS_BUFFER_SIZE - 1, %edx
+ movl __multiboot_info_t_cmdline_OFFSET(%ebx), %esi
+copy_cmdline:
+ cmpl %esi, %edx
+ je end_cmdline
+ cmpb $0, (%esi)
+ je end_cmdline
+
+ movsb
+ jmp copy_cmdline
+end_cmdline:
+ movb $0, (%edi)
+#endif
+
#endif
#ifdef CONFIG_PIC_DISABLE
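The multiboot command-line copy loop is easier to audit next to its C-level intent: a bounded byte copy into `multiboot_cmdline[]` that is always NUL-terminated, even when the source overflows the buffer. A sketch of the equivalent logic, assuming the Kconfig buffer size referenced above:

```c
/* CONFIG_BOOTARGS_ARGS_BUFFER_SIZE comes from Kconfig in the real build. */
#define CMDLINE_BUF_SIZE 256

char multiboot_cmdline[CMDLINE_BUF_SIZE];

static void copy_cmdline(const char *src)
{
	char *dst = multiboot_cmdline;
	const char *end = multiboot_cmdline + CMDLINE_BUF_SIZE - 1;

	while (dst < end && *src != '\0') {
		*dst++ = *src++; /* the `movsb` loop */
	}

	*dst = '\0'; /* matches `movb $0, (%edi)` at end_cmdline */
}
```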
diff --git a/arch/x86/core/cpuhalt.c b/arch/x86/core/cpuhalt.c
index 77c727bd3f9..b3f53a243e0 100644
--- a/arch/x86/core/cpuhalt.c
+++ b/arch/x86/core/cpuhalt.c
@@ -7,6 +7,7 @@
#include
#include
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
__pinned_func
void arch_cpu_idle(void)
{
@@ -15,7 +16,9 @@ void arch_cpu_idle(void)
"sti\n\t"
"hlt\n\t");
}
+#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
__pinned_func
void arch_cpu_atomic_idle(unsigned int key)
{
@@ -42,3 +45,4 @@ void arch_cpu_atomic_idle(unsigned int key)
__asm__ volatile("cli");
}
}
+#endif
diff --git a/arch/x86/core/efi.c b/arch/x86/core/efi.c
index 425b0dcde86..64faeb9efa5 100644
--- a/arch/x86/core/efi.c
+++ b/arch/x86/core/efi.c
@@ -18,6 +18,10 @@ static uint64_t __aligned(64) efi_stack[1024];
struct efi_boot_arg *efi;
+#ifdef CONFIG_DYNAMIC_BOOTARGS
+__pinned_noinit char efi_bootargs[CONFIG_BOOTARGS_ARGS_BUFFER_SIZE];
+#endif
+
void *efi_get_acpi_rsdp(void)
{
if (efi == NULL) {
@@ -33,8 +37,8 @@ void efi_init(struct efi_boot_arg *efi_arg)
return;
}
- z_phys_map((uint8_t **)&efi, (uintptr_t)efi_arg,
- sizeof(struct efi_boot_arg), 0);
+ k_mem_map_phys_bare((uint8_t **)&efi, (uintptr_t)efi_arg,
+ sizeof(struct efi_boot_arg), 0);
}
/* EFI thunk. Not a lot of code, but lots of context:
@@ -169,3 +173,10 @@ int arch_printk_char_out(int c)
return efi_console_putchar(c);
}
#endif
+
+#ifdef CONFIG_DYNAMIC_BOOTARGS
+const char *get_bootargs(void)
+{
+ return efi_bootargs;
+}
+#endif /* CONFIG_DYNAMIC_BOOTARGS */
diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c
index 370386d4af9..d43499a08d2 100644
--- a/arch/x86/core/fatal.c
+++ b/arch/x86/core/fatal.c
@@ -35,7 +35,7 @@ FUNC_NORETURN void arch_system_halt(unsigned int reason)
#ifdef CONFIG_THREAD_STACK_INFO
-static inline uintptr_t esf_get_sp(const z_arch_esf_t *esf)
+static inline uintptr_t esf_get_sp(const struct arch_esf *esf)
{
#ifdef CONFIG_X86_64
return esf->rsp;
@@ -84,7 +84,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
return (addr <= start) || (addr + size > end);
}
-#endif
+#endif /* CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
/**
@@ -120,40 +120,37 @@ bool z_x86_check_guard_page(uintptr_t addr)
}
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
-#ifdef CONFIG_EXCEPTION_DEBUG
-
-static inline uintptr_t esf_get_code(const z_arch_esf_t *esf)
-{
-#ifdef CONFIG_X86_64
- return esf->code;
-#else
- return esf->errorCode;
-#endif
-}
-
-#if defined(CONFIG_X86_EXCEPTION_STACK_TRACE)
+#if defined(CONFIG_ARCH_STACKWALK)
struct stack_frame {
uintptr_t next;
uintptr_t ret_addr;
-#ifndef CONFIG_X86_64
- uintptr_t args;
-#endif
};
-#define MAX_STACK_FRAMES 8
-
-__pinned_func
-static void unwind_stack(uintptr_t base_ptr, uint16_t cs)
+__pinned_func static void walk_stackframe(stack_trace_callback_fn cb, void *cookie,
+ const struct arch_esf *esf, int max_frames)
{
+ uintptr_t base_ptr;
+ uint16_t cs;
struct stack_frame *frame;
int i;
+ if (esf != NULL) {
+#ifdef CONFIG_X86_64
+ base_ptr = esf->rbp;
+#else /* x86 32-bit */
+ base_ptr = esf->ebp;
+#endif /* CONFIG_X86_64 */
+ cs = esf->cs;
+ } else {
+ return;
+ }
+
if (base_ptr == 0U) {
LOG_ERR("NULL base ptr");
return;
}
- for (i = 0; i < MAX_STACK_FRAMES; i++) {
+ for (i = 0; i < max_frames; i++) {
if (base_ptr % sizeof(base_ptr) != 0U) {
LOG_ERR("unaligned frame ptr");
return;
@@ -178,17 +175,58 @@ static void unwind_stack(uintptr_t base_ptr, uint16_t cs)
if (frame->ret_addr == 0U) {
break;
}
+
+ if (!cb(cookie, frame->ret_addr)) {
+ break;
+ }
+
+ base_ptr = frame->next;
+ }
+}
+
+void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
+ const struct k_thread *thread, const struct arch_esf *esf)
+{
+ ARG_UNUSED(thread);
+
+ walk_stackframe(callback_fn, cookie, esf,
+ CONFIG_ARCH_STACKWALK_MAX_FRAMES);
+}
+#endif /* CONFIG_ARCH_STACKWALK */
+
+#if defined(CONFIG_EXCEPTION_STACK_TRACE)
+static bool print_trace_address(void *arg, unsigned long addr)
+{
+ int *i = arg;
+
#ifdef CONFIG_X86_64
- LOG_ERR(" 0x%016lx", frame->ret_addr);
+ LOG_ERR(" %d: 0x%016lx", (*i)++, addr);
#else
- LOG_ERR(" 0x%08lx (0x%lx)", frame->ret_addr, frame->args);
+ LOG_ERR(" %d: 0x%08lx", (*i)++, addr);
#endif
- base_ptr = frame->next;
- }
+
+ return true;
}
-#endif /* CONFIG_X86_EXCEPTION_STACK_TRACE */
-static inline uintptr_t get_cr3(const z_arch_esf_t *esf)
+static ALWAYS_INLINE void unwind_stack(const struct arch_esf *esf)
+{
+ int i = 0;
+
+ walk_stackframe(print_trace_address, &i, esf, CONFIG_ARCH_STACKWALK_MAX_FRAMES);
+}
+#endif /* CONFIG_EXCEPTION_STACK_TRACE */
+
+#ifdef CONFIG_EXCEPTION_DEBUG
+static inline uintptr_t esf_get_code(const struct arch_esf *esf)
+{
+#ifdef CONFIG_X86_64
+ return esf->code;
+#else
+ return esf->errorCode;
+#endif
+}
+
+static inline uintptr_t get_cr3(const struct arch_esf *esf)
{
#if defined(CONFIG_USERSPACE) && defined(CONFIG_X86_KPTI)
/* If the interrupted thread was in user mode, we did a page table
@@ -206,14 +244,14 @@ static inline uintptr_t get_cr3(const z_arch_esf_t *esf)
return z_x86_cr3_get();
}
-static inline pentry_t *get_ptables(const z_arch_esf_t *esf)
+static inline pentry_t *get_ptables(const struct arch_esf *esf)
{
- return z_mem_virt_addr(get_cr3(esf));
+ return k_mem_virt_addr(get_cr3(esf));
}
#ifdef CONFIG_X86_64
__pinned_func
-static void dump_regs(const z_arch_esf_t *esf)
+static void dump_regs(const struct arch_esf *esf)
{
LOG_ERR("RAX: 0x%016lx RBX: 0x%016lx RCX: 0x%016lx RDX: 0x%016lx",
esf->rax, esf->rbx, esf->rcx, esf->rdx);
@@ -226,17 +264,11 @@ static void dump_regs(const z_arch_esf_t *esf)
LOG_ERR("RSP: 0x%016lx RFLAGS: 0x%016lx CS: 0x%04lx CR3: 0x%016lx",
esf->rsp, esf->rflags, esf->cs & 0xFFFFU, get_cr3(esf));
-#ifdef CONFIG_X86_EXCEPTION_STACK_TRACE
- LOG_ERR("call trace:");
-#endif
LOG_ERR("RIP: 0x%016lx", esf->rip);
-#ifdef CONFIG_X86_EXCEPTION_STACK_TRACE
- unwind_stack(esf->rbp, esf->cs);
-#endif
}
#else /* 32-bit */
__pinned_func
-static void dump_regs(const z_arch_esf_t *esf)
+static void dump_regs(const struct arch_esf *esf)
{
LOG_ERR("EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x",
esf->eax, esf->ebx, esf->ecx, esf->edx);
@@ -245,13 +277,7 @@ static void dump_regs(const z_arch_esf_t *esf)
LOG_ERR("EFLAGS: 0x%08x CS: 0x%04x CR3: 0x%08lx", esf->eflags,
esf->cs & 0xFFFFU, get_cr3(esf));
-#ifdef CONFIG_X86_EXCEPTION_STACK_TRACE
- LOG_ERR("call trace:");
-#endif
LOG_ERR("EIP: 0x%08x", esf->eip);
-#ifdef CONFIG_X86_EXCEPTION_STACK_TRACE
- unwind_stack(esf->ebp, esf->cs);
-#endif
}
#endif /* CONFIG_X86_64 */
@@ -327,7 +353,7 @@ static void log_exception(uintptr_t vector, uintptr_t code)
}
__pinned_func
-static void dump_page_fault(z_arch_esf_t *esf)
+static void dump_page_fault(struct arch_esf *esf)
{
uintptr_t err;
void *cr2;
@@ -362,12 +388,16 @@ static void dump_page_fault(z_arch_esf_t *esf)
__pinned_func
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf)
+ const struct arch_esf *esf)
{
if (esf != NULL) {
#ifdef CONFIG_EXCEPTION_DEBUG
dump_regs(esf);
#endif
+#ifdef CONFIG_EXCEPTION_STACK_TRACE
+ LOG_ERR("call trace:");
+ unwind_stack(esf);
+#endif /* CONFIG_EXCEPTION_STACK_TRACE */
#if defined(CONFIG_ASSERT) && defined(CONFIG_X86_64)
if (esf->rip == 0xb9) {
/* See implementation of __resume in locore.S. This is
@@ -385,7 +415,7 @@ FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
__pinned_func
FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
- const z_arch_esf_t *esf)
+ const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
log_exception(vector, esf_get_code(esf));
@@ -404,7 +434,7 @@ static const struct z_exc_handle exceptions[] = {
#endif
__pinned_func
-void z_x86_page_fault_handler(z_arch_esf_t *esf)
+void z_x86_page_fault_handler(struct arch_esf *esf)
{
#ifdef CONFIG_DEMAND_PAGING
if ((esf->errorCode & PF_P) == 0) {
@@ -434,7 +464,7 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
} else
#else
{
- was_valid_access = z_page_fault(virt);
+ was_valid_access = k_mem_page_fault(virt);
}
#endif /* CONFIG_X86_KPTI */
if (was_valid_access) {
@@ -488,7 +518,7 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
}
__pinned_func
-void z_x86_do_kernel_oops(const z_arch_esf_t *esf)
+void z_x86_do_kernel_oops(const struct arch_esf *esf)
{
uintptr_t reason;
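
The new unwinder above replaces the fixed, print-only `unwind_stack()` with a reusable `walk_stackframe()` driven by a caller-supplied callback. A minimal, self-contained sketch of the same frame-pointer walk (names like `walk_frames` and `trace_cb` are illustrative, not the kernel's):

```c
#include <stdbool.h>
#include <stdint.h>

/* Frame layout assumed by the walker: the saved caller frame pointer,
 * immediately followed by the return address (x86 EBP/RBP chain).
 */
struct frame {
	uintptr_t next;     /* caller's frame pointer */
	uintptr_t ret_addr; /* return address into the caller */
};

typedef bool (*trace_cb)(void *cookie, unsigned long addr);

/* Follow the frame-pointer chain, handing each return address to the
 * callback; stop when the callback returns false, the chain ends, or
 * max_frames is reached.  Real kernel code must additionally validate
 * that each frame pointer lies within readable stack memory.
 */
static void walk_frames(uintptr_t base_ptr, int max_frames,
			trace_cb cb, void *cookie)
{
	for (int i = 0; i < max_frames && base_ptr != 0U; i++) {
		if ((base_ptr % sizeof(base_ptr)) != 0U) {
			return; /* unaligned frame pointer: chain is corrupt */
		}

		const struct frame *f = (const struct frame *)base_ptr;

		if (f->ret_addr == 0U || !cb(cookie, f->ret_addr)) {
			return;
		}
		base_ptr = f->next;
	}
}
```

Splitting the walk from the printing is what lets `arch_stack_walk()` serve other consumers with the same loop; `print_trace_address()` is just one callback.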
diff --git a/arch/x86/core/ia32/coredump.c b/arch/x86/core/ia32/coredump.c
index b49373aab77..fb7d0fcfd8c 100644
--- a/arch/x86/core/ia32/coredump.c
+++ b/arch/x86/core/ia32/coredump.c
@@ -34,7 +34,7 @@ struct x86_arch_block {
*/
static struct x86_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
{
struct coredump_arch_hdr_t hdr = {
.id = COREDUMP_ARCH_HDR_ID,
diff --git a/arch/x86/core/ia32/crt0.S b/arch/x86/core/ia32/crt0.S
index 32513a95790..0c7ea821280 100644
--- a/arch/x86/core/ia32/crt0.S
+++ b/arch/x86/core/ia32/crt0.S
@@ -60,7 +60,7 @@
* Until we enable these page tables, only physical memory addresses
* work.
*/
- movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
+ movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
movl %eax, %cr3
#ifdef CONFIG_X86_PAE
@@ -89,7 +89,7 @@
orl $(CR0_PG | CR0_WP), %eax
movl %eax, %cr0
-#ifdef Z_VM_KERNEL
+#ifdef K_MEM_IS_VM_KERNEL
/* Jump to a virtual address, which works because the identity and
* virtual mappings both are to the same physical address.
*/
@@ -98,7 +98,7 @@ vm_enter:
/* We are now executing in virtual memory. We'll un-map the identity
* mappings later once we are in the C domain
*/
-#endif /* Z_VM_KERNEL */
+#endif /* K_MEM_IS_VM_KERNEL */
#endif /* CONFIG_X86_MMU */
.endm
@@ -126,7 +126,7 @@ SECTION_FUNC(BOOT_TEXT, __start)
*/
#if CONFIG_SET_GDT
/* load 32-bit operand size GDT */
- lgdt Z_MEM_PHYS_ADDR(_gdt_rom)
+ lgdt K_MEM_PHYS_ADDR(_gdt_rom)
/* If we set our own GDT, update the segment registers as well.
*/
@@ -138,7 +138,7 @@ SECTION_FUNC(BOOT_TEXT, __start)
movw %ax, %fs /* Zero FS */
movw %ax, %gs /* Zero GS */
- ljmp $CODE_SEG, $Z_MEM_PHYS_ADDR(__csSet) /* set CS = 0x08 */
+ ljmp $CODE_SEG, $K_MEM_PHYS_ADDR(__csSet) /* set CS = 0x08 */
__csSet:
#endif /* CONFIG_SET_GDT */
@@ -180,7 +180,8 @@ __csSet:
andl $~0x400, %eax /* CR4[OSXMMEXCPT] = 0 */
movl %eax, %cr4 /* move EAX to CR4 */
- ldmxcsr Z_MEM_PHYS_ADDR(_sse_mxcsr_default_value) /* initialize SSE control/status reg */
+ /* initialize SSE control/status reg */
+ ldmxcsr K_MEM_PHYS_ADDR(_sse_mxcsr_default_value)
#endif /* CONFIG_X86_SSE */
@@ -199,7 +200,7 @@ __csSet:
*/
#ifdef CONFIG_INIT_STACKS
movl $0xAAAAAAAA, %eax
- leal Z_MEM_PHYS_ADDR(z_interrupt_stacks), %edi
+ leal K_MEM_PHYS_ADDR(z_interrupt_stacks), %edi
#ifdef CONFIG_X86_STACK_PROTECTION
addl $4096, %edi
#endif
@@ -208,7 +209,7 @@ __csSet:
rep stosl
#endif
- movl $Z_MEM_PHYS_ADDR(z_interrupt_stacks), %esp
+ movl $K_MEM_PHYS_ADDR(z_interrupt_stacks), %esp
#ifdef CONFIG_X86_STACK_PROTECTION
/* In this configuration, all stacks, including IRQ stack, are declared
* with a 4K non-present guard page preceding the stack buffer
@@ -243,7 +244,7 @@ __csSet:
ltr %ax
#endif
-#ifdef Z_VM_KERNEL
+#ifdef K_MEM_IS_VM_KERNEL
/* Need to reset the stack to virtual address after
* page table is loaded.
*/
@@ -254,7 +255,7 @@ __csSet:
#else
addl $CONFIG_ISR_STACK_SIZE, %esp
#endif
-#endif /* Z_VM_KERNEL */
+#endif /* K_MEM_IS_VM_KERNEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
pushl %esp
@@ -272,7 +273,7 @@ __csSet:
/* Don't clear BSS if the section is not present
* in memory at boot. Or else it would cause page
* faults. Zeroing BSS will be done later once the
- * the paging mechanism has been initialized.
+ * paging mechanism has been initialized.
*/
call z_bss_zero
#endif
@@ -347,9 +348,9 @@ _gdt:
* descriptor here */
/* Limit on GDT */
- .word Z_MEM_PHYS_ADDR(_gdt_rom_end) - Z_MEM_PHYS_ADDR(_gdt_rom) - 1
+ .word K_MEM_PHYS_ADDR(_gdt_rom_end) - K_MEM_PHYS_ADDR(_gdt_rom) - 1
/* table address: _gdt_rom */
- .long Z_MEM_PHYS_ADDR(_gdt_rom)
+ .long K_MEM_PHYS_ADDR(_gdt_rom)
.word 0x0000
/* Entry 1 (selector=0x0008): Code descriptor: DPL0 */
diff --git a/arch/x86/core/ia32/excstub.S b/arch/x86/core/ia32/excstub.S
index 9c5f3f03191..6c0a13a37cd 100644
--- a/arch/x86/core/ia32/excstub.S
+++ b/arch/x86/core/ia32/excstub.S
@@ -161,12 +161,12 @@ SECTION_FUNC(PINNED_TEXT, _exception_enter)
/* ESP is still pointing to the ESF at this point */
- testl $0x200, __z_arch_esf_t_eflags_OFFSET(%esp)
+ testl $0x200, __struct_arch_esf_eflags_OFFSET(%esp)
je allDone
sti
allDone:
- pushl %esp /* push z_arch_esf_t * parameter */
+ pushl %esp /* push struct arch_esf * parameter */
call *%ecx /* call exception handler */
addl $0x4, %esp
diff --git a/arch/x86/core/ia32/fatal.c b/arch/x86/core/ia32/fatal.c
index 597f21a01ad..3ae8a6b67da 100644
--- a/arch/x86/core/ia32/fatal.c
+++ b/arch/x86/core/ia32/fatal.c
@@ -27,10 +27,10 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
unsigned int z_x86_exception_vector;
#endif
-__weak void z_debug_fatal_hook(const z_arch_esf_t *esf) { ARG_UNUSED(esf); }
+__weak void z_debug_fatal_hook(const struct arch_esf *esf) { ARG_UNUSED(esf); }
__pinned_func
-void z_x86_spurious_irq(const z_arch_esf_t *esf)
+void z_x86_spurious_irq(const struct arch_esf *esf)
{
int vector = z_irq_controller_isr_vector_get();
@@ -46,7 +46,7 @@ void arch_syscall_oops(void *ssf)
{
struct _x86_syscall_stack_frame *ssf_ptr =
(struct _x86_syscall_stack_frame *)ssf;
- z_arch_esf_t oops = {
+ struct arch_esf oops = {
.eip = ssf_ptr->eip,
.cs = ssf_ptr->cs,
.eflags = ssf_ptr->eflags
@@ -66,7 +66,7 @@ NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ,
#if CONFIG_EXCEPTION_DEBUG
__pinned_func
FUNC_NORETURN static void generic_exc_handle(unsigned int vector,
- const z_arch_esf_t *pEsf)
+ const struct arch_esf *pEsf)
{
#ifdef CONFIG_DEBUG_COREDUMP
z_x86_exception_vector = vector;
@@ -77,7 +77,7 @@ FUNC_NORETURN static void generic_exc_handle(unsigned int vector,
#define _EXC_FUNC(vector) \
__pinned_func \
-FUNC_NORETURN __used static void handle_exc_##vector(const z_arch_esf_t *pEsf) \
+FUNC_NORETURN __used static void handle_exc_##vector(const struct arch_esf *pEsf) \
{ \
generic_exc_handle(vector, pEsf); \
}
@@ -120,7 +120,7 @@ EXC_FUNC_NOCODE(IV_MACHINE_CHECK, 0);
_EXCEPTION_CONNECT_CODE(z_x86_page_fault_handler, IV_PAGE_FAULT, 0);
#ifdef CONFIG_X86_ENABLE_TSS
-static __pinned_noinit volatile z_arch_esf_t _df_esf;
+static __pinned_noinit volatile struct arch_esf _df_esf;
/* Very tiny stack; just enough for the bogus error code pushed by the CPU
* and a frame pointer push by the compiler. All df_handler_top does is
@@ -156,7 +156,7 @@ struct task_state_segment _df_tss = {
.ss = DATA_SEG,
.eip = (uint32_t)df_handler_top,
.cr3 = (uint32_t)
- Z_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_x86_kernel_ptables[0]))
+ K_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_x86_kernel_ptables[0]))
};
__pinned_func
@@ -182,14 +182,14 @@ static __used void df_handler_bottom(void)
reason = K_ERR_STACK_CHK_FAIL;
}
#endif
- z_x86_fatal_error(reason, (z_arch_esf_t *)&_df_esf);
+ z_x86_fatal_error(reason, (struct arch_esf *)&_df_esf);
}
__pinned_func
static FUNC_NORETURN __used void df_handler_top(void)
{
/* State of the system when the double-fault forced a task switch
- * will be in _main_tss. Set up a z_arch_esf_t and copy system state into
+ * will be in _main_tss. Set up a struct arch_esf and copy system state into
* it
*/
_df_esf.esp = _main_tss.esp;
@@ -213,7 +213,7 @@ static FUNC_NORETURN __used void df_handler_top(void)
_main_tss.es = DATA_SEG;
_main_tss.ss = DATA_SEG;
_main_tss.eip = (uint32_t)df_handler_bottom;
- _main_tss.cr3 = z_mem_phys_addr(z_x86_kernel_ptables);
+ _main_tss.cr3 = k_mem_phys_addr(z_x86_kernel_ptables);
_main_tss.eflags = 0U;
/* NT bit is set in EFLAGS so we will task switch back to _main_tss
diff --git a/arch/x86/core/ia32/float.c b/arch/x86/core/ia32/float.c
index a33a40a0a78..c89bf7accd5 100644
--- a/arch/x86/core/ia32/float.c
+++ b/arch/x86/core/ia32/float.c
@@ -302,7 +302,7 @@ int z_float_disable(struct k_thread *thread)
* instruction is executed while CR0[TS]=1. The handler then enables the
* current thread to use all supported floating point registers.
*/
-void _FpNotAvailableExcHandler(z_arch_esf_t *pEsf)
+void _FpNotAvailableExcHandler(struct arch_esf *pEsf)
{
ARG_UNUSED(pEsf);
diff --git a/arch/x86/core/ia32/gdbstub.c b/arch/x86/core/ia32/gdbstub.c
index 692ea78baf4..252f15d79ff 100644
--- a/arch/x86/core/ia32/gdbstub.c
+++ b/arch/x86/core/ia32/gdbstub.c
@@ -78,7 +78,7 @@ static unsigned int get_exception(unsigned int vector)
/*
* Debug exception handler.
*/
-static void z_gdb_interrupt(unsigned int vector, z_arch_esf_t *esf)
+static void z_gdb_interrupt(unsigned int vector, struct arch_esf *esf)
{
debug_ctx.exception = get_exception(vector);
@@ -212,7 +212,7 @@ size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
return ret;
}
-static __used void z_gdb_debug_isr(z_arch_esf_t *esf)
+static __used void z_gdb_debug_isr(struct arch_esf *esf)
{
#ifdef CONFIG_GDBSTUB_TRACE
printk("gdbstub:enter %s (IV_DEBUG)\n", __func__);
@@ -225,7 +225,7 @@ static __used void z_gdb_debug_isr(z_arch_esf_t *esf)
#endif
}
-static __used void z_gdb_break_isr(z_arch_esf_t *esf)
+static __used void z_gdb_break_isr(struct arch_esf *esf)
{
#ifdef CONFIG_GDBSTUB_TRACE
printk("gdbstub:enter %s (IV_BREAKPOINT)\n", __func__);
diff --git a/arch/x86/core/ia32/intstub.S b/arch/x86/core/ia32/intstub.S
index 0dc08c67c24..dd454670dd8 100644
--- a/arch/x86/core/ia32/intstub.S
+++ b/arch/x86/core/ia32/intstub.S
@@ -33,7 +33,7 @@
GTEXT(arch_swap)
#ifdef CONFIG_PM
- GTEXT(z_pm_save_idle_exit)
+ GTEXT(pm_system_resume)
#endif
/**
@@ -112,11 +112,12 @@ SECTION_FUNC(PINNED_TEXT, _interrupt_enter)
* EAX = isr_param, EDX = isr
*/
- /* Push EDI as we will use it for scratch space.
+ /* Push EBP as we will use it for scratch space.
+ * It also helps with stack unwinding.
* Rest of the callee-saved regs get saved by invocation of C
* functions (isr handler, arch_swap(), etc)
*/
- pushl %edi
+ pushl %ebp
/* load %ecx with &_kernel */
@@ -131,17 +132,17 @@ SECTION_FUNC(PINNED_TEXT, _interrupt_enter)
jne alreadyOnIntStack
/*
- * switch to base of the interrupt stack: save esp in edi, then load
+ * switch to base of the interrupt stack: save esp in ebp, then load
* irq_stack pointer
*/
- movl %esp, %edi
+ movl %esp, %ebp
movl _kernel_offset_to_irq_stack(%ecx), %esp
/* save thread's stack pointer onto base of interrupt stack */
- pushl %edi /* Save stack pointer */
+ pushl %ebp /* Save stack pointer */
#ifdef CONFIG_PM
cmpl $0, _kernel_offset_to_idle(%ecx)
@@ -265,7 +266,7 @@ alreadyOnIntStack:
#endif /* CONFIG_LAZY_FPU_SHARING */
/* Restore volatile registers and return to the interrupted thread */
- popl %edi
+ popl %ebp
popl %ecx
popl %edx
popl %eax
@@ -298,7 +299,7 @@ noReschedule:
*/
nestedInterrupt:
- popl %edi
+ popl %ebp
popl %ecx /* pop volatile registers in reverse order */
popl %edx
popl %eax
@@ -314,13 +315,13 @@ handle_idle:
movl $0, _kernel_offset_to_idle(%ecx)
/*
- * Beware that a timer driver's z_pm_save_idle_exit() implementation might
+ * Beware that a timer driver's pm_system_resume() implementation might
* expect that interrupts are disabled when invoked. This ensures that
* the calculation and programming of the device for the next timer
* deadline is not interrupted.
*/
- call z_pm_save_idle_exit
+ call pm_system_resume
popl %edx
popl %eax
jmp alreadyOnIntStack
diff --git a/arch/x86/core/ia32/irq_offload.c b/arch/x86/core/ia32/irq_offload.c
index 71e8363ece3..c7d68b34370 100644
--- a/arch/x86/core/ia32/irq_offload.c
+++ b/arch/x86/core/ia32/irq_offload.c
@@ -47,3 +47,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
irq_unlock(key);
}
+
+void arch_irq_offload_init(void)
+{
+}
diff --git a/arch/x86/core/ia32/userspace.S b/arch/x86/core/ia32/userspace.S
index bf21a0cc1a2..a4ad4c69fb8 100644
--- a/arch/x86/core/ia32/userspace.S
+++ b/arch/x86/core/ia32/userspace.S
@@ -51,7 +51,7 @@ SECTION_FUNC(PINNED_TEXT, z_x86_trampoline_to_kernel)
pushl %edi
/* Switch to kernel page table */
- movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
+ movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
movl %esi, %cr3
/* Save old trampoline stack pointer in %edi */
@@ -156,7 +156,7 @@ SECTION_FUNC(TEXT, z_x86_syscall_entry_stub)
pushl %edi
/* Switch to kernel page table */
- movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
+ movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
movl %esi, %cr3
/* Save old trampoline stack pointer in %edi */
diff --git a/arch/x86/core/intel64/coredump.c b/arch/x86/core/intel64/coredump.c
index f1c1a15eaff..65a9306ca07 100644
--- a/arch/x86/core/intel64/coredump.c
+++ b/arch/x86/core/intel64/coredump.c
@@ -46,7 +46,7 @@ struct x86_64_arch_block {
*/
static struct x86_64_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
{
struct coredump_arch_hdr_t hdr = {
.id = COREDUMP_ARCH_HDR_ID,
diff --git a/arch/x86/core/intel64/fatal.c b/arch/x86/core/intel64/fatal.c
index 9dd97614dc1..9eed95bfaa3 100644
--- a/arch/x86/core/intel64/fatal.c
+++ b/arch/x86/core/intel64/fatal.c
@@ -13,14 +13,14 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/* NMI handlers should override weak implementation
* return true if NMI is handled, false otherwise
*/
-__weak bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf)
+__weak bool z_x86_do_kernel_nmi(const struct arch_esf *esf)
{
ARG_UNUSED(esf);
return false;
}
-void z_x86_exception(z_arch_esf_t *esf)
+void z_x86_exception(struct arch_esf *esf)
{
switch (esf->vector) {
case Z_X86_OOPS_VECTOR:
diff --git a/arch/x86/core/intel64/irq.c b/arch/x86/core/intel64/irq.c
index f8e251b8046..51410646dba 100644
--- a/arch/x86/core/intel64/irq.c
+++ b/arch/x86/core/intel64/irq.c
@@ -100,8 +100,8 @@ void z_x86_irq_connect_on_vector(unsigned int irq,
*/
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
- void (*func)(const void *arg),
- const void *arg, uint32_t flags)
+ void (*routine)(const void *parameter),
+ const void *parameter, uint32_t flags)
{
uint32_t key;
int vector;
@@ -124,7 +124,7 @@ int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
#endif /* CONFIG_INTEL_VTD_ICTL */
z_irq_controller_irq_config(vector, irq, flags);
- z_x86_irq_connect_on_vector(irq, vector, func, arg);
+ z_x86_irq_connect_on_vector(irq, vector, routine, parameter);
}
irq_unlock(key);
diff --git a/arch/x86/core/intel64/irq_offload.c b/arch/x86/core/intel64/irq_offload.c
index 0146321f7d9..03ae8a9439b 100644
--- a/arch/x86/core/intel64/irq_offload.c
+++ b/arch/x86/core/intel64/irq_offload.c
@@ -9,6 +9,7 @@
*/
#include
+#include
#include
#include
@@ -17,12 +18,33 @@
extern void (*x86_irq_funcs[NR_IRQ_VECTORS])(const void *arg);
extern const void *x86_irq_args[NR_IRQ_VECTORS];
+static void (*irq_offload_funcs[CONFIG_MP_NUM_CPUS])(const void *arg);
+static const void *irq_offload_args[CONFIG_MP_NUM_CPUS];
+
+static void dispatcher(const void *arg)
+{
+ uint8_t cpu_id = _current_cpu->id;
+
+ if (irq_offload_funcs[cpu_id] != NULL) {
+ irq_offload_funcs[cpu_id](irq_offload_args[cpu_id]);
+ }
+}
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
- x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = routine;
- x86_irq_args[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = parameter;
+ int key = arch_irq_lock();
+ uint8_t cpu_id = _current_cpu->id;
+
+ irq_offload_funcs[cpu_id] = routine;
+ irq_offload_args[cpu_id] = parameter;
+
__asm__ volatile("int %0" : : "i" (CONFIG_IRQ_OFFLOAD_VECTOR)
: "memory");
- x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = NULL;
+
+ arch_irq_unlock(key);
+}
+
+void arch_irq_offload_init(void)
+{
+ x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = dispatcher;
}
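
The rework above replaces a single global function/argument slot with per-CPU slots plus one `dispatcher()` registered on the offload vector at init time, so simultaneous offloads on different CPUs no longer clobber each other. For context, `irq_offload()` is how test code runs a routine in interrupt context; a hedged usage sketch:

```c
#include <zephyr/irq_offload.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>

static void in_isr_routine(const void *arg)
{
	volatile bool *ran_in_isr = (volatile bool *)arg;

	/* Runs on the current CPU via the offload vector. */
	*ran_in_isr = k_is_in_isr();
}

void offload_demo(void)
{
	volatile bool ran_in_isr = false;

	/* Synchronous: returns after the routine has run in ISR context. */
	irq_offload(in_isr_routine, (const void *)&ran_in_isr);
	__ASSERT(ran_in_isr, "routine did not run in interrupt context");
}
```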
diff --git a/arch/x86/core/intel64/locore.S b/arch/x86/core/intel64/locore.S
index 68f89c90398..108d9f15d37 100644
--- a/arch/x86/core/intel64/locore.S
+++ b/arch/x86/core/intel64/locore.S
@@ -44,7 +44,7 @@
/* Page tables created at build time by gen_mmu.py
* NOTE: Presumes phys=virt
*/
- movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
+ movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
movl %eax, %cr3
set_efer
@@ -66,7 +66,7 @@
clts
/* NOTE: Presumes phys=virt */
- movq $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
+ movq $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
movq %rax, %cr3
set_efer
diff --git a/arch/x86/core/intel64/smp.c b/arch/x86/core/intel64/smp.c
index a73ba9c8f38..1e0aeb3e443 100644
--- a/arch/x86/core/intel64/smp.c
+++ b/arch/x86/core/intel64/smp.c
@@ -34,9 +34,7 @@ int arch_smp_init(void)
* it is not clear exactly how/where/why to abstract this, as it
* assumes the use of a local APIC (but there's no other mechanism).
*/
-void arch_sched_ipi(void)
+void arch_sched_broadcast_ipi(void)
{
z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_SCHED_IPI_VECTOR);
}
-
-SYS_INIT(arch_smp_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
diff --git a/arch/x86/core/intel64/userspace.S b/arch/x86/core/intel64/userspace.S
index ab09381c7af..d3610c300cb 100644
--- a/arch/x86/core/intel64/userspace.S
+++ b/arch/x86/core/intel64/userspace.S
@@ -87,7 +87,7 @@ z_x86_syscall_entry_stub:
pushq %rax
/* NOTE: Presumes phys=virt */
- movq $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
+ movq $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
movq %rax, %cr3
popq %rax
movq $0, -8(%rsp) /* Delete stashed RAX data */
diff --git a/arch/x86/core/legacy_bios.c b/arch/x86/core/legacy_bios.c
index 6a7159a9b1c..6245a6175c5 100644
--- a/arch/x86/core/legacy_bios.c
+++ b/arch/x86/core/legacy_bios.c
@@ -17,18 +17,18 @@ static uintptr_t bios_search_rsdp_buff(uintptr_t search_phy_add, uint32_t search
{
uint64_t *search_buff;
- z_phys_map((uint8_t **)&search_buff, search_phy_add, search_length, 0);
+ k_mem_map_phys_bare((uint8_t **)&search_buff, search_phy_add, search_length, 0);
if (!search_buff) {
return 0;
}
for (int i = 0; i < search_length / 8u; i++) {
if (search_buff[i] == RSDP_SIGNATURE) {
- z_phys_unmap((uint8_t *)search_buff, search_length);
+ k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);
return (search_phy_add + (i * 8u));
}
}
- z_phys_unmap((uint8_t *)search_buff, search_length);
+ k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);
return 0;
}
@@ -38,10 +38,10 @@ void *bios_acpi_rsdp_get(void)
uint8_t *bios_ext_data, *zero_page_base;
uintptr_t search_phy_add, rsdp_phy_add;
- z_phys_map(&zero_page_base, 0, DATA_SIZE_K(4u), 0);
+ k_mem_map_phys_bare(&zero_page_base, 0, DATA_SIZE_K(4u), 0);
bios_ext_data = EBDA_ADD + zero_page_base;
search_phy_add = (uintptr_t)((*(uint16_t *)bios_ext_data) << 4u);
- z_phys_unmap(zero_page_base, DATA_SIZE_K(4u));
+ k_mem_unmap_phys_bare(zero_page_base, DATA_SIZE_K(4u));
if ((search_phy_add >= BIOS_EXT_DATA_LOW) && (search_phy_add < BIOS_EXT_DATA_HIGH)) {
rsdp_phy_add = bios_search_rsdp_buff(search_phy_add, DATA_SIZE_K(1u));
diff --git a/arch/x86/core/multiboot.c b/arch/x86/core/multiboot.c
index 2dab56128d1..b6112b75f82 100644
--- a/arch/x86/core/multiboot.c
+++ b/arch/x86/core/multiboot.c
@@ -11,6 +11,15 @@
struct multiboot_info multiboot_info;
+#ifdef CONFIG_DYNAMIC_BOOTARGS
+__pinned_noinit char multiboot_cmdline[CONFIG_BOOTARGS_ARGS_BUFFER_SIZE];
+
+const char *get_bootargs(void)
+{
+ return multiboot_cmdline;
+}
+#endif /* CONFIG_DYNAMIC_BOOTARGS */
+
/*
* called very early in the boot process to fetch data out of the multiboot
* info struct. we need to grab the relevant data before any dynamic memory
@@ -41,8 +50,8 @@ void z_multiboot_init(struct multiboot_info *info_pa)
*/
info = info_pa;
#else
- z_phys_map((uint8_t **)&info, POINTER_TO_UINT(info_pa),
- sizeof(*info_pa), K_MEM_CACHE_NONE);
+ k_mem_map_phys_bare((uint8_t **)&info, POINTER_TO_UINT(info_pa),
+ sizeof(*info_pa), K_MEM_CACHE_NONE);
#endif /* CONFIG_ARCH_MAPS_ALL_RAM */
if (info == NULL) {
@@ -70,8 +79,8 @@ void z_multiboot_init(struct multiboot_info *info_pa)
#else
uint8_t *address_va;
- z_phys_map(&address_va, info->mmap_addr, info->mmap_length,
- K_MEM_CACHE_NONE);
+ k_mem_map_phys_bare(&address_va, info->mmap_addr, info->mmap_length,
+ K_MEM_CACHE_NONE);
address = POINTER_TO_UINT(address_va);
#endif /* CONFIG_ARCH_MAPS_ALL_RAM */
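
With CONFIG_DYNAMIC_BOOTARGS, the multiboot command line is captured into `multiboot_cmdline` and exposed through `get_bootargs()`. What a consumer does with the string is outside this patch; below is a minimal sketch of one plausible consumer, an in-place whitespace tokenizer producing argv-style words (the splitting rules are illustrative, not the kernel's):

```c
#include <stddef.h>

/* Split a NUL-terminated bootargs string in place into tokens separated
 * by spaces, returning the token count.  Illustrative only: real parsing
 * may need to handle quoting and other whitespace.
 */
static int tokenize_bootargs(char *s, char **argv, int max_args)
{
	int argc = 0;

	while (*s != '\0' && argc < max_args) {
		while (*s == ' ') {
			*s++ = '\0'; /* terminate the previous token */
		}
		if (*s != '\0') {
			argv[argc++] = s;
			while (*s != '\0' && *s != ' ') {
				s++;
			}
		}
	}
	return argc;
}
```

Splitting in place avoids allocation, which matters this early in boot; since the backing buffer is static, the argv pointers stay valid.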
diff --git a/arch/x86/core/offsets/ia32_offsets.c b/arch/x86/core/offsets/ia32_offsets.c
index 61a7f25bb2a..2dfbb5c38ef 100644
--- a/arch/x86/core/offsets/ia32_offsets.c
+++ b/arch/x86/core/offsets/ia32_offsets.c
@@ -52,7 +52,6 @@ GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF,
GEN_OFFSET_SYM(_callee_saved_t, esp);
-/* z_arch_esf_t structure member offsets */
-
-GEN_OFFSET_SYM(z_arch_esf_t, eflags);
+/* struct arch_esf member offsets */
+GEN_OFFSET_STRUCT(arch_esf, eflags);
#endif /* _X86_OFFSETS_INC_ */
diff --git a/arch/x86/core/offsets/offsets.c b/arch/x86/core/offsets/offsets.c
index 53b432e74cb..2e6b323fa7c 100644
--- a/arch/x86/core/offsets/offsets.c
+++ b/arch/x86/core/offsets/offsets.c
@@ -14,9 +14,18 @@
#include "ia32_offsets.c"
#endif
+#ifdef CONFIG_MULTIBOOT_INFO
+#include
+#endif
+
GEN_OFFSET_SYM(x86_boot_arg_t, boot_type);
GEN_OFFSET_SYM(x86_boot_arg_t, arg);
GEN_OFFSET_SYM(_thread_arch_t, flags);
+#ifdef CONFIG_MULTIBOOT_INFO
+GEN_OFFSET_SYM(multiboot_info_t, flags);
+GEN_OFFSET_SYM(multiboot_info_t, cmdline);
+#endif
+
GEN_ABS_SYM_END
diff --git a/arch/x86/core/prep_c.c b/arch/x86/core/prep_c.c
index d9b62b0b711..96c76f6af26 100644
--- a/arch/x86/core/prep_c.c
+++ b/arch/x86/core/prep_c.c
@@ -9,6 +9,8 @@
#include
#include
#include
+#include
+#include
extern FUNC_NORETURN void z_cstart(void);
extern void x86_64_irq_init(void);
@@ -17,6 +19,10 @@ extern void x86_64_irq_init(void);
__pinned_data x86_boot_arg_t x86_cpu_boot_arg;
#endif
+
+
+extern int spec_ctrl_init(void);
+
/* Early global initialization functions, C domain. This runs only on the first
* CPU for SMP systems.
*/
@@ -25,6 +31,9 @@ FUNC_NORETURN void z_prep_c(void *arg)
{
x86_boot_arg_t *cpu_arg = arg;
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
_kernel.cpus[0].nested = 0;
#ifdef CONFIG_MMU
@@ -72,9 +81,11 @@ FUNC_NORETURN void z_prep_c(void *arg)
z_x86_set_stack_guard(z_interrupt_stacks[i]);
}
#endif
-
-#if defined(CONFIG_SMP)
- arch_smp_init();
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
+#endif
+#if defined(CONFIG_X86_DISABLE_SSBD) || defined(CONFIG_X86_ENABLE_EXTENDED_IBRS)
+ spec_ctrl_init();
#endif
z_cstart();
diff --git a/arch/x86/core/spec_ctrl.c b/arch/x86/core/spec_ctrl.c
index 50daa74bd1a..f5358e596ee 100644
--- a/arch/x86/core/spec_ctrl.c
+++ b/arch/x86/core/spec_ctrl.c
@@ -17,7 +17,7 @@
*/
#if defined(CONFIG_X86_DISABLE_SSBD) || defined(CONFIG_X86_ENABLE_EXTENDED_IBRS)
-static int spec_ctrl_init(void)
+int spec_ctrl_init(void)
{
uint32_t enable_bits = 0U;
@@ -43,5 +43,4 @@ static int spec_ctrl_init(void)
return 0;
}
-SYS_INIT(spec_ctrl_init, PRE_KERNEL_1, 0);
#endif /* CONFIG_X86_DISABLE_SSBD || CONFIG_X86_ENABLE_EXTENDED_IBRS */
diff --git a/arch/x86/core/userspace.c b/arch/x86/core/userspace.c
index dbe40b2bda0..436bc18edb7 100644
--- a/arch/x86/core/userspace.c
+++ b/arch/x86/core/userspace.c
@@ -4,6 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
+#include
+
#include
#include
#include
@@ -70,15 +72,18 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread)
{
void *initial_entry;
- struct z_x86_thread_stack_header *header =
+ if (z_stack_is_user_capable(thread->stack_obj)) {
+ struct z_x86_thread_stack_header *header =
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
- (struct z_x86_thread_stack_header *)thread->stack_info.mapped.addr;
+ (struct z_x86_thread_stack_header *)thread->stack_info.mapped.addr;
#else
- (struct z_x86_thread_stack_header *)thread->stack_obj;
+ (struct z_x86_thread_stack_header *)thread->stack_obj;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
- thread->arch.psp =
- header->privilege_stack + sizeof(header->privilege_stack);
+ thread->arch.psp = header->privilege_stack + sizeof(header->privilege_stack);
+ } else {
+ thread->arch.psp = NULL;
+ }
#ifndef CONFIG_X86_COMMON_PAGE_TABLE
/* Important this gets cleared, so that arch_mem_domain_* APIs
@@ -90,6 +95,28 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread)
if ((thread->base.user_options & K_USER) != 0U) {
initial_entry = arch_user_mode_enter;
+
+#ifdef CONFIG_INIT_STACKS
+ /* setup_thread_stack() does not initialize the architecture-specific
+ * privileged stack. So we need to do it manually here, as this function
+ * is called by arch_new_thread() via z_setup_new_thread() after
+ * setup_thread_stack() but before the thread starts running.
+ *
+ * Note that only user threads have privileged stacks; kernel-only
+ * threads do not.
+ *
+ * Also note that this needs to be done before calling
+ * z_x86_userspace_enter(), which clears the user stack. That function
+ * executes code on the privileged stack, so we cannot clear that stack
+ * at the same time.
+ */
+ struct z_x86_thread_stack_header *hdr_stack_obj =
+ (struct z_x86_thread_stack_header *)thread->stack_obj;
+
+ (void)memset(&hdr_stack_obj->privilege_stack[0], 0xaa,
+ sizeof(hdr_stack_obj->privilege_stack));
+#endif
+
} else {
initial_entry = z_thread_entry;
}
@@ -158,3 +185,19 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
_current->stack_info.start);
CODE_UNREACHABLE;
}
+
+int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
+ size_t *unused_ptr)
+{
+ struct z_x86_thread_stack_header *hdr_stack_obj;
+
+ if ((thread->base.user_options & K_USER) != K_USER) {
+ return -EINVAL;
+ }
+
+ hdr_stack_obj = (struct z_x86_thread_stack_header *)thread->stack_obj;
+
+ return z_stack_space_get(&hdr_stack_obj->privilege_stack[0],
+ sizeof(hdr_stack_obj->privilege_stack),
+ unused_ptr);
+}
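
`arch_thread_priv_stack_space_get()` can report unused space because the privileged stack is pre-filled with 0xAA (the `memset()` added above) and `z_stack_space_get()` measures how much of that fill pattern survives. A self-contained sketch of the measuring side of the technique, assuming a descending stack:

```c
#include <stddef.h>
#include <stdint.h>

/* With a descending stack, the untouched bytes sit at the lowest
 * addresses.  Count how many still hold the 0xAA sentinel written at
 * thread setup; that count is the stack space that was never used.
 */
static size_t stack_unused_get(const uint8_t *buf, size_t size)
{
	size_t unused = 0;

	while (unused < size && buf[unused] == 0xAA) {
		unused++;
	}
	return unused;
}
```

The result is a high-water mark, not an instantaneous reading: any byte that ever lost the pattern counts as used, which is exactly what stack sizing needs.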
diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c
index 7bfa2088a41..566dd2cdfd4 100644
--- a/arch/x86/core/x86_mmu.c
+++ b/arch/x86/core/x86_mmu.c
@@ -313,7 +313,7 @@ static inline uintptr_t get_entry_phys(pentry_t entry, int level)
__pinned_func
static inline pentry_t *next_table(pentry_t entry, int level)
{
- return z_mem_virt_addr(get_entry_phys(entry, level));
+ return k_mem_virt_addr(get_entry_phys(entry, level));
}
/* Number of table entries at this level */
@@ -416,12 +416,12 @@ void z_x86_tlb_ipi(const void *arg)
* if KPTI is turned on
*/
ptables_phys = z_x86_cr3_get();
- __ASSERT(ptables_phys == z_mem_phys_addr(&z_x86_kernel_ptables), "");
+ __ASSERT(ptables_phys == k_mem_phys_addr(&z_x86_kernel_ptables), "");
#else
/* We might have been moved to another memory domain, so always invoke
* z_x86_thread_page_tables_get() instead of using current CR3 value.
*/
- ptables_phys = z_mem_phys_addr(z_x86_thread_page_tables_get(_current));
+ ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current));
#endif
/*
* In the future, we can consider making this smarter, such as
@@ -593,7 +593,7 @@ static void print_entries(pentry_t entries_array[], uint8_t *base, int level,
if (phys == virt) {
/* Identity mappings */
COLOR(YELLOW);
- } else if (phys + Z_MEM_VM_OFFSET == virt) {
+ } else if (phys + K_MEM_VIRT_OFFSET == virt) {
/* Permanent RAM mappings */
COLOR(GREEN);
} else {
@@ -661,7 +661,7 @@ static void dump_ptables(pentry_t *table, uint8_t *base, int level)
#endif
printk("%s at %p (0x%" PRIxPTR "): ", info->name, table,
- z_mem_phys_addr(table));
+ k_mem_phys_addr(table));
if (level == 0) {
printk("entire address space\n");
} else {
@@ -826,7 +826,7 @@ static inline pentry_t pte_finalize_value(pentry_t val, bool user_table,
{
#ifdef CONFIG_X86_KPTI
static const uintptr_t shared_phys_addr =
- Z_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_shared_kernel_page_start));
+ K_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_shared_kernel_page_start));
if (user_table && (val & MMU_US) == 0 && (val & MMU_P) != 0 &&
get_entry_phys(val, level) != shared_phys_addr) {
@@ -1307,7 +1307,7 @@ void arch_mem_unmap(void *addr, size_t size)
ARG_UNUSED(ret);
}
-#ifdef Z_VM_KERNEL
+#ifdef K_MEM_IS_VM_KERNEL
__boot_func
static void identity_map_remove(uint32_t level)
{
@@ -1346,7 +1346,7 @@ static void identity_map_remove(uint32_t level)
__boot_func
void z_x86_mmu_init(void)
{
-#ifdef Z_VM_KERNEL
+#ifdef K_MEM_IS_VM_KERNEL
/* We booted with physical address space being identity mapped.
* As we are now executing in virtual address space,
* the identity map is no longer needed. So remove them.
@@ -1720,7 +1720,7 @@ static int copy_page_table(pentry_t *dst, pentry_t *src, int level)
* cast needed for PAE case where sizeof(void *) and
* sizeof(pentry_t) are not the same.
*/
- dst[i] = ((pentry_t)z_mem_phys_addr(child_dst) |
+ dst[i] = ((pentry_t)k_mem_phys_addr(child_dst) |
INT_FLAGS);
ret = copy_page_table(child_dst,
@@ -1924,11 +1924,11 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
* z_x86_current_stack_perms()
*/
if (is_migration) {
- old_ptables = z_mem_virt_addr(thread->arch.ptables);
+ old_ptables = k_mem_virt_addr(thread->arch.ptables);
set_stack_perms(thread, domain->arch.ptables);
}
- thread->arch.ptables = z_mem_phys_addr(domain->arch.ptables);
+ thread->arch.ptables = k_mem_phys_addr(domain->arch.ptables);
LOG_DBG("set thread %p page tables to 0x%" PRIxPTR, thread,
thread->arch.ptables);
@@ -2004,11 +2004,12 @@ static void mark_addr_page_reserved(uintptr_t addr, size_t len)
uintptr_t end = ROUND_UP(addr + len, CONFIG_MMU_PAGE_SIZE);
for (; pos < end; pos += CONFIG_MMU_PAGE_SIZE) {
- if (!z_is_page_frame(pos)) {
+ if (!k_mem_is_page_frame(pos)) {
continue;
}
- z_page_frame_set(z_phys_to_page_frame(pos), Z_PAGE_FRAME_RESERVED);
+ k_mem_page_frame_set(k_mem_phys_to_page_frame(pos),
+ K_MEM_PAGE_FRAME_RESERVED);
}
}
@@ -2112,7 +2113,7 @@ void arch_mem_page_in(void *addr, uintptr_t phys)
__pinned_func
void arch_mem_scratch(uintptr_t phys)
{
- page_map_set(z_x86_page_tables_get(), Z_SCRATCH_PAGE,
+ page_map_set(z_x86_page_tables_get(), K_MEM_SCRATCH_PAGE,
phys | MMU_P | MMU_RW | MMU_XD, NULL, MASK_ALL,
OPTION_FLUSH);
}
@@ -2230,7 +2231,7 @@ bool z_x86_kpti_is_access_ok(void *addr, pentry_t *ptables)
/* Might as well also check if it's un-mapped, normally we don't
* fetch the PTE from the page tables until we are inside
- * z_page_fault() and call arch_page_fault_status_get()
+ * k_mem_page_fault() and call arch_page_fault_status_get()
*/
if (level != PTE_LEVEL || pte == 0 || is_flipped_pte(pte)) {
return false;
diff --git a/arch/x86/ia32.cmake b/arch/x86/ia32.cmake
index ee2c7a7f01e..5bf5018a32a 100644
--- a/arch/x86/ia32.cmake
+++ b/arch/x86/ia32.cmake
@@ -126,7 +126,7 @@ add_bin_file_to_the_next_link(gen_idt_output irq_int_vector_map)
add_bin_file_to_the_next_link(gen_idt_output irq_vectors_alloc)
if(CONFIG_GDT_DYNAMIC)
- # Use gen_gdt.py and objcopy to generate gdt.o from from the elf
+ # Use gen_gdt.py and objcopy to generate gdt.o from the elf
# file ${ZEPHYR_PREBUILT_EXECUTABLE}, creating the temp file gdt.bin along the
# way.
#
diff --git a/arch/x86/include/ia32/exception.h b/arch/x86/include/ia32/exception.h
index 27119709c2a..1b0ce9ee3b5 100644
--- a/arch/x86/include/ia32/exception.h
+++ b/arch/x86/include/ia32/exception.h
@@ -62,7 +62,7 @@
* Assign an exception handler to a particular vector in the IDT.
*
* @param handler A handler function of the prototype
- * void handler(const z_arch_esf_t *esf)
+ * void handler(const struct arch_esf *esf)
* @param vector Vector index in the IDT
*/
#define _EXCEPTION_CONNECT_NOCODE(handler, vector, dpl) \
@@ -75,7 +75,7 @@
* The error code will be accessible in esf->errorCode
*
* @param handler A handler function of the prototype
- * void handler(const z_arch_esf_t *esf)
+ * void handler(const struct arch_esf *esf)
* @param vector Vector index in the IDT
*/
#define _EXCEPTION_CONNECT_CODE(handler, vector, dpl) \
diff --git a/arch/x86/include/ia32/offsets_short_arch.h b/arch/x86/include/ia32/offsets_short_arch.h
index 2033a5585f7..cb5cdb218f0 100644
--- a/arch/x86/include/ia32/offsets_short_arch.h
+++ b/arch/x86/include/ia32/offsets_short_arch.h
@@ -7,7 +7,7 @@
#ifndef ZEPHYR_ARCH_X86_INCLUDE_IA32_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_X86_INCLUDE_IA32_OFFSETS_SHORT_ARCH_H_
-#include
+#include
/* kernel */
diff --git a/arch/x86/include/intel64/kernel_arch_func.h b/arch/x86/include/intel64/kernel_arch_func.h
index a749a9b9af1..abf022fe5fd 100644
--- a/arch/x86/include/intel64/kernel_arch_func.h
+++ b/arch/x86/include/intel64/kernel_arch_func.h
@@ -36,7 +36,7 @@ void x86_sse_init(struct k_thread *thread);
void z_x86_syscall_entry_stub(void);
-bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf);
+bool z_x86_do_kernel_nmi(const struct arch_esf *esf);
#endif /* _ASMLANGUAGE */
diff --git a/arch/x86/include/intel64/offsets_short_arch.h b/arch/x86/include/intel64/offsets_short_arch.h
index 4252ac687db..1ffabc899c2 100644
--- a/arch/x86/include/intel64/offsets_short_arch.h
+++ b/arch/x86/include/intel64/offsets_short_arch.h
@@ -6,7 +6,7 @@
#ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_
-#include
+#include
#define _thread_offset_to_rsp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rsp_OFFSET)
diff --git a/arch/x86/include/kernel_arch_func.h b/arch/x86/include/kernel_arch_func.h
index 00b411978ec..9bc7cfe4212 100644
--- a/arch/x86/include/kernel_arch_func.h
+++ b/arch/x86/include/kernel_arch_func.h
@@ -49,16 +49,16 @@ void z_x86_early_serial_init(void);
* interesting info and call z_x86_fatal_error()
*/
FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
- const z_arch_esf_t *esf);
+ const struct arch_esf *esf);
/* Called upon unrecoverable error; dump registers and transfer control to
* kernel via z_fatal_error()
*/
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
- const z_arch_esf_t *esf);
+ const struct arch_esf *esf);
/* Common handling for page fault exceptions */
-void z_x86_page_fault_handler(z_arch_esf_t *esf);
+void z_x86_page_fault_handler(struct arch_esf *esf);
#ifdef CONFIG_THREAD_STACK_INFO
/**
@@ -90,7 +90,7 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread);
#endif /* CONFIG_USERSPACE */
-void z_x86_do_kernel_oops(const z_arch_esf_t *esf);
+void z_x86_do_kernel_oops(const struct arch_esf *esf);
/*
* Find a free IRQ vector at the specified priority, or return -1 if none left.
diff --git a/arch/x86/include/x86_mmu.h b/arch/x86/include/x86_mmu.h
index 31c8526cb7a..ed6bb59b37c 100644
--- a/arch/x86/include/x86_mmu.h
+++ b/arch/x86/include/x86_mmu.h
@@ -182,7 +182,7 @@ static inline uintptr_t z_x86_cr3_get(void)
/* Return the virtual address of the page tables installed in this CPU in CR3 */
static inline pentry_t *z_x86_page_tables_get(void)
{
- return z_mem_virt_addr(z_x86_cr3_get());
+ return k_mem_virt_addr(z_x86_cr3_get());
}
/* Return cr2 value, which contains the page fault linear address.
@@ -215,7 +215,7 @@ static inline pentry_t *z_x86_thread_page_tables_get(struct k_thread *thread)
* the kernel's page tables and not the page tables associated
* with their memory domain.
*/
- return z_mem_virt_addr(thread->arch.ptables);
+ return k_mem_virt_addr(thread->arch.ptables);
}
#else
ARG_UNUSED(thread);
diff --git a/arch/x86/zefi/efi.h b/arch/x86/zefi/efi.h
index 9be17c53e14..d0773177132 100644
--- a/arch/x86/zefi/efi.h
+++ b/arch/x86/zefi/efi.h
@@ -10,6 +10,7 @@
#ifndef _ASMLANGUAGE
#include
+#include
#define __abi __attribute__((ms_abi))
@@ -618,6 +619,24 @@ struct efi_system_table {
struct efi_configuration_table *ConfigurationTable;
};
+#ifdef CONFIG_DYNAMIC_BOOTARGS
+struct efi_loaded_image_protocol {
+ uint32_t Revision;
+ void *ParentHandle;
+ struct efi_system_table *SystemTable;
+ void *DeviceHandle;
+ void *FilePath;
+ void *Reserved;
+ uint32_t LoadOptionsSize;
+ void *LoadOptions;
+ void *ImageBase;
+ uint64_t ImageSize;
+ enum efi_memory_type ImageCodeType;
+ enum efi_memory_type ImageDataType;
+ efi_unload_image_t Unload;
+};
+#endif /* CONFIG_DYNAMIC_BOOTARGS */
+
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_EFI_H_ */
diff --git a/arch/x86/zefi/zefi.c b/arch/x86/zefi/zefi.c
index 56d0701e79b..66a54b538b1 100644
--- a/arch/x86/zefi/zefi.c
+++ b/arch/x86/zefi/zefi.c
@@ -32,6 +32,14 @@
.Data4 = { 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81 }, \
}
+#define EFI_LOADED_IMAGE_PROTOCOL_GUID \
+ { \
+ .Data1 = 0x5b1b31a1, \
+ .Data2 = 0x9562, \
+ .Data3 = 0x11d2, \
+ .Data4 = { 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b } \
+ }
+
/* The linker places this dummy last in the data memory. We can't use
* traditional linker address symbols because we're relocatable; the
* linker doesn't know what the runtime address will be. The compiler
@@ -135,7 +143,9 @@ static void disable_hpet(void)
*/
uintptr_t __abi efi_entry(void *img_handle, struct efi_system_table *sys_tab)
{
+#ifndef CONFIG_DYNAMIC_BOOTARGS
(void)img_handle;
+#endif /* CONFIG_DYNAMIC_BOOTARGS */
efi = sys_tab;
z_putchar = efi_putchar;
@@ -180,6 +190,30 @@ uintptr_t __abi efi_entry(void *img_handle, struct efi_system_table *sys_tab)
}
}
+#ifdef CONFIG_DYNAMIC_BOOTARGS
+ char *dst_bootargs = (char *)zefi_bootargs;
+ struct efi_loaded_image_protocol *loaded_image;
+ efi_guid_t loaded_image_protocol = EFI_LOADED_IMAGE_PROTOCOL_GUID;
+ efi_status_t loaded_image_status = sys_tab->BootServices->HandleProtocol(
+ img_handle,
+ &loaded_image_protocol,
+ (void **)&loaded_image
+ );
+
+ if (loaded_image_status == EFI_SUCCESS) {
+ uint16_t *src_bootargs = (uint16_t *)loaded_image->LoadOptions;
+
+ while (*src_bootargs != '\0' &&
+ dst_bootargs + 1 <
+ (char *)zefi_bootargs + CONFIG_BOOTARGS_ARGS_BUFFER_SIZE) {
+ *dst_bootargs++ = *src_bootargs++ & 0x7f;
+ }
+ *dst_bootargs = '\0';
+ } else {
+ *dst_bootargs = '\0';
+ }
+#endif /* CONFIG_DYNAMIC_BOOTARGS */
+
unsigned char *code = (void *)zefi_entry;
efi_arg.efi_systab = efi;
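
The LoadOptions copy above narrows the UCS-2 (16-bit) EFI command line to 7-bit ASCII by masking each code unit with 0x7f while respecting the destination buffer size. The same conversion as a standalone helper, for clarity (name and signature are illustrative):

```c
#include <stddef.h>
#include <stdint.h>

/* Narrow a NUL-terminated UCS-2 string to 7-bit ASCII, truncating to
 * the destination size and always NUL-terminating.  Non-ASCII code
 * units are simply masked, as in the loader above.
 */
static void ucs2_to_ascii(const uint16_t *src, char *dst, size_t dst_size)
{
	size_t i = 0;

	if (dst_size == 0) {
		return;
	}
	while (src[i] != 0U && i + 1 < dst_size) {
		dst[i] = (char)(src[i] & 0x7f);
		i++;
	}
	dst[i] = '\0';
}
```

Masking rather than validating is a deliberate simplification: boot arguments are expected to be ASCII, and a garbled character is preferable to extra code in the EFI stub.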
diff --git a/arch/x86/zefi/zefi.py b/arch/x86/zefi/zefi.py
index 99c188ecd08..2f9c3482bb4 100755
--- a/arch/x86/zefi/zefi.py
+++ b/arch/x86/zefi/zefi.py
@@ -8,6 +8,9 @@
import argparse
ENTRY_SYM = "__start64"
+BOOTARGS_SYM = "efi_bootargs"
+
+args = None
def verbose(msg):
if args.verbose:
@@ -93,6 +96,9 @@ def build_elf(elf_file, include_dirs):
cf.write("static uintptr_t zefi_entry = 0x%xUL;\n" % (entry_addr))
+ if symtab.get_symbol_by_name(BOOTARGS_SYM):
+ cf.write("static uintptr_t zefi_bootargs = 0x%xUL;\n" % (symtab.get_symbol_by_name(BOOTARGS_SYM)[0].entry.st_value))
+
cf.close()
verbose("Metadata header generated.")
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 82a44f820c5..8722c879e8b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -103,6 +103,13 @@ config XTENSA_NUM_SPIN_RELAX_NOPS
Specify the number of NOPs in Xtensa specific
arch_spin_relax().
+config XTENSA_BREAK_ON_UNRECOVERABLE_EXCEPTIONS
+ bool "Use BREAK instruction on unrecoverable exceptions"
+ help
+ Use the BREAK instruction when unrecoverable exceptions are
+ encountered. This requires an attached debugger to catch
+ the BREAK.
+
menu "Xtensa HiFi Options"
config XTENSA_CPU_HAS_HIFI
@@ -150,6 +157,12 @@ endif # XTENSA_CPU_HAS_HIFI
endmenu # Xtensa HiFi Options
+config XTENSA_INTERRUPT_NONPREEMPTABLE
+ bool "Xtensa exceptions and interrupts cannot be pre-empted"
+ help
+ Allow medium and high priority interrupts to be used without
+ pre-empting in-progress low priority interrupts and exceptions.
+
if CPU_HAS_MMU
config XTENSA_MMU
diff --git a/arch/xtensa/core/CMakeLists.txt b/arch/xtensa/core/CMakeLists.txt
index 4c2ce8173ca..d03e3641b42 100644
--- a/arch/xtensa/core/CMakeLists.txt
+++ b/arch/xtensa/core/CMakeLists.txt
@@ -12,6 +12,7 @@ zephyr_library_sources(
irq_manage.c
thread.c
vector_handlers.c
+ prep_c.c
)
zephyr_library_sources_ifdef(CONFIG_XTENSA_USE_CORE_CRT1 crt1.S)
@@ -48,7 +49,7 @@ add_subdirectory(startup)
# are the official places where we find core-isa.h. (Also that we
# undefine __XCC_ because that compiler actually trips an error trying
# to build this file to protect against mismatched versions.)
-set(CORE_ISA_DM ${CMAKE_BINARY_DIR}/zephyr/include/generated/core-isa-dM.h)
+set(CORE_ISA_DM ${CMAKE_BINARY_DIR}/zephyr/include/generated/zephyr/core-isa-dM.h)
set(CORE_ISA_IN ${CMAKE_BINARY_DIR}/zephyr/include/generated/core-isa-dM.c)
file(WRITE ${CORE_ISA_IN} "#include \n")
add_custom_command(OUTPUT ${CORE_ISA_DM}
@@ -73,7 +74,7 @@ else()
endif()
# Generates a list of device-specific scratch register choices
-set(ZSR_H ${CMAKE_BINARY_DIR}/zephyr/include/generated/zsr.h)
+set(ZSR_H ${CMAKE_BINARY_DIR}/zephyr/include/generated/zephyr/zsr.h)
add_custom_command(OUTPUT ${ZSR_H} DEPENDS ${CORE_ISA_DM}
COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/gen_zsr.py
$<$:--mmu>
diff --git a/arch/xtensa/core/README_MMU.txt b/arch/xtensa/core/README_MMU.txt
index 499a251cdf2..4332ffdbd43 100644
--- a/arch/xtensa/core/README_MMU.txt
+++ b/arch/xtensa/core/README_MMU.txt
@@ -254,7 +254,7 @@ of access variability. But it also means that the TLB entries end up
being stored twice in the same CPU, wasting transistors that could
presumably store other useful data.
-But it it also important to note that the L1 data cache on Xtensa is
+But it is also important to note that the L1 data cache on Xtensa is
incoherent! The cache being used for refill reflects the last access
on the current CPU only, and not of the underlying memory being
mapped. Page table changes in the data cache of one CPU will be
diff --git a/arch/xtensa/core/coredump.c b/arch/xtensa/core/coredump.c
index a2eec620774..0ee1f8992a6 100644
--- a/arch/xtensa/core/coredump.c
+++ b/arch/xtensa/core/coredump.c
@@ -7,7 +7,7 @@
#include
#include
#include
-#include
+#include
#define ARCH_HDR_VER 1
#define XTENSA_BLOCK_HDR_VER 2
@@ -91,7 +91,7 @@ struct xtensa_arch_block {
*/
static struct xtensa_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
{
struct coredump_arch_hdr_t hdr = {
.id = COREDUMP_ARCH_HDR_ID,
diff --git a/arch/xtensa/core/cpu_idle.c b/arch/xtensa/core/cpu_idle.c
index dae79f023ff..48ddc58cb0a 100644
--- a/arch/xtensa/core/cpu_idle.c
+++ b/arch/xtensa/core/cpu_idle.c
@@ -6,7 +6,7 @@
#include
#include
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
sys_trace_idle();
@@ -14,6 +14,7 @@ void arch_cpu_idle(void)
}
#endif
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
sys_trace_idle();
@@ -21,3 +22,4 @@ void arch_cpu_atomic_idle(unsigned int key)
"wsr.ps %0\n\t"
"rsync" :: "a"(key));
}
+#endif
diff --git a/arch/xtensa/core/crt1.S b/arch/xtensa/core/crt1.S
index c616b0889d7..5e3bf9e8a2c 100644
--- a/arch/xtensa/core/crt1.S
+++ b/arch/xtensa/core/crt1.S
@@ -4,7 +4,7 @@
*/
/*
- * Control arrives here at _start from the reset vector or from crt0-app.S.
+ * Control arrives here at _start from the reset vector.
*/
#include
@@ -22,7 +22,7 @@
*/
.global __start
-.type z_cstart, @function
+.type z_prep_c, @function
/* Macros to abstract away ABI differences */
@@ -52,7 +52,7 @@ _start:
/*
* _start is typically NOT at the beginning of the text segment --
* it is always called from either the reset vector (__start) or other
- * code that does equivalent initialization (such as crt0-app.S).
+ * code that does equivalent initialization.
*
* Assumptions on entry to _start:
* - low (level-one) and medium priority interrupts are disabled
@@ -189,6 +189,6 @@ _start:
#endif /* !XCHAL_HAVE_BOOTLOADER */
/* Enter C domain, never returns from here */
- CALL z_cstart
+ CALL z_prep_c
.size _start, . - _start
diff --git a/arch/xtensa/core/debug_helpers_asm.S b/arch/xtensa/core/debug_helpers_asm.S
index 3dacc1a4587..6ed5ced8c61 100644
--- a/arch/xtensa/core/debug_helpers_asm.S
+++ b/arch/xtensa/core/debug_helpers_asm.S
@@ -10,7 +10,7 @@
#include
#include
-#include
+#include
.section .iram1, "ax"
.align 4
diff --git a/arch/xtensa/core/fatal.c b/arch/xtensa/core/fatal.c
index 6ec5549f2e4..41a7a8d1409 100644
--- a/arch/xtensa/core/fatal.c
+++ b/arch/xtensa/core/fatal.c
@@ -84,10 +84,10 @@ char *xtensa_exccause(unsigned int cause_code)
#endif
}
-void xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+void xtensa_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
- if (esf) {
+ if (esf != NULL) {
/* Don't want to get elbowed by xtensa_switch
* in between printing registers and dumping them;
* corrupts backtrace
@@ -154,6 +154,6 @@ static void z_vrfy_xtensa_user_fault(unsigned int reason)
z_impl_xtensa_user_fault(reason);
}
-#include
+#include
#endif /* CONFIG_USERSPACE */
diff --git a/arch/xtensa/core/gdbstub.c b/arch/xtensa/core/gdbstub.c
index 4df72f0d355..0ebc9cc68cc 100644
--- a/arch/xtensa/core/gdbstub.c
+++ b/arch/xtensa/core/gdbstub.c
@@ -422,7 +422,7 @@ static unsigned int get_gdb_exception_reason(unsigned int reason)
* @param ctx GDB context
* @param stack Pointer to the stack frame
*/
-static void copy_to_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack)
+static void copy_to_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack)
{
struct xtensa_register *reg;
int idx, num_laddr_regs;
@@ -513,7 +513,7 @@ static void copy_to_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack)
* @param ctx GDB context
* @param stack Pointer to the stack frame
*/
-static void restore_from_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack)
+static void restore_from_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack)
{
struct xtensa_register *reg;
int idx, num_laddr_regs;
@@ -913,7 +913,7 @@ int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type,
return ret;
}
-void z_gdb_isr(z_arch_esf_t *esf)
+void z_gdb_isr(struct arch_esf *esf)
{
uint32_t reg;
diff --git a/arch/xtensa/core/gen_zsr.py b/arch/xtensa/core/gen_zsr.py
index 26797a7fe57..6d9a228303f 100755
--- a/arch/xtensa/core/gen_zsr.py
+++ b/arch/xtensa/core/gen_zsr.py
@@ -31,7 +31,7 @@ def parse_args():
NEEDED = ["A0SAVE", "CPU"]
if args.mmu:
- NEEDED += ["MMU_0", "MMU_1", "DBLEXC"]
+ NEEDED += ["DBLEXC", "DEPC_SAVE", "EXCCAUSE_SAVE"]
if args.coherence:
NEEDED += ["FLUSH"]
diff --git a/arch/xtensa/core/irq_offload.c b/arch/xtensa/core/irq_offload.c
index ad35ef4cd0e..0e83520c809 100644
--- a/arch/xtensa/core/irq_offload.c
+++ b/arch/xtensa/core/irq_offload.c
@@ -5,7 +5,7 @@
#include
#include
-#include
+#include
#include
static struct {
@@ -37,3 +37,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
:: "r"(intenable), "r"(BIT(ZSR_IRQ_OFFLOAD_INT)));
arch_irq_unlock(key);
}
+
+void arch_irq_offload_init(void)
+{
+}
diff --git a/arch/xtensa/core/mpu.c b/arch/xtensa/core/mpu.c
index 76036b32c70..09385323dc7 100644
--- a/arch/xtensa/core/mpu.c
+++ b/arch/xtensa/core/mpu.c
@@ -33,6 +33,9 @@ extern char _heap_start[];
/** MPU foreground map for kernel mode. */
static struct xtensa_mpu_map xtensa_mpu_map_fg_kernel;
+/** Make sure writes to the MPU regions are atomic. */
+static struct k_spinlock xtensa_mpu_lock;
+
/*
* Additional information about the MPU maps: foreground and background
* maps.
@@ -443,6 +446,8 @@ static int mpu_map_region_add(struct xtensa_mpu_map *map,
xtensa_mpu_entry_set(entry_slot_s, start_addr, true,
access_rights, memory_type);
+ first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 1;
+ goto end;
} else {
/*
* Populate the last two entries to indicate
@@ -459,6 +464,8 @@ static int mpu_map_region_add(struct xtensa_mpu_map *map,
xtensa_mpu_entry_set(entry_slot_e, end_addr, false,
XTENSA_MPU_ACCESS_P_NA_U_NA,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE);
+ first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 2;
+ goto end;
}
ret = 0;
@@ -595,6 +602,7 @@ static int mpu_map_region_add(struct xtensa_mpu_map *map,
xtensa_mpu_entry_attributes_set(&entries[idx], access_rights, memory_type);
}
+end:
if (first_idx != NULL) {
*first_idx = first_enabled_idx;
}
@@ -624,6 +632,9 @@ void xtensa_mpu_map_write(struct xtensa_mpu_map *map)
#endif
{
int entry;
+ k_spinlock_key_t key;
+
+ key = k_spin_lock(&xtensa_mpu_lock);
#ifdef CONFIG_USERSPACE
struct xtensa_mpu_map *map = thread->arch.mpu_map;
@@ -647,6 +658,8 @@ void xtensa_mpu_map_write(struct xtensa_mpu_map *map)
__asm__ volatile("wptlb %0, %1\n\t"
: : "a"(map->entries[entry].at), "a"(map->entries[entry].as));
}
+
+ k_spin_unlock(&xtensa_mpu_lock, key);
}
/**
@@ -760,6 +773,7 @@ int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
{
int ret;
uint32_t perm;
+ struct k_thread *cur_thread;
struct xtensa_mpu_map *map = &domain->arch.mpu_map;
struct k_mem_partition *partition = &domain->partitions[partition_id];
uintptr_t end_addr = partition->start + partition->size;
@@ -828,6 +842,15 @@ int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
+ /*
+ * Need to update hardware MPU regions if we are removing a
+ * partition from the domain of the currently running thread.
+ */
+ cur_thread = _current_cpu->current;
+ if (cur_thread->mem_domain_info.mem_domain == domain) {
+ xtensa_mpu_map_write(cur_thread);
+ }
+
out:
return ret;
}
@@ -836,6 +859,7 @@ int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
{
int ret;
+ struct k_thread *cur_thread;
struct xtensa_mpu_map *map = &domain->arch.mpu_map;
struct k_mem_partition *partition = &domain->partitions[partition_id];
uintptr_t end_addr = partition->start + partition->size;
@@ -850,6 +874,20 @@ int arch_mem_domain_partition_add(struct k_mem_domain *domain,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
+ /*
+ * Need to update hardware MPU regions if we are adding a
+ * partition to the domain of the currently running thread.
+ *
+ * Note that this function can be called with a dummy thread
+ * at boot, so we need to avoid writing MPU regions to
+ * hardware in that case.
+ */
+ cur_thread = _current_cpu->current;
+ if (((cur_thread->base.thread_state & _THREAD_DUMMY) != _THREAD_DUMMY) &&
+ (cur_thread->mem_domain_info.mem_domain == domain)) {
+ xtensa_mpu_map_write(cur_thread);
+ }
+
out:
return ret;
}
@@ -984,6 +1022,14 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
offset += XCHAL_MPU_ALIGN) {
uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
+ if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
+ /* There is no foreground or background entry associated
+ * with the region.
+ */
+ ret = -EPERM;
+ goto out;
+ }
+
uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
>> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
@@ -1032,6 +1078,95 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
return ret;
}
+bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
+{
+ uintptr_t aligned_addr;
+ size_t aligned_size, addr_offset;
+ bool ret = true;
+
+ /* addr/size arbitrary, fix this up into an aligned region */
+ aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
+ addr_offset = (uintptr_t)addr - aligned_addr;
+ aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
+
+ for (size_t offset = 0; offset < aligned_size;
+ offset += XCHAL_MPU_ALIGN) {
+ uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
+
+ if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
+ /* There is no foreground or background entry associated
+ * with the region.
+ */
+ ret = false;
+ goto out;
+ }
+
+ uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
+ >> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
+
+
+ if (write != 0) {
+ /* Need to check write permission. */
+ switch (access_rights) {
+ case XTENSA_MPU_ACCESS_P_RW_U_NA:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RWX_U_NA:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_WO_U_WO:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RW_U_RWX:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RW_U_RO:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RWX_U_RX:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RW_U_RW:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
+ /* These permissions are okay. */
+ break;
+ default:
+ ret = false;
+ goto out;
+ }
+ } else {
+ /* Only check read permission. */
+ switch (access_rights) {
+ case XTENSA_MPU_ACCESS_P_RO_U_NA:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RX_U_NA:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RW_U_NA:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RWX_U_NA:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RW_U_RWX:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RW_U_RO:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RWX_U_RX:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RO_U_RO:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RX_U_RX:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RW_U_RW:
+ __fallthrough;
+ case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
+ /* These permissions are okay. */
+ break;
+ default:
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ return ret;
+}
+
+
void xtensa_user_stack_perms(struct k_thread *thread)
{
int ret;
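
The two `__fallthrough` switches above enumerate every access-rights encoding that grants the kernel write (respectively read) access. An equivalent and arguably easier-to-audit formulation is a lookup table keyed by the encoding; the sketch below uses hypothetical encoding values, since the real XTENSA_MPU_ACCESS_* numbers live in the MPU headers:

```c
#include <stdbool.h>
#include <stdint.h>

#define KERN_R 0x1 /* kernel may read  */
#define KERN_W 0x2 /* kernel may write */

/* One entry per 4-bit access-rights encoding, giving the kernel-mode
 * capabilities of that encoding.  Index values here are placeholders;
 * unlisted encodings default to 0 (no access).
 */
static const uint8_t kernel_rights[16] = {
	[0x4] = KERN_R,          /* e.g. P_RO_U_NA  */
	[0x5] = KERN_R,          /* e.g. P_RX_U_NA  */
	[0x6] = KERN_R | KERN_W, /* e.g. P_RW_U_NA  */
	[0x7] = KERN_R | KERN_W, /* e.g. P_RWX_U_NA */
	/* ... remaining encodings ... */
};

static bool kernel_access_ok(uint8_t access_rights, bool write)
{
	uint8_t need = write ? KERN_W : KERN_R;

	return (kernel_rights[access_rights & 0xf] & need) == need;
}
```

Either form works; the table makes it harder to forget an encoding (write-only encodings like P_WO_U_WO simply get a KERN_W-only entry) and collapses the read and write checks into one code path.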
diff --git a/arch/xtensa/core/prep_c.c b/arch/xtensa/core/prep_c.c
new file mode 100644
index 00000000000..990915c5a46
--- /dev/null
+++ b/arch/xtensa/core/prep_c.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2024 Intel Corporation.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#include
+#include
+#include
+#include
+
+extern FUNC_NORETURN void z_cstart(void);
+
+/* Defined by the SoC when CONFIG_SOC_HAS_RUNTIME_NUM_CPUS=y */
+extern void soc_num_cpus_init(void);
+
+/**
+ * @brief Prepare for and run C code
+ *
+ * This routine performs the architecture setup needed before
+ * C code can run, then calls z_cstart().
+ */
+void z_prep_c(void)
+{
+#if defined(CONFIG_SOC_PREP_HOOK)
+ soc_prep_hook();
+#endif
+#if CONFIG_SOC_HAS_RUNTIME_NUM_CPUS
+ soc_num_cpus_init();
+#endif
+
+ _cpu_t *cpu0 = &_kernel.cpus[0];
+
+#ifdef CONFIG_KERNEL_COHERENCE
+ /* Make sure we don't have live data in unexpected cached
+ * regions left behind by boot firmware.
+ */
+ sys_cache_data_flush_and_invd_all();
+
+ /* Our cache top stash location might have junk in it from a
+ * pre-boot environment. Must be zero or valid!
+ */
+ XTENSA_WSR(ZSR_FLUSH_STR, 0);
+#endif
+
+ cpu0->nested = 0;
+
+ /* The asm2 scheme keeps the kernel pointer in a scratch SR
+ * (see zsr.h for generation specifics) for easy access. That
+ * saves a 4-byte immediate that would otherwise be needed to
+ * store the address, compared to the legacy scheme. But in SMP
+ * this record is a per-CPU thing, and having it already stored
+ * in an SR is a big win.
+ */
+ XTENSA_WSR(ZSR_CPU_STR, cpu0);
+
+#ifdef CONFIG_INIT_STACKS
+ char *stack_start = K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]);
+ size_t stack_sz = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]);
+ char *stack_end = stack_start + stack_sz;
+
+ uint32_t sp;
+
+ __asm__ volatile("mov %0, sp" : "=a"(sp));
+
+ /* Only clear the interrupt stack if the current stack pointer
+ * is not within the interrupt stack. Or else we would be
+ * wiping the in-use stack.
+ */
+ if (((uintptr_t)sp < (uintptr_t)stack_start) ||
+ ((uintptr_t)sp >= (uintptr_t)stack_end)) {
+ memset(stack_start, 0xAA, stack_sz);
+ }
+#endif
+#if CONFIG_ARCH_CACHE
+ arch_cache_init();
+#endif
+
+#ifdef CONFIG_XTENSA_MMU
+ xtensa_mmu_init();
+#endif
+
+#ifdef CONFIG_XTENSA_MPU
+ xtensa_mpu_init();
+#endif
+ z_cstart();
+ CODE_UNREACHABLE;
+}
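
The CONFIG_INIT_STACKS guard above only poisons the interrupt stack when the current stack pointer lies outside it. A minimal sketch of that guard in isolation (the helper name is hypothetical, not kernel code):

/* Poison a stack region with the 0xAA fill pattern only when we are
 * not currently executing on it.
 */
#include <stdint.h>
#include <string.h>

static void poison_stack_if_unused(char *start, size_t len, uintptr_t sp)
{
	uintptr_t lo = (uintptr_t)start;
	uintptr_t hi = lo + len;

	if ((sp < lo) || (sp >= hi)) {
		memset(start, 0xAA, len); /* safe: sp is not inside [lo, hi) */
	}
}
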
diff --git a/arch/xtensa/core/ptables.c b/arch/xtensa/core/ptables.c
index f44e17ad6a9..c02ecc64b0d 100644
--- a/arch/xtensa/core/ptables.c
+++ b/arch/xtensa/core/ptables.c
@@ -267,6 +267,12 @@ static void map_memory(const uint32_t start, const uint32_t end,
static void xtensa_init_page_tables(void)
{
volatile uint8_t entry;
+ static bool already_inited;
+
+ if (already_inited) {
+ return;
+ }
+ already_inited = true;
init_page_table(xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES);
atomic_set_bit(l1_page_table_track, 0);
@@ -305,18 +311,35 @@ __weak void arch_xtensa_mmu_post_init(bool is_core0)
void xtensa_mmu_init(void)
{
- if (_current_cpu->id == 0) {
- /* This is normally done via arch_kernel_init() inside z_cstart().
- * However, before that is called, we go through the sys_init of
- * INIT_LEVEL_EARLY, which is going to result in TLB misses.
- * So setup whatever necessary so the exception handler can work
- * properly.
- */
- xtensa_init_page_tables();
- }
+ xtensa_init_page_tables();
xtensa_init_paging(xtensa_kernel_ptables);
+ /*
+ * A non-zero value here indicates that we are faulting inside a
+ * double exception. Some SoCs do not start with this register
+ * zeroed, so clear it during boot.
+ */
+ XTENSA_WSR(ZSR_DEPC_SAVE_STR, 0);
+
+ arch_xtensa_mmu_post_init(_current_cpu->id == 0);
+}
+
+void xtensa_mmu_reinit(void)
+{
+ /* First initialize the hardware */
+ xtensa_init_paging(xtensa_kernel_ptables);
+
+#ifdef CONFIG_USERSPACE
+ struct k_thread *thread = _current_cpu->current;
+ struct arch_mem_domain *domain =
+ &(thread->mem_domain_info.mem_domain->arch);
+
+ /* Set the page table for current context */
+ xtensa_set_paging(domain->asid, domain->ptables);
+#endif /* CONFIG_USERSPACE */
+
arch_xtensa_mmu_post_init(_current_cpu->id == 0);
}
@@ -335,7 +358,7 @@ __weak void arch_reserved_pages_update(void)
for (page = CONFIG_SRAM_BASE_ADDRESS, idx = 0;
page < (uintptr_t)z_mapped_start;
page += CONFIG_MMU_PAGE_SIZE, idx++) {
- z_page_frame_set(&z_page_frames[idx], Z_PAGE_FRAME_RESERVED);
+ k_mem_page_frame_set(&k_mem_page_frames[idx], K_MEM_PAGE_FRAME_RESERVED);
}
}
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */
@@ -1058,15 +1081,13 @@ static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool w
return true;
}
-int arch_buffer_validate(const void *addr, size_t size, int write)
+static int mem_buffer_validate(const void *addr, size_t size, int write, int ring)
{
int ret = 0;
uint8_t *virt;
size_t aligned_size;
const struct k_thread *thread = _current;
uint32_t *ptables = thread_page_tables_get(thread);
- uint8_t ring = ((thread->base.user_options & K_USER) != 0) ?
- XTENSA_MMU_USER_RING : XTENSA_MMU_KERNEL_RING;
/* addr/size arbitrary, fix this up into an aligned region */
k_mem_region_align((uintptr_t *)&virt, &aligned_size,
@@ -1083,6 +1104,16 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
return ret;
}
+bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
+{
+ return mem_buffer_validate(addr, size, write, XTENSA_MMU_KERNEL_RING) == 0;
+}
+
+int arch_buffer_validate(const void *addr, size_t size, int write)
+{
+ return mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING);
+}
+
void xtensa_swap_update_page_tables(struct k_thread *incoming)
{
uint32_t *ptables = incoming->arch.ptables;
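
With the refactor above, the same page-table walk answers two different questions: arch_buffer_validate() asks on behalf of the user ring, while xtensa_mem_kernel_has_access() asks for the kernel ring. A hedged usage sketch (the wrapper and its caller here are illustrative, not kernel code):

#include <stdbool.h>
#include <stddef.h>

extern bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write);
extern int arch_buffer_validate(const void *addr, size_t size, int write);

/* Returns 0 when a user-supplied buffer is safe both for the kernel
 * itself to read and for the calling user thread to have handed out.
 */
static int user_buffer_ok(const void *buf, size_t len)
{
	/* Kernel-ring check: may the kernel dereference it at all? */
	if (!xtensa_mem_kernel_has_access((void *)buf, len, 0)) {
		return -1;
	}

	/* User-ring check: does the calling thread have read access? */
	return arch_buffer_validate(buf, len, 0);
}
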
diff --git a/arch/xtensa/core/smp.c b/arch/xtensa/core/smp.c
index ffd08ab805c..71d8150025b 100644
--- a/arch/xtensa/core/smp.c
+++ b/arch/xtensa/core/smp.c
@@ -19,3 +19,13 @@ void arch_spin_relax(void)
#undef NOP1
}
#endif /* CONFIG_XTENSA_MORE_SPIN_RELAX_NOPS */
+
+
+/**
+ * Multi-core/SMP initialization is done at the SoC level.
+ * This stub is provided for compatibility with other SMP systems.
+ */
+int arch_smp_init(void)
+{
+ return 0;
+}
diff --git a/arch/xtensa/core/syscall_helper.c b/arch/xtensa/core/syscall_helper.c
index da2a5551c9a..24feda91c80 100644
--- a/arch/xtensa/core/syscall_helper.c
+++ b/arch/xtensa/core/syscall_helper.c
@@ -4,8 +4,13 @@
* SPDX-License-Identifier: Apache-2.0
*/
+#include
+
#include
+#include
+#include
+
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
uintptr_t xtensa_syscall_helper(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
@@ -47,3 +52,41 @@ bool xtensa_is_user_context(void)
return ret != 0;
}
#endif /* XCHAL_HAVE_THREADPTR */
+
+size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
+{
+ /* Check if we can actually read the whole length.
+ *
+ * arch_user_string_nlen() is supposed to naively walk
+ * the string passed from the user thread, relying on page
+ * faults to catch inaccessible strings, so that a user thread
+ * can pass a string shorter than the max length the caller
+ * expects. So at a minimum we want to make sure the kernel
+ * has access to the whole length, i.e. that the memory is
+ * mapped.
+ * Note that arch_user_string_nlen() must never result in
+ * thread termination due to page faults, and must always
+ * return to the caller with err_arg set or cleared.
+ * On MMU systems, unmapped memory results in a DTLB miss,
+ * which might trigger an infinite DTLB miss storm if the
+ * corresponding L2 page table never existed in the first
+ * place (so DTLB misses keep going through the L1 page
+ * table), until some other exception occurs to break
+ * the cycle.
+ * On MPU systems, this would simply result in access errors
+ * and the exception handler would terminate the thread.
+ */
+ if (!xtensa_mem_kernel_has_access((void *)s, maxsize, 0)) {
+ /*
+ * API says we need to set err_arg to -1 if there are
+ * any errors.
+ */
+ *err_arg = -1;
+
+ return 0;
+ }
+
+ /* No error and we can proceed to getting the string length. */
+ *err_arg = 0;
+
+ return strnlen(s, maxsize);
+}
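
The caller-side contract described in the comment above boils down to: a non-zero err means the memory could not even be probed, and a length equal to maxsize means no terminator was found within bounds. A hedged sketch of a consumer (names are illustrative only):

#include <stddef.h>

extern size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg);

static int user_string_ok(const char *s, size_t maxsize)
{
	int err;
	size_t len = arch_user_string_nlen(s, maxsize, &err);

	if (err != 0) {
		return -1;       /* memory not accessible at all */
	}
	if (len == maxsize) {
		return -1;       /* no NUL found within bounds */
	}
	return (int)len;         /* safe to copy len + 1 bytes */
}
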
diff --git a/arch/xtensa/core/thread.c b/arch/xtensa/core/thread.c
index 4ba0150f705..f59c8274994 100644
--- a/arch/xtensa/core/thread.c
+++ b/arch/xtensa/core/thread.c
@@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
+#include
#include
#include
@@ -72,6 +73,18 @@ static void *init_stack(struct k_thread *thread, int *stack_top,
frame->bsa.ps = PS_WOE | PS_UM | PS_CALLINC(1);
#ifdef CONFIG_USERSPACE
if ((thread->base.user_options & K_USER) == K_USER) {
+#ifdef CONFIG_INIT_STACKS
+ /* setup_thread_stack() does not initialize the architecture-specific
+ * privileged stack, so we need to do it manually here. This function
+ * is called by arch_new_thread() via z_setup_new_thread() after
+ * setup_thread_stack() but before the thread starts running.
+ *
+ * Note that only user threads have privileged stacks;
+ * kernel-only threads do not.
+ */
+ (void)memset(&header->privilege_stack[0], 0xaa, sizeof(header->privilege_stack));
+#endif
+
frame->bsa.pc = (uintptr_t)arch_user_mode_enter;
} else {
frame->bsa.pc = (uintptr_t)z_thread_entry;
@@ -158,4 +171,20 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
CODE_UNREACHABLE;
}
+
+int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
+ size_t *unused_ptr)
+{
+ struct xtensa_thread_stack_header *hdr_stack_obj;
+
+ if ((thread->base.user_options & K_USER) != K_USER) {
+ return -EINVAL;
+ }
+
+ hdr_stack_obj = (struct xtensa_thread_stack_header *)thread->stack_obj;
+
+ return z_stack_space_get(&hdr_stack_obj->privilege_stack[0],
+ sizeof(hdr_stack_obj->privilege_stack),
+ unused_ptr);
+}
#endif /* CONFIG_USERSPACE */
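
A hedged usage sketch for the new query (a debug/shell context is assumed; the reporting helper is illustrative). Note that, as written above, the function fills *unused_ptr but does not write *stack_size:

#include <zephyr/kernel.h>

extern int arch_thread_priv_stack_space_get(const struct k_thread *thread,
					    size_t *stack_size, size_t *unused_ptr);

/* Report how much of a user thread's privileged stack was never
 * written, relying on the 0xAA fill done in init_stack() above.
 */
static void report_priv_stack(const struct k_thread *thread)
{
	size_t size = 0, unused = 0;

	if (arch_thread_priv_stack_space_get(thread, &size, &unused) == 0) {
		printk("privileged stack: %zu bytes never touched\n", unused);
	} else {
		printk("kernel-only thread: no privileged stack\n");
	}
}
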
diff --git a/arch/xtensa/core/userspace.S b/arch/xtensa/core/userspace.S
index 1578e3be971..3db5d8cea8d 100644
--- a/arch/xtensa/core/userspace.S
+++ b/arch/xtensa/core/userspace.S
@@ -5,10 +5,10 @@
*/
#include
-#include
+#include
#include
#include
-#include
+#include
#include
@@ -352,45 +352,3 @@ xtensa_userspace_enter:
movi a0, 0
rfi 2
-
-/*
- * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
- */
-.global arch_user_string_nlen
-.type arch_user_string_nlen, @function
-.align 4
-arch_user_string_nlen:
- entry a1, 32
-
- /* error value, set to -1. */
- movi a5, -1
- s32i a5, a4, 0
-
- /* length count */
- xor a5, a5, a5
-
- /* This code might page fault */
-strlen_loop:
-.global xtensa_user_string_nlen_fault_start
-xtensa_user_string_nlen_fault_start:
- l8ui a6, a2, 0 /* Current char */
-
-.global xtensa_user_string_nlen_fault_end
-xtensa_user_string_nlen_fault_end:
- beqz a6, strlen_done
- addi a5, a5, 1
- addi a2, a2, 1
- beq a5, a3, strlen_done
- j strlen_loop
-
-strlen_done:
- /* Set return value */
- mov a2, a5
-
- /* Set error value to 0 since we succeeded */
- movi a5, 0x0
- s32i a5, a4, 0
-
-.global xtensa_user_string_nlen_fixup
-xtensa_user_string_nlen_fixup:
- retw
diff --git a/arch/xtensa/core/vector_handlers.c b/arch/xtensa/core/vector_handlers.c
index f0b0a9175ff..fa58b9c2133 100644
--- a/arch/xtensa/core/vector_handlers.c
+++ b/arch/xtensa/core/vector_handlers.c
@@ -12,8 +12,8 @@
#include
#include
#include
-#include
-#include
+#include
+#include
#include
#ifdef CONFIG_XTENSA_GEN_HANDLERS
@@ -22,28 +22,136 @@
#include <_soc_inthandlers.h>
#endif
+#include
#include
+#include
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
extern char xtensa_arch_except_epc[];
extern char xtensa_arch_kernel_oops_epc[];
+bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps)
+{
+ uintptr_t start, end;
+ struct k_thread *thread = _current;
+ bool was_in_isr, invalid;
+
+ /* Without userspace there is no privileged stack, so the thread stack
+ * is the whole stack (minus the reserved area) and there is no need
+ * to treat PS == UINT32_MAX specially.
+ */
+ ARG_UNUSED(ps);
+
+ /* Since both level 1 interrupts and exceptions go through
+ * the same interrupt vector, both of them increase the nested
+ * counter in the CPU struct. The architecture vector handler
+ * moves execution to the interrupt stack when nested goes from
+ * zero to one; afterwards, any nested interrupts/exceptions
+ * continue running on the interrupt stack. Therefore, only when
+ * nested > 1 were we already running on the interrupt stack,
+ * and only then should we check bounds against it.
+ */
+ was_in_isr = arch_curr_cpu()->nested > 1;
+
+ if ((thread == NULL) || was_in_isr) {
+ /* We were servicing an interrupt or in early boot environment
+ * and are supposed to be on the interrupt stack.
+ */
+ int cpu_id;
+
+#ifdef CONFIG_SMP
+ cpu_id = arch_curr_cpu()->id;
+#else
+ cpu_id = 0;
+#endif
+
+ start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
+ end = start + CONFIG_ISR_STACK_SIZE;
#ifdef CONFIG_USERSPACE
-Z_EXC_DECLARE(xtensa_user_string_nlen);
+ } else if (ps == UINT32_MAX) {
+ /* Since the stashed PS is inside the struct pointed to by frame->ptr_to_bsa,
+ * we need to verify that both frame and frame->ptr_to_bsa are valid
+ * pointers within the thread stack. Also, without PS, we have no idea
+ * whether we were in kernel mode (using privileged stack) or user
+ * mode (normal thread stack). So we need to check the whole stack
+ * area.
+ *
+ * On top of that, we cannot account for the reserved area since we
+ * have no idea which one applies (ARCH_KERNEL_STACK_RESERVED or
+ * ARCH_THREAD_STACK_RESERVED), not knowing whether we were in kernel
+ * or user mode.
+ */
+ start = (uintptr_t)thread->stack_obj;
+ end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);
+ } else if (((ps & PS_RING_MASK) == 0U) &&
+ ((thread->base.user_options & K_USER) == K_USER)) {
+ /* Check if this is a user thread, and that it was running in
+ * kernel mode. If so, we must have been doing a syscall, so
+ * check with privileged stack bounds.
+ */
+ start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
+ end = thread->stack_info.start;
+#endif
+ } else {
+ start = thread->stack_info.start;
+ end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);
+ }
-static const struct z_exc_handle exceptions[] = {
- Z_EXC_HANDLE(xtensa_user_string_nlen)
-};
-#endif /* CONFIG_USERSPACE */
+ invalid = (addr <= start) || ((addr + sz) >= end);
+
+ return invalid;
+}
-void xtensa_dump_stack(const z_arch_esf_t *stack)
+bool xtensa_is_frame_pointer_valid(_xtensa_irq_stack_frame_raw_t *frame)
+{
+ _xtensa_irq_bsa_t *bsa;
+
+ /* Check if the pointer to the frame is within stack bounds. If not,
+ * the BSA (base save area) pointer derived from it cannot be trusted
+ * either, so there is no need to test it.
+ */
+ if (xtensa_is_outside_stack_bounds((uintptr_t)frame, sizeof(*frame), UINT32_MAX)) {
+ return false;
+ }
+
+ /* Need to test if the BSA area is also within stack bounds. The information
+ * contained within the BSA is only valid if within stack bounds.
+ */
+ bsa = frame->ptr_to_bsa;
+ if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) {
+ return false;
+ }
+
+#ifdef CONFIG_USERSPACE
+ /* With userspace, we have the privileged stack and the normal thread
+ * stack within one stack object. So we need to further test whether
+ * the frame pointer resides in the correct stack based on kernel/user
+ * mode.
+ */
+ if (xtensa_is_outside_stack_bounds((uintptr_t)frame, sizeof(*frame), bsa->ps)) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+void xtensa_dump_stack(const void *stack)
{
_xtensa_irq_stack_frame_raw_t *frame = (void *)stack;
- _xtensa_irq_bsa_t *bsa = frame->ptr_to_bsa;
+ _xtensa_irq_bsa_t *bsa;
uintptr_t num_high_regs;
int reg_blks_remaining;
+ /* Don't dump the stack if the frame pointer is invalid, as any frame
+ * elements obtained by dereferencing it are probably also invalid.
+ * Or worse, dereferencing them could cause another access violation.
+ */
+ if (!xtensa_is_frame_pointer_valid(frame)) {
+ return;
+ }
+
+ bsa = frame->ptr_to_bsa;
+
/* Calculate number of high registers. */
num_high_regs = (uint8_t *)bsa - (uint8_t *)frame + sizeof(void *);
num_high_regs /= sizeof(uintptr_t);
@@ -116,15 +224,30 @@ static void print_fatal_exception(void *print_stack, int cause,
uint32_t ps, vaddr;
_xtensa_irq_bsa_t *bsa = (void *)*(int **)print_stack;
- ps = bsa->ps;
- pc = (void *)bsa->pc;
-
__asm__ volatile("rsr.excvaddr %0" : "=r"(vaddr));
- LOG_ERR(" ** FATAL EXCEPTION%s", (is_dblexc ? " (DOUBLE)" : ""));
+ if (is_dblexc) {
+ LOG_ERR(" ** FATAL EXCEPTION (DOUBLE)");
+ } else {
+ LOG_ERR(" ** FATAL EXCEPTION");
+ }
+
LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)",
arch_curr_cpu()->id, cause,
xtensa_exccause(cause));
+
+ /* Don't print the information if the BSA area is invalid, as any
+ * elements obtained by dereferencing the pointer are probably also
+ * invalid. Or worse, dereferencing them could cause another access
+ * violation.
+ */
+ if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) {
+ LOG_ERR(" ** VADDR %p Invalid SP %p", (void *)vaddr, print_stack);
+ return;
+ }
+
+ ps = bsa->ps;
+ pc = (void *)bsa->pc;
+
LOG_ERR(" ** PC %p VADDR %p", pc, (void *)vaddr);
if (is_dblexc) {
@@ -218,9 +341,10 @@ static inline DEF_INT_C_HANDLER(1)
* different because exceptions and interrupts land at the same
* vector; other interrupt levels have their own vectors.
*/
-void *xtensa_excint1_c(int *interrupted_stack)
+void *xtensa_excint1_c(void *esf)
{
- int cause;
+ int cause, reason;
+ int *interrupted_stack = &((struct arch_esf *)esf)->dummy;
_xtensa_irq_bsa_t *bsa = (void *)*(int **)interrupted_stack;
bool is_fatal_error = false;
bool is_dblexc = false;
@@ -228,19 +352,24 @@ void *xtensa_excint1_c(int *interrupted_stack)
void *pc, *print_stack = (void *)interrupted_stack;
uint32_t depc = 0;
- __asm__ volatile("rsr.exccause %0" : "=r"(cause));
-
#ifdef CONFIG_XTENSA_MMU
- __asm__ volatile("rsr.depc %0" : "=r"(depc));
+ depc = XTENSA_RSR(ZSR_DEPC_SAVE_STR);
+ cause = XTENSA_RSR(ZSR_EXCCAUSE_SAVE_STR);
is_dblexc = (depc != 0U);
+#else /* CONFIG_XTENSA_MMU */
+ __asm__ volatile("rsr.exccause %0" : "=r"(cause));
#endif /* CONFIG_XTENSA_MMU */
switch (cause) {
case EXCCAUSE_LEVEL1_INTERRUPT:
+#ifdef CONFIG_XTENSA_MMU
if (!is_dblexc) {
return xtensa_int1_c(interrupted_stack);
}
+#else
+ return xtensa_int1_c(interrupted_stack);
+#endif /* CONFIG_XTENSA_MMU */
break;
#ifndef CONFIG_USERSPACE
/* Syscalls are handled earlier in assembly if MMU is enabled.
@@ -260,26 +389,17 @@ void *xtensa_excint1_c(int *interrupted_stack)
break;
#endif /* !CONFIG_USERSPACE */
default:
- ps = bsa->ps;
- pc = (void *)bsa->pc;
-
-#ifdef CONFIG_USERSPACE
- /* If the faulting address is from one of the known
- * exceptions that should not be fatal, return to
- * the fixup address.
- */
- for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
- if ((pc >= exceptions[i].start) &&
- (pc < exceptions[i].end)) {
- bsa->pc = (uintptr_t)exceptions[i].fixup;
+ reason = K_ERR_CPU_EXCEPTION;
- goto fixup_out;
- }
+ /* If the BSA area is invalid, we cannot trust anything coming out of it. */
+ if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) {
+ goto skip_checks;
}
-#endif /* CONFIG_USERSPACE */
+
+ ps = bsa->ps;
+ pc = (void *)bsa->pc;
/* Default for exception */
- int reason = K_ERR_CPU_EXCEPTION;
is_fatal_error = true;
/* We need to distinguish between an ill in xtensa_arch_except,
@@ -313,6 +433,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
}
}
+skip_checks:
if (reason != K_ERR_KERNEL_OOPS) {
print_fatal_exception(print_stack, cause, is_dblexc, depc);
}
@@ -369,15 +490,11 @@ void *xtensa_excint1_c(int *interrupted_stack)
_current_cpu->nested = 1;
}
-#if defined(CONFIG_XTENSA_MMU) || defined(CONFIG_XTENSA_MPU)
-#ifdef CONFIG_USERSPACE
-fixup_out:
-#endif
+#if defined(CONFIG_XTENSA_MMU)
if (is_dblexc) {
- __asm__ volatile("wsr.depc %0" : : "r"(0));
+ XTENSA_WSR(ZSR_DEPC_SAVE_STR, 0);
}
-#endif /* CONFIG_XTENSA_MMU || CONFIG_XTENSA_MPU */
-
+#endif /* CONFIG_XTENSA_MMU */
return return_to(interrupted_stack);
}
@@ -385,7 +502,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
#if defined(CONFIG_GDBSTUB)
void *xtensa_debugint_c(int *interrupted_stack)
{
- extern void z_gdb_isr(z_arch_esf_t *esf);
+ extern void z_gdb_isr(struct arch_esf *esf);
z_gdb_isr((void *)interrupted_stack);
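
The pattern running through the changes above is validate-before-dereference: treat any saved frame as hostile until both the frame pointer and its BSA pointer land inside the stack that should have been active. A standalone sketch of that layering (the local struct types only loosely mirror the real ones):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct bsa { uint32_t ps, pc; };            /* illustrative stand-in */
struct frame { struct bsa *ptr_to_bsa; };   /* illustrative stand-in */

extern bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps);

static const struct bsa *checked_bsa(const struct frame *f)
{
	if (xtensa_is_outside_stack_bounds((uintptr_t)f, sizeof(*f), UINT32_MAX)) {
		return NULL; /* the frame pointer itself is bogus */
	}
	if (xtensa_is_outside_stack_bounds((uintptr_t)f->ptr_to_bsa,
					   sizeof(*f->ptr_to_bsa), UINT32_MAX)) {
		return NULL; /* the BSA pointer escaped the stack */
	}
	return f->ptr_to_bsa; /* now safe to read ps/pc from it */
}
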
diff --git a/arch/xtensa/core/window_vectors.S b/arch/xtensa/core/window_vectors.S
index 90eba495bde..6c4e8c44803 100644
--- a/arch/xtensa/core/window_vectors.S
+++ b/arch/xtensa/core/window_vectors.S
@@ -4,7 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include
-#include
+#include
/* WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION
* HANDLER
diff --git a/arch/xtensa/core/xtensa_asm2_util.S b/arch/xtensa/core/xtensa_asm2_util.S
index dad8f199359..11b18013033 100644
--- a/arch/xtensa/core/xtensa_asm2_util.S
+++ b/arch/xtensa/core/xtensa_asm2_util.S
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include
-#include
-#include
+#include
+#include
#if defined(CONFIG_SIMULATOR_XTENSA) || defined(XT_SIMULATOR)
#include
@@ -489,13 +489,29 @@ _DoubleExceptionVector:
addi a0, a0, -EXCCAUSE_DTLB_MISS
beqz a0, _handle_tlb_miss_dblexc
+ /* Need to stash DEPC for use by the C handler.
+ * If we encounter any DTLB miss while PS.EXCM is set,
+ * this vector is taken and the DEPC register will
+ * contain the new address instead of the one that
+ * resulted in the double exception.
+ */
+ rsr.depc a0
+ wsr a0, ZSR_DEPC_SAVE
+
rsr a0, ZSR_DBLEXC
j _Level1Vector
-#else
-#if defined(CONFIG_SIMULATOR_XTENSA) || defined(XT_SIMULATOR)
-1:
+_TripleFault:
+#endif /* CONFIG_XTENSA_MMU */
+
+#if XCHAL_HAVE_DEBUG && defined(CONFIG_XTENSA_BREAK_ON_UNRECOVERABLE_EXCEPTIONS)
+ /* Signals an unhandled double exception or other unrecoverable
+ * exception. A debugger must be attached to the hardware or
+ * simulator to catch this.
+ */
+ break 1, 4
+#elif defined(CONFIG_SIMULATOR_XTENSA) || defined(XT_SIMULATOR)
/* Tell simulator to stop executing here, instead of trying to do
* an infinite loop (see below). Greatly help with using tracing in
* simulator so that traces will not have infinite iterations of
@@ -504,14 +520,9 @@ _DoubleExceptionVector:
movi a3, 1
movi a2, SYS_exit
simcall
-#elif XCHAL_HAVE_DEBUG
-/* Signals an unhandled double exception */
-1: break 1, 4
-#else
-1:
#endif
+1:
j 1b
-#endif /* CONFIG_XTENSA_MMU */
#ifdef CONFIG_XTENSA_MMU
_handle_tlb_miss_dblexc:
diff --git a/arch/xtensa/core/xtensa_backtrace.c b/arch/xtensa/core/xtensa_backtrace.c
index 8570fa5dbd6..145d52ce58d 100644
--- a/arch/xtensa/core/xtensa_backtrace.c
+++ b/arch/xtensa/core/xtensa_backtrace.c
@@ -14,6 +14,10 @@
#elif defined(CONFIG_SOC_XTENSA_DC233C)
#include "backtrace_helpers.h"
#endif
+
+#include
+#include
+
static int mask, cause;
static inline uint32_t xtensa_cpu_process_stack_pc(uint32_t pc)
@@ -36,15 +40,25 @@ static inline uint32_t xtensa_cpu_process_stack_pc(uint32_t pc)
static inline bool xtensa_stack_ptr_is_sane(uint32_t sp)
{
+ bool valid;
+
#if defined(CONFIG_SOC_SERIES_ESP32)
- return esp_stack_ptr_is_sane(sp);
+ valid = esp_stack_ptr_is_sane(sp);
#elif defined(CONFIG_SOC_FAMILY_INTEL_ADSP)
- return intel_adsp_ptr_is_sane(sp);
-#elif defined(CONFIG_SOC_XTENSA_DC233C)
- return xtensa_dc233c_stack_ptr_is_sane(sp);
+ valid = intel_adsp_ptr_is_sane(sp);
#else
-#warning "xtensa_stack_ptr_is_sane is not defined for this platform"
+ /* The platform imposes no additional requirements on stack
+ * pointer validity, so use the generic test below.
+ */
+ valid = true;
#endif
+
+ if (valid) {
+ valid = !xtensa_is_outside_stack_bounds(sp, 0, UINT32_MAX);
+ }
+
+ return valid;
}
static inline bool xtensa_ptr_executable(const void *p)
@@ -62,6 +76,13 @@ static inline bool xtensa_ptr_executable(const void *p)
bool xtensa_backtrace_get_next_frame(struct xtensa_backtrace_frame_t *frame)
{
+ /* Do not continue backtrace when we encounter an invalid stack
+ * frame pointer.
+ */
+ if (xtensa_is_outside_stack_bounds((uintptr_t)frame->sp, 0, UINT32_MAX)) {
+ return false;
+ }
+
/* Use frame(i-1)'s BS area located below frame(i)'s
* sp to get frame(i-1)'s sp and frame(i-2)'s pc
*/
@@ -91,14 +112,27 @@ int xtensa_backtrace_print(int depth, int *interrupted_stack)
return -1;
}
+ _xtensa_irq_stack_frame_raw_t *frame = (void *)interrupted_stack;
+ _xtensa_irq_bsa_t *bsa;
+
+ /* Don't dump the stack if the frame pointer is invalid, as
+ * any frame elements obtained by dereferencing it are
+ * probably also invalid. Or worse, dereferencing them
+ * could cause another access violation.
+ */
+ if (!xtensa_is_frame_pointer_valid(frame)) {
+ return -1;
+ }
+
+ bsa = frame->ptr_to_bsa;
+ cause = bsa->exccause;
+
/* Initialize stk_frame with first frame of stack */
struct xtensa_backtrace_frame_t stk_frame;
xtensa_backtrace_get_start(&(stk_frame.pc), &(stk_frame.sp),
&(stk_frame.next_pc), interrupted_stack);
- __asm__ volatile("l32i a4, a3, 0");
- __asm__ volatile("l32i a4, a4, 4");
- __asm__ volatile("mov %0, a4" : "=r"(cause));
+
if (cause != EXCCAUSE_INSTR_PROHIBITED) {
mask = stk_frame.pc & 0xc0000000;
}
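
A hedged sketch of walking frames with the bounds checking added above: xtensa_backtrace_get_next_frame() now bails out when a frame's sp is outside the expected stack bounds, so a walker can simply stop when it returns false. The struct below is a standalone stand-in for the real definition, and the starting frame would come from xtensa_backtrace_get_start():

#include <stdbool.h>
#include <stdint.h>

struct xtensa_backtrace_frame_t { uint32_t pc, sp, next_pc; }; /* illustrative */

extern bool xtensa_backtrace_get_next_frame(struct xtensa_backtrace_frame_t *frame);

/* Walk up to 'depth' frames, returning how many were traversed. */
static int walk_frames(struct xtensa_backtrace_frame_t f, int depth)
{
	int traversed = 0;

	while ((depth-- > 0) && (f.next_pc != 0)) {
		if (!xtensa_backtrace_get_next_frame(&f)) {
			break; /* invalid sp: end the walk safely */
		}
		traversed++;
	}
	return traversed;
}
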
diff --git a/arch/xtensa/core/xtensa_hifi.S b/arch/xtensa/core/xtensa_hifi.S
index 3c311acab0f..dff714cce18 100644
--- a/arch/xtensa/core/xtensa_hifi.S
+++ b/arch/xtensa/core/xtensa_hifi.S
@@ -4,7 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
-#include
+#include
#include