diff --git a/.github/actions/do-build/action.yml b/.github/actions/do-build/action.yml
index 3deb7f4b8f8..79eddf8c70f 100644
--- a/.github/actions/do-build/action.yml
+++ b/.github/actions/do-build/action.yml
@@ -66,7 +66,7 @@ runs:
shell: bash
- name: 'Upload build logs'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: failure-logs-${{ inputs.platform }}${{ inputs.debug-suffix }}
path: failure-logs
@@ -74,7 +74,7 @@ runs:
# This is the best way I found to abort the job with an error message
- name: 'Notify about build failures'
- uses: actions/github-script@v6
+ uses: actions/github-script@v7
with:
script: core.setFailed('Build failed. See summary for details.')
if: steps.check.outputs.failure == 'true'
diff --git a/.github/actions/get-bootjdk/action.yml b/.github/actions/get-bootjdk/action.yml
index 1e569dd47c5..25ee1d8dfa0 100644
--- a/.github/actions/get-bootjdk/action.yml
+++ b/.github/actions/get-bootjdk/action.yml
@@ -65,7 +65,7 @@ runs:
- name: 'Check cache for BootJDK'
id: get-cached-bootjdk
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: bootjdk/jdk
key: boot-jdk-${{ inputs.platform }}-${{ steps.sha256.outputs.value }}
diff --git a/.github/actions/get-bundles/action.yml b/.github/actions/get-bundles/action.yml
index 956e1520cfb..0e52320a350 100644
--- a/.github/actions/get-bundles/action.yml
+++ b/.github/actions/get-bundles/action.yml
@@ -48,14 +48,14 @@ runs:
steps:
- name: 'Download bundles artifact'
id: download-bundles
- uses: actions/download-artifact@v3
+ uses: actions/download-artifact@v4
with:
name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}
path: bundles
continue-on-error: true
- name: 'Download bundles artifact (retry)'
- uses: actions/download-artifact@v3
+ uses: actions/download-artifact@v4
with:
name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}
path: bundles
diff --git a/.github/actions/get-jtreg/action.yml b/.github/actions/get-jtreg/action.yml
index a45c0c1e6a9..faedcc18807 100644
--- a/.github/actions/get-jtreg/action.yml
+++ b/.github/actions/get-jtreg/action.yml
@@ -41,7 +41,7 @@ runs:
- name: 'Check cache for JTReg'
id: get-cached-jtreg
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: jtreg/installed
key: jtreg-${{ steps.version.outputs.value }}
diff --git a/.github/actions/get-msys2/action.yml b/.github/actions/get-msys2/action.yml
index 7dac1538536..843b77ac064 100644
--- a/.github/actions/get-msys2/action.yml
+++ b/.github/actions/get-msys2/action.yml
@@ -30,8 +30,7 @@ runs:
using: composite
steps:
- name: 'Install MSYS2'
- # use a specific release of msys2/setup-msys2 to prevent jtreg build failures on newer release
- uses: msys2/setup-msys2@7efe20baefed56359985e327d329042cde2434ff
+ uses: msys2/setup-msys2@v2.22.0
with:
install: 'autoconf tar unzip zip make'
path-type: minimal
diff --git a/.github/actions/upload-bundles/action.yml b/.github/actions/upload-bundles/action.yml
index 88f7f6e8107..b35ee3a42e9 100644
--- a/.github/actions/upload-bundles/action.yml
+++ b/.github/actions/upload-bundles/action.yml
@@ -69,7 +69,7 @@ runs:
shell: bash
- name: 'Upload bundles artifact'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}
path: bundles
diff --git a/.github/workflows/build-cross-compile.yml b/.github/workflows/build-cross-compile.yml
index 5db69f07d98..1c1aee9061f 100644
--- a/.github/workflows/build-cross-compile.yml
+++ b/.github/workflows/build-cross-compile.yml
@@ -61,27 +61,32 @@ jobs:
debian-arch: arm64
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
+ tolerate-sysroot-errors: false
- target-cpu: arm
gnu-arch: arm
debian-arch: armhf
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
+ tolerate-sysroot-errors: false
gnu-abi: eabihf
- target-cpu: s390x
gnu-arch: s390x
debian-arch: s390x
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
+ tolerate-sysroot-errors: false
- target-cpu: ppc64le
gnu-arch: powerpc64le
debian-arch: ppc64el
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
+ tolerate-sysroot-errors: false
- target-cpu: riscv64
gnu-arch: riscv64
debian-arch: riscv64
debian-repository: https://httpredir.debian.org/debian/
debian-version: sid
+ tolerate-sysroot-errors: true
steps:
- name: 'Checkout the JDK source'
@@ -93,13 +98,6 @@ jobs:
with:
platform: linux-x64
- # Use linux-x64 JDK bundle as build JDK
- - name: 'Get build JDK'
- id: buildjdk
- uses: ./.github/actions/get-bundles
- with:
- platform: linux-x64
-
- name: 'Get GTest'
id: gtest
uses: ./.github/actions/get-gtest
@@ -120,7 +118,7 @@ jobs:
- name: 'Check cache for sysroot'
id: get-cached-sysroot
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: sysroot
key: sysroot-${{ matrix.debian-arch }}-${{ hashFiles('./.github/workflows/build-cross-compile.yml') }}
@@ -130,6 +128,7 @@ jobs:
if: steps.get-cached-sysroot.outputs.cache-hit != 'true'
- name: 'Create sysroot'
+ id: create-sysroot
run: >
sudo debootstrap
--arch=${{ matrix.debian-arch }}
@@ -140,6 +139,7 @@ jobs:
${{ matrix.debian-version }}
sysroot
${{ matrix.debian-repository }}
+ continue-on-error: ${{ matrix.tolerate-sysroot-errors }}
if: steps.get-cached-sysroot.outputs.cache-hit != 'true'
- name: 'Prepare sysroot'
@@ -151,7 +151,12 @@ jobs:
rm -rf sysroot/usr/{sbin,bin,share}
rm -rf sysroot/usr/lib/{apt,gcc,udev,systemd}
rm -rf sysroot/usr/libexec/gcc
- if: steps.get-cached-sysroot.outputs.cache-hit != 'true'
+ if: steps.create-sysroot.outcome == 'success' && steps.get-cached-sysroot.outputs.cache-hit != 'true'
+
+ - name: 'Remove broken sysroot'
+ run: |
+ sudo rm -rf sysroot/
+ if: steps.create-sysroot.outcome != 'success' && steps.get-cached-sysroot.outputs.cache-hit != 'true'
- name: 'Configure'
run: >
@@ -165,7 +170,6 @@ jobs:
--disable-precompiled-headers
--openjdk-target=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}
--with-sysroot=sysroot
- --with-build-jdk=${{ steps.buildjdk.outputs.jdk-path }}
--with-jmod-compress=zip-1
CC=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-gcc-${{ inputs.gcc-major-version }}
CXX=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-g++-${{ inputs.gcc-major-version }}
@@ -173,6 +177,7 @@ jobs:
echo "Dumping config.log:" &&
cat config.log &&
exit 1)
+ if: steps.create-sysroot.outcome == 'success' || steps.get-cached-sysroot.outputs.cache-hit == 'true'
- name: 'Build'
id: build
@@ -180,3 +185,4 @@ jobs:
with:
make-target: 'hotspot ${{ inputs.make-arguments }}'
platform: linux-${{ matrix.target-cpu }}
+ if: steps.create-sysroot.outcome == 'success' || steps.get-cached-sysroot.outputs.cache-hit == 'true'
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 686d5ba4eae..8e110eac738 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -132,8 +132,7 @@ jobs:
gcc-major-version: '10'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- # The linux-x64 jdk bundle is used as buildjdk for the cross-compile job
- if: needs.select.outputs.linux-x64 == 'true' || needs.select.outputs.linux-cross-compile == 'true'
+ if: needs.select.outputs.linux-x64 == 'true'
build-linux-x86:
name: linux-x86
@@ -213,7 +212,6 @@ jobs:
name: linux-cross-compile
needs:
- select
- - build-linux-x64
uses: ./.github/workflows/build-cross-compile.yml
with:
gcc-major-version: '10'
@@ -367,7 +365,7 @@ jobs:
# Hack to get hold of the api environment variables that are only defined for actions
- name: 'Get API configuration'
id: api
- uses: actions/github-script@v6
+ uses: actions/github-script@v7
with:
script: 'return { url: process.env["ACTIONS_RUNTIME_URL"], token: process.env["ACTIONS_RUNTIME_TOKEN"] }'
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 8808ab80d0e..a8885866c12 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -211,7 +211,7 @@ jobs:
if: always()
- name: 'Upload test results'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
path: results
name: ${{ steps.package.outputs.artifact-name }}
@@ -219,7 +219,7 @@ jobs:
# This is the best way I found to abort the job with an error message
- name: 'Notify about test failures'
- uses: actions/github-script@v6
+ uses: actions/github-script@v7
with:
script: core.setFailed('${{ steps.run-tests.outputs.error-message }}')
if: steps.run-tests.outputs.failure == 'true'
diff --git a/.jcheck/conf b/.jcheck/conf
index e2ca212ab3a..dac5c3e0c81 100644
--- a/.jcheck/conf
+++ b/.jcheck/conf
@@ -1,7 +1,7 @@
[general]
project=jdk-updates
jbs=JDK
-version=21.0.3
+version=21.0.4
[checks]
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists
diff --git a/doc/testing.html b/doc/testing.html
index 19d937df1ea..f13400920c6 100644
--- a/doc/testing.html
+++ b/doc/testing.html
@@ -179,8 +179,9 @@
Test selection
The test specifications given in TEST
is parsed into
fully qualified test descriptors, which clearly and unambigously show
which tests will be run. As an example, :tier1
will expand
-to
-jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 jtreg:$(TOPDIR)/test/nashorn:tier1 jtreg:$(TOPDIR)/test/jaxp:tier1
.
+to include all subcomponent test directories that define `tier1`,
+for example:
+jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 ...
.
You can always submit a list of fully qualified test descriptors in the
TEST
variable if you want to shortcut the parser.
Common Test Groups
diff --git a/doc/testing.md b/doc/testing.md
index 9756a691a8c..bc154e40ae7 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -102,11 +102,11 @@ test runs, the `test TEST="x"` solution needs to be used.
The test specifications given in `TEST` is parsed into fully qualified test
descriptors, which clearly and unambigously show which tests will be run. As an
-example, `:tier1` will expand to `jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1
-jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1
-jtreg:$(TOPDIR)/test/nashorn:tier1 jtreg:$(TOPDIR)/test/jaxp:tier1`. You can
-always submit a list of fully qualified test descriptors in the `TEST` variable
-if you want to shortcut the parser.
+example, `:tier1` will expand to include all subcomponent test directories
+that define `tier1`, for example: `jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1
+jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 ...`. You
+can always submit a list of fully qualified test descriptors in the `TEST`
+variable if you want to shortcut the parser.
### Common Test Groups
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index 06a62c9a8f1..f93d42f1634 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -28,7 +28,7 @@
# Setup flags for C/C++ compiler
#
-###############################################################################
+################################################################################
#
# How to compile shared libraries.
#
@@ -37,7 +37,10 @@ AC_DEFUN([FLAGS_SETUP_SHARED_LIBS],
if test "x$TOOLCHAIN_TYPE" = xgcc; then
# Default works for linux, might work on other platforms as well.
SHARED_LIBRARY_FLAGS='-shared'
- SET_EXECUTABLE_ORIGIN='-Wl,-rpath,\$$ORIGIN[$]1'
+ # --disable-new-dtags forces use of RPATH instead of RUNPATH for rpaths.
+ # This protects internal library dependencies within the JDK from being
+ # overridden using LD_LIBRARY_PATH. See JDK-8326891 for more information.
+ SET_EXECUTABLE_ORIGIN='-Wl,-rpath,\$$ORIGIN[$]1 -Wl,--disable-new-dtags'
SET_SHARED_LIBRARY_ORIGIN="-Wl,-z,origin $SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Wl,-soname=[$]1'
SET_SHARED_LIBRARY_MAPFILE='-Wl,-version-script=[$]1'
@@ -63,6 +66,9 @@ AC_DEFUN([FLAGS_SETUP_SHARED_LIBS],
# Default works for linux, might work on other platforms as well.
SHARED_LIBRARY_FLAGS='-shared'
SET_EXECUTABLE_ORIGIN='-Wl,-rpath,\$$ORIGIN[$]1'
+ if test "x$OPENJDK_TARGET_OS" = xlinux; then
+ SET_EXECUTABLE_ORIGIN="$SET_EXECUTABLE_ORIGIN -Wl,--disable-new-dtags"
+ fi
SET_SHARED_LIBRARY_NAME='-Wl,-soname=[$]1'
SET_SHARED_LIBRARY_MAPFILE='-Wl,-version-script=[$]1'
@@ -122,6 +128,11 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Add debug prefix map gcc system include paths, as they cause
# non-deterministic debug paths depending on gcc path location.
DEBUG_PREFIX_MAP_GCC_INCLUDE_PATHS
+
+ # Add debug prefix map for OUTPUTDIR to handle the scenario when
+ # it is not located within WORKSPACE_ROOT
+ outputdir_slash="${OUTPUTDIR%/}/"
+ DEBUG_PREFIX_CFLAGS="$DEBUG_PREFIX_CFLAGS -fdebug-prefix-map=${outputdir_slash}="
]
)
fi
@@ -485,7 +496,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
CFLAGS_OS_DEF_JVM="-D_ALLBSD_SOURCE -D_DARWIN_C_SOURCE -D_XOPEN_SOURCE"
CFLAGS_OS_DEF_JDK="-D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
elif test "x$OPENJDK_TARGET_OS" = xaix; then
- CFLAGS_OS_DEF_JVM="-DAIX"
+ CFLAGS_OS_DEF_JVM="-DAIX -D_LARGE_FILES"
elif test "x$OPENJDK_TARGET_OS" = xbsd; then
CFLAGS_OS_DEF_JDK="-D_ALLBSD_SOURCE"
elif test "x$OPENJDK_TARGET_OS" = xwindows; then
diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4
index f56081223a6..58e04be8a99 100644
--- a/make/autoconf/jdk-options.m4
+++ b/make/autoconf/jdk-options.m4
@@ -190,6 +190,17 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
fi
AC_SUBST(INCLUDE_SA)
+ # Setup default CDS alignment. On platforms where one build may run on machines with different
+  # page sizes, the JVM chooses a compatible alignment to fit all possible page sizes. This slightly
+ # increases archive size.
+ # The only platform having this problem at the moment is Linux on aarch64, which may encounter
+ # three different page sizes: 4K, 64K, and if run on Mac m1 hardware, 16K.
+ COMPATIBLE_CDS_ALIGNMENT_DEFAULT=false
+ if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then
+ COMPATIBLE_CDS_ALIGNMENT_DEFAULT=true
+ fi
+ AC_SUBST(COMPATIBLE_CDS_ALIGNMENT_DEFAULT)
+
# Compress jars
COMPRESS_JARS=false
@@ -491,7 +502,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_UNDEFINED_BEHAVIOR_SANITIZER],
[
# GCC reports lots of likely false positives for stringop-truncation and format-overflow.
# Silence them for now.
- UBSAN_CHECKS="-fsanitize=undefined -fsanitize=float-divide-by-zero -fno-sanitize=shift-base"
+ UBSAN_CHECKS="-fsanitize=undefined -fsanitize=float-divide-by-zero -fno-sanitize=shift-base -fno-sanitize=alignment"
UBSAN_CFLAGS="$UBSAN_CHECKS -Wno-stringop-truncation -Wno-format-overflow -fno-omit-frame-pointer -DUNDEFINED_BEHAVIOR_SANITIZER"
UBSAN_LDFLAGS="$UBSAN_CHECKS"
UTIL_ARG_ENABLE(NAME: ubsan, DEFAULT: false, RESULT: UBSAN_ENABLED,
@@ -673,7 +684,7 @@ AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE],
#
AC_DEFUN([JDKOPT_ENABLE_DISABLE_COMPATIBLE_CDS_ALIGNMENT],
[
- UTIL_ARG_ENABLE(NAME: compatible-cds-alignment, DEFAULT: false,
+ UTIL_ARG_ENABLE(NAME: compatible-cds-alignment, DEFAULT: $COMPATIBLE_CDS_ALIGNMENT_DEFAULT,
RESULT: ENABLE_COMPATIBLE_CDS_ALIGNMENT,
DESC: [enable use alternative compatible cds core region alignment],
DEFAULT_DESC: [disabled],
diff --git a/make/common/MakeBase.gmk b/make/common/MakeBase.gmk
index 252d9dd50da..3858b652ee6 100644
--- a/make/common/MakeBase.gmk
+++ b/make/common/MakeBase.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -152,6 +152,10 @@ define SetupLogging
endif
endif
+ ifneq ($$(findstring $$(LOG_LEVEL), debug trace),)
+ SHELL := $$(SHELL) -x
+ endif
+
ifeq ($$(LOG_LEVEL), trace)
SHELL_NO_RECURSE := $$(SHELL)
# Shell redefinition trick inspired by http://www.cmcrossroads.com/ask-mr-make/6535-tracing-rule-execution-in-gnu-make
diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf
index 3a08380a8b6..0a9f21e97c4 100644
--- a/make/conf/github-actions.conf
+++ b/make/conf/github-actions.conf
@@ -29,17 +29,17 @@ GTEST_VERSION=1.13.0
JTREG_VERSION=7.3.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
-LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-x64_bin.tar.gz
-LINUX_X64_BOOT_JDK_SHA256=bb863b2d542976d1ae4b7b81af3e78b1e4247a64644350b552d298d8dc5980dc
+LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_x64_linux_hotspot_21.0.3_9.tar.gz
+LINUX_X64_BOOT_JDK_SHA256=fffa52c22d797b715a962e6c8d11ec7d79b90dd819b5bc51d62137ea4b22a340
MACOS_X64_BOOT_JDK_EXT=tar.gz
-MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-x64_bin.tar.gz
-MACOS_X64_BOOT_JDK_SHA256=47cf960d9bb89dbe987535a389f7e26c42de7c984ef5108612d77c81aa8cc6a4
+MACOS_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_x64_mac_hotspot_21.0.3_9.tar.gz
+MACOS_X64_BOOT_JDK_SHA256=f777103aab94330d14a29bd99f3a26d60abbab8e2c375cec9602746096721a7c
MACOS_AARCH64_BOOT_JDK_EXT=tar.gz
-MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-aarch64_bin.tar.gz
-MACOS_AARCH64_BOOT_JDK_SHA256=d020f5c512c043cfb7119a591bc7e599a5bfd76d866d939f5562891d9db7c9b3
+MACOS_AARCH64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_aarch64_mac_hotspot_21.0.3_9.tar.gz
+MACOS_AARCH64_BOOT_JDK_SHA256=b6be6a9568be83695ec6b7cb977f4902f7be47d74494c290bc2a5c3c951e254f
WINDOWS_X64_BOOT_JDK_EXT=zip
-WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip
-WINDOWS_X64_BOOT_JDK_SHA256=c92fae5e42b9aecf444a66c8ec563c652f60b1e231dfdd33a4f5a3e3603058fb
+WINDOWS_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_x64_windows_hotspot_21.0.3_9.zip
+WINDOWS_X64_BOOT_JDK_SHA256=c43a66cff7a403d56c5c5e1ff10d3d5f95961abf80f97f0e35380594909f0e4d
diff --git a/make/conf/version-numbers.conf b/make/conf/version-numbers.conf
index 819f8796e2e..bd10acd75dc 100644
--- a/make/conf/version-numbers.conf
+++ b/make/conf/version-numbers.conf
@@ -28,15 +28,15 @@
DEFAULT_VERSION_FEATURE=21
DEFAULT_VERSION_INTERIM=0
-DEFAULT_VERSION_UPDATE=3
+DEFAULT_VERSION_UPDATE=4
DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2024-04-16
+DEFAULT_VERSION_DATE=2024-07-16
DEFAULT_VERSION_CLASSFILE_MAJOR=65 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="20 21"
DEFAULT_JDK_SOURCE_TARGET_VERSION=21
-DEFAULT_PROMOTED_VERSION_PRE=
+DEFAULT_PROMOTED_VERSION_PRE=ea
diff --git a/make/devkit/createJMHBundle.sh b/make/devkit/createJMHBundle.sh
index c3c97947dab..b2b10769d15 100644
--- a/make/devkit/createJMHBundle.sh
+++ b/make/devkit/createJMHBundle.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
#
-# Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
JMH_VERSION=1.37
COMMONS_MATH3_VERSION=3.6.1
JOPT_SIMPLE_VERSION=5.0.4
+MAVEN_MIRROR=${MAVEN_MIRROR:-https://repo.maven.apache.org/maven2}
BUNDLE_NAME=jmh-$JMH_VERSION.tar.gz
@@ -41,7 +42,7 @@ cd $JAR_DIR
rm -f *
fetchJar() {
- url="https://repo.maven.apache.org/maven2/$1/$2/$3/$2-$3.jar"
+ url="${MAVEN_MIRROR}/$1/$2/$3/$2-$3.jar"
if command -v curl > /dev/null; then
curl -O --fail $url
elif command -v wget > /dev/null; then
diff --git a/make/hotspot/gensrc/GensrcAdlc.gmk b/make/hotspot/gensrc/GensrcAdlc.gmk
index 0898d91e1c2..bb356476847 100644
--- a/make/hotspot/gensrc/GensrcAdlc.gmk
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk
@@ -51,7 +51,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
endif
# Set the C++ standard
- ADLC_CFLAGS += $(ADLC_LANGSTD_CXXFLAG)
+ ADLC_CFLAGS += $(ADLC_LANGSTD_CXXFLAGS)
# NOTE: The old build didn't set -DASSERT for windows but it doesn't seem to
# hurt.
diff --git a/make/modules/java.base/Launcher.gmk b/make/modules/java.base/Launcher.gmk
index 11af61b082e..64db79060c4 100644
--- a/make/modules/java.base/Launcher.gmk
+++ b/make/modules/java.base/Launcher.gmk
@@ -78,7 +78,8 @@ ifeq ($(call isTargetOs, macosx aix linux), true)
NAME := jspawnhelper, \
SRC := $(TOPDIR)/src/$(MODULE)/unix/native/jspawnhelper, \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKEXE) -I$(TOPDIR)/src/$(MODULE)/unix/native/libjava, \
+ CFLAGS := $(CFLAGS_JDKEXE) $(VERSION_CFLAGS) \
+ -I$(TOPDIR)/src/$(MODULE)/unix/native/libjava, \
EXTRA_OBJECT_FILES := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava/childproc$(OBJ_SUFFIX), \
LDFLAGS := $(LDFLAGS_JDKEXE), \
OUTPUT_DIR := $(SUPPORT_OUTPUTDIR)/modules_libs/$(MODULE), \
diff --git a/make/modules/java.base/lib/CoreLibraries.gmk b/make/modules/java.base/lib/CoreLibraries.gmk
index 8b1a0a90fd4..6aa9fd6586c 100644
--- a/make/modules/java.base/lib/CoreLibraries.gmk
+++ b/make/modules/java.base/lib/CoreLibraries.gmk
@@ -59,6 +59,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJAVA, \
CFLAGS := $(CFLAGS_JDKLIB) \
$(LIBJAVA_CFLAGS), \
jdk_util.c_CFLAGS := $(VERSION_CFLAGS), \
+ ProcessImpl_md.c_CFLAGS := $(VERSION_CFLAGS), \
WARNINGS_AS_ERRORS_xlc := false, \
DISABLED_WARNINGS_gcc_ProcessImpl_md.c := unused-result, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
diff --git a/make/modules/java.desktop/lib/Awt2dLibraries.gmk b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
index d6a4e6df4fc..a33f219e83e 100644
--- a/make/modules/java.desktop/lib/Awt2dLibraries.gmk
+++ b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
@@ -477,8 +477,10 @@ else
# noexcept-type required for GCC 7 builds. Not required for GCC 8+.
# expansion-to-defined required for GCC 9 builds. Not required for GCC 10+.
# maybe-uninitialized required for GCC 8 builds. Not required for GCC 9+.
+ # calloc-transposed-args required for GCC 14 builds. (fixed upstream in Harfbuzz 032c931e1c0cfb20f18e5acb8ba005775242bd92)
HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type \
- expansion-to-defined dangling-reference maybe-uninitialized
+ expansion-to-defined dangling-reference maybe-uninitialized \
+ calloc-transposed-args
HARFBUZZ_DISABLED_WARNINGS_clang := missing-field-initializers range-loop-analysis
HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244
diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk
index 5d998a4d4b1..c73783e3b0a 100644
--- a/make/test/JtregNativeHotspot.gmk
+++ b/make/test/JtregNativeHotspot.gmk
@@ -867,7 +867,7 @@ BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exesigtest := -ljvm
ifeq ($(call isTargetOs, windows), true)
BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
- BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c libterminatedThread.c libTestJNI.c libCompleteExit.c libTestPsig.c libnativeStack.c exeGetCreatedJavaVMs.c
+ BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c libterminatedThread.c libTestJNI.c libCompleteExit.c libMonitorWithDeadObjectTest.c libTestPsig.c libnativeStack.c exeGetCreatedJavaVMs.c
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libatExit := jvm.lib
BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exedaemonDestroy := jvm.lib
else
@@ -1508,8 +1508,11 @@ else
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libterminatedThread += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libatExit += -ljvm
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libCompleteExit += -lpthread
+ BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMonitorWithDeadObjectTest += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libnativeStack += -lpthread
BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeGetCreatedJavaVMs := -ljvm -lpthread
+
+ BUILD_HOTSPOT_JTREG_EXCLUDE += libNativeException.c
endif
ifeq ($(ASAN_ENABLED), true)
diff --git a/src/demo/share/java2d/J2DBench/Makefile b/src/demo/share/java2d/J2DBench/Makefile
index 04b0818a2c3..edc4494e131 100644
--- a/src/demo/share/java2d/J2DBench/Makefile
+++ b/src/demo/share/java2d/J2DBench/Makefile
@@ -29,6 +29,23 @@
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
+
+ifndef SOURCE
+export SOURCE := 7
+endif
+ifndef TARGET
+export TARGET := 7
+endif
+ifndef JAVAC
+export JAVAC := javac
+endif
+ifndef JAVA
+export JAVA := java
+endif
+ifndef JAR
+export JAR := jar
+endif
+
SOURCEPATH=src
CLASSES=build
DIST=dist
@@ -80,18 +97,18 @@ SCM_DIRs = .hg .svn CVS RCS SCCS Codemgr_wsdata deleted_files
all: mkdirs J2DBench.jar J2DAnalyzer.jar
run: mkdirs J2DBench.jar
- java -jar $(DIST)/J2DBench.jar
+ $(JAVA) -jar $(DIST)/J2DBench.jar
analyze: mkdirs J2DAnalyzer.jar
- java -jar $(DIST)/J2DAnalyzer.jar
+ $(JAVA) -jar $(DIST)/J2DAnalyzer.jar
J2DBench.jar: \
$(J2DBENCH_CLASSES) $(J2DBENCH_RESOURCES) \
$(CLASSES)/j2dbench.manifest
- jar cvmf $(CLASSES)/j2dbench.manifest $(DIST)/J2DBench.jar -C $(CLASSES) j2dbench
+ $(JAR) cvmf $(CLASSES)/j2dbench.manifest $(DIST)/J2DBench.jar -C $(CLASSES) j2dbench
J2DAnalyzer.jar: $(J2DANALYZER_CLASSES) $(CLASSES)/j2danalyzer.manifest
- jar cvmf $(CLASSES)/j2danalyzer.manifest \
+ $(JAR) cvmf $(CLASSES)/j2danalyzer.manifest \
$(DIST)/J2DAnalyzer.jar -C $(CLASSES) j2dbench/report
$(CLASSES)/j2dbench/tests/iio/images: $(RESOURCES)/images
@@ -120,7 +137,7 @@ $(CLASSES):
mkdirs: $(DIST) $(CLASSES)
$(CLASSES)/j2dbench/%.class: $(SOURCEPATH)/j2dbench/%.java
- javac -g:none -source 1.7 -target 1.7 -d $(CLASSES) -sourcepath $(SOURCEPATH) $<
+ $(JAVAC) -g:none -source $(SOURCE) -target $(TARGET) -d $(CLASSES) -sourcepath $(SOURCEPATH) $<
clean:
rm -rf $(CLASSES)
diff --git a/src/demo/share/java2d/J2DBench/README b/src/demo/share/java2d/J2DBench/README
index 3b9f25c13f1..513c984a655 100644
--- a/src/demo/share/java2d/J2DBench/README
+++ b/src/demo/share/java2d/J2DBench/README
@@ -23,6 +23,9 @@ The benchmark requires at least jdk1.4 to compile and run. Note that
source/target is set to 1.7 in the makefile and build.xml, because of
support in jdk 14 compiler. To check compatibility with jdk1.4 you can
use "-source 1.4 -target 1.4" options and jdk1.7.
+You can use the TARGET/SOURCE variables of the makefile, or -Dtarget/-Dsource, to set them for your convenience.
+Similarly, you can set JAVA/JAVAC/JAR (or -Djava/-Djavac) to select a different java/javac than the one on your PATH.
+Unfortunately, in ant you cannot set jar, but ant should honor JAVA_HOME.
-----------------------------------------------------------------------
How To Compile
diff --git a/src/demo/share/java2d/J2DBench/build.xml b/src/demo/share/java2d/J2DBench/build.xml
index 7b202946cf1..415c315899e 100644
--- a/src/demo/share/java2d/J2DBench/build.xml
+++ b/src/demo/share/java2d/J2DBench/build.xml
@@ -39,6 +39,27 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -49,13 +70,14 @@
-
+
@@ -64,6 +86,7 @@
description="run J2DAnalyzer" >
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
index afeb19e906e..c7b867a4207 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
@@ -187,6 +187,26 @@ void Address::lea(MacroAssembler *as, Register r) const {
zrf(Rd, 0);
}
+// This encoding is similar (but not quite identical) to the encoding used
+// by literal ld/st. see JDK-8324123.
+// PRFM does not support writeback or pre/post index.
+void Assembler::prfm(const Address &adr, prfop pfop) {
+ Address::mode mode = adr.getMode();
+ // PRFM does not support pre/post index
+ guarantee((mode != Address::pre) && (mode != Address::post), "prfm does not support pre/post indexing");
+ if (mode == Address::literal) {
+ starti;
+ f(0b11, 31, 30), f(0b011, 29, 27), f(0b000, 26, 24);
+ f(pfop, 4, 0);
+ int64_t offset = (adr.target() - pc()) >> 2;
+ sf(offset, 23, 5);
+ } else {
+ assert((mode == Address::base_plus_offset)
+ || (mode == Address::base_plus_offset_reg), "must be base_plus_offset/base_plus_offset_reg");
+ ld_st2(as_Register(pfop), adr, 0b11, 0b10);
+ }
+}
+
// An "all-purpose" add/subtract immediate, per ARM documentation:
// A "programmer-friendly" assembler may accept a negative immediate
// between -(2^24 -1) and -1 inclusive, causing it to convert a
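
The new prfm() above refuses writeback addressing because the PRFM instruction has no pre/post-indexed forms, and for literal addresses it encodes a PC-relative word offset into a signed 19-bit field. The toy model below restates those two rules with invented Mode/helper names; it is a sketch, not the HotSpot Assembler API.

    // Illustrative-only model of the prfm() guard and literal encoding above.
    #include <cassert>
    #include <cstdint>

    enum class Mode { base_plus_offset, base_plus_offset_reg, pre, post, literal };

    bool prfm_supports(Mode m) {
      // literal and base+offset forms encode fine; writeback forms do not exist for PRFM
      return m != Mode::pre && m != Mode::post;
    }

    int64_t literal_imm19(int64_t target, int64_t pc) {
      int64_t word_offset = (target - pc) >> 2;                     // PC-relative, scaled by 4
      assert(word_offset >= -(1 << 18) && word_offset < (1 << 18)); // signed 19-bit field
      return word_offset;
    }

    int main() {
      assert(prfm_supports(Mode::base_plus_offset));
      assert(!prfm_supports(Mode::post));
      assert(literal_imm19(0x1040, 0x1000) == 0x10);
      return 0;
    }
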
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index a53d8329645..dd143ac004a 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -795,6 +795,8 @@ class Assembler : public AbstractAssembler {
void adrp(Register Rd, const Address &dest, uint64_t &offset) = delete;
+ void prfm(const Address &adr, prfop pfop = PLDL1KEEP);
+
#undef INSN
void add_sub_immediate(Instruction_aarch64 ¤t_insn, Register Rd, Register Rn,
@@ -1572,17 +1574,6 @@ class Assembler : public AbstractAssembler {
#undef INSN
-#define INSN(NAME, size, op) \
- void NAME(const Address &adr, prfop pfop = PLDL1KEEP) { \
- ld_st2(as_Register(pfop), adr, size, op); \
- }
-
- INSN(prfm, 0b11, 0b10); // FIXME: PRFM should not be used with
- // writeback modes, but the assembler
- // doesn't enfore that.
-
-#undef INSN
-
#define INSN(NAME, size, op) \
void NAME(FloatRegister Rt, const Address &adr) { \
ld_st2(as_Register(Rt), adr, size, op, 1); \
diff --git a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp
index 6204f212703..a9c53da3d01 100644
--- a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp
@@ -142,9 +142,11 @@
// * 63-48 Fixed (16-bits, always zero)
//
-// Default value if probing is not implemented for a certain platform: 128TB
-static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
-// Minimum value returned, if probing fails: 64GB
+// Default value if probing is not implemented for a certain platform
+// Max address bit is restricted by implicit assumptions in the code, for instance
+// the bit layout of XForwardingEntry or Partial array entry (see XMarkStackEntry) in mark stack
+static const size_t DEFAULT_MAX_ADDRESS_BIT = 46;
+// Minimum value returned, if probing fails
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
static size_t probe_valid_max_address_bit() {
diff --git a/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp
index 6c3cea73d1a..e140525bcbc 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp
@@ -36,9 +36,11 @@
#include <sys/mman.h>
#endif // LINUX
-// Default value if probing is not implemented for a certain platform: 128TB
-static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
-// Minimum value returned, if probing fails: 64GB
+// Default value if probing is not implemented for a certain platform
+// Max address bit is restricted by implicit assumptions in the code, for instance
+// the bit layout of XForwardingEntry or Partial array entry (see XMarkStackEntry) in mark stack
+static const size_t DEFAULT_MAX_ADDRESS_BIT = 46;
+// Minimum value returned, if probing fails
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
static size_t probe_valid_max_address_bit() {
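
Both the x and z variants now document why the default was lowered from 47 to 46 bits; the size annotations dropped from the old comments follow directly from powers of two, as this throwaway calculation shows (nothing here is JDK code):

    // Quick arithmetic check for the comment change above: an N-bit address
    // covers 2^N bytes, so dropping the default from 47 to 46 halves the
    // assumed span from 128 TB to 64 TB; the 36-bit minimum is 64 GB.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t TB = 1024ull * 1024 * 1024 * 1024;
      const uint64_t GB = 1024ull * 1024 * 1024;
      printf("47 bits -> %llu TB\n", (unsigned long long)((1ull << 47) / TB)); // 128
      printf("46 bits -> %llu TB\n", (unsigned long long)((1ull << 46) / TB)); // 64
      printf("36 bits -> %llu GB\n", (unsigned long long)((1ull << 36) / GB)); // 64
      return 0;
    }
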
diff --git a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
index 1146324e19c..2293d70c8da 100644
--- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
@@ -27,6 +27,7 @@
#define CPU_AARCH64_GLOBALDEFINITIONS_AARCH64_HPP
const int StackAlignmentInBytes = 16;
+const size_t pd_segfault_address = 1024;
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 5a90cf189ce..1343e7d4f26 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -389,13 +389,13 @@ static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
return false;
}
-class Decoder : public RelocActions {
- virtual reloc_insn adrpMem() { return &Decoder::adrpMem_impl; }
- virtual reloc_insn adrpAdd() { return &Decoder::adrpAdd_impl; }
- virtual reloc_insn adrpMovk() { return &Decoder::adrpMovk_impl; }
+class AArch64Decoder : public RelocActions {
+ virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
+ virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
+ virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }
public:
- Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}
+ AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}
virtual int loadStore(address insn_addr, address &target) {
intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
@@ -491,7 +491,7 @@ class Decoder : public RelocActions {
};
address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
- Decoder decoder(insn_addr, insn);
+ AArch64Decoder decoder(insn_addr, insn);
address target;
decoder.run(insn_addr, target);
return target;
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 2335a70c9fe..cd2567434f4 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -310,7 +310,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
uint int_args = 0;
uint fp_args = 0;
- uint stk_args = 0; // inc by 2 each time
+ uint stk_args = 0;
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
@@ -322,8 +322,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
- stk_args += 2;
+ stk_args += 1;
}
break;
case T_VOID:
@@ -340,6 +341,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@@ -348,8 +350,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
- stk_args += 2;
+ stk_args += 1;
}
break;
case T_DOUBLE:
@@ -357,6 +360,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@@ -367,7 +371,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
- return align_up(stk_args, 2);
+ return stk_args;
}
// Patch the callers callsite with entry to compiled code if it exists.
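
The hunks above change how stack-passed Java arguments are packed: 32-bit values now consume a single stack slot, 64-bit values are aligned to an even slot before assignment, and the raw slot count is returned instead of being rounded up at the end. The following standalone sketch walks through that packing; the argument widths and align_up() are illustrative stand-ins, not the HotSpot definitions.

    // Sketch of the tighter stack-slot packing introduced above.
    #include <cstdint>
    #include <cstdio>

    static uint32_t align_up(uint32_t v, uint32_t a) { return (v + a - 1) & ~(a - 1); }

    int main() {
      // Widths in 32-bit slots for four arguments that all spill to the stack,
      // e.g. int, int, long, int once the integer argument registers are used up.
      const uint32_t widths[] = {1, 1, 2, 1};
      uint32_t stk_args = 0;
      for (uint32_t w : widths) {
        if (w == 2) stk_args = align_up(stk_args, 2);  // keep 64-bit values 8-byte aligned
        printf("arg (%u slots) -> stack slot %u\n", w, stk_args);
        stk_args += w;                                 // the old code always added 2 here
      }
      printf("slots used: %u\n", stk_args);            // 5 here; the old scheme returned 8
      return 0;
    }
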
diff --git a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp
index 57cc9fe6274..45a1af83e8c 100644
--- a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp
@@ -241,9 +241,13 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ mov_metadata(rmethod, entry);
__ str(rmethod, Address(rthread, JavaThread::callee_target_offset())); // just in case callee is deoptimized
+ __ push_cont_fastpath(rthread);
+
__ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
__ blr(rscratch1);
+ __ pop_cont_fastpath(rthread);
+
// return value shuffle
if (!needs_return_buffer) {
#ifdef ASSERT
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index f33e64b3af0..b76bb7efef3 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -144,11 +144,19 @@ void VM_Version::initialize() {
}
}
- // Ampere CPUs: Ampere-1 and Ampere-1A
- if (_cpu == CPU_AMPERE && ((_model == CPU_MODEL_AMPERE_1) || (_model == CPU_MODEL_AMPERE_1A))) {
+ // Ampere CPUs
+ if (_cpu == CPU_AMPERE && ((_model == CPU_MODEL_AMPERE_1) ||
+ (_model == CPU_MODEL_AMPERE_1A) ||
+ (_model == CPU_MODEL_AMPERE_1B))) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
+ if (FLAG_IS_DEFAULT(OnSpinWaitInst)) {
+ FLAG_SET_DEFAULT(OnSpinWaitInst, "isb");
+ }
+ if (FLAG_IS_DEFAULT(OnSpinWaitInstCount)) {
+ FLAG_SET_DEFAULT(OnSpinWaitInstCount, 2);
+ }
}
// ThunderX
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index a141127387e..0380f14e5c3 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -110,7 +110,8 @@ enum Ampere_CPU_Model {
CPU_MODEL_ALTRA = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_ALTRAMAX = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_AMPERE_1 = 0xac3, /* CPU implementer is CPU_AMPERE */
- CPU_MODEL_AMPERE_1A = 0xac4 /* CPU implementer is CPU_AMPERE */
+ CPU_MODEL_AMPERE_1A = 0xac4, /* CPU implementer is CPU_AMPERE */
+ CPU_MODEL_AMPERE_1B = 0xac5 /* AMPERE_1B core Implements ARMv8.7 with CSSC, MTE, SM3/SM4 extensions */
};
#define CPU_FEATURE_FLAGS(decl) \
diff --git a/src/hotspot/cpu/arm/globalDefinitions_arm.hpp b/src/hotspot/cpu/arm/globalDefinitions_arm.hpp
index ba180fb0f87..2041cf9e17e 100644
--- a/src/hotspot/cpu/arm/globalDefinitions_arm.hpp
+++ b/src/hotspot/cpu/arm/globalDefinitions_arm.hpp
@@ -26,6 +26,7 @@
#define CPU_ARM_GLOBALDEFINITIONS_ARM_HPP
const int StackAlignmentInBytes = 8;
+const size_t pd_segfault_address = 1024;
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index e4f4107da0f..d55cdfb0251 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -444,7 +444,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
- if (slot & 1) slot++;
return slot;
}
diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
index 1a00c9ad268..dc70c73d4b3 100644
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
@@ -456,6 +456,9 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ extsw(R7_ARG5, length()->as_register());
ce->emit_static_call_stub();
+ if (ce->compilation()->bailed_out()) {
+ return; // CodeCache is full
+ }
bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
if (!success) { return; }
diff --git a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
index 8ac5c39b831..f124477bc32 100644
--- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
+++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
@@ -31,6 +31,12 @@ const int BytesPerInstWord = 4;
const int StackAlignmentInBytes = 16;
+#ifdef AIX
+const size_t pd_segfault_address = -1;
+#else
+const size_t pd_segfault_address = 1024;
+#endif
+
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
const bool CCallingConventionRequiresIntsAsLongs = true;
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 7fe5203aa15..d6eaeb7c9af 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -2062,7 +2062,10 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
- if (base == NULL) return 0; // CodeBuffer::expand failed
+ if (base == nullptr) {
+ ciEnv::current()->record_failure("CodeCache is full");
+ return 0; // CodeBuffer::expand failed
+ }
int offset = __ offset();
__ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
@@ -2079,7 +2082,10 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
- if (base == NULL) return 0; // CodeBuffer::expand failed
+ if (base == nullptr) {
+ ciEnv::current()->record_failure("CodeCache is full");
+ return 0; // CodeBuffer::expand failed
+ }
int offset = __ offset();
__ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
@@ -2798,15 +2804,16 @@ encode %{
intptr_t val = $src$$constant;
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
address const_toc_addr;
+ RelocationHolder r; // Initializes type to none.
if (constant_reloc == relocInfo::oop_type) {
// Create an oop constant and a corresponding relocation.
- AddressLiteral a = __ allocate_oop_address((jobject)val);
+ AddressLiteral a = __ constant_oop_address((jobject)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
+ r = a.rspec();
} else if (constant_reloc == relocInfo::metadata_type) {
+ // Notify OOP recorder (don't need the relocation)
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
} else {
// Create a non-oop constant, no relocation needed.
const_toc_addr = __ long_constant((jlong)$src$$constant);
@@ -2816,6 +2823,7 @@ encode %{
ciEnv::current()->record_out_of_memory_failure();
return;
}
+ __ relocate(r); // If set above.
// Get the constant's TOC offset.
toc_offset = __ offset_to_method_toc(const_toc_addr);
@@ -2829,15 +2837,16 @@ encode %{
intptr_t val = $src$$constant;
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
address const_toc_addr;
+ RelocationHolder r; // Initializes type to none.
if (constant_reloc == relocInfo::oop_type) {
// Create an oop constant and a corresponding relocation.
- AddressLiteral a = __ allocate_oop_address((jobject)val);
+ AddressLiteral a = __ constant_oop_address((jobject)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
+ r = a.rspec();
} else if (constant_reloc == relocInfo::metadata_type) {
+ // Notify OOP recorder (don't need the relocation)
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
} else { // non-oop pointers, e.g. card mark base, heap top
// Create a non-oop constant, no relocation needed.
const_toc_addr = __ long_constant((jlong)$src$$constant);
@@ -2847,6 +2856,7 @@ encode %{
ciEnv::current()->record_out_of_memory_failure();
return;
}
+ __ relocate(r); // If set above.
// Get the constant's TOC offset.
const int toc_offset = __ offset_to_method_toc(const_toc_addr);
// Store the toc offset of the constant.
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 401d4f4efa8..2281c083b98 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -734,7 +734,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
ShouldNotReachHere();
}
}
- return align_up(stk, 2);
+ return stk;
}
#if defined(COMPILER1) || defined(COMPILER2)
diff --git a/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp b/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp
index 4cb86ad573c..871c8e08bf5 100644
--- a/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp
+++ b/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp
@@ -239,10 +239,14 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ load_const_optimized(R19_method, (intptr_t)entry);
__ std(R19_method, in_bytes(JavaThread::callee_target_offset()), R16_thread);
+ __ push_cont_fastpath();
+
__ ld(call_target_address, in_bytes(Method::from_compiled_offset()), R19_method);
__ mtctr(call_target_address);
__ bctrl();
+ __ pop_cont_fastpath();
+
// return value shuffle
if (!needs_return_buffer) {
// CallArranger can pick a return type that goes in the same reg for both CCs.
diff --git a/src/hotspot/cpu/riscv/frame_riscv.hpp b/src/hotspot/cpu/riscv/frame_riscv.hpp
index ae6242a75bd..74425d23d7b 100644
--- a/src/hotspot/cpu/riscv/frame_riscv.hpp
+++ b/src/hotspot/cpu/riscv/frame_riscv.hpp
@@ -131,7 +131,7 @@
// Entry frames
// n.b. these values are determined by the layout defined in
// stubGenerator for the Java call stub
- entry_frame_after_call_words = 34,
+ entry_frame_after_call_words = 35,
entry_frame_call_wrapper_offset = -10,
// we don't need a save area
diff --git a/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp b/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp
index 2a8cff71c55..f40ffbeefa7 100644
--- a/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp
+++ b/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp
@@ -28,6 +28,7 @@
#define CPU_RISCV_GLOBALDEFINITIONS_RISCV_HPP
const int StackAlignmentInBytes = 16;
+const size_t pd_segfault_address = 1024;
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index fd5739d3d40..dd2922240b2 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -720,7 +720,7 @@ void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Reg
void MacroAssembler::la(Register Rd, const address dest) {
int64_t offset = dest - pc();
- if (is_simm32(offset)) {
+ if (is_valid_32bit_offset(offset)) {
auipc(Rd, (int32_t)offset + 0x800); //0x800, Note:the 11th sign bit
addi(Rd, Rd, ((int64_t)offset << 52) >> 52);
} else {
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index 21f64f4b20e..ca64e2e8152 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -640,6 +640,14 @@ class MacroAssembler: public Assembler {
int pop_v(unsigned int bitset, Register stack);
#endif // COMPILER2
+ // The signed 20-bit upper imm can materialize at most negative 0xF...F80000000, two G.
+ // The following signed 12-bit imm can at max subtract 0x800, two K, from that previously loaded two G.
+ bool is_valid_32bit_offset(int64_t x) {
+ constexpr int64_t twoG = (2 * G);
+ constexpr int64_t twoK = (2 * K);
+ return x < (twoG - twoK) && x >= (-twoG - twoK);
+ }
+
public:
void push_reg(Register Rs);
void pop_reg(Register Rd);
@@ -794,7 +802,7 @@ class MacroAssembler: public Assembler {
void NAME(Register Rd, address dest) { \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
- if (is_simm32(distance)) { \
+ if (is_valid_32bit_offset(distance)) { \
auipc(Rd, (int32_t)distance + 0x800); \
Assembler::NAME(Rd, Rd, ((int32_t)distance << 20) >> 20); \
} else { \
@@ -851,7 +859,7 @@ class MacroAssembler: public Assembler {
void NAME(FloatRegister Rd, address dest, Register temp = t0) { \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
- if (is_simm32(distance)) { \
+ if (is_valid_32bit_offset(distance)) { \
auipc(temp, (int32_t)distance + 0x800); \
Assembler::NAME(Rd, temp, ((int32_t)distance << 20) >> 20); \
} else { \
@@ -912,7 +920,7 @@ class MacroAssembler: public Assembler {
assert_cond(dest != nullptr); \
assert_different_registers(Rs, temp); \
int64_t distance = dest - pc(); \
- if (is_simm32(distance)) { \
+ if (is_valid_32bit_offset(distance)) { \
auipc(temp, (int32_t)distance + 0x800); \
Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
} else { \
@@ -957,7 +965,7 @@ class MacroAssembler: public Assembler {
void NAME(FloatRegister Rs, address dest, Register temp = t0) { \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
- if (is_simm32(distance)) { \
+ if (is_valid_32bit_offset(distance)) { \
auipc(temp, (int32_t)distance + 0x800); \
Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
} else { \
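
The is_simm32 checks above are replaced because auipc plus a 12-bit immediate cannot reach the full signed 32-bit range: the 20-bit upper immediate covers roughly +/-2 GiB and the trailing 12-bit immediate only adds +/-2 KiB around that, as the new comment explains. Below is a minimal, self-contained restatement of that bound check; K and G are reconstructed here as the usual 2^10 and 2^30, not the HotSpot globals.

    // Sketch of the range check introduced above (not the HotSpot sources).
    #include <cassert>
    #include <cstdint>

    constexpr int64_t K = 1024;
    constexpr int64_t G = 1024 * 1024 * 1024;

    constexpr bool is_valid_32bit_offset(int64_t x) {
      constexpr int64_t twoG = 2 * G;
      constexpr int64_t twoK = 2 * K;
      return x < (twoG - twoK) && x >= (-twoG - twoK);
    }

    int main() {
      static_assert(is_valid_32bit_offset(0), "zero offset is reachable");
      static_assert(!is_valid_32bit_offset(2 * G - 1), "a plain simm32 check would accept this");
      static_assert(is_valid_32bit_offset(-2 * G - 2 * K), "lowest reachable offset");
      assert(!is_valid_32bit_offset(2 * G));
      return 0;
    }
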
diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad
index 06826ecf046..145453bb538 100644
--- a/src/hotspot/cpu/riscv/riscv_v.ad
+++ b/src/hotspot/cpu/riscv/riscv_v.ad
@@ -2942,7 +2942,6 @@ instruct vloadcon(vReg dst, immI0 src) %{
__ vsetvli_helper(bt, Matcher::vector_length(this));
__ vid_v(as_VectorRegister($dst$$reg));
if (is_floating_point_type(bt)) {
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
}
%}
@@ -3156,7 +3155,6 @@ instruct vcvtBtoX(vReg dst, vReg src) %{
if (is_floating_point_type(bt)) {
__ integer_extend_v(as_VectorRegister($dst$$reg), bt == T_FLOAT ? T_INT : T_LONG,
Matcher::vector_length(this), as_VectorRegister($src$$reg), T_BYTE);
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
} else {
__ integer_extend_v(as_VectorRegister($dst$$reg), bt,
@@ -3203,7 +3201,6 @@ instruct vcvtStoX_fp_extend(vReg dst, vReg src) %{
__ integer_extend_v(as_VectorRegister($dst$$reg), (bt == T_FLOAT ? T_INT : T_LONG),
Matcher::vector_length(this), as_VectorRegister($src$$reg), T_SHORT);
__ vsetvli_helper(bt, Matcher::vector_length(this));
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
%}
ins_pipe(pipe_slow);
@@ -3242,7 +3239,6 @@ instruct vcvtItoF(vReg dst, vReg src) %{
format %{ "vcvtItoF $dst, $src" %}
ins_encode %{
__ vsetvli_helper(T_FLOAT, Matcher::vector_length(this));
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
%}
ins_pipe(pipe_slow);
@@ -3255,7 +3251,6 @@ instruct vcvtItoD(vReg dst, vReg src) %{
format %{ "vcvtItoD $dst, $src" %}
ins_encode %{
__ vsetvli_helper(T_INT, Matcher::vector_length(this), Assembler::mf2);
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfwcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
%}
ins_pipe(pipe_slow);
@@ -3283,7 +3278,6 @@ instruct vcvtLtoF(vReg dst, vReg src) %{
format %{ "vcvtLtoF $dst, $src" %}
ins_encode %{
__ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2);
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfncvt_f_x_w(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
%}
ins_pipe(pipe_slow);
@@ -3295,7 +3289,6 @@ instruct vcvtLtoD(vReg dst, vReg src) %{
format %{ "vcvtLtoD $dst, $src" %}
ins_encode %{
__ vsetvli_helper(T_DOUBLE, Matcher::vector_length(this));
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
%}
ins_pipe(pipe_slow);
@@ -3353,7 +3346,6 @@ instruct vcvtFtoD(vReg dst, vReg src) %{
format %{ "vcvtFtoD $dst, $src" %}
ins_encode %{
__ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2);
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfwcvt_f_f_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
%}
ins_pipe(pipe_slow);
@@ -3401,7 +3393,6 @@ instruct vcvtDtoF(vReg dst, vReg src) %{
format %{ "vcvtDtoF $dst, $src" %}
ins_encode %{
__ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2);
- __ csrwi(CSR_FRM, C2_MacroAssembler::rne);
__ vfncvt_f_f_w(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
%}
ins_pipe(pipe_slow);
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index 691dfa1bd70..e34ace1b02c 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -266,7 +266,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
uint int_args = 0;
uint fp_args = 0;
- uint stk_args = 0; // inc by 2 each time
+ uint stk_args = 0;
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
@@ -278,8 +278,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
- stk_args += 2;
+ stk_args += 1;
}
break;
case T_VOID:
@@ -295,6 +296,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@@ -303,8 +305,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
- stk_args += 2;
+ stk_args += 1;
}
break;
case T_DOUBLE:
@@ -312,6 +315,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@@ -321,7 +325,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
- return align_up(stk_args, 2);
+ return stk_args;
}
// Patch the callers callsite with entry to compiled code if it exists.
diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
index aab65019619..8c5e1c097ef 100644
--- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
@@ -126,8 +126,9 @@ class StubGenerator: public StubCodeGenerator {
// [ return_from_Java ] <--- sp
// [ argument word n ]
// ...
- // -34 [ argument word 1 ]
- // -33 [ saved f27 ] <--- sp_after_call
+ // -35 [ argument word 1 ]
+ // -34 [ saved FRM in Floating-point Control and Status Register ] <--- sp_after_call
+ // -33 [ saved f27 ]
// -32 [ saved f26 ]
// -31 [ saved f25 ]
// -30 [ saved f24 ]
@@ -164,8 +165,9 @@ class StubGenerator: public StubCodeGenerator {
// Call stub stack layout word offsets from fp
enum call_stub_layout {
- sp_after_call_off = -33,
+ sp_after_call_off = -34,
+ frm_off = sp_after_call_off,
f27_off = -33,
f26_off = -32,
f25_off = -31,
@@ -213,6 +215,7 @@ class StubGenerator: public StubCodeGenerator {
const Address sp_after_call (fp, sp_after_call_off * wordSize);
+ const Address frm_save (fp, frm_off * wordSize);
const Address call_wrapper (fp, call_wrapper_off * wordSize);
const Address result (fp, result_off * wordSize);
const Address result_type (fp, result_type_off * wordSize);
@@ -295,6 +298,16 @@ class StubGenerator: public StubCodeGenerator {
__ fsd(f26, f26_save);
__ fsd(f27, f27_save);
+ __ frrm(t0);
+ __ sd(t0, frm_save);
+ // Set frm to the state we need. We do want Round to Nearest. We
+ // don't want non-IEEE rounding modes.
+ Label skip_fsrmi;
+ guarantee(__ RoundingMode::rne == 0, "must be");
+ __ beqz(t0, skip_fsrmi);
+ __ fsrmi(__ RoundingMode::rne);
+ __ bind(skip_fsrmi);
+
// install Java thread in global register now we have saved
// whatever value it held
__ mv(xthread, c_rarg7);
@@ -414,6 +427,14 @@ class StubGenerator: public StubCodeGenerator {
__ ld(x9, x9_save);
+ // restore frm
+ Label skip_fsrm;
+ __ ld(t0, frm_save);
+ __ frrm(t1);
+ __ beq(t0, t1, skip_fsrm);
+ __ fsrm(t0);
+ __ bind(skip_fsrm);
+
__ ld(c_rarg0, call_wrapper);
__ ld(c_rarg1, result);
__ ld(c_rarg2, result_type);
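
The call-stub changes above save the caller's floating-point rounding mode (frm) on entry, force round-to-nearest-even (encoding 0) for the duration of the Java call when the caller had something else set, and restore the original mode on the way out. A toy C++ model of that save/set/restore discipline follows; the Csr struct and its values are invented for illustration.

    // Sketch (not HotSpot code) of the frm handling added to the call stub.
    #include <cassert>

    struct Csr { int frm = 3; };            // pretend the caller left a non-default mode
    constexpr int rne = 0;                  // round-to-nearest-even, the Java default

    int call_java(Csr& csr) {
      int saved_frm = csr.frm;              // __ frrm(t0); __ sd(t0, frm_save);
      if (saved_frm != rne) csr.frm = rne;  // beqz/fsrmi pair: only write when needed
      int result = 42;                      // ... run Java code under rne ...
      if (csr.frm != saved_frm) csr.frm = saved_frm;  // restore before returning
      return result;
    }

    int main() {
      Csr csr;
      assert(call_java(csr) == 42);
      assert(csr.frm == 3);                 // caller's rounding mode is preserved
      return 0;
    }
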
diff --git a/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp b/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
index 6d605d716af..4acf4975d3d 100644
--- a/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
+++ b/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
@@ -263,9 +263,13 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ mov_metadata(xmethod, entry);
__ sd(xmethod, Address(xthread, JavaThread::callee_target_offset())); // just in case callee is deoptimized
+ __ push_cont_fastpath(xthread);
+
__ ld(t0, Address(xmethod, Method::from_compiled_offset()));
__ jalr(t0);
+ __ pop_cont_fastpath(xthread);
+
// return value shuffle
if (!needs_return_buffer) {
#ifdef ASSERT
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.hpp b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
index 93ebd9e4e7d..0de6e9a19e1 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.hpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
@@ -55,6 +55,10 @@ class VM_Version : public Abstract_VM_Version {
_enabled = true;
_value = value;
}
+ void disable_feature() {
+ _enabled = false;
+ _value = -1;
+ }
const char* const pretty() { return _pretty; }
const uint64_t feature_bit() { return _feature_bit; }
const bool feature_string() { return _feature_string; }
@@ -63,16 +67,21 @@ class VM_Version : public Abstract_VM_Version {
virtual void update_flag() = 0;
};
- #define UPDATE_DEFAULT(flag) \
- void update_flag() { \
- assert(enabled(), "Must be."); \
- if (FLAG_IS_DEFAULT(flag)) { \
- FLAG_SET_DEFAULT(flag, true); \
- } \
- } \
-
- #define NO_UPDATE_DEFAULT \
- void update_flag() {} \
+ #define UPDATE_DEFAULT(flag) \
+ void update_flag() { \
+ assert(enabled(), "Must be."); \
+ if (FLAG_IS_DEFAULT(flag)) { \
+ FLAG_SET_DEFAULT(flag, true); \
+ } else { \
+ /* Sync CPU features with flags */ \
+ if (!flag) { \
+ disable_feature(); \
+ } \
+ } \
+ } \
+
+ #define NO_UPDATE_DEFAULT \
+ void update_flag() {} \
// Frozen standard extensions
// I RV64I
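
With the new else branch in UPDATE_DEFAULT, explicitly turning a VM flag off now also clears the corresponding CPU feature, so the feature list and the flag state can no longer disagree. A minimal sketch of that sync rule with a hypothetical Feature type (not the HotSpot macro):

#include <cstdio>

struct Feature {
  const char* name;
  bool enabled;      // CPU reported the extension
  long value;

  void disable_feature() { enabled = false; value = -1; }

  // flag_is_default / flag_value model FLAG_IS_DEFAULT and the flag itself.
  void update_flag(bool* flag_value, bool flag_is_default) {
    if (flag_is_default) {
      *flag_value = true;        // feature present -> default the flag to on
    } else if (!*flag_value) {
      disable_feature();         // user forced the flag off -> drop the feature too
    }
  }
};

int main() {
  Feature zba{"Zba", true, 1};
  bool use_zba = false;          // e.g. the flag was set to false on the command line
  zba.update_flag(&use_zba, /*flag_is_default=*/false);
  std::printf("%s enabled: %s, flag: %s\n",
              zba.name, zba.enabled ? "yes" : "no", use_zba ? "on" : "off");
  return 0;
}
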
diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
index 200f7ee978d..b7f1d360568 100644
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -428,6 +428,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
"must be aligned");
ce->emit_static_call_stub();
+ CHECK_BAILOUT();
// Prepend each BRASL with a nop.
__ relocate(relocInfo::static_call_type);
diff --git a/src/hotspot/cpu/s390/downcallLinker_s390.cpp b/src/hotspot/cpu/s390/downcallLinker_s390.cpp
index f831da90755..dc443666bae 100644
--- a/src/hotspot/cpu/s390/downcallLinker_s390.cpp
+++ b/src/hotspot/cpu/s390/downcallLinker_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -166,10 +166,10 @@ void DowncallStubGenerator::generate() {
locs.set(StubLocations::TARGET_ADDRESS, _abi._scratch2);
if (_captured_state_mask != 0) {
- __ block_comment("{ _captured_state_mask is set");
+ __ block_comment("_captured_state_mask_is_set {");
locs.set_frame_data(StubLocations::CAPTURED_STATE_BUFFER, allocated_frame_size);
allocated_frame_size += BytesPerWord;
- __ block_comment("} _captured_state_mask is set");
+ __ block_comment("} _captured_state_mask_is_set");
}
allocated_frame_size = align_up(allocated_frame_size, StackAlignmentInBytes);
@@ -184,7 +184,7 @@ void DowncallStubGenerator::generate() {
_frame_complete = __ pc() - start; // frame build complete.
if (_needs_transition) {
- __ block_comment("{ thread java2native");
+ __ block_comment("thread_java2native {");
__ get_PC(Z_R1_scratch);
address the_pc = __ pc();
__ set_last_Java_frame(Z_SP, Z_R1_scratch);
@@ -194,18 +194,18 @@ void DowncallStubGenerator::generate() {
// State transition
__ set_thread_state(_thread_in_native);
- __ block_comment("} thread java2native");
+ __ block_comment("} thread_java2native");
}
- __ block_comment("{ argument shuffle");
+ __ block_comment("argument_shuffle {");
arg_shuffle.generate(_masm, shuffle_reg, frame::z_jit_out_preserve_size, _abi._shadow_space_bytes, locs);
- __ block_comment("} argument shuffle");
+ __ block_comment("} argument_shuffle");
__ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
//////////////////////////////////////////////////////////////////////////////
if (_captured_state_mask != 0) {
- __ block_comment("{ save thread local");
+ __ block_comment("save_thread_local {");
out_reg_spiller.generate_spill(_masm, spill_offset);
@@ -216,7 +216,7 @@ void DowncallStubGenerator::generate() {
out_reg_spiller.generate_fill(_masm, spill_offset);
- __ block_comment("} save thread local");
+ __ block_comment("} save_thread_local");
}
//////////////////////////////////////////////////////////////////////////////
@@ -227,7 +227,7 @@ void DowncallStubGenerator::generate() {
Label L_after_reguard;
if (_needs_transition) {
- __ block_comment("{ thread native2java");
+ __ block_comment("thread_native2java {");
__ set_thread_state(_thread_in_native_trans);
if (!UseSystemMemoryBarrier) {
@@ -244,14 +244,16 @@ void DowncallStubGenerator::generate() {
// change thread state
__ set_thread_state(_thread_in_Java);
- __ block_comment("reguard stack check");
- __ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)),
- StackOverflow::stack_guard_yellow_reserved_disabled);
+ __ block_comment("reguard_stack_check {");
+ __ z_cli(Address(Z_thread,
+ JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)),
+ StackOverflow::stack_guard_yellow_reserved_disabled);
__ z_bre(L_reguard);
+ __ block_comment("} reguard_stack_check");
__ bind(L_after_reguard);
__ reset_last_Java_frame();
- __ block_comment("} thread native2java");
+ __ block_comment("} thread_native2java");
}
__ pop_frame();
@@ -261,7 +263,7 @@ void DowncallStubGenerator::generate() {
//////////////////////////////////////////////////////////////////////////////
if (_needs_transition) {
- __ block_comment("{ L_safepoint_poll_slow_path");
+ __ block_comment("L_safepoint_poll_slow_path {");
__ bind(L_safepoint_poll_slow_path);
// Need to save the native result registers around any runtime calls.
@@ -277,7 +279,7 @@ void DowncallStubGenerator::generate() {
__ block_comment("} L_safepoint_poll_slow_path");
//////////////////////////////////////////////////////////////////////////////
- __ block_comment("{ L_reguard");
+ __ block_comment("L_reguard {");
__ bind(L_reguard);
// Need to save the native result registers around any runtime calls.
diff --git a/src/hotspot/cpu/s390/foreignGlobals_s390.cpp b/src/hotspot/cpu/s390/foreignGlobals_s390.cpp
index 9796ab4ffe4..67aee9af7d9 100644
--- a/src/hotspot/cpu/s390/foreignGlobals_s390.cpp
+++ b/src/hotspot/cpu/s390/foreignGlobals_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -166,7 +166,7 @@ static void move_stack(MacroAssembler* masm, Register tmp_reg, int in_stk_bias,
case StorageType::INTEGER:
switch (from_reg.stack_size()) {
case 8: __ mem2reg_opt(as_Register(to_reg), from_addr, true);break;
- case 4: __ mem2reg_opt(as_Register(to_reg), from_addr, false);break;
+ case 4: __ mem2reg_signed_opt(as_Register(to_reg), from_addr);break;
default: ShouldNotReachHere();
}
break;
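
The switch to mem2reg_signed_opt selects a sign-extending 4-byte load for 32-bit INTEGER stack arguments; the sketch below simply contrasts sign- and zero-extension of the same slot value to show why the distinction matters for negative arguments:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  int32_t stack_slot = -42;            // 4-byte INTEGER argument in a stack slot

  // Zero-extending widen of the 32-bit pattern:
  uint32_t u; std::memcpy(&u, &stack_slot, 4);
  int64_t zero_extended = (int64_t)(uint64_t)u;

  // Sign-extending widen (what a signed 4-byte load produces):
  int32_t s; std::memcpy(&s, &stack_slot, 4);
  int64_t sign_extended = (int64_t)s;

  std::printf("zero-extended: %lld\nsign-extended: %lld\n",
              (long long)zero_extended, (long long)sign_extended);
  return 0;
}
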
diff --git a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp
index 2232215a587..39baf5bf047 100644
--- a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp
+++ b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp
@@ -30,6 +30,10 @@
const int StackAlignmentInBytes = 8;
+// All faults on s390x give the address only on page granularity.
+// Set pd_segfault_address to at least one page.
+const size_t pd_segfault_address = 4096;
+
#define SUPPORTS_NATIVE_CX8
#define CPU_MULTI_COPY_ATOMIC
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 8a56f3e4c2b..2dc3167dea6 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3149,28 +3149,32 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
Register temp = temp2;
NearLabel done, object_has_monitor;
+ const int hdr_offset = oopDesc::mark_offset_in_bytes();
+
+ assert_different_registers(temp1, temp2, oop, box);
+
BLOCK_COMMENT("compiler_fast_lock_object {");
// Load markWord from oop into mark.
- z_lg(displacedHeader, 0, oop);
+ z_lg(displacedHeader, hdr_offset, oop);
if (DiagnoseSyncOnValueBasedClasses != 0) {
- load_klass(Z_R1_scratch, oop);
- z_l(Z_R1_scratch, Address(Z_R1_scratch, Klass::access_flags_offset()));
+ load_klass(temp, oop);
+ z_l(temp, Address(temp, Klass::access_flags_offset()));
assert((JVM_ACC_IS_VALUE_BASED_CLASS & 0xFFFF) == 0, "or change following instruction");
- z_nilh(Z_R1_scratch, JVM_ACC_IS_VALUE_BASED_CLASS >> 16);
+ z_nilh(temp, JVM_ACC_IS_VALUE_BASED_CLASS >> 16);
z_brne(done);
}
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
- z_lgr(temp, displacedHeader);
- z_nill(temp, markWord::monitor_value);
- z_brne(object_has_monitor);
+ z_tmll(displacedHeader, markWord::monitor_value);
+ z_brnaz(object_has_monitor);
if (LockingMode == LM_MONITOR) {
// Set NE to indicate 'failure' -> take slow-path
+ // From loading the markWord, we know that oop != nullptr
z_ltgr(oop, oop);
z_bru(done);
} else if (LockingMode == LM_LEGACY) {
@@ -3182,23 +3186,24 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
// Initialize the box (must happen before we update the object mark).
z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
- // Memory Fence (in cmpxchgd)
- // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
-
- // If the compare-and-swap succeeded, then we found an unlocked object and we
- // have now locked it.
- z_csg(displacedHeader, box, 0, oop);
+ // Compare object markWord with mark and if equal, exchange box with object markWord.
+ // If the compare-and-swap succeeds, then we found an unlocked object and have now locked it.
+ z_csg(displacedHeader, box, hdr_offset, oop);
assert(currentHeader == displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
z_bre(done);
- // We did not see an unlocked object so try the fast recursive case.
-
+ // We did not see an unlocked object
+ // currentHeader contains what is currently stored in the oop's markWord.
+ // We might have a recursive case. Verify by checking if the owner is self.
+ // To do so, compare the value in the markWord (currentHeader) with the stack pointer.
z_sgr(currentHeader, Z_SP);
load_const_optimized(temp, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
z_ngr(currentHeader, temp);
- // z_brne(done);
- // z_release();
+
+ // result zero: owner is self -> recursive lock. Indicate that by storing 0 in the box.
+ // result not-zero: attempt failed. We don't hold the lock -> go for slow case.
+
z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
z_bru(done);
@@ -3208,28 +3213,34 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
z_bru(done);
}
+ bind(object_has_monitor);
+
Register zero = temp;
Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
- bind(object_has_monitor);
// The object's monitor m is unlocked iff m->owner is null,
// otherwise m->owner may contain a thread or a stack address.
- //
+
// Try to CAS m->owner from null to current thread.
- z_lghi(zero, 0);
// If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
+ // Otherwise, register zero is filled with the current owner.
+ z_lghi(zero, 0);
z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
if (LockingMode != LM_LIGHTWEIGHT) {
// Store a non-null value into the box.
z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
}
-#ifdef ASSERT
- z_brne(done);
- // We've acquired the monitor, check some invariants.
- // Invariant 1: _recursions should be 0.
- asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
- "monitor->_recursions should be 0", -1);
- z_ltgr(zero, zero); // Set CR=EQ.
-#endif
+
+ z_bre(done); // acquired the lock for the first time.
+
+ BLOCK_COMMENT("fast_path_recursive_lock {");
+ // Check if we are already the owner (recursive lock)
+ z_cgr(Z_thread, zero); // owner is stored in zero by "z_csg" above
+ z_brne(done); // not a recursive lock
+
+ // Current thread already owns the lock. Just increment recursion count.
+ z_agsi(Address(monitor_tagged, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 1ll);
+ z_cgr(zero, zero); // set the CC to EQUAL
+ BLOCK_COMMENT("} fast_path_recursive_lock");
bind(done);
BLOCK_COMMENT("} compiler_fast_lock_object");
@@ -3242,11 +3253,12 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
Register displacedHeader = temp1;
Register currentHeader = temp2;
Register temp = temp1;
- Register monitor = temp2;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
- Label done, object_has_monitor;
+ assert_different_registers(temp1, temp2, oop, box);
+
+ Label done, object_has_monitor, not_recursive;
BLOCK_COMMENT("compiler_fast_unlock_object {");
@@ -3261,30 +3273,25 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
// The object has an existing monitor iff (mark & monitor_value) != 0.
z_lg(currentHeader, hdr_offset, oop);
guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
- if (LockingMode == LM_LIGHTWEIGHT) {
- z_lgr(temp, currentHeader);
- }
- z_nill(currentHeader, markWord::monitor_value);
- z_brne(object_has_monitor);
+
+ z_tmll(currentHeader, markWord::monitor_value);
+ z_brnaz(object_has_monitor);
if (LockingMode == LM_MONITOR) {
// Set NE to indicate 'failure' -> take slow-path
z_ltgr(oop, oop);
z_bru(done);
} else if (LockingMode == LM_LEGACY) {
- // Check if it is still a light weight lock, this is true if we see
+ // Check if it is still a lightweight lock, this is true if we see
// the stack address of the basicLock in the markWord of the object
// copy box to currentHeader such that csg does not kill it.
z_lgr(currentHeader, box);
- z_csg(currentHeader, displacedHeader, 0, oop);
+ z_csg(currentHeader, displacedHeader, hdr_offset, oop);
z_bru(done); // csg sets CR as desired.
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- // don't load currentHead again from stack-top after monitor check, as it is possible
- // some other thread modified it.
- // currentHeader is altered, but it's contents are copied in temp as well
- lightweight_unlock(oop, temp, currentHeader, done);
+ lightweight_unlock(oop, currentHeader, displacedHeader, done);
z_bru(done);
}
@@ -3293,11 +3300,22 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
// Handle existing monitor.
bind(object_has_monitor);
- z_lg(currentHeader, hdr_offset, oop); // CurrentHeader is tagged with monitor_value set.
- load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
- z_brne(done);
- load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+
+ z_cg(Z_thread, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
z_brne(done);
+
+ BLOCK_COMMENT("fast_path_recursive_unlock {");
+ load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+ z_bre(not_recursive); // if 0 then jump, it's not recursive locking
+
+ // Recursive inflated unlock
+ z_agsi(Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), -1ll);
+ z_cgr(currentHeader, currentHeader); // set the CC to EQUAL
+ BLOCK_COMMENT("} fast_path_recursive_unlock");
+ z_bru(done);
+
+ bind(not_recursive);
+
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
z_brne(done);
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
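
The rewritten fast paths test the monitor bit with tmll/brnaz so the header register is not clobbered, and they add an inflated-monitor recursion fast path: CAS the owner from null to the current thread; if the CAS fails but the reported owner already is the current thread, bump _recursions instead of calling the runtime, and on unlock decrement _recursions while it is non-zero. A standalone sketch of that owner/recursions protocol with std::atomic (the protocol shape only, not the s390 instruction sequence):

#include <atomic>
#include <cstdio>
#include <thread>

struct ToyMonitor {
  std::atomic<std::thread::id> owner{};   // default-constructed id means "no owner"
  long recursions = 0;                    // only touched by the owning thread

  bool fast_lock() {
    std::thread::id self = std::this_thread::get_id();
    std::thread::id no_owner{};
    if (owner.compare_exchange_strong(no_owner, self)) {
      return true;                        // CAS null -> self: first acquisition
    }
    if (no_owner == self) {               // CAS reported the current owner back
      recursions++;                       // recursive acquisition, no slow path
      return true;
    }
    return false;                         // contended: would go to the runtime
  }

  bool fast_unlock() {
    if (owner.load() != std::this_thread::get_id()) return false;
    if (recursions > 0) {
      recursions--;                       // recursive unlock, stay owned
      return true;
    }
    owner.store(std::thread::id{});       // final unlock releases the monitor
    return true;
  }
};

int main() {
  ToyMonitor m;
  std::printf("lock: %d relock: %d unlock: %d unlock: %d\n",
              m.fast_lock(), m.fast_lock(), m.fast_unlock(), m.fast_unlock());
  return 0;
}
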
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index e1d3df97edf..32e5323b6b2 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -1,6 +1,6 @@
//
// Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2017, 2022 SAP SE. All rights reserved.
+// Copyright (c) 2017, 2024 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1447,6 +1447,7 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
address base = __ start_a_stub(size_exception_handler());
if (base == NULL) {
+ ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
@@ -1468,6 +1469,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
address base = __ start_a_stub(size_deopt_handler());
if (base == NULL) {
+ ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 05b607ec03c..d28399ef819 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -755,7 +755,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
ShouldNotReachHere();
}
}
- return align_up(stk, 2);
+ return stk;
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
diff --git a/src/hotspot/cpu/s390/upcallLinker_s390.cpp b/src/hotspot/cpu/s390/upcallLinker_s390.cpp
index b748ec547cc..93f21ab10b6 100644
--- a/src/hotspot/cpu/s390/upcallLinker_s390.cpp
+++ b/src/hotspot/cpu/s390/upcallLinker_s390.cpp
@@ -63,7 +63,7 @@ static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDesc
int offset = reg_save_area_offset;
- __ block_comment("{ preserve_callee_saved_regs ");
+ __ block_comment("preserve_callee_saved_regs {");
for (int i = 0; i < Register::number_of_registers; i++) {
Register reg = as_Register(i);
// Z_SP saved/restored by prologue/epilogue
@@ -82,7 +82,7 @@ static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDesc
}
}
- __ block_comment("} preserve_callee_saved_regs ");
+ __ block_comment("} preserve_callee_saved_regs");
}
static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) {
@@ -92,7 +92,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr
int offset = reg_save_area_offset;
- __ block_comment("{ restore_callee_saved_regs ");
+ __ block_comment("restore_callee_saved_regs {");
for (int i = 0; i < Register::number_of_registers; i++) {
Register reg = as_Register(i);
// Z_SP saved/restored by prologue/epilogue
@@ -111,7 +111,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr
}
}
- __ block_comment("} restore_callee_saved_regs ");
+ __ block_comment("} restore_callee_saved_regs");
}
static const int upcall_stub_code_base_size = 1024; // depends on GC (resolve_jobject)
@@ -199,7 +199,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
// Java methods won't preserve them, so save them here:
preserve_callee_saved_registers(_masm, abi, reg_save_area_offset);
- __ block_comment("{ on_entry");
+ __ block_comment("on_entry {");
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_entry));
__ z_aghik(Z_ARG1, Z_SP, frame_data_offset);
__ call(call_target_address);
@@ -207,14 +207,14 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ block_comment("} on_entry");
arg_spiller.generate_fill(_masm, arg_save_area_offset);
- __ block_comment("{ argument shuffle");
+ __ block_comment("argument_shuffle {");
arg_shuffle.generate(_masm, shuffle_reg, abi._shadow_space_bytes, frame::z_jit_out_preserve_size, locs);
- __ block_comment("} argument shuffle");
+ __ block_comment("} argument_shuffle");
- __ block_comment("{ receiver ");
+ __ block_comment("receiver {");
__ load_const_optimized(Z_ARG1, (intptr_t)receiver);
__ resolve_jobject(Z_ARG1, Z_tmp_1, Z_tmp_2);
- __ block_comment("} receiver ");
+ __ block_comment("} receiver");
__ load_const_optimized(Z_method, (intptr_t)entry);
__ z_stg(Z_method, Address(Z_thread, in_bytes(JavaThread::callee_target_offset())));
@@ -250,7 +250,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
result_spiller.generate_spill(_masm, res_save_area_offset);
- __ block_comment("{ on_exit");
+ __ block_comment("on_exit {");
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_exit));
__ z_aghik(Z_ARG1, Z_SP, frame_data_offset);
__ call(call_target_address);
@@ -266,7 +266,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
//////////////////////////////////////////////////////////////////////////////
- __ block_comment("{ exception handler");
+ __ block_comment("exception_handler {");
intptr_t exception_handler_offset = __ pc() - start;
@@ -277,7 +277,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ call_c(call_target_address);
__ should_not_reach_here();
- __ block_comment("} exception handler");
+ __ block_comment("} exception_handler");
_masm->flush();
diff --git a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp
index bdf9ac440d0..7eff43471b5 100644
--- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp
+++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp
@@ -26,6 +26,7 @@
#define CPU_X86_GLOBALDEFINITIONS_X86_HPP
const int StackAlignmentInBytes = 16;
+const size_t pd_segfault_address = 1024;
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
index c391349cfa3..7a7aa8e9070 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
@@ -528,8 +528,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
- // return value can be odd number of VMRegImpl stack slots make multiple of 2
- return align_up(stack, 2);
+ return stack;
}
// Patch the callers callsite with entry to compiled code if it exists.
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 0712ba50c07..f5847a0dc6d 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -497,7 +497,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
uint int_args = 0;
uint fp_args = 0;
- uint stk_args = 0; // inc by 2 each time
+ uint stk_args = 0;
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
@@ -509,8 +509,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
- stk_args += 2;
+ stk_args += 1;
}
break;
case T_VOID:
@@ -527,6 +528,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@@ -535,8 +537,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set1(VMRegImpl::stack2reg(stk_args));
- stk_args += 2;
+ stk_args += 1;
}
break;
case T_DOUBLE:
@@ -544,6 +547,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
} else {
+ stk_args = align_up(stk_args, 2);
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@@ -554,7 +558,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
- return align_up(stk_args, 2);
+ return stk_args;
}
// Patch the callers callsite with entry to compiled code if it exists.
diff --git a/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp b/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp
index dfce6aef52d..b687d929364 100644
--- a/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp
+++ b/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp
@@ -296,8 +296,12 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ mov_metadata(rbx, entry);
__ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx); // just in case callee is deoptimized
+ __ push_cont_fastpath();
+
__ call(Address(rbx, Method::from_compiled_offset()));
+ __ pop_cont_fastpath();
+
// return value shuffle
if (!needs_return_buffer) {
#ifdef ASSERT
diff --git a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp
index ca11d106c26..c0c74e0987a 100644
--- a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp
+++ b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp
@@ -43,5 +43,12 @@
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
const bool CCallingConventionRequiresIntsAsLongs = false;
+#if defined(AIX)
+const size_t pd_segfault_address = -1;
+#elif defined(S390)
+const size_t pd_segfault_address = 4096;
+#else
+const size_t pd_segfault_address = 1024;
+#endif
#endif // CPU_ZERO_GLOBALDEFINITIONS_ZERO_HPP
diff --git a/src/hotspot/os/aix/attachListener_aix.cpp b/src/hotspot/os/aix/attachListener_aix.cpp
index 02bf4727194..616976e633f 100644
--- a/src/hotspot/os/aix/attachListener_aix.cpp
+++ b/src/hotspot/os/aix/attachListener_aix.cpp
@@ -477,14 +477,14 @@ AttachOperation* AttachListener::dequeue() {
void AttachListener::vm_start() {
char fn[UNIX_PATH_MAX];
- struct stat64 st;
+ struct stat st;
int ret;
int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
- RESTARTABLE(::stat64(fn, &st), ret);
+ RESTARTABLE(::stat(fn, &st), ret);
if (ret == 0) {
ret = ::unlink(fn);
if (ret == -1) {
@@ -504,8 +504,8 @@ int AttachListener::pd_init() {
bool AttachListener::check_socket_file() {
int ret;
- struct stat64 st;
- ret = stat64(AixAttachListener::path(), &st);
+ struct stat st;
+ ret = stat(AixAttachListener::path(), &st);
if (ret == -1) { // need to restart attach listener.
log_debug(attach)("Socket file %s does not exist - Restart Attach Listener",
AixAttachListener::path());
@@ -544,14 +544,14 @@ bool AttachListener::is_init_trigger() {
}
char fn[PATH_MAX + 1];
int ret;
- struct stat64 st;
+ struct stat st;
os::snprintf_checked(fn, sizeof(fn), ".attach_pid%d", os::current_process_id());
- RESTARTABLE(::stat64(fn, &st), ret);
+ RESTARTABLE(::stat(fn, &st), ret);
if (ret == -1) {
log_trace(attach)("Failed to find attach file: %s, trying alternate", fn);
snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
os::get_temp_directory(), os::current_process_id());
- RESTARTABLE(::stat64(fn, &st), ret);
+ RESTARTABLE(::stat(fn, &st), ret);
if (ret == -1) {
log_debug(attach)("Failed to find attach file: %s", fn);
}
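
Replacing the explicit stat64/open64/lseek64 calls relies on the build defining _LARGE_FILES on AIX (enforced by the new #error in os_aix.cpp below), which maps the plain POSIX names onto their 64-bit variants. A toy compile-time check of that assumption; it only compiles where large-file support is in effect:

#include <sys/types.h>
#include <sys/stat.h>
#include <cstdio>

int main() {
  // With large-file support, off_t and the stat size field are 64-bit, so the
  // plain stat()/open()/lseek() names handle files beyond 2 GiB.
  static_assert(sizeof(off_t) == 8, "large-file support expected (e.g. -D_LARGE_FILES on AIX)");
  struct stat st;
  if (::stat("/", &st) == 0) {
    std::printf("st_size is %zu bytes wide\n", sizeof(st.st_size));
  }
  return 0;
}
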
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 3321c32a687..ac9b4541db3 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -113,6 +113,10 @@
#include
#include
+#ifndef _LARGE_FILES
+#error Hotspot on AIX must be compiled with -D_LARGE_FILES
+#endif
+
// Missing prototypes for various system APIs.
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
@@ -133,7 +137,7 @@ extern "C" int getargs(procsinfo*, int, char*, int);
#define ERROR_MP_VMGETINFO_FAILED 102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
-// excerpts from systemcfg.h that might be missing on older os levels
+// excerpts from sys/systemcfg.h that might be missing on older os levels
#ifndef PV_7
#define PV_7 0x200000 /* Power PC 7 */
#endif
@@ -152,7 +156,12 @@ extern "C" int getargs(procsinfo*, int, char*, int);
#ifndef PV_9_Compat
#define PV_9_Compat 0x408000 /* Power PC 9 */
#endif
-
+#ifndef PV_10
+ #define PV_10 0x500000 /* Power PC 10 */
+#endif
+#ifndef PV_10_Compat
+ #define PV_10_Compat 0x508000 /* Power PC 10 */
+#endif
static address resolve_function_descriptor_to_code_pointer(address p);
@@ -316,27 +325,14 @@ static char cpu_arch[] = "ppc64";
#error Add appropriate cpu_arch setting
#endif
-// Wrap the function "vmgetinfo" which is not available on older OS releases.
-static int checked_vmgetinfo(void *out, int command, int arg) {
- if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
- guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
- }
- return ::vmgetinfo(out, command, arg);
-}
-
// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {
-
- if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
- // AS/400 older than V6R1: no vmgetinfo here, default to 4K
- return 4*K;
- }
-
vm_page_info pi;
pi.addr = (uint64_t)addr;
- if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
+ if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
return pi.pagesize;
} else {
+ trcVerbose("vmgetinfo(VM_PAGE_INFO) failed (errno: %d)", errno);
assert(false, "vmgetinfo failed to retrieve page size");
return 4*K;
}
@@ -438,7 +434,7 @@ static void query_multipage_support() {
{
const int MAX_PAGE_SIZES = 4;
psize_t sizes[MAX_PAGE_SIZES];
- const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
+ const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
if (num_psizes == -1) {
trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
trcVerbose("disabling multipage support.");
@@ -594,17 +590,6 @@ void os::init_system_properties_values() {
#undef EXTENSIONS_DIR
}
-////////////////////////////////////////////////////////////////////////////////
-// breakpoint support
-
-void os::breakpoint() {
- BREAKPOINT;
-}
-
-extern "C" void breakpoint() {
- // use debugger to set breakpoint here
-}
-
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
@@ -769,10 +754,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
// Make sure we run in 1:1 kernel-user-thread mode.
- if (os::Aix::on_aix()) {
- guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
- guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
- }
+ guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
+ guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
// Start in suspended state, and in os::thread_start, wake the thread up.
guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
@@ -1102,10 +1085,9 @@ bool os::dll_address_to_library_name(address addr, char* buf,
return true;
}
-void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+static void* dll_load_library(const char *filename, char *ebuf, int ebuflen) {
log_info(os)("attempting shared library load of %s", filename);
-
if (ebuf && ebuflen > 0) {
ebuf[0] = '\0';
ebuf[ebuflen - 1] = '\0';
@@ -1151,6 +1133,26 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
return nullptr;
}
+// Load library named <filename>.
+// If filename matches .so, and loading fails, repeat with .a.
+void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+ void* result = nullptr;
+ char* const file_path = strdup(filename);
+ char* const pointer_to_dot = strrchr(file_path, '.');
+ const char old_extension[] = ".so";
+ const char new_extension[] = ".a";
+ STATIC_ASSERT(sizeof(old_extension) >= sizeof(new_extension));
+ // First try to load the existing file.
+ result = dll_load_library(filename, ebuf, ebuflen);
+ // If the load fails, we try to reload by changing the extension to .a, for .so files only.
+ // Shared objects in .so format don't have braces, hence they get removed for archives with members.
+ if (result == nullptr && pointer_to_dot != nullptr && strcmp(pointer_to_dot, old_extension) == 0) {
+ snprintf(pointer_to_dot, sizeof(old_extension), "%s", new_extension);
+ result = dll_load_library(file_path, ebuf, ebuflen);
+ }
+ FREE_C_HEAP_ARRAY(char, file_path);
+ return result;
+}
void os::print_dll_info(outputStream *st) {
st->print_cr("Dynamic libraries:");
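
The new os::dll_load wrapper first tries the given name and, only when that fails and the name ends in ".so", retries with the extension rewritten to ".a", since AIX libraries are frequently shipped as archives. A standalone sketch of the rename-and-retry logic with an injectable stand-in loader (try_load is hypothetical, not the real dll_load_library):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in loader; pretend only ".a" files load successfully.
static void* try_load(const char* path) {
  const char* dot = std::strrchr(path, '.');
  return (dot != nullptr && std::strcmp(dot, ".a") == 0) ? (void*)0x1 : nullptr;
}

static void* load_with_archive_fallback(const char* filename) {
  void* result = try_load(filename);
  if (result != nullptr) return result;

  const char* dot = std::strrchr(filename, '.');
  if (dot == nullptr || std::strcmp(dot, ".so") != 0) {
    return nullptr;                        // only ".so" names get a second chance
  }
  char* copy = ::strdup(filename);
  char* copy_dot = copy + (dot - filename);
  std::snprintf(copy_dot, sizeof(".so"), "%s", ".a");   // rewrite ".so" -> ".a" in place
  std::printf("retrying as %s\n", copy);
  result = try_load(copy);
  std::free(copy);
  return result;
}

int main() {
  std::printf("loaded: %p\n", load_with_archive_fallback("libdemo.so"));
  return 0;
}
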
@@ -1256,22 +1258,10 @@ void os::print_memory_info(outputStream* st) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
- if (os::Aix::on_aix()) {
- st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
- st->print_cr("physical free : " SIZE_FORMAT, mi.real_free);
- st->print_cr("swap total : " SIZE_FORMAT, mi.pgsp_total);
- st->print_cr("swap free : " SIZE_FORMAT, mi.pgsp_free);
- } else {
- // PASE - Numbers are result of QWCRSSTS; they mean:
- // real_total: Sum of all system pools
- // real_free: always 0
- // pgsp_total: we take the size of the system ASP
- // pgsp_free: size of system ASP times percentage of system ASP unused
- st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
- st->print_cr("system asp total : " SIZE_FORMAT, mi.pgsp_total);
- st->print_cr("%% system asp used : %.2f",
- mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
- }
+ st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
+ st->print_cr("physical free : " SIZE_FORMAT, mi.real_free);
+ st->print_cr("swap total : " SIZE_FORMAT, mi.pgsp_total);
+ st->print_cr("swap free : " SIZE_FORMAT, mi.pgsp_free);
}
st->cr();
@@ -1294,6 +1284,9 @@ void os::print_memory_info(outputStream* st) {
void os::get_summary_cpu_info(char* buf, size_t buflen) {
// read _system_configuration.version
switch (_system_configuration.version) {
+ case PV_10:
+ strncpy(buf, "Power PC 10", buflen);
+ break;
case PV_9:
strncpy(buf, "Power PC 9", buflen);
break;
@@ -1333,6 +1326,9 @@ void os::get_summary_cpu_info(char* buf, size_t buflen) {
case PV_9_Compat:
strncpy(buf, "PV_9_Compat", buflen);
break;
+ case PV_10_Compat:
+ strncpy(buf, "PV_10_Compat", buflen);
+ break;
default:
strncpy(buf, "unknown", buflen);
}
@@ -2001,7 +1997,7 @@ static bool checked_mprotect(char* addr, size_t size, int prot) {
//
// See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
if (!rc) {
@@ -2151,15 +2147,6 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool
return addr;
}
-// Used to convert frequent JVM_Yield() to nops
-bool os::dont_yield() {
- return DontYieldALot;
-}
-
-void os::naked_yield() {
- sched_yield();
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@@ -2346,9 +2333,7 @@ void os::init(void) {
}
// Reset the perfstat information provided by ODM.
- if (os::Aix::on_aix()) {
- libperfstat::perfstat_reset();
- }
+ libperfstat::perfstat_reset();
// Now initialize basic system properties. Note that for some of the values we
// need libperfstat etc.
@@ -2495,10 +2480,10 @@ int os::open(const char *path, int oflag, int mode) {
// IV90804: OPENING A FILE IN AFS WITH O_CLOEXEC FAILS WITH AN EINVAL ERROR APPLIES TO AIX 7100-04 17/04/14 PTF PECHANGE
int oflag_with_o_cloexec = oflag | O_CLOEXEC;
- int fd = ::open64(path, oflag_with_o_cloexec, mode);
+ int fd = ::open(path, oflag_with_o_cloexec, mode);
if (fd == -1) {
// we might fail in the open call when O_CLOEXEC is set, so try again without (see IV90804)
- fd = ::open64(path, oflag, mode);
+ fd = ::open(path, oflag, mode);
if (fd == -1) {
return -1;
}
@@ -2506,8 +2491,8 @@ int os::open(const char *path, int oflag, int mode) {
// If the open succeeded, the file might still be a directory.
{
- struct stat64 buf64;
- int ret = ::fstat64(fd, &buf64);
+ struct stat buf64;
+ int ret = ::fstat(fd, &buf64);
int st_mode = buf64.st_mode;
if (ret != -1) {
@@ -2561,17 +2546,17 @@ int os::open(const char *path, int oflag, int mode) {
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = O_WRONLY | O_CREAT;
oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
- return ::open64(path, oflags, S_IREAD | S_IWRITE);
+ return ::open(path, oflags, S_IREAD | S_IWRITE);
}
// return current position of file pointer
jlong os::current_file_offset(int fd) {
- return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
+ return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
}
// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
- return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
+ return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}
// Map a block of memory.
@@ -2919,9 +2904,7 @@ void os::Aix::initialize_libo4() {
}
}
-// AIX: initialize the libperfstat library.
void os::Aix::initialize_libperfstat() {
- assert(os::Aix::on_aix(), "AIX only");
if (!libperfstat::init()) {
trcVerbose("libperfstat initialization failed.");
assert(false, "libperfstat initialization failed");
diff --git a/src/hotspot/os/aix/os_aix.hpp b/src/hotspot/os/aix/os_aix.hpp
index a1db2b2be3c..22fb5327bf9 100644
--- a/src/hotspot/os/aix/os_aix.hpp
+++ b/src/hotspot/os/aix/os_aix.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2023 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,13 +101,6 @@ class os::Aix {
return _on_pase ? true : false;
}
- // Function returns true if we run on AIX, false if we run on OS/400
- // (pase).
- static bool on_aix() {
- assert(_on_pase != -1, "not initialized");
- return _on_pase ? false : true;
- }
-
// Get 4 byte AIX kernel version number:
// highest 2 bytes: Version, Release
// if available: lowest 2 bytes: Tech Level, Service Pack.
@@ -130,11 +123,6 @@ class os::Aix {
return on_pase() && os_version_short() <= 0x0504;
}
- // Convenience method: returns true if running on AIX 5.3 or older.
- static bool on_aix_53_or_older() {
- return on_aix() && os_version_short() <= 0x0503;
- }
-
// Returns true if we run in SPEC1170 compliant mode (XPG_SUS_ENV=ON).
static bool xpg_sus_mode() {
assert(_xpg_sus_mode != -1, "not initialized");
diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp
index 68233097b49..630bdf22c44 100644
--- a/src/hotspot/os/aix/porting_aix.cpp
+++ b/src/hotspot/os/aix/porting_aix.cpp
@@ -906,10 +906,11 @@ struct TableLocker {
~TableLocker() { pthread_mutex_unlock(&g_handletable_mutex); }
};
struct handletableentry{
- void* handle;
- ino64_t inode;
- dev64_t devid;
- uint refcount;
+ void* handle;
+ ino64_t inode;
+ dev64_t devid;
+ char* member;
+ uint refcount;
};
constexpr unsigned init_num_handles = 128;
static unsigned max_handletable = 0;
@@ -1049,6 +1050,14 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) {
return nullptr;
}
else {
+ // Extract the member string if it exists, duplicate it, and store a pointer to it;
+ // if no member exists, store nullptr.
+ char* member = nullptr;
+ const char* substr;
+ if (filename[strlen(filename) - 1] == ')' && (substr = strrchr(filename, '('))) {
+ member = os::strdup(substr);
+ }
+
unsigned i = 0;
TableLocker lock;
// check if library belonging to filename is already loaded.
@@ -1056,7 +1065,10 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) {
for (i = 0; i < g_handletable_used; i++) {
if ((p_handletable + i)->handle &&
(p_handletable + i)->inode == libstat.st_ino &&
- (p_handletable + i)->devid == libstat.st_dev) {
+ (p_handletable + i)->devid == libstat.st_dev &&
+ (((p_handletable + i)->member == nullptr && member == nullptr) ||
+ ((p_handletable + i)->member != nullptr && member != nullptr &&
+ strcmp((p_handletable + i)->member, member) == 0))) {
(p_handletable + i)->refcount++;
result = (p_handletable + i)->handle;
break;
@@ -1084,6 +1096,7 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) {
(p_handletable + i)->handle = result;
(p_handletable + i)->inode = libstat.st_ino;
(p_handletable + i)->devid = libstat.st_dev;
+ (p_handletable + i)->member = member;
(p_handletable + i)->refcount = 1;
}
else {
@@ -1131,7 +1144,7 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
// while in the second case we simply have to nag.
res = (0 == ::dlclose(libhandle));
if (!res) {
- // error analysis when dlopen fails
+ // error analysis when dlclose fails
const char* error_report = ::dlerror();
if (error_report == nullptr) {
error_report = "dlerror returned no error description";
@@ -1145,7 +1158,11 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
if (i < g_handletable_used) {
if (res) {
// First case: libhandle was found (with refcount == 0) and ::dlclose successful,
- // so delete entry from array
+ // so delete entry from array (do not forget to free member-string space if member exists)
+ if ((p_handletable + i)->member) {
+ os::free((p_handletable + i)->member);
+ (p_handletable + i)->member = nullptr;
+ }
g_handletable_used--;
// If the entry was the last one of the array, the previous g_handletable_used--
// is sufficient to remove the entry from the array, otherwise we move the last
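
The dlopen bookkeeping in porting_aix.cpp now records the archive member suffix alongside device and inode, so loading "libX.a(a.o)" and "libX.a(b.o)" no longer collapses into one refcounted handle even though both names stat to the same file. A simplified sketch of that keying, using std::vector/std::string rather than the raw handle table:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct Entry {
  uint64_t dev;
  uint64_t inode;
  std::string member;   // empty when the name has no "(member)" suffix
  unsigned refcount;
};

static std::string member_of(const std::string& name) {
  if (!name.empty() && name.back() == ')') {
    size_t open = name.rfind('(');
    if (open != std::string::npos) return name.substr(open);
  }
  return std::string();
}

// Returns the refcount after "loading" the name; the same (dev, inode, member)
// shares an entry, while a different member gets its own.
static unsigned load(std::vector<Entry>& table, uint64_t dev, uint64_t inode,
                     const std::string& name) {
  std::string member = member_of(name);
  for (Entry& e : table) {
    if (e.dev == dev && e.inode == inode && e.member == member) {
      return ++e.refcount;
    }
  }
  table.push_back(Entry{dev, inode, member, 1});
  return 1;
}

int main() {
  std::vector<Entry> table;
  std::printf("%u\n", load(table, 1, 42, "libX.a(a.o)"));  // 1: new entry
  std::printf("%u\n", load(table, 1, 42, "libX.a(b.o)"));  // 1: different member
  std::printf("%u\n", load(table, 1, 42, "libX.a(a.o)"));  // 2: shared with the first
  return 0;
}
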
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 4a2922cb728..0a786ddf35c 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -506,17 +506,6 @@ void os::init_system_properties_values() {
#undef EXTENSIONS_DIR
}
-////////////////////////////////////////////////////////////////////////////////
-// breakpoint support
-
-void os::breakpoint() {
- BREAKPOINT;
-}
-
-extern "C" void breakpoint() {
- // use debugger to set breakpoint here
-}
-
//////////////////////////////////////////////////////////////////////////////
// create new thread
@@ -1522,7 +1511,7 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
#if defined(__OpenBSD__)
// XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
if (::mprotect(addr, size, prot) == 0) {
return true;
}
@@ -1624,7 +1613,7 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info
bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
#if defined(__OpenBSD__)
// XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
return ::mprotect(addr, size, PROT_NONE) == 0;
#elif defined(__APPLE__)
if (exec) {
@@ -1694,7 +1683,7 @@ static bool bsd_mprotect(char* addr, size_t size, int prot) {
assert(addr == bottom, "sanity check");
size = align_up(pointer_delta(addr, bottom, 1) + size, os::vm_page_size());
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
return ::mprotect(bottom, size, prot) == 0;
}
@@ -1795,15 +1784,6 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool
return nullptr;
}
-// Used to convert frequent JVM_Yield() to nops
-bool os::dont_yield() {
- return DontYieldALot;
-}
-
-void os::naked_yield() {
- sched_yield();
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
diff --git a/src/hotspot/os/bsd/os_perf_bsd.cpp b/src/hotspot/os/bsd/os_perf_bsd.cpp
index f91dfa87f07..631d2135b64 100644
--- a/src/hotspot/os/bsd/os_perf_bsd.cpp
+++ b/src/hotspot/os/bsd/os_perf_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
#include
#include
#include
+ #include <sys/times.h>
#endif
static const double NANOS_PER_SEC = 1000000000.0;
@@ -47,10 +48,10 @@ class CPUPerformanceInterface::CPUPerformance : public CHeapObj {
friend class CPUPerformanceInterface;
private:
#ifdef __APPLE__
- uint64_t _total_cpu_nanos;
+ uint64_t _jvm_real;
uint64_t _total_csr_nanos;
- uint64_t _jvm_user_nanos;
- uint64_t _jvm_system_nanos;
+ uint64_t _jvm_user;
+ uint64_t _jvm_system;
long _jvm_context_switches;
long _used_ticks;
long _total_ticks;
@@ -86,11 +87,11 @@ class CPUPerformanceInterface::CPUPerformance : public CHeapObj {
CPUPerformanceInterface::CPUPerformance::CPUPerformance() {
#ifdef __APPLE__
- _total_cpu_nanos= 0;
+ _jvm_real = 0;
_total_csr_nanos= 0;
_jvm_context_switches = 0;
- _jvm_user_nanos = 0;
- _jvm_system_nanos = 0;
+ _jvm_user = 0;
+ _jvm_system = 0;
_used_ticks = 0;
_total_ticks = 0;
_active_processor_count = 0;
@@ -152,42 +153,35 @@ int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_
int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad) {
#ifdef __APPLE__
int result = cpu_load_total_process(psystemTotalLoad);
- mach_port_t task = mach_task_self();
- mach_msg_type_number_t task_info_count = TASK_INFO_MAX;
- task_info_data_t task_info_data;
- kern_return_t kr = task_info(task, TASK_ABSOLUTETIME_INFO, (task_info_t)task_info_data, &task_info_count);
- if (kr != KERN_SUCCESS) {
+
+ struct tms buf;
+ clock_t jvm_real = times(&buf);
+ if (jvm_real == (clock_t) (-1)) {
return OS_ERR;
}
- task_absolutetime_info_t absolutetime_info = (task_absolutetime_info_t)task_info_data;
int active_processor_count = os::active_processor_count();
- uint64_t jvm_user_nanos = absolutetime_info->total_user;
- uint64_t jvm_system_nanos = absolutetime_info->total_system;
-
- uint64_t total_cpu_nanos;
- if(!now_in_nanos(&total_cpu_nanos)) {
- return OS_ERR;
- }
+ uint64_t jvm_user = buf.tms_utime;
+ uint64_t jvm_system = buf.tms_stime;
- if (_total_cpu_nanos == 0 || active_processor_count != _active_processor_count) {
- // First call or change in active processor count
+ if (active_processor_count != _active_processor_count) {
+ // Change in active processor count
result = OS_ERR;
} else {
- uint64_t delta_nanos = active_processor_count * (total_cpu_nanos - _total_cpu_nanos);
- if (delta_nanos == 0) {
+ uint64_t delta = active_processor_count * (jvm_real - _jvm_real);
+ if (delta == 0) {
// Avoid division by zero
return OS_ERR;
}
- *pjvmUserLoad = normalize((double)(jvm_user_nanos - _jvm_user_nanos)/delta_nanos);
- *pjvmKernelLoad = normalize((double)(jvm_system_nanos - _jvm_system_nanos)/delta_nanos);
+ *pjvmUserLoad = normalize((double)(jvm_user - _jvm_user) / delta);
+ *pjvmKernelLoad = normalize((double)(jvm_system - _jvm_system) / delta);
}
_active_processor_count = active_processor_count;
- _total_cpu_nanos = total_cpu_nanos;
- _jvm_user_nanos = jvm_user_nanos;
- _jvm_system_nanos = jvm_system_nanos;
+ _jvm_real = jvm_real;
+ _jvm_user = jvm_user;
+ _jvm_system = jvm_system;
return result;
#else
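
The macOS JVM load calculation now derives user and kernel load from times(2) deltas rather than Mach task_info: process user and system ticks over elapsed real ticks, divided across the active processors. A standalone sketch of that arithmetic:

#include <sys/times.h>
#include <unistd.h>
#include <cstdio>

struct Sample { clock_t real, user, system; };

static Sample take_sample() {
  struct tms buf;
  clock_t real = ::times(&buf);                // wall clock in clock ticks
  return Sample{real, buf.tms_utime, buf.tms_stime};
}

// Load of this process between two samples, normalized to total machine capacity.
static void cpu_loads(const Sample& prev, const Sample& cur, int ncpus,
                      double* user_load, double* kernel_load) {
  double delta = (double)ncpus * (double)(cur.real - prev.real);
  if (delta <= 0) { *user_load = *kernel_load = 0.0; return; }
  *user_load   = (double)(cur.user   - prev.user)   / delta;
  *kernel_load = (double)(cur.system - prev.system) / delta;
}

int main() {
  Sample a = take_sample();
  volatile double x = 0;                       // burn a little user time
  for (long i = 0; i < 20000000; i++) x += i * 0.5;
  Sample b = take_sample();

  double u, k;
  cpu_loads(a, b, (int)::sysconf(_SC_NPROCESSORS_ONLN), &u, &k);
  std::printf("user %.2f%%, kernel %.2f%% of machine capacity\n", 100 * u, 100 * k);
  return 0;
}
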
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index f71a20207c5..b0504aff9c2 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -89,6 +89,8 @@
#endif
// put OS-includes here
+# include
+# include
# include
# include
# include
@@ -315,6 +317,22 @@ static void next_line(FILE *f) {
} while (c != '\n' && c != EOF);
}
+void os::Linux::kernel_version(long* major, long* minor) {
+ *major = -1;
+ *minor = -1;
+
+ struct utsname buffer;
+ int ret = uname(&buffer);
+ if (ret != 0) {
+ log_warning(os)("uname(2) failed to get kernel version: %s", os::errno_name(ret));
+ return;
+ }
+ int nr_matched = sscanf(buffer.release, "%ld.%ld", major, minor);
+ if (nr_matched != 2) {
+ log_warning(os)("Parsing kernel version failed, expected 2 version numbers, only matched %d", nr_matched);
+ }
+}
+
bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
FILE* fh;
uint64_t userTicks, niceTicks, systemTicks, idleTicks;
@@ -565,17 +583,6 @@ void os::init_system_properties_values() {
#undef EXTENSIONS_DIR
}
-////////////////////////////////////////////////////////////////////////////////
-// breakpoint support
-
-void os::breakpoint() {
- BREAKPOINT;
-}
-
-extern "C" void breakpoint() {
- // use debugger to set breakpoint here
-}
-
//////////////////////////////////////////////////////////////////////////////
// detecting pthread library
@@ -2151,6 +2158,8 @@ void os::Linux::print_proc_sys_info(outputStream* st) {
"/proc/sys/kernel/threads-max", st);
_print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)",
"/proc/sys/vm/max_map_count", st);
+ _print_ascii_file_h("/proc/sys/vm/swappiness (control to define how aggressively the kernel swaps out anonymous memory)",
+ "/proc/sys/vm/swappiness", st);
_print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)",
"/proc/sys/kernel/pid_max", st);
}
@@ -3554,7 +3563,7 @@ static bool linux_mprotect(char* addr, size_t size, int prot) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (addr != g_assert_poison)
#endif
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
return ::mprotect(bottom, size, prot) == 0;
}
@@ -4245,25 +4254,6 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool
return nullptr;
}
-// Used to convert frequent JVM_Yield() to nops
-bool os::dont_yield() {
- return DontYieldALot;
-}
-
-// Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will
-// actually give up the CPU. Since skip buddy (v2.6.28):
-//
-// * Sets the yielding task as skip buddy for current CPU's run queue.
-// * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task).
-// * Clears skip buddies for this run queue (yielding task no longer a skip buddy).
-//
-// An alternative is calling os::naked_short_nanosleep with a small number to avoid
-// getting re-scheduled immediately.
-//
-void os::naked_yield() {
- sched_yield();
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index 4baf381b2f2..78bf68a72e9 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -115,6 +115,8 @@ class os::Linux {
bool has_steal_ticks;
};
+ static void kernel_version(long* major, long* minor);
+
// which_logical_cpu=-1 returns accumulated ticks for all cpus.
static bool get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu);
static bool _stack_is_executable;
diff --git a/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp b/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
index d67deeda164..96269fef53d 100644
--- a/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
+++ b/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
@@ -30,7 +30,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
-#include "runtime/os.hpp"
+#include "os_linux.hpp"
#include "utilities/debug.hpp"
#include "utilities/systemMemoryBarrier.hpp"
@@ -47,6 +47,8 @@
#define SYS_membarrier 365
#elif defined(AARCH64)
#define SYS_membarrier 283
+ #elif defined(ARM32)
+ #define SYS_membarrier 389
#elif defined(ALPHA)
#define SYS_membarrier 517
#elif defined(LOONGARCH)
@@ -78,6 +80,18 @@ static int membarrier(int cmd, unsigned int flags, int cpu_id) {
}
bool LinuxSystemMemoryBarrier::initialize() {
+#if defined(RISCV)
+// RISCV port was introduced in kernel 4.4.
+// 4.4 also made membar private expedited mandatory.
+// But RISCV actually doesn't support it until 6.9.
+ long major, minor;
+ os::Linux::kernel_version(&major, &minor);
+ if (!(major > 6 || (major == 6 && minor >= 9))) {
+ log_info(os)("Linux kernel %ld.%ld does not support MEMBARRIER PRIVATE_EXPEDITED on RISC-V.",
+ major, minor);
+ return false;
+ }
+#endif
int ret = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
if (ret < 0) {
log_info(os)("MEMBARRIER_CMD_QUERY unsupported");
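
Per the comment in the hunk above, the barrier is only enabled on RISC-V when uname reports a kernel of at least 6.9, because older kernels advertise the membarrier command there without the needed support. A standalone sketch of the parse-and-compare gate, mirroring os::Linux::kernel_version plus the 6.9 check:

#include <sys/utsname.h>
#include <cstdio>

static bool kernel_at_least(long want_major, long want_minor) {
  struct utsname buffer;
  if (::uname(&buffer) != 0) return false;          // be conservative on failure
  long major = -1, minor = -1;
  if (std::sscanf(buffer.release, "%ld.%ld", &major, &minor) != 2) return false;
  return major > want_major || (major == want_major && minor >= want_minor);
}

int main() {
  bool ok = kernel_at_least(6, 9);
  std::printf("membarrier private expedited usable here (per this gate): %s\n",
              ok ? "yes" : "no");
  return 0;
}
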
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index af7de184b14..7f95560a19e 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -194,6 +194,17 @@ size_t os::lasterror(char *buf, size_t len) {
return n;
}
+////////////////////////////////////////////////////////////////////////////////
+// breakpoint support
+
+void os::breakpoint() {
+ BREAKPOINT;
+}
+
+extern "C" void breakpoint() {
+ // use debugger to set breakpoint here
+}
+
// Return true if user is running as root.
bool os::have_special_privileges() {
static bool privileges = (getuid() != geteuid()) || (getgid() != getegid());
@@ -753,11 +764,11 @@ void os::dll_unload(void *lib) {
}
jlong os::lseek(int fd, jlong offset, int whence) {
- return (jlong) AIX_ONLY(::lseek64) NOT_AIX(::lseek)(fd, offset, whence);
+ return (jlong) ::lseek(fd, offset, whence);
}
int os::ftruncate(int fd, jlong length) {
- return AIX_ONLY(::ftruncate64) NOT_AIX(::ftruncate)(fd, length);
+ return ::ftruncate(fd, length);
}
const char* os::get_current_directory(char *buf, size_t buflen) {
@@ -829,6 +840,14 @@ void os::_exit(int num) {
ALLOW_C_FUNCTION(::_exit, ::_exit(num);)
}
+bool os::dont_yield() {
+ return DontYieldALot;
+}
+
+void os::naked_yield() {
+ sched_yield();
+}
+
// Builds a platform dependent Agent_OnLoad_ function name
// which is used to find statically linked in agents.
// Parameters:
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index a6aa82012e8..3359bd0c63e 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -280,10 +280,12 @@ void os::run_periodic_checks(outputStream* st) {
return;
}
+#ifndef _WIN64
// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = nullptr;
+#endif
-LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
+static LONG WINAPI Uncaught_Exception_Handler(struct _EXCEPTION_POINTERS* exceptionInfo);
void os::init_system_properties_values() {
// sysclasspath, java_home, dll_dir
@@ -397,7 +399,7 @@ void os::init_system_properties_values() {
#ifndef _WIN64
// set our UnhandledExceptionFilter and save any previous one
- prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
+ prev_uef_handler = SetUnhandledExceptionFilter(Uncaught_Exception_Handler);
#endif
// Done
@@ -2491,9 +2493,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
#if defined(_M_AMD64) || defined(_M_IX86)
//-----------------------------------------------------------------------------
-LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
- PCONTEXT ctx = exceptionInfo->ContextRecord;
-#ifndef _WIN64
+static bool handle_FLT_exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
// handle exception caused by native method modifying control word
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
@@ -2504,34 +2504,48 @@ LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
case EXCEPTION_FLT_INVALID_OPERATION:
case EXCEPTION_FLT_OVERFLOW:
case EXCEPTION_FLT_STACK_CHECK:
- case EXCEPTION_FLT_UNDERFLOW:
+ case EXCEPTION_FLT_UNDERFLOW: {
+ PCONTEXT ctx = exceptionInfo->ContextRecord;
+#ifndef _WIN64
jint fp_control_word = (* (jint*) StubRoutines::x86::addr_fpu_cntrl_wrd_std());
if (fp_control_word != ctx->FloatSave.ControlWord) {
// Restore FPCW and mask out FLT exceptions
ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
// Mask out pending FLT exceptions
ctx->FloatSave.StatusWord &= 0xffffff00;
- return EXCEPTION_CONTINUE_EXECUTION;
+ return true;
}
+#else // !_WIN64
+ // On Windows, the mxcsr control bits are non-volatile across calls
+ // See also CR 6192333
+ //
+ jint MxCsr = INITIAL_MXCSR;
+ // we can't use StubRoutines::x86::addr_mxcsr_std()
+ // because in Win64 mxcsr is not saved there
+ if (MxCsr != ctx->MxCsr) {
+ ctx->MxCsr = MxCsr;
+ return true;
+ }
+#endif // !_WIN64
+ }
}
+ return false;
+}
+#endif
+
+#ifndef _WIN64
+static LONG WINAPI Uncaught_Exception_Handler(struct _EXCEPTION_POINTERS* exceptionInfo) {
+ if (handle_FLT_exception(exceptionInfo)) {
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+
+  // we only override this on 32-bit, so only check it there
if (prev_uef_handler != nullptr) {
// We didn't handle this exception so pass it to the previous
// UnhandledExceptionFilter.
return (prev_uef_handler)(exceptionInfo);
}
-#else // !_WIN64
- // On Windows, the mxcsr control bits are non-volatile across calls
- // See also CR 6192333
- //
- jint MxCsr = INITIAL_MXCSR;
- // we can't use StubRoutines::x86::addr_mxcsr_std()
- // because in Win64 mxcsr is not saved there
- if (MxCsr != ctx->MxCsr) {
- ctx->MxCsr = MxCsr;
- return EXCEPTION_CONTINUE_EXECUTION;
- }
-#endif // !_WIN64
return EXCEPTION_CONTINUE_SEARCH;
}
@@ -2788,9 +2802,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
}
#if defined(_M_AMD64) || defined(_M_IX86)
- if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
- LONG result=Handle_FLT_Exception(exceptionInfo);
- if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
+ if ((in_java || in_native) && handle_FLT_exception(exceptionInfo)) {
+ return EXCEPTION_CONTINUE_EXECUTION;
}
#endif
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index 2e603ac0690..30d7833ab96 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -255,7 +255,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub = SharedRuntime::get_handle_wrong_method_stub();
}
- else if ((sig == USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV) &&
+ else if ((sig == (USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV)) &&
// A linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults
// in 64bit mode (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6),
// especially when we try to read from the safepoint polling page. So the check
diff --git a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
index fdb8b340ab9..b3e35d6cc10 100644
--- a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
+++ b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
@@ -35,7 +35,7 @@
#if defined(__clang_major__)
#define FULL_COMPILER_ATOMIC_SUPPORT
-#elif (__GNUC__ > 13) || ((__GNUC__ == 13) && (__GNUC_MINOR__ >= 2))
+#elif (__GNUC__ > 13) || ((__GNUC__ == 13) && (__GNUC_MINOR__ > 2))
#define FULL_COMPILER_ATOMIC_SUPPORT
#endif
@@ -114,6 +114,44 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((
}
#endif
+#ifndef FULL_COMPILER_ATOMIC_SUPPORT
+// The implementation of `__atomic_compare_exchange` lacks sign extension
+// in GCC 13.2 and lower when used with 32-bit unsigned integers on RV64,
+// so we implement it manually here.
+// GCC bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=114130.
+// See also JDK-8326936.
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
+ T compare_value,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(4 == sizeof(T));
+
+ int32_t old_value;
+ uint64_t rc_temp;
+
+ if (order != memory_order_relaxed) {
+ FULL_MEM_BARRIER;
+ }
+
+ __asm__ __volatile__ (
+ "1: lr.w %0, %2 \n\t"
+ " bne %0, %3, 2f \n\t"
+ " sc.w %1, %4, %2 \n\t"
+ " bnez %1, 1b \n\t"
+ "2: \n\t"
+ : /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc_temp), /*%2*/"+A" (*dest)
+ : /*%3*/"r" ((int64_t)(int32_t)compare_value), /*%4*/"r" (exchange_value)
+ : "memory" );
+
+ if (order != memory_order_relaxed) {
+ FULL_MEM_BARRIER;
+ }
+ return (T)old_value;
+}
+#endif
+
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
@@ -148,54 +186,21 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attri
atomic_memory_order order) const {
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
- STATIC_ASSERT(byte_size >= 4);
+ STATIC_ASSERT(byte_size > 4);
#endif
STATIC_ASSERT(byte_size == sizeof(T));
- T value = compare_value;
if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
- __atomic_compare_exchange(dest, &value, &exchange_value, /* weak */ false,
+ __atomic_compare_exchange(dest, &compare_value, &exchange_value, /* weak */ false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
- return value;
-}
-
-template<>
-template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
- T compare_value,
- T exchange_value,
- atomic_memory_order order) const {
- STATIC_ASSERT(4 == sizeof(T));
-
- T old_value;
- long rc;
-
- if (order != memory_order_relaxed) {
- FULL_MEM_BARRIER;
- }
-
- __asm__ __volatile__ (
- "1: sext.w %1, %3 \n\t" // sign-extend compare_value
- " lr.w %0, %2 \n\t"
- " bne %0, %1, 2f \n\t"
- " sc.w %1, %4, %2 \n\t"
- " bnez %1, 1b \n\t"
- "2: \n\t"
- : /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc), /*%2*/"+A" (*dest)
- : /*%3*/"r" (compare_value), /*%4*/"r" (exchange_value)
- : "memory" );
-
- if (order != memory_order_relaxed) {
- FULL_MEM_BARRIER;
- }
- return old_value;
+ return compare_value;
}
template<size_t byte_size>
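
For illustration only (not part of the patch): a portable sketch of why the replacement PlatformCmpxchg<4> casts the compare value through (int64_t)(int32_t). On RV64, lr.w sign-extends the loaded 32-bit word into a 64-bit register, so a zero-extended unsigned compare value would never match a word with the sign bit set, which is the __atomic_compare_exchange issue described in the comment above.

#include <cstdint>
#include <cstdio>

int main() {
  // A 32-bit value with the sign bit set, as it sits in memory.
  uint32_t word = 0x80000001u;

  // lr.w on RV64 sign-extends the loaded word into a 64-bit register.
  int64_t loaded = (int64_t)(int32_t)word;        // 0xFFFFFFFF80000001

  // Zero-extending the unsigned compare value (the buggy behavior) ...
  int64_t zero_extended = (int64_t)word;          // 0x0000000080000001
  // ... makes the 64-bit register compare (bne in the LR/SC loop) fail spuriously:
  printf("zero-extended compare matches: %s\n", loaded == zero_extended ? "yes" : "no");

  // The explicit (int64_t)(int32_t) cast on the asm input operand sign-extends
  // the compare value, so it compares equal to what lr.w loaded:
  int64_t sign_extended = (int64_t)(int32_t)word; // 0xFFFFFFFF80000001
  printf("sign-extended compare matches: %s\n", loaded == sign_extended ? "yes" : "no");
  return 0;
}
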
diff --git a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
index f890bfbdc02..7b4381cb590 100644
--- a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
@@ -108,13 +108,22 @@ void VM_Version::setup_cpu_available_features() {
char buf[1024] = {};
if (uarch != nullptr && strcmp(uarch, "") != 0) {
// Use at max half the buffer.
- snprintf(buf, sizeof(buf)/2, "%s,", uarch);
+ snprintf(buf, sizeof(buf)/2, "%s ", uarch);
}
os::free((void*) uarch);
strcat(buf, "rv64");
int i = 0;
while (_feature_list[i] != nullptr) {
if (_feature_list[i]->enabled()) {
+ // Change flag default
+ _feature_list[i]->update_flag();
+
+      // The feature will be disabled by update_flag() if the flag
+      // is set to false by the user on the command line.
+ if (!_feature_list[i]->enabled()) {
+ continue;
+ }
+
log_debug(os, cpu)("Enabled RV64 feature \"%s\" (%ld)",
_feature_list[i]->pretty(),
_feature_list[i]->value());
@@ -122,13 +131,14 @@ void VM_Version::setup_cpu_available_features() {
if (_feature_list[i]->feature_string()) {
const char* tmp = _feature_list[i]->pretty();
if (strlen(tmp) == 1) {
+ strcat(buf, " ");
strcat(buf, tmp);
} else {
// Feature string is expected to be lower case.
// Turn Zxxx into zxxx
char prebuf[3] = {};
assert(strlen(tmp) > 1, "Must be");
- prebuf[0] = '_';
+ prebuf[0] = ' ';
prebuf[1] = (char)tolower(tmp[0]);
strcat(buf, prebuf);
strcat(buf, &tmp[1]);
@@ -138,8 +148,6 @@ void VM_Version::setup_cpu_available_features() {
if (_feature_list[i]->feature_bit() != 0) {
_features |= _feature_list[i]->feature_bit();
}
- // Change flag default
- _feature_list[i]->update_flag();
}
i++;
}
diff --git a/src/hotspot/share/adlc/adlparse.cpp b/src/hotspot/share/adlc/adlparse.cpp
index 0d2da75906f..e8f67e99c62 100644
--- a/src/hotspot/share/adlc/adlparse.cpp
+++ b/src/hotspot/share/adlc/adlparse.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -5192,7 +5192,7 @@ void ADLParser::skipws_common(bool do_preproc) {
if (*_ptr == '\n') { // keep proper track of new lines
if (!do_preproc) break; // let caller handle the newline
next_line();
- _ptr = _curline; next = _ptr + 1;
+ _ptr = _curline; if (_ptr != nullptr) next = _ptr + 1;
}
else if ((*_ptr == '/') && (*next == '/')) // C++ comment
do { _ptr++; next++; } while(*_ptr != '\n'); // So go to end of line
diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp
index b92bb24cea9..02a42fc0d82 100644
--- a/src/hotspot/share/asm/codeBuffer.hpp
+++ b/src/hotspot/share/asm/codeBuffer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -209,7 +209,7 @@ class CodeSection {
}
void set_locs_point(address pc) {
assert(pc >= locs_point(), "relocation addr may not decrease");
- assert(allocates2(pc), "relocation addr must be in this section");
+ assert(allocates2(pc), "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
_locs_point = pc;
}
diff --git a/src/hotspot/share/c1/c1_FrameMap.cpp b/src/hotspot/share/c1/c1_FrameMap.cpp
index be72b1c1459..8439887a397 100644
--- a/src/hotspot/share/c1/c1_FrameMap.cpp
+++ b/src/hotspot/share/c1/c1_FrameMap.cpp
@@ -72,7 +72,7 @@ CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signa
}
}
- intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
+ intptr_t out_preserve = align_up(SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs), 2);
LIR_OprList* args = new LIR_OprList(signature->length());
for (i = 0; i < sizeargs;) {
BasicType t = sig_bt[i];
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 74208152988..fa02bc3766c 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -120,7 +120,6 @@ ciEnv::ciEnv(CompileTask* task)
_oop_recorder = nullptr;
_debug_info = nullptr;
_dependencies = nullptr;
- _failure_reason = nullptr;
_inc_decompile_count_on_failure = true;
_compilable = MethodCompilable;
_break_at_compile = false;
@@ -249,7 +248,6 @@ ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
_oop_recorder = nullptr;
_debug_info = nullptr;
_dependencies = nullptr;
- _failure_reason = nullptr;
_inc_decompile_count_on_failure = true;
_compilable = MethodCompilable_never;
_break_at_compile = false;
@@ -1232,9 +1230,9 @@ int ciEnv::num_inlined_bytecodes() const {
// ------------------------------------------------------------------
// ciEnv::record_failure()
void ciEnv::record_failure(const char* reason) {
- if (_failure_reason == nullptr) {
+ if (_failure_reason.get() == nullptr) {
// Record the first failure reason.
- _failure_reason = reason;
+ _failure_reason.set(reason);
}
}
@@ -1264,7 +1262,7 @@ void ciEnv::record_method_not_compilable(const char* reason, bool all_tiers) {
_compilable = new_compilable;
// Reset failure reason; this one is more important.
- _failure_reason = nullptr;
+ _failure_reason.clear();
record_failure(reason);
}
}
@@ -1317,13 +1315,7 @@ void ciEnv::record_best_dyno_loc(const InstanceKlass* ik) {
return;
}
const char *loc0;
- if (dyno_loc(ik, loc0)) {
- // TODO: found multiple references, see if we can improve
- if (Verbose) {
- tty->print_cr("existing call site @ %s for %s",
- loc0, ik->external_name());
- }
- } else {
+ if (!dyno_loc(ik, loc0)) {
set_dyno_loc(ik);
}
}
diff --git a/src/hotspot/share/ci/ciEnv.hpp b/src/hotspot/share/ci/ciEnv.hpp
index added1ae358..ef3afa3eb70 100644
--- a/src/hotspot/share/ci/ciEnv.hpp
+++ b/src/hotspot/share/ci/ciEnv.hpp
@@ -33,6 +33,7 @@
#include "code/exceptionHandlerTable.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/compilerThread.hpp"
+#include "compiler/cHeapStringHolder.hpp"
#include "oops/methodData.hpp"
#include "runtime/javaThread.hpp"
@@ -57,7 +58,7 @@ class ciEnv : StackObj {
OopRecorder* _oop_recorder;
DebugInformationRecorder* _debug_info;
Dependencies* _dependencies;
- const char* _failure_reason;
+ CHeapStringHolder _failure_reason;
bool _inc_decompile_count_on_failure;
int _compilable;
bool _break_at_compile;
@@ -319,10 +320,10 @@ class ciEnv : StackObj {
// This is true if the compilation is not going to produce code.
// (It is reasonable to retry failed compilations.)
- bool failing() { return _failure_reason != nullptr; }
+ bool failing() const { return _failure_reason.get() != nullptr; }
// Reason this compilation is failing, such as "too many basic blocks".
- const char* failure_reason() { return _failure_reason; }
+ const char* failure_reason() const { return _failure_reason.get(); }
// Return state of appropriate compatibility
int compilable() { return _compilable; }
diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp
index 282a025c31d..be91f26bdef 100644
--- a/src/hotspot/share/classfile/classLoaderData.cpp
+++ b/src/hotspot/share/classfile/classLoaderData.cpp
@@ -558,9 +558,6 @@ void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
void ClassLoaderData::remove_class(Klass* scratch_class) {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- // Adjust global class iterator.
- ClassLoaderDataGraph::adjust_saved_class(scratch_class);
-
Klass* prev = nullptr;
for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
if (k == scratch_class) {
@@ -602,7 +599,7 @@ void ClassLoaderData::unload() {
free_deallocate_list_C_heap_structures();
// Clean up class dependencies and tell serviceability tools
- // these classes are unloading. Must be called
+ // these classes are unloading. This must be called
// after erroneous classes are released.
classes_do(InstanceKlass::unload_class);
@@ -620,9 +617,6 @@ void ClassLoaderData::unload() {
if (_jmethod_ids != nullptr) {
Method::clear_jmethod_ids(this);
}
-
- // Clean up global class iterator for compiler
- ClassLoaderDataGraph::adjust_saved_class(this);
}
ModuleEntryTable* ClassLoaderData::modules() {
diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp
index 6b5cfdc0664..c9d025aded1 100644
--- a/src/hotspot/share/classfile/classLoaderData.hpp
+++ b/src/hotspot/share/classfile/classLoaderData.hpp
@@ -186,12 +186,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
ClassLoaderData* next() const;
void unlink_next();
- void set_unloading_next(ClassLoaderData* unloading_next);
- ClassLoaderData* unloading_next() const;
-
ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder);
+
+public:
~ClassLoaderData();
+ void set_unloading_next(ClassLoaderData* unloading_next);
+ ClassLoaderData* unloading_next() const;
+ void unload();
+
+private:
// The CLD are not placed in the Heap, so the Card Table or
// the Mod Union Table can't be used to mark when CLD have modified oops.
// The CT and MUT bits saves this information for the whole class loader data.
@@ -203,11 +207,11 @@ class ClassLoaderData : public CHeapObj<mtClass> {
oop holder_no_keepalive() const;
oop holder() const;
+ void classes_do(void f(Klass* const));
+
private:
- void unload();
bool keep_alive() const { return _keep_alive > 0; }
- void classes_do(void f(Klass* const));
void loaded_classes_do(KlassClosure* klass_closure);
void classes_do(void f(InstanceKlass*));
void methods_do(void f(Method*));
diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp
index 5645ab4aafe..2046286651e 100644
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp
@@ -31,6 +31,7 @@
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "code/dependencyContext.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
@@ -80,85 +81,6 @@ void ClassLoaderDataGraph::verify_claimed_marks_cleared(int claim) {
#endif
}
-// Class iterator used by the compiler. It gets some number of classes at
-// a safepoint to decay invocation counters on the methods.
-class ClassLoaderDataGraphKlassIteratorStatic {
- ClassLoaderData* _current_loader_data;
- Klass* _current_class_entry;
- public:
-
- ClassLoaderDataGraphKlassIteratorStatic() : _current_loader_data(nullptr), _current_class_entry(nullptr) {}
-
- InstanceKlass* try_get_next_class() {
- assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
- size_t max_classes = ClassLoaderDataGraph::num_instance_classes();
- assert(max_classes > 0, "should not be called with no instance classes");
- for (size_t i = 0; i < max_classes; ) {
-
- if (_current_class_entry != nullptr) {
- Klass* k = _current_class_entry;
- _current_class_entry = _current_class_entry->next_link();
-
- if (k->is_instance_klass()) {
- InstanceKlass* ik = InstanceKlass::cast(k);
- i++; // count all instance classes found
- // Not yet loaded classes are counted in max_classes
- // but only return loaded classes.
- if (ik->is_loaded()) {
- return ik;
- }
- }
- } else {
- // Go to next CLD
- if (_current_loader_data != nullptr) {
- _current_loader_data = _current_loader_data->next();
- }
- // Start at the beginning
- if (_current_loader_data == nullptr) {
- _current_loader_data = ClassLoaderDataGraph::_head;
- }
-
- _current_class_entry = _current_loader_data->klasses();
- }
- }
- // Should never be reached unless all instance classes have failed or are not fully loaded.
- // Caller handles null.
- return nullptr;
- }
-
- // If the current class for the static iterator is a class being unloaded or
- // deallocated, adjust the current class.
- void adjust_saved_class(ClassLoaderData* cld) {
- if (_current_loader_data == cld) {
- _current_loader_data = cld->next();
- if (_current_loader_data != nullptr) {
- _current_class_entry = _current_loader_data->klasses();
- } // else try_get_next_class will start at the head
- }
- }
-
- void adjust_saved_class(Klass* klass) {
- if (_current_class_entry == klass) {
- _current_class_entry = klass->next_link();
- }
- }
-};
-
-static ClassLoaderDataGraphKlassIteratorStatic static_klass_iterator;
-
-InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
- assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
- return static_klass_iterator.try_get_next_class();
-}
-
-void ClassLoaderDataGraph::adjust_saved_class(ClassLoaderData* cld) {
- return static_klass_iterator.adjust_saved_class(cld);
-}
-
-void ClassLoaderDataGraph::adjust_saved_class(Klass* klass) {
- return static_klass_iterator.adjust_saved_class(klass);
-}
-
void ClassLoaderDataGraph::clean_deallocate_lists(bool walk_previous_versions) {
assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");
uint loaders_processed = 0;
@@ -203,7 +125,6 @@ void ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces() {
// List head of all class loader data.
ClassLoaderData* volatile ClassLoaderDataGraph::_head = nullptr;
-ClassLoaderData* ClassLoaderDataGraph::_unloading_head = nullptr;
bool ClassLoaderDataGraph::_should_clean_deallocate_lists = false;
bool ClassLoaderDataGraph::_safepoint_cleanup_needed = false;
@@ -421,11 +342,7 @@ void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
}
void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
- assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- for (ClassLoaderData* cld = _unloading_head; cld != nullptr; cld = cld->unloading_next()) {
- assert(cld->is_unloading(), "invariant");
- cld->classes_do(f);
- }
+ ClassUnloadingContext::context()->classes_unloading_do(f);
}
void ClassLoaderDataGraph::verify_dictionary() {
@@ -494,7 +411,6 @@ bool ClassLoaderDataGraph::do_unloading() {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
ClassLoaderData* prev = nullptr;
- bool seen_dead_loader = false;
uint loaders_processed = 0;
uint loaders_removed = 0;
@@ -505,8 +421,8 @@ bool ClassLoaderDataGraph::do_unloading() {
} else {
// Found dead CLD.
loaders_removed++;
- seen_dead_loader = true;
- data->unload();
+
+ ClassUnloadingContext::context()->register_unloading_class_loader_data(data);
// Move dead CLD to unloading list.
if (prev != nullptr) {
@@ -516,14 +432,12 @@ bool ClassLoaderDataGraph::do_unloading() {
// The GC might be walking this concurrently
Atomic::store(&_head, data->next());
}
- data->set_unloading_next(_unloading_head);
- _unloading_head = data;
}
}
log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u", loaders_processed, loaders_removed);
- return seen_dead_loader;
+ return loaders_removed != 0;
}
// There's at least one dead class loader. Purge refererences of healthy module
@@ -550,16 +464,9 @@ void ClassLoaderDataGraph::clean_module_and_package_info() {
}
void ClassLoaderDataGraph::purge(bool at_safepoint) {
- ClassLoaderData* list = _unloading_head;
- _unloading_head = nullptr;
- ClassLoaderData* next = list;
- bool classes_unloaded = false;
- while (next != nullptr) {
- ClassLoaderData* purge_me = next;
- next = purge_me->unloading_next();
- delete purge_me;
- classes_unloaded = true;
- }
+ ClassUnloadingContext::context()->purge_class_loader_data();
+
+ bool classes_unloaded = ClassUnloadingContext::context()->has_unloaded_classes();
Metaspace::purge(classes_unloaded);
if (classes_unloaded) {
diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.hpp b/src/hotspot/share/classfile/classLoaderDataGraph.hpp
index c27a03ec671..3de2c10850e 100644
--- a/src/hotspot/share/classfile/classLoaderDataGraph.hpp
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.hpp
@@ -43,8 +43,6 @@ class ClassLoaderDataGraph : public AllStatic {
private:
// All CLDs (except unlinked CLDs) can be reached by walking _head->_next->...
static ClassLoaderData* volatile _head;
- // All unlinked CLDs
- static ClassLoaderData* _unloading_head;
// Set if there's anything to purge in the deallocate lists or previous versions
// during a safepoint after class unloading in a full GC.
@@ -97,11 +95,6 @@ class ClassLoaderDataGraph : public AllStatic {
// Called from VMOperation
static void walk_metadata_and_clean_metaspaces();
- // VM_CounterDecay iteration support
- static InstanceKlass* try_get_next_class();
- static void adjust_saved_class(ClassLoaderData* cld);
- static void adjust_saved_class(Klass* klass);
-
static void verify_dictionary();
static void print_dictionary(outputStream* st);
static void print_table_statistics(outputStream* st);
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index 610c6baa1da..24ea8ab77f8 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -1986,24 +1986,28 @@ int java_lang_VirtualThread::state(oop vthread) {
JavaThreadStatus java_lang_VirtualThread::map_state_to_thread_status(int state) {
JavaThreadStatus status = JavaThreadStatus::NEW;
- switch (state) {
- case NEW :
+ switch (state & ~SUSPENDED) {
+ case NEW:
status = JavaThreadStatus::NEW;
break;
- case STARTED :
- case RUNNABLE :
- case RUNNABLE_SUSPENDED :
- case RUNNING :
- case PARKING :
- case YIELDING :
+ case STARTED:
+ case RUNNING:
+ case PARKING:
+ case TIMED_PARKING:
+ case UNPARKED:
+ case YIELDING:
+ case YIELDED:
status = JavaThreadStatus::RUNNABLE;
break;
- case PARKED :
- case PARKED_SUSPENDED :
- case PINNED :
+ case PARKED:
+ case PINNED:
status = JavaThreadStatus::PARKED;
break;
- case TERMINATED :
+ case TIMED_PARKED:
+ case TIMED_PINNED:
+ status = JavaThreadStatus::PARKED_TIMED;
+ break;
+ case TERMINATED:
status = JavaThreadStatus::TERMINATED;
break;
default:
diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp
index 851ec68416e..ab0b8f444b4 100644
--- a/src/hotspot/share/classfile/javaClasses.hpp
+++ b/src/hotspot/share/classfile/javaClasses.hpp
@@ -519,20 +519,22 @@ class java_lang_VirtualThread : AllStatic {
JFR_ONLY(static int _jfr_epoch_offset;)
public:
enum {
- NEW = 0,
- STARTED = 1,
- RUNNABLE = 2,
- RUNNING = 3,
- PARKING = 4,
- PARKED = 5,
- PINNED = 6,
- YIELDING = 7,
- TERMINATED = 99,
-
- // can be suspended from scheduling when unmounted
- SUSPENDED = 1 << 8,
- RUNNABLE_SUSPENDED = (RUNNABLE | SUSPENDED),
- PARKED_SUSPENDED = (PARKED | SUSPENDED)
+ NEW = 0,
+ STARTED = 1,
+ RUNNING = 2,
+ PARKING = 3,
+ PARKED = 4,
+ PINNED = 5,
+ TIMED_PARKING = 6,
+ TIMED_PARKED = 7,
+ TIMED_PINNED = 8,
+ UNPARKED = 9,
+ YIELDING = 10,
+ YIELDED = 11,
+ TERMINATED = 99,
+
+ // additional state bits
+ SUSPENDED = 1 << 8, // suspended when unmounted
};
static void compute_offsets();
diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp
index afb807065ab..40d63419e7c 100644
--- a/src/hotspot/share/code/codeBlob.cpp
+++ b/src/hotspot/share/code/codeBlob.cpp
@@ -164,7 +164,7 @@ RuntimeBlob::RuntimeBlob(
void RuntimeBlob::free(RuntimeBlob* blob) {
assert(blob != nullptr, "caller must check for nullptr");
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
- blob->flush();
+ blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */);
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(blob);
@@ -173,9 +173,11 @@ void RuntimeBlob::free(RuntimeBlob* blob) {
MemoryService::track_code_cache_memory_usage();
}
-void CodeBlob::flush() {
- FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
- _oop_maps = nullptr;
+void CodeBlob::purge(bool free_code_cache_data, bool unregister_nmethod) {
+ if (_oop_maps != nullptr) {
+ delete _oop_maps;
+ _oop_maps = nullptr;
+ }
NOT_PRODUCT(_asm_remarks.clear());
NOT_PRODUCT(_dbg_strings.clear());
}
@@ -190,7 +192,6 @@ void CodeBlob::set_oop_maps(OopMapSet* p) {
}
}
-
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
// Do not hold the CodeCache lock during name formatting.
assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp
index 6c40fe992ae..c1c34a06c75 100644
--- a/src/hotspot/share/code/codeBlob.hpp
+++ b/src/hotspot/share/code/codeBlob.hpp
@@ -143,7 +143,7 @@ class CodeBlob {
static unsigned int align_code_offset(int offset);
// Deletion
- virtual void flush();
+ virtual void purge(bool free_code_cache_data, bool unregister_nmethod);
// Typing
virtual bool is_buffer_blob() const { return false; }
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 3bc1db70251..9b0bdc3643d 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -37,6 +37,7 @@
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm_io.h"
@@ -606,7 +607,7 @@ void CodeCache::free(CodeBlob* cb) {
cb->~CodeBlob();
// Get heap for given CodeBlob and deallocate
- get_code_heap(cb)->deallocate(cb);
+ heap->deallocate(cb);
assert(heap->blob_count() >= 0, "sanity check");
}
@@ -970,36 +971,8 @@ void CodeCache::purge_exception_caches() {
_exception_cache_purge_list = nullptr;
}
-// Register an is_unloading nmethod to be flushed after unlinking
-void CodeCache::register_unlinked(nmethod* nm) {
- assert(nm->unlinked_next() == nullptr, "Only register for unloading once");
- for (;;) {
- // Only need acquire when reading the head, when the next
- // pointer is walked, which it is not here.
- nmethod* head = Atomic::load(&_unlinked_head);
- nmethod* next = head != nullptr ? head : nm; // Self looped means end of list
- nm->set_unlinked_next(next);
- if (Atomic::cmpxchg(&_unlinked_head, head, nm) == head) {
- break;
- }
- }
-}
-
-// Flush all the nmethods the GC unlinked
-void CodeCache::flush_unlinked_nmethods() {
- nmethod* nm = _unlinked_head;
- _unlinked_head = nullptr;
- size_t freed_memory = 0;
- while (nm != nullptr) {
- nmethod* next = nm->unlinked_next();
- freed_memory += nm->total_size();
- nm->flush();
- if (next == nm) {
- // Self looped means end of list
- break;
- }
- nm = next;
- }
+// Restart the compiler if possible and required.
+void CodeCache::maybe_restart_compiler(size_t freed_memory) {
// Try to start the compiler again if we freed any memory
if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
@@ -1013,7 +986,6 @@ void CodeCache::flush_unlinked_nmethods() {
}
uint8_t CodeCache::_unloading_cycle = 1;
-nmethod* volatile CodeCache::_unlinked_head = nullptr;
void CodeCache::increment_unloading_cycle() {
// 2-bit value (see IsUnloadingState in nmethod.cpp for details)
@@ -1024,7 +996,7 @@ void CodeCache::increment_unloading_cycle() {
}
}
-CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
+CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive)
: _is_unloading_behaviour(is_alive)
{
_saved_behaviour = IsUnloadingBehaviour::current();
@@ -1033,10 +1005,9 @@ CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
DependencyContext::cleaning_start();
}
-CodeCache::UnloadingScope::~UnloadingScope() {
+CodeCache::UnlinkingScope::~UnlinkingScope() {
IsUnloadingBehaviour::set_current(_saved_behaviour);
DependencyContext::cleaning_end();
- CodeCache::flush_unlinked_nmethods();
}
void CodeCache::verify_oops() {
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index 8abc4043ae6..fbcaefd2a62 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -106,7 +106,6 @@ class CodeCache : AllStatic {
static TruncatedSeq _unloading_gc_intervals;
static TruncatedSeq _unloading_allocation_rates;
static volatile bool _unloading_threshold_gc_requested;
- static nmethod* volatile _unlinked_head;
static ExceptionCache* volatile _exception_cache_purge_list;
@@ -179,17 +178,17 @@ class CodeCache : AllStatic {
// GC support
static void verify_oops();
- // If any oops are not marked this method unloads (i.e., breaks root links
- // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
- // to "true" iff some code got unloaded.
- // "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
- class UnloadingScope: StackObj {
+
+ // Helper scope object managing code cache unlinking behavior, i.e. sets and
+ // restores the closure that determines which nmethods are going to be removed
+ // during the unlinking part of code cache unloading.
+ class UnlinkingScope : StackObj {
ClosureIsUnloadingBehaviour _is_unloading_behaviour;
IsUnloadingBehaviour* _saved_behaviour;
public:
- UnloadingScope(BoolObjectClosure* is_alive);
- ~UnloadingScope();
+ UnlinkingScope(BoolObjectClosure* is_alive);
+ ~UnlinkingScope();
};
// Code cache unloading heuristics
@@ -213,8 +212,7 @@ class CodeCache : AllStatic {
// nmethod::is_cold.
static void arm_all_nmethods();
- static void flush_unlinked_nmethods();
- static void register_unlinked(nmethod* nm);
+ static void maybe_restart_compiler(size_t freed_memory);
static void do_unloading(bool unloading_occurred);
static uint8_t unloading_cycle() { return _unloading_cycle; }
diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index c6294ff5627..b966b580984 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -561,15 +561,14 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
bool CompiledIC::is_icholder_entry(address entry) {
CodeBlob* cb = CodeCache::find_blob(entry);
- if (cb != nullptr && cb->is_adapter_blob()) {
- return true;
+ if (cb == nullptr) {
+ return false;
}
- // itable stubs also use CompiledICHolder
- if (cb != nullptr && cb->is_vtable_blob()) {
- VtableStub* s = VtableStubs::entry_point(entry);
- return (s != nullptr) && s->is_itable_stub();
+ if (cb->is_adapter_blob()) {
+ return true;
+ } else if (cb->is_vtable_blob()) {
+ return VtableStubs::is_icholder_entry(entry);
}
-
return false;
}
diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp
index 912ca1b3f88..ca441d9ae64 100644
--- a/src/hotspot/share/code/compiledMethod.hpp
+++ b/src/hotspot/share/code/compiledMethod.hpp
@@ -174,7 +174,7 @@ class CompiledMethod : public CodeBlob {
void* _gc_data;
- virtual void flush() = 0;
+ virtual void purge(bool free_code_cache_data, bool unregister_nmethod) = 0;
private:
DeoptimizationStatus deoptimization_status() const {
diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp
index 904b0927014..d7ce8e92acf 100644
--- a/src/hotspot/share/code/dependencyContext.cpp
+++ b/src/hotspot/share/code/dependencyContext.cpp
@@ -70,36 +70,30 @@ void DependencyContext::init() {
void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) {
for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
nmethod* nm = b->get_nmethod();
- if (b->count() > 0) {
- if (nm->is_marked_for_deoptimization()) {
- deopt_scope->dependent(nm);
- } else if (nm->check_dependency_on(changes)) {
- LogTarget(Info, dependencies) lt;
- if (lt.is_enabled()) {
- ResourceMark rm;
- LogStream ls(&lt);
- ls.print_cr("Marked for deoptimization");
- changes.print_on(&ls);
- nm->print_on(&ls);
- nm->print_dependencies_on(&ls);
- }
- deopt_scope->mark(nm, !changes.is_call_site_change());
+ if (nm->is_marked_for_deoptimization()) {
+ deopt_scope->dependent(nm);
+ } else if (nm->check_dependency_on(changes)) {
+ LogTarget(Info, dependencies) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(<);
+ ls.print_cr("Marked for deoptimization");
+ changes.print_on(&ls);
+ nm->print_on(&ls);
+ nm->print_dependencies_on(&ls);
}
+ deopt_scope->mark(nm, !changes.is_call_site_change());
}
}
}
//
// Add an nmethod to the dependency context.
-// It's possible that an nmethod has multiple dependencies on a klass
-// so a count is kept for each bucket to guarantee that creation and
-// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
assert_lock_strong(CodeCache_lock);
for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
if (nm == b->get_nmethod()) {
- b->increment();
return;
}
}
@@ -117,8 +111,7 @@ void DependencyContext::add_dependent_nmethod(nmethod* nm) {
}
void DependencyContext::release(nmethodBucket* b) {
- bool expunge = Atomic::load(&_cleaning_epoch) == 0;
- if (expunge) {
+ if (delete_on_release()) {
assert_locked_or_safepoint(CodeCache_lock);
delete b;
if (UsePerfData) {
@@ -184,9 +177,41 @@ nmethodBucket* DependencyContext::release_and_get_next_not_unloading(nmethodBuck
//
// Invalidate all dependencies in the context
void DependencyContext::remove_all_dependents() {
- nmethodBucket* b = dependencies_not_unloading();
+ // Assume that the dependency is not deleted immediately but moved into the
+ // purge list when calling this.
+ assert(!delete_on_release(), "should not delete on release");
+
+ nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr);
+ if (first == nullptr) {
+ return;
+ }
+
+ nmethodBucket* cur = first;
+ nmethodBucket* last = cur;
+ jlong count = 0;
+ for (; cur != nullptr; cur = cur->next()) {
+ assert(cur->get_nmethod()->is_unloading(), "must be");
+ last = cur;
+ count++;
+ }
+
+ // Add the whole list to the purge list at once.
+ nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list);
+ for (;;) {
+ last->set_purge_list_next(old_purge_list_head);
+ nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first);
+ if (old_purge_list_head == next_purge_list_head) {
+ break;
+ }
+ old_purge_list_head = next_purge_list_head;
+ }
+
+ if (UsePerfData) {
+ _perf_total_buckets_stale_count->inc(count);
+ _perf_total_buckets_stale_acc_count->inc(count);
+ }
+
set_dependencies(nullptr);
- assert(b == nullptr, "All dependents should be unloading");
}
void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope) {
@@ -194,11 +219,9 @@ void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(Deopti
set_dependencies(nullptr);
while (b != nullptr) {
nmethod* nm = b->get_nmethod();
- if (b->count() > 0) {
- // Also count already (concurrently) marked nmethods to make sure
- // deoptimization is triggered before execution in this thread continues.
- deopt_scope->mark(nm);
- }
+ // Also count already (concurrently) marked nmethods to make sure
+ // deoptimization is triggered before execution in this thread continues.
+ deopt_scope->mark(nm);
b = release_and_get_next_not_unloading(b);
}
}
@@ -208,7 +231,7 @@ void DependencyContext::print_dependent_nmethods(bool verbose) {
int idx = 0;
for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
nmethod* nm = b->get_nmethod();
- tty->print("[%d] count=%d { ", idx++, b->count());
+ tty->print("[%d] { ", idx++);
if (!verbose) {
nm->print_on(tty, "nmethod");
tty->print_cr(" } ");
@@ -224,20 +247,12 @@ void DependencyContext::print_dependent_nmethods(bool verbose) {
bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
if (nm == b->get_nmethod()) {
-#ifdef ASSERT
- int count = b->count();
- assert(count >= 0, "count shouldn't be negative: %d", count);
-#endif
return true;
}
}
return false;
}
-int nmethodBucket::decrement() {
- return Atomic::sub(&_count, 1);
-}
-
// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
@@ -250,6 +265,10 @@ bool DependencyContext::claim_cleanup() {
return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}
+bool DependencyContext::delete_on_release() {
+ return Atomic::load(&_cleaning_epoch) == 0;
+}
+
// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
diff --git a/src/hotspot/share/code/dependencyContext.hpp b/src/hotspot/share/code/dependencyContext.hpp
index 972a593f82e..e8d2ac41d0d 100644
--- a/src/hotspot/share/code/dependencyContext.hpp
+++ b/src/hotspot/share/code/dependencyContext.hpp
@@ -39,27 +39,19 @@ class DepChange;
// nmethodBucket is used to record dependent nmethods for
// deoptimization. nmethod dependencies are actually
// pairs but we really only care about the klass part for purposes of
-// finding nmethods which might need to be deoptimized. Instead of
-// recording the method, a count of how many times a particular nmethod
-// was recorded is kept. This ensures that any recording errors are
-// noticed since an nmethod should be removed as many times are it's
-// added.
+// finding nmethods which might need to be deoptimized.
//
class nmethodBucket: public CHeapObj<mtClass> {
friend class VMStructs;
private:
nmethod* _nmethod;
- volatile int _count;
nmethodBucket* volatile _next;
nmethodBucket* volatile _purge_list_next;
public:
nmethodBucket(nmethod* nmethod, nmethodBucket* next) :
- _nmethod(nmethod), _count(1), _next(next), _purge_list_next(nullptr) {}
+ _nmethod(nmethod), _next(next), _purge_list_next(nullptr) {}
- int count() { return _count; }
- int increment() { _count += 1; return _count; }
- int decrement();
nmethodBucket* next();
nmethodBucket* next_not_unloading();
void set_next(nmethodBucket* b);
@@ -83,6 +75,7 @@ class DependencyContext : public StackObj {
volatile uint64_t* _last_cleanup_addr;
bool claim_cleanup();
+ static bool delete_on_release();
void set_dependencies(nmethodBucket* b);
nmethodBucket* dependencies();
nmethodBucket* dependencies_not_unloading();
diff --git a/src/hotspot/share/code/icBuffer.cpp b/src/hotspot/share/code/icBuffer.cpp
index 520a8c75259..dbceefb7c8e 100644
--- a/src/hotspot/share/code/icBuffer.cpp
+++ b/src/hotspot/share/code/icBuffer.cpp
@@ -34,6 +34,7 @@
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutexLocker.hpp"
@@ -44,8 +45,8 @@ DEF_STUB_INTERFACE(ICStub);
StubQueue* InlineCacheBuffer::_buffer = nullptr;
-CompiledICHolder* InlineCacheBuffer::_pending_released = nullptr;
-int InlineCacheBuffer::_pending_count = 0;
+CompiledICHolder* volatile InlineCacheBuffer::_pending_released = nullptr;
+volatile int InlineCacheBuffer::_pending_count = 0;
#ifdef ASSERT
ICRefillVerifier::ICRefillVerifier()
@@ -247,26 +248,42 @@ void* InlineCacheBuffer::cached_value_for(CompiledIC *ic) {
// Free CompiledICHolder*s that are no longer in use
void InlineCacheBuffer::release_pending_icholders() {
assert(SafepointSynchronize::is_at_safepoint(), "should only be called during a safepoint");
- CompiledICHolder* holder = _pending_released;
+ CompiledICHolder* holder = Atomic::load(&_pending_released);
_pending_released = nullptr;
+ int count = 0;
while (holder != nullptr) {
CompiledICHolder* next = holder->next();
delete holder;
holder = next;
- _pending_count--;
+ count++;
}
- assert(_pending_count == 0, "wrong count");
+ assert(pending_icholder_count() == count, "wrong count");
+ Atomic::store(&_pending_count, 0);
}
// Enqueue this icholder for release during the next safepoint. It's
-// not safe to free them until them since they might be visible to
+// not safe to free them until then since they might be visible to
// another thread.
void InlineCacheBuffer::queue_for_release(CompiledICHolder* icholder) {
- MutexLocker mex(InlineCacheBuffer_lock, Mutex::_no_safepoint_check_flag);
- icholder->set_next(_pending_released);
- _pending_released = icholder;
- _pending_count++;
+ assert(icholder->next() == nullptr, "multiple enqueue?");
+
+ CompiledICHolder* old = Atomic::load(&_pending_released);
+ for (;;) {
+ icholder->set_next(old);
+    // The only reader runs serially at a safepoint, so no stricter memory ordering is needed.
+ CompiledICHolder* cur = Atomic::cmpxchg(&_pending_released, old, icholder, memory_order_relaxed);
+ if (cur == old) {
+ break;
+ }
+ old = cur;
+ }
+ Atomic::inc(&_pending_count, memory_order_relaxed);
+
if (TraceICBuffer) {
tty->print_cr("enqueueing icholder " INTPTR_FORMAT " to be freed", p2i(icholder));
}
}
+
+int InlineCacheBuffer::pending_icholder_count() {
+ return Atomic::load(&_pending_count);
+}
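
For illustration only (not part of the patch): queue_for_release() above replaces the InlineCacheBuffer_lock critical section with a lock-free prepend onto _pending_released. A minimal stand-alone sketch of that retry pattern, using std::atomic as a stand-in for HotSpot's Atomic::cmpxchg:

#include <atomic>
#include <cstdio>

struct Node {
  int   payload;
  Node* next;
};

// Head of the pending list (plays the role of _pending_released).
static std::atomic<Node*> pending_head{nullptr};

// Link the new node to the currently observed head, then CAS the head;
// if another thread won the race, re-read the head and retry.
static void push(Node* n) {
  Node* old_head = pending_head.load(std::memory_order_relaxed);
  for (;;) {
    n->next = old_head;
    if (pending_head.compare_exchange_weak(old_head, n,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)) {
      break;  // success: n is now the head
    }
    // failure: old_head was refreshed by compare_exchange_weak; retry
  }
}

int main() {
  Node a{1, nullptr};
  Node b{2, nullptr};
  push(&a);
  push(&b);
  for (Node* n = pending_head.load(); n != nullptr; n = n->next) {
    printf("%d\n", n->payload);  // prints 2 then 1 (LIFO order)
  }
  return 0;
}
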
diff --git a/src/hotspot/share/code/icBuffer.hpp b/src/hotspot/share/code/icBuffer.hpp
index d385b99d59d..c2da3abdca3 100644
--- a/src/hotspot/share/code/icBuffer.hpp
+++ b/src/hotspot/share/code/icBuffer.hpp
@@ -146,8 +146,8 @@ class InlineCacheBuffer: public AllStatic {
static StubQueue* _buffer;
- static CompiledICHolder* _pending_released;
- static int _pending_count;
+ static CompiledICHolder* volatile _pending_released;
+ static volatile int _pending_count;
static StubQueue* buffer() { return _buffer; }
@@ -176,7 +176,7 @@ class InlineCacheBuffer: public AllStatic {
static void release_pending_icholders();
static void queue_for_release(CompiledICHolder* icholder);
- static int pending_icholder_count() { return _pending_count; }
+ static int pending_icholder_count();
// New interface
static bool create_transition_stub(CompiledIC *ic, void* cached_value, address entry);
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index fb2e5bd78b0..c07b5e28c17 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -42,6 +42,7 @@
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "jvm.h"
@@ -639,7 +640,7 @@ nmethod::nmethod(
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps )
: CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
- _unlinked_next(nullptr),
+ _is_unlinked(false),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
_native_basic_lock_sp_offset(basic_lock_sp_offset),
_is_unloading_state(0)
@@ -783,7 +784,7 @@ nmethod::nmethod(
#endif
)
: CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
- _unlinked_next(nullptr),
+ _is_unlinked(false),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1)),
_is_unloading_state(0)
@@ -1406,7 +1407,7 @@ bool nmethod::make_not_entrant() {
// For concurrent GCs, there must be a handshake between unlink and flush
void nmethod::unlink() {
- if (_unlinked_next != nullptr) {
+ if (_is_unlinked) {
// Already unlinked. It can be invoked twice because concurrent code cache
// unloading might need to restart when inline cache cleaning fails due to
// running out of ICStubs, which can only be refilled at safepoints
@@ -1440,14 +1441,16 @@ void nmethod::unlink() {
// Register for flushing when it is safe. For concurrent class unloading,
// that would be after the unloading handshake, and for STW class unloading
// that would be when getting back to the VM thread.
- CodeCache::register_unlinked(this);
+ ClassUnloadingContext::context()->register_unlinked_nmethod(this);
}
-void nmethod::flush() {
+void nmethod::purge(bool free_code_cache_data, bool unregister_nmethod) {
+ assert(!free_code_cache_data, "must only call not freeing code cache data");
+
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// completely deallocate this method
- Events::log(Thread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
+ Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
"/Free CodeCache:" SIZE_FORMAT "Kb",
is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
@@ -1463,11 +1466,13 @@ void nmethod::flush() {
ec = next;
}
- Universe::heap()->unregister_nmethod(this);
+ if (unregister_nmethod) {
+ Universe::heap()->unregister_nmethod(this);
+ }
+
CodeCache::unregister_old_nmethod(this);
- CodeBlob::flush();
- CodeCache::free(this);
+ CodeBlob::purge(free_code_cache_data, unregister_nmethod);
}
oop nmethod::oop_at(int index) const {
@@ -1700,7 +1705,7 @@ class IsUnloadingState: public AllStatic {
};
bool nmethod::is_unloading() {
- uint8_t state = RawAccess<MO_RELAXED>::load(&_is_unloading_state);
+ uint8_t state = Atomic::load(&_is_unloading_state);
bool state_is_unloading = IsUnloadingState::is_unloading(state);
if (state_is_unloading) {
return true;
@@ -1736,7 +1741,7 @@ bool nmethod::is_unloading() {
void nmethod::clear_unloading_state() {
uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
- RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
+ Atomic::store(&_is_unloading_state, state);
}
@@ -3059,7 +3064,7 @@ void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bo
assert(sig_index == sizeargs, "");
}
const char* spname = "sp"; // make arch-specific?
- intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
+ SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
int stack_slot_offset = this->frame_size() * wordSize;
int tab1 = 14, tab2 = 24;
int sig_index = 0;
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index cbe2bbb65a9..f428aa4ef3d 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -196,7 +196,7 @@ class nmethod : public CompiledMethod {
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
- nmethod* _unlinked_next;
+ bool _is_unlinked;
// Shared fields for all nmethod's
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
@@ -441,8 +441,8 @@ class nmethod : public CompiledMethod {
virtual bool is_unloading();
virtual void do_unloading(bool unloading_occurred);
- nmethod* unlinked_next() const { return _unlinked_next; }
- void set_unlinked_next(nmethod* next) { _unlinked_next = next; }
+ bool is_unlinked() const { return _is_unlinked; }
+ void set_is_unlinked() { assert(!_is_unlinked, "already unlinked"); _is_unlinked = true; }
#if INCLUDE_RTM_OPT
// rtm state accessing and manipulating
@@ -522,7 +522,7 @@ class nmethod : public CompiledMethod {
void unlink();
// Deallocate this nmethod - called by the GC
- void flush();
+ void purge(bool free_code_cache_data, bool unregister_nmethod);
// See comment at definition of _last_seen_on_stack
void mark_as_maybe_on_stack();
diff --git a/src/hotspot/share/code/vtableStubs.cpp b/src/hotspot/share/code/vtableStubs.cpp
index 774a81f569b..c8b58dd459c 100644
--- a/src/hotspot/share/code/vtableStubs.cpp
+++ b/src/hotspot/share/code/vtableStubs.cpp
@@ -101,8 +101,7 @@ void VtableStub::print() const { print_on(tty); }
// hash value). Each list is anchored in a little hash _table, indexed
// by that hash value.
-VtableStub* VtableStubs::_table[VtableStubs::N];
-int VtableStubs::_number_of_vtable_stubs = 0;
+VtableStub* volatile VtableStubs::_table[VtableStubs::N];
int VtableStubs::_vtab_stub_size = 0;
int VtableStubs::_itab_stub_size = 0;
@@ -136,13 +135,13 @@ int VtableStubs::_itab_stub_size = 0;
void VtableStubs::initialize() {
+ assert(VtableStub::_receiver_location == VMRegImpl::Bad(), "initialized multiple times?");
+
VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
{
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
- assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
- assert(is_power_of_2(int(N)), "N must be a power of 2");
for (int i = 0; i < N; i++) {
- _table[i] = nullptr;
+ Atomic::store(&_table[i], (VtableStub*)nullptr);
}
}
}
@@ -269,7 +268,7 @@ inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
assert_lock_strong(VtableStubs_lock);
unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
- VtableStub* s = _table[hash];
+ VtableStub* s = Atomic::load(&_table[hash]);
while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
return s;
}
@@ -279,10 +278,10 @@ void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
assert_lock_strong(VtableStubs_lock);
assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
- // enter s at the beginning of the corresponding list
- s->set_next(_table[h]);
- _table[h] = s;
- _number_of_vtable_stubs++;
+ // Insert s at the beginning of the corresponding list.
+ s->set_next(Atomic::load(&_table[h]));
+ // Make sure that concurrent readers not taking the mutex observe the writing of "next".
+ Atomic::release_store(&_table[h], s);
}
VtableStub* VtableStubs::entry_point(address pc) {
@@ -290,10 +289,17 @@ VtableStub* VtableStubs::entry_point(address pc) {
VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
VtableStub* s;
- for (s = _table[hash]; s != nullptr && s != stub; s = s->next()) {}
+ for (s = Atomic::load(&_table[hash]); s != nullptr && s != stub; s = s->next()) {}
return (s == stub) ? s : nullptr;
}
+bool VtableStubs::is_icholder_entry(address pc) {
+ assert(contains(pc), "must contain all vtable blobs");
+ VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
+ // itable stubs use CompiledICHolder.
+ return stub->is_itable_stub();
+}
+
bool VtableStubs::contains(address pc) {
// simple solution for now - we may want to use
// a faster way if this function is called often
@@ -302,11 +308,8 @@ bool VtableStubs::contains(address pc) {
VtableStub* VtableStubs::stub_containing(address pc) {
- // Note: No locking needed since any change to the data structure
- // happens with an atomic store into it (we don't care about
- // consistency with the _number_of_vtable_stubs counter).
for (int i = 0; i < N; i++) {
- for (VtableStub* s = _table[i]; s != nullptr; s = s->next()) {
+ for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
if (s->contains(pc)) return s;
}
}
@@ -318,11 +321,11 @@ void vtableStubs_init() {
}
void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
- for (int i = 0; i < N; i++) {
- for (VtableStub* s = _table[i]; s != nullptr; s = s->next()) {
- f(s);
- }
+ for (int i = 0; i < N; i++) {
+ for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
+ f(s);
}
+ }
}
diff --git a/src/hotspot/share/code/vtableStubs.hpp b/src/hotspot/share/code/vtableStubs.hpp
index bd037e62cce..0a7308d9038 100644
--- a/src/hotspot/share/code/vtableStubs.hpp
+++ b/src/hotspot/share/code/vtableStubs.hpp
@@ -79,10 +79,11 @@ class VtableStubs : AllStatic {
mask = N - 1
};
+ static_assert(is_power_of_2((int)N), "N must be a power of 2");
+
private:
friend class VtableStub;
- static VtableStub* _table[N]; // table of existing stubs
- static int _number_of_vtable_stubs; // number of stubs created so far (for statistics)
+ static VtableStub* volatile _table[N]; // table of existing stubs
static int _vtab_stub_size; // current size estimate for vtable stub (quasi-constant)
static int _itab_stub_size; // current size estimate for itable stub (quasi-constant)
@@ -105,9 +106,9 @@ class VtableStubs : AllStatic {
static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
static VtableStub* entry_point(address pc); // vtable stub entry point for a pc
+ static bool is_icholder_entry(address pc); // is the blob containing pc (which must be a vtable blob) an icholder?
static bool contains(address pc); // is pc within any stub?
static VtableStub* stub_containing(address pc); // stub containing pc or nullptr
- static int number_of_vtable_stubs() { return _number_of_vtable_stubs; }
static void initialize();
static void vtable_stub_do(void f(VtableStub*)); // iterates over all vtable stubs
};
diff --git a/src/hotspot/share/compiler/cHeapStringHolder.cpp b/src/hotspot/share/compiler/cHeapStringHolder.cpp
new file mode 100644
index 00000000000..0383e738a47
--- /dev/null
+++ b/src/hotspot/share/compiler/cHeapStringHolder.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "compiler/cHeapStringHolder.hpp"
+
+void CHeapStringHolder::set(const char* string) {
+ clear();
+ if (string != nullptr) {
+ size_t len = strlen(string);
+ _string = NEW_C_HEAP_ARRAY(char, len + 1, mtCompiler);
+ ::memcpy(_string, string, len);
+ _string[len] = 0; // terminating null
+ }
+}
+
+void CHeapStringHolder::clear() {
+ if (_string != nullptr) {
+ FREE_C_HEAP_ARRAY(char, _string);
+ _string = nullptr;
+ }
+}
diff --git a/src/hotspot/share/compiler/cHeapStringHolder.hpp b/src/hotspot/share/compiler/cHeapStringHolder.hpp
new file mode 100644
index 00000000000..46659c2a521
--- /dev/null
+++ b/src/hotspot/share/compiler/cHeapStringHolder.hpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_COMPILER_CHEAPSTRINGHOLDER_HPP
+#define SHARE_COMPILER_CHEAPSTRINGHOLDER_HPP
+
+#include "memory/allocation.hpp"
+
+// Holder for a C-Heap allocated String
+// The user must ensure that the destructor is called, or at least clear.
+class CHeapStringHolder : public StackObj {
+private:
+ char* _string;
+
+public:
+ CHeapStringHolder() : _string(nullptr) {}
+ ~CHeapStringHolder() { clear(); };
+ NONCOPYABLE(CHeapStringHolder);
+
+ // Allocate memory to hold a copy of string
+ void set(const char* string);
+
+ // Release allocated memory
+ void clear();
+
+ const char* get() const { return _string; };
+};
+
+#endif // SHARE_COMPILER_CHEAPSTRINGHOLDER_HPP
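
A rough standalone analogue of the holder declared above, with new[]/delete[] standing in for NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY so it compiles outside HotSpot; the StringHolder name and the example strings are invented. The point is the ownership rule the class encodes: set() takes a private copy, clear() or the destructor releases it, and get() never transfers ownership, so the copy can outlive a transient source string, the same lifetime concern the compileBroker.cpp hunk below addresses with os::strdup.

// Standalone owning string holder mirroring set()/clear()/get().
#include <cstdio>
#include <cstring>

class StringHolder {
  char* _string = nullptr;
public:
  StringHolder() = default;
  ~StringHolder() { clear(); }
  StringHolder(const StringHolder&) = delete;             // non-copyable, like NONCOPYABLE
  StringHolder& operator=(const StringHolder&) = delete;

  void set(const char* s) {
    clear();
    if (s != nullptr) {
      std::size_t len = std::strlen(s);
      _string = new char[len + 1];
      std::memcpy(_string, s, len);
      _string[len] = '\0';                                // terminating null
    }
  }
  void clear() { delete[] _string; _string = nullptr; }
  const char* get() const { return _string; }
};

int main() {
  StringHolder failure_reason;
  {
    char transient[] = "compilation bailout";             // dies at the end of this scope
    failure_reason.set(transient);                        // holder keeps its own copy
  }
  std::printf("reason: %s\n", failure_reason.get());      // copy is still valid
  return 0;
}
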
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 8a83a864f62..9cf5d2ab7bb 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -1775,7 +1775,7 @@ bool CompileBroker::init_compiler_runtime() {
void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) {
BufferBlob* blob = thread->get_buffer_blob();
if (blob != nullptr) {
- blob->flush();
+ blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(blob);
}
@@ -2289,7 +2289,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
compilable = ci_env.compilable();
if (ci_env.failing()) {
- failure_reason = ci_env.failure_reason();
+ // Duplicate the failure reason string, so that it outlives ciEnv
+ failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler);
+ failure_reason_on_C_heap = true;
retry_message = ci_env.retry_message();
ci_env.report_failure(failure_reason);
}
diff --git a/src/hotspot/share/compiler/oopMap.cpp b/src/hotspot/share/compiler/oopMap.cpp
index 58e9daa43a5..ede9f91025f 100644
--- a/src/hotspot/share/compiler/oopMap.cpp
+++ b/src/hotspot/share/compiler/oopMap.cpp
@@ -871,6 +871,9 @@ ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set)
return builder.build();
}
+void ImmutableOopMapSet::operator delete(void* p) {
+ FREE_C_HEAP_ARRAY(unsigned char, p);
+}
//------------------------------DerivedPointerTable---------------------------
diff --git a/src/hotspot/share/compiler/oopMap.hpp b/src/hotspot/share/compiler/oopMap.hpp
index 4b20464f69c..146637f3654 100644
--- a/src/hotspot/share/compiler/oopMap.hpp
+++ b/src/hotspot/share/compiler/oopMap.hpp
@@ -334,7 +334,10 @@ class ImmutableOopMapSet {
address data() const { return (address) this + sizeof(*this) + sizeof(ImmutableOopMapPair) * _count; }
public:
+ void operator delete(void* p);
+
ImmutableOopMapSet(const OopMapSet* oopmap_set, int size) : _count(oopmap_set->size()), _size(size) {}
+ ~ImmutableOopMapSet() = default;
ImmutableOopMap* oopmap_at_offset(int offset) const {
assert(offset >= 0 && offset < _size, "must be within boundaries");
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
index 3907d904e2f..3cd5665e045 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -124,17 +124,22 @@ HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
}
// Expand and loop back if space is available
- size_t space_left = max_capacity() - capacity();
- size_t want_space = MAX2(size, EpsilonMinHeapExpand);
-
- if (want_space < space_left) {
+ size_t size_in_bytes = size * HeapWordSize;
+ size_t uncommitted_space = max_capacity() - capacity();
+ size_t unused_space = max_capacity() - used();
+ size_t want_space = MAX2(size_in_bytes, EpsilonMinHeapExpand);
+ assert(unused_space >= uncommitted_space,
+ "Unused (" SIZE_FORMAT ") >= uncommitted (" SIZE_FORMAT ")",
+ unused_space, uncommitted_space);
+
+ if (want_space < uncommitted_space) {
// Enough space to expand in bulk:
bool expand = _virtual_space.expand_by(want_space);
assert(expand, "Should be able to expand");
- } else if (size < space_left) {
+ } else if (size_in_bytes < unused_space) {
// No space to expand in bulk, and this allocation is still possible,
// take all the remaining space:
- bool expand = _virtual_space.expand_by(space_left);
+ bool expand = _virtual_space.expand_by(uncommitted_space);
assert(expand, "Should be able to expand");
} else {
// No space left:
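
The Epsilon hunk above fixes two bookkeeping slips: the allocation size arrives in heap words while the expansion amounts are in bytes, and the "allocation still possible" fallback now compares against unused space (max_capacity - used) while expansion itself is bounded by uncommitted space (max_capacity - capacity). A small worked example of the corrected decision, with invented sizes; the EpsilonMinHeapExpand value is only assumed here:

// Corrected expand-or-fail decision, with invented sizes.
#include <algorithm>
#include <cstdio>

int main() {
  const size_t HeapWordSize         = 8;                   // 64-bit heap words
  const size_t EpsilonMinHeapExpand = 128u * 1024 * 1024;  // assumed value for illustration

  const size_t max_capacity = 512u * 1024 * 1024;          // reserved
  const size_t capacity     = 384u * 1024 * 1024;          // committed
  const size_t used         = 380u * 1024 * 1024;          // allocated

  const size_t size_in_words = 1u * 1024 * 1024;           // request: 1M words = 8 MB
  const size_t size_in_bytes = size_in_words * HeapWordSize;

  const size_t uncommitted_space = max_capacity - capacity; // what expansion can add
  const size_t unused_space      = max_capacity - used;     // includes the committed tail
  const size_t want_space        = std::max(size_in_bytes, EpsilonMinHeapExpand);

  if (want_space < uncommitted_space) {
    std::printf("expand in bulk by %zu bytes\n", want_space);
  } else if (size_in_bytes < unused_space) {
    std::printf("take the remaining %zu uncommitted bytes\n", uncommitted_space);
  } else {
    std::printf("no space left\n");
  }
  return 0;
}
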
diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp
index f39e2066739..98cd1943224 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -286,7 +286,10 @@ class G1CardSetHashTable : public CHeapObj {
size_t initial_log_table_size = InitialLogTableSize) :
_inserted_card(false),
_mm(mm),
- _table(mm, initial_log_table_size, false),
+ _table(Mutex::service-1,
+ mm,
+ initial_log_table_size,
+ false /* enable_statistics */),
_table_scanner(&_table, BucketClaimSize) {
}
diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
index bbb1f3b9006..a1eb7b8543a 100644
--- a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,82 +28,277 @@
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeRootSet.hpp"
#include "gc/g1/heapRegion.hpp"
+#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/concurrentHashTableTasks.inline.hpp"
-void G1CodeRootSet::add(nmethod* nm) {
- assert(_is_iterating == false, "should not mutate while iterating the table");
- bool added = false;
- if (_table == nullptr) {
- _table = new (mtGC) Table(SmallSize, LargeSize);
+class G1CodeRootSetHashTableConfig : public StackObj {
+public:
+ using Value = nmethod*;
+
+ static uintx get_hash(Value const& value, bool* is_dead);
+
+ static void* allocate_node(void* context, size_t size, Value const& value) {
+ return AllocateHeap(size, mtGC);
+ }
+
+ static void free_node(void* context, void* memory, Value const& value) {
+ FreeHeap(memory);
+ }
+};
+
+// Storage container for the code root set.
+class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
+ using HashTable = ConcurrentHashTable<G1CodeRootSetHashTableConfig, mtGC>;
+ using HashTableScanTask = HashTable::ScanTask;
+
+ // Default (log2) number of buckets; small since typically we do not expect many
+ // entries.
+ static const size_t Log2DefaultNumBuckets = 2;
+ static const uint BucketClaimSize = 16;
+
+ HashTable _table;
+ HashTableScanTask _table_scanner;
+
+ size_t volatile _num_entries;
+
+ bool is_empty() const { return number_of_entries() == 0; }
+
+ class HashTableLookUp : public StackObj {
+ nmethod* _nmethod;
+
+ public:
+ explicit HashTableLookUp(nmethod* nmethod) : _nmethod(nmethod) { }
+ uintx get_hash() const;
+ bool equals(nmethod** value);
+ bool is_dead(nmethod** value) const { return false; }
+ };
+
+ class HashTableIgnore : public StackObj {
+ public:
+ HashTableIgnore() { }
+ void operator()(nmethod** value) { /* do nothing */ }
+ };
+
+public:
+ G1CodeRootSetHashTable() :
+ _table(Mutex::service-1,
+ nullptr,
+ Log2DefaultNumBuckets,
+ false /* enable_statistics */),
+ _table_scanner(&_table, BucketClaimSize), _num_entries(0) {
+ clear();
+ }
+
+ // Robert Jenkins 1996 & Thomas Wang 1997
+ // http://web.archive.org/web/20071223173210/http://www.concentric.net/~Ttwang/tech/inthash.htm
+ static uint32_t hash(uint32_t key) {
+ key = ~key + (key << 15);
+ key = key ^ (key >> 12);
+ key = key + (key << 2);
+ key = key ^ (key >> 4);
+ key = key * 2057;
+ key = key ^ (key >> 16);
+ return key;
+ }
+
+ static uintx get_hash(nmethod* nmethod) {
+ uintptr_t value = (uintptr_t)nmethod;
+ // The CHT only uses the bits smaller than HashTable::DEFAULT_MAX_SIZE_LOG2, so
+ // try to increase the randomness by incorporating the upper bits of the
+ // address too.
+ STATIC_ASSERT(HashTable::DEFAULT_MAX_SIZE_LOG2 <= sizeof(uint32_t) * BitsPerByte);
+#ifdef _LP64
+ return hash((uint32_t)value ^ (uint32_t(value >> 32)));
+#else
+ return hash((uint32_t)value);
+#endif
+ }
+
+ void insert(nmethod* method) {
+ HashTableLookUp lookup(method);
+ bool grow_hint = false;
+ bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
+ if (inserted) {
+ Atomic::inc(&_num_entries);
+ }
+ if (grow_hint) {
+ _table.grow(Thread::current());
+ }
+ }
+
+ bool remove(nmethod* method) {
+ HashTableLookUp lookup(method);
+ bool removed = _table.remove(Thread::current(), lookup);
+ if (removed) {
+ Atomic::dec(&_num_entries);
+ }
+ return removed;
+ }
+
+ bool contains(nmethod* method) {
+ HashTableLookUp lookup(method);
+ HashTableIgnore ignore;
+ return _table.get(Thread::current(), lookup, ignore);
+ }
+
+ void clear() {
+ // Remove all entries.
+ auto always_true = [] (nmethod** value) {
+ return true;
+ };
+ clean(always_true);
+ }
+
+ void iterate_at_safepoint(CodeBlobClosure* blk) {
+ assert_at_safepoint();
+ // A lot of code root sets are typically empty.
+ if (is_empty()) {
+ return;
+ }
+
+ auto do_value =
+ [&] (nmethod** value) {
+ blk->do_code_blob(*value);
+ return true;
+ };
+ _table_scanner.do_safepoint_scan(do_value);
+ }
+
+ // Removes entries as indicated by the given EVAL closure.
+ template <typename EVAL>
+ void clean(EVAL& eval) {
+ // A lot of code root sets are typically empty.
+ if (is_empty()) {
+ return;
+ }
+
+ size_t num_deleted = 0;
+ auto do_delete =
+ [&] (nmethod** value) {
+ num_deleted++;
+ };
+ bool succeeded = _table.try_bulk_delete(Thread::current(), eval, do_delete);
+ guarantee(succeeded, "unable to clean table");
+
+ if (num_deleted != 0) {
+ size_t current_size = Atomic::sub(&_num_entries, num_deleted);
+ shrink_to_match(current_size);
+ }
+ }
+
+ // Removes dead/unlinked entries.
+ void bulk_remove() {
+ auto delete_check = [&] (nmethod** value) {
+ return (*value)->is_unlinked();
+ };
+
+ clean(delete_check);
+ }
+
+ // Calculate the log2 of the table size we want to shrink to.
+ size_t log2_target_shrink_size(size_t current_size) const {
+ // A table with the new size should be at most filled by this factor. Otherwise
+ // we would grow again quickly.
+ const float WantedLoadFactor = 0.5;
+ size_t min_expected_size = checked_cast<size_t>(ceil(current_size / WantedLoadFactor));
+
+ size_t result = Log2DefaultNumBuckets;
+ if (min_expected_size != 0) {
+ size_t log2_bound = checked_cast<size_t>(log2i_exact(round_up_power_of_2(min_expected_size)));
+ result = clamp(log2_bound, Log2DefaultNumBuckets, HashTable::DEFAULT_MAX_SIZE_LOG2);
+ }
+ return result;
+ }
+
+ // Shrink to keep table size appropriate to the given number of entries.
+ void shrink_to_match(size_t current_size) {
+ size_t prev_log2size = _table.get_size_log2(Thread::current());
+ size_t new_log2_table_size = log2_target_shrink_size(current_size);
+ if (new_log2_table_size < prev_log2size) {
+ _table.shrink(Thread::current(), new_log2_table_size);
+ }
}
- added = _table->put(nm, nm);
- if (added && _table->table_size() == SmallSize && length() == Threshold) {
- _table->resize(LargeSize);
+
+ void reset_table_scanner() {
+ _table_scanner.set(&_table, BucketClaimSize);
+ }
+
+ size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }
+
+ size_t number_of_entries() const { return Atomic::load(&_num_entries); }
+};
+
+uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {
+ return G1CodeRootSetHashTable::get_hash(_nmethod);
+}
+
+bool G1CodeRootSetHashTable::HashTableLookUp::equals(nmethod** value) {
+ return *value == _nmethod;
+}
+
+uintx G1CodeRootSetHashTableConfig::get_hash(Value const& value, bool* is_dead) {
+ *is_dead = false;
+ return G1CodeRootSetHashTable::get_hash(value);
+}
+
+size_t G1CodeRootSet::length() const { return _table->number_of_entries(); }
+
+void G1CodeRootSet::add(nmethod* method) {
+ if (!contains(method)) {
+ assert(!_is_iterating, "must be");
+ _table->insert(method);
}
}
+G1CodeRootSet::G1CodeRootSet() :
+ _table(new G1CodeRootSetHashTable())
+ DEBUG_ONLY(COMMA _is_iterating(false)) { }
+
G1CodeRootSet::~G1CodeRootSet() {
delete _table;
}
bool G1CodeRootSet::remove(nmethod* method) {
- assert(_is_iterating == false, "should not mutate while iterating the table");
- bool removed = false;
- if (_table != nullptr) {
- removed = _table->remove(method);
- }
- if (removed) {
- if (length() == 0) {
- clear();
- }
- }
- return removed;
+ assert(!_is_iterating, "should not mutate while iterating the table");
+ return _table->remove(method);
+}
+
+void G1CodeRootSet::bulk_remove() {
+ assert(!_is_iterating, "should not mutate while iterating the table");
+ _table->bulk_remove();
}
bool G1CodeRootSet::contains(nmethod* method) {
- if (_table != nullptr) {
- return _table->contains(method);
- }
- return false;
+ return _table->contains(method);
}
void G1CodeRootSet::clear() {
- assert(_is_iterating == false, "should not mutate while iterating the table");
- delete _table;
- _table = nullptr;
+ assert(!_is_iterating, "should not mutate while iterating the table");
+ _table->clear();
}
size_t G1CodeRootSet::mem_size() {
- return (_table == nullptr)
- ? sizeof(*this)
- : sizeof(*this) + _table->mem_size();
+ return sizeof(*this) + _table->mem_size();
+}
+
+void G1CodeRootSet::reset_table_scanner() {
+ _table->reset_table_scanner();
}
void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
DEBUG_ONLY(_is_iterating = true;)
- if (_table != nullptr) {
- _table->iterate_all([&](nmethod* nm, nmethod* _) {
- blk->do_code_blob(nm);
- });
- }
+ _table->iterate_at_safepoint(blk);
DEBUG_ONLY(_is_iterating = false;)
}
class CleanCallback : public StackObj {
NONCOPYABLE(CleanCallback); // can not copy, _blobs will point to old copy
+
class PointsIntoHRDetectionClosure : public OopClosure {
HeapRegion* _hr;
- public:
- bool _points_into;
- PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
-
- void do_oop(narrowOop* o) {
- do_oop_work(o);
- }
-
- void do_oop(oop* o) {
- do_oop_work(o);
- }
template <typename T>
void do_oop_work(T* p) {
@@ -111,6 +306,14 @@ class CleanCallback : public StackObj {
_points_into = true;
}
}
+
+ public:
+ bool _points_into;
+ PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
+
+ void do_oop(narrowOop* o) { do_oop_work(o); }
+
+ void do_oop(oop* o) { do_oop_work(o); }
};
PointsIntoHRDetectionClosure _detector;
@@ -119,20 +322,16 @@ class CleanCallback : public StackObj {
public:
CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}
- bool do_entry(nmethod* nm, nmethod* _) {
+ bool operator()(nmethod** value) {
_detector._points_into = false;
- _blobs.do_code_blob(nm);
+ _blobs.do_code_blob(*value);
return !_detector._points_into;
}
};
void G1CodeRootSet::clean(HeapRegion* owner) {
- assert(_is_iterating == false, "should not mutate while iterating the table");
- CleanCallback should_clean(owner);
- if (_table != nullptr) {
- _table->unlink(&should_clean);
- }
- if (length() == 0) {
- clear();
- }
+ assert(!_is_iterating, "should not mutate while iterating the table");
+
+ CleanCallback eval(owner);
+ _table->clean(eval);
}
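
The new code root set hashes nmethod addresses by folding a 64-bit pointer into 32 bits and then applying the Jenkins/Wang mix shown above, because the hash table only consumes the low-order bits of the hash. A standalone sketch of that folding with plain C++ types replacing the HotSpot ones; the function names here are illustrative only.

// Fold a pointer to 32 bits, then apply the Jenkins/Wang integer mix.
#include <cstdint>
#include <cstdio>

static uint32_t mix(uint32_t key) {          // Robert Jenkins 1996 / Thomas Wang 1997
  key = ~key + (key << 15);
  key = key ^ (key >> 12);
  key = key + (key << 2);
  key = key ^ (key >> 4);
  key = key * 2057;
  key = key ^ (key >> 16);
  return key;
}

static uint32_t hash_pointer(const void* p) {
  uintptr_t value = (uintptr_t)p;
#if UINTPTR_MAX > 0xFFFFFFFFu
  // Mix in the upper address bits so that addresses differing only in high
  // bits still land in different buckets once the hash is masked down.
  return mix((uint32_t)value ^ (uint32_t)(value >> 32));
#else
  return mix((uint32_t)value);
#endif
}

int main() {
  int a = 0, b = 0;                          // two nearby stack addresses
  std::printf("%08x %08x\n", (unsigned)hash_pointer(&a), (unsigned)hash_pointer(&b));
  return 0;
}
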
diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.hpp b/src/hotspot/share/gc/g1/g1CodeRootSet.hpp
index 087c3635cf0..9c5ccdd1202 100644
--- a/src/hotspot/share/gc/g1/g1CodeRootSet.hpp
+++ b/src/hotspot/share/gc/g1/g1CodeRootSet.hpp
@@ -27,43 +27,38 @@
#include "code/codeCache.hpp"
#include "utilities/globalDefinitions.hpp"
-#include "utilities/resizeableResourceHash.hpp"
+class G1CodeRootSetHashTable;
class HeapRegion;
class nmethod;
// Implements storage for a set of code roots.
-// This class is not thread safe, locks are needed.
+// This class is thread safe.
class G1CodeRootSet {
- friend class G1CodeRootSetTest;
- friend class G1CodeRootSetTest_g1_code_cache_rem_set_vm_Test;
-
- private:
- const static size_t SmallSize = 32;
- const static size_t Threshold = 24;
- const static size_t LargeSize = 512;
-
- using Table = ResizeableResourceHashtable<nmethod*, nmethod*, AnyObj::C_HEAP, mtGC>;
- Table* _table;
+ G1CodeRootSetHashTable* _table;
DEBUG_ONLY(mutable bool _is_iterating;)
public:
- G1CodeRootSet() : _table(nullptr) DEBUG_ONLY(COMMA _is_iterating(false)) {}
+ G1CodeRootSet();
~G1CodeRootSet();
void add(nmethod* method);
bool remove(nmethod* method);
+ void bulk_remove();
bool contains(nmethod* method);
void clear();
+
+ // Prepare for MT iteration. Must be called before nmethods_do.
+ void reset_table_scanner();
void nmethods_do(CodeBlobClosure* blk) const;
- // Remove all nmethods which no longer contain pointers into our "owner" region
+ // Remove all nmethods which no longer contain pointers into our "owner" region.
void clean(HeapRegion* owner);
bool is_empty() { return length() == 0;}
// Length in elements
- size_t length() const { return _table == nullptr ? 0 : _table->number_of_entries(); }
+ size_t length() const;
// Memory size in bytes taken by this set.
size_t mem_size();
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index eeaa54cee85..f59de9b44e3 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/metadataOnStackMark.hpp"
-#include "classfile/stringTable.hpp"
+#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
@@ -74,6 +74,7 @@
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
@@ -855,10 +856,6 @@ void G1CollectedHeap::verify_before_full_collection() {
}
void G1CollectedHeap::prepare_for_mutator_after_full_collection() {
- // Delete metaspaces for unloaded class loaders and clean up loader_data graph
- ClassLoaderDataGraph::purge(/*at_safepoint*/true);
- DEBUG_ONLY(MetaspaceUtils::verify();)
-
// Prepare heap for normal collections.
assert(num_free_regions() == 0, "we should not have added any free regions");
rebuild_region_sets(false /* free_list_only */);
@@ -2596,6 +2593,65 @@ void G1CollectedHeap::complete_cleaning(bool class_unloading_occurred) {
workers()->run_task(&unlink_task);
}
+void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjectClosure* is_alive, GCTimer* timer) {
+ GCTraceTime(Debug, gc, phases) debug(description, timer);
+
+ ClassUnloadingContext ctx(workers()->active_workers(),
+ false /* unregister_nmethods_during_purge */,
+ false /* lock_codeblob_free_separately */);
+ {
+ CodeCache::UnlinkingScope scope(is_alive);
+ bool unloading_occurred = SystemDictionary::do_unloading(timer);
+ GCTraceTime(Debug, gc, phases) t("G1 Complete Cleaning", timer);
+ complete_cleaning(unloading_occurred);
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", timer);
+ ctx.purge_nmethods();
+ }
+ {
+ GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", timer);
+ G1CollectedHeap::heap()->bulk_unregister_nmethods();
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Free Code Blobs", timer);
+ ctx.free_code_blobs();
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", timer);
+ ClassLoaderDataGraph::purge(true /* at_safepoint */);
+ DEBUG_ONLY(MetaspaceUtils::verify();)
+ }
+}
+
+class G1BulkUnregisterNMethodTask : public WorkerTask {
+ HeapRegionClaimer _hrclaimer;
+
+ class UnregisterNMethodsHeapRegionClosure : public HeapRegionClosure {
+ public:
+
+ bool do_heap_region(HeapRegion* hr) {
+ hr->rem_set()->bulk_remove_code_roots();
+ return false;
+ }
+ } _cl;
+
+public:
+ G1BulkUnregisterNMethodTask(uint num_workers)
+ : WorkerTask("G1 Remove Unlinked NMethods From Code Root Set Task"),
+ _hrclaimer(num_workers) { }
+
+ void work(uint worker_id) {
+ G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hrclaimer, worker_id);
+ }
+};
+
+void G1CollectedHeap::bulk_unregister_nmethods() {
+ uint num_workers = workers()->active_workers();
+ G1BulkUnregisterNMethodTask t(num_workers);
+ workers()->run_task(&t);
+}
+
bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
assert(obj != nullptr, "must not be null");
assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
@@ -2761,7 +2817,7 @@ bool G1CollectedHeap::check_young_list_empty() {
// Remove the given HeapRegion from the appropriate region set.
void G1CollectedHeap::prepare_region_for_full_compaction(HeapRegion* hr) {
- if (hr->is_humongous()) {
+ if (hr->is_humongous()) {
_humongous_set.remove(hr);
} else if (hr->is_old()) {
_old_set.remove(hr);
@@ -3008,33 +3064,7 @@ class RegisterNMethodOopClosure: public OopClosure {
" starting at " HR_FORMAT,
p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
- // HeapRegion::add_code_root_locked() avoids adding duplicate entries.
- hr->add_code_root_locked(_nm);
- }
- }
-
- void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-
-class UnregisterNMethodOopClosure: public OopClosure {
- G1CollectedHeap* _g1h;
- nmethod* _nm;
-
-public:
- UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
- _g1h(g1h), _nm(nm) {}
-
- void do_oop(oop* p) {
- oop heap_oop = RawAccess<>::oop_load(p);
- if (!CompressedOops::is_null(heap_oop)) {
- oop obj = CompressedOops::decode_not_null(heap_oop);
- HeapRegion* hr = _g1h->heap_region_containing(obj);
- assert(!hr->is_continues_humongous(),
- "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
- " starting at " HR_FORMAT,
- p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
-
- hr->remove_code_root(_nm);
+ hr->add_code_root(_nm);
}
}
@@ -3048,9 +3078,8 @@ void G1CollectedHeap::register_nmethod(nmethod* nm) {
}
void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
- guarantee(nm != nullptr, "sanity");
- UnregisterNMethodOopClosure reg_cl(this, nm);
- nm->oops_do(&reg_cl, true);
+ // We always unregister nmethods in bulk during code unloading only.
+ ShouldNotReachHere();
}
void G1CollectedHeap::update_used_after_gc(bool evacuation_failed) {
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index e1b95bf616d..447535f2f86 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1265,6 +1265,10 @@ class G1CollectedHeap : public CollectedHeap {
// Performs cleaning of data structures after class unloading.
void complete_cleaning(bool class_unloading_occurred);
+ void unload_classes_and_code(const char* description, BoolObjectClosure* cl, GCTimer* timer);
+
+ void bulk_unregister_nmethods();
+
// Verification
// Perform any cleanup actions necessary before allowing a verification.
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 248a5043777..fb773a025e7 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -25,8 +25,6 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
@@ -1121,13 +1119,10 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
// Distribute the given marked bytes across the humongous object starting
// with hr and note end of marking for these regions.
void distribute_marked_bytes(HeapRegion* hr, size_t marked_bytes) {
- size_t const obj_size_in_words = cast_to_oop(hr->bottom())->size();
-
- // "Distributing" zero words means that we only note end of marking for these
- // regions.
- assert(marked_bytes == 0 || obj_size_in_words * HeapWordSize == marked_bytes,
+ // Dead humongous objects (marked_bytes == 0) may have already been unloaded.
+ assert(marked_bytes == 0 || cast_to_oop(hr->bottom())->size() * HeapWordSize == marked_bytes,
"Marked bytes should either be 0 or the same as humongous object (%zu) but is %zu",
- obj_size_in_words * HeapWordSize, marked_bytes);
+ cast_to_oop(hr->bottom())->size() * HeapWordSize, marked_bytes);
auto distribute_bytes = [&] (HeapRegion* r) {
size_t const bytes_to_add = MIN2(HeapRegion::GrainBytes, marked_bytes);
@@ -1138,10 +1133,6 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
marked_bytes -= bytes_to_add;
};
_g1h->humongous_obj_regions_iterate(hr, distribute_bytes);
-
- assert(marked_bytes == 0,
- "%zu bytes left after distributing space across %zu regions",
- marked_bytes, G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words));
}
void update_marked_bytes(HeapRegion* hr) {
@@ -1252,6 +1243,12 @@ void G1ConcurrentMark::remark() {
if (mark_finished) {
weak_refs_work();
+ // Unload Klasses, String, Code Cache, etc.
+ if (ClassUnloadingWithConcurrentMark) {
+ G1CMIsAliveClosure is_alive(_g1h);
+ _g1h->unload_classes_and_code("Class Unloading", &is_alive, _gc_timer_cm);
+ }
+
SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
// We're done with marking.
// This is the end of the marking cycle, we're expected all
@@ -1289,12 +1286,6 @@ void G1ConcurrentMark::remark() {
reclaim_empty_regions();
}
- // Clean out dead classes
- if (ClassUnloadingWithConcurrentMark) {
- GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
- ClassLoaderDataGraph::purge(/*at_safepoint*/true);
- }
-
_g1h->resize_heap_if_necessary();
_g1h->uncommit_regions_if_necessary();
@@ -1619,9 +1610,6 @@ class G1CMRefProcProxyTask : public RefProcProxyTask {
void G1ConcurrentMark::weak_refs_work() {
ResourceMark rm;
- // Is alive closure.
- G1CMIsAliveClosure g1_is_alive(_g1h);
-
{
GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
@@ -1678,15 +1666,8 @@ void G1ConcurrentMark::weak_refs_work() {
{
GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
- WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
- }
-
- // Unload Klasses, String, Code Cache, etc.
- if (ClassUnloadingWithConcurrentMark) {
- GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
- CodeCache::UnloadingScope scope(&g1_is_alive);
- bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
- _g1h->complete_cleaning(purged_classes);
+ G1CMIsAliveClosure is_alive(_g1h);
+ WeakProcessor::weak_oops_do(_g1h->workers(), &is_alive, &do_nothing_cl, 1);
}
}
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
index 4381d515c6a..df1afe0d3e9 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -24,9 +24,6 @@
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
@@ -41,6 +38,7 @@
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
@@ -171,6 +169,7 @@ class PrepareRegionsClosure : public HeapRegionClosure {
PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }
bool do_heap_region(HeapRegion* hr) {
+ hr->prepare_for_full_gc();
G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
_collector->before_marking_update_attribute_table(hr);
return false;
@@ -318,11 +317,7 @@ void G1FullCollector::phase1_mark_live_objects() {
// Class unloading and cleanup.
if (ClassUnloading) {
- GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
- CodeCache::UnloadingScope unloading_scope(&_is_alive);
- // Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(scope()->timer());
- _heap->complete_cleaning(purged_class);
+ _heap->unload_classes_and_code("Phase 1: Class Unloading and Cleanup", &_is_alive, scope()->timer());
}
{
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
index 5fc8b54df0d..2639f9ea166 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
@@ -123,6 +123,10 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[MergeLB]->create_thread_work_items("Dirty Cards:", MergeLBDirtyCards);
_gc_par_phases[MergeLB]->create_thread_work_items("Skipped Cards:", MergeLBSkippedCards);
+ _gc_par_phases[CodeRoots]->create_thread_work_items("Scanned Nmethods", CodeRootsScannedNMethods);
+
+ _gc_par_phases[OptCodeRoots]->create_thread_work_items("Scanned Nmethods", CodeRootsScannedNMethods);
+
_gc_par_phases[MergePSS]->create_thread_work_items("Copied Bytes", MergePSSCopiedBytes);
_gc_par_phases[MergePSS]->create_thread_work_items("LAB Waste", MergePSSLABWasteBytes);
_gc_par_phases[MergePSS]->create_thread_work_items("LAB Undo Waste", MergePSSLABUndoWasteBytes);
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
index d06341e3ece..5b2b5714dff 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
@@ -134,6 +134,10 @@ class G1GCPhaseTimes : public CHeapObj {
MergeLBSkippedCards
};
+ enum GCCodeRootsWorkItems {
+ CodeRootsScannedNMethods
+ };
+
enum GCMergePSSWorkItems {
MergePSSCopiedBytes,
MergePSSLABSize,
diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
index 1a7c11a685b..8645b847045 100644
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
@@ -217,7 +217,14 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand) {
// Capacity, free and used after the GC counted as full regions to
// include the waste in the following calculations.
const size_t capacity_after_gc = _g1h->capacity();
- const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes();
+ const size_t used_after_gc = capacity_after_gc -
+ _g1h->unused_committed_regions_in_bytes() -
+ // Discount space used by current Eden to establish a
+ // situation during Remark similar to at the end of full
+ // GC where eden is empty. During Remark there can be an
+ // arbitrary number of eden regions which would skew the
+ // results.
+ _g1h->eden_regions_count() * HeapRegion::GrainBytes;
size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio);
size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
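
To make the eden discount above concrete, here is a short worked computation with invented region counts and an assumed 4 MB region size: used_after_gc now subtracts both unused committed regions and the current eden regions, approximating the eden-empty state that holds right after a full collection.

// used_after_gc excludes unused committed regions and current eden regions.
#include <cstdio>

int main() {
  const size_t region_size       = 4u * 1024 * 1024;   // assumed 4 MB G1 regions
  const size_t committed_regions = 1000;
  const size_t unused_committed  = 50;                 // free but still committed
  const size_t eden_regions      = 120;                // arbitrary at the time of sizing

  const size_t capacity_after_gc = committed_regions * region_size;
  const size_t used_after_gc     = capacity_after_gc
                                 - unused_committed * region_size
                                 - eden_regions * region_size;  // the new discount

  std::printf("capacity %zu MB, used for sizing %zu MB\n",
              capacity_after_gc >> 20, used_after_gc >> 20);
  return 0;
}
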
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index 92c83301c2a..5f19861e8fb 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -1276,7 +1276,7 @@ void G1Policy::abandon_collection_set_candidates() {
// Clear remembered sets of remaining candidate regions and the actual candidate
// set.
for (HeapRegion* r : *candidates()) {
- r->rem_set()->clear_locked(true /* only_cardset */);
+ r->rem_set()->clear(true /* only_cardset */);
}
_collection_set->abandon_all_candidates();
}
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp
index b29125037b7..58adb87f86c 100644
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -257,7 +257,6 @@ class G1RemSetScanState : public CHeapObj {
public:
G1RemSetScanState() :
_max_reserved_regions(0),
- _collection_set_iter_state(nullptr),
_card_table_scan_state(nullptr),
_scan_chunks_per_region(G1CollectedHeap::get_chunks_per_region()),
_log_scan_chunks_per_region(log2i(_scan_chunks_per_region)),
@@ -270,16 +269,14 @@ class G1RemSetScanState : public CHeapObj {
}
~G1RemSetScanState() {
- FREE_C_HEAP_ARRAY(G1RemsetIterState, _collection_set_iter_state);
FREE_C_HEAP_ARRAY(uint, _card_table_scan_state);
FREE_C_HEAP_ARRAY(bool, _region_scan_chunks);
FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
}
void initialize(size_t max_reserved_regions) {
- assert(_collection_set_iter_state == nullptr, "Must not be initialized twice");
+ assert(_card_table_scan_state == nullptr, "Must not be initialized twice");
_max_reserved_regions = max_reserved_regions;
- _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_reserved_regions, mtGC);
_card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
_num_total_scan_chunks = max_reserved_regions * _scan_chunks_per_region;
_region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC);
@@ -294,7 +291,6 @@ class G1RemSetScanState : public CHeapObj {
// become used during the collection these values must be valid
// for those regions as well.
for (size_t i = 0; i < _max_reserved_regions; i++) {
- reset_region_claim((uint)i);
clear_scan_top((uint)i);
}
@@ -399,20 +395,6 @@ class G1RemSetScanState : public CHeapObj {
} while (cur != start_pos);
}
- void reset_region_claim(uint region_idx) {
- _collection_set_iter_state[region_idx] = false;
- }
-
- // Attempt to claim the given region in the collection set for iteration. Returns true
- // if this call caused the transition from Unclaimed to Claimed.
- inline bool claim_collection_set_region(uint region) {
- assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- if (_collection_set_iter_state[region]) {
- return false;
- }
- return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
- }
-
bool has_cards_to_scan(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
@@ -757,6 +739,25 @@ void G1RemSet::scan_heap_roots(G1ParScanThreadState* pss,
p->record_or_add_thread_work_item(scan_phase, worker_id, cl.heap_roots_found(), G1GCPhaseTimes::ScanHRFoundRoots);
}
+// Wrapper around a CodeBlobClosure to count the number of code blobs scanned.
+class G1ScanAndCountCodeBlobClosure : public CodeBlobClosure {
+ CodeBlobClosure* _cl;
+ size_t _count;
+
+public:
+ G1ScanAndCountCodeBlobClosure(CodeBlobClosure* cl) : _cl(cl), _count(0) {
+ }
+
+ void do_code_blob(CodeBlob* cb) override {
+ _cl->do_code_blob(cb);
+ _count++;
+ }
+
+ size_t count() const {
+ return _count;
+ }
+};
+
// Heap region closure to be applied to all regions in the current collection set
// increment to fix up non-card related roots.
class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
@@ -768,6 +769,8 @@ class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
uint _worker_id;
+ size_t _code_roots_scanned;
+
size_t _opt_roots_scanned;
size_t _opt_refs_scanned;
size_t _opt_refs_memory_used;
@@ -798,6 +801,7 @@ class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
_scan_phase(scan_phase),
_code_roots_phase(code_roots_phase),
_worker_id(worker_id),
+ _code_roots_scanned(0),
_opt_roots_scanned(0),
_opt_refs_scanned(0),
_opt_refs_memory_used(0),
@@ -807,8 +811,6 @@ class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
_rem_set_opt_trim_partially_time() { }
bool do_heap_region(HeapRegion* r) {
- uint const region_idx = r->hrm_index();
-
// The individual references for the optional remembered set are per-worker, so we
// always need to scan them.
if (r->has_index_in_opt_cset()) {
@@ -819,11 +821,16 @@ class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase));
}
- if (_scan_state->claim_collection_set_region(region_idx)) {
+ // Scan code root remembered sets.
+ {
EventGCPhaseParallel event;
G1EvacPhaseWithTrimTimeTracker timer(_pss, _code_root_scan_time, _code_trim_partially_time);
+ G1ScanAndCountCodeBlobClosure cl(_pss->closures()->weak_codeblobs());
+
// Scan the code root list attached to the current region
- r->code_roots_do(_pss->closures()->weak_codeblobs());
+ r->code_roots_do(&cl);
+
+ _code_roots_scanned = cl.count();
event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_code_roots_phase));
}
@@ -834,6 +841,8 @@ class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
Tickspan code_root_scan_time() const { return _code_root_scan_time; }
Tickspan code_root_trim_partially_time() const { return _code_trim_partially_time; }
+ size_t code_roots_scanned() const { return _code_roots_scanned; }
+
Tickspan rem_set_opt_root_scan_time() const { return _rem_set_opt_root_scan_time; }
Tickspan rem_set_opt_trim_partially_time() const { return _rem_set_opt_trim_partially_time; }
@@ -856,6 +865,8 @@ void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss,
p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_trim_partially_time().seconds());
p->record_or_add_time_secs(coderoots_phase, worker_id, cl.code_root_scan_time().seconds());
+ p->record_or_add_thread_work_item(coderoots_phase, worker_id, cl.code_roots_scanned(), G1GCPhaseTimes::CodeRootsScannedNMethods);
+
p->add_time_secs(objcopy_phase, worker_id, cl.code_root_trim_partially_time().seconds());
// At this time we record some metrics only for the evacuations after the initial one.
@@ -1180,7 +1191,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
// implicitly rebuild anything else during eager reclaim. Note that at the moment
// (and probably never) we do not enter this path if there are other kind of
// remembered sets for this region.
- r->rem_set()->clear_locked(true /* only_cardset */);
+ r->rem_set()->clear(true /* only_cardset */);
// Clear_locked() above sets the state to Empty. However we want to continue
// collecting remembered set entries for humongous regions that were not
// reclaimed.
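
G1ScanAndCountCodeBlobClosure above is a counting decorator around an existing closure. The same pattern in a self-contained form; the CountingVisitor name and the int callback type are illustrative, not HotSpot API.

// Decorator that forwards each item to an inner callback and counts the calls.
#include <cstdio>
#include <functional>

class CountingVisitor {
  std::function<void(int)> _inner;
  size_t _count = 0;
public:
  explicit CountingVisitor(std::function<void(int)> inner) : _inner(inner) {}
  void visit(int item) { _inner(item); ++_count; }  // delegate, then count
  size_t count() const { return _count; }
};

int main() {
  CountingVisitor visitor([](int item) { std::printf("visiting %d\n", item); });
  for (int i = 0; i < 3; i++) {
    visitor.visit(i);
  }
  std::printf("visited %zu items\n", visitor.count());
  return 0;
}
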
diff --git a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp
index 6d1633786e6..0a91f2aea50 100644
--- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp
@@ -142,7 +142,7 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
[&] (HeapRegion* r) {
assert(!r->is_continues_humongous() || r->rem_set()->is_empty(),
"Continues humongous region %u remset should be empty", r->hrm_index());
- r->rem_set()->clear_locked(true /* only_cardset */);
+ r->rem_set()->clear(true /* only_cardset */);
});
}
G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp
index b3e64c064e0..a34d6b85458 100644
--- a/src/hotspot/share/gc/g1/heapRegion.cpp
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp
@@ -107,7 +107,7 @@ void HeapRegion::handle_evacuation_failure() {
move_to_old();
_rem_set->clean_code_roots(this);
- _rem_set->clear_locked(true /* only_cardset */);
+ _rem_set->clear(true /* only_cardset */);
}
void HeapRegion::unlink_from_list() {
@@ -124,7 +124,7 @@ void HeapRegion::hr_clear(bool clear_space) {
set_free();
reset_pre_dummy_top();
- rem_set()->clear_locked();
+ rem_set()->clear();
init_top_at_mark_start();
if (clear_space) clear(SpaceDecorator::Mangle);
@@ -207,7 +207,7 @@ void HeapRegion::clear_humongous() {
}
void HeapRegion::prepare_remset_for_scan() {
- return _rem_set->reset_table_scanner();
+ _rem_set->reset_table_scanner();
}
HeapRegion::HeapRegion(uint hrm_index,
@@ -288,24 +288,15 @@ void HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {
// Code roots support
void HeapRegion::add_code_root(nmethod* nm) {
- HeapRegionRemSet* hrrs = rem_set();
- hrrs->add_code_root(nm);
-}
-
-void HeapRegion::add_code_root_locked(nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
- HeapRegionRemSet* hrrs = rem_set();
- hrrs->add_code_root_locked(nm);
+ rem_set()->add_code_root(nm);
}
void HeapRegion::remove_code_root(nmethod* nm) {
- HeapRegionRemSet* hrrs = rem_set();
- hrrs->remove_code_root(nm);
+ rem_set()->remove_code_root(nm);
}
void HeapRegion::code_roots_do(CodeBlobClosure* blk) const {
- HeapRegionRemSet* hrrs = rem_set();
- hrrs->code_roots_do(blk);
+ rem_set()->code_roots_do(blk);
}
class VerifyCodeRootOopClosure: public OopClosure {
@@ -608,7 +599,6 @@ class G1VerifyLiveAndRemSetClosure : public BasicOopIterateClosure {
template <typename T>
void do_oop_work(T* p) {
assert(_containing_obj != nullptr, "must be");
- assert(!G1CollectedHeap::heap()->is_obj_dead_cond(_containing_obj, _vo), "Precondition");
if (num_failures() >= G1MaxVerifyFailures) {
return;
@@ -640,6 +630,7 @@ class G1VerifyLiveAndRemSetClosure : public BasicOopIterateClosure {
_num_failures(0) { }
void set_containing_obj(oop const obj) {
+ assert(!G1CollectedHeap::heap()->is_obj_dead_cond(obj, _vo), "Precondition");
_containing_obj = obj;
}
diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp
index 05a3299889b..195b7e9d911 100644
--- a/src/hotspot/share/gc/g1/heapRegion.hpp
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp
@@ -174,6 +174,7 @@ class HeapRegion : public CHeapObj {
void update_bot_for_block(HeapWord* start, HeapWord* end);
+ void prepare_for_full_gc();
// Update heap region that has been compacted to be consistent after Full GC.
void reset_compacted_after_full_gc(HeapWord* new_top);
// Update skip-compacting heap region to be consistent after Full GC.
@@ -233,11 +234,17 @@ class HeapRegion : public CHeapObj {
HeapWord* volatile _top_at_mark_start;
// The area above this limit is fully parsable. This limit
- // is equal to bottom except from Remark and until the region has been
- // scrubbed concurrently. The scrubbing ensures that all dead objects (with
- // possibly unloaded classes) have been replaced with filler objects that
- // are parsable. Below this limit the marking bitmap must be used to
- // determine size and liveness.
+ // is equal to bottom except
+ //
+ // * from Remark and until the region has been scrubbed concurrently. The
+ // scrubbing ensures that all dead objects (with possibly unloaded classes)
+ // have been replaced with filler objects that are parsable.
+ // * after the marking phase in the Full GC pause until the objects have been
+ // moved. Some (debug) code iterates over the heap after marking but before
+ // compaction.
+ //
+ // Below this limit the marking bitmap must be used to determine size and
+ // liveness.
HeapWord* volatile _parsable_bottom;
// Amount of dead data in the region.
@@ -549,7 +556,6 @@ class HeapRegion : public CHeapObj {
// Routines for managing a list of code roots (attached to the
// this region's RSet) that point into this heap region.
void add_code_root(nmethod* nm);
- void add_code_root_locked(nmethod* nm);
void remove_code_root(nmethod* nm);
// Applies blk->do_code_blob() to each of the entries in
diff --git a/src/hotspot/share/gc/g1/heapRegion.inline.hpp b/src/hotspot/share/gc/g1/heapRegion.inline.hpp
index 5109c6a1e58..f28ff13c460 100644
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp
@@ -180,6 +180,13 @@ inline size_t HeapRegion::block_size(const HeapWord* p, HeapWord* const pb) cons
return cast_to_oop(p)->size();
}
+inline void HeapRegion::prepare_for_full_gc() {
+ // After marking and class unloading the heap temporarily contains dead objects
+ // with unloaded klasses. Moving parsable_bottom makes some (debug) code correctly
+ // skip dead objects.
+ _parsable_bottom = top();
+}
+
inline void HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
set_top(new_top);
// After a compaction the mark bitmap in a movable region is invalid.
@@ -201,7 +208,7 @@ inline void HeapRegion::reset_skip_compacting_after_full_gc() {
inline void HeapRegion::reset_after_full_gc_common() {
// Everything above bottom() is parsable and live.
- _parsable_bottom = bottom();
+ reset_parsable_bottom();
// Clear unused heap memory in debug builds.
if (ZapUnusedHeapArea) {
diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp
index 8289cdf553b..4a124c20749 100644
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp
@@ -57,7 +57,6 @@ void HeapRegionRemSet::initialize(MemRegion reserved) {
HeapRegionRemSet::HeapRegionRemSet(HeapRegion* hr,
G1CardSetConfiguration* config) :
- _m(Mutex::service - 1, FormatBuffer<128>("HeapRegionRemSet#%u_lock", hr->hrm_index())),
_code_roots(),
_card_set_mm(config, G1CollectedHeap::heap()->card_set_freelist_pool()),
_card_set(config, &_card_set_mm),
@@ -69,11 +68,6 @@ void HeapRegionRemSet::clear_fcc() {
}
void HeapRegionRemSet::clear(bool only_cardset) {
- MutexLocker x(&_m, Mutex::_no_safepoint_check_flag);
- clear_locked(only_cardset);
-}
-
-void HeapRegionRemSet::clear_locked(bool only_cardset) {
if (!only_cardset) {
_code_roots.clear();
}
@@ -84,6 +78,7 @@ void HeapRegionRemSet::clear_locked(bool only_cardset) {
}
void HeapRegionRemSet::reset_table_scanner() {
+ _code_roots.reset_table_scanner();
_card_set.reset_table_scanner();
}
@@ -103,40 +98,22 @@ void HeapRegionRemSet::print_static_mem_size(outputStream* out) {
// When not at safepoint the CodeCache_lock must be held during modifications.
void HeapRegionRemSet::add_code_root(nmethod* nm) {
- assert(nm != nullptr, "sanity");
- assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
- "should call add_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
- BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
-
- MutexLocker ml(&_m, Mutex::_no_safepoint_check_flag);
- add_code_root_locked(nm);
-}
-
-void HeapRegionRemSet::add_code_root_locked(nmethod* nm) {
- assert(nm != nullptr, "sanity");
- assert((CodeCache_lock->owned_by_self() ||
- (SafepointSynchronize::is_at_safepoint() &&
- (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
- "not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
- BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
- BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
-
- if (!_code_roots.contains(nm)) { // with this test, we can assert that we do not modify the hash table while iterating over it
- _code_roots.add(nm);
- }
+ _code_roots.add(nm);
}
void HeapRegionRemSet::remove_code_root(nmethod* nm) {
assert(nm != nullptr, "sanity");
- assert_locked_or_safepoint(CodeCache_lock);
- MutexLocker ml(CodeCache_lock->owned_by_self() ? nullptr : &_m, Mutex::_no_safepoint_check_flag);
_code_roots.remove(nm);
// Check that there were no duplicates
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
+void HeapRegionRemSet::bulk_remove_code_roots() {
+ _code_roots.bulk_remove();
+}
+
void HeapRegionRemSet::code_roots_do(CodeBlobClosure* blk) const {
_code_roots.nmethods_do(blk);
}
diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp
index 584275e9d8b..facefde918f 100644
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp
@@ -40,7 +40,6 @@ class outputStream;
class HeapRegionRemSet : public CHeapObj<mtGC> {
friend class VMStructs;
- Mutex _m;
// A set of code blobs (nmethods) whose code contains pointers into
// the region that owns this RSet.
G1CodeRootSet _code_roots;
@@ -118,7 +117,6 @@ class HeapRegionRemSet : public CHeapObj {
// The region is being reclaimed; clear its remset, and any mention of
// entries for this region in other remsets.
void clear(bool only_cardset = false);
- void clear_locked(bool only_cardset = false);
void reset_table_scanner();
@@ -153,6 +151,7 @@ class HeapRegionRemSet : public CHeapObj {
void add_code_root(nmethod* nm);
void add_code_root_locked(nmethod* nm);
void remove_code_root(nmethod* nm);
+ void bulk_remove_code_roots();
// Applies blk->do_code_blob() to each of the entries in _code_roots
void code_roots_do(CodeBlobClosure* blk) const;
@@ -167,7 +166,6 @@ class HeapRegionRemSet : public CHeapObj {
// Returns true if the code roots contains the given
// nmethod.
bool code_roots_list_contains(nmethod* nm) {
- MutexLocker ml(&_m, Mutex::_no_safepoint_check_flag);
return _code_roots.contains(nm);
}
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
index c6462f8c9d7..cdbd0ac4813 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -120,11 +120,12 @@ void MutableSpace::initialize(MemRegion mr,
}
if (AlwaysPreTouch) {
+ size_t pretouch_page_size = UseLargePages ? page_size : os::vm_page_size();
PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
- page_size, pretouch_workers);
+ pretouch_page_size, pretouch_workers);
PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
- page_size, pretouch_workers);
+ pretouch_page_size, pretouch_workers);
}
// Remember where we stopped so that we can continue later.
@@ -235,7 +236,18 @@ void MutableSpace::oop_iterate(OopIterateClosure* cl) {
void MutableSpace::object_iterate(ObjectClosure* cl) {
HeapWord* p = bottom();
while (p < top()) {
- cl->do_object(cast_to_oop(p));
+ oop obj = cast_to_oop(p);
+ // When promotion-failure occurs during Young GC, eden/from space is not cleared,
+ // so we can encounter objects with "forwarded" markword.
+ // They are essentially dead, so skipping them
+ if (!obj->is_forwarded()) {
+ cl->do_object(obj);
+ }
+#ifdef ASSERT
+ else {
+ assert(obj->forwardee() != obj, "must not be self-forwarded");
+ }
+#endif
p += cast_to_oop(p)->size();
}
}
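
The object_iterate change above skips the callback for forwarded (dead) objects but still reads their size to advance to the next object. A toy standalone walk showing that shape; the ToyObject layout is invented for illustration and stands in for the mark-word check.

// Walk objects laid out back to back; skip the callback for forwarded ones,
// but still use their size to find the next object.
#include <cstdio>

struct ToyObject {
  size_t size_in_slots;  // object size, including this header slot
  bool   is_forwarded;   // promotion failure left a dead, already-copied object
};

int main() {
  ToyObject space[8] = {};
  space[0] = {3, false};  // live, slots 0..2
  space[3] = {2, true};   // forwarded (dead), slots 3..4
  space[5] = {3, false};  // live, slots 5..7
  const size_t top = 8;

  for (size_t p = 0; p < top; p += space[p].size_in_slots) {
    if (!space[p].is_forwarded) {
      std::printf("do_object at slot %zu (size %zu)\n", p, space[p].size_in_slots);
    }
    // The size is read even for forwarded objects so the cursor keeps moving.
  }
  return 0;
}
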
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 45e734231a5..40488093f7d 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -527,6 +527,14 @@ void ParallelScavengeHeap::resize_all_tlabs() {
CollectedHeap::resize_all_tlabs();
}
+void ParallelScavengeHeap::prune_scavengable_nmethods() {
+ ScavengableNMethods::prune_nmethods_not_into_young();
+}
+
+void ParallelScavengeHeap::prune_unlinked_nmethods() {
+ ScavengableNMethods::prune_unlinked_nmethods();
+}
+
// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
assert(!Heap_lock->owned_by_self(),
@@ -858,10 +866,6 @@ void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
ScavengableNMethods::verify_nmethod(nm);
}
-void ParallelScavengeHeap::prune_scavengable_nmethods() {
- ScavengableNMethods::prune_nmethods();
-}
-
GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
GrowableArray<GCMemoryManager*> memory_managers(2);
memory_managers.append(_young_manager);
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index abf87b0e019..e71dc9515aa 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -174,6 +174,7 @@ class ParallelScavengeHeap : public CollectedHeap {
void verify_nmethod(nmethod* nm) override;
void prune_scavengable_nmethods();
+ void prune_unlinked_nmethods();
size_t max_capacity() const override;
diff --git a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
index f97727c8bc8..e5817ca6f5c 100644
--- a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
+++ b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
@@ -169,22 +169,6 @@ void PSAdaptiveSizePolicy::major_collection_end(size_t amount_live,
_major_timer.start();
}
-// If the remaining free space in the old generation is less that
-// that expected to be needed by the next collection, do a full
-// collection now.
-bool PSAdaptiveSizePolicy::should_full_GC(size_t old_free_in_bytes) {
-
- // A similar test is done in the scavenge's should_attempt_scavenge(). If
- // this is changed, decide if that test should also be changed.
- bool result = padded_average_promoted_in_bytes() > (float) old_free_in_bytes;
- log_trace(gc, ergo)("%s after scavenge average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
- result ? "Full" : "No full",
- (size_t) average_promoted_in_bytes(),
- (size_t) padded_average_promoted_in_bytes(),
- old_free_in_bytes);
- return result;
-}
-
void PSAdaptiveSizePolicy::clear_generation_free_space_flags() {
AdaptiveSizePolicy::clear_generation_free_space_flags();
diff --git a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.hpp b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.hpp
index c39514922fc..1b058e2dd29 100644
--- a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.hpp
+++ b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.hpp
@@ -306,10 +306,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
}
float major_collection_slope() { return _major_collection_estimator->slope();}
- // Given the amount of live data in the heap, should we
- // perform a Full GC?
- bool should_full_GC(size_t live_in_old_gen);
-
// Calculates optimal (free) space sizes for both the young and old
// generations. Stores results in _eden_size and _promo_size.
// Takes current used space in all generations as input, as well
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index 64b2c0475f4..741d415fe91 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -42,6 +42,7 @@
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
@@ -1024,9 +1025,12 @@ void PSParallelCompact::post_compact()
ct->dirty_MemRegion(old_mr);
}
- // Delete metaspaces for unloaded class loaders and clean up loader_data graph
- ClassLoaderDataGraph::purge(/*at_safepoint*/true);
- DEBUG_ONLY(MetaspaceUtils::verify();)
+ {
+ // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+ GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
+ ClassLoaderDataGraph::purge(true /* at_safepoint */);
+ DEBUG_ONLY(MetaspaceUtils::verify();)
+ }
// Need to clear claim bits for the next mark.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -1764,6 +1768,10 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ref_processor()->start_discovery(maximum_heap_compaction);
+ ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
+ false /* unregister_nmethods_during_purge */,
+ false /* lock_codeblob_free_separately */);
+
marking_phase(&_gc_tracer);
bool max_on_system_gc = UseMaximumCompactionOnSystemGC
@@ -2052,19 +2060,39 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
{
GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
- CodeCache::UnloadingScope scope(is_alive_closure());
- // Follow system dictionary roots and unload classes.
- bool purged_class = SystemDictionary::do_unloading(&_gc_timer);
+ ClassUnloadingContext* ctx = ClassUnloadingContext::context();
+
+ bool unloading_occurred;
+ {
+ CodeCache::UnlinkingScope scope(is_alive_closure());
+
+ // Follow system dictionary roots and unload classes.
+ unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);
+
+ // Unload nmethods.
+ CodeCache::do_unloading(unloading_occurred);
+ }
- // Unload nmethods.
- CodeCache::do_unloading(purged_class);
+ {
+ GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
+ // Release unloaded nmethod's memory.
+ ctx->purge_nmethods();
+ }
+ {
+ GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
+ ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
+ ctx->free_code_blobs();
+ }
// Prune dead klasses from subklass/sibling/implementor lists.
- Klass::clean_weak_klass_links(purged_class);
+ Klass::clean_weak_klass_links(unloading_occurred);
// Clean JVMCI metadata handles.
- JVMCI_ONLY(JVMCI::do_unloading(purged_class));
+ JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
}
{
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index 6c35ed6b593..7da789261c0 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -239,8 +239,7 @@ bool PSScavenge::invoke() {
IsGCActiveMark mark;
const bool scavenge_done = PSScavenge::invoke_no_policy();
- const bool need_full_gc = !scavenge_done ||
- policy->should_full_GC(heap->old_gen()->free_in_bytes());
+ const bool need_full_gc = !scavenge_done;
bool full_gc_done = false;
if (UsePerfData) {
@@ -707,16 +706,16 @@ bool PSScavenge::should_attempt_scavenge() {
// Test to see if the scavenge will likely fail.
PSAdaptiveSizePolicy* policy = heap->size_policy();
- // A similar test is done in the policy's should_full_GC(). If this is
- // changed, decide if that test should also be changed.
size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
- bool result = promotion_estimate < old_gen->free_in_bytes();
+ // Total free size after possible old gen expansion
+ size_t free_in_old_gen = old_gen->max_gen_size() - old_gen->used_in_bytes();
+ bool result = promotion_estimate < free_in_old_gen;
log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
(size_t) policy->padded_average_promoted_in_bytes(),
- old_gen->free_in_bytes());
+ free_in_old_gen);
if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
}
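
For illustration, a minimal standalone sketch of the comparison introduced above — padded average promotion versus free space after possible old gen expansion — using made-up sizes (the real code reads these from PSOldGen and PSAdaptiveSizePolicy):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical sizes, in bytes.
      uint64_t old_max_gen_size    = 512ull * 1024 * 1024;  // old gen after possible expansion
      uint64_t old_used            = 400ull * 1024 * 1024;
      uint64_t young_used          = 100ull * 1024 * 1024;
      uint64_t padded_avg_promoted = 120ull * 1024 * 1024;

      // Estimate of how much a scavenge could promote.
      uint64_t promotion_estimate = std::min(padded_avg_promoted, young_used);
      // Free space assuming the old gen may expand up to its maximum size.
      uint64_t free_in_old_gen = old_max_gen_size - old_used;

      bool do_scavenge = promotion_estimate < free_in_old_gen;
      std::printf("promotion_estimate=%lluM free_in_old_gen=%lluM -> %s scavenge\n",
                  (unsigned long long)(promotion_estimate >> 20),
                  (unsigned long long)(free_in_old_gen >> 20),
                  do_scavenge ? "Do" : "Skip");
      return 0;
    }

With these numbers the scavenge proceeds; under the pre-change check against only the currently committed free space, the same situation could have skipped the scavenge and forced a full GC instead.
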
diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp
index 07c5396bc86..7d06fe588ac 100644
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp
@@ -35,6 +35,7 @@
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
@@ -195,19 +196,39 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
{
GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());
- CodeCache::UnloadingScope scope(&is_alive);
- // Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(gc_timer());
-
- // Unload nmethods.
- CodeCache::do_unloading(purged_class);
+ ClassUnloadingContext* ctx = ClassUnloadingContext::context();
+
+ bool unloading_occurred;
+ {
+ CodeCache::UnlinkingScope scope(&is_alive);
+
+ // Unload classes and purge the SystemDictionary.
+ unloading_occurred = SystemDictionary::do_unloading(gc_timer());
+
+ // Unload nmethods.
+ CodeCache::do_unloading(unloading_occurred);
+ }
+
+ {
+ GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
+ // Release unloaded nmethod's memory.
+ ctx->purge_nmethods();
+ }
+ {
+ GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
+ gch->prune_unlinked_nmethods();
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
+ ctx->free_code_blobs();
+ }
// Prune dead klasses from subklass/sibling/implementor lists.
- Klass::clean_weak_klass_links(purged_class);
+ Klass::clean_weak_klass_links(unloading_occurred);
// Clean JVMCI metadata handles.
- JVMCI_ONLY(JVMCI::do_unloading(purged_class));
+ JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
}
{
diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp
index 8f126c3129e..9361a4e5eb9 100644
--- a/src/hotspot/share/gc/serial/serialHeap.cpp
+++ b/src/hotspot/share/gc/serial/serialHeap.cpp
@@ -28,6 +28,7 @@
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genMemoryPools.hpp"
+#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "memory/universe.hpp"
diff --git a/src/hotspot/share/gc/shared/classUnloadingContext.cpp b/src/hotspot/share/gc/shared/classUnloadingContext.cpp
new file mode 100644
index 00000000000..6d9674ef801
--- /dev/null
+++ b/src/hotspot/share/gc/shared/classUnloadingContext.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/classLoaderData.inline.hpp"
+#include "code/nmethod.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/growableArray.hpp"
+
+ClassUnloadingContext* ClassUnloadingContext::_context = nullptr;
+
+ClassUnloadingContext::ClassUnloadingContext(uint num_workers,
+ bool unregister_nmethods_during_purge,
+ bool lock_codeblob_free_separately) :
+ _cld_head(nullptr),
+ _num_nmethod_unlink_workers(num_workers),
+ _unlinked_nmethods(nullptr),
+ _unregister_nmethods_during_purge(unregister_nmethods_during_purge),
+ _lock_codeblob_free_separately(lock_codeblob_free_separately) {
+
+ assert(_context == nullptr, "context already set");
+ _context = this;
+
+ assert(num_workers > 0, "must be");
+
+ _unlinked_nmethods = NEW_C_HEAP_ARRAY(NMethodSet*, num_workers, mtGC);
+ for (uint i = 0; i < num_workers; ++i) {
+ _unlinked_nmethods[i] = new NMethodSet();
+ }
+}
+
+ClassUnloadingContext::~ClassUnloadingContext() {
+ for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) {
+ delete _unlinked_nmethods[i];
+ }
+ FREE_C_HEAP_ARRAY(NMethodSet*, _unlinked_nmethods);
+
+ assert(_context == this, "context not set correctly");
+ _context = nullptr;
+}
+
+bool ClassUnloadingContext::has_unloaded_classes() const {
+ return _cld_head != nullptr;
+}
+
+void ClassUnloadingContext::register_unloading_class_loader_data(ClassLoaderData* cld) {
+ assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+
+ cld->unload();
+
+ cld->set_unloading_next(_cld_head);
+ _cld_head = cld;
+}
+
+void ClassUnloadingContext::purge_class_loader_data() {
+ for (ClassLoaderData* cld = _cld_head; cld != nullptr;) {
+ assert(cld->is_unloading(), "invariant");
+
+ ClassLoaderData* next = cld->unloading_next();
+ delete cld;
+ cld = next;
+ }
+}
+
+void ClassUnloadingContext::classes_unloading_do(void f(Klass* const)) {
+ assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+ for (ClassLoaderData* cld = _cld_head; cld != nullptr; cld = cld->unloading_next()) {
+ assert(cld->is_unloading(), "invariant");
+ cld->classes_do(f);
+ }
+}
+
+void ClassUnloadingContext::register_unlinked_nmethod(nmethod* nm) {
+ assert(_context != nullptr, "no context set");
+
+ assert(!nm->is_unlinked(), "Only register for unloading once");
+ assert(_num_nmethod_unlink_workers == 1 || Thread::current()->is_Worker_thread(), "must be worker thread if parallel");
+
+ uint worker_id = _num_nmethod_unlink_workers == 1 ? 0 : WorkerThread::worker_id();
+ assert(worker_id < _num_nmethod_unlink_workers, "larger than expected worker id %u", worker_id);
+
+ _unlinked_nmethods[worker_id]->append(nm);
+
+ nm->set_is_unlinked();
+}
+
+void ClassUnloadingContext::purge_nmethods() {
+ assert(_context != nullptr, "no context set");
+
+ size_t freed_memory = 0;
+
+ for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) {
+ NMethodSet* set = _unlinked_nmethods[i];
+ for (nmethod* nm : *set) {
+ freed_memory += nm->size();
+ nm->purge(false /* free_code_cache_data */, _unregister_nmethods_during_purge);
+ }
+ }
+
+ CodeCache::maybe_restart_compiler(freed_memory);
+}
+
+void ClassUnloadingContext::free_code_blobs() {
+ assert(_context != nullptr, "no context set");
+
+ // Sort nmethods before freeing to benefit from optimizations. If nmethods were
+ // collected in parallel, use a new temporary buffer for the result, otherwise
+ // sort in-place.
+ NMethodSet* nmethod_set = nullptr;
+
+ bool is_parallel = _num_nmethod_unlink_workers > 1;
+
+ // Merge all collected nmethods into a huge array.
+ if (is_parallel) {
+ int num_nmethods = 0;
+
+ for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) {
+ num_nmethods += _unlinked_nmethods[i]->length();
+ }
+ nmethod_set = new NMethodSet(num_nmethods);
+ for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) {
+ nmethod_set->appendAll(_unlinked_nmethods[i]);
+ }
+ } else {
+ nmethod_set = _unlinked_nmethods[0];
+ }
+
+ // Sort by ascending address.
+ auto sort_nmethods = [] (nmethod** a, nmethod** b) -> int {
+ uintptr_t u_a = (uintptr_t)*a;
+ uintptr_t u_b = (uintptr_t)*b;
+ if (u_a == u_b) return 0;
+ if (u_a < u_b) return -1;
+ return 1;
+ };
+ nmethod_set->sort(sort_nmethods);
+
+ // And free. Duplicate loop for clarity depending on where we want the locking.
+ if (_lock_codeblob_free_separately) {
+ for (nmethod* nm : *nmethod_set) {
+ MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ CodeCache::free(nm);
+ }
+ } else {
+ MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ for (nmethod* nm : *nmethod_set) {
+ CodeCache::free(nm);
+ }
+ }
+
+ if (is_parallel) {
+ delete nmethod_set;
+ }
+}
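
The free_code_blobs() path above merges the per-worker lists, sorts by ascending nmethod address (presumably so the code heap is walked in order when freeing), and then frees either under one CodeCache_lock hold or one hold per blob. A simplified, self-contained sketch of that merge-sort-free shape, using std::vector and std::sort as stand-ins for GrowableArrayCHeap and a hypothetical FakeNMethod in place of real nmethods:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct FakeNMethod { int size; };   // hypothetical stand-in for nmethod

    // Merge the per-worker lists, sort by ascending address, then free them all;
    // in HotSpot the delete below is CodeCache::free(nm) under CodeCache_lock.
    void purge_all(std::vector<std::vector<FakeNMethod*>>& per_worker) {
      std::vector<FakeNMethod*> merged;
      for (const auto& worker_list : per_worker) {
        merged.insert(merged.end(), worker_list.begin(), worker_list.end());
      }
      std::sort(merged.begin(), merged.end(),
                [](const FakeNMethod* a, const FakeNMethod* b) {
                  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
                });
      for (FakeNMethod* nm : merged) {
        delete nm;
      }
    }

    int main() {
      std::vector<std::vector<FakeNMethod*>> per_worker(2);
      per_worker[0].push_back(new FakeNMethod{100});
      per_worker[1].push_back(new FakeNMethod{200});
      purge_all(per_worker);
      return 0;
    }
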
diff --git a/src/hotspot/share/gc/shared/classUnloadingContext.hpp b/src/hotspot/share/gc/shared/classUnloadingContext.hpp
new file mode 100644
index 00000000000..30930967d38
--- /dev/null
+++ b/src/hotspot/share/gc/shared/classUnloadingContext.hpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_CLASSUNLOADINGCONTEXT_HPP
+#define SHARE_GC_SHARED_CLASSUNLOADINGCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/growableArray.hpp"
+
+class ClassLoaderData;
+class Klass;
+class nmethod;
+
+class ClassUnloadingContext : public CHeapObj<mtGC> {
+ static ClassUnloadingContext* _context;
+
+ ClassLoaderData* volatile _cld_head;
+
+ const uint _num_nmethod_unlink_workers;
+
+ using NMethodSet = GrowableArrayCHeap<nmethod*, mtGC>;
+ NMethodSet** _unlinked_nmethods;
+
+ bool _unregister_nmethods_during_purge;
+ bool _lock_codeblob_free_separately;
+
+public:
+ static ClassUnloadingContext* context() { assert(_context != nullptr, "context not set"); return _context; }
+
+ // num_nmethod_unlink_workers configures the maximum number of threads unlinking
+ // nmethods.
+ // unregister_nmethods_during_purge determines whether unloaded nmethods should
+ // be unregistered from the garbage collector during purge. If not, the caller
+ // is responsible for doing that later.
+ // lock_codeblob_free_separately determines whether freeing the code blobs takes
+ // the CodeCache_lock during the whole operation (=false) or per code blob
+ // free operation (=true).
+ ClassUnloadingContext(uint num_nmethod_unlink_workers,
+ bool unregister_nmethods_during_purge,
+ bool lock_codeblob_free_separately);
+ ~ClassUnloadingContext();
+
+ bool has_unloaded_classes() const;
+
+ void register_unloading_class_loader_data(ClassLoaderData* cld);
+ void purge_class_loader_data();
+
+ void classes_unloading_do(void f(Klass* const));
+
+ // Register unloading nmethods, potentially in parallel.
+ void register_unlinked_nmethod(nmethod* nm);
+ void purge_nmethods();
+ void free_code_blobs();
+
+ void purge_and_free_nmethods() {
+ purge_nmethods();
+ free_code_blobs();
+ }
+};
+
+#endif // SHARE_GC_SHARED_CLASSUNLOADINGCONTEXT_HPP
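
ClassUnloadingContext is created on the GC's stack for the duration of a collection and looked up via the static context() accessor from wherever nmethods are unlinked (see the call sites elsewhere in this patch). A self-contained toy model of that RAII-plus-static-pointer shape — the names here are illustrative, not HotSpot API:

    #include <cassert>
    #include <cstdio>

    // Toy model: one context object lives on the GC's stack for a collection and
    // is reachable through a static pointer; the destructor clears it again.
    class UnloadingContext {
      static UnloadingContext* _context;
      int _num_workers;
    public:
      static UnloadingContext* context() { assert(_context != nullptr); return _context; }

      explicit UnloadingContext(int num_workers) : _num_workers(num_workers) {
        assert(_context == nullptr);   // only one unloading cycle at a time
        _context = this;
      }
      ~UnloadingContext() { _context = nullptr; }

      int num_workers() const { return _num_workers; }
    };

    UnloadingContext* UnloadingContext::_context = nullptr;

    int main() {
      UnloadingContext ctx(4);   // created by the GC for the duration of the cycle
      // ...code reached from the GC (e.g. nmethod unlinking) finds it like this:
      std::printf("workers: %d\n", UnloadingContext::context()->num_workers());
      return 0;                  // destructor clears the static pointer again
    }
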
diff --git a/src/hotspot/share/gc/shared/genArguments.cpp b/src/hotspot/share/gc/shared/genArguments.cpp
index c53fb72c459..6d569d6f717 100644
--- a/src/hotspot/share/gc/shared/genArguments.cpp
+++ b/src/hotspot/share/gc/shared/genArguments.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -276,6 +276,9 @@ void GenArguments::initialize_size_info() {
// and maximum heap size since no explicit flags exist
// for setting the old generation maximum.
MaxOldSize = MAX2(MaxHeapSize - max_young_size, GenAlignment);
+ MinOldSize = MIN3(MaxOldSize,
+ InitialHeapSize - initial_young_size,
+ MinHeapSize - MinNewSize);
size_t initial_old_size = OldSize;
@@ -287,9 +290,8 @@ void GenArguments::initialize_size_info() {
// with the overall heap size). In either case make
// the minimum, maximum and initial sizes consistent
// with the young sizes and the overall heap sizes.
- MinOldSize = GenAlignment;
initial_old_size = clamp(InitialHeapSize - initial_young_size, MinOldSize, MaxOldSize);
- // MaxOldSize has already been made consistent above.
+ // MaxOldSize and MinOldSize have already been made consistent above.
} else {
// OldSize has been explicitly set on the command line. Use it
// for the initial size but make sure the minimum allow a young
@@ -304,9 +306,10 @@ void GenArguments::initialize_size_info() {
", -XX:OldSize flag is being ignored",
MaxHeapSize);
initial_old_size = MaxOldSize;
+ } else if (initial_old_size < MinOldSize) {
+ log_warning(gc, ergo)("Inconsistency between initial old size and minimum old size");
+ MinOldSize = initial_old_size;
}
-
- MinOldSize = MIN2(initial_old_size, MinHeapSize - MinNewSize);
}
// The initial generation sizes should match the initial heap size,
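
A small worked example of the MinOldSize computation added above, with hypothetical flag values (GenAlignment is a placeholder here; the real value is platform dependent):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Hypothetical flag values, in MB, to show the MIN3 above at work.
      size_t MaxHeapSize = 4096, InitialHeapSize = 1024, MinHeapSize = 256;
      size_t max_young_size = 1365, initial_young_size = 341, MinNewSize = 85;
      size_t GenAlignment = 2;   // placeholder; the real value is platform dependent

      size_t MaxOldSize = std::max(MaxHeapSize - max_young_size, GenAlignment);
      size_t MinOldSize = std::min({MaxOldSize,
                                    InitialHeapSize - initial_young_size,
                                    MinHeapSize - MinNewSize});

      std::printf("MaxOldSize=%zuM MinOldSize=%zuM\n", MaxOldSize, MinOldSize);
      // Prints MaxOldSize=2731M MinOldSize=171M: the minimum old size is now bounded
      // by the minimum heap rather than pinned to GenAlignment.
      return 0;
    }
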
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
index 36f229ab817..d67a4cb636b 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -36,6 +36,7 @@
#include "gc/serial/markSweep.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
@@ -555,6 +556,10 @@ void GenCollectedHeap::do_collection(bool full,
CodeCache::on_gc_marking_cycle_start();
+ ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
+ false /* unregister_nmethods_during_purge */,
+ false /* lock_codeblob_free_separately */);
+
collect_generation(_old_gen,
full,
size,
@@ -570,7 +575,7 @@ void GenCollectedHeap::do_collection(bool full,
_young_gen->compute_new_size();
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
- ClassLoaderDataGraph::purge(/*at_safepoint*/true);
+ ClassLoaderDataGraph::purge(true /* at_safepoint */);
DEBUG_ONLY(MetaspaceUtils::verify();)
// Need to clear claim bits for the next mark.
@@ -611,7 +616,11 @@ void GenCollectedHeap::verify_nmethod(nmethod* nm) {
}
void GenCollectedHeap::prune_scavengable_nmethods() {
- ScavengableNMethods::prune_nmethods();
+ ScavengableNMethods::prune_nmethods_not_into_young();
+}
+
+void GenCollectedHeap::prune_unlinked_nmethods() {
+ ScavengableNMethods::prune_unlinked_nmethods();
}
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
index f279c7588de..271703b6dc0 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
@@ -205,6 +205,7 @@ class GenCollectedHeap : public CollectedHeap {
void verify_nmethod(nmethod* nm) override;
void prune_scavengable_nmethods();
+ void prune_unlinked_nmethods();
// Iteration functions.
void oop_iterate(OopIterateClosure* cl);
diff --git a/src/hotspot/share/gc/shared/scavengableNMethods.cpp b/src/hotspot/share/gc/shared/scavengableNMethods.cpp
index ec9983da4a9..9f961ff4bf8 100644
--- a/src/hotspot/share/gc/shared/scavengableNMethods.cpp
+++ b/src/hotspot/share/gc/shared/scavengableNMethods.cpp
@@ -59,18 +59,8 @@ void ScavengableNMethods::register_nmethod(nmethod* nm) {
}
void ScavengableNMethods::unregister_nmethod(nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
-
- if (gc_data(nm).on_list()) {
- nmethod* prev = nullptr;
- for (nmethod* cur = _head; cur != nullptr; cur = gc_data(cur).next()) {
- if (cur == nm) {
- unlist_nmethod(cur, prev);
- return;
- }
- prev = cur;
- }
- }
+ // All users of this method only unregister in bulk during code unloading.
+ ShouldNotReachHere();
}
#ifndef PRODUCT
@@ -172,10 +162,37 @@ void ScavengableNMethods::nmethods_do_and_prune(CodeBlobToOopClosure* cl) {
debug_only(verify_unlisted_nmethods(nullptr));
}
-void ScavengableNMethods::prune_nmethods() {
+void ScavengableNMethods::prune_nmethods_not_into_young() {
nmethods_do_and_prune(nullptr /* No closure */);
}
+void ScavengableNMethods::prune_unlinked_nmethods() {
+ assert_locked_or_safepoint(CodeCache_lock);
+
+ debug_only(mark_on_list_nmethods());
+
+ nmethod* prev = nullptr;
+ nmethod* cur = _head;
+ while (cur != nullptr) {
+ ScavengableNMethodsData data = gc_data(cur);
+ debug_only(data.clear_marked());
+ assert(data.on_list(), "else shouldn't be on this list");
+
+ nmethod* const next = data.next();
+
+ if (cur->is_unlinked()) {
+ unlist_nmethod(cur, prev);
+ } else {
+ prev = cur;
+ }
+
+ cur = next;
+ }
+
+ // Check for stray marks.
+ debug_only(verify_unlisted_nmethods(nullptr));
+}
+
// Walk the list of methods which might contain oops to the java heap.
void ScavengableNMethods::nmethods_do(CodeBlobToOopClosure* cl) {
nmethods_do_and_prune(cl);
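
prune_unlinked_nmethods() above is the classic prev/cur walk over a singly linked list that splices out nodes matching a predicate. A generic, runnable sketch of the same loop shape (plain C++, not HotSpot types):

    #include <cstdio>

    struct Node {
      int   value;
      bool  dead;    // stands in for nmethod::is_unlinked()
      Node* next;
    };

    // Splice out every node whose 'dead' flag is set, keeping a trailing prev pointer.
    Node* prune(Node* head) {
      Node* prev = nullptr;
      Node* cur  = head;
      while (cur != nullptr) {
        Node* const next = cur->next;
        if (cur->dead) {
          if (prev == nullptr) { head = next; } else { prev->next = next; }
          // The real code only detaches the nmethod here; its memory is released
          // later by ClassUnloadingContext::free_code_blobs().
        } else {
          prev = cur;
        }
        cur = next;
      }
      return head;
    }

    int main() {
      Node c{3, false, nullptr}, b{2, true, &c}, a{1, false, &b};
      for (Node* n = prune(&a); n != nullptr; n = n->next) std::printf("%d ", n->value);
      std::printf("\n");   // prints: 1 3
      return 0;
    }
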
@@ -218,8 +235,9 @@ void ScavengableNMethods::mark_on_list_nmethods() {
nmethod* nm = iter.method();
ScavengableNMethodsData data = gc_data(nm);
assert(data.not_marked(), "clean state");
- if (data.on_list())
+ if (data.on_list()) {
data.set_marked();
+ }
}
}
@@ -230,7 +248,10 @@ void ScavengableNMethods::verify_unlisted_nmethods(CodeBlobClosure* cl) {
while(iter.next()) {
nmethod* nm = iter.method();
- verify_nmethod(nm);
+ // Cannot verify already unlinked nmethods, as they are partially invalid.
+ if (!nm->is_unlinked()) {
+ verify_nmethod(nm);
+ }
if (cl != nullptr && !gc_data(nm).on_list()) {
cl->do_code_blob(nm);
diff --git a/src/hotspot/share/gc/shared/scavengableNMethods.hpp b/src/hotspot/share/gc/shared/scavengableNMethods.hpp
index 4852e6d32fb..94d594cd529 100644
--- a/src/hotspot/share/gc/shared/scavengableNMethods.hpp
+++ b/src/hotspot/share/gc/shared/scavengableNMethods.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,8 +46,10 @@ class ScavengableNMethods : public AllStatic {
static void unregister_nmethod(nmethod* nm);
static void verify_nmethod(nmethod* nm);
- // Remove nmethods that no longer have scavengable oops.
- static void prune_nmethods();
+ // Remove nmethods that no longer have oops into young gen.
+ static void prune_nmethods_not_into_young();
+ // Remove unlinked (dead) nmethods.
+ static void prune_unlinked_nmethods();
// Apply closure to every scavengable nmethod.
// Remove nmethods that no longer have scavengable oops.
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
index da75706ac4c..acf4b8f0860 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
@@ -1728,11 +1728,26 @@ bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoo
return true;
}
+bool ShenandoahBarrierC2Support::merge_point_safe(Node* region) {
+ for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
+ Node* n = region->fast_out(i);
+ if (n->is_LoadStore()) {
+ // Splitting a LoadStore node through a phi causes it to lose its SCMemProj: the split-if code doesn't have support
+ // for a LoadStore at the region the if is split through, because that's not expected to happen (LoadStore nodes
+ // should be between barrier nodes). It does, however, happen with Shenandoah, because barriers can get
+ // expanded around a LoadStore node.
+ return false;
+ }
+ }
+ return true;
+}
+
+
void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
assert(is_heap_stable_test(n), "no other tests");
if (identical_backtoback_ifs(n, phase)) {
Node* n_ctrl = n->in(0);
- if (phase->can_split_if(n_ctrl)) {
+ if (phase->can_split_if(n_ctrl) && merge_point_safe(n_ctrl)) {
IfNode* dom_if = phase->idom(n_ctrl)->as_If();
if (is_heap_stable_test(n)) {
Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp
index 032f338aa88..7a6ed74f563 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp
@@ -65,6 +65,7 @@ class ShenandoahBarrierC2Support : public AllStatic {
static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);
static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase);
+ static bool merge_point_safe(Node* region);
static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
index 92d447258f2..c5b6d787b95 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
@@ -26,6 +26,7 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
@@ -235,7 +236,7 @@ void ShenandoahCodeRoots::unlink(WorkerThreads* workers, bool unloading_occurred
void ShenandoahCodeRoots::purge() {
assert(ShenandoahHeap::heap()->unload_classes(), "Only when running concurrent class unloading");
- CodeCache::flush_unlinked_nmethods();
+ ClassUnloadingContext::context()->purge_and_free_nmethods();
}
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index f1dcbf5a8bc..91fc66b1204 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/universe.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
@@ -467,6 +468,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_num_regions(0),
_regions(nullptr),
_update_refs_iterator(this),
+ _gc_state_changed(false),
_control_thread(nullptr),
_shenandoah_policy(policy),
_gc_mode(nullptr),
@@ -1681,27 +1683,32 @@ void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
_update_refs_iterator.reset();
}
-void ShenandoahHeap::set_gc_state_all_threads(char state) {
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- ShenandoahThreadLocalData::set_gc_state(t, state);
+void ShenandoahHeap::propagate_gc_state_to_java_threads() {
+ assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
+ if (_gc_state_changed) {
+ _gc_state_changed = false;
+ char state = gc_state();
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ ShenandoahThreadLocalData::set_gc_state(t, state);
+ }
}
}
-void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
+void ShenandoahHeap::set_gc_state(uint mask, bool value) {
+ assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
_gc_state.set_cond(mask, value);
- set_gc_state_all_threads(_gc_state.raw_value());
+ _gc_state_changed = true;
}
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
assert(!has_forwarded_objects(), "Not expected before/after mark phase");
- set_gc_state_mask(MARKING, in_progress);
+ set_gc_state(MARKING, in_progress);
ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
- set_gc_state_mask(EVACUATION, in_progress);
+ set_gc_state(EVACUATION, in_progress);
}
void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
@@ -1713,7 +1720,7 @@ void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
}
void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
- set_gc_state_mask(WEAK_ROOTS, cond);
+ set_gc_state(WEAK_ROOTS, cond);
}
GCTracer* ShenandoahHeap::tracer() {
@@ -1761,27 +1768,35 @@ void ShenandoahHeap::stop() {
void ShenandoahHeap::stw_unload_classes(bool full_gc) {
if (!unload_classes()) return;
+ ClassUnloadingContext ctx(_workers->active_workers(),
+ true /* unregister_nmethods_during_purge */,
+ false /* lock_codeblob_free_separately */);
+
// Unload classes and purge SystemDictionary.
{
ShenandoahPhaseTimings::Phase phase = full_gc ?
ShenandoahPhaseTimings::full_gc_purge_class_unload :
ShenandoahPhaseTimings::degen_gc_purge_class_unload;
ShenandoahIsAliveSelector is_alive;
- CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
- ShenandoahGCPhase gc_phase(phase);
- ShenandoahGCWorkerPhase worker_phase(phase);
- bool purged_class = SystemDictionary::do_unloading(gc_timer());
-
- uint num_workers = _workers->active_workers();
- ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
- _workers->run_task(&unlink_task);
+ {
+ CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
+ ShenandoahGCPhase gc_phase(phase);
+ ShenandoahGCWorkerPhase worker_phase(phase);
+ bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
+
+ uint num_workers = _workers->active_workers();
+ ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
+ _workers->run_task(&unlink_task);
+ }
+ // Release unloaded nmethods' memory.
+ ClassUnloadingContext::context()->purge_and_free_nmethods();
}
{
ShenandoahGCPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge_cldg :
ShenandoahPhaseTimings::degen_gc_purge_cldg);
- ClassLoaderDataGraph::purge(/*at_safepoint*/true);
+ ClassLoaderDataGraph::purge(true /* at_safepoint */);
}
// Resize and verify metaspace
MetaspaceGC::compute_new_size();
@@ -1832,7 +1847,7 @@ void ShenandoahHeap::parallel_cleaning(bool full_gc) {
}
void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
- set_gc_state_mask(HAS_FORWARDED, cond);
+ set_gc_state(HAS_FORWARDED, cond);
}
void ShenandoahHeap::set_unload_classes(bool uc) {
@@ -1871,7 +1886,7 @@ void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
}
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
- set_gc_state_mask(UPDATEREFS, in_progress);
+ set_gc_state(UPDATEREFS, in_progress);
}
void ShenandoahHeap::register_nmethod(nmethod* nm) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 642faef807e..bc9a6eed701 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -281,6 +281,7 @@ class ShenandoahHeap : public CollectedHeap {
};
private:
+ bool _gc_state_changed;
ShenandoahSharedBitmap _gc_state;
ShenandoahSharedFlag _degenerated_gc_in_progress;
ShenandoahSharedFlag _full_gc_in_progress;
@@ -288,12 +289,20 @@ class ShenandoahHeap : public CollectedHeap {
ShenandoahSharedFlag _progress_last_gc;
ShenandoahSharedFlag _concurrent_strong_root_in_progress;
- void set_gc_state_all_threads(char state);
- void set_gc_state_mask(uint mask, bool value);
+ // This updates the singular, global gc state. This must happen at a safepoint.
+ void set_gc_state(uint mask, bool value);
public:
char gc_state() const;
+ // This copies the global gc state into a thread local variable for java threads.
+ // It is primarily intended to support quick access at barriers.
+ void propagate_gc_state_to_java_threads();
+
+ // This is public to support assertions that the state hasn't been changed outside of
+ // a safepoint and that any changes were propagated to Java threads after the safepoint.
+ bool has_gc_state_changed() const { return _gc_state_changed; }
+
void set_concurrent_mark_in_progress(bool in_progress);
void set_evacuation_in_progress(bool in_progress);
void set_update_refs_in_progress(bool in_progress);
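
The pattern declared above defers the per-thread update: set_gc_state() only flips bits in the global state and marks it dirty, and propagate_gc_state_to_java_threads() performs a single walk over the Java threads before the safepoint ends. A self-contained toy model of that deferred-propagation shape (illustrative names, not HotSpot API):

    #include <cstdio>
    #include <vector>

    struct Heap {
      int  global_state  = 0;
      bool state_changed = false;
      std::vector<int> per_thread_state;   // stand-in for ShenandoahThreadLocalData

      void set_gc_state(int mask, bool value) {       // called only at a safepoint
        global_state = value ? (global_state | mask) : (global_state & ~mask);
        state_changed = true;
      }

      void propagate_gc_state_to_threads() {          // called before the safepoint ends
        if (!state_changed) return;
        state_changed = false;
        for (int& s : per_thread_state) s = global_state;
      }
    };

    int main() {
      Heap heap;
      heap.per_thread_state.resize(3, 0);
      heap.set_gc_state(1 /* MARKING */, true);
      heap.set_gc_state(4 /* WEAK_ROOTS */, true);    // still no walk over threads
      heap.propagate_gc_state_to_threads();           // one walk covers both changes
      std::printf("thread 0 sees gc state %d\n", heap.per_thread_state[0]);   // prints 5
      return 0;
    }
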
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
index 422595e9313..9eec573cc56 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
@@ -89,6 +89,7 @@ class ShenandoahThreadLocalData {
}
static char gc_state(Thread* thread) {
+ assert(thread->is_Java_thread(), "GC state is only synchronized to java threads");
return data(thread)->_gc_state;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp
index afd10efdfdd..bb13e9b8e22 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp
@@ -30,6 +30,7 @@
#include "code/codeCache.hpp"
#include "code/dependencyContext.hpp"
#include "gc/shared/gcBehaviours.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
@@ -138,6 +139,10 @@ void ShenandoahUnload::unload() {
assert(ClassUnloading, "Filtered by caller");
assert(heap->is_concurrent_weak_root_in_progress(), "Filtered by caller");
+ ClassUnloadingContext ctx(heap->workers()->active_workers(),
+ true /* unregister_nmethods_during_purge */,
+ true /* lock_codeblob_free_separately */);
+
// Unlink stale metadata and nmethods
{
ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_class_unload_unlink);
@@ -181,7 +186,7 @@ void ShenandoahUnload::unload() {
{
ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_class_unload_purge_cldg);
- ClassLoaderDataGraph::purge(/*at_safepoint*/false);
+ ClassLoaderDataGraph::purge(false /* at_safepoint */);
}
{
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index 4a97e599f3e..eeeb1dcad19 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -35,12 +35,23 @@
#include "interpreter/oopMapCache.hpp"
#include "memory/universe.hpp"
+bool VM_ShenandoahOperation::doit_prologue() {
+ assert(!ShenandoahHeap::heap()->has_gc_state_changed(), "GC State can only be changed on a safepoint.");
+ return true;
+}
+
+void VM_ShenandoahOperation::doit_epilogue() {
+ assert(!ShenandoahHeap::heap()->has_gc_state_changed(), "GC State was not synchronized to java threads.");
+}
+
bool VM_ShenandoahReferenceOperation::doit_prologue() {
+ VM_ShenandoahOperation::doit_prologue();
Heap_lock->lock();
return true;
}
void VM_ShenandoahReferenceOperation::doit_epilogue() {
+ VM_ShenandoahOperation::doit_epilogue();
OopMapCache::cleanup_old_entries();
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
@@ -51,34 +62,41 @@ void VM_ShenandoahReferenceOperation::doit_epilogue() {
void VM_ShenandoahInitMark::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Init Mark", SvcGCMarker::CONCURRENT);
_gc->entry_init_mark();
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
}
void VM_ShenandoahFinalMarkStartEvac::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Final Mark", SvcGCMarker::CONCURRENT);
_gc->entry_final_mark();
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
}
void VM_ShenandoahFullGC::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Full GC", SvcGCMarker::FULL);
_full_gc->entry_full(_gc_cause);
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
}
void VM_ShenandoahDegeneratedGC::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Degenerated GC", SvcGCMarker::CONCURRENT);
_gc->entry_degenerated();
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
}
void VM_ShenandoahInitUpdateRefs::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Init Update Refs", SvcGCMarker::CONCURRENT);
_gc->entry_init_updaterefs();
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
}
void VM_ShenandoahFinalUpdateRefs::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Final Update Refs", SvcGCMarker::CONCURRENT);
_gc->entry_final_updaterefs();
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
}
void VM_ShenandoahFinalRoots::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Final Roots", SvcGCMarker::CONCURRENT);
_gc->entry_final_roots();
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
index 65ddd8b1f11..1b78766935f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
@@ -47,14 +47,16 @@ class VM_ShenandoahOperation : public VM_Operation {
uint _gc_id;
public:
VM_ShenandoahOperation() : _gc_id(GCId::current()) {};
- virtual bool skip_thread_oop_barriers() const { return true; }
+ bool skip_thread_oop_barriers() const override { return true; }
+ bool doit_prologue() override;
+ void doit_epilogue() override;
};
class VM_ShenandoahReferenceOperation : public VM_ShenandoahOperation {
public:
VM_ShenandoahReferenceOperation() : VM_ShenandoahOperation() {};
- bool doit_prologue();
- void doit_epilogue();
+ bool doit_prologue() override;
+ void doit_epilogue() override;
};
class VM_ShenandoahInitMark: public VM_ShenandoahOperation {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index 1d5d962a4ec..f67cafdb8fe 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -620,6 +620,8 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label,
guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens");
guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize");
+ ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+
// Avoid side-effect of changing workers' active thread count, but bypass concurrent/parallel protocol check
ShenandoahPushWorkerScope verify_worker_scope(_heap->workers(), _heap->max_workers(), false /*bypass check*/);
diff --git a/src/hotspot/share/gc/x/xHeap.cpp b/src/hotspot/share/gc/x/xHeap.cpp
index a242a8063be..14661330e13 100644
--- a/src/hotspot/share/gc/x/xHeap.cpp
+++ b/src/hotspot/share/gc/x/xHeap.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/gc_globals.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/x/xAddress.inline.hpp"
@@ -320,6 +321,10 @@ void XHeap::process_non_strong_references() {
// Process weak roots
_weak_roots_processor.process_weak_roots();
+ ClassUnloadingContext ctx(_workers.active_workers(),
+ true /* unregister_nmethods_during_purge */,
+ true /* lock_codeblob_free_separately */);
+
// Unlink stale metadata and nmethods
_unload.unlink();
diff --git a/src/hotspot/share/gc/x/xNMethod.cpp b/src/hotspot/share/gc/x/xNMethod.cpp
index d86828aa847..613e1908502 100644
--- a/src/hotspot/share/gc/x/xNMethod.cpp
+++ b/src/hotspot/share/gc/x/xNMethod.cpp
@@ -27,6 +27,7 @@
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/x/xBarrier.inline.hpp"
#include "gc/x/xGlobals.hpp"
@@ -362,5 +363,5 @@ void XNMethod::unlink(XWorkers* workers, bool unloading_occurred) {
}
void XNMethod::purge() {
- CodeCache::flush_unlinked_nmethods();
+ ClassUnloadingContext::context()->purge_and_free_nmethods();
}
diff --git a/src/hotspot/share/gc/z/zAddress.inline.hpp b/src/hotspot/share/gc/z/zAddress.inline.hpp
index 7088a71ef7b..39fa7b6e0b3 100644
--- a/src/hotspot/share/gc/z/zAddress.inline.hpp
+++ b/src/hotspot/share/gc/z/zAddress.inline.hpp
@@ -133,6 +133,10 @@ inline bool operator<(zoffset first, zoffset_end second) {
return untype(first) < untype(second);
}
+inline bool operator<=(zoffset_end first, zoffset second) {
+ return untype(first) <= untype(second);
+}
+
inline bool operator>(zoffset first, zoffset_end second) {
return untype(first) > untype(second);
}
diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp
index e4de7a37040..2ec87a76156 100644
--- a/src/hotspot/share/gc/z/zArray.inline.hpp
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp
@@ -96,7 +96,7 @@ ZActivatedArray::ZActivatedArray(bool locked)
_array() {}
template <typename ItemT>
-ZActivatedArray<ItemT>::~ZActivatedArray<ItemT>() {
+ZActivatedArray<ItemT>::~ZActivatedArray() {
FreeHeap(_lock);
}
diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp
index 1b4afd4eefb..0b131c65248 100644
--- a/src/hotspot/share/gc/z/zGeneration.cpp
+++ b/src/hotspot/share/gc/z/zGeneration.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/nmethod.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
@@ -1320,6 +1321,10 @@ void ZGenerationOld::process_non_strong_references() {
// Process weak roots
_weak_roots_processor.process_weak_roots();
+ ClassUnloadingContext ctx(_workers.active_workers(),
+ true /* unregister_nmethods_during_purge */,
+ true /* lock_codeblob_free_separately */);
+
// Unlink stale metadata and nmethods
_unload.unlink();
diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp
index e4c24660365..71d514face1 100644
--- a/src/hotspot/share/gc/z/zNMethod.cpp
+++ b/src/hotspot/share/gc/z/zNMethod.cpp
@@ -28,6 +28,7 @@
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zArray.inline.hpp"
@@ -443,5 +444,5 @@ void ZNMethod::unlink(ZWorkers* workers, bool unloading_occurred) {
}
void ZNMethod::purge() {
- CodeCache::flush_unlinked_nmethods();
+ ClassUnloadingContext::context()->purge_and_free_nmethods();
}
diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.hpp
index 2244732a146..e5e0a19d1c5 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp
@@ -32,16 +32,16 @@
class ZPhysicalMemorySegment : public CHeapObj<mtGC> {
private:
- zoffset _start;
- zoffset _end;
- bool _committed;
+ zoffset _start;
+ zoffset_end _end;
+ bool _committed;
public:
ZPhysicalMemorySegment();
ZPhysicalMemorySegment(zoffset start, size_t size, bool committed);
zoffset start() const;
- zoffset end() const;
+ zoffset_end end() const;
size_t size() const;
bool is_committed() const;
diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
index cbfd3842b35..744c68daa7e 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
@@ -31,19 +31,19 @@
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment()
: _start(zoffset(UINTPTR_MAX)),
- _end(zoffset(UINTPTR_MAX)),
+ _end(zoffset_end(UINTPTR_MAX)),
_committed(false) {}
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(zoffset start, size_t size, bool committed)
: _start(start),
- _end(start + size),
+ _end(to_zoffset_end(start, size)),
_committed(committed) {}
inline zoffset ZPhysicalMemorySegment::start() const {
return _start;
}
-inline zoffset ZPhysicalMemorySegment::end() const {
+inline zoffset_end ZPhysicalMemorySegment::end() const {
return _end;
}
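
Changing _end from zoffset to zoffset_end leans on ZGC's strongly typed offsets: start-of-range and one-past-the-end values get distinct types, and only the comparisons that make sense are defined (such as the new operator<= above). A minimal standalone sketch of the idea, under the assumption that this is the intent — the wrapper types here are illustrative, not ZGC's actual definitions:

    #include <cstdint>
    #include <cstdio>

    // Illustrative wrappers only; ZGC's real zoffset/zoffset_end are richer.
    enum class offset     : uintptr_t {};
    enum class offset_end : uintptr_t {};

    inline bool operator<=(offset_end first, offset second) {
      return static_cast<uintptr_t>(first) <= static_cast<uintptr_t>(second);
    }

    struct Segment {
      offset     start;
      offset_end end;   // exclusive upper bound, as in ZPhysicalMemorySegment
    };

    int main() {
      Segment a{offset{0x1000}, offset_end{0x2000}};
      Segment b{offset{0x2000}, offset_end{0x3000}};
      std::printf("a ends at or before b starts: %s\n", (a.end <= b.start) ? "yes" : "no");
      return 0;
    }
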
diff --git a/src/hotspot/share/interpreter/invocationCounter.cpp b/src/hotspot/share/interpreter/invocationCounter.cpp
index 965369dc167..fecad2c2f1a 100644
--- a/src/hotspot/share/interpreter/invocationCounter.cpp
+++ b/src/hotspot/share/interpreter/invocationCounter.cpp
@@ -59,10 +59,6 @@ void InvocationCounter::reset() {
update(0);
}
-void InvocationCounter::decay() {
- update(count() >> 1);
-}
-
void InvocationCounter::print() {
uint counter = raw_counter();
tty->print_cr("invocation count: up = %d, limit = %d, carry = %s",
diff --git a/src/hotspot/share/interpreter/invocationCounter.hpp b/src/hotspot/share/interpreter/invocationCounter.hpp
index 3732ed28d48..381d4e9efa8 100644
--- a/src/hotspot/share/interpreter/invocationCounter.hpp
+++ b/src/hotspot/share/interpreter/invocationCounter.hpp
@@ -60,7 +60,6 @@ class InvocationCounter {
// Manipulation
void reset();
void init();
- void decay(); // decay counter (divide by two)
void set_carry_on_overflow();
void set(uint count);
void increment() { _counter += count_increment; }
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp
index c7a3ee959fe..1d40c001c43 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp
class EventEmitter : public CHeapObj<mtTracing> {
const JfrTicks& _end_time;
Thread* _thread;
JfrThreadLocal* _jfr_thread_local;
- traceid _thread_id;
EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
~EventEmitter();
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index ac73a0b36c7..47245923da4 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -740,7 +740,7 @@
-
+
diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
index f4e7c620862..48db2fd8715 100644
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
@@ -138,9 +138,9 @@ void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
class JfrVframeStream : public vframeStreamCommon {
private:
+ bool _vthread;
const ContinuationEntry* _cont_entry;
bool _async_mode;
- bool _vthread;
bool step_to_sender();
void next_frame();
public:
@@ -165,8 +165,9 @@ JfrVframeStream::JfrVframeStream(JavaThread* jt, const frame& fr, bool stop_at_j
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::skip,
walk_continuation(jt))),
- _cont_entry(JfrThreadLocal::is_vthread(jt) ? jt->last_continuation() : nullptr),
- _async_mode(async_mode), _vthread(JfrThreadLocal::is_vthread(jt)) {
+ _vthread(JfrThreadLocal::is_vthread(jt)),
+ _cont_entry(_vthread ? jt->last_continuation() : nullptr),
+ _async_mode(async_mode) {
assert(!_vthread || _cont_entry != nullptr, "invariant");
_reg_map.set_async(async_mode);
_frame = fr;
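
Moving _vthread ahead of _cont_entry matters because C++ initializes non-static data members in declaration order, not in the order they appear in the mem-initializer list; with the new order, _cont_entry's initializer can read the already-initialized _vthread instead of recomputing is_vthread(jt). A tiny standalone example of the rule:

    #include <cstdio>

    // Members are initialized in declaration order, so _vthread must come first
    // for _cont_entry's initializer to read it safely.
    struct Stream {
      bool        _vthread;        // declared (and therefore initialized) first
      const void* _cont_entry;

      Stream(bool is_vthread, const void* entry)
        : _vthread(is_vthread),
          _cont_entry(_vthread ? entry : nullptr) {}   // safe: _vthread is already set
    };

    int main() {
      int dummy = 0;
      Stream s(true, &dummy);
      std::printf("vthread=%d cont_entry=%p\n", (int)s._vthread, s._cont_entry);
      return 0;
    }
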
diff --git a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp
index 6e4e6644080..175ec98da44 100644
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp
@@ -395,11 +395,14 @@ traceid JfrThreadLocal::thread_id(const Thread* t) {
return t->jfr_thread_local()->_thread_id_alias;
}
JfrThreadLocal* const tl = t->jfr_thread_local();
- if (!t->is_Java_thread() || !Atomic::load_acquire(&tl->_vthread)) {
+ if (!t->is_Java_thread()) {
return jvm_thread_id(t, tl);
}
- // virtual thread
const JavaThread* jt = JavaThread::cast(t);
+ if (!is_vthread(jt)) {
+ return jvm_thread_id(t, tl);
+ }
+ // virtual thread
const traceid tid = vthread_id(jt);
assert(tid != 0, "invariant");
if (!tl->is_vthread_excluded()) {
@@ -456,7 +459,7 @@ traceid JfrThreadLocal::jvm_thread_id(const Thread* t) {
bool JfrThreadLocal::is_vthread(const JavaThread* jt) {
assert(jt != nullptr, "invariant");
- return Atomic::load_acquire(&jt->jfr_thread_local()->_vthread);
+ return Atomic::load_acquire(&jt->jfr_thread_local()->_vthread) && jt->last_continuation() != nullptr;
}
inline bool is_virtual(const JavaThread* jt, oop thread) {
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
index ae13fc86143..5b45499f965 100644
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
@@ -52,6 +52,7 @@
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
@@ -585,6 +586,18 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, ARGU
JVMCI_THROW_MSG_0(InternalError, err_msg("Primitive type %s should be handled in Java code", str));
}
+#ifdef ASSERT
+ const char* val = Arguments::PropertyList_get_value(Arguments::system_properties(), "test.jvmci.lookupTypeException");
+ if (val != nullptr) {
+ if (strstr(val, "<trace>") != nullptr) {
+ tty->print_cr("CompilerToVM.lookupType: %s", str);
+ } else if (strstr(val, str) != nullptr) {
+ THROW_MSG_0(vmSymbols::java_lang_Exception(),
+ err_msg("lookupTypeException: %s", str));
+ }
+ }
+#endif
+
JVMCIKlassHandle resolved_klass(THREAD);
Klass* accessing_klass = UNPACK_PAIR(Klass, accessing_klass);
Handle class_loader;
diff --git a/src/hotspot/share/jvmci/jvmciEnv.cpp b/src/hotspot/share/jvmci/jvmciEnv.cpp
index 32759a1889a..329afe8cfa5 100644
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp
@@ -505,8 +505,7 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation {
private:
const Handle& _throwable;
- int encode(JavaThread* THREAD, jlong buffer, int buffer_size) {
- Klass* vmSupport = SystemDictionary::resolve_or_fail(vmSymbols::jdk_internal_vm_VMSupport(), true, THREAD);
+ bool handle_pending_exception(JavaThread* THREAD, jlong buffer, int buffer_size) {
if (HAS_PENDING_EXCEPTION) {
Handle throwable = Handle(THREAD, PENDING_EXCEPTION);
Symbol *ex_name = throwable->klass()->name();
@@ -523,6 +522,14 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation {
JVMCI_event_1("error translating exception: %s", char_buffer);
decode(THREAD, _encode_fail, buffer);
}
+ return true;
+ }
+ return false;
+ }
+
+ int encode(JavaThread* THREAD, jlong buffer, int buffer_size) {
+ Klass* vmSupport = SystemDictionary::resolve_or_fail(vmSymbols::jdk_internal_vm_VMSupport(), true, THREAD);
+ if (handle_pending_exception(THREAD, buffer, buffer_size)) {
return 0;
}
JavaCallArguments jargs;
@@ -534,6 +541,9 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation {
vmSupport,
vmSymbols::encodeThrowable_name(),
vmSymbols::encodeThrowable_signature(), &jargs, THREAD);
+ if (handle_pending_exception(THREAD, buffer, buffer_size)) {
+ return 0;
+ }
return result.get_jint();
}
diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp
index 6a1bc38b4d5..73a19d01173 100644
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp
@@ -1231,11 +1231,13 @@ JNIEnv* JVMCIRuntime::init_shared_library_javavm(int* create_JavaVM_err) {
MutexLocker locker(_lock);
JavaVM* javaVM = _shared_library_javavm;
if (javaVM == nullptr) {
+#ifdef ASSERT
const char* val = Arguments::PropertyList_get_value(Arguments::system_properties(), "test.jvmci.forceEnomemOnLibjvmciInit");
if (val != nullptr && strcmp(val, "true") == 0) {
*create_JavaVM_err = JNI_ENOMEM;
return nullptr;
}
+#endif
char* sl_path;
void* sl_handle = JVMCI::get_shared_library(sl_path, true);
@@ -2059,12 +2061,14 @@ void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, c
JVMCIObject result_object = JVMCIENV->call_HotSpotJVMCIRuntime_compileMethod(receiver, jvmci_method, entry_bci,
(jlong) compile_state, compile_state->task()->compile_id());
+#ifdef ASSERT
if (JVMCIENV->has_pending_exception()) {
const char* val = Arguments::PropertyList_get_value(Arguments::system_properties(), "test.jvmci.compileMethodExceptionIsFatal");
if (val != nullptr && strcmp(val, "true") == 0) {
fatal_exception(JVMCIENV, "testing JVMCI fatal exception handling");
}
}
+#endif
if (after_compiler_upcall(JVMCIENV, compiler, method, "call_HotSpotJVMCIRuntime_compileMethod")) {
return;
diff --git a/src/hotspot/share/memory/metaspace/metachunk.cpp b/src/hotspot/share/memory/metaspace/metachunk.cpp
index bc514c856e7..3d67aac8016 100644
--- a/src/hotspot/share/memory/metaspace/metachunk.cpp
+++ b/src/hotspot/share/memory/metaspace/metachunk.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -34,6 +34,7 @@
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
namespace metaspace {
@@ -285,7 +286,9 @@ void Metachunk::verify() const {
const size_t required_alignment = word_size() * sizeof(MetaWord);
assert_is_aligned(base(), required_alignment);
- // Test accessing the committed area.
+ // Test accessing the committed area. But not for ASAN. We don't know which portions
+ // of the chunk are still poisoned.
+#if !INCLUDE_ASAN
SOMETIMES(
if (_committed_words > 0) {
for (const MetaWord* p = _base; p < _base + _committed_words; p += os::vm_page_size()) {
@@ -294,6 +297,7 @@ void Metachunk::verify() const {
dummy = *(_base + _committed_words - 1);
}
)
+#endif // !INCLUDE_ASAN
}
#endif // ASSERT
diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
index 1ebda015eb3..099ba80f2dd 100644
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
@@ -48,6 +48,7 @@
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
namespace metaspace {
@@ -433,6 +434,9 @@ void VirtualSpaceNode::verify_locked() const {
_commit_mask.verify();
// Verify memory against commit mask.
+ // Down here, from ASAN's view, this memory may be poisoned, since we only unpoison
+ // way up at the ChunkManager level.
+#if !INCLUDE_ASAN
SOMETIMES(
for (MetaWord* p = base(); p < base() + used_words(); p += os::vm_page_size()) {
if (_commit_mask.is_committed_address(p)) {
@@ -440,6 +444,7 @@ void VirtualSpaceNode::verify_locked() const {
}
}
)
+#endif // !INCLUDE_ASAN
assert(committed_words() <= word_size(), "Sanity");
assert_is_aligned(committed_words(), Settings::commit_granule_words());
diff --git a/src/hotspot/share/oops/compiledICHolder.cpp b/src/hotspot/share/oops/compiledICHolder.cpp
index c6ca6be2030..8bfa55bcce7 100644
--- a/src/hotspot/share/oops/compiledICHolder.cpp
+++ b/src/hotspot/share/oops/compiledICHolder.cpp
@@ -32,7 +32,7 @@ volatile int CompiledICHolder::_live_not_claimed_count;
#endif
CompiledICHolder::CompiledICHolder(Metadata* metadata, Klass* klass, bool is_method)
- : _holder_metadata(metadata), _holder_klass(klass), _is_metadata_method(is_method) {
+ : _holder_metadata(metadata), _holder_klass(klass), _next(nullptr), _is_metadata_method(is_method) {
#ifdef ASSERT
Atomic::inc(&_live_count);
Atomic::inc(&_live_not_claimed_count);
diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp
index 4ebcc5329c0..c7c605aa157 100644
--- a/src/hotspot/share/oops/klass.cpp
+++ b/src/hotspot/share/oops/klass.cpp
@@ -61,10 +61,6 @@ void Klass::set_java_mirror(Handle m) {
_java_mirror = class_loader_data()->add_handle(m);
}
-oop Klass::java_mirror_no_keepalive() const {
- return _java_mirror.peek();
-}
-
bool Klass::is_cloneable() const {
return _access_flags.is_cloneable_fast() ||
is_subtype_of(vmClasses::Cloneable_klass());
diff --git a/src/hotspot/share/oops/klass.inline.hpp b/src/hotspot/share/oops/klass.inline.hpp
index af2480e149b..a72868a08d8 100644
--- a/src/hotspot/share/oops/klass.inline.hpp
+++ b/src/hotspot/share/oops/klass.inline.hpp
@@ -56,6 +56,10 @@ inline oop Klass::java_mirror() const {
return _java_mirror.resolve();
}
+inline oop Klass::java_mirror_no_keepalive() const {
+ return _java_mirror.peek();
+}
+
inline klassVtable Klass::vtable() const {
return klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
}
diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp
index a4f8f3075ef..1e2234e103b 100644
--- a/src/hotspot/share/oops/method.hpp
+++ b/src/hotspot/share/oops/method.hpp
@@ -444,7 +444,8 @@ class Method : public Metadata {
void remove_unshareable_flags() NOT_CDS_RETURN;
// the number of argument reg slots that the compiled method uses on the stack.
- int num_stack_arg_slots() const { return constMethod()->num_stack_arg_slots(); }
+ int num_stack_arg_slots(bool rounded = true) const {
+ return rounded ? align_up(constMethod()->num_stack_arg_slots(), 2) : constMethod()->num_stack_arg_slots(); }
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
virtual MetaspaceObj::Type type() const { return MethodType; }
diff --git a/src/hotspot/share/oops/stackChunkOop.hpp b/src/hotspot/share/oops/stackChunkOop.hpp
index 36b06ecd324..abfe47ad3f1 100644
--- a/src/hotspot/share/oops/stackChunkOop.hpp
+++ b/src/hotspot/share/oops/stackChunkOop.hpp
@@ -155,7 +155,7 @@ class stackChunkOopDesc : public instanceOopDesc {
inline void* gc_data() const;
inline BitMapView bitmap() const;
- inline BitMap::idx_t bit_index_for(intptr_t* p) const;
+ inline BitMap::idx_t bit_index_for(address p) const;
inline intptr_t* address_for_bit(BitMap::idx_t index) const;
template <typename OopT> inline BitMap::idx_t bit_index_for(OopT* p) const;
template <typename OopT> inline OopT* address_for_bit(BitMap::idx_t index) const;
diff --git a/src/hotspot/share/oops/stackChunkOop.inline.hpp b/src/hotspot/share/oops/stackChunkOop.inline.hpp
index 37a41655672..8ac313e6b1d 100644
--- a/src/hotspot/share/oops/stackChunkOop.inline.hpp
+++ b/src/hotspot/share/oops/stackChunkOop.inline.hpp
@@ -262,12 +262,13 @@ inline BitMapView stackChunkOopDesc::bitmap() const {
return bitmap;
}
-inline BitMap::idx_t stackChunkOopDesc::bit_index_for(intptr_t* p) const {
+inline BitMap::idx_t stackChunkOopDesc::bit_index_for(address p) const {
return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}
template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
+ assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
assert(p >= (OopT*)start_address(), "Address not in chunk");
return p - (OopT*)start_address();
}
diff --git a/src/hotspot/share/oops/symbolHandle.cpp b/src/hotspot/share/oops/symbolHandle.cpp
new file mode 100644
index 00000000000..350f0dd96c8
--- /dev/null
+++ b/src/hotspot/share/oops/symbolHandle.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "oops/symbolHandle.hpp"
+#include "runtime/atomic.hpp"
+
+Symbol* volatile TempSymbolCleanupDelayer::_queue[QueueSize] = {};
+volatile uint TempSymbolCleanupDelayer::_index = 0;
+
+// Keep this symbol alive for some time to allow for reuse.
+// Temp symbols for the same string can often be created in quick succession,
+// and this queue allows them to be reused instead of churning.
+void TempSymbolCleanupDelayer::delay_cleanup(Symbol* sym) {
+ assert(sym != nullptr, "precondition");
+ sym->increment_refcount();
+ uint i = Atomic::add(&_index, 1u) % QueueSize;
+ Symbol* old = Atomic::xchg(&_queue[i], sym);
+ Symbol::maybe_decrement_refcount(old);
+}
+
+void TempSymbolCleanupDelayer::drain_queue() {
+ for (uint i = 0; i < QueueSize; i++) {
+ Symbol* sym = Atomic::xchg(&_queue[i], (Symbol*) nullptr);
+ Symbol::maybe_decrement_refcount(sym);
+ }
+}
diff --git a/src/hotspot/share/oops/symbolHandle.hpp b/src/hotspot/share/oops/symbolHandle.hpp
index 249da936761..f1b2d2470c5 100644
--- a/src/hotspot/share/oops/symbolHandle.hpp
+++ b/src/hotspot/share/oops/symbolHandle.hpp
@@ -28,6 +28,16 @@
#include "memory/allocation.hpp"
#include "oops/symbol.hpp"
+class TempSymbolCleanupDelayer : AllStatic {
+ static Symbol* volatile _queue[];
+ static volatile uint _index;
+
+public:
+ static const uint QueueSize = 128;
+ static void delay_cleanup(Symbol* s);
+ static void drain_queue();
+};
+
// TempNewSymbol acts as a handle class in a handle/body idiom and is
// responsible for proper resource management of the body (which is a Symbol*).
// The body is resource managed by a reference counting scheme.
@@ -49,10 +59,17 @@ class SymbolHandleBase : public StackObj {
SymbolHandleBase() : _temp(nullptr) { }
// Conversion from a Symbol* to a SymbolHandleBase.
- // Does not increment the current reference count if temporary.
SymbolHandleBase(Symbol *s) : _temp(s) {
if (!TEMP) {
Symbol::maybe_increment_refcount(_temp);
+ return;
+ }
+
+ // Delay cleanup for temp symbols. Refcount is incremented while in
+ // queue. But don't requeue existing entries, or entries that are held
+ // elsewhere - it's a waste of effort.
+ if (s != nullptr && s->refcount() == 1) {
+ TempSymbolCleanupDelayer::delay_cleanup(s);
}
}
diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp
index dd917571b06..d3c9e14bc7c 100644
--- a/src/hotspot/share/opto/chaitin.hpp
+++ b/src/hotspot/share/opto/chaitin.hpp
@@ -292,7 +292,7 @@ class PhaseIFG : public Phase {
#endif
//--------------- Live Range Accessors
- LRG &lrgs(uint idx) const { assert(idx < _maxlrg, "oob"); return _lrgs[idx]; }
+ LRG &lrgs(uint idx) const { assert(idx < _maxlrg, "oob: index %u not smaller than %u", idx, _maxlrg); return _lrgs[idx]; }
// Compute and set effective degree. Might be folded into SquareUp().
void Compute_Effective_Degree();
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 40005a687f5..ccb24de6741 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -628,7 +628,6 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
_env(ci_env),
_directive(directive),
_log(ci_env->log()),
- _failure_reason(nullptr),
_intrinsics (comp_arena(), 0, 0, nullptr),
_macro_nodes (comp_arena(), 8, 0, nullptr),
_parse_predicate_opaqs (comp_arena(), 8, 0, nullptr),
@@ -925,7 +924,6 @@ Compile::Compile( ciEnv* ci_env,
_env(ci_env),
_directive(directive),
_log(ci_env->log()),
- _failure_reason(nullptr),
_congraph(nullptr),
NOT_PRODUCT(_igv_printer(nullptr) COMMA)
_dead_node_list(comp_arena()),
@@ -1942,7 +1940,7 @@ void Compile::process_for_unstable_if_traps(PhaseIterGVN& igvn) {
if (!live_locals.at(i) && !local->is_top() && local != lhs && local!= rhs) {
uint idx = jvms->locoff() + i;
#ifdef ASSERT
- if (Verbose) {
+ if (PrintOpto && Verbose) {
tty->print("[unstable_if] kill local#%d: ", idx);
local->dump();
tty->cr();
@@ -4335,9 +4333,9 @@ void Compile::record_failure(const char* reason) {
if (log() != nullptr) {
log()->elem("failure reason='%s' phase='compile'", reason);
}
- if (_failure_reason == nullptr) {
+ if (_failure_reason.get() == nullptr) {
// Record the first failure reason.
- _failure_reason = reason;
+ _failure_reason.set(reason);
}
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
@@ -4916,7 +4914,16 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
const Type* t_no_spec = t->remove_speculative();
if (t_no_spec != t) {
bool in_hash = igvn.hash_delete(n);
- assert(in_hash, "node should be in igvn hash table");
+#ifdef ASSERT
+ if (!in_hash) {
+ tty->print_cr("current graph:");
+ n->dump_bfs(MaxNodeLimit, nullptr, "S$");
+ tty->cr();
+ tty->print_cr("erroneous node:");
+ n->dump();
+ assert(false, "node should be in igvn hash table");
+ }
+#endif
tn->set_type(t_no_spec);
igvn.hash_insert(n);
igvn._worklist.push(n); // give it a chance to go away
diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp
index e5b881065ac..c843ed27faf 100644
--- a/src/hotspot/share/opto/compile.hpp
+++ b/src/hotspot/share/opto/compile.hpp
@@ -32,6 +32,7 @@
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
+#include "compiler/cHeapStringHolder.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
@@ -350,7 +351,7 @@ class Compile : public Phase {
ciEnv* _env; // CI interface
DirectiveSet* _directive; // Compiler directive
CompileLog* _log; // from CompilerThread
- const char* _failure_reason; // for record_failure/failing pattern
+ CHeapStringHolder _failure_reason; // for record_failure/failing pattern
GrowableArray _intrinsics; // List of intrinsics.
GrowableArray _macro_nodes; // List of nodes which need to be expanded before matching.
GrowableArray _parse_predicate_opaqs; // List of Opaque1 nodes for the Parse Predicates.
@@ -775,11 +776,22 @@ class Compile : public Phase {
Arena* comp_arena() { return &_comp_arena; }
ciEnv* env() const { return _env; }
CompileLog* log() const { return _log; }
- bool failing() const { return _env->failing() || _failure_reason != nullptr; }
- const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
+
+ bool failing() const {
+ return _env->failing() ||
+ _failure_reason.get() != nullptr;
+ }
+
+ const char* failure_reason() const {
+ return _env->failing() ? _env->failure_reason()
+ : _failure_reason.get();
+ }
bool failure_reason_is(const char* r) const {
- return (r == _failure_reason) || (r != nullptr && _failure_reason != nullptr && strcmp(r, _failure_reason) == 0);
+ return (r == _failure_reason.get()) ||
+ (r != nullptr &&
+ _failure_reason.get() != nullptr &&
+ strcmp(r, _failure_reason.get()) == 0);
}
void record_failure(const char* reason);
diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp
index f7fa5e9d8cc..401cb35e227 100644
--- a/src/hotspot/share/opto/doCall.cpp
+++ b/src/hotspot/share/opto/doCall.cpp
@@ -856,7 +856,7 @@ void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
#ifndef PRODUCT
// We do not expect the same handler bci to take both cold unloaded
// and hot loaded exceptions. But, watch for it.
- if ((Verbose || WizardMode) && extype->is_loaded()) {
+ if (PrintOpto && (Verbose || WizardMode) && extype->is_loaded()) {
tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
method()->print_name(); tty->cr();
} else if (PrintOpto && (Verbose || WizardMode)) {
diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp
index 1c60de9b9d8..53454cfdece 100644
--- a/src/hotspot/share/opto/graphKit.cpp
+++ b/src/hotspot/share/opto/graphKit.cpp
@@ -2702,7 +2702,18 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No
// Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
// Here, the type of 'fa' is often exact, so the store check
// of fa[1]=x will fold up, without testing the nullness of x.
- switch (C->static_subtype_check(superk, subk)) {
+ //
+ // Do not skip the static sub type check with StressReflectiveCode during
+ // parsing (i.e. with ExpandSubTypeCheckAtParseTime) because the
+ // associated CheckCastNodePP could already be folded when the type
+ // system can prove it's an impossible type. Therefore, we should also
+ // do the static sub type check here to ensure control is folded as well.
+ // Otherwise, the graph is left in a broken state.
+ // At macro expansion, we would have already folded the SubTypeCheckNode
+ // being expanded here because we always perform the static sub type
+ // check in SubTypeCheckNode::sub() regardless of whether
+ // StressReflectiveCode is set or not.
+ switch (C->static_subtype_check(superk, subk, !ExpandSubTypeCheckAtParseTime)) {
case Compile::SSC_always_false:
{
Node* always_fail = *ctrl;
diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp
index 9ae24cb2055..fa39301cbef 100644
--- a/src/hotspot/share/opto/loopPredicate.cpp
+++ b/src/hotspot/share/opto/loopPredicate.cpp
@@ -1146,7 +1146,7 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree* loop
CountedLoopNode* cl = head->as_CountedLoop();
if (cl->phi() != nullptr) {
const TypeInt* t = _igvn.type(cl->phi())->is_int();
- float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS(cl->stride_con());
+ float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS((float)cl->stride_con());
if (worst_case_trip_cnt < loop_trip_cnt) {
loop_trip_cnt = worst_case_trip_cnt;
}
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index e102d9ac9e9..52467e75010 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -801,13 +801,14 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
}
#endif
- jlong stride_con = head->stride_con();
- assert(stride_con != 0, "missed some peephole opt");
+ jlong stride_con_long = head->stride_con();
+ assert(stride_con_long != 0, "missed some peephole opt");
// We can't iterate for more than max int at a time.
- if (stride_con != (jint)stride_con) {
+ if (stride_con_long != (jint)stride_con_long || stride_con_long == min_jint) {
assert(bt == T_LONG, "only for long loops");
return false;
}
+ jint stride_con = checked_cast<jint>(stride_con_long);
// The number of iterations for the integer count loop: guarantee no
// overflow: max_jint - stride_con max. -1 so there's no need for a
// loop limit check if the exit test is <= or >=.
@@ -945,7 +946,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
}
// Clone the iv data nodes as an integer iv
- Node* int_stride = _igvn.intcon(checked_cast<int>(stride_con));
+ Node* int_stride = _igvn.intcon(stride_con);
set_ctrl(int_stride, C->root());
Node* inner_phi = new PhiNode(x->in(0), TypeInt::INT);
Node* inner_incr = new AddINode(inner_phi, int_stride);
@@ -1040,7 +1041,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
register_new_node(outer_phi, outer_head);
}
- transform_long_range_checks(checked_cast<int>(stride_con), range_checks, outer_phi, inner_iters_actual_int,
+ transform_long_range_checks(stride_con, range_checks, outer_phi, inner_iters_actual_int,
inner_phi, iv_add, inner_head);
// Peel one iteration of the loop and use the safepoint at the end
// of the peeled iteration to insert Parse Predicates. If no well
@@ -1077,7 +1078,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
return true;
}
-int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jlong stride_con, int iters_limit, PhiNode* phi,
+int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jint stride_con, int iters_limit, PhiNode* phi,
Node_List& range_checks) {
const jlong min_iters = 2;
jlong reduced_iters_limit = iters_limit;
@@ -1093,7 +1094,9 @@ int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jlong s
jlong scale = 0;
if (loop->is_range_check_if(if_proj, this, T_LONG, phi, range, offset, scale) &&
loop->is_invariant(range) && loop->is_invariant(offset) &&
- original_iters_limit / ABS(scale * stride_con) >= min_iters) {
+ scale != min_jlong &&
+ original_iters_limit / ABS(scale) >= min_iters * ABS(stride_con)) {
+ assert(scale == (jint)scale, "scale should be an int");
reduced_iters_limit = MIN2(reduced_iters_limit, original_iters_limit/ABS(scale));
range_checks.push(c);
}
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 6d44434d71e..868a4fd153c 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -1695,8 +1695,8 @@ class PhaseIdealLoop : public PhaseTransform {
LoopNode* create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head, IfNode* exit_test);
- int extract_long_range_checks(const IdealLoopTree* loop, jlong stride_con, int iters_limit, PhiNode* phi,
- Node_List &range_checks);
+ int extract_long_range_checks(const IdealLoopTree* loop, jint stride_con, int iters_limit, PhiNode* phi,
+ Node_List &range_checks);
void transform_long_range_checks(int stride_con, const Node_List &range_checks, Node* outer_phi,
Node* inner_iters_actual_int, Node* inner_phi,
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index be2ec7dbc07..be9f5599d53 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -521,8 +521,8 @@ Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
}
// Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
- // but not if I2 is a constant.
- if (n_op == Op_AddP) {
+ // but not if I2 is a constant. Skip for irreducible loops.
+ if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
if (n2_loop == n_loop && n3_loop != n_loop) {
if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp
index e2fdbedd804..6746c08498e 100644
--- a/src/hotspot/share/opto/memnode.cpp
+++ b/src/hotspot/share/opto/memnode.cpp
@@ -3343,6 +3343,7 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
my_mem = load_node;
} else {
assert(my_mem->unique_out() == this, "sanity");
+ assert(!trailing_load_store(), "load store node can't be eliminated");
del_req(Precedent);
phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
my_mem = nullptr;
diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp
index 6a641a44b32..cb1a2a769fc 100644
--- a/src/hotspot/share/opto/superword.cpp
+++ b/src/hotspot/share/opto/superword.cpp
@@ -4200,6 +4200,25 @@ SWPointer::SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool anal
NOT_PRODUCT(if(_slp->is_trace_alignment()) _tracer.restore_depth();)
NOT_PRODUCT(_tracer.ctor_6(mem);)
+ // In the pointer analysis, and especially the AlignVector analysis, we assume that
+ // stride and scale are not too large. For example, we multiply "scale * stride",
+ // and assume that this does not overflow the int range. We also take "abs(scale)"
+ // and "abs(stride)", which would overflow for min_int = -(2^31). Still, we want
+ // to at least allow small and moderately large stride and scale. Therefore, we
+ // allow values up to 2^30, which is only a factor 2 smaller than the max/min int.
+ // Normal performance relevant code will have much lower values. And the restriction
+ // allows us to keep the rest of the autovectorization code much simpler, since we
+ // do not have to deal with overflows.
+ jlong long_scale = _scale;
+ jlong long_stride = slp->lp()->stride_is_con() ? slp->iv_stride() : 0;
+ jlong max_val = 1 << 30;
+ if (abs(long_scale) >= max_val ||
+ abs(long_stride) >= max_val ||
+ abs(long_scale * long_stride) >= max_val) {
+ assert(!valid(), "adr stride*scale is too large");
+ return;
+ }
+
_base = base;
_adr = adr;
assert(valid(), "Usable");
diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp
index 4e0bd43be5c..5a764f7cad1 100644
--- a/src/hotspot/share/opto/type.cpp
+++ b/src/hotspot/share/opto/type.cpp
@@ -6458,14 +6458,19 @@ template bool TypePtr::maybe_java_subtype_of_helper_for_arr
if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces->empty() && other_exact) {
return true;
}
- int dummy;
- bool this_top_or_bottom = (this_one->base_element_type(dummy) == Type::TOP || this_one->base_element_type(dummy) == Type::BOTTOM);
- if (!this_one->is_loaded() || !other->is_loaded() || this_top_or_bottom) {
+ if (!this_one->is_loaded() || !other->is_loaded()) {
return true;
}
if (this_one->is_instance_type(other)) {
return other->klass()->equals(ciEnv::current()->Object_klass()) && other->_interfaces->intersection_with(this_one->_interfaces)->eq(other->_interfaces);
}
+
+ int dummy;
+ bool this_top_or_bottom = (this_one->base_element_type(dummy) == Type::TOP || this_one->base_element_type(dummy) == Type::BOTTOM);
+ if (this_top_or_bottom) {
+ return true;
+ }
+
assert(this_one->is_array_type(other), "");
const T1* other_ary = this_one->is_array_type(other);
diff --git a/src/hotspot/share/prims/foreignGlobals.cpp b/src/hotspot/share/prims/foreignGlobals.cpp
index 261be4ead59..b11ec1a4552 100644
--- a/src/hotspot/share/prims/foreignGlobals.cpp
+++ b/src/hotspot/share/prims/foreignGlobals.cpp
@@ -170,7 +170,7 @@ int NativeCallingConvention::calling_convention(const BasicType* sig_bt, VMStora
int JavaCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const {
VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
- int slots = SharedRuntime::java_calling_convention(sig_bt, vm_regs, num_args);
+ int slots = align_up(SharedRuntime::java_calling_convention(sig_bt, vm_regs, num_args), 2);
for (int i = 0; i < num_args; i++) {
VMRegPair pair = vm_regs[i];
// note, we ignore second here. Signature should consist of register-size values. So there should be
diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp
index 105902b9642..7c67a0eebfa 100644
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp
@@ -988,7 +988,7 @@ JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_th
// Get off stack monitors. (e.g. acquired via jni MonitorEnter).
JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
- ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
+ ObjectSynchronizer::owned_monitors_iterate(&jmc, java_thread);
err = jmc.error();
return err;
@@ -1015,7 +1015,7 @@ JvmtiEnvBase::get_owned_monitors(JavaThread* calling_thread, JavaThread* java_th
// Get off stack monitors. (e.g. acquired via jni MonitorEnter).
JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
- ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
+ ObjectSynchronizer::owned_monitors_iterate(&jmc, java_thread);
err = jmc.error();
return err;
@@ -2178,6 +2178,13 @@ JvmtiMonitorClosure::do_monitor(ObjectMonitor* mon) {
}
// Filter out on stack monitors collected during stack walk.
oop obj = mon->object();
+
+ if (obj == nullptr) {
+ // This can happen if JNI code drops all references to the
+ // owning object.
+ return;
+ }
+
bool found = false;
for (int j = 0; j < _owned_monitors_list->length(); j++) {
jobject jobj = ((jvmtiMonitorStackDepthInfo*)_owned_monitors_list->at(j))->monitor;
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index f68147fea4f..4a7e69ca8b4 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -1833,7 +1833,7 @@ WB_END
WB_ENTRY(jboolean, WB_DeflateIdleMonitors(JNIEnv* env, jobject wb))
log_info(monitorinflation)("WhiteBox initiated DeflateIdleMonitors");
- return ObjectSynchronizer::request_deflate_idle_monitors();
+ return ObjectSynchronizer::request_deflate_idle_monitors_from_wb();
WB_END
WB_ENTRY(void, WB_ForceSafepoint(JNIEnv* env, jobject wb))
diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp
index 51125dd80b1..d08380c4ca9 100644
--- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp
+++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp
@@ -395,7 +395,7 @@ class FreezeBase : public StackObj {
inline int size_if_fast_freeze_available();
#ifdef ASSERT
- bool interpreted_native_or_deoptimized_on_stack();
+ bool check_valid_fast_path();
#endif
protected:
@@ -1486,7 +1486,10 @@ static bool monitors_on_stack(JavaThread* thread) {
return false;
}
-bool FreezeBase::interpreted_native_or_deoptimized_on_stack() {
+// There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
+// adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
+// interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
+bool FreezeBase::check_valid_fast_path() {
ContinuationEntry* ce = _thread->last_continuation();
RegisterMap map(_thread,
RegisterMap::UpdateMap::skip,
@@ -1494,11 +1497,11 @@ bool FreezeBase::interpreted_native_or_deoptimized_on_stack() {
RegisterMap::WalkContinuation::skip);
map.set_include_argument_oops(false);
for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
- if (f.is_interpreted_frame() || f.is_native_frame() || f.is_deoptimized_frame()) {
- return true;
+ if (!f.is_compiled_frame() || f.is_deoptimized_frame()) {
+ return false;
}
}
- return false;
+ return true;
}
#endif // ASSERT
@@ -1560,11 +1563,7 @@ static inline int freeze_internal(JavaThread* current, intptr_t* const sp) {
Freeze freeze(current, cont, sp);
- // There are no interpreted frames if we're not called from the interpreter and we haven't ancountered an i2c
- // adapter or called Deoptimization::unpack_frames. Calls from native frames also go through the interpreter
- // (see JavaCalls::call_helper).
- assert(!current->cont_fastpath()
- || (current->cont_fastpath_thread_state() && !freeze.interpreted_native_or_deoptimized_on_stack()), "");
+ assert(!current->cont_fastpath() || freeze.check_valid_fast_path(), "");
bool fast = UseContinuationFastPath && current->cont_fastpath();
if (fast && freeze.size_if_fast_freeze_available() > 0) {
freeze.freeze_fast_existing_chunk();
@@ -1747,7 +1746,7 @@ class ThawBase : public StackObj {
inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
inline void after_thaw_java_frame(const frame& f, bool bottom);
inline void patch(frame& f, const frame& caller, bool bottom);
- void clear_bitmap_bits(intptr_t* start, int range);
+ void clear_bitmap_bits(address start, address end);
NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
@@ -2128,13 +2127,22 @@ inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
}
-void ThawBase::clear_bitmap_bits(intptr_t* start, int range) {
+void ThawBase::clear_bitmap_bits(address start, address end) {
+ assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
+ assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
+
// we need to clear the bits that correspond to arguments as they reside in the caller frame
- // or they will keep objects that are otherwise unreachable alive
- log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(start+range));
+ // or they will keep objects that are otherwise unreachable alive.
+
+ // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
+ // `end` could be at an odd number of stack slots from `start`, i.e. it might not be oop aligned.
+ // If that's the case the bit range corresponding to the last stack slot should not have bits set
+ // anyways and we assert that before returning.
+ address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
+ log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
stackChunkOop chunk = _cont.tail();
- chunk->bitmap().clear_range(chunk->bit_index_for(start),
- chunk->bit_index_for(start+range));
+ chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
+ assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
}
NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
@@ -2187,7 +2195,9 @@ NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& c
_cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance);
} else if (_cont.tail()->has_bitmap() && locals > 0) {
assert(hf.is_heap_frame(), "should be");
- clear_bitmap_bits(heap_frame_bottom - locals, locals);
+ address start = (address)(heap_frame_bottom - locals);
+ address end = (address)heap_frame_bottom;
+ clear_bitmap_bits(start, end);
}
DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
@@ -2260,7 +2270,10 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
// can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
_cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance);
} else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
- clear_bitmap_bits(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top, added_argsize);
+ address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
+ int stack_args_slots = f.cb()->as_compiled_method()->method()->num_stack_arg_slots(false /* rounded */);
+ int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
+ clear_bitmap_bits(start, start + argsize_in_bytes);
}
DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp
index 6266bd004ea..51d66b8d9f8 100644
--- a/src/hotspot/share/runtime/frame.cpp
+++ b/src/hotspot/share/runtime/frame.cpp
@@ -1439,7 +1439,7 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
assert(sig_index == sizeargs, "");
}
int stack_arg_slots = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
- assert(stack_arg_slots == m->num_stack_arg_slots(), "");
+ assert(stack_arg_slots == m->num_stack_arg_slots(false /* rounded */), "");
int out_preserve = SharedRuntime::out_preserve_stack_slots();
int sig_index = 0;
int arg_index = (m->is_static() ? 0 : -1);
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 7137270fd17..a066d0d3749 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -732,6 +732,10 @@ const int ObjectAlignmentInBytes = 8;
"at one time (minimum is 1024).") \
range(1024, max_jint) \
\
+ product(intx, MonitorUnlinkBatch, 500, DIAGNOSTIC, \
+ "The maximum number of monitors to unlink in one batch. ") \
+ range(1, max_jint) \
+ \
product(intx, MonitorUsedDeflationThreshold, 90, DIAGNOSTIC, \
"Percentage of used monitors before triggering deflation (0 is " \
"off). The check is performed on GuaranteedSafepointInterval, " \
diff --git a/src/hotspot/share/runtime/interfaceSupport.inline.hpp b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
index 9de1f8126d1..ee204cb2e1c 100644
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
@@ -409,6 +409,7 @@ extern "C" { \
#define JVM_ENTRY_FROM_LEAF(env, result_type, header) \
{ { \
JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromNative __tiv(thread); \
debug_only(VMNativeEntryWrapper __vew;) \
VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)
diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
index 48709aa5c43..a92a1c5d285 100644
--- a/src/hotspot/share/runtime/java.cpp
+++ b/src/hotspot/share/runtime/java.cpp
@@ -77,6 +77,7 @@
#include "sanitizers/leak.hpp"
#include "services/memTracker.hpp"
#include "utilities/dtrace.hpp"
+#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
@@ -395,6 +396,8 @@ void before_exit(JavaThread* thread, bool halt) {
#define BEFORE_EXIT_DONE 2
static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN;
+ Events::log(thread, "Before exit entered");
+
// Note: don't use a Mutex to guard the entire before_exit(), as
// JVMTI post_thread_end_event and post_vm_death_event will run native code.
// A CAS or OSMutex would work just fine but then we need to manipulate
diff --git a/src/hotspot/share/runtime/monitorDeflationThread.cpp b/src/hotspot/share/runtime/monitorDeflationThread.cpp
index 01b33182973..3f2dcc6a673 100644
--- a/src/hotspot/share/runtime/monitorDeflationThread.cpp
+++ b/src/hotspot/share/runtime/monitorDeflationThread.cpp
@@ -94,6 +94,6 @@ void MonitorDeflationThread::monitor_deflation_thread_entry(JavaThread* jt, TRAP
}
}
- (void)ObjectSynchronizer::deflate_idle_monitors(/* ObjectMonitorsHashtable is not needed here */ nullptr);
+ (void)ObjectSynchronizer::deflate_idle_monitors();
}
}
diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
index ea46300d145..f7587bc6723 100644
--- a/src/hotspot/share/runtime/mutexLocker.cpp
+++ b/src/hotspot/share/runtime/mutexLocker.cpp
@@ -292,9 +292,9 @@ void mutex_init() {
}
#if INCLUDE_JFR
- MUTEX_DEFN(JfrBuffer_lock , PaddedMutex , nosafepoint);
- MUTEX_DEFN(JfrMsg_lock , PaddedMonitor, nosafepoint-3);
- MUTEX_DEFN(JfrStacktrace_lock , PaddedMutex , stackwatermark-1);
+ MUTEX_DEFN(JfrBuffer_lock , PaddedMutex , event);
+ MUTEX_DEFN(JfrMsg_lock , PaddedMonitor, event);
+ MUTEX_DEFN(JfrStacktrace_lock , PaddedMutex , event);
MUTEX_DEFN(JfrThreadSampler_lock , PaddedMonitor, nosafepoint);
#endif
@@ -304,7 +304,7 @@ void mutex_init() {
MUTEX_DEFN(ContinuationRelativize_lock , PaddedMonitor, nosafepoint-3);
MUTEX_DEFN(CodeHeapStateAnalytics_lock , PaddedMutex , safepoint);
- MUTEX_DEFN(ThreadsSMRDelete_lock , PaddedMonitor, nosafepoint-3); // Holds ConcurrentHashTableResize_lock
+ MUTEX_DEFN(ThreadsSMRDelete_lock , PaddedMonitor, service-2); // Holds ConcurrentHashTableResize_lock
MUTEX_DEFN(ThreadIdTableCreate_lock , PaddedMutex , safepoint);
MUTEX_DEFN(SharedDecoder_lock , PaddedMutex , tty-1);
MUTEX_DEFN(DCmdFactory_lock , PaddedMutex , nosafepoint);
diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index f2e7fb05d69..e29e7164181 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -518,16 +518,6 @@ bool ObjectMonitor::deflate_monitor() {
return false;
}
- if (ObjectSynchronizer::is_final_audit() && owner_is_DEFLATER_MARKER()) {
- // The final audit can see an already deflated ObjectMonitor on the
- // in-use list because MonitorList::unlink_deflated() might have
- // blocked for the final safepoint before unlinking all the deflated
- // monitors.
- assert(contentions() < 0, "must be negative: contentions=%d", contentions());
- // Already returned 'true' when it was originally deflated.
- return false;
- }
-
const oop obj = object_peek();
if (obj == nullptr) {
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 8adb584b769..6cb80af29e5 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -79,6 +79,7 @@
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
+#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#ifndef _WINDOWS
@@ -1150,6 +1151,8 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
return;
}
+#if !INCLUDE_ASAN
+
bool accessible = is_readable_pointer(addr);
// Check if addr is a JNI handle.
@@ -1236,7 +1239,10 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
return;
}
+#endif // !INCLUDE_ASAN
+
st->print_cr(INTPTR_FORMAT " is an unknown value", p2i(addr));
+
}
bool is_pointer_bad(intptr_t* ptr) {
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 404d9113f48..622bb9dd8e1 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -1997,7 +1997,7 @@ void SharedRuntime::check_member_name_argument_is_last_argument(const methodHand
assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
- int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
+ java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
for (int i = 0; i < member_arg_pos; i++) {
VMReg a = regs_with_member_name[i].first();
@@ -3095,7 +3095,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
BasicType ret_type = si.return_type();
// Now get the compiled-Java arguments layout.
- int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
+ SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
// Generate the compiled-to-native wrapper code
nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
diff --git a/src/hotspot/share/runtime/signature.cpp b/src/hotspot/share/runtime/signature.cpp
index 30eae826c9f..c23d39a81e8 100644
--- a/src/hotspot/share/runtime/signature.cpp
+++ b/src/hotspot/share/runtime/signature.cpp
@@ -178,7 +178,6 @@ void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) {
}
#if defined(_LP64) && !defined(ZERO)
- _stack_arg_slots = align_up(_stack_arg_slots, 2);
#ifdef ASSERT
int dbg_stack_arg_slots = compute_num_stack_arg_slots(_signature, _param_size, static_flag);
assert(_stack_arg_slots == dbg_stack_arg_slots, "fingerprinter: %d full: %d", _stack_arg_slots, dbg_stack_arg_slots);
@@ -235,14 +234,17 @@ void Fingerprinter::do_type_calling_convention(BasicType type) {
case T_BYTE:
case T_SHORT:
case T_INT:
-#if defined(PPC64) || defined(S390)
if (_int_args < Argument::n_int_register_parameters_j) {
_int_args++;
} else {
+#if defined(PPC64) || defined(S390)
+ _stack_arg_slots += 1;
+#else
+ _stack_arg_slots = align_up(_stack_arg_slots, 2);
_stack_arg_slots += 1;
+#endif // defined(PPC64) || defined(S390)
}
break;
-#endif // defined(PPC64) || defined(S390)
case T_LONG:
case T_OBJECT:
case T_ARRAY:
@@ -250,26 +252,27 @@ void Fingerprinter::do_type_calling_convention(BasicType type) {
if (_int_args < Argument::n_int_register_parameters_j) {
_int_args++;
} else {
- PPC64_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
- S390_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
+ _stack_arg_slots = align_up(_stack_arg_slots, 2);
_stack_arg_slots += 2;
}
break;
case T_FLOAT:
-#if defined(PPC64) || defined(S390)
if (_fp_args < Argument::n_float_register_parameters_j) {
_fp_args++;
} else {
+#if defined(PPC64) || defined(S390)
+ _stack_arg_slots += 1;
+#else
+ _stack_arg_slots = align_up(_stack_arg_slots, 2);
_stack_arg_slots += 1;
+#endif // defined(PPC64) || defined(S390)
}
break;
-#endif // defined(PPC64) || defined(S390)
case T_DOUBLE:
if (_fp_args < Argument::n_float_register_parameters_j) {
_fp_args++;
} else {
- PPC64_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
- S390_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
+ _stack_arg_slots = align_up(_stack_arg_slots, 2);
_stack_arg_slots += 2;
}
break;
diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp
index 36dd503cd70..73be830463e 100644
--- a/src/hotspot/share/runtime/stubRoutines.cpp
+++ b/src/hotspot/share/runtime/stubRoutines.cpp
@@ -277,43 +277,6 @@ void StubRoutines::initialize_compiler_stubs() {
}
}
-#ifdef ASSERT
-typedef void (*arraycopy_fn)(address src, address dst, int count);
-
-// simple tests of generated arraycopy functions
-static void test_arraycopy_func(address func, int alignment) {
- int v = 0xcc;
- int v2 = 0x11;
- jlong lbuffer[8];
- jlong lbuffer2[8];
- address fbuffer = (address) lbuffer;
- address fbuffer2 = (address) lbuffer2;
- unsigned int i;
- for (i = 0; i < sizeof(lbuffer); i++) {
- fbuffer[i] = v; fbuffer2[i] = v2;
- }
- // C++ does not guarantee jlong[] array alignment to 8 bytes.
- // Use middle of array to check that memory before it is not modified.
- address buffer = align_up((address)&lbuffer[4], BytesPerLong);
- address buffer2 = align_up((address)&lbuffer2[4], BytesPerLong);
- // do an aligned copy
- ((arraycopy_fn)func)(buffer, buffer2, 0);
- for (i = 0; i < sizeof(lbuffer); i++) {
- assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
- }
- // adjust destination alignment
- ((arraycopy_fn)func)(buffer, buffer2 + alignment, 0);
- for (i = 0; i < sizeof(lbuffer); i++) {
- assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
- }
- // adjust source alignment
- ((arraycopy_fn)func)(buffer + alignment, buffer2, 0);
- for (i = 0; i < sizeof(lbuffer); i++) {
- assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
- }
-}
-#endif // ASSERT
-
void StubRoutines::initialize_final_stubs() {
if (_final_stubs_code == nullptr) {
_final_stubs_code = initialize_stubs(StubCodeGenerator::Final_stubs,
@@ -322,90 +285,8 @@ void StubRoutines::initialize_final_stubs() {
"StubRoutines (final stubs)",
"_final_stubs_code_size");
}
-
-#ifdef ASSERT
-
- MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec));
-
-#define TEST_ARRAYCOPY(type) \
- test_arraycopy_func( type##_arraycopy(), sizeof(type)); \
- test_arraycopy_func( type##_disjoint_arraycopy(), sizeof(type)); \
- test_arraycopy_func(arrayof_##type##_arraycopy(), sizeof(HeapWord)); \
- test_arraycopy_func(arrayof_##type##_disjoint_arraycopy(), sizeof(HeapWord))
-
- // Make sure all the arraycopy stubs properly handle zero count
- TEST_ARRAYCOPY(jbyte);
- TEST_ARRAYCOPY(jshort);
- TEST_ARRAYCOPY(jint);
- TEST_ARRAYCOPY(jlong);
-
-#undef TEST_ARRAYCOPY
-
-#define TEST_FILL(type) \
- if (_##type##_fill != nullptr) { \
- union { \
- double d; \
- type body[96]; \
- } s; \
- \
- int v = 32; \
- for (int offset = -2; offset <= 2; offset++) { \
- for (int i = 0; i < 96; i++) { \
- s.body[i] = 1; \
- } \
- type* start = s.body + 8 + offset; \
- for (int aligned = 0; aligned < 2; aligned++) { \
- if (aligned) { \
- if (((intptr_t)start) % HeapWordSize == 0) { \
- ((void (*)(type*, int, int))StubRoutines::_arrayof_##type##_fill)(start, v, 80); \
- } else { \
- continue; \
- } \
- } else { \
- ((void (*)(type*, int, int))StubRoutines::_##type##_fill)(start, v, 80); \
- } \
- for (int i = 0; i < 96; i++) { \
- if (i < (8 + offset) || i >= (88 + offset)) { \
- assert(s.body[i] == 1, "what?"); \
- } else { \
- assert(s.body[i] == 32, "what?"); \
- } \
- } \
- } \
- } \
- } \
-
- TEST_FILL(jbyte);
- TEST_FILL(jshort);
- TEST_FILL(jint);
-
-#undef TEST_FILL
-
-#define TEST_COPYRTN(type) \
- test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic), sizeof(type)); \
- test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::arrayof_conjoint_##type##s), (int)MAX2(sizeof(HeapWord), sizeof(type)))
-
- // Make sure all the copy runtime routines properly handle zero count
- TEST_COPYRTN(jbyte);
- TEST_COPYRTN(jshort);
- TEST_COPYRTN(jint);
- TEST_COPYRTN(jlong);
-
-#undef TEST_COPYRTN
-
- test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_words), sizeof(HeapWord));
- test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words), sizeof(HeapWord));
- test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words_atomic), sizeof(HeapWord));
- // Aligned to BytesPerLong
- test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong));
- test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong));
-
- MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXWrite));
-
-#endif
}
-
void initial_stubs_init() { StubRoutines::initialize_initial_stubs(); }
void continuation_stubs_init() { StubRoutines::initialize_continuation_stubs(); }
void final_stubs_init() { StubRoutines::initialize_final_stubs(); }
diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
index fdd94b21883..44c93897852 100644
--- a/src/hotspot/share/runtime/synchronizer.cpp
+++ b/src/hotspot/share/runtime/synchronizer.cpp
@@ -69,45 +69,6 @@
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"
-class ObjectMonitorsHashtable::PtrList :
- public LinkedListImpl {};
-
-class CleanupObjectMonitorsHashtable: StackObj {
- public:
- bool do_entry(void*& key, ObjectMonitorsHashtable::PtrList*& list) {
- list->clear(); // clear the LinkListNodes
- delete list; // then delete the LinkedList
- return true;
- }
-};
-
-ObjectMonitorsHashtable::~ObjectMonitorsHashtable() {
- CleanupObjectMonitorsHashtable cleanup;
- _ptrs->unlink(&cleanup); // cleanup the LinkedLists
- delete _ptrs; // then delete the hash table
-}
-
-void ObjectMonitorsHashtable::add_entry(void* key, ObjectMonitor* om) {
- ObjectMonitorsHashtable::PtrList* list = get_entry(key);
- if (list == nullptr) {
- // Create new list and add it to the hash table:
- list = new (mtThread) ObjectMonitorsHashtable::PtrList;
- add_entry(key, list);
- }
- list->add(om); // Add the ObjectMonitor to the list.
- _om_count++;
-}
-
-bool ObjectMonitorsHashtable::has_entry(void* key, ObjectMonitor* om) {
- ObjectMonitorsHashtable::PtrList* list = get_entry(key);
- if (list == nullptr || list->find(om) == nullptr) {
- return false;
- }
- return true;
-}
-
void MonitorList::add(ObjectMonitor* m) {
ObjectMonitor* head;
do {
@@ -129,44 +90,68 @@ size_t MonitorList::max() const {
return Atomic::load(&_max);
}
-// Walk the in-use list and unlink (at most MonitorDeflationMax) deflated
-// ObjectMonitors. Returns the number of unlinked ObjectMonitors.
+// Walk the in-use list and unlink deflated ObjectMonitors.
+// Returns the number of unlinked ObjectMonitors.
size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls,
elapsedTimer* timer_p,
+ size_t deflated_count,
GrowableArray<ObjectMonitor*>* unlinked_list) {
size_t unlinked_count = 0;
ObjectMonitor* prev = nullptr;
- ObjectMonitor* head = Atomic::load_acquire(&_head);
- ObjectMonitor* m = head;
+ ObjectMonitor* m = Atomic::load_acquire(&_head);
+
// The in-use list head can be null during the final audit.
while (m != nullptr) {
if (m->is_being_async_deflated()) {
- // Find next live ObjectMonitor.
+ // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
+ // modify the list once per batch. The batch starts at "m".
+ size_t unlinked_batch = 0;
ObjectMonitor* next = m;
+ // Look for at most MonitorUnlinkBatch monitors, or the number of
+ // deflated and not unlinked monitors, whatever comes first.
+ assert(deflated_count >= unlinked_count, "Sanity: underflow");
+ size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
do {
ObjectMonitor* next_next = next->next_om();
- unlinked_count++;
+ unlinked_batch++;
unlinked_list->append(next);
next = next_next;
- if (unlinked_count >= (size_t)MonitorDeflationMax) {
- // Reached the max so bail out on the gathering loop.
+ if (unlinked_batch >= unlinked_batch_limit) {
+ // Reached the max batch, so bail out of the gathering loop.
+ break;
+ }
+ if (prev == nullptr && Atomic::load(&_head) != m) {
+ // Current batch used to be at head, but it is not at head anymore.
+ // Bail out and figure out where we currently are. This avoids long
+ // walks searching for new prev during unlink under heavy list inserts.
break;
}
} while (next != nullptr && next->is_being_async_deflated());
+
+ // Unlink the found batch.
if (prev == nullptr) {
- ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, head, next);
- if (prev_head != head) {
- // Find new prev ObjectMonitor that just got inserted.
+ // The current batch is the first batch, so there is a chance that it starts at head.
+ // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
+ ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, m, next);
+ if (prev_head != m) {
+ // Something must have updated the head. Figure out the actual prev for this batch.
for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
prev = n;
}
+ assert(prev != nullptr, "Should have found the prev for the current batch");
prev->set_next_om(next);
}
} else {
+ // The current batch is preceded by another batch. This guarantees the current batch
+ // does not start at head. Unlink the entire current batch without updating the head.
+ assert(Atomic::load(&_head) != m, "Sanity");
prev->set_next_om(next);
}
- if (unlinked_count >= (size_t)MonitorDeflationMax) {
- // Reached the max so bail out on the searching loop.
+
+ unlinked_count += unlinked_batch;
+ if (unlinked_count >= deflated_count) {
+ // Reached the max so bail out of the searching loop.
+ // There should be no more deflated monitors left.
break;
}
m = next;
@@ -182,6 +167,20 @@ size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls,
ls, timer_p);
}
}
+
+#ifdef ASSERT
+ // Invariant: the code above should unlink all deflated monitors.
+ // The code that runs after this unlinking does not expect deflated monitors.
+ // Notably, attempting to deflate the already deflated monitor would break.
+ {
+ ObjectMonitor* m = Atomic::load_acquire(&_head);
+ while (m != nullptr) {
+ assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
+ m = m->next_om();
+ }
+ }
+#endif
+
Atomic::sub(&_count, unlinked_count);
return unlinked_count;
}
@@ -1105,57 +1104,46 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob
// Visitors ...
-// Iterate ObjectMonitors where the owner == thread; this does NOT include
-// ObjectMonitors where owner is set to a stack-lock address in thread.
-//
-// This version of monitors_iterate() works with the in-use monitor list.
-//
-void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
+// Iterate over all ObjectMonitors.
+template <typename Function>
+void ObjectSynchronizer::monitors_iterate(Function function) {
MonitorList::Iterator iter = _in_use_list.iterator();
while (iter.has_next()) {
- ObjectMonitor* mid = iter.next();
- if (mid->owner() != thread) {
- // Not owned by the target thread and intentionally skips when owner
- // is set to a stack-lock address in the target thread.
- continue;
- }
- if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
- // Only process with closure if the object is set.
-
- // monitors_iterate() is only called at a safepoint or when the
- // target thread is suspended or when the target thread is
- // operating on itself. The current closures in use today are
- // only interested in an owned ObjectMonitor and ownership
- // cannot be dropped under the calling contexts so the
- // ObjectMonitor cannot be async deflated.
- closure->do_monitor(mid);
- }
+ ObjectMonitor* monitor = iter.next();
+ function(monitor);
}
}
-// This version of monitors_iterate() works with the specified linked list.
-//
-void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure,
- ObjectMonitorsHashtable::PtrList* list,
- JavaThread* thread) {
- typedef LinkedListIterator<ObjectMonitor*> ObjectMonitorIterator;
- ObjectMonitorIterator iter(list->head());
- while (!iter.is_empty()) {
- ObjectMonitor* mid = *iter.next();
- // Owner set to a stack-lock address in thread should never be seen here:
- assert(mid->owner() == thread, "must be");
- if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
- // Only process with closure if the object is set.
-
- // monitors_iterate() is only called at a safepoint or when the
- // target thread is suspended or when the target thread is
- // operating on itself. The current closures in use today are
- // only interested in an owned ObjectMonitor and ownership
- // cannot be dropped under the calling contexts so the
- // ObjectMonitor cannot be async deflated.
- closure->do_monitor(mid);
+// Iterate ObjectMonitors owned by any thread and where the owner `filter`
+// returns true.
+template <typename OwnerFilter>
+void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
+ monitors_iterate([&](ObjectMonitor* monitor) {
+ // This function is only called at a safepoint or when the
+ // target thread is suspended or when the target thread is
+ // operating on itself. The current closures in use today are
+ // only interested in an owned ObjectMonitor and ownership
+ // cannot be dropped under the calling contexts so the
+ // ObjectMonitor cannot be async deflated.
+ if (monitor->has_owner() && filter(monitor->owner_raw())) {
+ assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
+
+ closure->do_monitor(monitor);
}
- }
+ });
+}
+
+// Iterate ObjectMonitors where the owner == thread; this does NOT include
+// ObjectMonitors where owner is set to a stack-lock address in thread.
+void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
+ auto thread_filter = [&](void* owner) { return owner == thread; };
+ return owned_monitors_iterate_filtered(closure, thread_filter);
+}
+
+// Iterate ObjectMonitors owned by any thread.
+void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
+ auto all_filter = [&](void* owner) { return true; };
+ return owned_monitors_iterate_filtered(closure, all_filter);
}
static bool monitors_used_above_threshold(MonitorList* list) {
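The rewritten iteration above splits traversal from selection: monitors_iterate() walks the in-use list once, and the owner test is supplied as a lambda instead of being baked into the loop. A minimal standalone sketch of that filter-template pattern, using made-up Monitor and owner types rather than the HotSpot classes:

#include <cstdio>
#include <vector>

// Illustrative stand-in for an inflated monitor: just an owner token,
// where nullptr means "unowned".
struct Monitor {
  void* owner;
};

// Traversal is written once; the selection policy arrives as a callable,
// mirroring the OwnerFilter template parameter in the patch above.
template <typename OwnerFilter, typename Visitor>
void owned_monitors_do(const std::vector<Monitor*>& in_use,
                       OwnerFilter filter, Visitor visit) {
  for (Monitor* m : in_use) {
    if (m->owner != nullptr && filter(m->owner)) {
      visit(m);
    }
  }
}

int main() {
  int thread1, thread2;                       // stand-ins for thread identities
  Monitor a{&thread1}, b{nullptr}, c{&thread2};
  std::vector<Monitor*> in_use{&a, &b, &c};

  // Only monitors owned by "thread1".
  owned_monitors_do(in_use,
                    [&](void* owner) { return owner == &thread1; },
                    [](Monitor* m) { std::printf("owned by thread1: %p\n", (void*)m); });

  // Monitors owned by any thread (the all_filter case above).
  owned_monitors_do(in_use,
                    [](void*) { return true; },
                    [](Monitor* m) { std::printf("owned by someone: %p\n", (void*)m); });
}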
@@ -1262,16 +1250,20 @@ bool ObjectSynchronizer::is_async_deflation_needed() {
return false;
}
-bool ObjectSynchronizer::request_deflate_idle_monitors() {
+void ObjectSynchronizer::request_deflate_idle_monitors() {
+ MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
+ set_is_async_deflation_requested(true);
+ ml.notify_all();
+}
+
+bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
JavaThread* current = JavaThread::current();
bool ret_code = false;
jlong last_time = last_async_deflation_time_ns();
- set_is_async_deflation_requested(true);
- {
- MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
- ml.notify_all();
- }
+
+ request_deflate_idle_monitors();
+
const int N_CHECKS = 5;
for (int i = 0; i < N_CHECKS; i++) { // sleep for at most 5 seconds
if (last_async_deflation_time_ns() > last_time) {
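The split above keeps request_deflate_idle_monitors() cheap (set the flag, notify the deflation thread, return) while the whitebox variant additionally polls, for a bounded time, for evidence that a deflation cycle actually ran. A small sketch of that request/poll shape with standard-library primitives; the names and the 5-second bound mirror the code above but are otherwise illustrative:

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex deflation_lock;
std::condition_variable deflation_cv;
bool deflation_requested = false;
std::atomic<long> deflation_cycles{0};

// Cheap request path: set the flag and wake the worker, then return.
void request_deflation() {
  std::lock_guard<std::mutex> lk(deflation_lock);
  deflation_requested = true;
  deflation_cv.notify_all();
}

// Test-support path: request, then poll (bounded) until a cycle has run.
bool request_deflation_and_wait() {
  long before = deflation_cycles.load();
  request_deflation();
  for (int i = 0; i < 5; i++) {                       // at most ~5 seconds
    if (deflation_cycles.load() > before) return true;
    std::this_thread::sleep_for(std::chrono::seconds(1));
  }
  return false;
}

int main() {
  std::thread worker([] {
    std::unique_lock<std::mutex> lk(deflation_lock);
    deflation_cv.wait(lk, [] { return deflation_requested; });
    deflation_cycles.fetch_add(1);                    // pretend we deflated
  });
  std::printf("deflation observed: %s\n", request_deflation_and_wait() ? "yes" : "no");
  worker.join();
}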
@@ -1588,16 +1580,8 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_n
// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
-// If table != nullptr, we gather owned ObjectMonitors indexed by the
-// owner in the table. Please note that ObjectMonitors where the owner
-// is set to a stack-lock address are NOT associated with the JavaThread
-// that holds that stack-lock. All of the current consumers of
-// ObjectMonitorsHashtable info only care about JNI locked monitors and
-// those do not have the owner set to a stack-lock address.
-//
size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
- elapsedTimer* timer_p,
- ObjectMonitorsHashtable* table) {
+ elapsedTimer* timer_p) {
MonitorList::Iterator iter = _in_use_list.iterator();
size_t deflated_count = 0;
@@ -1608,18 +1592,6 @@ size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
ObjectMonitor* mid = iter.next();
if (mid->deflate_monitor()) {
deflated_count++;
- } else if (table != nullptr) {
- // The caller is interested in the owned ObjectMonitors. This does
- // not include when owner is set to a stack-lock address in thread.
- // This also does not capture unowned ObjectMonitors that cannot be
- // deflated because of a waiter.
- void* key = mid->owner();
- // Since deflate_idle_monitors() and deflate_monitor_list() can be
- // called more than once, we have to make sure the entry has not
- // already been added.
- if (key != nullptr && !table->has_entry(key, mid)) {
- table->add_entry(key, mid);
- }
}
if (current->is_Java_thread()) {
@@ -1669,9 +1641,8 @@ static size_t delete_monitors(Thread* current, GrowableArray<ObjectMonitor*>* de
}
// This function is called by the MonitorDeflationThread to deflate
-// ObjectMonitors. It is also called via do_final_audit_and_print_stats()
-// and VM_ThreadDump::doit() by the VMThread.
-size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) {
+// ObjectMonitors.
+size_t ObjectSynchronizer::deflate_idle_monitors() {
Thread* current = Thread::current();
if (current->is_Java_thread()) {
// The async deflation request has been processed.
@@ -1696,19 +1667,16 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
}
// Deflate some idle ObjectMonitors.
- size_t deflated_count = deflate_monitor_list(current, ls, &timer, table);
+ size_t deflated_count = deflate_monitor_list(current, ls, &timer);
size_t unlinked_count = 0;
size_t deleted_count = 0;
- if (deflated_count > 0 || is_final_audit()) {
- // There are ObjectMonitors that have been deflated or this is the
- // final audit and all the remaining ObjectMonitors have been
- // deflated, BUT the MonitorDeflationThread blocked for the final
- // safepoint during unlinking.
+ if (deflated_count > 0) {
+ // There are ObjectMonitors that have been deflated.
// Unlink deflated ObjectMonitors from the in-use list.
ResourceMark rm;
GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
- unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer, &delete_list);
+ unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer, deflated_count, &delete_list);
if (current->is_monitor_deflation_thread()) {
if (ls != nullptr) {
timer.stop();
@@ -1755,10 +1723,6 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
}
ls->print_cr("end deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
- if (table != nullptr) {
- ls->print_cr("ObjectMonitorsHashtable: key_count=" SIZE_FORMAT ", om_count=" SIZE_FORMAT,
- table->key_count(), table->om_count());
- }
}
OM_PERFDATA_OP(MonExtant, set_value(_in_use_list.count()));
@@ -1811,7 +1775,7 @@ void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
assert(current == JavaThread::current(), "must be current Java thread");
NoSafepointVerifier nsv;
ReleaseJavaMonitorsClosure rjmc(current);
- ObjectSynchronizer::monitors_iterate(&rjmc, current);
+ ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
assert(!current->has_pending_exception(), "Should not be possible");
current->clear_pending_exception();
assert(current->held_monitor_count() == 0, "Should not be possible");
@@ -1865,12 +1829,6 @@ void ObjectSynchronizer::do_final_audit_and_print_stats() {
log_info(monitorinflation)("Starting the final audit.");
if (log_is_enabled(Info, monitorinflation)) {
- // Do deflations in order to reduce the in-use monitor population
- // that is reported by ObjectSynchronizer::log_in_use_monitor_details()
- // which is called by ObjectSynchronizer::audit_and_print_stats().
- while (deflate_idle_monitors(/* ObjectMonitorsHashtable is not needed here */ nullptr) > 0) {
- ; // empty
- }
// The other audit_and_print_stats() call is done at the Debug
// level at a safepoint in SafepointSynchronize::do_cleanup_tasks.
audit_and_print_stats(true /* on_exit */);
@@ -1919,7 +1877,7 @@ void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
// When exiting this log output is at the Info level. When called
// at a safepoint, this log output is at the Trace level since
// there can be a lot of it.
- log_in_use_monitor_details(ls);
+ log_in_use_monitor_details(ls, !on_exit /* log_all */);
}
ls->flush();
@@ -1965,11 +1923,10 @@ void ObjectSynchronizer::chk_in_use_list(outputStream* out, int *error_cnt_p) {
void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
int* error_cnt_p) {
if (n->owner_is_DEFLATER_MARKER()) {
- // This should not happen, but if it does, it is not fatal.
- out->print_cr("WARNING: monitor=" INTPTR_FORMAT ": in-use monitor is "
- "deflated.", p2i(n));
+ // This could happen when monitor deflation blocks for a safepoint.
return;
}
+
if (n->header().value() == 0) {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
"have non-null _header field.", p2i(n));
@@ -1999,29 +1956,34 @@ void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
// Log details about ObjectMonitors on the in_use_list. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
-void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out) {
- stringStream ss;
+void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
if (_in_use_list.count() > 0) {
+ stringStream ss;
out->print_cr("In-use monitor info:");
out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
out->print_cr("%18s %s %18s %18s",
"monitor", "BHL", "object", "object type");
out->print_cr("================== === ================== ==================");
- MonitorList::Iterator iter = _in_use_list.iterator();
- while (iter.has_next()) {
- ObjectMonitor* mid = iter.next();
- const oop obj = mid->object_peek();
- const markWord mark = mid->header();
- ResourceMark rm;
- out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(mid),
- mid->is_busy(), mark.hash() != 0, mid->owner() != nullptr,
- p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
- if (mid->is_busy()) {
- out->print(" (%s)", mid->is_busy_to_string(&ss));
- ss.reset();
+
+ auto is_interesting = [&](ObjectMonitor* monitor) {
+ return log_all || monitor->has_owner() || monitor->is_busy();
+ };
+
+ monitors_iterate([&](ObjectMonitor* monitor) {
+ if (is_interesting(monitor)) {
+ const oop obj = monitor->object_peek();
+ const markWord mark = monitor->header();
+ ResourceMark rm;
+ out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
+ monitor->is_busy(), mark.hash() != 0, monitor->owner() != nullptr,
+ p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
+ if (monitor->is_busy()) {
+ out->print(" (%s)", monitor->is_busy_to_string(&ss));
+ ss.reset();
+ }
+ out->cr();
}
- out->cr();
- }
+ });
}
out->flush();
diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp
index 66d08a9d998..4dea13432e7 100644
--- a/src/hotspot/share/runtime/synchronizer.hpp
+++ b/src/hotspot/share/runtime/synchronizer.hpp
@@ -36,55 +36,6 @@ class LogStream;
class ObjectMonitor;
class ThreadsList;
-// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
-// The JavaThread's owner key is either a JavaThread* or a stack lock
-// address in the JavaThread so we use "void*".
-//
-class ObjectMonitorsHashtable {
- private:
- static unsigned int ptr_hash(void* const& s1) {
- // 2654435761 = 2^32 * Phi (golden ratio)
- return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
- }
-
- public:
- class PtrList;
-
- private:
- // ResourceHashtable SIZE is specified at compile time so we
- // use 1031 which is the first prime after 1024.
- typedef ResourceHashtable<void*, PtrList*, 1031, AnyObj::C_HEAP, mtThread, ptr_hash> PtrTable;
- PtrTable* _ptrs;
- size_t _key_count;
- size_t _om_count;
-
- public:
- // ResourceHashtable is passed to various functions and populated in
- // different places so we allocate it using C_HEAP to make it immune
- // from any ResourceMarks that happen to be in the code paths.
- ObjectMonitorsHashtable() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
-
- ~ObjectMonitorsHashtable();
-
- void add_entry(void* key, ObjectMonitor* om);
-
- void add_entry(void* key, PtrList* list) {
- _ptrs->put(key, list);
- _key_count++;
- }
-
- PtrList* get_entry(void* key) {
- PtrList** listpp = _ptrs->get(key);
- return (listpp == nullptr) ? nullptr : *listpp;
- }
-
- bool has_entry(void* key, ObjectMonitor* om);
-
- size_t key_count() { return _key_count; }
- size_t om_count() { return _om_count; }
-};
-
class MonitorList {
friend class VMStructs;
@@ -96,6 +47,7 @@ class MonitorList {
public:
void add(ObjectMonitor* monitor);
size_t unlink_deflated(Thread* current, LogStream* ls, elapsedTimer* timer_p,
+ size_t deflated_count,
GrowableArray<ObjectMonitor*>* unlinked_list);
size_t count() const;
size_t max() const;
@@ -172,29 +124,34 @@ class ObjectSynchronizer : AllStatic {
// JNI detach support
static void release_monitors_owned_by_thread(JavaThread* current);
+ // Iterate over all ObjectMonitors.
+ template <typename Function>
+ static void monitors_iterate(Function function);
+
+ // Iterate ObjectMonitors owned by any thread and where the owner `filter`
+ // returns true.
+ template <typename OwnerFilter>
+ static void owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter);
+
// Iterate ObjectMonitors where the owner == thread; this does NOT include
- // ObjectMonitors where owner is set to a stack lock address in thread:
- //
- // This version of monitors_iterate() works with the in-use monitor list.
- static void monitors_iterate(MonitorClosure* m, JavaThread* thread);
- // This version of monitors_iterate() works with the specified linked list.
- static void monitors_iterate(MonitorClosure* closure,
- ObjectMonitorsHashtable::PtrList* list,
- JavaThread* thread);
+ // ObjectMonitors where owner is set to a stack lock address in thread.
+ static void owned_monitors_iterate(MonitorClosure* m, JavaThread* thread);
+
+ // Iterate ObjectMonitors owned by any thread.
+ static void owned_monitors_iterate(MonitorClosure* closure);
// Initialize the gInflationLocks
static void initialize();
- // GC: we currently use aggressive monitor deflation policy
- // Basically we try to deflate all monitors that are not busy.
- static size_t deflate_idle_monitors(ObjectMonitorsHashtable* table);
+ // We currently use aggressive monitor deflation policy;
+ // basically we try to deflate all monitors that are not busy.
+ static size_t deflate_idle_monitors();
// Deflate idle monitors:
static void chk_for_block_req(JavaThread* current, const char* op_name,
const char* cnt_name, size_t cnt, LogStream* ls,
elapsedTimer* timer_p);
- static size_t deflate_monitor_list(Thread* current, LogStream* ls, elapsedTimer* timer_p,
- ObjectMonitorsHashtable* table);
+ static size_t deflate_monitor_list(Thread* current, LogStream* ls, elapsedTimer* timer_p);
static size_t in_use_list_ceiling();
static void dec_in_use_list_ceiling();
static void inc_in_use_list_ceiling();
@@ -204,7 +161,8 @@ class ObjectSynchronizer : AllStatic {
static bool is_final_audit() { return _is_final_audit; }
static void set_is_final_audit() { _is_final_audit = true; }
static jlong last_async_deflation_time_ns() { return _last_async_deflation_time_ns; }
- static bool request_deflate_idle_monitors(); // for whitebox test support
+ static void request_deflate_idle_monitors();
+ static bool request_deflate_idle_monitors_from_wb(); // for whitebox test support
static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; }
static jlong time_since_last_async_deflation_ms();
@@ -214,7 +172,7 @@ class ObjectSynchronizer : AllStatic {
static void chk_in_use_entry(ObjectMonitor* n, outputStream* out,
int* error_cnt_p);
static void do_final_audit_and_print_stats();
- static void log_in_use_monitor_details(outputStream* out);
+ static void log_in_use_monitor_details(outputStream* out, bool log_all);
private:
friend class SynchronizerTest;
@@ -252,4 +210,11 @@ class ObjectLocker : public StackObj {
void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
};
+// Interface to visit monitors
+class ObjectMonitorsView {
+public:
+ // Visit monitors that belong to the given thread
+ virtual void visit(MonitorClosure* closure, JavaThread* thread) = 0;
+};
+
#endif // SHARE_RUNTIME_SYNCHRONIZER_HPP
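The ObjectMonitorsView interface added above lets the thread-dump code consume a pre-collected, per-thread grouping of monitors without knowing how it was built. A compact sketch of that shape with placeholder JavaThread/ObjectMonitor stand-ins (not the HotSpot types):

#include <cstdio>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins for the HotSpot types.
struct JavaThread { int id; };
struct ObjectMonitor { const JavaThread* owner; };

struct MonitorClosure {
  virtual void do_monitor(ObjectMonitor* m) = 0;
  virtual ~MonitorClosure() = default;
};

// Same shape as the ObjectMonitorsView interface added above.
struct ObjectMonitorsView {
  virtual void visit(MonitorClosure* closure, JavaThread* thread) = 0;
  virtual ~ObjectMonitorsView() = default;
};

// One possible view: group monitors by owner once, then serve per-thread visits.
class SnapshotView : public ObjectMonitorsView {
  std::unordered_map<const JavaThread*, std::vector<ObjectMonitor*>> _by_owner;
 public:
  void add(ObjectMonitor* m) { _by_owner[m->owner].push_back(m); }
  void visit(MonitorClosure* closure, JavaThread* thread) override {
    auto it = _by_owner.find(thread);
    if (it == _by_owner.end()) return;
    for (ObjectMonitor* m : it->second) closure->do_monitor(m);
  }
};

int main() {
  JavaThread t1{1}, t2{2};
  ObjectMonitor m1{&t1}, m2{&t2}, m3{&t1};
  SnapshotView view;
  ObjectMonitor* monitors[] = {&m1, &m2, &m3};
  for (ObjectMonitor* m : monitors) view.add(m);

  struct Printer : MonitorClosure {
    void do_monitor(ObjectMonitor* m) override { std::printf("monitor %p\n", (void*)m); }
  } printer;
  view.visit(&printer, &t1);  // prints the two monitors owned by t1
}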
diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp
index 3e121e3bf74..ac435317d05 100644
--- a/src/hotspot/share/runtime/threads.cpp
+++ b/src/hotspot/share/runtime/threads.cpp
@@ -823,10 +823,10 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Threads::destroy_vm() is normally called from jni_DestroyJavaVM() when
// the program falls off the end of main(). Another VM exit path is through
-// vm_exit() when the program calls System.exit() to return a value or when
-// there is a serious error in VM. The two shutdown paths are not exactly
-// the same, but they share Shutdown.shutdown() at Java level and before_exit()
-// and VM_Exit op at VM level.
+// vm_exit(), when the program calls System.exit() to return a value, or when
+// there is a serious error in VM.
+// These two separate shutdown paths are not exactly the same, but they share
+// Shutdown.shutdown() at Java level and before_exit() and VM_Exit op at VM level.
//
// Shutdown sequence:
// + Shutdown native memory tracking if it is on
@@ -1008,7 +1008,7 @@ void Threads::add(JavaThread* p, bool force_daemon) {
ObjectSynchronizer::inc_in_use_list_ceiling();
// Possible GC point.
- Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
+ Events::log(Thread::current(), "Thread added: " INTPTR_FORMAT, p2i(p));
// Make new thread known to active EscapeBarrier
EscapeBarrier::thread_added(p);
@@ -1071,7 +1071,7 @@ void Threads::remove(JavaThread* p, bool is_daemon) {
ObjectSynchronizer::dec_in_use_list_ceiling();
// Since Events::log uses a lock, we grab it outside the Threads_lock
- Events::log(p, "Thread exited: " INTPTR_FORMAT, p2i(p));
+ Events::log(Thread::current(), "Thread exited: " INTPTR_FORMAT, p2i(p));
}
// Operations on the Threads list for GC. These are not explicitly locked,
@@ -1317,10 +1317,7 @@ void Threads::print_on(outputStream* st, bool print_stacks,
}
PrintOnClosure cl(st);
- cl.do_thread(VMThread::vm_thread());
- Universe::heap()->gc_threads_do(&cl);
- cl.do_thread(WatcherThread::watcher_thread());
- cl.do_thread(AsyncLogWriter::instance());
+ non_java_threads_do(&cl);
st->flush();
}
diff --git a/src/hotspot/share/runtime/vmOperations.cpp b/src/hotspot/share/runtime/vmOperations.cpp
index fefb96acc7a..b5651940346 100644
--- a/src/hotspot/share/runtime/vmOperations.cpp
+++ b/src/hotspot/share/runtime/vmOperations.cpp
@@ -43,12 +43,14 @@
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
+#include "runtime/objectMonitor.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"
+#include "utilities/ticks.hpp"
#define VM_OP_NAME_INITIALIZE(name) #name,
@@ -228,7 +230,6 @@ VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
_result = result;
_num_threads = 0; // 0 indicates all threads
_threads = nullptr;
- _result = result;
_max_depth = max_depth;
_with_locked_monitors = with_locked_monitors;
_with_locked_synchronizers = with_locked_synchronizers;
@@ -243,7 +244,6 @@ VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
_result = result;
_num_threads = num_threads;
_threads = threads;
- _result = result;
_max_depth = max_depth;
_with_locked_monitors = with_locked_monitors;
_with_locked_synchronizers = with_locked_synchronizers;
@@ -265,6 +265,111 @@ void VM_ThreadDump::doit_epilogue() {
}
}
+// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
+// The JavaThread's owner key is either a JavaThread* or a stack lock
+// address in the JavaThread so we use "void*".
+//
+class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
+ private:
+ static unsigned int ptr_hash(void* const& s1) {
+ // 2654435761 = 2^32 * Phi (golden ratio)
+ return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
+ }
+
+ private:
+ class ObjectMonitorLinkedList :
+ public LinkedListImpl<ObjectMonitor*, AnyObj::C_HEAP, mtThread> {};
+
+ // ResourceHashtable SIZE is specified at compile time so we
+ // use 1031 which is the first prime after 1024.
+ typedef ResourceHashtable<void*, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread, ptr_hash> PtrTable;
+ PtrTable* _ptrs;
+ size_t _key_count;
+ size_t _om_count;
+
+ void add_list(void* key, ObjectMonitorLinkedList* list) {
+ _ptrs->put(key, list);
+ _key_count++;
+ }
+
+ ObjectMonitorLinkedList* get_list(void* key) {
+ ObjectMonitorLinkedList** listpp = _ptrs->get(key);
+ return (listpp == nullptr) ? nullptr : *listpp;
+ }
+
+ void add(ObjectMonitor* monitor) {
+ void* key = monitor->owner();
+
+ ObjectMonitorLinkedList* list = get_list(key);
+ if (list == nullptr) {
+ // Create new list and add it to the hash table:
+ list = new (mtThread) ObjectMonitorLinkedList;
+ _ptrs->put(key, list);
+ _key_count++;
+ }
+
+ assert(list->find(monitor) == nullptr, "Should not contain duplicates");
+ list->add(monitor); // Add the ObjectMonitor to the list.
+ _om_count++;
+ }
+
+ public:
+ // ResourceHashtable is passed to various functions and populated in
+ // different places so we allocate it using C_HEAP to make it immune
+ // from any ResourceMarks that happen to be in the code paths.
+ ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
+
+ ~ObjectMonitorsDump() {
+ class CleanupObjectMonitorsDump: StackObj {
+ public:
+ bool do_entry(void*& key, ObjectMonitorLinkedList*& list) {
+ list->clear(); // clear the LinkListNodes
+ delete list; // then delete the LinkedList
+ return true;
+ }
+ } cleanup;
+
+ _ptrs->unlink(&cleanup); // cleanup the LinkedLists
+ delete _ptrs; // then delete the hash table
+ }
+
+ // Implements MonitorClosure used to collect all owned monitors in the system
+ void do_monitor(ObjectMonitor* monitor) override {
+ assert(monitor->has_owner(), "Expects only owned monitors");
+
+ if (monitor->is_owner_anonymous()) {
+ // There's no need to collect anonymous owned monitors
+ // because the caller of this code is only interested
+ // in JNI owned monitors.
+ return;
+ }
+
+ if (monitor->object_peek() == nullptr) {
+ // JNI code doesn't necessarily keep the monitor object
+ // alive. Filter out monitors with dead objects.
+ return;
+ }
+
+ add(monitor);
+ }
+
+ // Implements the ObjectMonitorsView interface
+ void visit(MonitorClosure* closure, JavaThread* thread) override {
+ ObjectMonitorLinkedList* list = get_list(thread);
+ LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
+ while (!iter.is_empty()) {
+ ObjectMonitor* monitor = *iter.next();
+ closure->do_monitor(monitor);
+ }
+ }
+
+ size_t key_count() { return _key_count; }
+ size_t om_count() { return _om_count; }
+};
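ptr_hash() above is multiplicative hashing: pointer bits are scrambled by a golden-ratio-derived multiplier (see the comment above) so that heap addresses, which share their low alignment bits, still spread across the 1031 buckets. A tiny self-contained illustration; the addresses are made up, and the constant and table size are copied from the code above:

#include <cstdint>
#include <cstdio>

// Multiplicative hash of a pointer, same expression as ptr_hash above.
static unsigned int ptr_hash(const void* p) {
  return (unsigned int)(((uint32_t)(uintptr_t)p) * 2654435761u);
}

int main() {
  const unsigned int table_size = 1031;  // first prime after 1024, as above
  for (uintptr_t addr = 0x10000; addr < 0x10000 + 8 * 64; addr += 64) {
    // 64-byte-aligned "monitors": identical low bits, yet well-spread buckets.
    std::printf("addr=0x%lx bucket=%u\n",
                (unsigned long)addr, ptr_hash((const void*)addr) % table_size);
  }
}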
+
void VM_ThreadDump::doit() {
ResourceMark rm;
@@ -279,16 +384,20 @@ void VM_ThreadDump::doit() {
concurrent_locks.dump_at_safepoint();
}
- ObjectMonitorsHashtable table;
- ObjectMonitorsHashtable* tablep = nullptr;
+ ObjectMonitorsDump object_monitors;
if (_with_locked_monitors) {
- // The caller wants locked monitor information and that's expensive to gather
- // when there are a lot of inflated monitors. So we deflate idle monitors and
- // gather information about owned monitors at the same time.
- tablep = &table;
- while (ObjectSynchronizer::deflate_idle_monitors(tablep) > 0) {
- ; /* empty */
- }
+ // Gather information about owned monitors.
+ ObjectSynchronizer::owned_monitors_iterate(&object_monitors);
+
+ // If there are many object monitors in the system then the above iteration
+ // can start to take time. Be friendly to following thread dumps by telling
+ // the MonitorDeflationThread to deflate monitors.
+ //
+ // This is trying to be somewhat backwards compatible with the previous
+ // implementation, which performed monitor deflation right here. We might
+ // want to reconsider the need to trigger monitor deflation from the thread
+ // dumping and instead maybe tweak the deflation heuristics.
+ ObjectSynchronizer::request_deflate_idle_monitors();
}
if (_num_threads == 0) {
@@ -305,7 +414,7 @@ void VM_ThreadDump::doit() {
if (_with_locked_synchronizers) {
tcl = concurrent_locks.thread_concurrent_locks(jt);
}
- snapshot_thread(jt, tcl, tablep);
+ snapshot_thread(jt, tcl, &object_monitors);
}
} else {
// Snapshot threads in the given _threads array
@@ -340,15 +449,15 @@ void VM_ThreadDump::doit() {
if (_with_locked_synchronizers) {
tcl = concurrent_locks.thread_concurrent_locks(jt);
}
- snapshot_thread(jt, tcl, tablep);
+ snapshot_thread(jt, tcl, &object_monitors);
}
}
}
void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
- ObjectMonitorsHashtable* table) {
+ ObjectMonitorsView* monitors) {
ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
- snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, table, false);
+ snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
snapshot->set_concurrent_locks(tcl);
}
diff --git a/src/hotspot/share/runtime/vmOperations.hpp b/src/hotspot/share/runtime/vmOperations.hpp
index 152d72a0400..8778fe29ade 100644
--- a/src/hotspot/share/runtime/vmOperations.hpp
+++ b/src/hotspot/share/runtime/vmOperations.hpp
@@ -30,7 +30,7 @@
#include "runtime/vmOperation.hpp"
#include "runtime/threadSMR.hpp"
-class ObjectMonitorsHashtable;
+class ObjectMonitorsView;
// A hodge podge of commonly used VM Operations
@@ -204,7 +204,7 @@ class VM_ThreadDump : public VM_Operation {
bool _with_locked_synchronizers;
void snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
- ObjectMonitorsHashtable* table);
+ ObjectMonitorsView* monitors);
public:
VM_ThreadDump(ThreadDumpResult* result,
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index f7302bea5f7..80f7ec0d827 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -791,7 +791,6 @@
/************/ \
\
nonstatic_field(ciEnv, _compiler_data, void*) \
- nonstatic_field(ciEnv, _failure_reason, const char*) \
nonstatic_field(ciEnv, _factory, ciObjectFactory*) \
nonstatic_field(ciEnv, _dependencies, Dependencies*) \
nonstatic_field(ciEnv, _task, CompileTask*) \
diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp
index 74786534069..0f7c780035e 100644
--- a/src/hotspot/share/services/heapDumper.cpp
+++ b/src/hotspot/share/services/heapDumper.cpp
@@ -883,8 +883,10 @@ class ParDumpWriter : public AbstractDumpWriter {
Monitor* ParDumpWriter::_lock = nullptr;
-// Support class with a collection of functions used when dumping the heap
+class DumperClassCacheTable;
+class DumperClassCacheTableEntry;
+// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
public:
@@ -899,7 +901,7 @@ class DumperSupport : AllStatic {
static u4 sig2size(Symbol* sig);
// returns the size of the instance of the given class
- static u4 instance_size(Klass* k);
+ static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);
// dump a jfloat
static void dump_float(AbstractDumpWriter* writer, jfloat f);
@@ -912,13 +914,13 @@ class DumperSupport : AllStatic {
// dumps static fields of the given class
static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
// dump the raw values of the instance fields of the given object
- static void dump_instance_fields(AbstractDumpWriter* writer, oop o);
+ static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
// get the count of the instance fields for a given class
static u2 get_instance_fields_count(InstanceKlass* ik);
// dumps the definition of the instance fields for a given class
static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
// creates HPROF_GC_INSTANCE_DUMP record for the given object
- static void dump_instance(AbstractDumpWriter* writer, oop o);
+ static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
// creates HPROF_GC_CLASS_DUMP record for the given instance class
static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
// creates HPROF_GC_CLASS_DUMP record for a given array class
@@ -937,15 +939,130 @@ class DumperSupport : AllStatic {
// fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
static void end_of_dump(AbstractDumpWriter* writer);
- static oop mask_dormant_archived_object(oop o) {
- if (o != nullptr && o->klass()->java_mirror() == nullptr) {
+ static oop mask_dormant_archived_object(oop o, oop ref_obj) {
+ if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
// Ignore this object since the corresponding java mirror is not loaded.
// Might be a dormant archive object.
+ report_dormant_archived_object(o, ref_obj);
return nullptr;
} else {
return o;
}
}
+
+ static void report_dormant_archived_object(oop o, oop ref_obj) {
+ if (log_is_enabled(Trace, cds, heap)) {
+ ResourceMark rm;
+ if (ref_obj != nullptr) {
+ log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
+ p2i(o), o->klass()->external_name(),
+ p2i(ref_obj), ref_obj->klass()->external_name());
+ } else {
+ log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
+ p2i(o), o->klass()->external_name());
+ }
+ }
+ }
+};
+
+// Hash table of klasses to the klass metadata. This should greatly improve the
+// hash dumping performance. This hash table is supposed to be used by a single
+// thread only.
+//
+class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
+ friend class DumperClassCacheTable;
+private:
+ GrowableArray<char> _sigs_start;
+ GrowableArray<int> _offsets;
+ u4 _instance_size;
+ int _entries;
+
+public:
+ DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};
+
+ int field_count() { return _entries; }
+ char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
+ int offset(int field_idx) { return _offsets.at(field_idx); }
+ u4 instance_size() { return _instance_size; }
+};
+
+class DumperClassCacheTable {
+private:
+ // ResourceHashtable SIZE is specified at compile time so we
+ // use 1031 which is the first prime after 1024.
+ static constexpr size_t TABLE_SIZE = 1031;
+
+ // Maintain the cache for N classes. This limits memory footprint
+ // impact, regardless of how many classes we have in the dump.
+ // This also improves look up performance by keeping the statically
+ // sized table from overloading.
+ static constexpr int CACHE_TOP = 256;
+
+ typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*, TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
+ PtrTable* _ptrs;
+
+ // Single-slot cache to handle the major case of objects of the same
+ // class back-to-back, e.g. from T[].
+ InstanceKlass* _last_ik;
+ DumperClassCacheTableEntry* _last_entry;
+
+ void unlink_all(PtrTable* table) {
+ class CleanupEntry: StackObj {
+ public:
+ bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
+ delete entry;
+ return true;
+ }
+ } cleanup;
+ table->unlink(&cleanup);
+ }
+
+public:
+ DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
+ if (_last_ik == ik) {
+ return _last_entry;
+ }
+
+ DumperClassCacheTableEntry* entry;
+ DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
+ if (from_cache == nullptr) {
+ entry = new DumperClassCacheTableEntry();
+ for (HierarchicalFieldStream fld(ik); !fld.done(); fld.next()) {
+ if (!fld.access_flags().is_static()) {
+ Symbol* sig = fld.signature();
+ entry->_sigs_start.push(sig->char_at(0));
+ entry->_offsets.push(fld.offset());
+ entry->_entries++;
+ entry->_instance_size += DumperSupport::sig2size(sig);
+ }
+ }
+
+ if (_ptrs->number_of_entries() >= CACHE_TOP) {
+ // We do not track the individual hit rates for table entries.
+ // Purge the entire table, and let the cache catch up with new
+ // distribution.
+ unlink_all(_ptrs);
+ }
+
+ _ptrs->put(ik, entry);
+ } else {
+ entry = *from_cache;
+ }
+
+ // Remember for single-slot cache.
+ _last_ik = ik;
+ _last_entry = entry;
+
+ return entry;
+ }
+
+ DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}
+
+ ~DumperClassCacheTable() {
+ unlink_all(_ptrs);
+ delete _ptrs;
+ }
};
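The class cache above pays for one walk of the field streams per class and then serves per-object lookups from memory, with a single-slot fast path for runs of objects of the same class. A reduced sketch of that two-level lookup; FieldLayout and compute_layout() are placeholders for the cached signatures/offsets and the HierarchicalFieldStream walk:

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

// Placeholder for the per-class metadata the dumper caches
// (signature start characters and field offsets).
struct FieldLayout {
  std::vector<char> sig_start;
  std::vector<int>  offsets;
};

// Stand-in for the expensive field-stream walk.
static FieldLayout compute_layout(const std::string& klass) {
  std::printf("computing layout for %s\n", klass.c_str());
  return FieldLayout{{'I', 'L'}, {12, 16}};
}

class ClassCache {
  std::unordered_map<std::string, FieldLayout> _table;
  const std::string* _last_key = nullptr;   // single-slot cache for
  FieldLayout* _last_entry = nullptr;       // back-to-back same-class objects
 public:
  FieldLayout* lookup_or_create(const std::string& klass) {
    if (_last_key != nullptr && *_last_key == klass) {
      return _last_entry;                   // fast path, no hash lookup
    }
    auto it = _table.find(klass);
    if (it == _table.end()) {
      it = _table.emplace(klass, compute_layout(klass)).first;
    }
    _last_key = &it->first;
    _last_entry = &it->second;
    return _last_entry;
  }
};

int main() {
  ClassCache cache;
  // A run of same-class objects hits the single-slot cache; the layout
  // is computed only once per class.
  const char* klasses[] = {"java/lang/String", "java/lang/String", "java/util/HashMap"};
  for (const char* k : klasses) {
    FieldLayout* layout = cache.lookup_or_create(k);
    std::printf("%s has %zu fields\n", k, layout->offsets.size());
  }
}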
// write a header of the given type
@@ -1033,13 +1150,7 @@ void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop
case JVM_SIGNATURE_CLASS :
case JVM_SIGNATURE_ARRAY : {
oop o = obj->obj_field_access(offset);
- if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
- ResourceMark rm;
- log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
- p2i(o), o->klass()->external_name(),
- p2i(obj), obj->klass()->external_name());
- }
- o = mask_dormant_archived_object(o);
+ o = mask_dormant_archived_object(o, obj);
assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
writer->write_objectID(o);
break;
@@ -1092,16 +1203,18 @@ void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop
}
// returns the size of the instance of the given class
-u4 DumperSupport::instance_size(Klass* k) {
- InstanceKlass* ik = InstanceKlass::cast(k);
- u4 size = 0;
-
- for (HierarchicalFieldStream fld(ik); !fld.done(); fld.next()) {
- if (!fld.access_flags().is_static()) {
- size += sig2size(fld.signature());
+u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
+ if (class_cache_entry != nullptr) {
+ return class_cache_entry->instance_size();
+ } else {
+ u4 size = 0;
+ for (HierarchicalFieldStream fld(ik); !fld.done(); fld.next()) {
+ if (!fld.access_flags().is_static()) {
+ size += sig2size(fld.signature());
+ }
}
+ return size;
}
- return size;
}
u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
@@ -1173,14 +1286,10 @@ void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
}
// dump the raw values of the instance fields of the given object
-void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o) {
- InstanceKlass* ik = InstanceKlass::cast(o->klass());
-
- for (HierarchicalFieldStream fld(ik); !fld.done(); fld.next()) {
- if (!fld.access_flags().is_static()) {
- Symbol* sig = fld.signature();
- dump_field_value(writer, sig->char_at(0), o, fld.offset());
- }
+void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {
+ assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
+ for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
+ dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
}
}
@@ -1211,9 +1320,12 @@ void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer,
}
// creates HPROF_GC_INSTANCE_DUMP record for the given object
-void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o) {
+void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
InstanceKlass* ik = InstanceKlass::cast(o->klass());
- u4 is = instance_size(ik);
+
+ DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
+
+ u4 is = instance_size(ik, cache_entry);
u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
@@ -1227,7 +1339,7 @@ void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o) {
writer->write_u4(is);
// field values
- dump_instance_fields(writer, o);
+ dump_instance_fields(writer, o, cache_entry);
writer->end_sub_record();
}
@@ -1370,13 +1482,7 @@ void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop ar
// [id]* elements
for (int index = 0; index < length; index++) {
oop o = array->obj_at(index);
- if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
- ResourceMark rm;
- log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
- p2i(o), o->klass()->external_name(),
- p2i(array), array->klass()->external_name());
- }
- o = mask_dormant_archived_object(o);
+ o = mask_dormant_archived_object(o, array);
writer->write_objectID(o);
}
@@ -1690,6 +1796,9 @@ class HeapObjectDumper : public ObjectClosure {
AbstractDumpWriter* writer() { return _writer; }
bool is_large(oop o);
+
+ DumperClassCacheTable _class_cache;
+
public:
HeapObjectDumper(AbstractDumpWriter* writer, HeapDumpLargeObjectList* list = nullptr) {
_writer = writer;
@@ -1708,8 +1817,7 @@ void HeapObjectDumper::do_object(oop o) {
}
}
- if (DumperSupport::mask_dormant_archived_object(o) == nullptr) {
- log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
+ if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
return;
}
@@ -1722,7 +1830,7 @@ void HeapObjectDumper::do_object(oop o) {
if (o->is_instance()) {
// create a HPROF_GC_INSTANCE record for each object
- DumperSupport::dump_instance(writer(), o);
+ DumperSupport::dump_instance(writer(), o, &_class_cache);
} else if (o->is_objArray()) {
// create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
DumperSupport::dump_object_array(writer(), objArrayOop(o));
@@ -2278,6 +2386,7 @@ void VM_HeapDumper::work(uint worker_id) {
// The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
// of the heap dump.
if (_num_dumper_threads <= 1) {
+ ResourceMark rm;
HeapObjectDumper obj_dumper(writer());
Universe::heap()->object_iterate(&obj_dumper);
} else {
@@ -2294,6 +2403,7 @@ void VM_HeapDumper::work(uint worker_id) {
{
ParDumpWriter pw(writer());
{
+ ResourceMark rm;
HeapObjectDumper obj_dumper(&pw, _large_object_list);
_poi->object_iterate(&obj_dumper, worker_id);
}
@@ -2314,6 +2424,7 @@ void VM_HeapDumper::work(uint worker_id) {
assert(get_worker_type(worker_id) == VMDumperType, "Heap dumper must be VMDumper");
// Use writer() rather than ParDumpWriter to avoid memory consumption.
+ ResourceMark rm;
HeapObjectDumper obj_dumper(writer());
dump_large_objects(&obj_dumper);
// Writes the HPROF_HEAP_DUMP_END record.
diff --git a/src/hotspot/share/services/mallocTracker.cpp b/src/hotspot/share/services/mallocTracker.cpp
index 5dab3837707..064002c707c 100644
--- a/src/hotspot/share/services/mallocTracker.cpp
+++ b/src/hotspot/share/services/mallocTracker.cpp
@@ -40,10 +40,11 @@
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#include "utilities/vmError.hpp"
-size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+MallocMemorySnapshot MallocMemorySummary::_snapshot;
void MemoryCounter::update_peak(size_t size, size_t cnt) {
size_t peak_sz = peak_size();
@@ -78,9 +79,7 @@ void MallocMemorySnapshot::make_adjustment() {
}
void MallocMemorySummary::initialize() {
- assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
// Uses placement new operator to initialize static area.
- ::new ((void*)_snapshot)MallocMemorySnapshot();
MallocLimitHandler::initialize(MallocLimit);
}
@@ -198,6 +197,8 @@ void MallocTracker::deaccount(MallocHeader::FreeInfo free_info) {
bool MallocTracker::print_pointer_information(const void* p, outputStream* st) {
assert(MemTracker::enabled(), "NMT not enabled");
+#if !INCLUDE_ASAN
+
address addr = (address)p;
// Carefully feel your way upwards and try to find a malloc header. Then check if
@@ -275,5 +276,8 @@ bool MallocTracker::print_pointer_information(const void* p, outputStream* st) {
}
return true;
}
+
+#endif // !INCLUDE_ASAN
+
return false;
}
diff --git a/src/hotspot/share/services/mallocTracker.hpp b/src/hotspot/share/services/mallocTracker.hpp
index ed66f643c74..d6ea25fa9d6 100644
--- a/src/hotspot/share/services/mallocTracker.hpp
+++ b/src/hotspot/share/services/mallocTracker.hpp
@@ -140,7 +140,7 @@ class MallocMemorySummary;
// A snapshot of malloc'd memory, includes malloc memory
// usage by types and memory used by tracking itself.
-class MallocMemorySnapshot : public ResourceObj {
+class MallocMemorySnapshot {
friend class MallocMemorySummary;
private:
@@ -198,7 +198,7 @@ class MallocMemorySnapshot : public ResourceObj {
class MallocMemorySummary : AllStatic {
private:
// Reserve memory for placement of MallocMemorySnapshot object
- static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+ static MallocMemorySnapshot _snapshot;
static bool _have_limits;
// Called when a total limit break was detected.
@@ -245,7 +245,7 @@ class MallocMemorySummary : AllStatic {
}
static MallocMemorySnapshot* as_snapshot() {
- return (MallocMemorySnapshot*)_snapshot;
+ return &_snapshot;
}
// MallocLimit: returns true if allocating s bytes on f would trigger
diff --git a/src/hotspot/share/services/memReporter.cpp b/src/hotspot/share/services/memReporter.cpp
index 717699d84ea..1fa0438e30e 100644
--- a/src/hotspot/share/services/memReporter.cpp
+++ b/src/hotspot/share/services/memReporter.cpp
@@ -50,10 +50,13 @@ size_t MemReporterBase::committed_total(const MallocMemory* malloc, const Virtua
return malloc->malloc_size() + malloc->arena_size() + vm->committed();
}
-void MemReporterBase::print_total(size_t reserved, size_t committed) const {
+void MemReporterBase::print_total(size_t reserved, size_t committed, size_t peak) const {
const char* scale = current_scale();
output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
+ if (peak != 0) {
+ output()->print(", peak=" SIZE_FORMAT "%s", amount_in_current_scale(peak), scale);
+ }
}
void MemReporterBase::print_malloc(const MemoryCounter* c, MEMFLAGS flag) const {
@@ -89,10 +92,16 @@ void MemReporterBase::print_malloc(const MemoryCounter* c, MEMFLAGS flag) const
}
}
-void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const {
+void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed, size_t peak) const {
+ outputStream* out = output();
const char* scale = current_scale();
- output()->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s)",
+ out->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s, ",
amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
+ if (peak == committed) {
+ out->print_raw("at peak)");
+ } else {
+ out->print("peak=" SIZE_FORMAT "%s)", amount_in_current_scale(peak), scale);
+ }
}
void MemReporterBase::print_malloc_line(const MemoryCounter* c) const {
@@ -101,9 +110,9 @@ void MemReporterBase::print_malloc_line(const MemoryCounter* c) const {
output()->print_cr(" ");
}
-void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const {
+void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed, size_t peak) const {
output()->print("%28s", " ");
- print_virtual_memory(reserved, committed);
+ print_virtual_memory(reserved, committed, peak);
output()->print_cr(" ");
}
@@ -201,73 +210,79 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
committed_amount += _malloc_snapshot->malloc_overhead();
}
- if (amount_in_current_scale(reserved_amount) > 0) {
- outputStream* out = output();
- const char* scale = current_scale();
- out->print("-%26s (", NMTUtil::flag_to_name(flag));
- print_total(reserved_amount, committed_amount);
+ // Omit printing if the current reserved value as well as all historical peaks (malloc, mmap committed, arena)
+ // fall below scale threshold
+ const size_t pk_vm = virtual_memory->peak_size();
+ const size_t pk_malloc = malloc_memory->malloc_peak_size();
+ const size_t pk_arena = malloc_memory->arena_peak_size();
+
+ if (amount_in_current_scale(MAX4(reserved_amount, pk_vm, pk_malloc, pk_arena)) == 0) {
+ return;
+ }
+
+ outputStream* out = output();
+ const char* scale = current_scale();
+ out->print("-%26s (", NMTUtil::flag_to_name(flag));
+ print_total(reserved_amount, committed_amount);
#if INCLUDE_CDS
- if (flag == mtClassShared) {
- size_t read_only_bytes = FileMapInfo::readonly_total();
- output()->print(", readonly=" SIZE_FORMAT "%s",
- amount_in_current_scale(read_only_bytes), scale);
- }
+ if (flag == mtClassShared) {
+ size_t read_only_bytes = FileMapInfo::readonly_total();
+ output()->print(", readonly=" SIZE_FORMAT "%s",
+ amount_in_current_scale(read_only_bytes), scale);
+ }
#endif
- out->print_cr(")");
+ out->print_cr(")");
- if (flag == mtClass) {
- // report class count
- out->print_cr("%27s (classes #" SIZE_FORMAT ")",
- " ", (_instance_class_count + _array_class_count));
- out->print_cr("%27s ( instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
- " ", _instance_class_count, _array_class_count);
- } else if (flag == mtThread) {
- if (ThreadStackTracker::track_as_vm()) {
- const VirtualMemory* thread_stack_usage =
- _vm_snapshot->by_type(mtThreadStack);
- // report thread count
- out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", ThreadStackTracker::thread_count());
- out->print("%27s (stack: ", " ");
- print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
- } else {
- MallocMemory* thread_stack_memory = _malloc_snapshot->by_type(mtThreadStack);
- const char* scale = current_scale();
- // report thread count
- out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", thread_stack_memory->malloc_count());
- out->print("%27s (Stack: " SIZE_FORMAT "%s", " ",
- amount_in_current_scale(thread_stack_memory->malloc_size()), scale);
- }
- out->print_cr(")");
+ if (flag == mtClass) {
+ // report class count
+ out->print_cr("%27s (classes #" SIZE_FORMAT ")",
+ " ", (_instance_class_count + _array_class_count));
+ out->print_cr("%27s ( instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
+ " ", _instance_class_count, _array_class_count);
+ } else if (flag == mtThread) {
+ if (ThreadStackTracker::track_as_vm()) {
+ const VirtualMemory* thread_stack_usage =
+ _vm_snapshot->by_type(mtThreadStack);
+ // report thread count
+ out->print_cr("%27s (threads #" SIZE_FORMAT ")", " ", ThreadStackTracker::thread_count());
+ out->print("%27s (stack: ", " ");
+ print_total(thread_stack_usage->reserved(), thread_stack_usage->committed(), thread_stack_usage->peak_size());
+ } else {
+ MallocMemory* thread_stack_memory = _malloc_snapshot->by_type(mtThreadStack);
+ const char* scale = current_scale();
+ // report thread count
+ out->print_cr("%27s (threads #" SIZE_FORMAT ")", " ", thread_stack_memory->malloc_count());
+ out->print("%27s (Stack: " SIZE_FORMAT "%s", " ",
+ amount_in_current_scale(thread_stack_memory->malloc_size()), scale);
}
+ out->print_cr(")");
+ }
- // report malloc'd memory
- if (amount_in_current_scale(malloc_memory->malloc_size()) > 0
- || amount_in_current_scale(malloc_memory->malloc_peak_size()) > 0) {
- print_malloc_line(malloc_memory->malloc_counter());
- }
+ // report malloc'd memory
+ if (amount_in_current_scale(MAX2(malloc_memory->malloc_size(), pk_malloc)) > 0) {
+ print_malloc_line(malloc_memory->malloc_counter());
+ }
- if (amount_in_current_scale(virtual_memory->reserved()) > 0) {
- print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed());
- }
+ if (amount_in_current_scale(MAX2(virtual_memory->reserved(), pk_vm)) > 0) {
+ print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed(), virtual_memory->peak_size());
+ }
- if (amount_in_current_scale(malloc_memory->arena_size()) > 0
- DEBUG_ONLY(|| amount_in_current_scale(malloc_memory->arena_peak_size()) > 0)) {
- print_arena_line(malloc_memory->arena_counter());
- }
+ if (amount_in_current_scale(MAX2(malloc_memory->arena_size(), pk_arena)) > 0) {
+ print_arena_line(malloc_memory->arena_counter());
+ }
- if (flag == mtNMT &&
- amount_in_current_scale(_malloc_snapshot->malloc_overhead()) > 0) {
- out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
- amount_in_current_scale(_malloc_snapshot->malloc_overhead()), scale);
- } else if (flag == mtClass) {
- // Metadata information
- report_metadata(Metaspace::NonClassType);
- if (Metaspace::using_class_space()) {
- report_metadata(Metaspace::ClassType);
- }
+ if (flag == mtNMT &&
+ amount_in_current_scale(_malloc_snapshot->malloc_overhead()) > 0) {
+ out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
+ amount_in_current_scale(_malloc_snapshot->malloc_overhead()), scale);
+ } else if (flag == mtClass) {
+ // Metadata information
+ report_metadata(Metaspace::NonClassType);
+ if (Metaspace::using_class_space()) {
+ report_metadata(Metaspace::ClassType);
}
- out->print_cr(" ");
}
+ out->print_cr(" ");
}
void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const {
@@ -317,9 +332,8 @@ int MemDetailReporter::report_malloc_sites() {
const MallocSite* malloc_site;
int num_omitted = 0;
while ((malloc_site = malloc_itr.next()) != nullptr) {
- // Don't report if site has never allocated less than one unit of whatever our scale is
- if (scale() > 1 && amount_in_current_scale(malloc_site->size()) == 0
- DEBUG_ONLY(&& amount_in_current_scale(malloc_site->peak_size()) == 0)) {
+ // Omit printing if the current value and the historic peak value both fall below the reporting scale threshold
+ if (amount_in_current_scale(MAX2(malloc_site->size(), malloc_site->peak_size())) == 0) {
num_omitted ++;
continue;
}
@@ -349,8 +363,10 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() {
if (virtual_memory_site->reserved() == 0) {
continue;
}
- // Don't report if site has reserved less than one unit of whatever our scale is
- if (scale() > 1 && amount_in_current_scale(virtual_memory_site->reserved()) == 0) {
+ // Omit printing if the current value and the historic peak value both fall below the
+ // reporting scale threshold
+ if (amount_in_current_scale(MAX2(virtual_memory_site->reserved(),
+ virtual_memory_site->peak_size())) == 0) {
num_omitted++;
continue;
}
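The omission checks above now compare the reporting threshold against the maximum of the current value and its historical peak, so a site that once spiked above one reporting unit is still printed even if it has since shrunk. A small sketch of that decision, with a hypothetical amount_in_scale() helper standing in for amount_in_current_scale():

#include <algorithm>
#include <cstdio>

// Scale-and-threshold helper: values are reported in whole units of
// 'scale' (e.g. KB), so anything that rounds to zero may be omitted.
static size_t amount_in_scale(size_t bytes, size_t scale) { return bytes / scale; }

int main() {
  const size_t scale = 1024;  // report in KB
  struct { const char* name; size_t current; size_t peak; } sites[] = {
    {"quiet",          200,      300},  // never reached 1 KB: omit
    {"spiky",          100, 5 * 1024},  // currently small, but peaked: keep
    {"busy",      8 * 1024, 9 * 1024},  // keep
  };
  for (const auto& s : sites) {
    // Omit only if both the current value and the historical peak round to 0.
    if (amount_in_scale(std::max(s.current, s.peak), scale) == 0) {
      std::printf("%s: omitted\n", s.name);
    } else {
      std::printf("%s: current=%zuKB peak=%zuKB\n", s.name,
                  amount_in_scale(s.current, scale), amount_in_scale(s.peak, scale));
    }
  }
}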
@@ -382,7 +398,16 @@ void MemDetailReporter::report_virtual_memory_map() {
void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
assert(reserved_rgn != nullptr, "null pointer");
- // Don't report if size is too small
+ // We don't bother about reporting peaks here.
+ // That is because peaks - in the context of virtual memory, peak of committed areas - make little sense
+ // when we report *by region*, which are identified by their location in memory. There is a philosophical
+ // question about identity here: e.g. a committed region that has been split into three regions by
+ // uncommitting a middle section of it, should that still count as "having peaked" before the split? If
+ // yes, which of the three new regions would be the spiritual successor? Rather than introducing more
+ // complexity, we avoid printing peaks altogether. Note that peaks should still be printed when reporting
+ // usage *by callsite*.
+
+ // Don't report if size is too small.
if (amount_in_current_scale(reserved_rgn->size()) == 0) return;
outputStream* out = output();
diff --git a/src/hotspot/share/services/memReporter.hpp b/src/hotspot/share/services/memReporter.hpp
index affc97098dc..b9e31d4bc4b 100644
--- a/src/hotspot/share/services/memReporter.hpp
+++ b/src/hotspot/share/services/memReporter.hpp
@@ -107,12 +107,12 @@ class MemReporterBase : public StackObj {
}
// Print summary total, malloc and virtual memory
- void print_total(size_t reserved, size_t committed) const;
+ void print_total(size_t reserved, size_t committed, size_t peak = 0) const;
void print_malloc(const MemoryCounter* c, MEMFLAGS flag = mtNone) const;
- void print_virtual_memory(size_t reserved, size_t committed) const;
+ void print_virtual_memory(size_t reserved, size_t committed, size_t peak) const;
void print_malloc_line(const MemoryCounter* c) const;
- void print_virtual_memory_line(size_t reserved, size_t committed) const;
+ void print_virtual_memory_line(size_t reserved, size_t committed, size_t peak) const;
void print_arena_line(const MemoryCounter* c) const;
void print_virtual_memory_region(const char* type, address base, size_t size) const;
diff --git a/src/hotspot/share/services/nmtCommon.hpp b/src/hotspot/share/services/nmtCommon.hpp
index 71a48f656d0..590838b8a69 100644
--- a/src/hotspot/share/services/nmtCommon.hpp
+++ b/src/hotspot/share/services/nmtCommon.hpp
@@ -31,8 +31,6 @@
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
-#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_up(sizeof(obj), sizeof(type))/sizeof(type))
-
// Native memory tracking level
//
// The meaning of the different states:
diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp
index bae98f0aadf..6865982aa8e 100644
--- a/src/hotspot/share/services/threadService.cpp
+++ b/src/hotspot/share/services/threadService.cpp
@@ -45,6 +45,7 @@
#include "runtime/init.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
+#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
@@ -687,7 +688,7 @@ ThreadStackTrace::~ThreadStackTrace() {
}
}
-void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHashtable* table, bool full) {
+void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsView* monitors, bool full) {
assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
if (_thread->has_last_Java_frame()) {
@@ -695,7 +696,7 @@ void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHasht
RegisterMap::UpdateMap::include,
RegisterMap::ProcessFrames::include,
RegisterMap::WalkContinuation::skip);
-
+ ResourceMark rm;
// If full, we want to print both vthread and carrier frames
vframe* start_vf = !full && _thread->is_vthread_mounted()
? _thread->carrier_last_java_vframe(&reg_map)
@@ -723,17 +724,7 @@ void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHasht
// Iterate inflated monitors and find monitors locked by this thread
// that are not found in the stack, e.g. JNI locked monitors:
InflatedMonitorsClosure imc(this);
- if (table != nullptr) {
- // Get the ObjectMonitors locked by the target thread, if any,
- // and does not include any where owner is set to a stack lock
- // address in the target thread:
- ObjectMonitorsHashtable::PtrList* list = table->get_entry(_thread);
- if (list != nullptr) {
- ObjectSynchronizer::monitors_iterate(&imc, list, _thread);
- }
- } else {
- ObjectSynchronizer::monitors_iterate(&imc, _thread);
- }
+ monitors->visit(&imc, _thread);
}
}
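The visit() call above feeds the inflated-monitor closure only the monitors owned by the target thread; anything it reports that was not already attributed to a stack frame is recorded separately (typically JNI-locked monitors, per the comment above). A toy illustration of that set-difference idea, with integers standing in for monitors:

#include <cstdio>
#include <set>
#include <vector>

int main() {
  std::vector<int> owned_monitors = {1, 2, 3, 4};  // from the owned-monitor view
  std::set<int>    seen_on_stack  = {1, 3};        // collected while walking frames

  for (int monitor : owned_monitors) {
    if (seen_on_stack.count(monitor) == 0) {
      // Owned but not explained by any frame: report it separately.
      std::printf("monitor %d not found in the stack\n", monitor);  // 2 and 4
    }
  }
}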
@@ -988,9 +979,9 @@ ThreadSnapshot::~ThreadSnapshot() {
}
void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
- ObjectMonitorsHashtable* table, bool full) {
+ ObjectMonitorsView* monitors, bool full) {
_stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
- _stack_trace->dump_stack_at_safepoint(max_depth, table, full);
+ _stack_trace->dump_stack_at_safepoint(max_depth, monitors, full);
}
diff --git a/src/hotspot/share/services/threadService.hpp b/src/hotspot/share/services/threadService.hpp
index ce0ab3c5027..f2b72700a4b 100644
--- a/src/hotspot/share/services/threadService.hpp
+++ b/src/hotspot/share/services/threadService.hpp
@@ -38,7 +38,7 @@
#include "services/management.hpp"
class DeadlockCycle;
-class ObjectMonitorsHashtable;
+class ObjectMonitorsView;
class OopClosure;
class StackFrameInfo;
class ThreadConcurrentLocks;
@@ -264,7 +264,7 @@ class ThreadSnapshot : public CHeapObj<mtInternal> {
ThreadConcurrentLocks* get_concurrent_locks() { return _concurrent_locks; }
void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
- ObjectMonitorsHashtable* table, bool full);
+ ObjectMonitorsView* monitors, bool full);
void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; }
void metadata_do(void f(Metadata*));
};
@@ -287,7 +287,7 @@ class ThreadStackTrace : public CHeapObj<mtInternal> {
int get_stack_depth() { return _depth; }
void add_stack_frame(javaVFrame* jvf);
- void dump_stack_at_safepoint(int max_depth, ObjectMonitorsHashtable* table, bool full);
+ void dump_stack_at_safepoint(int max_depth, ObjectMonitorsView* monitors, bool full);
Handle allocate_fill_stack_trace_element_array(TRAPS);
void metadata_do(void f(Metadata*));
GrowableArray<OopHandle>* jni_locked_monitors() { return _jni_locked_monitors; }
diff --git a/src/hotspot/share/services/virtualMemoryTracker.cpp b/src/hotspot/share/services/virtualMemoryTracker.cpp
index 66fdb236256..5c0ab5e592f 100644
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp
@@ -32,12 +32,18 @@
#include "services/virtualMemoryTracker.hpp"
#include "utilities/ostream.hpp"
-size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
+VirtualMemorySnapshot VirtualMemorySummary::_snapshot;
-void VirtualMemorySummary::initialize() {
- assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
- // Use placement operator new to initialize static data area.
- ::new ((void*)_snapshot) VirtualMemorySnapshot();
+void VirtualMemory::update_peak(size_t size) {
+ size_t peak_sz = peak_size();
+ while (peak_sz < size) {
+ size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
+ if (old_sz == peak_sz) {
+ break;
+ } else {
+ peak_sz = old_sz;
+ }
+ }
}
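update_peak() above is the usual lock-free monotonic-maximum loop: retry the compare-and-exchange with the freshly observed peak until either the store succeeds or the stored peak is already large enough. An equivalent standalone sketch with std::atomic rather than the HotSpot Atomic API:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<size_t> peak_size{0};

// Raise peak_size to at least 'size'; concurrent callers may race, but the
// stored value only ever grows.
void update_peak(size_t size) {
  size_t peak = peak_size.load(std::memory_order_relaxed);
  while (peak < size) {
    if (peak_size.compare_exchange_weak(peak, size, std::memory_order_relaxed)) {
      break;  // we installed the new maximum
    }
    // On failure compare_exchange_weak reloaded 'peak'; loop and re-check.
  }
}

int main() {
  std::vector<std::thread> threads;
  for (size_t t = 1; t <= 4; t++) {
    threads.emplace_back([t] {
      for (size_t i = 0; i < 1000; i++) update_peak(t * 1000 + i);
    });
  }
  for (auto& th : threads) th.join();
  std::printf("peak=%zu\n", peak_size.load());  // 4999
}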
void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
@@ -322,7 +328,6 @@ address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
assert(_reserved_regions == nullptr, "only call once");
if (level >= NMT_summary) {
- VirtualMemorySummary::initialize();
_reserved_regions = new (std::nothrow, mtNMT)
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
return (_reserved_regions != nullptr);
diff --git a/src/hotspot/share/services/virtualMemoryTracker.hpp b/src/hotspot/share/services/virtualMemoryTracker.hpp
index de1a72f65aa..d9c20d957dc 100644
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp
@@ -43,13 +43,18 @@ class VirtualMemory {
size_t _reserved;
size_t _committed;
+ volatile size_t _peak_size;
+ void update_peak(size_t size);
+
public:
- VirtualMemory() : _reserved(0), _committed(0) { }
+ VirtualMemory() : _reserved(0), _committed(0), _peak_size(0) {}
inline void reserve_memory(size_t sz) { _reserved += sz; }
inline void commit_memory (size_t sz) {
_committed += sz;
assert(_committed <= _reserved, "Sanity check");
+ update_peak(_committed);
}
inline void release_memory (size_t sz) {
@@ -64,6 +69,9 @@ class VirtualMemory {
inline size_t reserved() const { return _reserved; }
inline size_t committed() const { return _committed; }
+ inline size_t peak_size() const {
+ return Atomic::load(&_peak_size);
+ }
};
// Virtual memory allocation site, keeps track where the virtual memory is reserved.
@@ -75,10 +83,9 @@ class VirtualMemoryAllocationSite : public AllocationSite {
inline void reserve_memory(size_t sz) { _c.reserve_memory(sz); }
inline void commit_memory (size_t sz) { _c.commit_memory(sz); }
- inline void uncommit_memory(size_t sz) { _c.uncommit_memory(sz); }
- inline void release_memory(size_t sz) { _c.release_memory(sz); }
inline size_t reserved() const { return _c.reserved(); }
inline size_t committed() const { return _c.committed(); }
+ inline size_t peak_size() const { return _c.peak_size(); }
};
class VirtualMemorySummary;
@@ -127,7 +134,6 @@ class VirtualMemorySnapshot : public ResourceObj {
class VirtualMemorySummary : AllStatic {
public:
- static void initialize();
static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->reserve_memory(size);
@@ -162,11 +168,11 @@ class VirtualMemorySummary : AllStatic {
static void snapshot(VirtualMemorySnapshot* s);
static VirtualMemorySnapshot* as_snapshot() {
- return (VirtualMemorySnapshot*)_snapshot;
+ return &_snapshot;
}
private:
- static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
+ static VirtualMemorySnapshot _snapshot;
};
diff --git a/src/hotspot/share/utilities/chunkedList.hpp b/src/hotspot/share/utilities/chunkedList.hpp
index 81898ac53b2..9a600e4ce1b 100644
--- a/src/hotspot/share/utilities/chunkedList.hpp
+++ b/src/hotspot/share/utilities/chunkedList.hpp
@@ -44,7 +44,7 @@ template class ChunkedList : public CHeapObj {
}
public:
- ChunkedList() : _top(_values), _next_used(nullptr), _next_free(nullptr) {}
+ ChunkedList() : _top(_values), _next_used(nullptr), _next_free(nullptr) {}
bool is_full() const {
return _top == end();
diff --git a/src/hotspot/share/utilities/concurrentHashTable.hpp b/src/hotspot/share/utilities/concurrentHashTable.hpp
index 0336f06916b..fe6fdb0a95e 100644
--- a/src/hotspot/share/utilities/concurrentHashTable.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP
#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@@ -406,10 +407,11 @@ class ConcurrentHashTable : public CHeapObj {
size_t log2size_limit = DEFAULT_MAX_SIZE_LOG2,
size_t grow_hint = DEFAULT_GROW_HINT,
bool enable_statistics = DEFAULT_ENABLE_STATISTICS,
+ Mutex::Rank rank = Mutex::nosafepoint-2,
void* context = nullptr);
- explicit ConcurrentHashTable(void* context, size_t log2size = DEFAULT_START_SIZE_LOG2, bool enable_statistics = DEFAULT_ENABLE_STATISTICS) :
- ConcurrentHashTable(log2size, DEFAULT_MAX_SIZE_LOG2, DEFAULT_GROW_HINT, enable_statistics, context) {}
+ explicit ConcurrentHashTable(Mutex::Rank rank, void* context, size_t log2size = DEFAULT_START_SIZE_LOG2, bool enable_statistics = DEFAULT_ENABLE_STATISTICS) :
+ ConcurrentHashTable(log2size, DEFAULT_MAX_SIZE_LOG2, DEFAULT_GROW_HINT, enable_statistics, rank, context) {}
~ConcurrentHashTable();
diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
index b222d379b72..21277853089 100644
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1012,7 +1012,7 @@ inline size_t ConcurrentHashTable::
// Constructor
template
inline ConcurrentHashTable::
-ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bool enable_statistics, void* context)
+ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bool enable_statistics, Mutex::Rank rank, void* context)
: _context(context), _new_table(nullptr), _log2_size_limit(log2size_limit),
_log2_start_size(log2size), _grow_hint(grow_hint),
_size_limit_reached(false), _resize_lock_owner(nullptr),
@@ -1023,8 +1023,7 @@ ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bo
} else {
_stats_rate = nullptr;
}
- _resize_lock =
- new Mutex(Mutex::nosafepoint-2, "ConcurrentHashTableResize_lock");
+ _resize_lock = new Mutex(rank, "ConcurrentHashTableResize_lock");
_table = new InternalTable(log2size);
assert(log2size_limit >= log2size, "bad ergo");
_size_limit_reached = _table->_log2_size == _log2_size_limit;
@@ -1223,23 +1222,30 @@ template
inline TableStatistics ConcurrentHashTable::
statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
{
+ constexpr size_t batch_size = 128;
NumberSeq summary;
size_t literal_bytes = 0;
InternalTable* table = get_table();
- for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
+ size_t num_batches = table->_size / batch_size;
+ for (size_t batch_start = 0; batch_start < table->_size; batch_start += batch_size) {
+ // We batch the use of ScopedCS here as it has been found to be quite expensive to
+ // invoke it for every single bucket.
+ size_t batch_end = MIN2(batch_start + batch_size, table->_size);
ScopedCS cs(thread, this);
- size_t count = 0;
- Bucket* bucket = table->get_bucket(bucket_it);
- if (bucket->have_redirect() || bucket->is_locked()) {
- continue;
- }
- Node* current_node = bucket->first();
- while (current_node != nullptr) {
- ++count;
- literal_bytes += vs_f(current_node->value());
- current_node = current_node->next();
+ for (size_t bucket_it = batch_start; bucket_it < batch_end; bucket_it++) {
+ size_t count = 0;
+ Bucket* bucket = table->get_bucket(bucket_it);
+ if (bucket->have_redirect() || bucket->is_locked()) {
+ continue;
+ }
+ Node* current_node = bucket->first();
+ while (current_node != nullptr) {
+ ++count;
+ literal_bytes += vs_f(current_node->value());
+ current_node = current_node->next();
+ }
+ summary.add((double)count);
}
- summary.add((double)count);
}
if (_stats_rate == nullptr) {
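The rewritten statistics loop above amortizes the cost of entering the critical section by taking one ScopedCS per batch of 128 buckets rather than one per bucket. A minimal sketch of that batching shape, with a dummy RAII guard standing in for ScopedCS (everything here is illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Dummy RAII guard standing in for the comparatively expensive ScopedCS.
    struct ScopedGuard {
      ScopedGuard()  { /* enter critical section */ }
      ~ScopedGuard() { /* leave critical section */ }
    };

    int main() {
      constexpr size_t batch_size = 128;
      std::vector<int> buckets(1000, 1);

      long total = 0;
      for (size_t batch_start = 0; batch_start < buckets.size(); batch_start += batch_size) {
        size_t batch_end = std::min(batch_start + batch_size, buckets.size());
        ScopedGuard cs;  // one guard per batch, not one per bucket
        for (size_t i = batch_start; i < batch_end; i++) {
          total += buckets[i];
        }
      }
      std::printf("total = %ld\n", total);  // prints 1000
      return 0;
    }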
diff --git a/src/hotspot/share/utilities/events.cpp b/src/hotspot/share/utilities/events.cpp
index c47c466372e..f11b7c5bfc3 100644
--- a/src/hotspot/share/utilities/events.cpp
+++ b/src/hotspot/share/utilities/events.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,8 @@
EventLog* Events::_logs = nullptr;
StringEventLog* Events::_messages = nullptr;
+StringEventLog* Events::_memprotect_messages = nullptr;
+StringEventLog* Events::_nmethod_flush_messages = nullptr;
StringEventLog* Events::_vm_operations = nullptr;
StringEventLog* Events::_zgc_phase_switch = nullptr;
ExceptionsEventLog* Events::_exceptions = nullptr;
@@ -95,6 +97,8 @@ void Events::print() {
void Events::init() {
if (LogEvents) {
_messages = new StringEventLog("Events", "events");
+ _nmethod_flush_messages = new StringEventLog("Nmethod flushes", "nmethodflushes");
+ _memprotect_messages = new StringEventLog("Memory protections", "memprotects");
_vm_operations = new StringEventLog("VM Operations", "vmops");
_zgc_phase_switch = new StringEventLog("ZGC Phase Switch", "zgcps");
_exceptions = new ExceptionsEventLog("Internal exceptions", "exc");
diff --git a/src/hotspot/share/utilities/events.hpp b/src/hotspot/share/utilities/events.hpp
index b400fd707fa..4470002a1e3 100644
--- a/src/hotspot/share/utilities/events.hpp
+++ b/src/hotspot/share/utilities/events.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -99,7 +99,7 @@ template class EventLogBase : public EventLog {
EventRecord* _records;
public:
- EventLogBase(const char* name, const char* handle, int length = LogEventsBufferEntries):
+ EventLogBase(const char* name, const char* handle, int length = LogEventsBufferEntries):
_mutex(Mutex::event, name),
_name(name),
_handle(handle),
@@ -220,6 +220,12 @@ class Events : AllStatic {
// A log for generic messages that aren't well categorized.
static StringEventLog* _messages;
+ // A log for memory protection related messages
+ static StringEventLog* _memprotect_messages;
+
+ // A log for nmethod flush operations
+ static StringEventLog* _nmethod_flush_messages;
+
// A log for VM Operations
static StringEventLog* _vm_operations;
@@ -259,6 +265,10 @@ class Events : AllStatic {
// Logs a generic message with timestamp and format as printf.
static void log(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
+ static void log_memprotect(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
+
+ static void log_nmethod_flush(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
+
static void log_vm_operation(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
static void log_zgc_phase_switch(const char* format, ...) ATTRIBUTE_PRINTF(1, 2);
@@ -290,6 +300,24 @@ inline void Events::log(Thread* thread, const char* format, ...) {
}
}
+inline void Events::log_memprotect(Thread* thread, const char* format, ...) {
+ if (LogEvents && _memprotect_messages != nullptr) {
+ va_list ap;
+ va_start(ap, format);
+ _memprotect_messages->logv(thread, format, ap);
+ va_end(ap);
+ }
+}
+
+inline void Events::log_nmethod_flush(Thread* thread, const char* format, ...) {
+ if (LogEvents && _nmethod_flush_messages != nullptr) {
+ va_list ap;
+ va_start(ap, format);
+ _nmethod_flush_messages->logv(thread, format, ap);
+ va_end(ap);
+ }
+}
+
inline void Events::log_vm_operation(Thread* thread, const char* format, ...) {
if (LogEvents && _vm_operations != nullptr) {
va_list ap;
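The new Events::log_memprotect and Events::log_nmethod_flush wrappers follow the same shape as Events::log_vm_operation: gate on LogEvents, then forward the variable arguments to the underlying log via va_list. A self-contained sketch of that forwarding pattern (the toy logv below just formats to stdout; it is not HotSpot's StringEventLog):

    #include <cstdarg>
    #include <cstdio>

    // Toy sink standing in for StringEventLog::logv().
    static void logv(const char* format, va_list ap) {
      char buf[256];
      std::vsnprintf(buf, sizeof(buf), format, ap);
      std::printf("[event] %s\n", buf);
    }

    static bool log_events_enabled = true;  // stand-in for the LogEvents flag

    static void log_nmethod_flush(const char* format, ...) {
      if (log_events_enabled) {
        va_list ap;
        va_start(ap, format);
        logv(format, ap);
        va_end(ap);
      }
    }

    int main() {
      log_nmethod_flush("flushing nmethod at %p", (void*)0x1234);
      return 0;
    }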
diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp
index d9c47e8360f..5b67bc88e1b 100644
--- a/src/hotspot/share/utilities/growableArray.hpp
+++ b/src/hotspot/share/utilities/growableArray.hpp
@@ -118,7 +118,7 @@ class GrowableArrayView : public GrowableArrayBase {
protected:
E* _data; // data array
- GrowableArrayView(E* data, int capacity, int initial_len) :
+ GrowableArrayView(E* data, int capacity, int initial_len) :
GrowableArrayBase(capacity, initial_len), _data(data) {}
~GrowableArrayView() {}
@@ -126,7 +126,7 @@ class GrowableArrayView : public GrowableArrayBase {
public:
const static GrowableArrayView EMPTY;
- bool operator==(const GrowableArrayView& rhs) const {
+ bool operator==(const GrowableArrayView& rhs) const {
if (_len != rhs._len)
return false;
for (int i = 0; i < _len; i++) {
@@ -137,7 +137,7 @@ class GrowableArrayView : public GrowableArrayBase {
return true;
}
- bool operator!=(const GrowableArrayView& rhs) const {
+ bool operator!=(const GrowableArrayView& rhs) const {
return !(*this == rhs);
}
@@ -345,7 +345,7 @@ template
class GrowableArrayFromArray : public GrowableArrayView {
public:
- GrowableArrayFromArray(E* data, int len) :
+ GrowableArrayFromArray(E* data, int len) :
GrowableArrayView(data, len, len) {}
};
@@ -480,7 +480,7 @@ class GrowableArrayWithAllocator : public GrowableArrayView {
return this->at(location);
}
- void swap(GrowableArrayWithAllocator* other) {
+ void swap(GrowableArrayWithAllocator* other) {
::swap(this->_data, other->_data);
::swap(this->_len, other->_len);
::swap(this->_capacity, other->_capacity);
@@ -682,8 +682,8 @@ class GrowableArrayMetadata {
// See: init_checks.
template
-class GrowableArray : public GrowableArrayWithAllocator > {
- friend class GrowableArrayWithAllocator >;
+class GrowableArray : public GrowableArrayWithAllocator> {
+ friend class GrowableArrayWithAllocator;
friend class GrowableArrayTest;
static E* allocate(int max) {
@@ -731,7 +731,7 @@ class GrowableArray : public GrowableArrayWithAllocator > {
GrowableArray() : GrowableArray(2 /* initial_capacity */) {}
explicit GrowableArray(int initial_capacity) :
- GrowableArrayWithAllocator >(
+ GrowableArrayWithAllocator(
allocate(initial_capacity),
initial_capacity),
_metadata() {
@@ -739,7 +739,7 @@ class GrowableArray : public GrowableArrayWithAllocator > {
}
GrowableArray(int initial_capacity, MEMFLAGS memflags) :
- GrowableArrayWithAllocator >(
+ GrowableArrayWithAllocator(
allocate(initial_capacity, memflags),
initial_capacity),
_metadata(memflags) {
@@ -747,7 +747,7 @@ class GrowableArray : public GrowableArrayWithAllocator > {
}
GrowableArray(int initial_capacity, int initial_len, const E& filler) :
- GrowableArrayWithAllocator >(
+ GrowableArrayWithAllocator(
allocate(initial_capacity),
initial_capacity, initial_len, filler),
_metadata() {
@@ -755,7 +755,7 @@ class GrowableArray : public GrowableArrayWithAllocator > {
}
GrowableArray(int initial_capacity, int initial_len, const E& filler, MEMFLAGS memflags) :
- GrowableArrayWithAllocator >(
+ GrowableArrayWithAllocator(
allocate(initial_capacity, memflags),
initial_capacity, initial_len, filler),
_metadata(memflags) {
@@ -763,7 +763,7 @@ class GrowableArray : public GrowableArrayWithAllocator > {
}
GrowableArray(Arena* arena, int initial_capacity, int initial_len, const E& filler) :
- GrowableArrayWithAllocator >(
+ GrowableArrayWithAllocator(
allocate(initial_capacity, arena),
initial_capacity, initial_len, filler),
_metadata(arena) {
@@ -847,15 +847,15 @@ class GrowableArrayIterator : public StackObj {
public:
GrowableArrayIterator() : _array(nullptr), _position(0) { }
- GrowableArrayIterator& operator++() { ++_position; return *this; }
- E operator*() { return _array->at(_position); }
+ GrowableArrayIterator& operator++() { ++_position; return *this; }
+ E operator*() { return _array->at(_position); }
- bool operator==(const GrowableArrayIterator& rhs) {
+ bool operator==(const GrowableArrayIterator& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position == rhs._position;
}
- bool operator!=(const GrowableArrayIterator& rhs) {
+ bool operator!=(const GrowableArrayIterator& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position != rhs._position;
}
diff --git a/src/hotspot/share/utilities/linkedlist.hpp b/src/hotspot/share/utilities/linkedlist.hpp
index 5b8e258d539..eec7ea1e48d 100644
--- a/src/hotspot/share/utilities/linkedlist.hpp
+++ b/src/hotspot/share/utilities/linkedlist.hpp
@@ -82,7 +82,7 @@ template class LinkedListNode : public AnyObj {
template class LinkedList : public AnyObj {
protected:
LinkedListNode* _head;
- NONCOPYABLE(LinkedList);
+ NONCOPYABLE(LinkedList);
public:
LinkedList() : _head(nullptr) { }
diff --git a/src/hotspot/share/utilities/macros.hpp b/src/hotspot/share/utilities/macros.hpp
index 74e56975081..cdb392b5ef2 100644
--- a/src/hotspot/share/utilities/macros.hpp
+++ b/src/hotspot/share/utilities/macros.hpp
@@ -653,4 +653,10 @@
#define NOT_CDS_JAVA_HEAP_RETURN_(code) { return code; }
#endif
+#ifdef ADDRESS_SANITIZER
+#define INCLUDE_ASAN 1
+#else
+#define INCLUDE_ASAN 0
+#endif
+
#endif // SHARE_UTILITIES_MACROS_HPP
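Like the other INCLUDE_* switches in macros.hpp, INCLUDE_ASAN is always defined to either 0 or 1, so it can be used directly in #if expressions. A tiny illustration (the printed messages are only for demonstration):

    #include <cstdio>

    // ADDRESS_SANITIZER is normally supplied by the build when ASan is enabled.
    #ifdef ADDRESS_SANITIZER
    #define INCLUDE_ASAN 1
    #else
    #define INCLUDE_ASAN 0
    #endif

    int main() {
    #if INCLUDE_ASAN
      std::printf("built with AddressSanitizer support\n");
    #else
      std::printf("built without AddressSanitizer support\n");
    #endif
      return 0;
    }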
diff --git a/src/hotspot/share/utilities/nativeCallStack.cpp b/src/hotspot/share/utilities/nativeCallStack.cpp
index 0fb0303fb90..3ddf296506c 100644
--- a/src/hotspot/share/utilities/nativeCallStack.cpp
+++ b/src/hotspot/share/utilities/nativeCallStack.cpp
@@ -82,18 +82,33 @@ void NativeCallStack::print_on(outputStream* out, int indent) const {
char buf[1024];
int offset;
if (is_empty()) {
- for (int index = 0; index < indent; index ++) out->print(" ");
+ out->fill_to(indent);
out->print("[BOOTSTRAP]");
} else {
for (int frame = 0; frame < NMT_TrackingStackDepth; frame ++) {
pc = get_frame(frame);
if (pc == nullptr) break;
- // Print indent
- for (int index = 0; index < indent; index ++) out->print(" ");
+ out->fill_to(indent);
+ out->print("[" PTR_FORMAT "]", p2i(pc));
+ // Print function and library; shorten library name to just its last component
+ // for brevity, and omit it completely for libjvm.so
+ bool function_printed = false;
if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
- out->print("[" PTR_FORMAT "] %s+0x%x", p2i(pc), buf, offset);
- } else {
- out->print("[" PTR_FORMAT "]", p2i(pc));
+ out->print("%s+0x%x", buf, offset);
+ function_printed = true;
+ }
+ if ((!function_printed || !os::address_is_in_vm(pc)) &&
+ os::dll_address_to_library_name(pc, buf, sizeof(buf), &offset)) {
+ const char* libname = strrchr(buf, os::file_separator()[0]);
+ if (libname != nullptr) {
+ libname++;
+ } else {
+ libname = buf;
+ }
+ out->print(" in %s", libname);
+ if (!function_printed) {
+ out->print("+0x%x", offset);
+ }
}
// Note: we deliberately omit printing source information here. NativeCallStack::print_on()
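The frame printing above shortens a library path to its last component with strrchr on the platform file separator. A standalone sketch of that trimming ('/' is hard-coded here; HotSpot asks os::file_separator() instead):

    #include <cstdio>
    #include <cstring>

    // Return the last path component, or the whole string if no separator.
    static const char* basename_of(const char* path) {
      const char* last = std::strrchr(path, '/');
      return (last != nullptr) ? last + 1 : path;
    }

    int main() {
      std::printf("%s\n", basename_of("/usr/lib/jvm/lib/server/libjvm.so"));  // libjvm.so
      std::printf("%s\n", basename_of("libz.so"));                            // libz.so
      return 0;
    }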
diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp
index 5c0b4440e2c..868c67a7dbc 100644
--- a/src/hotspot/share/utilities/vmError.cpp
+++ b/src/hotspot/share/utilities/vmError.cpp
@@ -100,6 +100,7 @@ const char* VMError::_filename;
int VMError::_lineno;
size_t VMError::_size;
const size_t VMError::_reattempt_required_stack_headroom = 64 * K;
+const intptr_t VMError::segfault_address = pd_segfault_address;
// List of environment variables that should be reported in error log file.
static const char* env_list[] = {
diff --git a/src/hotspot/share/utilities/vmError.hpp b/src/hotspot/share/utilities/vmError.hpp
index 5a0625c920b..88ba476891e 100644
--- a/src/hotspot/share/utilities/vmError.hpp
+++ b/src/hotspot/share/utilities/vmError.hpp
@@ -207,7 +207,7 @@ class VMError : public AllStatic {
DEBUG_ONLY(static void controlled_crash(int how);)
// Non-null address guaranteed to generate a SEGV mapping error on read, for test purposes.
- static constexpr intptr_t segfault_address = AIX_ONLY(-1) NOT_AIX(1 * K);
+ static const intptr_t segfault_address;
// Max value for the ErrorLogPrintCodeLimit flag.
static const int max_error_log_print_code = 10;
diff --git a/src/hotspot/share/utilities/waitBarrier_generic.cpp b/src/hotspot/share/utilities/waitBarrier_generic.cpp
index b5d9ff67eb7..dbf4db336c2 100644
--- a/src/hotspot/share/utilities/waitBarrier_generic.cpp
+++ b/src/hotspot/share/utilities/waitBarrier_generic.cpp
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,66 +30,228 @@
#include "utilities/waitBarrier_generic.hpp"
#include "utilities/spinYield.hpp"
+// Implements the striped semaphore wait barrier.
+//
+// To guarantee progress and safety, we need to make sure that new barrier tag
+// starts with the completely empty set of waiters and free semaphore. This
+// requires either waiting for all threads to leave wait() for current barrier
+// tag on disarm(), or waiting for all threads to leave the previous tag before
+// reusing the semaphore in arm().
+//
+// When there are multiple threads, it is normal for some threads to take
+// significant time to leave the barrier. Waiting for these threads introduces
+// stalls on barrier reuse.
+//
+// If we wait on disarm(), this stall is nearly guaranteed to happen if some threads
+// are de-scheduled by prior wait(). It would be especially bad if there are more
+// waiting threads than CPUs: every thread would need to wake up and register itself
+// as leaving, before we can unblock from disarm().
+//
+// If we wait on arm(), we can get lucky that most threads would be able to catch up,
+// exit wait(), and so we arrive to arm() with semaphore ready for reuse. However,
+// that is still insufficient in practice.
+//
+// Therefore, this implementation goes a step further and implements the _striped_
+// semaphores. We maintain several semaphores in cells. The barrier tags are assigned
+// to cells in some simple manner. Most of the current uses have sequential barrier
+// tags, so simple modulo works well. We then operate on a cell like we would operate
+// on a single semaphore: we wait at arm() for all threads to catch up before reusing
+// the cell. For the cost of maintaining just a few cells, we have enough window for
+// threads to catch up.
+//
+// The correctness is guaranteed by using a single atomic state variable per cell,
+// with updates always done with CASes:
+//
+// [.......... barrier tag ..........][.......... waiters ..........]
+// 63 31 0
+//
+// Cell starts with zero tag and zero waiters. Arming the cell swings barrier tag from
+// zero to some tag, while checking that no waiters have appeared. Disarming swings
+// the barrier tag back from tag to zero. Every waiter registers itself by incrementing
+// the "waiters", while checking that barrier tag is still the same. Every completing waiter
+// decrements the "waiters". When all waiters complete, a cell ends up in initial state,
+// ready to be armed again. This allows accurate tracking of how many signals
+// to issue and does not race with disarm.
+//
+// The implementation uses the strongest (default) barriers for extra safety, even
+// when not strictly required to do so for correctness. Extra barrier overhead is
+// dominated by the actual wait/notify latency anyway.
+//
+
void GenericWaitBarrier::arm(int barrier_tag) {
- assert(_barrier_tag == 0, "Already armed");
- assert(_waiters == 0, "We left a thread hanging");
- _barrier_tag = barrier_tag;
- _waiters = 0;
+ assert(barrier_tag != 0, "Pre arm: Should be arming with armed value");
+ assert(Atomic::load(&_barrier_tag) == 0,
+ "Pre arm: Should not be already armed. Tag: %d",
+ Atomic::load(&_barrier_tag));
+ Atomic::release_store(&_barrier_tag, barrier_tag);
+
+ Cell &cell = tag_to_cell(barrier_tag);
+ cell.arm(barrier_tag);
+
+ // API specifies arm() must provide a trailing fence.
OrderAccess::fence();
}
-int GenericWaitBarrier::wake_if_needed() {
- assert(_barrier_tag == 0, "Not disarmed");
- int w = _waiters;
- if (w == 0) {
- // Load of _barrier_threads in caller must not pass the load of _waiters.
- OrderAccess::loadload();
- return 0;
- }
- assert(w > 0, "Bad counting");
- // We need an exact count which never goes below zero,
- // otherwise the semaphore may be signalled too many times.
- if (Atomic::cmpxchg(&_waiters, w, w - 1) == w) {
- _sem_barrier.signal();
- return w - 1;
- }
- return w;
+void GenericWaitBarrier::disarm() {
+ int barrier_tag = Atomic::load_acquire(&_barrier_tag);
+ assert(barrier_tag != 0, "Pre disarm: Should be armed. Tag: %d", barrier_tag);
+ Atomic::release_store(&_barrier_tag, 0);
+
+ Cell &cell = tag_to_cell(barrier_tag);
+ cell.disarm(barrier_tag);
+
+ // API specifies disarm() must provide a trailing fence.
+ OrderAccess::fence();
}
-void GenericWaitBarrier::disarm() {
- assert(_barrier_tag != 0, "Not armed");
- _barrier_tag = 0;
- // Loads of _barrier_threads/_waiters must not float above disarm store and
- // disarm store must not sink below.
+void GenericWaitBarrier::wait(int barrier_tag) {
+ assert(barrier_tag != 0, "Pre wait: Should be waiting on armed value");
+
+ Cell &cell = tag_to_cell(barrier_tag);
+ cell.wait(barrier_tag);
+
+ // API specifies wait() must provide a trailing fence.
OrderAccess::fence();
- int left;
+}
+
+void GenericWaitBarrier::Cell::arm(int32_t requested_tag) {
+ // Before we continue to arm, we need to make sure that all threads
+ // have left the previous cell.
+
+ int64_t state;
+
SpinYield sp;
- do {
- left = GenericWaitBarrier::wake_if_needed();
- if (left == 0 && _barrier_threads > 0) {
- // There is no thread to wake but we still have barrier threads.
+ while (true) {
+ state = Atomic::load_acquire(&_state);
+ assert(decode_tag(state) == 0,
+ "Pre arm: Should not be armed. "
+ "Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT,
+ decode_tag(state), decode_waiters(state));
+ if (decode_waiters(state) == 0) {
+ break;
+ }
+ sp.wait();
+ }
+
+ // Try to swing cell to armed. This should always succeed after the check above.
+ int64_t new_state = encode(requested_tag, 0);
+ int64_t prev_state = Atomic::cmpxchg(&_state, state, new_state);
+ if (prev_state != state) {
+ fatal("Cannot arm the wait barrier. "
+ "Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT,
+ decode_tag(prev_state), decode_waiters(prev_state));
+ }
+}
+
+int GenericWaitBarrier::Cell::signal_if_needed(int max) {
+ int signals = 0;
+ while (true) {
+ int cur = Atomic::load_acquire(&_outstanding_wakeups);
+ if (cur == 0) {
+ // All done, no more waiters.
+ return 0;
+ }
+ assert(cur > 0, "Sanity");
+
+ int prev = Atomic::cmpxchg(&_outstanding_wakeups, cur, cur - 1);
+ if (prev != cur) {
+ // Contention, return to caller for early return or backoff.
+ return prev;
+ }
+
+ // Signal!
+ _sem.signal();
+
+ if (++signals >= max) {
+ // Signalled requested number of times, break out.
+ return prev;
+ }
+ }
+}
+
+void GenericWaitBarrier::Cell::disarm(int32_t expected_tag) {
+ int32_t waiters;
+
+ while (true) {
+ int64_t state = Atomic::load_acquire(&_state);
+ int32_t tag = decode_tag(state);
+ waiters = decode_waiters(state);
+
+ assert((tag == expected_tag) && (waiters >= 0),
+ "Mid disarm: Should be armed with expected tag and have sane waiters. "
+ "Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT,
+ tag, waiters);
+
+ int64_t new_state = encode(0, waiters);
+ if (Atomic::cmpxchg(&_state, state, new_state) == state) {
+ // Successfully disarmed.
+ break;
+ }
+ }
+
+ // Wake up waiters, if we have at least one.
+ // Allow other threads to assist with wakeups, if possible.
+ if (waiters > 0) {
+ Atomic::release_store(&_outstanding_wakeups, waiters);
+ SpinYield sp;
+ while (signal_if_needed(INT_MAX) > 0) {
sp.wait();
}
- // We must loop here until there are no waiters or potential waiters.
- } while (left > 0 || _barrier_threads > 0);
- // API specifies disarm() must provide a trailing fence.
- OrderAccess::fence();
+ }
+ assert(Atomic::load(&_outstanding_wakeups) == 0, "Post disarm: Should not have outstanding wakeups");
}
-void GenericWaitBarrier::wait(int barrier_tag) {
- assert(barrier_tag != 0, "Trying to wait on disarmed value");
- if (barrier_tag != _barrier_tag) {
- // API specifies wait() must provide a trailing fence.
- OrderAccess::fence();
- return;
+void GenericWaitBarrier::Cell::wait(int32_t expected_tag) {
+ // Try to register ourselves as pending waiter.
+ while (true) {
+ int64_t state = Atomic::load_acquire(&_state);
+ int32_t tag = decode_tag(state);
+ if (tag != expected_tag) {
+ // Cell tag had changed while waiting here. This means either the cell had
+ // been disarmed, or we are late and the cell was armed with a new tag.
+ // Exit without touching anything else.
+ return;
+ }
+ int32_t waiters = decode_waiters(state);
+
+ assert((tag == expected_tag) && (waiters >= 0 && waiters < INT32_MAX),
+ "Before wait: Should be armed with expected tag and waiters are in range. "
+ "Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT,
+ tag, waiters);
+
+ int64_t new_state = encode(tag, waiters + 1);
+ if (Atomic::cmpxchg(&_state, state, new_state) == state) {
+ // Success! Proceed to wait.
+ break;
+ }
}
- Atomic::add(&_barrier_threads, 1);
- if (barrier_tag != 0 && barrier_tag == _barrier_tag) {
- Atomic::add(&_waiters, 1);
- _sem_barrier.wait();
- // We help out with posting, but we need to do so before we decrement the
- // _barrier_threads otherwise we might wake threads up in next wait.
- GenericWaitBarrier::wake_if_needed();
+
+ // Wait for notification.
+ _sem.wait();
+
+ // Unblocked! We help out with waking up two siblings. This allows to avalanche
+ // the wakeups for many threads, even if some threads are lagging behind.
+ // Note that we can only do this *before* reporting back as completed waiter,
+ // otherwise we might prematurely wake up threads for another barrier tag.
+ // Current arm() sequence protects us from this trouble by waiting until all waiters
+ // leave.
+ signal_if_needed(2);
+
+ // Register ourselves as completed waiter before leaving.
+ while (true) {
+ int64_t state = Atomic::load_acquire(&_state);
+ int32_t tag = decode_tag(state);
+ int32_t waiters = decode_waiters(state);
+
+ assert((tag == 0) && (waiters > 0),
+ "After wait: Should be not armed and have non-complete waiters. "
+ "Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT,
+ tag, waiters);
+
+ int64_t new_state = encode(tag, waiters - 1);
+ if (Atomic::cmpxchg(&_state, state, new_state) == state) {
+ // Success!
+ break;
+ }
}
- Atomic::add(&_barrier_threads, -1);
}
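The cell state manipulated by arm(), disarm() and wait() above is a single 64-bit word: barrier tag in bits 63..32, waiter count in bits 31..0, exactly as described in the leading comment. A small worked example of that packing, mirroring the encode/decode helpers declared in the header (standalone sketch, not the HotSpot code itself):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Pack a (tag, waiters) pair into one 64-bit state word.
    static int64_t encode(int32_t tag, int32_t waiters) {
      return (int64_t(tag) << 32) | (int64_t(waiters) & 0xFFFFFFFF);
    }
    static int32_t decode_tag(int64_t state)     { return (int32_t)(state >> 32); }
    static int32_t decode_waiters(int64_t state) { return (int32_t)(state & 0xFFFFFFFF); }

    int main() {
      int64_t armed        = encode(7, 0);  // arm with tag 7, no waiters yet
      int64_t with_waiters = encode(7, 3);  // three waiters registered
      assert(decode_tag(armed) == 7);
      assert(decode_tag(with_waiters) == 7 && decode_waiters(with_waiters) == 3);

      // Disarm clears the tag but keeps the waiter count, which is then
      // drained as waiters report completion.
      int64_t disarmed = encode(0, decode_waiters(with_waiters));
      std::printf("tag=%d waiters=%d\n",
                  (int)decode_tag(disarmed), (int)decode_waiters(disarmed));  // tag=0 waiters=3
      return 0;
    }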
diff --git a/src/hotspot/share/utilities/waitBarrier_generic.hpp b/src/hotspot/share/utilities/waitBarrier_generic.hpp
index 50bfea6aebf..d3a45b33b82 100644
--- a/src/hotspot/share/utilities/waitBarrier_generic.hpp
+++ b/src/hotspot/share/utilities/waitBarrier_generic.hpp
@@ -26,29 +26,79 @@
#define SHARE_UTILITIES_WAITBARRIER_GENERIC_HPP
#include "memory/allocation.hpp"
+#include "memory/padded.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/globalDefinitions.hpp"
-// In addition to the barrier tag, it uses two counters to keep the semaphore
-// count correct and not leave any late thread waiting.
class GenericWaitBarrier : public CHeapObj {
+private:
+ class Cell : public CHeapObj {
+ private:
+ // Pad out the cells to avoid interference between the cells.
+ // This would insulate from stalls when adjacent cells have returning
+ // workers and contend over the cache line for current latency-critical
+ // cell.
+ DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
+
+ Semaphore _sem;
+
+ // Cell state, tracks the arming + waiters status
+ volatile int64_t _state;
+
+ // Wakeups to deliver for current waiters
+ volatile int _outstanding_wakeups;
+
+ int signal_if_needed(int max);
+
+ static int64_t encode(int32_t barrier_tag, int32_t waiters) {
+ int64_t val = (((int64_t) barrier_tag) << 32) |
+ (((int64_t) waiters) & 0xFFFFFFFF);
+ assert(decode_tag(val) == barrier_tag, "Encoding is reversible");
+ assert(decode_waiters(val) == waiters, "Encoding is reversible");
+ return val;
+ }
+
+ static int32_t decode_tag(int64_t value) {
+ return (int32_t)(value >> 32);
+ }
+
+ static int32_t decode_waiters(int64_t value) {
+ return (int32_t)(value & 0xFFFFFFFF);
+ }
+
+ public:
+ Cell() : _sem(0), _state(encode(0, 0)), _outstanding_wakeups(0) {}
+ NONCOPYABLE(Cell);
+
+ void arm(int32_t requested_tag);
+ void disarm(int32_t expected_tag);
+ void wait(int32_t expected_tag);
+ };
+
+ // Should be enough for most uses without exploding the footprint.
+ static constexpr int CELLS_COUNT = 16;
+
+ Cell _cells[CELLS_COUNT];
+
+ // Trailing padding to protect the last cell.
+ DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
+
volatile int _barrier_tag;
- // The number of threads waiting on or about to wait on the semaphore.
- volatile int _waiters;
- // The number of threads in the wait path, before or after the tag check.
- // These threads can become waiters.
- volatile int _barrier_threads;
- Semaphore _sem_barrier;
+
+ // Trailing padding to insulate the rest of the barrier from adjacent
+ // data structures. The leading padding is not needed, as cell padding
+ // handles this for us.
+ DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
NONCOPYABLE(GenericWaitBarrier);
- int wake_if_needed();
+ Cell& tag_to_cell(int tag) { return _cells[tag & (CELLS_COUNT - 1)]; }
- public:
- GenericWaitBarrier() : _barrier_tag(0), _waiters(0), _barrier_threads(0), _sem_barrier(0) {}
+public:
+ GenericWaitBarrier() : _cells(), _barrier_tag(0) {}
~GenericWaitBarrier() {}
- const char* description() { return "semaphore"; }
+ const char* description() { return "striped semaphore"; }
void arm(int barrier_tag);
void disarm();
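tag_to_cell() above spreads consecutive barrier tags over the 16 cells with a simple mask, so a freshly armed tag rarely has to wait for stragglers still leaving the cell used a few tags earlier. A quick sketch of that mapping (standalone; the constant matches CELLS_COUNT above):

    #include <cstdio>

    constexpr int CELLS_COUNT = 16;  // power of two, so the modulo reduces to a mask

    static int tag_to_cell_index(int tag) {
      return tag & (CELLS_COUNT - 1);
    }

    int main() {
      // Sequential tags walk the cells round-robin, giving a previously used
      // cell many arm() calls' worth of time to drain its waiters.
      for (int tag = 1; tag <= 20; tag++) {
        std::printf("tag %2d -> cell %d\n", tag, tag_to_cell_index(tag));
      }
      return 0;
    }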
diff --git a/src/java.base/share/classes/java/lang/PinnedThreadPrinter.java b/src/java.base/share/classes/java/lang/PinnedThreadPrinter.java
index 02e0683a1b9..a9b40d028f5 100644
--- a/src/java.base/share/classes/java/lang/PinnedThreadPrinter.java
+++ b/src/java.base/share/classes/java/lang/PinnedThreadPrinter.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,10 @@
import java.util.Set;
import java.util.stream.Collectors;
import static java.lang.StackWalker.Option.*;
+import jdk.internal.access.JavaIOPrintStreamAccess;
+import jdk.internal.access.SharedSecrets;
+import jdk.internal.misc.InternalLock;
+import jdk.internal.vm.Continuation;
/**
* Helper class to print the virtual thread stack trace when pinned.
@@ -42,7 +46,8 @@
* code in that Class. This is used to avoid printing the same stack trace many times.
*/
class PinnedThreadPrinter {
- static final StackWalker STACK_WALKER;
+ private static final JavaIOPrintStreamAccess JIOPSA = SharedSecrets.getJavaIOPrintStreamAccess();
+ private static final StackWalker STACK_WALKER;
static {
var options = Set.of(SHOW_REFLECT_FRAMES, RETAIN_CLASS_REFERENCE);
PrivilegedAction pa = () ->
@@ -86,45 +91,59 @@ private static int hash(List stack) {
}
/**
- * Prints the continuation stack trace.
+ * Returns true if the frame is native, a class initializer, or holds monitors.
+ */
+ private static boolean isInterestingFrame(LiveStackFrame f) {
+ return f.isNativeMethod()
+ || "".equals(f.getMethodName())
+ || (f.getMonitors().length > 0);
+ }
+
+ /**
+ * Prints the current thread's stack trace.
*
* @param printAll true to print all stack frames, false to only print the
* frames that are native or holding a monitor
*/
- static void printStackTrace(PrintStream out, boolean printAll) {
+ static void printStackTrace(PrintStream out, Continuation.Pinned reason, boolean printAll) {
List stack = STACK_WALKER.walk(s ->
s.map(f -> (LiveStackFrame) f)
.filter(f -> f.getDeclaringClass() != PinnedThreadPrinter.class)
.collect(Collectors.toList())
);
+ Object lockObj = JIOPSA.lock(out);
+ if (lockObj instanceof InternalLock lock && lock.tryLock()) {
+ try {
+ // find the closest frame that is causing the thread to be pinned
+ stack.stream()
+ .filter(f -> isInterestingFrame(f))
+ .map(LiveStackFrame::getDeclaringClass)
+ .findFirst()
+ .ifPresentOrElse(klass -> {
+ // print the stack trace if not already seen
+ int hash = hash(stack);
+ if (HASHES.get(klass).add(hash)) {
+ printStackTrace(out, reason, stack, printAll);
+ }
+ }, () -> printStackTrace(out, reason, stack, true)); // not found
- // find the closest frame that is causing the thread to be pinned
- stack.stream()
- .filter(f -> (f.isNativeMethod() || f.getMonitors().length > 0))
- .map(LiveStackFrame::getDeclaringClass)
- .findFirst()
- .ifPresentOrElse(klass -> {
- int hash = hash(stack);
- Hashes hashes = HASHES.get(klass);
- synchronized (hashes) {
- // print the stack trace if not already seen
- if (hashes.add(hash)) {
- printStackTrace(stack, out, printAll);
- }
- }
- }, () -> printStackTrace(stack, out, true)); // not found
+ } finally {
+ lock.unlock();
+ }
+ }
}
- private static void printStackTrace(List stack,
- PrintStream out,
+ private static void printStackTrace(PrintStream out,
+ Continuation.Pinned reason,
+ List stack,
boolean printAll) {
- out.println(Thread.currentThread());
+ out.format("%s reason:%s%n", Thread.currentThread(), reason);
for (LiveStackFrame frame : stack) {
var ste = frame.toStackTraceElement();
int monitorCount = frame.getMonitors().length;
if (monitorCount > 0) {
out.format(" %s <== monitors:%d%n", ste, monitorCount);
- } else if (frame.isNativeMethod() || printAll) {
+ } else if (printAll || isInterestingFrame(frame)) {
out.format(" %s%n", ste);
}
}
diff --git a/src/java.base/share/classes/java/lang/String.java b/src/java.base/share/classes/java/lang/String.java
index 9b19d7e2ac1..cd8995e18e4 100644
--- a/src/java.base/share/classes/java/lang/String.java
+++ b/src/java.base/share/classes/java/lang/String.java
@@ -574,7 +574,7 @@ private String(Charset charset, byte[] bytes, int offset, int length) {
this.coder = LATIN1;
return;
}
- byte[] buf = new byte[length << 1];
+ byte[] buf = StringUTF16.newBytesFor(length);
StringLatin1.inflate(dst, 0, buf, 0, dp);
dst = buf;
dp = decodeUTF8_UTF16(bytes, offset, sl, dst, dp, true);
@@ -584,7 +584,7 @@ private String(Charset charset, byte[] bytes, int offset, int length) {
this.value = dst;
this.coder = UTF16;
} else { // !COMPACT_STRINGS
- byte[] dst = new byte[length << 1];
+ byte[] dst = StringUTF16.newBytesFor(length);
int dp = decodeUTF8_UTF16(bytes, offset, offset + length, dst, 0, true);
if (dp != length) {
dst = Arrays.copyOf(dst, dp << 1);
@@ -605,7 +605,7 @@ private String(Charset charset, byte[] bytes, int offset, int length) {
this.value = Arrays.copyOfRange(bytes, offset, offset + length);
this.coder = LATIN1;
} else {
- byte[] dst = new byte[length << 1];
+ byte[] dst = StringUTF16.newBytesFor(length);
int dp = 0;
while (dp < length) {
int b = bytes[offset++];
@@ -750,15 +750,15 @@ static String newStringUTF8NoRepl(byte[] bytes, int offset, int length, boolean
return new String(dst, LATIN1);
}
if (dp == 0) {
- dst = new byte[length << 1];
+ dst = StringUTF16.newBytesFor(length);
} else {
- byte[] buf = new byte[length << 1];
+ byte[] buf = StringUTF16.newBytesFor(length);
StringLatin1.inflate(dst, 0, buf, 0, dp);
dst = buf;
}
dp = decodeUTF8_UTF16(bytes, offset, sl, dst, dp, false);
} else { // !COMPACT_STRINGS
- dst = new byte[length << 1];
+ dst = StringUTF16.newBytesFor(length);
dp = decodeUTF8_UTF16(bytes, offset, offset + length, dst, 0, false);
}
if (dp != length) {
@@ -1304,7 +1304,7 @@ private static byte[] encodeUTF8(byte coder, byte[] val, boolean doReplace) {
}
int dp = 0;
- byte[] dst = new byte[val.length << 1];
+ byte[] dst = StringUTF16.newBytesFor(val.length);
for (byte c : val) {
if (c < 0) {
dst[dp++] = (byte) (0xc0 | ((c & 0xff) >> 6));
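Replacing new byte[length << 1] with StringUTF16.newBytesFor(length) routes the allocation through a helper that range-checks the length before doubling it, instead of letting length << 1 overflow for very large inputs. A rough C++ sketch of that kind of guard (constants and names are illustrative, not the JDK implementation):

    #include <climits>
    #include <cstdio>
    #include <stdexcept>

    // Assumed limit in the spirit of StringUTF16: at most INT_MAX/2 UTF-16
    // code units, so the doubled byte count still fits in a 32-bit int.
    constexpr int MAX_LENGTH = INT_MAX >> 1;

    static int new_bytes_length(int len) {
      if (len < 0 || len > MAX_LENGTH) {
        throw std::length_error("UTF-16 byte length would overflow");
      }
      return len << 1;  // safe: at most INT_MAX - 1
    }

    int main() {
      std::printf("%d\n", new_bytes_length(4));      // 8
      try {
        new_bytes_length(MAX_LENGTH + 1);            // would need more than INT_MAX bytes
      } catch (const std::length_error& e) {
        std::printf("rejected: %s\n", e.what());
      }
      return 0;
    }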
diff --git a/src/java.base/share/classes/java/lang/VirtualThread.java b/src/java.base/share/classes/java/lang/VirtualThread.java
index 37d092e2011..1b2b40b8c08 100644
--- a/src/java.base/share/classes/java/lang/VirtualThread.java
+++ b/src/java.base/share/classes/java/lang/VirtualThread.java
@@ -86,41 +86,52 @@ final class VirtualThread extends BaseVirtualThread {
private volatile int state;
/*
- * Virtual thread state and transitions:
+ * Virtual thread state transitions:
*
- * NEW -> STARTED // Thread.start
+ * NEW -> STARTED // Thread.start, schedule to run
* STARTED -> TERMINATED // failed to start
* STARTED -> RUNNING // first run
+ * RUNNING -> TERMINATED // done
*
- * RUNNING -> PARKING // Thread attempts to park
- * PARKING -> PARKED // cont.yield successful, thread is parked
- * PARKING -> PINNED // cont.yield failed, thread is pinned
- *
- * PARKED -> RUNNABLE // unpark or interrupted
- * PINNED -> RUNNABLE // unpark or interrupted
+ * RUNNING -> PARKING // Thread parking with LockSupport.park
+ * PARKING -> PARKED // cont.yield successful, parked indefinitely
+ * PARKING -> PINNED // cont.yield failed, parked indefinitely on carrier
+ * PARKED -> UNPARKED // unparked, may be scheduled to continue
+ * PINNED -> RUNNING // unparked, continue execution on same carrier
+ * UNPARKED -> RUNNING // continue execution after park
*
- * RUNNABLE -> RUNNING // continue execution
+ * RUNNING -> TIMED_PARKING // Thread parking with LockSupport.parkNanos
+ * TIMED_PARKING -> TIMED_PARKED // cont.yield successful, timed-parked
+ * TIMED_PARKING -> TIMED_PINNED // cont.yield failed, timed-parked on carrier
+ * TIMED_PARKED -> UNPARKED // unparked, may be scheduled to continue
+ * TIMED_PINNED -> RUNNING // unparked, continue execution on same carrier
*
* RUNNING -> YIELDING // Thread.yield
- * YIELDING -> RUNNABLE // yield successful
- * YIELDING -> RUNNING // yield failed
- *
- * RUNNING -> TERMINATED // done
+ * YIELDING -> YIELDED // cont.yield successful, may be scheduled to continue
+ * YIELDING -> RUNNING // cont.yield failed
+ * YIELDED -> RUNNING // continue execution after Thread.yield
*/
private static final int NEW = 0;
private static final int STARTED = 1;
- private static final int RUNNABLE = 2; // runnable-unmounted
- private static final int RUNNING = 3; // runnable-mounted
- private static final int PARKING = 4;
- private static final int PARKED = 5; // unmounted
- private static final int PINNED = 6; // mounted
- private static final int YIELDING = 7; // Thread.yield
+ private static final int RUNNING = 2; // runnable-mounted
+
+ // untimed and timed parking
+ private static final int PARKING = 3;
+ private static final int PARKED = 4; // unmounted
+ private static final int PINNED = 5; // mounted
+ private static final int TIMED_PARKING = 6;
+ private static final int TIMED_PARKED = 7; // unmounted
+ private static final int TIMED_PINNED = 8; // mounted
+ private static final int UNPARKED = 9; // unmounted but runnable
+
+ // Thread.yield
+ private static final int YIELDING = 10;
+ private static final int YIELDED = 11; // unmounted but runnable
+
private static final int TERMINATED = 99; // final state
// can be suspended from scheduling when unmounted
private static final int SUSPENDED = 1 << 8;
- private static final int RUNNABLE_SUSPENDED = (RUNNABLE | SUSPENDED);
- private static final int PARKED_SUSPENDED = (PARKED | SUSPENDED);
// parking permit
private volatile boolean parkPermit;
@@ -180,7 +191,15 @@ private static class VThreadContinuation extends Continuation {
protected void onPinned(Continuation.Pinned reason) {
if (TRACE_PINNING_MODE > 0) {
boolean printAll = (TRACE_PINNING_MODE == 1);
- PinnedThreadPrinter.printStackTrace(System.out, printAll);
+ VirtualThread vthread = (VirtualThread) Thread.currentThread();
+ int oldState = vthread.state();
+ try {
+ // avoid printing when in transition states
+ vthread.setState(RUNNING);
+ PinnedThreadPrinter.printStackTrace(System.out, reason, printAll);
+ } finally {
+ vthread.setState(oldState);
+ }
}
}
private static Runnable wrap(VirtualThread vthread, Runnable task) {
@@ -194,8 +213,11 @@ public void run() {
}
/**
- * Runs or continues execution of the continuation on the current thread.
+ * Runs or continues execution on the current thread. The virtual thread is mounted
+ * on the current thread before the task runs or continues. It unmounts when the
+ * task completes or yields.
*/
+ @ChangesCurrentThread
private void runContinuation() {
// the carrier must be a platform thread
if (Thread.currentThread().isVirtual()) {
@@ -204,24 +226,27 @@ private void runContinuation() {
// set state to RUNNING
int initialState = state();
- if (initialState == STARTED && compareAndSetState(STARTED, RUNNING)) {
- // first run
- } else if (initialState == RUNNABLE && compareAndSetState(RUNNABLE, RUNNING)) {
- // consume parking permit
- setParkPermit(false);
+ if (initialState == STARTED || initialState == UNPARKED || initialState == YIELDED) {
+ // newly started or continue after parking/blocking/Thread.yield
+ if (!compareAndSetState(initialState, RUNNING)) {
+ return;
+ }
+ // consume parking permit when continuing after parking
+ if (initialState == UNPARKED) {
+ setParkPermit(false);
+ }
} else {
// not runnable
return;
}
- // notify JVMTI before mount
- notifyJvmtiMount(/*hide*/true);
-
+ mount();
try {
cont.run();
} finally {
+ unmount();
if (cont.isDone()) {
- afterTerminate();
+ afterDone();
} else {
afterYield();
}
@@ -231,8 +256,7 @@ private void runContinuation() {
/**
* Submits the runContinuation task to the scheduler. For the default scheduler,
* and calling it on a worker thread, the task will be pushed to the local queue,
- * otherwise it will be pushed to a submission queue.
- *
+ * otherwise it will be pushed to an external submission queue.
* @throws RejectedExecutionException
*/
private void submitRunContinuation() {
@@ -245,7 +269,7 @@ private void submitRunContinuation() {
}
/**
- * Submits the runContinuation task to the scheduler with a lazy submit.
+ * Submits the runContinuation task to given scheduler with a lazy submit.
* @throws RejectedExecutionException
* @see ForkJoinPool#lazySubmit(ForkJoinTask)
*/
@@ -259,7 +283,7 @@ private void lazySubmitRunContinuation(ForkJoinPool pool) {
}
/**
- * Submits the runContinuation task to the scheduler as an external submit.
+ * Submits the runContinuation task to the given scheduler as an external submit.
* @throws RejectedExecutionException
* @see ForkJoinPool#externalSubmit(ForkJoinTask)
*/
@@ -285,16 +309,12 @@ private void submitFailed(RejectedExecutionException ree) {
}
/**
- * Runs a task in the context of this virtual thread. The virtual thread is
- * mounted on the current (carrier) thread before the task runs. It unmounts
- * from its carrier thread when the task completes.
+ * Runs a task in the context of this virtual thread.
*/
- @ChangesCurrentThread
private void run(Runnable task) {
- assert state == RUNNING;
+ assert Thread.currentThread() == this && state == RUNNING;
- // first mount
- mount();
+ // notify JVMTI, may post VirtualThreadStart event
notifyJvmtiStart();
// emit JFR event if enabled
@@ -322,12 +342,8 @@ private void run(Runnable task) {
}
} finally {
- // last unmount
+ // notify JVMTI, may post VirtualThreadEnd event
notifyJvmtiEnd();
- unmount();
-
- // final state
- setState(TERMINATED);
}
}
}
@@ -339,6 +355,9 @@ private void run(Runnable task) {
@ChangesCurrentThread
@ReservedStackAccess
private void mount() {
+ // notify JVMTI before mount
+ notifyJvmtiMount(/*hide*/true);
+
// sets the carrier thread
Thread carrier = Thread.currentCarrierThread();
setCarrierThread(carrier);
@@ -375,6 +394,9 @@ private void unmount() {
setCarrierThread(null);
}
carrier.clearInterrupt();
+
+ // notify JVMTI after unmount
+ notifyJvmtiUnmount(/*hide*/false);
}
/**
@@ -417,21 +439,15 @@ V executeOnCarrierThread(Callable task) throws Exception {
}
/**
- * Unmounts this virtual thread, invokes Continuation.yield, and re-mounts the
- * thread when continued. When enabled, JVMTI must be notified from this method.
- * @return true if the yield was successful
+ * Invokes Continuation.yield, notifying JVMTI (if enabled) to hide frames until
+ * the continuation continues.
*/
@Hidden
- @ChangesCurrentThread
private boolean yieldContinuation() {
- // unmount
notifyJvmtiUnmount(/*hide*/true);
- unmount();
try {
return Continuation.yield(VTHREAD_SCOPE);
} finally {
- // re-mount
- mount();
notifyJvmtiMount(/*hide*/false);
}
}
@@ -442,17 +458,17 @@ private boolean yieldContinuation() {
* If yielding due to Thread.yield then it just submits the task to continue.
*/
private void afterYield() {
- int s = state();
- assert (s == PARKING || s == YIELDING) && (carrierThread == null);
+ assert carrierThread == null;
- if (s == PARKING) {
- setState(PARKED);
+ int s = state();
- // notify JVMTI that unmount has completed, thread is parked
- notifyJvmtiUnmount(/*hide*/false);
+ // LockSupport.park/parkNanos
+ if (s == PARKING || s == TIMED_PARKING) {
+ int newState = (s == PARKING) ? PARKED : TIMED_PARKED;
+ setState(newState);
// may have been unparked while parking
- if (parkPermit && compareAndSetState(PARKED, RUNNABLE)) {
+ if (parkPermit && compareAndSetState(newState, UNPARKED)) {
// lazy submit to continue on the current thread as carrier if possible
if (currentThread() instanceof CarrierThread ct) {
lazySubmitRunContinuation(ct.getPool());
@@ -461,11 +477,12 @@ private void afterYield() {
}
}
- } else if (s == YIELDING) { // Thread.yield
- setState(RUNNABLE);
+ return;
+ }
- // notify JVMTI that unmount has completed, thread is runnable
- notifyJvmtiUnmount(/*hide*/false);
+ // Thread.yield
+ if (s == YIELDING) {
+ setState(YIELDED);
// external submit if there are no tasks in the local task queue
if (currentThread() instanceof CarrierThread ct && ct.getQueuedTaskCount() == 0) {
@@ -473,30 +490,28 @@ private void afterYield() {
} else {
submitRunContinuation();
}
+ return;
}
+
+ assert false;
}
/**
- * Invoked after the thread terminates execution. It notifies anyone
- * waiting for the thread to terminate.
+ * Invoked after the continuation completes.
*/
- private void afterTerminate() {
- afterTerminate(true, true);
+ private void afterDone() {
+ afterDone(true);
}
/**
- * Invoked after the thread terminates (or start failed). This method
- * notifies anyone waiting for the thread to terminate.
+ * Invoked after the continuation completes (or start failed). Sets the thread
+ * state to TERMINATED and notifies anyone waiting for the thread to terminate.
*
* @param notifyContainer true if its container should be notified
- * @param executed true if the thread executed, false if it failed to start
*/
- private void afterTerminate(boolean notifyContainer, boolean executed) {
- assert (state() == TERMINATED) && (carrierThread == null);
-
- if (executed) {
- notifyJvmtiUnmount(/*hide*/false);
- }
+ private void afterDone(boolean notifyContainer) {
+ assert carrierThread == null;
+ setState(TERMINATED);
// notify anyone waiting for this virtual thread to terminate
CountDownLatch termination = this.termination;
@@ -546,8 +561,7 @@ void start(ThreadContainer container) {
started = true;
} finally {
if (!started) {
- setState(TERMINATED);
- afterTerminate(addedToContainer, /*executed*/false);
+ afterDone(addedToContainer);
}
}
}
@@ -615,14 +629,14 @@ void parkNanos(long nanos) {
long startTime = System.nanoTime();
boolean yielded = false;
- Future> unparker = scheduleUnpark(this::unpark, nanos);
- setState(PARKING);
+ Future> unparker = scheduleUnpark(nanos); // may throw OOME
+ setState(TIMED_PARKING);
try {
yielded = yieldContinuation(); // may throw
} finally {
assert (Thread.currentThread() == this) && (yielded == (state() == RUNNING));
if (!yielded) {
- assert state() == PARKING;
+ assert state() == TIMED_PARKING;
setState(RUNNING);
}
cancel(unparker);
@@ -654,7 +668,7 @@ private void parkOnCarrierThread(boolean timed, long nanos) {
event = null;
}
- setState(PINNED);
+ setState(timed ? TIMED_PINNED : PINNED);
try {
if (!parkPermit) {
if (!timed) {
@@ -680,14 +694,15 @@ private void parkOnCarrierThread(boolean timed, long nanos) {
}
/**
- * Schedule an unpark task to run after a given delay.
+ * Schedule this virtual thread to be unparked after a given delay.
*/
@ChangesCurrentThread
- private Future> scheduleUnpark(Runnable unparker, long nanos) {
+ private Future> scheduleUnpark(long nanos) {
+ assert Thread.currentThread() == this;
// need to switch to current carrier thread to avoid nested parking
switchToCarrierThread();
try {
- return UNPARKER.schedule(unparker, nanos, NANOSECONDS);
+ return UNPARKER.schedule(this::unpark, nanos, NANOSECONDS);
} finally {
switchToVirtualThread(this);
}
@@ -722,7 +737,8 @@ void unpark() {
Thread currentThread = Thread.currentThread();
if (!getAndSetParkPermit(true) && currentThread != this) {
int s = state();
- if (s == PARKED && compareAndSetState(PARKED, RUNNABLE)) {
+ boolean parked = (s == PARKED) || (s == TIMED_PARKED);
+ if (parked && compareAndSetState(s, UNPARKED)) {
if (currentThread instanceof VirtualThread vthread) {
vthread.switchToCarrierThread();
try {
@@ -733,11 +749,11 @@ void unpark() {
} else {
submitRunContinuation();
}
- } else if (s == PINNED) {
- // unpark carrier thread when pinned.
+ } else if ((s == PINNED) || (s == TIMED_PINNED)) {
+ // unpark carrier thread when pinned
synchronized (carrierThreadAccessLock()) {
Thread carrier = carrierThread;
- if (carrier != null && state() == PINNED) {
+ if (carrier != null && ((s = state()) == PINNED || s == TIMED_PINNED)) {
U.unpark(carrier);
}
}
@@ -874,7 +890,8 @@ boolean getAndClearInterrupt() {
@Override
Thread.State threadState() {
- switch (state()) {
+ int s = state();
+ switch (s & ~SUSPENDED) {
case NEW:
return Thread.State.NEW;
case STARTED:
@@ -884,8 +901,8 @@ Thread.State threadState() {
} else {
return Thread.State.RUNNABLE;
}
- case RUNNABLE:
- case RUNNABLE_SUSPENDED:
+ case UNPARKED:
+ case YIELDED:
// runnable, not mounted
return Thread.State.RUNNABLE;
case RUNNING:
@@ -899,13 +916,16 @@ Thread.State threadState() {
// runnable, mounted
return Thread.State.RUNNABLE;
case PARKING:
+ case TIMED_PARKING:
case YIELDING:
- // runnable, mounted, not yet waiting
+ // runnable, in transition
return Thread.State.RUNNABLE;
case PARKED:
- case PARKED_SUSPENDED:
case PINNED:
- return Thread.State.WAITING;
+ return State.WAITING;
+ case TIMED_PARKED:
+ case TIMED_PINNED:
+ return State.TIMED_WAITING;
case TERMINATED:
return Thread.State.TERMINATED;
default:
@@ -940,35 +960,58 @@ StackTraceElement[] asyncGetStackTrace() {
/**
* Returns the stack trace for this virtual thread if it is unmounted.
- * Returns null if the thread is in another state.
+ * Returns null if the thread is mounted or in transition.
*/
private StackTraceElement[] tryGetStackTrace() {
- int initialState = state();
- return switch (initialState) {
- case RUNNABLE, PARKED -> {
- int suspendedState = initialState | SUSPENDED;
- if (compareAndSetState(initialState, suspendedState)) {
- try {
- yield cont.getStackTrace();
- } finally {
- assert state == suspendedState;
- setState(initialState);
-
- // re-submit if runnable
- // re-submit if unparked while suspended
- if (initialState == RUNNABLE
- || (parkPermit && compareAndSetState(PARKED, RUNNABLE))) {
- try {
- submitRunContinuation();
- } catch (RejectedExecutionException ignore) { }
- }
- }
- }
- yield null;
+ int initialState = state() & ~SUSPENDED;
+ switch (initialState) {
+ case NEW, STARTED, TERMINATED -> {
+ return new StackTraceElement[0]; // unmounted, empty stack
+ }
+ case RUNNING, PINNED, TIMED_PINNED -> {
+ return null; // mounted
+ }
+ case PARKED, TIMED_PARKED -> {
+ // unmounted, not runnable
}
- case NEW, STARTED, TERMINATED -> new StackTraceElement[0]; // empty stack
- default -> null;
+ case UNPARKED, YIELDED -> {
+ // unmounted, runnable
+ }
+ case PARKING, TIMED_PARKING, YIELDING -> {
+ return null; // in transition
+ }
+ default -> throw new InternalError("" + initialState);
+ }
+
+ // thread is unmounted, prevent it from continuing
+ int suspendedState = initialState | SUSPENDED;
+ if (!compareAndSetState(initialState, suspendedState)) {
+ return null;
+ }
+
+ // get stack trace and restore state
+ StackTraceElement[] stack;
+ try {
+ stack = cont.getStackTrace();
+ } finally {
+ assert state == suspendedState;
+ setState(initialState);
+ }
+ boolean resubmit = switch (initialState) {
+ case UNPARKED, YIELDED -> {
+ // resubmit as task may have run while suspended
+ yield true;
+ }
+ case PARKED, TIMED_PARKED -> {
+ // resubmit if unparked while suspended
+ yield parkPermit && compareAndSetState(initialState, UNPARKED);
+ }
+ default -> throw new InternalError();
};
+ if (resubmit) {
+ submitRunContinuation();
+ }
+ return stack;
}
@Override
diff --git a/src/java.base/share/classes/java/lang/invoke/MethodType.java b/src/java.base/share/classes/java/lang/invoke/MethodType.java
index 951d209b69f..447a26e7600 100644
--- a/src/java.base/share/classes/java/lang/invoke/MethodType.java
+++ b/src/java.base/share/classes/java/lang/invoke/MethodType.java
@@ -33,7 +33,9 @@
import java.lang.ref.WeakReference;
import java.util.Arrays;
import java.util.Collections;
+import java.util.function.Supplier;
import java.util.List;
+import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
@@ -42,7 +44,8 @@
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Stream;
-import jdk.internal.access.SharedSecrets;
+import jdk.internal.util.ReferencedKeySet;
+import jdk.internal.util.ReferenceKey;
import jdk.internal.vm.annotation.Stable;
import sun.invoke.util.BytecodeDescriptor;
import sun.invoke.util.VerifyType;
@@ -227,7 +230,13 @@ private static IndexOutOfBoundsException newIndexOutOfBoundsException(Object num
return new IndexOutOfBoundsException(num.toString());
}
- static final ConcurrentWeakInternSet<MethodType> internTable = new ConcurrentWeakInternSet<>();
+ static final ReferencedKeySet<MethodType> internTable =
+ ReferencedKeySet.create(false, true, new Supplier<>() {
+ @Override
+ public Map<ReferenceKey<MethodType>, ReferenceKey<MethodType>> get() {
+ return new ConcurrentHashMap<>(512);
+ }
+ });
static final Class<?>[] NO_PTYPES = {};
@@ -405,7 +414,7 @@ private static MethodType makeImpl(Class<?> rtype, Class<?>[] ptypes, boolean tr
mt = new MethodType(rtype, ptypes);
}
mt.form = MethodTypeForm.findForm(mt);
- return internTable.add(mt);
+ return internTable.intern(mt);
}
private static final @Stable MethodType[] objectOnlyTypes = new MethodType[20];
@@ -883,10 +892,6 @@ public Class<?>[] parameterArray() {
* @param x object to compare
* @see Object#equals(Object)
*/
- // This implementation may also return true if x is a WeakEntry containing
- // a method type that is equal to this. This is an internal implementation
- // detail to allow for faster method type lookups.
- // See ConcurrentWeakInternSet.WeakEntry#equals(Object)
@Override
public boolean equals(Object x) {
if (this == x) {
@@ -895,12 +900,6 @@ public boolean equals(Object x) {
if (x instanceof MethodType) {
return equals((MethodType)x);
}
- if (x instanceof ConcurrentWeakInternSet.WeakEntry) {
- Object o = ((ConcurrentWeakInternSet.WeakEntry)x).get();
- if (o instanceof MethodType) {
- return equals((MethodType)o);
- }
- }
return false;
}
@@ -1392,112 +1391,4 @@ private Object readResolve() {
wrapAlt = null;
return mt;
}
-
- /**
- * Simple implementation of weak concurrent intern set.
- *
- * @param <T> interned type
- */
- private static class ConcurrentWeakInternSet<T> {
-
- private final ConcurrentMap<WeakEntry<T>, WeakEntry<T>> map;
- private final ReferenceQueue<T> stale;
-
- public ConcurrentWeakInternSet() {
- this.map = new ConcurrentHashMap<>(512);
- this.stale = SharedSecrets.getJavaLangRefAccess().newNativeReferenceQueue();
- }
-
- /**
- * Get the existing interned element.
- * This method returns null if no element is interned.
- *
- * @param elem element to look up
- * @return the interned element
- */
- public T get(T elem) {
- if (elem == null) throw new NullPointerException();
- expungeStaleElements();
-
- WeakEntry<T> value = map.get(elem);
- if (value != null) {
- T res = value.get();
- if (res != null) {
- return res;
- }
- }
- return null;
- }
-
- /**
- * Interns the element.
- * Always returns non-null element, matching the one in the intern set.
- * Under the race against another add(), it can return different
- * element, if another thread beats us to interning it.
- *
- * @param elem element to add
- * @return element that was actually added
- */
- public T add(T elem) {
- if (elem == null) throw new NullPointerException();
-
- // Playing double race here, and so spinloop is required.
- // First race is with two concurrent updaters.
- // Second race is with GC purging weak ref under our feet.
- // Hopefully, we almost always end up with a single pass.
- T interned;
- WeakEntry<T> e = new WeakEntry<>(elem, stale);
- do {
- expungeStaleElements();
- WeakEntry<T> exist = map.putIfAbsent(e, e);
- interned = (exist == null) ? elem : exist.get();
- } while (interned == null);
- return interned;
- }
-
- private void expungeStaleElements() {
- Reference<? extends T> reference;
- while ((reference = stale.poll()) != null) {
- map.remove(reference);
- }
- }
-
- private static class WeakEntry<T> extends WeakReference<T> {
-
- public final int hashcode;
-
- public WeakEntry(T key, ReferenceQueue<T> queue) {
- super(key, queue);
- hashcode = key.hashCode();
- }
-
- /**
- * This implementation returns {@code true} if {@code obj} is another
- * {@code WeakEntry} whose referent is equal to this referent, or
- * if {@code obj} is equal to the referent of this. This allows
- * lookups to be made without wrapping in a {@code WeakEntry}.
- *
- * @param obj the object to compare
- * @return true if {@code obj} is equal to this or the referent of this
- * @see MethodType#equals(Object)
- * @see Object#equals(Object)
- */
- @Override
- public boolean equals(Object obj) {
- Object mine = get();
- if (obj instanceof WeakEntry) {
- Object that = ((WeakEntry) obj).get();
- return (that == null || mine == null) ? (this == obj) : mine.equals(that);
- }
- return (mine == null) ? (obj == null) : mine.equals(obj);
- }
-
- @Override
- public int hashCode() {
- return hashcode;
- }
-
- }
- }
-
}
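
The MethodType change above replaces the hand-rolled ConcurrentWeakInternSet with ReferencedKeySet.intern(). The interning contract itself is simple; here is a minimal sketch under the simplifying assumption of strong references (the removed class and its replacement additionally drop entries once the canonical instance is no longer referenced):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class InternSetSketch<T> {
    private final ConcurrentMap<T, T> canon = new ConcurrentHashMap<>();

    /** Returns the canonical (first-added) instance equal to {@code value}. */
    T intern(T value) {
        T prior = canon.putIfAbsent(value, value);
        return prior != null ? prior : value;
    }

    public static void main(String[] args) {
        InternSetSketch<String> set = new InternSetSketch<>();
        String a = new String("x"), b = new String("x");
        System.out.println(set.intern(a) == set.intern(b));   // true: same canonical instance
    }
}
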
diff --git a/src/java.base/share/classes/java/lang/runtime/Carriers.java b/src/java.base/share/classes/java/lang/runtime/Carriers.java
index e0ebc998ee5..a74144fcbeb 100644
--- a/src/java.base/share/classes/java/lang/runtime/Carriers.java
+++ b/src/java.base/share/classes/java/lang/runtime/Carriers.java
@@ -35,6 +35,7 @@
import java.util.concurrent.ConcurrentHashMap;
import jdk.internal.misc.Unsafe;
+import jdk.internal.util.ReferencedKeyMap;
import static java.lang.invoke.MethodType.methodType;
@@ -366,7 +367,7 @@ MethodHandle[] createComponents(CarrierShape carrierShape) {
* Cache mapping {@link MethodType} to previously defined {@link CarrierElements}.
*/
private static final Map<MethodType, CarrierElements>
- methodTypeCache = ReferencedKeyMap.create(ConcurrentHashMap::new);
+ methodTypeCache = ReferencedKeyMap.create(false, ConcurrentHashMap::new);
/**
* Permute a raw constructor and component accessor {@link MethodHandle MethodHandles} to
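
The only change to Carriers is the extra boolean now required by ReferencedKeyMap.create: it selects soft (true) versus weak (false) keys, and false preserves the previous weak-key behaviour. A hedged usage sketch, as it would look from code inside java.base (jdk.internal.util is not exported, so this does not compile in an ordinary application):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import jdk.internal.util.ReferencedKeyMap;   // JDK-internal API, illustration only

class WeakKeyCacheSketch {
    // Weakly-keyed cache: an entry disappears once nothing else references its key.
    static final Map<String, Integer> CACHE =
            ReferencedKeyMap.create(false, ConcurrentHashMap::new);
}
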
diff --git a/src/java.base/share/classes/java/security/Provider.java b/src/java.base/share/classes/java/security/Provider.java
index 7246285b349..de857d014bc 100644
--- a/src/java.base/share/classes/java/security/Provider.java
+++ b/src/java.base/share/classes/java/security/Provider.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
import jdk.internal.event.SecurityProviderServiceEvent;
+import javax.security.auth.login.Configuration;
import java.io.*;
+import java.security.cert.CertStoreParameters;
import java.util.*;
import static java.util.Locale.ENGLISH;
import java.lang.ref.*;
@@ -1556,20 +1558,20 @@ public String toString() {
private static class EngineDescription {
final String name;
final boolean supportsParameter;
- final String constructorParameterClassName;
+ final Class<?> constructorParameterClass;
- EngineDescription(String name, boolean sp, String paramName) {
+ EngineDescription(String name, boolean sp, Class<?> constructorParameterClass) {
this.name = name;
this.supportsParameter = sp;
- this.constructorParameterClassName = paramName;
+ this.constructorParameterClass = constructorParameterClass;
}
}
// built in knowledge of the engine types shipped as part of the JDK
private static final Map<String,EngineDescription> knownEngines;
- private static void addEngine(String name, boolean sp, String paramName) {
- EngineDescription ed = new EngineDescription(name, sp, paramName);
+ private static void addEngine(String name, boolean sp, Class<?> constructorParameterClass) {
+ EngineDescription ed = new EngineDescription(name, sp, constructorParameterClass);
// also index by canonical name to avoid toLowerCase() for some lookups
knownEngines.put(name.toLowerCase(ENGLISH), ed);
knownEngines.put(name, ed);
@@ -1585,13 +1587,13 @@ private static void addEngine(String name, boolean sp, String paramName) {
addEngine("KeyStore", false, null);
addEngine("MessageDigest", false, null);
addEngine("SecureRandom", false,
- "java.security.SecureRandomParameters");
+ SecureRandomParameters.class);
addEngine("Signature", true, null);
addEngine("CertificateFactory", false, null);
addEngine("CertPathBuilder", false, null);
addEngine("CertPathValidator", false, null);
addEngine("CertStore", false,
- "java.security.cert.CertStoreParameters");
+ CertStoreParameters.class);
// JCE
addEngine("Cipher", true, null);
addEngine("ExemptionMechanism", false, null);
@@ -1610,18 +1612,20 @@ private static void addEngine(String name, boolean sp, String paramName) {
addEngine("SaslClientFactory", false, null);
addEngine("SaslServerFactory", false, null);
// POLICY
+ @SuppressWarnings("removal")
+ Class<Policy.Parameters> policyParams = Policy.Parameters.class;
addEngine("Policy", false,
- "java.security.Policy$Parameters");
+ policyParams);
// CONFIGURATION
addEngine("Configuration", false,
- "javax.security.auth.login.Configuration$Parameters");
+ Configuration.Parameters.class);
// XML DSig
addEngine("XMLSignatureFactory", false, null);
addEngine("KeyInfoFactory", false, null);
addEngine("TransformService", false, null);
// Smart Card I/O
addEngine("TerminalFactory", false,
- "java.lang.Object");
+ Object.class);
}
// get the "standard" (mixed-case) engine name for arbitrary case engine name
@@ -1895,8 +1899,7 @@ public Object newInstance(Object constructorParameter)
ctrParamClz = constructorParameter == null?
null : constructorParameter.getClass();
} else {
- ctrParamClz = cap.constructorParameterClassName == null?
- null : Class.forName(cap.constructorParameterClassName);
+ ctrParamClz = cap.constructorParameterClass;
if (constructorParameter != null) {
if (ctrParamClz == null) {
throw new InvalidParameterException
@@ -1907,7 +1910,7 @@ public Object newInstance(Object constructorParameter)
if (!ctrParamClz.isAssignableFrom(argClass)) {
throw new InvalidParameterException
("constructorParameter must be instanceof "
- + cap.constructorParameterClassName.replace('$', '.')
+ + cap.constructorParameterClass.getName().replace('$', '.')
+ " for engine type " + type);
}
}
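
The Provider change stores the resolved Class object in each EngineDescription instead of a class name, so newInstance no longer needs Class.forName and the parameter check reduces to isAssignableFrom. A minimal sketch of that check with illustrative names (not the actual Provider code path, which throws InvalidParameterException):

class EngineParamCheckSketch {
    // declared == null means the engine type takes no constructor parameter at all.
    static void check(Class<?> declared, Object argument) {
        if (argument == null) {
            return;                                   // nothing to validate
        }
        if (declared == null) {
            throw new IllegalArgumentException("constructorParameter not used with this engine");
        }
        if (!declared.isAssignableFrom(argument.getClass())) {
            throw new IllegalArgumentException("constructorParameter must be instanceof "
                    + declared.getName().replace('$', '.'));
        }
    }

    public static void main(String[] args) {
        check(java.security.SecureRandomParameters.class, null);   // ok
        check(Object.class, "any value");                          // ok
        check(java.security.cert.CertStoreParameters.class, 42);   // throws
    }
}
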
diff --git a/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java b/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java
index b9b180fc2da..ed9967ec3eb 100644
--- a/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java
+++ b/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java
@@ -54,7 +54,7 @@ public interface JavaLangRefAccess {
/**
* Constructs a new NativeReferenceQueue.
*
- * Invoked by MethodType.ConcurrentWeakInternSet
+ * Invoked by jdk.internal.util.ReferencedKeyMap
*/
<T> ReferenceQueue<T> newNativeReferenceQueue();
}
diff --git a/src/java.base/share/classes/jdk/internal/event/EventHelper.java b/src/java.base/share/classes/jdk/internal/event/EventHelper.java
index e890ad8dde0..4da2d5854cb 100644
--- a/src/java.base/share/classes/jdk/internal/event/EventHelper.java
+++ b/src/java.base/share/classes/jdk/internal/event/EventHelper.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
import jdk.internal.access.JavaUtilJarAccess;
import jdk.internal.access.SharedSecrets;
+import jdk.internal.misc.ThreadTracker;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
@@ -133,6 +134,18 @@ private static String getDurationString(Instant start) {
}
}
+ private static class ThreadTrackHolder {
+ static final ThreadTracker TRACKER = new ThreadTracker();
+ }
+
+ private static Object tryBeginLookup() {
+ return ThreadTrackHolder.TRACKER.tryBegin();
+ }
+
+ private static void endLookup(Object key) {
+ ThreadTrackHolder.TRACKER.end(key);
+ }
+
/**
* Helper to determine if security events are being logged
* at a preconfigured logging level. The configuration value
@@ -141,14 +154,20 @@ private static String getDurationString(Instant start) {
* @return boolean indicating whether an event should be logged
*/
public static boolean isLoggingSecurity() {
- // Avoid a bootstrap issue where the commitEvent attempts to
- // trigger early loading of System Logger but where
- // the verification process still has JarFiles locked
- if (securityLogger == null && !JUJA.isInitializing()) {
- LOGGER_HANDLE.compareAndSet( null, System.getLogger(SECURITY_LOGGER_NAME));
- loggingSecurity = securityLogger.isLoggable(LOG_LEVEL);
+ Object key;
+ // Avoid bootstrap issues where
+ // * commitEvent triggers early loading of System Logger but where
+ // the verification process still has JarFiles locked
+ // * the loading of the logging libraries involves recursive
+ // calls to security libraries triggering recursion
+ if (securityLogger == null && !JUJA.isInitializing() && (key = tryBeginLookup()) != null) {
+ try {
+ LOGGER_HANDLE.compareAndSet(null, System.getLogger(SECURITY_LOGGER_NAME));
+ loggingSecurity = securityLogger.isLoggable(LOG_LEVEL);
+ } finally {
+ endLookup(key);
+ }
}
return loggingSecurity;
}
-
}
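
The EventHelper change wraps the logger lookup in a tryBeginLookup()/endLookup() pair so that a thread which re-enters isLoggingSecurity() while already initializing the System Logger bails out instead of recursing. ThreadTracker is JDK-internal; the following is a stand-in for the same guard using a plain set of thread ids (illustrative only, not the ThreadTracker implementation):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class ReentryGuardSketch {
    private final Set<Long> active = ConcurrentHashMap.newKeySet();

    /** Returns a non-null key if the current thread was not already inside, else null. */
    Long tryBegin() {
        long id = Thread.currentThread().threadId();
        return active.add(id) ? id : null;
    }

    void end(Long key) {
        active.remove(key);
    }

    // Typical call shape, mirroring the patch:
    void runOnceNonReentrantly(Runnable body) {
        Long key = tryBegin();
        if (key == null) {
            return;            // already inside on this thread; skip instead of recursing
        }
        try {
            body.run();
        } finally {
            end(key);
        }
    }
}
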
diff --git a/src/java.base/share/classes/java/lang/runtime/ReferenceKey.java b/src/java.base/share/classes/jdk/internal/util/ReferenceKey.java
similarity index 81%
rename from src/java.base/share/classes/java/lang/runtime/ReferenceKey.java
rename to src/java.base/share/classes/jdk/internal/util/ReferenceKey.java
index 983d81d3a0f..a193794fe70 100644
--- a/src/java.base/share/classes/java/lang/runtime/ReferenceKey.java
+++ b/src/java.base/share/classes/jdk/internal/util/ReferenceKey.java
@@ -23,12 +23,9 @@
* questions.
*/
-package java.lang.runtime;
+package jdk.internal.util;
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.SoftReference;
-import java.lang.ref.WeakReference;
-import java.util.Objects;
+import java.lang.ref.Reference;
/**
* View/wrapper of keys used by the backing {@link ReferencedKeyMap}.
@@ -39,11 +36,8 @@
* @param <T> key type
*
* @since 21
- *
- * Warning: This class is part of PreviewFeature.Feature.STRING_TEMPLATES.
- * Do not rely on its availability.
*/
-sealed interface ReferenceKey<T> permits StrongReferenceKey<T>, WeakReferenceKey<T>, SoftReferenceKey<T> {
+public sealed interface ReferenceKey<T> permits StrongReferenceKey<T>, WeakReferenceKey<T>, SoftReferenceKey<T> {
/**
* {@return the value of the unwrapped key}
*/
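
ReferenceKey is only being moved and made public here; its shape is a sealed interface over strong, weak, and soft key wrappers. A rough sketch of that shape with hypothetical names (the real WeakReferenceKey/SoftReferenceKey also carry a ReferenceQueue for stale-entry cleanup):

import java.lang.ref.WeakReference;

sealed interface KeyViewSketch<T> permits StrongKeySketch, WeakKeySketch {
    T get();   // the unwrapped key, or null once a weak referent has been collected
}

record StrongKeySketch<T>(T referent) implements KeyViewSketch<T> {
    public T get() { return referent; }
}

final class WeakKeySketch<T> extends WeakReference<T> implements KeyViewSketch<T> {
    WeakKeySketch(T referent) { super(referent); }
    // get() is inherited from WeakReference<T> and satisfies the interface.
}
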
diff --git a/src/java.base/share/classes/java/lang/runtime/ReferencedKeyMap.java b/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java
similarity index 60%
rename from src/java.base/share/classes/java/lang/runtime/ReferencedKeyMap.java
rename to src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java
index 1ded08c4cba..be392c3ae2d 100644
--- a/src/java.base/share/classes/java/lang/runtime/ReferencedKeyMap.java
+++ b/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java
@@ -23,7 +23,7 @@
* questions.
*/
-package java.lang.runtime;
+package jdk.internal.util;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
@@ -37,9 +37,12 @@
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;
+import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import jdk.internal.access.SharedSecrets;
+
/**
* This class provides management of {@link Map maps} where it is desirable to
* remove entries automatically when the key is garbage collected. This is
@@ -78,11 +81,8 @@
* @param <V> the type of mapped values
*
* @since 21
- *
- * Warning: This class is part of PreviewFeature.Feature.STRING_TEMPLATES.
- * Do not rely on its availability.
*/
-final class ReferencedKeyMap<K, V> implements Map<K, V> {
+public final class ReferencedKeyMap<K, V> implements Map<K, V> {
/**
* true if {@link SoftReference} keys are to be used,
* {@link WeakReference} otherwise.
@@ -95,54 +95,61 @@ final class ReferencedKeyMap<K, V> implements Map<K, V> {
private final Map<ReferenceKey<K>, V> map;
/**
- * {@link ReferenceQueue} for cleaning up {@link WeakReferenceKey EntryKeys}.
+ * {@link ReferenceQueue} for cleaning up entries.
*/
private final ReferenceQueue<K> stale;
/**
* Private constructor.
*
- * @param isSoft true if {@link SoftReference} keys are to
- * be used, {@link WeakReference} otherwise.
- * @param map backing map
+ * @param isSoft true if {@link SoftReference} keys are to
+ * be used, {@link WeakReference} otherwise.
+ * @param map backing map
+ * @param stale {@link ReferenceQueue} for cleaning up entries
*/
- private ReferencedKeyMap(boolean isSoft, Map<ReferenceKey<K>, V> map) {
+ private ReferencedKeyMap(boolean isSoft, Map<ReferenceKey<K>, V> map, ReferenceQueue<K> stale) {
this.isSoft = isSoft;
this.map = map;
- this.stale = new ReferenceQueue<>();
+ this.stale = stale;
}
/**
* Create a new {@link ReferencedKeyMap} map.
*
- * @param isSoft true if {@link SoftReference} keys are to
- * be used, {@link WeakReference} otherwise.
- * @param supplier {@link Supplier} of the backing map
+ * @param isSoft true if {@link SoftReference} keys are to
+ * be used, {@link WeakReference} otherwise.
+ * @param supplier {@link Supplier} of the backing map
*
* @return a new map with {@link Reference} keys
*
* @param <K> the type of keys maintained by the new map
* @param <V> the type of mapped values
*/
- static <K, V> ReferencedKeyMap<K, V>
+ public static <K, V> ReferencedKeyMap<K, V>
create(boolean isSoft, Supplier