The basic tooling is provided as part of the core operating system,
but you will most likely need to install developer packages.
For apt-based distributions (Debian, Ubuntu, etc), try this:
-sudo apt-get install build-essential
+sudo apt-get install build-essential autoconf
For rpm-based distributions (Fedora, Red Hat, etc), try this:
sudo yum groupinstall "Development Tools"
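On recent Fedora releases, dnf has replaced yum; assuming the package group is still named "Development Tools", the equivalent command is:
```
sudo dnf groupinstall "Development Tools"
```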
For Alpine Linux, aside from basic tooling, install the GNU versions of some programs.
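A typical command on a stock Alpine installation might look like the following (the exact package list is an assumption and may differ between releases):
```
sudo apk add build-base bash grep zip
```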
@@ -2166,15 +2166,26 @@
Using Multiple Configurations
configure from there, e.g.
mkdir build/<name> && cd build/<name> && bash ../../configure.
Then you can build that configuration using
-make CONF_NAME=<name> or
-make CONF=<pattern>, where
-<pattern> is a substring matching one or several
-configurations, e.g. CONF=debug. The special empty pattern
-(CONF=) will match all available configuration, so
-make CONF= hotspot will build the hotspot
-target for all configurations. Alternatively, you can execute
-make in the configuration directory, e.g.
-cd build/<name> && make.
+make CONF=<selector>, where
+<selector> is interpreted as follows:
+
+
If <selector> exactly matches the name of a
+configuration, this and only this configuration will be selected.
+
If <selector> matches (i.e. is a substring of)
+the names of several configurations, then all these configurations will
+be selected.
+
If <selector> is empty (i.e. CONF=),
+then all configurations will be selected.
+
If <selector> begins with !, then
+all configurations not matching the string following
+! will be selected.
+
+
A more specialized version, CONF_NAME=<name> also
+exists, which will only match if the given <name>
+exactly matches a single configuration.
+
Alternatively, you can execute make in the configuration
+directory, e.g. cd build/<name> && make.
+
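For example, assuming two hypothetical configurations named linux-x64 and linux-x64-debug exist under build/, the selector rules could be exercised like this:
```
# Substring match: builds images only for linux-x64-debug
make CONF=debug images

# Empty selector: builds the hotspot target for both configurations
make CONF= hotspot

# Negated selector: builds images only for linux-x64; the quotes prevent
# history expansion of '!' in interactive bash shells
make CONF='!debug' images
```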
Handling Reconfigurations
If you update the repository and part of the configure script has
changed, the build system will force you to re-run
diff --git a/doc/building.md b/doc/building.md
index 9d928a3924557..ed8a06693551d 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -349,7 +349,7 @@ will most likely need to install developer packages.
For apt-based distributions (Debian, Ubuntu, etc), try this:
```
-sudo apt-get install build-essential
+sudo apt-get install build-essential autoconf
```
For rpm-based distributions (Fedora, Red Hat, etc), try this:
@@ -1952,12 +1952,24 @@ configuration with the name `<name>`. Alternatively, you can create a directory
under `build` and run `configure` from there, e.g. `mkdir build/<name> && cd
build/<name> && bash ../../configure`.
-Then you can build that configuration using `make CONF_NAME=<name>` or `make
-CONF=<pattern>`, where `<pattern>` is a substring matching one or several
-configurations, e.g. `CONF=debug`. The special empty pattern (`CONF=`) will
-match *all* available configuration, so `make CONF= hotspot` will build the
-`hotspot` target for all configurations. Alternatively, you can execute `make`
-in the configuration directory, e.g. `cd build/<name> && make`.
+Then you can build that configuration using `make CONF=<selector>`, where
+`<selector>` is interpreted as follows:
+
+* If `<selector>` exactly matches the name of a configuration, this and only
+ this configuration will be selected.
+* If `<selector>` matches (i.e. is a substring of) the names of several
+ configurations, then all these configurations will be selected.
+* If `<selector>` is empty (i.e. `CONF=`), then all configurations will be
+ selected.
+* If `<selector>` begins with `!`, then all configurations **not** matching the
+ string following `!` will be selected.
+
+A more specialized version, `CONF_NAME=<name>` also exists, which will only
+match if the given `<name>` exactly matches a single configuration.
+
+Alternatively, you can execute `make` in the configuration directory, e.g. `cd
+build/<name> && make`.
+
### Handling Reconfigurations
diff --git a/make/Global.gmk b/make/Global.gmk
index e5e76b475b941..1df6c5fb6bc4b 100644
--- a/make/Global.gmk
+++ b/make/Global.gmk
@@ -87,10 +87,9 @@ help:
$(info $(_) # (gensrc, java, copy, libs, launchers, gendata))
$(info )
$(info Make control variables)
- $(info $(_) CONF= # Build all configurations (note, assignment is empty))
- $(info $(_) CONF=<substring> # Build the configuration(s) with a name matching)
- $(info $(_) #     <substring>)
- $(info $(_) CONF_NAME=<name> # Build the configuration with exactly the <name>)
+ $(info $(_) CONF=<selector> # Select which configuration(s) to build)
+ $(info $(_) CONF= # Select all configurations (note, assignment is empty))
+ $(info $(_) CONF_NAME=<name> # Select the configuration with the name <name>)
$(info $(_) SPEC=<spec file> # Build the configuration given by the spec file)
$(info $(_) LOG=<loglevel> # Change the log level from warn to <loglevel>)
$(info $(_) # Available log levels are:)
diff --git a/make/InitSupport.gmk b/make/InitSupport.gmk
index 31c80e2f7267f..9ea01d375ced9 100644
--- a/make/InitSupport.gmk
+++ b/make/InitSupport.gmk
@@ -202,8 +202,14 @@ ifeq ($(HAS_SPEC),)
matching_confs := $$(strip $$(all_confs))
else
# Otherwise select those that contain the given CONF string
- matching_confs := $$(strip $$(foreach var, $$(all_confs), \
- $$(if $$(findstring $$(CONF), $$(var)), $$(var))))
+ ifeq ($$(patsubst !%,,$$(CONF)),)
+ # A CONF starting with ! means we should negate the search term
+ matching_confs := $$(strip $$(foreach var, $$(all_confs), \
+ $$(if $$(findstring $$(subst !,,$$(CONF)), $$(var)), ,$$(var))))
+ else
+ matching_confs := $$(strip $$(foreach var, $$(all_confs), \
+ $$(if $$(findstring $$(CONF), $$(var)), $$(var))))
+ endif
ifneq ($$(filter $$(CONF), $$(matching_confs)), )
# If we found an exact match, use that
matching_confs := $$(CONF)
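To illustrate the exact-match precedence implemented above, assume (hypothetically) that the configurations linux-x64 and linux-x64-debug both exist:
```
# Exact match takes precedence: only linux-x64 is selected, even though the
# string is also a substring of linux-x64-debug
make CONF=linux-x64 images

# Plain substring match: both configurations are selected
make CONF=x64 images
```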
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index 45a1e6528b358..af164624877a4 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1181,12 +1181,6 @@ var getJibProfilesDependencies = function (input, common) {
revision: (input.build_cpu == "x64" ? "Xcode11.3.1-MacOSX10.15+1.2" : devkit_platform_revisions[devkit_platform])
},
- cups: {
- organization: common.organization,
- ext: "tar.gz",
- revision: "1.0118+1.0"
- },
-
jtreg: {
server: "jpg",
product: "jtreg",
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index f22af58f40ab5..501d9df08c1c2 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -2205,14 +2205,14 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
- st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
- if (CompressedKlassPointers::shift() != 0) {
- st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
- }
+ st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
+ st->print_cr("\tcmpw rscratch1, r10");
} else {
- st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
+ st->print_cr("\tcmp rscratch1, r10");
}
- st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
@@ -2221,14 +2221,7 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
-
- __ cmp_klass(j_rarg0, rscratch2, rscratch1);
- Label skip;
- // TODO
- // can we avoid this skip and still use a reloc?
- __ br(Assembler::EQ, skip);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- __ bind(skip);
+ __ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@@ -2582,7 +2575,7 @@ Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
}
// Binary src (Replicate con)
-bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
+static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
if (n == nullptr || m == nullptr) {
return false;
}
@@ -2623,7 +2616,7 @@ bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
// (XorV src (Replicate m1))
// (XorVMask src (MaskAll m1))
-bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
+static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
if (n != nullptr && m != nullptr) {
return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
VectorNode::is_all_ones_vector(m);
@@ -3715,7 +3708,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
} else {
// Emit stub for static call
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -8289,7 +8282,7 @@ instruct membar_acquire() %{
ins_cost(VOLATILE_REF_COST);
format %{ "membar_acquire\n\t"
- "dmb ishld" %}
+ "dmb ish" %}
ins_encode %{
__ block_comment("membar_acquire");
@@ -8343,12 +8336,11 @@ instruct membar_release() %{
ins_cost(VOLATILE_REF_COST);
format %{ "membar_release\n\t"
- "dmb ishst\n\tdmb ishld" %}
+ "dmb ish" %}
ins_encode %{
__ block_comment("membar_release");
- __ membar(Assembler::StoreStore);
- __ membar(Assembler::LoadStore);
+ __ membar(Assembler::LoadStore|Assembler::StoreStore);
%}
ins_pipe(pipe_serial);
%}
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index b83d618506298..ba613b62a3e2a 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -53,7 +53,6 @@
#endif
NEEDS_CLEANUP // remove this definitions ?
-const Register IC_Klass = rscratch2; // where the IC klass is cached
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be
@@ -293,27 +292,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
- Register receiver = FrameMap::receiver_opr->as_register();
- Register ic_klass = IC_Klass;
- int start_offset = __ offset();
- __ inline_cache_check(receiver, ic_klass);
-
- // if icache check fails, then jump to runtime routine
- // Note: RECEIVER must still contain the receiver!
- Label dont;
- __ br(Assembler::EQ, dont);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- // We align the verified entry point unless the method body
- // (including its inline cache check) will fit in a single 64-byte
- // icache line.
- if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
- // force alignment after the cache check.
- __ align(CodeEntryAlignment);
- }
-
- __ bind(dont);
- return start_offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@@ -2042,7 +2021,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
__ emit_static_call_stub();
- assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
+ assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
<= call_stub_size(), "stub too big");
__ end_a_stub();
}
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 43ec189255f9c..ef1b5fe2703e6 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -71,8 +71,8 @@ friend class ArrayCopyStub;
void deoptimize_trap(CodeEmitInfo *info);
enum {
- // call stub: CompiledStaticCall::to_interp_stub_size() +
- // CompiledStaticCall::to_trampoline_stub_size()
+ // call stub: CompiledDirectCall::to_interp_stub_size() +
+ // CompiledDirectCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 7 * NativeInstruction::instruction_size
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index d3a746178f14e..c0d1d1747ab28 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -308,17 +308,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
verify_oop(obj);
}
-
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- verify_oop(receiver);
- // explicit null check not needed since load from [klass_offset] causes a trap
- // check against inline cache
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
-
- cmp_klass(receiver, iCache, rscratch1);
-}
-
-
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index d2f4744a04914..63a32e714e365 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
index c58ff8828bce6..23c08f11d1a8b 100644
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -36,7 +35,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
@@ -71,11 +70,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return MacroAssembler::static_call_stub_size();
}
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// Somewhat pessimistically, we count 3 instructions here (although
// there are only two) because we sometimes emit an alignment nop.
// Trampoline stubs are always word aligned.
@@ -83,21 +82,14 @@ int CompiledStaticCall::to_trampoline_stub_size() {
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
@@ -115,7 +107,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -132,7 +124,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
index c5b2ff8a4c01c..8d0fa8895d15c 100644
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -678,7 +678,7 @@ static void printbc(Method *m, intptr_t bcx) {
printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
}
-void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
+static void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
if (! fp)
return;
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
index 13f2e4b61b9a4..293cc6eb0d0c6 100644
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -127,8 +127,6 @@ define_pd_global(intx, InlineSmallCode, 1000);
range(1, 99) \
product(ccstr, UseBranchProtection, "none", \
"Branch Protection to use: none, standard, pac-ret") \
- product(bool, AlwaysMergeDMB, false, DIAGNOSTIC, \
- "Always merge DMB instructions in code emission") \
// end of ARCH_FLAGS
diff --git a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp b/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp
deleted file mode 100644
index bd8cfc42600e2..0000000000000
--- a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_aarch64.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return (MacroAssembler::far_branches() ? 6 : 4) * NativeInstruction::instruction_size;
-}
-
-#define __ masm->
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
- // note: even though the code contains an embedded value, we do not need reloc info
- // because
- // (1) the value is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
- // assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
-
- address start = __ pc();
- Label l;
- __ ldr(rscratch2, l);
- int jump_code_size = __ far_jump(ExternalAddress(entry_point));
- // IC stub code size is not expected to vary depending on target address.
- // We use NOPs to make the [ldr + far_jump + nops + int64] stub size equal to ic_stub_code_size.
- for (int size = NativeInstruction::instruction_size + jump_code_size + 8;
- size < ic_stub_code_size(); size += NativeInstruction::instruction_size) {
- __ nop();
- }
- __ bind(l);
- assert((uintptr_t)__ pc() % wordSize == 0, "");
- __ emit_int64((int64_t)cached_value);
- // Only need to invalidate the 1st two instructions - not the whole ic stub
- ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size());
- assert(__ pc() - start == ic_stub_code_size(), "must be");
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- NativeJump* jump = nativeJump_at(code_begin + 4);
- return jump->jump_destination();
-}
-
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- // The word containing the cached value is at the end of this IC buffer
- uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize);
- void* o = (void*)*p;
- return o;
-}
diff --git a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
index 3d87fde2b5bcd..7caafc19fbd31 100644
--- a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -53,7 +53,7 @@ struct li_pair {
static struct li_pair InverseLITable[LI_TABLE_SIZE];
// comparator to sort entries in the inverse table
-int compare_immediate_pair(const void *i1, const void *i2)
+static int compare_immediate_pair(const void *i1, const void *i2)
{
struct li_pair *li1 = (struct li_pair *)i1;
struct li_pair *li2 = (struct li_pair *)i2;
@@ -142,7 +142,7 @@ static inline uint32_t uimm(uint32_t val, int hi, int lo)
// result
// a bit string containing count copies of input bit string
//
-uint64_t replicate(uint64_t bits, int nbits, int count)
+static uint64_t replicate(uint64_t bits, int nbits, int count)
{
assert(count > 0, "must be");
assert(nbits > 0, "must be");
@@ -231,8 +231,8 @@ uint64_t replicate(uint64_t bits, int nbits, int count)
// For historical reasons the implementation of this function is much
// more convoluted than is really necessary.
-int expandLogicalImmediate(uint32_t immN, uint32_t immr,
- uint32_t imms, uint64_t &bimm)
+static int expandLogicalImmediate(uint32_t immN, uint32_t immr,
+ uint32_t imms, uint64_t &bimm)
{
int len; // ought to be <= 6
uint32_t levels; // 6 bits
@@ -446,4 +446,3 @@ uint32_t encoding_for_fp_immediate(float immediate)
res = (s << 7) | (r << 4) | f;
return res;
}
-
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 124af3bafbe3a..b19587ebe760a 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -29,6 +29,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
@@ -965,7 +966,7 @@ int MacroAssembler::max_trampoline_stub_size() {
}
void MacroAssembler::emit_static_call_stub() {
- // CompiledDirectStaticCall::set_to_interpreted knows the
+ // CompiledDirectCall::set_to_interpreted knows the
// exact layout of this stub.
isb();
@@ -995,10 +996,51 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
// address const_ptr = long_constant((jlong)Universe::non_oop_word());
// uintptr_t offset;
// ldr_constant(rscratch2, const_ptr);
- movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
+ movptr(rscratch2, (intptr_t)Universe::non_oop_word());
return trampoline_call(Address(entry, rh));
}
+int MacroAssembler::ic_check_size() {
+ if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
+ return NativeInstruction::instruction_size * 7;
+ } else {
+ return NativeInstruction::instruction_size * 5;
+ }
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ Register receiver = j_rarg0;
+ Register data = rscratch2;
+ Register tmp1 = rscratch1;
+ Register tmp2 = r10;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+
+ if (UseCompressedClassPointers) {
+ ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ cmpw(tmp1, tmp2);
+ } else {
+ ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ cmp(tmp1, tmp2);
+ }
+
+ Label dont;
+ br(Assembler::EQ, dont);
+ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ bind(dont);
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
+
+ return uep_offset;
+}
+
// Implementation of call_VM versions
void MacroAssembler::call_VM(Register oop_result,
@@ -1100,7 +1142,14 @@ void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thr
}
void MacroAssembler::align(int modulus) {
- while (offset() % modulus != 0) nop();
+ align(modulus, offset());
+}
+
+// Ensure that the code at target bytes offset from the current offset() is aligned
+// according to modulus.
+void MacroAssembler::align(int modulus, int target) {
+ int delta = target - offset();
+ while ((offset() + delta) % modulus != 0) nop();
}
void MacroAssembler::post_call_nop() {
@@ -1197,7 +1246,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
-// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
+// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
@@ -2066,21 +2115,14 @@ void MacroAssembler::membar(Membar_mask_bits order_constraint) {
address last = code()->last_insn();
if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
NativeMembar *bar = NativeMembar_at(prev);
- // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
- // doing so would introduce a StoreLoad which the caller did not
- // intend
- if (AlwaysMergeDMB || bar->get_kind() == order_constraint
- || bar->get_kind() == AnyAny
- || order_constraint == AnyAny) {
- // We are merging two memory barrier instructions. On AArch64 we
- // can do this simply by ORing them together.
- bar->set_kind(bar->get_kind() | order_constraint);
- BLOCK_COMMENT("merged membar");
- return;
- }
+ // We are merging two memory barrier instructions. On AArch64 we
+ // can do this simply by ORing them together.
+ bar->set_kind(bar->get_kind() | order_constraint);
+ BLOCK_COMMENT("merged membar");
+ } else {
+ code()->set_last_insn(pc());
+ dmb(Assembler::barrier(order_constraint));
}
- code()->set_last_insn(pc());
- dmb(Assembler::barrier(order_constraint));
}
bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index e92e0ee6aa934..990e725d099eb 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -720,6 +720,7 @@ class MacroAssembler: public Assembler {
// Alignment
void align(int modulus);
+ void align(int modulus, int target);
// nop
void post_call_nop();
@@ -1247,6 +1248,8 @@ class MacroAssembler: public Assembler {
// Emit the CompiledIC call idiom
address ic_call(address entry, jint method_index = 0);
+ static int ic_check_size();
+ int ic_check(int end_alignment);
public:
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 82da734611693..216c1ff35092a 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -30,7 +30,6 @@
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -39,7 +38,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -740,9 +738,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Label ok;
-
- Register holder = rscratch2;
+ Register data = rscratch2;
Register receiver = j_rarg0;
Register tmp = r10; // A call-clobbered register not used for arg passing
@@ -757,17 +753,12 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
__ block_comment("c2i_unverified_entry {");
- __ load_klass(rscratch1, receiver);
- __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ cmp(rscratch1, tmp);
- __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ br(Assembler::EQ, ok);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- __ bind(ok);
// Method might have been compiled since the call site was patched to
// interpreted; if that is the case treat it as a miss so we can get
// the call site corrected.
+ __ ic_check(1 /* end_alignment */);
+ __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
+
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
__ cbz(rscratch1, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -1118,7 +1109,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ b(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1183,7 +1174,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
}
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1539,25 +1530,15 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// restoring them except rfp. rfp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
-
- const Register ic_reg = rscratch2;
const Register receiver = j_rarg0;
- Label hit;
Label exception_pending;
- assert_different_registers(ic_reg, receiver, rscratch1);
+ assert_different_registers(receiver, rscratch1);
__ verify_oop(receiver);
- __ cmp_klass(receiver, ic_reg, rscratch1);
- __ br(Assembler::EQ, hit);
-
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ __ ic_check(8 /* end_alignment */);
// Verified entry point must be aligned
- __ align(8);
-
- __ bind(hit);
-
int vep_offset = ((intptr_t)__ pc()) - start;
// If we have to make this method not-entrant we'll overwrite its
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index b53e427649781..18f310c746cd4 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -191,9 +191,6 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
- if (FLAG_IS_DEFAULT(AlwaysMergeDMB)) {
- FLAG_SET_DEFAULT(AlwaysMergeDMB, true);
- }
}
// Cortex A53
diff --git a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
index c895ff5cc0ec1..2bb53d16a3c97 100644
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
@@ -26,10 +26,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -168,22 +168,22 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
// Entry arguments:
- // rscratch2: CompiledICHolder
+ // rscratch2: CompiledICData
// j_rarg0: Receiver
// This stub is called from compiled code which has no callee-saved registers,
// so all registers except arguments are free at this point.
const Register recv_klass_reg = r10;
- const Register holder_klass_reg = r16; // declaring interface klass (DECC)
+ const Register holder_klass_reg = r16; // declaring interface klass (DEFC)
const Register resolved_klass_reg = r17; // resolved interface klass (REFC)
const Register temp_reg = r11;
const Register temp_reg2 = r15;
- const Register icholder_reg = rscratch2;
+ const Register icdata_reg = rscratch2;
Label L_no_such_interface;
- __ ldr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ ldr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ ldr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ ldr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
start_pc = __ pc();
diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad
index 6b18e76e6d7eb..1a833b08c4cf4 100644
--- a/src/hotspot/cpu/arm/arm.ad
+++ b/src/hotspot/cpu/arm/arm.ad
@@ -869,12 +869,7 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
#define R_RTEMP "R_R12"
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
- if (UseCompressedClassPointers) {
- st->print_cr("\tLDR_w " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
- st->print_cr("\tdecode_klass " R_RTEMP);
- } else {
- st->print_cr("\tLDR " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
- }
+ st->print_cr("\tLDR " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
st->print_cr("\tCMP " R_RTEMP ",R_R8" );
st->print ("\tB.NE SharedRuntime::handle_ic_miss_stub");
}
@@ -882,13 +877,7 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
- Register iCache = reg_to_register_object(Matcher::inline_cache_reg_encode());
- assert(iCache == Ricklass, "should be");
- Register receiver = R0;
-
- __ load_klass(Rtemp, receiver);
- __ cmp(Rtemp, iCache);
- __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
+ __ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@@ -1241,7 +1230,7 @@ encode %{
emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);
// Emit stubs for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
index 999309c02258d..16aeaa20c04b8 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -161,10 +161,7 @@ void LIR_Assembler::osr_entry() {
int LIR_Assembler::check_icache() {
- Register receiver = LIR_Assembler::receiverOpr()->as_register();
- int offset = __ offset();
- __ inline_cache_check(receiver, Ricklass);
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@@ -1950,7 +1947,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
// If not a single instruction, NativeMovConstReg::next_instruction_address()
// must jump over the whole following ldr_literal.
- // (See CompiledStaticCall::set_to_interpreted())
+ // (See CompiledDirectCall::set_to_interpreted())
#ifdef ASSERT
address ldr_site = __ pc();
#endif
diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
index c09e54e0e57ad..d9d042bb2e4e7 100644
--- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
@@ -43,16 +43,6 @@
// arm [macro]assembler) and used with care in the other C1 specific
// files.
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- Label verified;
- load_klass(Rtemp, receiver);
- cmp(Rtemp, iCache);
- b(verified, eq); // jump over alignment no-ops
- jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
- align(CodeEntryAlignment);
- bind(verified);
-}
-
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");
diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
index 62faa6170833b..9862a074a687f 100644
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
@@ -37,7 +37,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_arm.hpp"
diff --git a/src/hotspot/cpu/arm/compiledIC_arm.cpp b/src/hotspot/cpu/arm/compiledIC_arm.cpp
index 2d4187b7d6c6a..71389d2353d66 100644
--- a/src/hotspot/cpu/arm/compiledIC_arm.cpp
+++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
@@ -37,7 +36,7 @@
#if COMPILER2_OR_JVMCI
#define __ _masm.
// emit call stub, compiled java to interpreter
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
// set (empty), R9
@@ -59,7 +58,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
InlinedMetadata object_literal(nullptr);
// single instruction, see NativeMovConstReg::next_instruction_address() in
- // CompiledStaticCall::set_to_interpreted()
+ // CompiledDirectCall::set_to_interpreted()
__ ldr_literal(Rmethod, object_literal);
__ set_inst_mark(); // Who uses this?
@@ -87,32 +86,25 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
#undef __
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 10; // 4 in emit_to_interp_stub + 1 in Java_Static_Call
}
#endif // COMPILER2_OR_JVMCI
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// ARM doesn't use trampolines.
return 0;
}
// size of C2 call stub, compiled java to interpreter
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return 8 * NativeInstruction::instruction_size;
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -128,7 +120,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -144,7 +136,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/arm/icBuffer_arm.cpp b/src/hotspot/cpu/arm/icBuffer_arm.cpp
deleted file mode 100644
index e3a1c148ec6a0..0000000000000
--- a/src/hotspot/cpu/arm/icBuffer_arm.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_arm.hpp"
-#include "oops/oop.inline.hpp"
-
-#define __ masm->
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return (4 * Assembler::InstructionSize);
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
-
- InlinedAddress oop_literal((address) cached_value);
- __ ldr_literal(Ricklass, oop_literal);
- // FIXME: OK to remove reloc here?
- __ patchable_jump(entry_point, relocInfo::runtime_call_type, Rtemp);
- __ bind_literal(oop_literal);
- __ flush();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- address jump_address;
- jump_address = code_begin + NativeInstruction::instruction_size;
- NativeJump* jump = nativeJump_at(jump_address);
- return jump->jump_destination();
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
- return (void*)move->data();
-}
-
-#undef __
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index b827e69d02233..99d619bddb55a 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -28,6 +28,7 @@
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciEnv.hpp"
+#include "code/compiledIC.hpp"
#include "code/nativeInst.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
@@ -297,11 +298,13 @@ Address MacroAssembler::receiver_argument_address(Register params_base, Register
return Address(tmp, -Interpreter::stackElementSize);
}
+void MacroAssembler::align(int modulus, int target) {
+ int delta = target - offset();
+ while ((offset() + delta) % modulus != 0) nop();
+}
void MacroAssembler::align(int modulus) {
- while (offset() % modulus != 0) {
- nop();
- }
+ align(modulus, offset());
}
int MacroAssembler::set_last_Java_frame(Register last_java_sp,
@@ -1860,3 +1863,31 @@ void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2,
// Fallthrough: success
}
+
+int MacroAssembler::ic_check_size() {
+ return NativeInstruction::instruction_size * 7;
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ Register receiver = j_rarg0;
+ Register tmp1 = R4;
+ Register tmp2 = R5;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+
+ ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ldr(tmp2, Address(Ricklass, CompiledICData::speculated_klass_offset()));
+ cmp(tmp1, tmp2);
+
+ Label dont;
+ b(dont, eq);
+ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
+ bind(dont);
+ return uep_offset;
+}
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.hpp b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
index d9e49ab986c3a..691c8fa70ee8b 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
@@ -221,6 +221,7 @@ class MacroAssembler: public Assembler {
inline bool ignore_non_patchable_relocations() { return true; }
void align(int modulus);
+ void align(int modulus, int target);
// Support for VM calls
//
@@ -1077,6 +1078,9 @@ class MacroAssembler: public Assembler {
void safepoint_poll(Register tmp1, Label& slow_path);
void get_polling_page(Register dest);
void read_polling_page(Register dest, relocInfo::relocType rtype);
+
+ static int ic_check_size();
+ int ic_check(int end_alignment);
};
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
index 23ee01d335264..6a4062f29b3ba 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index 7006d7709813a..15b57188730df 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -385,7 +385,7 @@ class NativeMovConstReg: public NativeInstruction {
}
void set_pc_relative_offset(address addr, address pc);
address next_instruction_address() const {
- // NOTE: CompiledStaticCall::set_to_interpreted() calls this but
+ // NOTE: CompiledDirectCall::set_to_interpreted() calls this but
// are restricted to single-instruction ldr. No need to jump over
// several instructions.
assert(is_ldr_literal(), "Should only use single-instructions load");
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index 716c7b7575e9c..3792fab082ba6 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -24,15 +24,14 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
@@ -626,12 +625,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label skip_fixup;
const Register receiver = R0;
const Register holder_klass = Rtemp; // XXX should be OK for C2 but not 100% sure
- const Register receiver_klass = R4;
- __ load_klass(receiver_klass, receiver);
- __ ldr(holder_klass, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
- __ ldr(Rmethod, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
- __ cmp(receiver_klass, holder_klass);
+ __ ic_check(1 /* end_alignment */);
+ __ ldr(Rmethod, Address(Ricklass, CompiledICData::speculated_method_offset()));
__ ldr(Rtemp, Address(Rmethod, Method::code_offset()), eq);
__ cmp(Rtemp, 0, eq);
@@ -819,21 +815,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Unverified entry point
address start = __ pc();
- // Inline cache check, same as in C1_MacroAssembler::inline_cache_check()
const Register receiver = R0; // see receiverOpr()
- __ load_klass(Rtemp, receiver);
- __ cmp(Rtemp, Ricklass);
- Label verified;
-
- __ b(verified, eq); // jump over alignment no-ops too
- __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
- __ align(CodeEntryAlignment);
+ __ verify_oop(receiver);
+ // Inline cache check
+ __ ic_check(CodeEntryAlignment /* end_alignment */);
// Verified entry point
- __ bind(verified);
int vep_offset = __ pc() - start;
-
if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
// Object.hashCode, System.identityHashCode can pull the hashCode from the header word
// instead of doing a full VM transition once it's been computed.
diff --git a/src/hotspot/cpu/arm/vtableStubs_arm.cpp b/src/hotspot/cpu/arm/vtableStubs_arm.cpp
index 539e288f63fb2..1229b5073f506 100644
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp
@@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_arm.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/klass.inline.hpp"
@@ -160,7 +160,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass(Rclass, R0);
// Receiver subtype check against REFC.
- __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
+ __ ldr(Rintf, Address(Ricklass, CompiledICData::itable_refc_klass_offset()));
__ lookup_interface_method(// inputs: rec. class, interface, itable index
Rclass, Rintf, noreg,
// outputs: temp reg1, temp reg2
@@ -171,7 +171,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
start_pc = __ pc();
// Get Method* and entry point for compiler
- __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
+ __ ldr(Rintf, Address(Ricklass, CompiledICData::itable_defc_klass_offset()));
__ lookup_interface_method(// inputs: rec. class, interface, itable index
Rclass, Rintf, itable_index,
// outputs: temp reg1, temp reg2, temp reg3
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
index 47b681ce26be4..d78dec964cbb0 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
@@ -451,7 +451,7 @@ inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocT
// helper function for b
inline bool Assembler::is_within_range_of_b(address a, address pc) {
- // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+ // Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-6); // li field is from bit 6 to bit 29.
@@ -465,7 +465,7 @@ inline bool Assembler::is_within_range_of_b(address a, address pc) {
// helper functions for bcxx.
inline bool Assembler::is_within_range_of_bcxx(address a, address pc) {
- // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+ // Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-16); // bd field is from bit 16 to bit 29.
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index d316c2b3db2be..4b29bcf57e4d3 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -77,9 +77,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to slow case.
int LIR_Assembler::check_icache() {
- int offset = __ offset();
- __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 577dcae25f4bc..b379d4141a32b 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -40,29 +40,6 @@
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- const Register temp_reg = R12_scratch2;
- Label Lmiss;
-
- verify_oop(receiver, FILE_AND_LINE);
- load_klass_check_null(temp_reg, receiver, &Lmiss);
-
- if (TrapBasedICMissChecks && TrapBasedNullChecks) {
- trap_ic_miss_check(temp_reg, iCache);
- } else {
- Label Lok;
- cmpd(CCR0, temp_reg, iCache);
- beq(CCR0, Lok);
- bind(Lmiss);
- //load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
- calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
- mtctr(temp_reg);
- bctr();
- align(32, 12);
- bind(Lok);
- }
-}
-
void C1_MacroAssembler::explicit_null_check(Register base) {
Unimplemented();
diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
index 2ba6a6bca4e03..63914c5d1cb93 100644
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
@@ -34,7 +34,6 @@
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
diff --git a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
index 54f9cfa936797..355ac4815d551 100644
--- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
+++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
@@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
@@ -37,7 +36,7 @@
// ----------------------------------------------------------------------------
-// A PPC CompiledDirectStaticCall looks like this:
+// A PPC CompiledDirectCall looks like this:
//
// >>>> consts
//
@@ -79,7 +78,7 @@
const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
@@ -91,7 +90,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
MacroAssembler _masm(&cbuf);
// Start the stub.
- address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
+ address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeCache is full
}
@@ -135,7 +134,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// FIXME: Assert that the stub can be identified and patched.
// Java_to_interp_stub_size should be good.
- assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
+ assert((__ offset() - stub_start_offset) <= CompiledDirectCall::to_interp_stub_size(),
"should be good size");
assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
"must not confuse java_to_interp with trampoline stubs");
@@ -153,27 +152,20 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// Size of java_to_interp stub, this doesn't need to be accurate but it must
// be larger or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return 12 * BytesPerInstWord;
}
// Relocation entries for call stub, compiled java to interpreter.
// Used for optimization in Compile::Shorten_branches.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 5;
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -188,7 +180,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -204,7 +196,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/ppc/icBuffer_ppc.cpp b/src/hotspot/cpu/ppc/icBuffer_ppc.cpp
deleted file mode 100644
index 4157a5b0fd788..0000000000000
--- a/src/hotspot/cpu/ppc/icBuffer_ppc.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_ppc.hpp"
-#include "oops/oop.inline.hpp"
-
-#define __ masm.
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return MacroAssembler::load_const_size + MacroAssembler::b64_patchable_size;
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler masm(&code);
- // Note: even though the code contains an embedded metadata, we do not need reloc info
- // because
- // (1) the metadata is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
-
- // Load the oop ...
- __ load_const(R19_method, (address) cached_value, R0);
- // ... and jump to entry point.
- __ b64_patchable((address) entry_point, relocInfo::none);
-
- __ flush();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- NativeJump* jump = nativeJump_at(move->next_instruction_address());
- return jump->jump_destination();
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- void* o = (void*)move->data();
- return o;
-}
-
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index b9d1cdb19ac9d..fe19cf0350020 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
@@ -1195,6 +1196,81 @@ void MacroAssembler::post_call_nop() {
assert(is_post_call_nop(*(int*)(pc() - 4)), "post call not not found");
}
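+// Byte size of the inline cache check emitted by ic_check() below. It must match the emitted
+// code exactly, since it is used to pad the check so that the verified entry point ends up aligned.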
+int MacroAssembler::ic_check_size() {
+ bool implicit_null_checks_available = ImplicitNullChecks && os::zero_page_read_protected(),
+ use_fast_receiver_null_check = implicit_null_checks_available || TrapBasedNullChecks,
+ use_trap_based_null_check = !implicit_null_checks_available && TrapBasedNullChecks;
+
+ int num_ins;
+ if (use_fast_receiver_null_check && TrapBasedICMissChecks) {
+ num_ins = 3;
+ if (use_trap_based_null_check) num_ins += 1;
+ } else {
+ num_ins = 7;
+ if (!implicit_null_checks_available) num_ins += 2;
+ }
+ return num_ins * BytesPerInstWord;
+}
+
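+// Emit the inline cache check for the unverified entry point (UEP): verify that the receiver klass
+// matches the speculated klass held in the CompiledICData, trapping or branching to the ic_miss
+// stub otherwise. Returns the offset of the UEP.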
+int MacroAssembler::ic_check(int end_alignment) {
+ bool implicit_null_checks_available = ImplicitNullChecks && os::zero_page_read_protected(),
+ use_fast_receiver_null_check = implicit_null_checks_available || TrapBasedNullChecks,
+ use_trap_based_null_check = !implicit_null_checks_available && TrapBasedNullChecks;
+
+ Register receiver = R3_ARG1;
+ Register data = R19_inline_cache_reg;
+ Register tmp1 = R11_scratch1;
+ Register tmp2 = R12_scratch2;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, end_alignment, end_alignment - ic_check_size());
+
+ int uep_offset = offset();
+
+ if (use_fast_receiver_null_check && TrapBasedICMissChecks) {
+ // Fast version which uses SIGTRAP
+
+ if (use_trap_based_null_check) {
+ trap_null_check(receiver);
+ }
+ if (UseCompressedClassPointers) {
+ lwz(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ } else {
+ ld(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ }
+ ld(tmp2, in_bytes(CompiledICData::speculated_klass_offset()), data);
+ trap_ic_miss_check(tmp1, tmp2);
+
+ } else {
+ // Slower version which doesn't use SIGTRAP
+
+ // Load stub address using toc (fixed instruction size, unlike load_const_optimized)
+ calculate_address_from_global_toc(tmp1, SharedRuntime::get_ic_miss_stub(),
+ true, true, false); // 2 instructions
+ mtctr(tmp1);
+
+ if (!implicit_null_checks_available) {
+ cmpdi(CCR0, receiver, 0);
+ beqctr(CCR0);
+ }
+ if (UseCompressedClassPointers) {
+ lwz(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ } else {
+ ld(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ }
+ ld(tmp2, in_bytes(CompiledICData::speculated_klass_offset()), data);
+ cmpd(CCR0, tmp1, tmp2);
+ bnectr(CCR0);
+ }
+
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
+
+ return uep_offset;
+}
+
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index cddc8b92fa09a..ec370a450ac35 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -367,6 +367,9 @@ class MacroAssembler: public Assembler {
Register toc);
#endif
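+  // Inline cache check at the unverified entry point (UEP): compares the receiver klass against
+  // the speculated klass held in the CompiledICData and dispatches to the ic_miss stub on mismatch.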
+ static int ic_check_size();
+ int ic_check(int end_alignment);
+
protected:
// It is imperative that all calls into the VM are handled via the
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 783edf727b3bc..6f5e6dabec5a2 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -1978,42 +1978,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
- // Inline_cache contains a klass.
- Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
- Register receiver_klass = R12_scratch2; // tmp
-
- assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
- assert(R11_scratch1 == R11, "need prologue scratch register");
-
- // Check for nullptr argument if we don't have implicit null checks.
- if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
- if (TrapBasedNullChecks) {
- __ trap_null_check(R3_ARG1);
- } else {
- Label valid;
- __ cmpdi(CCR0, R3_ARG1, 0);
- __ bne_predict_taken(CCR0, valid);
- // We have a null argument, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ bind(valid);
- }
- }
- // Assume argument is not nullptr, load klass from receiver.
- __ load_klass(receiver_klass, R3_ARG1);
-
- if (TrapBasedICMissChecks) {
- __ trap_ic_miss_check(receiver_klass, ic_klass);
- } else {
- Label valid;
- __ cmpd(CCR0, receiver_klass, ic_klass);
- __ beq_predict_taken(CCR0, valid);
- // We have an unexpected klass, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ bind(valid);
- }
-
+ __ ic_check(CodeEntryAlignment);
// Argument is valid and klass is as expected, continue.
}
@@ -3452,7 +3417,7 @@ encode %{
__ bl(__ pc()); // Emits a relocation.
// The stub for call to interpreter.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -3507,7 +3472,7 @@ encode %{
// Create the nodes for loading the IC from the TOC.
loadConLNodesTuple loadConLNodes_IC =
- loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
+ loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) Universe::non_oop_word()),
OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
// Create the call node.
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index ebe918785edc0..5a080adc7a9fa 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "compiler/oopMap.hpp"
@@ -35,7 +34,6 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
@@ -1174,8 +1172,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
BLOCK_COMMENT("c2i unverified entry");
c2i_unverified_entry = __ pc();
- // inline_cache contains a compiledICHolder
- const Register ic = R19_method;
+ // inline_cache contains a CompiledICData
+ const Register ic = R19_inline_cache_reg;
const Register ic_klass = R11_scratch1;
const Register receiver_klass = R12_scratch2;
const Register code = R21_tmp1;
@@ -1186,45 +1184,10 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label call_interpreter;
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
- "klass offset should reach into any page");
- // Check for null argument if we don't have implicit null checks.
- if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
- if (TrapBasedNullChecks) {
- __ trap_null_check(R3_ARG1);
- } else {
- Label valid;
- __ cmpdi(CCR0, R3_ARG1, 0);
- __ bne_predict_taken(CCR0, valid);
- // We have a null argument, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ BIND(valid);
- }
- }
- // Assume argument is not null, load klass from receiver.
- __ load_klass(receiver_klass, R3_ARG1);
-
- __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);
-
- if (TrapBasedICMissChecks) {
- __ trap_ic_miss_check(receiver_klass, ic_klass);
- } else {
- Label valid;
- __ cmpd(CCR0, receiver_klass, ic_klass);
- __ beq_predict_taken(CCR0, valid);
- // We have an unexpected klass, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ BIND(valid);
- }
-
+ __ ic_check(4 /* end_alignment */);
+ __ ld(R19_method, CompiledICData::speculated_method_offset(), ic);
// Argument is valid and klass is as expected, continue.
- // Extract method from inline cache, verified entry point needs it.
- __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
- assert(R19_method == ic, "the inline cache register is dead here");
-
__ ld(code, method_(code));
__ cmpdi(CCR0, code, 0);
__ ld(ientry, method_(interpreter_entry)); // preloaded
@@ -1798,7 +1761,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
- stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
+ stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@@ -1891,7 +1854,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
- stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, call_pc);
+ stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@@ -2188,7 +2151,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
intptr_t frame_done_pc;
intptr_t oopmap_pc;
- Label ic_miss;
Label handle_pending_exception;
Register r_callers_sp = R21;
@@ -2212,19 +2174,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Check ic: object class == cached class?
if (!method_is_static) {
- Register ic = R19_inline_cache_reg;
- Register receiver_klass = r_temp_1;
-
- __ cmpdi(CCR0, R3_ARG1, 0);
- __ beq(CCR0, ic_miss);
- __ verify_oop(R3_ARG1, FILE_AND_LINE);
- __ load_klass(receiver_klass, R3_ARG1);
-
- __ cmpd(CCR0, receiver_klass, ic);
- __ bne(CCR0, ic_miss);
+ __ ic_check(4 /* end_alignment */);
}
-
// Generate the Verified Entry Point (VEP).
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
@@ -2704,16 +2656,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ b64_patchable((address)StubRoutines::forward_exception_entry(),
relocInfo::runtime_call_type);
- // Handler for a cache miss (out-of-line).
- // --------------------------------------------------------------------------
-
- if (!method_is_static) {
- __ bind(ic_miss);
-
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- }
-
// Done.
// --------------------------------------------------------------------------
diff --git a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
index fe4eb3df8f12f..28ba04d833bed 100644
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
@@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
@@ -181,13 +181,13 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass_check_null(rcvr_klass, R3_ARG1);
// Receiver subtype check against REFC.
- __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
+ __ ld(interface, CompiledICData::itable_refc_klass_offset(), R19_method);
__ lookup_interface_method(rcvr_klass, interface, noreg,
R0, tmp1, tmp2,
L_no_such_interface, /*return_method=*/ false);
// Get Method* and entrypoint for compiler
- __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
+ __ ld(interface, CompiledICData::itable_defc_klass_offset(), R19_method);
__ lookup_interface_method(rcvr_klass, interface, itable_index,
R19_method, tmp1, tmp2,
L_no_such_interface, /*return_method=*/ true);
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index 0bbf3771a04bc..e3ec023aef260 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -51,7 +51,6 @@
#endif
NEEDS_CLEANUP // remove this definitions ?
-const Register IC_Klass = t1; // where the IC klass is cached
const Register SYNC_header = x10; // synchronization header
const Register SHIFT_count = x10; // where count for shift operations must be
@@ -265,26 +264,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
- Register receiver = FrameMap::receiver_opr->as_register();
- Register ic_klass = IC_Klass;
- int start_offset = __ offset();
- Label dont;
- __ inline_cache_check(receiver, ic_klass, dont);
-
- // if icache check fails, then jump to runtime routine
- // Note: RECEIVER must still contain the receiver!
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- // We align the verified entry point unless the method body
- // (including its inline cache check) will fit in a single 64-byte
- // icache line.
- if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
- // force alignment after the cache check.
- __ align(CodeEntryAlignment);
- }
-
- __ bind(dont);
- return start_offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
@@ -1398,7 +1378,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
__ emit_static_call_stub();
- assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
+ assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
<= call_stub_size(), "stub too big");
__ end_a_stub();
}
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
index b088498e6fc08..ce23213776c08 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
@@ -68,7 +68,7 @@ friend class ArrayCopyStub;
enum {
// See emit_static_call_stub for detail
- // CompiledStaticCall::to_interp_stub_size() (14) + CompiledStaticCall::to_trampoline_stub_size() (1 + 3 + address)
+ // CompiledDirectCall::to_interp_stub_size() (14) + CompiledDirectCall::to_trampoline_stub_size() (1 + 3 + address)
_call_stub_size = 14 * NativeInstruction::instruction_size +
(NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size),
// See emit_exception_handler for detail
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
index 6c1dce0de1598..2961b1a91ceab 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -314,15 +314,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
verify_oop(obj);
}
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
- verify_oop(receiver);
- // explicit null check not needed since load from [klass_offset] causes a trap
- // check against inline cache
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
- assert_different_registers(receiver, iCache, t0, t2);
- cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
-}
-
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
diff --git a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
index b76163a30841d..9fa8939837a85 100644
--- a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
@@ -37,7 +37,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_riscv.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_riscv.hpp"
diff --git a/src/hotspot/cpu/riscv/compiledIC_riscv.cpp b/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
index e29dee56de8d8..fdb2bcb06ff97 100644
--- a/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
+++ b/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
@@ -27,7 +27,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -37,7 +36,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
// Stub is fixed up when the corresponding call is converted from
@@ -69,11 +68,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return MacroAssembler::static_call_stub_size();
}
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// Somewhat pessimistically, we count 4 instructions here (although
// there are only 3) because we sometimes emit an alignment nop.
// Trampoline stubs are always word aligned.
@@ -81,21 +80,14 @@ int CompiledStaticCall::to_trampoline_stub_size() {
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub);
@@ -112,7 +104,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -129,7 +121,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/riscv/icBuffer_riscv.cpp b/src/hotspot/cpu/riscv/icBuffer_riscv.cpp
deleted file mode 100644
index ab904817816fc..0000000000000
--- a/src/hotspot/cpu/riscv/icBuffer_riscv.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_riscv.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- // 6: auipc + ld + auipc + jalr + address(2 * instruction_size)
- return 6 * NativeInstruction::instruction_size;
-}
-
-#define __ masm->
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- assert_cond(code_begin != nullptr && entry_point != nullptr);
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
- // Note: even though the code contains an embedded value, we do not need reloc info
- // because
- // (1) the value is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
-
- address start = __ pc();
- Label l;
- __ ld(t1, l);
- __ far_jump(ExternalAddress(entry_point));
- __ align(wordSize);
- __ bind(l);
- __ emit_int64((intptr_t)cached_value);
- // Only need to invalidate the 1st two instructions - not the whole ic stub
- ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size());
- assert(__ pc() - start == ic_stub_code_size(), "must be");
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- NativeJump* jump = nativeJump_at(move->next_instruction_address());
- return jump->jump_destination();
-}
-
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- // The word containing the cached value is at the end of this IC buffer
- uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize);
- void* o = (void*)*p;
- return o;
-}
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index ce336c16aa718..96e07319e843f 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -27,6 +27,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -634,8 +635,8 @@ void MacroAssembler::unimplemented(const char* what) {
}
void MacroAssembler::emit_static_call_stub() {
- IncompressibleRegion ir(this); // Fixed length: see CompiledStaticCall::to_interp_stub_size().
- // CompiledDirectStaticCall::set_to_interpreted knows the
+ IncompressibleRegion ir(this); // Fixed length: see CompiledDirectCall::to_interp_stub_size().
+ // CompiledDirectCall::set_to_interpreted knows the
// exact layout of this stub.
mov_metadata(xmethod, (Metadata*)nullptr);
@@ -2542,7 +2543,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
-// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
+// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by .
// The receiver klass is in recv_klass.
@@ -3542,6 +3543,48 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
return trampoline_call(Address(entry, rh));
}
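+// Byte size of the inline cache check emitted by ic_check() below (no compressed instructions).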
+int MacroAssembler::ic_check_size() {
+  // No compressed instructions are emitted: ic_check() runs in an IncompressibleRegion, so every
+  // instruction is a fixed 4 bytes.
+ return (NativeInstruction::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
+ far_branch_size();
+}
+
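+// Emit the inline cache check (UEP): load the receiver klass and the speculated klass from the
+// CompiledICData in t1 and branch to the ic_miss stub if they differ. Returns the UEP offset.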
+int MacroAssembler::ic_check(int end_alignment) {
+ IncompressibleRegion ir(this);
+ Register receiver = j_rarg0;
+ Register data = t1;
+
+ Register tmp1 = t0; // t0 always scratch
+  // t2 is caller-saved, so anything live in it must already have been saved before this check.
+  // Hence we can clobber it.
+ Register tmp2 = t2;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, ic_check_size());
+ int uep_offset = offset();
+
+ if (UseCompressedClassPointers) {
+ lwu(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ } else {
+ ld(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ld(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ }
+
+ Label ic_hit;
+ beq(tmp1, tmp2, ic_hit);
+  // Note: far_jump is not fixed size.
+  // If it ever generates a movptr, the alignment/size calculation will be off.
+ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ bind(ic_hit);
+
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point.");
+ return uep_offset;
+}
+
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index d283654e6e179..63cfb22855180 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -1193,7 +1193,10 @@ class MacroAssembler: public Assembler {
//
// Return: the call PC or null if CodeCache is full.
address trampoline_call(Address entry);
+
address ic_call(address entry, jint method_index = 0);
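+  // Inline cache check support; end_alignment gives the required alignment of the verified
+  // entry point that follows the check.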
+ static int ic_check_size();
+ int ic_check(int end_alignment = NativeInstruction::instruction_size);
// Support for memory inc/dec
// n.b. increment/decrement calls with an Address destination will
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index a6f0959942414..10a80cd094024 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1808,14 +1808,13 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
assert_cond(st != nullptr);
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
- st->print_cr("\tlwu t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
- if (CompressedKlassPointers::shift() != 0) {
- st->print_cr("\tdecode_klass_not_null t0, t0");
- }
+ st->print_cr("\tlwu t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tlwu t2, [t1 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
} else {
- st->print_cr("\tld t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+    st->print_cr("\tld t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
+    st->print_cr("\tld t2, [t1 + CompiledICData::speculated_klass_offset()]\t# klass");
}
- st->print_cr("\tbeq t0, t1, ic_hit");
+ st->print_cr("\tbeq t0, t2, ic_hit");
st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
st->print_cr("\tic_hit:");
}
@@ -1825,15 +1824,11 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
+ __ ic_check(CodeEntryAlignment);
- Label skip;
- __ cmp_klass(j_rarg0, t1, t0, t2 /* call-clobbered t2 as a tmp */, skip);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- __ bind(skip);
-
- // These NOPs are critical so that verified entry point is properly
- // 4 bytes aligned for patching by NativeJump::patch_verified_entry()
- __ align(NativeInstruction::instruction_size);
+ // Verified entry point must be properly 4 bytes aligned for patching by NativeJump::patch_verified_entry().
+ // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
+ assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@@ -2402,7 +2397,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
} else {
// Emit stub for static call
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index 9f04e20ea3b73..7435b552d15de 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -29,7 +29,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -38,7 +37,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -622,10 +620,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Label ok;
-
- const Register holder = t1;
const Register receiver = j_rarg0;
+ const Register data = t1;
const Register tmp = t2; // A call-clobbered register not used for arg passing
// -------------------------------------------------------------------------
@@ -639,16 +635,10 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
__ block_comment("c2i_unverified_entry {");
- __ load_klass(t0, receiver, tmp);
- __ ld(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ ld(xmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ beq(t0, tmp, ok);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- __ bind(ok);
- // Method might have been compiled since the call site was patched to
- // interpreted; if that is the case treat it as a miss so we can get
- // the call site corrected.
+ __ ic_check();
+ __ ld(xmethod, Address(data, CompiledICData::speculated_method_offset()));
+
__ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
__ beqz(t0, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -985,7 +975,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ j(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1051,7 +1041,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
}
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1425,19 +1415,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register ic_reg = t1;
const Register receiver = j_rarg0;
- Label hit;
- Label exception_pending;
-
__ verify_oop(receiver);
- assert_different_registers(ic_reg, receiver, t0, t2);
- __ cmp_klass(receiver, ic_reg, t0, t2 /* call-clobbered t2 as a tmp */, hit);
-
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ assert_different_registers(receiver, t0, t1);
- // Verified entry point must be aligned
- __ align(8);
-
- __ bind(hit);
+ __ ic_check();
int vep_offset = ((intptr_t)__ pc()) - start;
@@ -1872,6 +1853,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ leave();
// Any exception pending?
+ Label exception_pending;
__ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
__ bnez(t0, exception_pending);
diff --git a/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp b/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
index 9d08796681f3f..5d945dbc32309 100644
--- a/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
@@ -27,10 +27,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_riscv.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -171,22 +171,22 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
// Entry arguments:
- // t1: CompiledICHolder
+ // t1: CompiledICData
// j_rarg0: Receiver
// This stub is called from compiled code which has no callee-saved registers,
// so all registers except arguments are free at this point.
const Register recv_klass_reg = x18;
- const Register holder_klass_reg = x19; // declaring interface klass (DECC)
+ const Register holder_klass_reg = x19; // declaring interface klass (DEFC)
const Register resolved_klass_reg = x30; // resolved interface klass (REFC)
const Register temp_reg = x28;
const Register temp_reg2 = x29;
- const Register icholder_reg = t1;
+ const Register icdata_reg = t1;
Label L_no_such_interface;
- __ ld(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ ld(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ ld(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ ld(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
start_pc = __ pc();
diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp
index 9bb143001b944..91cc7e611bfd1 100644
--- a/src/hotspot/cpu/s390/assembler_s390.hpp
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp
@@ -107,7 +107,7 @@ class RelAddr {
static bool is_in_range_of_RelAddr(address target, address pc, bool shortForm) {
// Guard against illegal branch targets, e.g. -1. Occurrences in
- // CompiledStaticCall and ad-file. Do not assert (it's a test
+ // CompiledDirectCall and ad-file. Do not assert (it's a test
// function!). Just return false in case of illegal operands.
if ((((uint64_t)target) & 0x0001L) != 0) return false;
if ((((uint64_t)pc) & 0x0001L) != 0) return false;
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index 355c66047c1b2..13c45bb9fe708 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -76,10 +76,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
// We fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
- Register receiver = receiverOpr()->as_register();
- int offset = __ offset();
- __ inline_cache_check(receiver, Z_inline_cache);
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
index 229216ef20d44..c8815f3a729a4 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
@@ -45,7 +45,7 @@
}
enum {
- _call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
+ _call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledDirectCall::emit_to_interp_stub.
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
};
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index 40edca6559aa4..5dddc7a756f4c 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -40,31 +40,6 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- Label ic_miss, ic_hit;
- verify_oop(receiver, FILE_AND_LINE);
- int klass_offset = oopDesc::klass_offset_in_bytes();
-
- if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
- if (VM_Version::has_CompareBranch()) {
- z_cgij(receiver, 0, Assembler::bcondEqual, ic_miss);
- } else {
- z_ltgr(receiver, receiver);
- z_bre(ic_miss);
- }
- }
-
- compare_klass_ptr(iCache, klass_offset, receiver, false);
- z_bre(ic_hit);
-
- // If icache check fails, then jump to runtime routine.
- // Note: RECEIVER must still contain the receiver!
- load_const_optimized(Z_R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
- z_br(Z_R1_scratch);
- align(CodeEntryAlignment);
- bind(ic_hit);
-}
-
void C1_MacroAssembler::explicit_null_check(Register base) {
ShouldNotCallThis(); // unused
}
diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
index 257148827be4e..decb3a1cafc31 100644
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
@@ -35,7 +35,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
diff --git a/src/hotspot/cpu/s390/compiledIC_s390.cpp b/src/hotspot/cpu/s390/compiledIC_s390.cpp
index 7ea90c1de7c69..3adcfbc85f185 100644
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp
@@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
@@ -40,7 +39,7 @@
#undef __
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
@@ -54,7 +53,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
- address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
+ address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeBuffer::expand failed.
}
@@ -81,27 +80,20 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return 2 * MacroAssembler::load_const_from_toc_size() +
2; // branch
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 5; // 4 in emit_java_to_interp + 1 in Java_Static_Call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -115,7 +107,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -131,7 +123,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/s390/icBuffer_s390.cpp b/src/hotspot/cpu/s390/icBuffer_s390.cpp
deleted file mode 100644
index 0dc936d6fad0c..0000000000000
--- a/src/hotspot/cpu/s390/icBuffer_s390.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_s390.hpp"
-#include "oops/oop.inline.hpp"
-
-#define __ masm.
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return MacroAssembler::load_const_size() + Assembler::z_brul_size();
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_oop, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler masm(&code);
- // Note: even though the code contains an embedded oop, we do not need reloc info
- // because
- // (1) the oop is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
-
- // Load the oop,
- __ load_const(Z_method, (address) cached_oop); // inline cache reg = Z_method
- // and do a tail-call (pc-relative).
- __ z_brul((address) entry_point);
- __ flush();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
- return MacroAssembler::get_target_addr_pcrel(move->next_instruction_address());
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
- return (void*)move->data();
-}
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 14fc07ec00794..0226d494c8958 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -1097,7 +1098,13 @@ void MacroAssembler::clear_mem(const Address& addr, unsigned int size) {
}
void MacroAssembler::align(int modulus) {
- while (offset() % modulus != 0) z_nop();
+ align(modulus, offset());
+}
+
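+// Emit nops until the position that currently lies (target - offset()) bytes ahead becomes a
+// multiple of 'modulus'; used to align the end of an upcoming fixed-size code sequence.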
+void MacroAssembler::align(int modulus, int target) {
+ assert(((modulus % 2 == 0) && (target % 2 == 0)), "needs to be even");
+ int delta = target - offset();
+ while ((offset() + delta) % modulus != 0) z_nop();
}
// Special version for non-relocateable code if required alignment
@@ -2150,6 +2157,45 @@ void MacroAssembler::call_VM_leaf_base(address entry_point) {
call_VM_leaf_base(entry_point, allow_relocation);
}
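+// Byte size of the inline cache check emitted by ic_check() below.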
+int MacroAssembler::ic_check_size() {
+ return 30 + (ImplicitNullChecks ? 0 : 6);
+}
+
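+// Emit the inline cache check (UEP): compare the receiver klass with the speculated klass from the
+// CompiledICData in Z_inline_cache and branch to the ic_miss stub on mismatch (and on a null
+// receiver when implicit null checks are disabled). Returns the UEP offset.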
+int MacroAssembler::ic_check(int end_alignment) {
+ Register R2_receiver = Z_ARG1;
+ Register R0_scratch = Z_R0_scratch;
+ Register R1_scratch = Z_R1_scratch;
+ Register R9_data = Z_inline_cache;
+ Label success, failure;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+ if (!ImplicitNullChecks) {
+ z_cgij(R2_receiver, 0, Assembler::bcondEqual, failure);
+ }
+
+ if (UseCompressedClassPointers) {
+ z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
+ } else {
+ z_lg(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
+ }
+ z_cg(R1_scratch, Address(R9_data, in_bytes(CompiledICData::speculated_klass_offset())));
+ z_bre(success);
+
+ bind(failure);
+ load_const(R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
+ z_br(R1_scratch);
+ bind(success);
+
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point, offset() = %d, end_alignment = %d", offset(), end_alignment);
+ return uep_offset;
+}
+
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
index bf14b42e2d1b3..924583abdf563 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
@@ -257,6 +257,7 @@ class MacroAssembler: public Assembler {
// nop padding
void align(int modulus);
+ void align(int modulus, int target);
void align_address(int modulus);
//
@@ -566,6 +567,9 @@ class MacroAssembler: public Assembler {
// Get the pc where the last call will return to. Returns _last_calls_return_pc.
inline address last_calls_return_pc();
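+  // Emit the inline cache check at the unverified entry point; returns its offset.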
+ static int ic_check_size();
+ int ic_check(int end_alignment);
+
private:
static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index fa53c73269196..5db2db9d32c73 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -1341,51 +1341,9 @@ void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
#endif
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  // This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
- const int ic_miss_offset = 2;
-
- // Inline_cache contains a klass.
- Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
- // ARG1 is the receiver oop.
- Register R2_receiver = Z_ARG1;
- int klass_offset = oopDesc::klass_offset_in_bytes();
- AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
- Register R1_ic_miss_stub_addr = Z_R1_scratch;
-
- // Null check of receiver.
- // This is the null check of the receiver that actually should be
- // done in the caller. It's here because in case of implicit null
- // checks we get it for free.
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
- "second word in oop should not require explicit null check.");
- if (!ImplicitNullChecks) {
- Label valid;
- if (VM_Version::has_CompareBranch()) {
- __ z_cgij(R2_receiver, 0, Assembler::bcondNotEqual, valid);
- } else {
- __ z_ltgr(R2_receiver, R2_receiver);
- __ z_bre(valid);
- }
- // The ic_miss_stub will handle the null pointer exception.
- __ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
- __ z_br(R1_ic_miss_stub_addr);
- __ bind(valid);
- }
-
- // Check whether this method is the proper implementation for the class of
- // the receiver (ic miss check).
- {
- Label valid;
- // Compare cached class against klass from receiver.
- // This also does an implicit null check!
- __ compare_klass_ptr(ic_klass, klass_offset, R2_receiver, false);
- __ z_bre(valid);
- // The inline cache points to the wrong method. Call the
- // ic_miss_stub to find the proper method.
- __ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
- __ z_br(R1_ic_miss_stub_addr);
- __ bind(valid);
- }
+ __ ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@@ -2146,7 +2104,7 @@ encode %{
assert(__ inst_mark() != nullptr, "emit_call_reloc must set_inst_mark()");
if (_method) { // Emit stub for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index ed1795cfa339f..11e1e617d8e3a 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -26,8 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/gcLocker.hpp"
@@ -35,7 +35,6 @@
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
@@ -1500,17 +1499,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
unsigned int wrapper_FrameDone;
unsigned int wrapper_CRegsSet;
Label handle_pending_exception;
- Label ic_miss;
//---------------------------------------------------------------------
// Unverified entry point (UEP)
//---------------------------------------------------------------------
- wrapper_UEPStart = __ offset();
// check ic: object class <-> cached class
- if (!method_is_static) __ nmethod_UEP(ic_miss);
- // Fill with nops (alignment of verified entry point).
- __ align(CodeEntryAlignment);
+ if (!method_is_static) {
+ wrapper_UEPStart = __ ic_check(CodeEntryAlignment /* end_alignment */);
+ }
//---------------------------------------------------------------------
// Verified entry point (VEP)
@@ -2026,13 +2023,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ restore_return_pc();
__ z_br(Z_R1_scratch);
- //---------------------------------------------------------------------
- // Handler for a cache miss (out-of-line)
- //---------------------------------------------------------------------
- __ call_ic_miss_handler(ic_miss, 0x77, 0, Z_R1_scratch);
__ flush();
-
-
//////////////////////////////////////////////////////////////////////
// end of code generation
//////////////////////////////////////////////////////////////////////
@@ -2318,9 +2309,6 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label skip_fixup;
{
Label ic_miss;
- const int klass_offset = oopDesc::klass_offset_in_bytes();
- const int holder_klass_offset = in_bytes(CompiledICHolder::holder_klass_offset());
- const int holder_metadata_offset = in_bytes(CompiledICHolder::holder_metadata_offset());
// Out-of-line call to ic_miss handler.
__ call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);
@@ -2329,27 +2317,11 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
__ align(CodeEntryAlignment);
c2i_unverified_entry = __ pc();
- // Check the pointers.
- if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
- __ z_ltgr(Z_ARG1, Z_ARG1);
- __ z_bre(ic_miss);
- }
- __ verify_oop(Z_ARG1, FILE_AND_LINE);
-
- // Check ic: object class <-> cached class
- // Compress cached class for comparison. That's more efficient.
- if (UseCompressedClassPointers) {
- __ z_lg(Z_R11, holder_klass_offset, Z_method); // Z_R11 is overwritten a few instructions down anyway.
- __ compare_klass_ptr(Z_R11, klass_offset, Z_ARG1, false); // Cached class can't be zero.
- } else {
- __ z_clc(klass_offset, sizeof(void *)-1, Z_ARG1, holder_klass_offset, Z_method);
- }
- __ z_brne(ic_miss); // Cache miss: call runtime to handle this.
-
+ __ ic_check(2);
+ __ z_lg(Z_method, Address(Z_inline_cache, CompiledICData::speculated_method_offset()));
// This def MUST MATCH code in gen_c2i_adapter!
const Register code = Z_R11;
- __ z_lg(Z_method, holder_metadata_offset, Z_method);
__ load_and_test_long(Z_R0, method_(code));
__ z_brne(ic_miss); // Cache miss: call runtime to handle this.
diff --git a/src/hotspot/cpu/s390/vtableStubs_s390.cpp b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
index 5a79369ceab47..573c23d796708 100644
--- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2021 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_s390.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
@@ -197,12 +197,12 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass(rcvr_klass, Z_ARG1);
// Receiver subtype check against REFC.
- __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_klass_offset()));
+ __ z_lg(interface, Address(Z_method, CompiledICData::itable_refc_klass_offset()));
__ lookup_interface_method(rcvr_klass, interface, noreg,
noreg, Z_R1, no_such_interface, /*return_method=*/ false);
// Get Method* and entrypoint for compiler
- __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_metadata_offset()));
+ __ z_lg(interface, Address(Z_method, CompiledICData::itable_defc_klass_offset()));
__ lookup_interface_method(rcvr_klass, interface, itable_index,
Z_method, Z_R1, no_such_interface, /*return_method=*/ true);
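
The itable stub now reads its resolved and declared interface klasses from `CompiledICData` rather than `CompiledICHolder`. As a rough mental model of the values those offset accessors address (illustrative layout only; the authoritative definition lives in `code/compiledIC.hpp`):

```
// Illustrative only: the kinds of values an inline cache's data object carries,
// named after the accessors used in the diffs above. The real CompiledICData
// layout and types may differ.
struct CompiledICDataModel {
  void* speculated_klass;    // receiver klass the IC was resolved for (UEP compare)
  void* speculated_method;   // resolved callee for that klass (loaded in c2i adapters)
  void* itable_defc_klass;   // interface declaring the method (itable dispatch)
  void* itable_refc_klass;   // interface referenced at the call site (subtype check)
};
```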
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index ff0726840d30a..3b7a3cec2d815 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -72,7 +72,6 @@ static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jl
NEEDS_CLEANUP // remove this definitions ?
-const Register IC_Klass = rax; // where the IC klass is cached
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be
@@ -336,23 +335,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
- Register receiver = FrameMap::receiver_opr->as_register();
- Register ic_klass = IC_Klass;
- const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
- if (!do_post_padding) {
- // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
- __ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
- }
- int offset = __ offset();
- __ inline_cache_check(receiver, IC_Klass);
- assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
- if (do_post_padding) {
- // force alignment after the cache check.
- // It's been verified to be aligned if !VerifyOops
- __ align(CodeEntryAlignment);
- }
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index b6a27abf0f37e..7088cf33cf646 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1207,9 +1207,10 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
__ move(result_reg, result);
}
+#ifndef _LP64
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
-LIR_Opr fixed_register_for(BasicType type) {
+static LIR_Opr fixed_register_for(BasicType type) {
switch (type) {
case T_FLOAT: return FrameMap::fpu0_float_opr;
case T_DOUBLE: return FrameMap::fpu0_double_opr;
@@ -1218,6 +1219,7 @@ LIR_Opr fixed_register_for(BasicType type) {
default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
}
}
+#endif
void LIRGenerator::do_Convert(Convert* x) {
#ifdef _LP64
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 78361a305aeeb..0c4544f5bc49e 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -34,10 +35,12 @@
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
+#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
+#include "utilities/globalDefinitions.hpp"
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case) {
const int aligned_mask = BytesPerWord -1;
@@ -60,9 +63,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
jcc(Assembler::notZero, slow_case);
}
- // Load object header
- movptr(hdr, Address(obj, hdr_offset));
-
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
const Register thread = r15_thread;
@@ -73,6 +73,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
lightweight_lock(obj, hdr, thread, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
+ // Load object header
+ movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
orptr(hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
@@ -134,9 +136,14 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {
- movptr(disp_hdr, Address(obj, hdr_offset));
- andptr(disp_hdr, ~(int32_t)markWord::lock_mask_in_place);
- lightweight_unlock(obj, disp_hdr, hdr, slow_case);
+#ifdef _LP64
+ lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case);
+#else
+ // This relies on the implementation of lightweight_unlock being able to handle
+ // that the reg_rax and thread Register parameters may alias each other.
+ get_thread(disp_hdr);
+ lightweight_unlock(obj, disp_hdr, disp_hdr, hdr, slow_case);
+#endif
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
@@ -295,30 +302,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
verify_oop(obj);
}
-
-
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- verify_oop(receiver);
- // explicit null check not needed since load from [klass_offset] causes a trap
- // check against inline cache
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
- int start_offset = offset();
-
- if (UseCompressedClassPointers) {
- load_klass(rscratch1, receiver, rscratch2);
- cmpptr(rscratch1, iCache);
- } else {
- cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
- }
- // if icache check fails, then jump to runtime routine
- // Note: RECEIVER must still contain the receiver!
- jump_cc(Assembler::notEqual,
- RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
-}
-
-
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
index 8b56f464f2739..2c24c0c2cfb17 100644
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
diff --git a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp
index b9b4e8af02c5f..6dc8d14064ad2 100644
--- a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,26 +73,74 @@ void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
__ jmp(continuation(), false /* maybe_short */);
}
-#ifdef _LP64
-int C2HandleAnonOMOwnerStub::max_size() const {
- // Max size of stub has been determined by testing with 0, in which case
- // C2CodeStubList::emit() will throw an assertion and report the actual size that
- // is needed.
- return DEBUG_ONLY(36) NOT_DEBUG(21);
+int C2FastUnlockLightweightStub::max_size() const {
+ return 128;
}
-void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
- __ bind(entry());
- Register mon = monitor();
- Register t = tmp();
- __ movptr(Address(mon, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), r15_thread);
- __ subl(Address(r15_thread, JavaThread::lock_stack_top_offset()), oopSize);
+void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) {
+ assert(_t == rax, "must be");
+
+ Label restore_held_monitor_count_and_slow_path;
+
+ { // Restore lock-stack and handle the unlock in runtime.
+
+ __ bind(_push_and_slow_path);
#ifdef ASSERT
- __ movl(t, Address(r15_thread, JavaThread::lock_stack_top_offset()));
- __ movptr(Address(r15_thread, t), 0);
+ // The obj was only cleared in debug.
+ __ movl(_t, Address(_thread, JavaThread::lock_stack_top_offset()));
+ __ movptr(Address(_thread, _t), _obj);
#endif
- __ jmp(continuation());
-}
+ __ addl(Address(_thread, JavaThread::lock_stack_top_offset()), oopSize);
+ }
+
+ { // Restore held monitor count and slow path.
+
+ __ bind(restore_held_monitor_count_and_slow_path);
+ // Restore held monitor count.
+ __ increment(Address(_thread, JavaThread::held_monitor_count_offset()));
+ // increment will always result in ZF = 0 (no overflows).
+ __ jmp(slow_path_continuation());
+ }
+
+ { // Handle monitor medium path.
+
+ __ bind(_check_successor);
+
+ Label fix_zf_and_unlocked;
+ const Register monitor = _mark;
+
+#ifndef _LP64
+ __ jmpb(restore_held_monitor_count_and_slow_path);
+#else // _LP64
+ // successor null check.
+ __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
+ __ jccb(Assembler::equal, restore_held_monitor_count_and_slow_path);
+
+ // Release lock.
+ __ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
+
+ // Fence.
+ // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
+ __ lock(); __ addl(Address(rsp, 0), 0);
+
+ // Recheck successor.
+ __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
+ // Observed a successor after the release and fence: we have handed off the monitor.
+ __ jccb(Assembler::notEqual, fix_zf_and_unlocked);
+
+ // Try to relock, if it fails the monitor has been handed over
+ // TODO: Caveat, this may fail due to deflation, which does
+ // not handle the monitor handoff. Currently only works
+ // due to the responsible thread.
+ __ xorptr(rax, rax);
+ __ lock(); __ cmpxchgptr(_thread, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ __ jccb (Assembler::equal, restore_held_monitor_count_and_slow_path);
#endif
+ __ bind(fix_zf_and_unlocked);
+ __ xorl(rax, rax);
+ __ jmp(unlocked_continuation());
+ }
+}
+
#undef __
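
The `_check_successor` block above is the out-of-line "medium path" for inflated monitors. A simplified, hedged model of that decision logic in plain C++ (assumed names; the real code operates on ObjectMonitor fields and reports the outcome through ZF):

```
#include <atomic>

// Toy monitor with just the fields the medium path touches.
struct MonitorModel {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> successor{nullptr};
};

enum class UnlockOutcome { Unlocked, SlowPath };

// Mirrors the LP64 branch of C2FastUnlockLightweightStub::emit's medium path.
static UnlockOutcome medium_path(MonitorModel& m, void* self) {
  if (m.successor.load() == nullptr) {
    return UnlockOutcome::SlowPath;                      // nobody to hand the monitor to
  }
  m.owner.store(nullptr, std::memory_order_release);     // release the lock
  std::atomic_thread_fence(std::memory_order_seq_cst);   // the dummy locked add
  if (m.successor.load() != nullptr) {
    return UnlockOutcome::Unlocked;                      // successor observed: handed off
  }
  void* expected = nullptr;
  if (m.owner.compare_exchange_strong(expected, self)) {
    return UnlockOutcome::SlowPath;                      // re-locked: finish unlock in runtime
  }
  return UnlockOutcome::Unlocked;                        // lost the CAS: handoff happened anyway
}

int main() {
  MonitorModel m;
  int self = 0;
  return medium_path(m, &self) == UnlockOutcome::SlowPath ? 0 : 1;  // no successor -> slow path
}
```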
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 7512a366e7ea0..b6ecde62af655 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -33,9 +33,13 @@
#include "opto/output.hpp"
#include "opto/opcodes.hpp"
#include "opto/subnode.hpp"
+#include "runtime/globals.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/powerOfTwo.hpp"
+#include "utilities/sizes.hpp"
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -554,6 +558,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data,
bool use_rtm, bool profile_rtm) {
+ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
// Ensure the register assignments are disjoint
assert(tmpReg == rax, "");
@@ -605,7 +610,8 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
if (LockingMode == LM_MONITOR) {
// Clear ZF so that we take the slow path at the DONE label. objReg is known to be not 0.
testptr(objReg, objReg);
- } else if (LockingMode == LM_LEGACY) {
+ } else {
+ assert(LockingMode == LM_LEGACY, "must be");
// Attempt stack-locking ...
orptr (tmpReg, markWord::unlocked_value);
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
@@ -620,10 +626,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// Next instruction set ZFlag == 1 (Success) if difference is less then one page.
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - (int)os::vm_page_size())) );
movptr(Address(boxReg, 0), tmpReg);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "");
- lightweight_lock(objReg, tmpReg, thread, scrReg, NO_COUNT);
- jmp(COUNT);
}
jmp(DONE_LABEL);
@@ -754,6 +756,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// Xcheck:jni is enabled.
void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
+ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
assert(boxReg == rax, "");
assert_different_registers(objReg, boxReg, tmpReg);
@@ -784,23 +787,6 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
}
// It's inflated.
- if (LockingMode == LM_LIGHTWEIGHT) {
- // If the owner is ANONYMOUS, we need to fix it - in an outline stub.
- testb(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t) ObjectMonitor::ANONYMOUS_OWNER);
-#ifdef _LP64
- if (!Compile::current()->output()->in_scratch_emit_size()) {
- C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmpReg, boxReg);
- Compile::current()->output()->add_stub(stub);
- jcc(Assembler::notEqual, stub->entry());
- bind(stub->continuation());
- } else
-#endif
- {
- // We can't easily implement this optimization on 32 bit because we don't have a thread register.
- // Call the slow-path instead.
- jcc(Assembler::notEqual, NO_COUNT);
- }
- }
#if INCLUDE_RTM_OPT
if (use_rtm) {
@@ -922,19 +908,14 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
jmpb (DONE_LABEL);
#endif
- if (LockingMode != LM_MONITOR) {
+ if (LockingMode == LM_LEGACY) {
bind (Stacked);
- if (LockingMode == LM_LIGHTWEIGHT) {
- mov(boxReg, tmpReg);
- lightweight_unlock(objReg, boxReg, tmpReg, NO_COUNT);
- jmp(COUNT);
- } else if (LockingMode == LM_LEGACY) {
- movptr(tmpReg, Address (boxReg, 0)); // re-fetch
- lock();
- cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
- }
+ movptr(tmpReg, Address (boxReg, 0)); // re-fetch
+ lock();
+ cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
// Intentional fall-thru into DONE_LABEL
}
+
bind(DONE_LABEL);
// ZFlag == 1 count in fast path
@@ -955,6 +936,247 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
bind(NO_COUNT);
}
+void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register rax_reg,
+ Register t, Register thread) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ assert(rax_reg == rax, "Used for CAS");
+ assert_different_registers(obj, box, rax_reg, t, thread);
+
+ // Handle inflated monitor.
+ Label inflated;
+ // Finish fast lock successfully. ZF value is irrelevant.
+ Label locked;
+ // Finish fast lock unsuccessfully. MUST jump with ZF == 0
+ Label slow_path;
+
+ if (DiagnoseSyncOnValueBasedClasses != 0) {
+ load_klass(rax_reg, obj, t);
+ movl(rax_reg, Address(rax_reg, Klass::access_flags_offset()));
+ testl(rax_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
+ jcc(Assembler::notZero, slow_path);
+ }
+
+ const Register mark = t;
+
+ { // Lightweight Lock
+
+ Label push;
+
+ const Register top = box;
+
+ // Load the mark.
+ movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Prefetch top.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ // Check for monitor (0b10).
+ testptr(mark, markWord::monitor_value);
+ jcc(Assembler::notZero, inflated);
+
+ // Check if lock-stack is full.
+ cmpl(top, LockStack::end_offset() - 1);
+ jcc(Assembler::greater, slow_path);
+
+ // Check if recursive.
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
+ jccb(Assembler::equal, push);
+
+ // Try to lock. Transition lock bits 0b01 => 0b00
+ movptr(rax_reg, mark);
+ orptr(rax_reg, markWord::unlocked_value);
+ andptr(mark, ~(int32_t)markWord::unlocked_value);
+ lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ jcc(Assembler::notEqual, slow_path);
+
+ bind(push);
+ // After successful lock, push object on lock-stack.
+ movptr(Address(thread, top), obj);
+ addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+ jmpb(locked);
+ }
+
+ { // Handle inflated monitor.
+ bind(inflated);
+
+ const Register tagged_monitor = mark;
+
+ // CAS owner (null => current thread).
+ xorptr(rax_reg, rax_reg);
+ lock(); cmpxchgptr(thread, Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ jccb(Assembler::equal, locked);
+
+ // Check if recursive.
+ cmpptr(thread, rax_reg);
+ jccb(Assembler::notEqual, slow_path);
+
+ // Recursive.
+ increment(Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+ }
+
+ bind(locked);
+ increment(Address(thread, JavaThread::held_monitor_count_offset()));
+ // Set ZF = 1
+ xorl(rax_reg, rax_reg);
+
+#ifdef ASSERT
+ // Check that locked label is reached with ZF set.
+ Label zf_correct;
+ jccb(Assembler::zero, zf_correct);
+ stop("Fast Lock ZF != 1");
+#endif
+
+ bind(slow_path);
+#ifdef ASSERT
+ // Check that slow_path label is reached with ZF not set.
+ jccb(Assembler::notZero, zf_correct);
+ stop("Fast Lock ZF != 0");
+ bind(zf_correct);
+#endif
+ // C2 uses the value of ZF to determine the continuation.
+}
+
+void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ assert(reg_rax == rax, "Used for CAS");
+ assert_different_registers(obj, reg_rax, t);
+
+ // Handle inflated monitor.
+ Label inflated, inflated_check_lock_stack;
+ // Finish fast unlock successfully. MUST jump with ZF == 1
+ Label unlocked;
+
+ // Assume success.
+ decrement(Address(thread, JavaThread::held_monitor_count_offset()));
+
+ const Register mark = t;
+ const Register top = reg_rax;
+
+ Label dummy;
+ C2FastUnlockLightweightStub* stub = nullptr;
+
+ if (!Compile::current()->output()->in_scratch_emit_size()) {
+ stub = new (Compile::current()->comp_arena()) C2FastUnlockLightweightStub(obj, mark, reg_rax, thread);
+ Compile::current()->output()->add_stub(stub);
+ }
+
+ Label& push_and_slow_path = stub == nullptr ? dummy : stub->push_and_slow_path();
+ Label& check_successor = stub == nullptr ? dummy : stub->check_successor();
+
+ { // Lightweight Unlock
+
+ // Load top.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ // Prefetch mark.
+ movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Check if obj is top of lock-stack.
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
+ // Top of lock stack was not obj. Must be monitor.
+ jcc(Assembler::notEqual, inflated_check_lock_stack);
+
+ // Pop lock-stack.
+ DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
+ subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+
+ // Check if recursive.
+ cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
+ jcc(Assembler::equal, unlocked);
+
+ // We elide the monitor check, let the CAS fail instead.
+
+ // Try to unlock. Transition lock bits 0b00 => 0b01
+ movptr(reg_rax, mark);
+ andptr(reg_rax, ~(int32_t)markWord::lock_mask);
+ orptr(mark, markWord::unlocked_value);
+ lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ jcc(Assembler::notEqual, push_and_slow_path);
+ jmp(unlocked);
+ }
+
+
+ { // Handle inflated monitor.
+ bind(inflated_check_lock_stack);
+#ifdef ASSERT
+ Label check_done;
+ subl(top, oopSize);
+ cmpl(top, in_bytes(JavaThread::lock_stack_base_offset()));
+ jcc(Assembler::below, check_done);
+ cmpptr(obj, Address(thread, top));
+ jccb(Assembler::notEqual, inflated_check_lock_stack);
+ stop("Fast Unlock lock on stack");
+ bind(check_done);
+ testptr(mark, markWord::monitor_value);
+ jccb(Assembler::notZero, inflated);
+ stop("Fast Unlock not monitor");
+#endif
+
+ bind(inflated);
+
+ // mark contains the tagged ObjectMonitor*.
+ const Register monitor = mark;
+
+#ifndef _LP64
+ // Check if recursive.
+ xorptr(reg_rax, reg_rax);
+ orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+ jcc(Assembler::notZero, check_successor);
+
+ // Check if the entry lists are empty.
+ movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+ orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
+ jcc(Assembler::notZero, check_successor);
+
+ // Release lock.
+ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
+#else // _LP64
+ Label recursive;
+
+ // Check if recursive.
+ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0);
+ jccb(Assembler::notEqual, recursive);
+
+ // Check if the entry lists are empty.
+ movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
+ orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+ jcc(Assembler::notZero, check_successor);
+
+ // Release lock.
+ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
+ jmpb(unlocked);
+
+ // Recursive unlock.
+ bind(recursive);
+ decrement(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+ xorl(t, t);
+#endif
+ }
+
+ bind(unlocked);
+ if (stub != nullptr) {
+ bind(stub->unlocked_continuation());
+ }
+
+#ifdef ASSERT
+ // Check that unlocked label is reached with ZF set.
+ Label zf_correct;
+ jccb(Assembler::zero, zf_correct);
+ stop("Fast Unlock ZF != 1");
+#endif
+
+ if (stub != nullptr) {
+ bind(stub->slow_path_continuation());
+ }
+#ifdef ASSERT
+ // Check that stub->continuation() label is reached with ZF not set.
+ jccb(Assembler::notZero, zf_correct);
+ stop("Fast Unlock ZF != 0");
+ bind(zf_correct);
+#endif
+ // C2 uses the value of ZF to determine the continuation.
+}
+
//-------------------------------------------------------------------------------------------
// Generic instructions support for use in .ad files C2 code generation
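
The lightweight fast paths above replace the displaced-header scheme with a per-thread lock-stack: locking pushes the oop, a repeated entry on top encodes recursion, and unlocking only pops when the oop is on top. A small standalone sketch of those invariants (assumed capacity and names, not the real LockStack):

```
#include <cassert>
#include <cstdint>
#include <vector>

struct LockStackModel {
  static constexpr size_t kCapacity = 8;   // stands in for LockStack::end_offset()
  std::vector<uintptr_t> oops;

  // Lock fast path: fail (-> slow path) when full; a recursive acquire is just
  // another push of the same oop, no CAS needed.
  bool push(uintptr_t obj) {
    if (oops.size() >= kCapacity) return false;
    oops.push_back(obj);
    return true;
  }

  // Unlock fast path: the oop must be on top; after popping, the same oop still
  // on top means the lock is held recursively and no mark-word update is needed.
  bool pop(uintptr_t obj, bool& still_recursive) {
    if (oops.empty() || oops.back() != obj) return false;  // inflated or not ours
    oops.pop_back();
    still_recursive = !oops.empty() && oops.back() == obj;
    return true;
  }
};

int main() {
  LockStackModel ls;
  bool recursive = false;
  assert(ls.push(0x1000) && ls.push(0x1000));   // outer + recursive acquire
  assert(ls.pop(0x1000, recursive) && recursive);
  assert(ls.pop(0x1000, recursive) && !recursive);
  return 0;
}
```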
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
index 151f2148372d5..26f7fb44aa939 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
@@ -43,6 +43,10 @@
bool use_rtm, bool profile_rtm);
void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
+ void fast_lock_lightweight(Register obj, Register box, Register rax_reg,
+ Register t, Register thread);
+ void fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread);
+
#if INCLUDE_RTM_OPT
void rtm_counters_update(Register abort_status, Register rtm_counters);
void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
diff --git a/src/hotspot/cpu/x86/compiledIC_x86.cpp b/src/hotspot/cpu/x86/compiledIC_x86.cpp
index 8fc001039fbd3..95b41f62b6aab 100644
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp
@@ -26,7 +26,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -36,7 +35,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
// movq rbx, 0
@@ -66,32 +65,25 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return NOT_LP64(10) // movl; jmp
LP64_ONLY(15); // movq (1+1+8); jmp (1+4)
}
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// x86 doesn't use trampolines.
return 0;
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -105,7 +97,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert(CompiledICLocker::is_safe(static_stub->addr()), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
@@ -122,7 +114,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/x86/icBuffer_x86.cpp b/src/hotspot/cpu/x86/icBuffer_x86.cpp
deleted file mode 100644
index af374b5741659..0000000000000
--- a/src/hotspot/cpu/x86/icBuffer_x86.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_x86.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- // Worst case, if destination is not a near call:
- // lea rax, lit1
- // lea scratch, lit2
- // jmp scratch
-
- // Best case
- // lea rax, lit1
- // jmp lit2
-
- int best = NativeMovConstReg::instruction_size + NativeJump::instruction_size;
- int worst = 2 * NativeMovConstReg::instruction_size + 3;
- return MAX2(best, worst);
-}
-
-
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
- // note: even though the code contains an embedded value, we do not need reloc info
- // because
- // (1) the value is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
- // assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
- masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type));
- masm->jump(ExternalAddress(entry_point));
-}
-
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- address jmp = move->next_instruction_address();
- NativeInstruction* ni = nativeInstruction_at(jmp);
- if (ni->is_jump()) {
- NativeJump* jump = nativeJump_at(jmp);
- return jump->jump_destination();
- } else {
- assert(ni->is_far_jump(), "unexpected instruction");
- NativeFarJump* jump = nativeFarJump_at(jmp);
- return jump->jump_destination();
- }
-}
-
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- // creation also verifies the object
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
- // Verifies the jump
- address jmp = move->next_instruction_address();
- NativeInstruction* ni = nativeInstruction_at(jmp);
- if (ni->is_jump()) {
- NativeJump* jump = nativeJump_at(jmp);
- } else {
- assert(ni->is_far_jump(), "unexpected instruction");
- NativeFarJump* jump = nativeFarJump_at(jmp);
- }
- void* o = (void*)move->data();
- return o;
-}
diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp
index f5f83ae21f475..33570f3155b15 100644
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1192,8 +1192,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
const Register thread = lock_reg;
get_thread(thread);
#endif
- // Load object header, prepare for CAS from unlocked to locked.
- movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
lightweight_lock(obj_reg, swap_reg, thread, tmp_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load immediate 1 into swap_reg %rax
@@ -1311,20 +1309,13 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
- const Register thread = r15_thread;
+ lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
#else
- const Register thread = header_reg;
- get_thread(thread);
+ // This relies on the implementation of lightweight_unlock being able to handle
+ // that the reg_rax and thread Register parameters may alias each other.
+ get_thread(swap_reg);
+ lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case);
#endif
- // Handle unstructured locking.
- Register tmp = swap_reg;
- movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
- cmpptr(obj_reg, Address(thread, tmp, Address::times_1, -oopSize));
- jcc(Assembler::notEqual, slow_case);
- // Try to swing header from locked to unlocked.
- movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
movptr(header_reg, Address(swap_reg,
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index ba4b089c7aa6e..f0e7a08dd5f2a 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
@@ -1341,13 +1342,45 @@ void MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
// Needs full 64-bit immediate for later patching.
- mov64(rax, (intptr_t)Universe::non_oop_word());
+ mov64(rax, (int64_t)Universe::non_oop_word());
#else
movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
call(AddressLiteral(entry, rh));
}
+int MacroAssembler::ic_check_size() {
+ return LP64_ONLY(14) NOT_LP64(12);
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
+ Register data = rax;
+ Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after it.
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+
+ if (UseCompressedClassPointers) {
+ movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
+ } else {
+ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
+ }
+
+ // if inline cache check fails, then jump to runtime routine
+ jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
+
+ return uep_offset;
+}
+
void MacroAssembler::emit_static_call_stub() {
// Static stub relocation also tags the Method* in the code-stream.
mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
@@ -4087,8 +4120,9 @@ static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister r
}
}
-int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, bool save_fpu,
- int& gp_area_size, int& fp_area_size, int& xmm_area_size) {
+static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
+ bool save_fpu, int& gp_area_size,
+ int& fp_area_size, int& xmm_area_size) {
gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
StackAlignmentInBytes);
@@ -4354,7 +4388,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
-// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
+// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by .
// The receiver klass is in recv_klass.
@@ -9877,68 +9911,116 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
}
// Implements lightweight-locking.
-// Branches to slow upon failure to lock the object, with ZF cleared.
-// Falls through upon success with unspecified ZF.
//
// obj: the object to be locked
-// hdr: the (pre-loaded) header of the object, must be rax
+// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
-void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
- assert(hdr == rax, "header must be in rax for cmpxchg");
- assert_different_registers(obj, hdr, thread, tmp);
-
- // First we need to check if the lock-stack has room for pushing the object reference.
- // Note: we subtract 1 from the end-offset so that we can do a 'greater' comparison, instead
- // of 'greaterEqual' below, which readily clears the ZF. This makes C2 code a little simpler and
- // avoids one branch.
- cmpl(Address(thread, JavaThread::lock_stack_top_offset()), LockStack::end_offset() - 1);
- jcc(Assembler::greater, slow);
-
- // Now we attempt to take the fast-lock.
- // Clear lock_mask bits (locked state).
- andptr(hdr, ~(int32_t)markWord::lock_mask_in_place);
- movptr(tmp, hdr);
- // Set unlocked_value bit.
- orptr(hdr, markWord::unlocked_value);
- lock();
- cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
+void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
+ assert(reg_rax == rax, "");
+ assert_different_registers(obj, reg_rax, thread, tmp);
+
+ Label push;
+ const Register top = tmp;
+
+ // Preload the markWord. It is important that this is the first
+ // instruction emitted as it is part of C1's null check semantics.
+ movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Load top.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ // Check if the lock-stack is full.
+ cmpl(top, LockStack::end_offset());
+ jcc(Assembler::greaterEqual, slow);
+
+ // Check for recursion.
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
+ jcc(Assembler::equal, push);
+
+ // Check header for monitor (0b10).
+ testptr(reg_rax, markWord::monitor_value);
+ jcc(Assembler::notZero, slow);
+
+ // Try to lock. Transition lock bits 0b01 => 0b00
+ movptr(tmp, reg_rax);
+ andptr(tmp, ~(int32_t)markWord::unlocked_value);
+ orptr(reg_rax, markWord::unlocked_value);
+ lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
jcc(Assembler::notEqual, slow);
- // If successful, push object to lock-stack.
- movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
- movptr(Address(thread, tmp), obj);
- incrementl(tmp, oopSize);
- movl(Address(thread, JavaThread::lock_stack_top_offset()), tmp);
+ // Restore top, CAS clobbers register.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ bind(push);
+ // After successful lock, push object on lock-stack.
+ movptr(Address(thread, top), obj);
+ incrementl(top, oopSize);
+ movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}
// Implements lightweight-unlocking.
-// Branches to slow upon failure, with ZF cleared.
-// Falls through upon success, with unspecified ZF.
//
// obj: the object to be unlocked
-// hdr: the (pre-loaded) header of the object, must be rax
+// reg_rax: rax
+// thread: the thread
// tmp: a temporary register
-void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
- assert(hdr == rax, "header must be in rax for cmpxchg");
- assert_different_registers(obj, hdr, tmp);
-
- // Mark-word must be lock_mask now, try to swing it back to unlocked_value.
- movptr(tmp, hdr); // The expected old value
- orptr(tmp, markWord::unlocked_value);
- lock();
- cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
+//
+// x86_32 Note: reg_rax and thread may alias each other due to limited register
+// availability.
+void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
+ assert(reg_rax == rax, "");
+ assert_different_registers(obj, reg_rax, tmp);
+ LP64_ONLY(assert_different_registers(obj, reg_rax, thread, tmp);)
+
+ Label unlocked, push_and_slow;
+ const Register top = tmp;
+
+ // Check if obj is top of lock-stack.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
jcc(Assembler::notEqual, slow);
- // Pop the lock object from the lock-stack.
-#ifdef _LP64
- const Register thread = r15_thread;
-#else
- const Register thread = rax;
- get_thread(thread);
-#endif
+
+ // Pop lock-stack.
+ DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+
+ // Check if recursive.
+ cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
+ jcc(Assembler::equal, unlocked);
+
+ // Not recursive. Check header for monitor (0b10).
+ movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
+ testptr(reg_rax, markWord::monitor_value);
+ jcc(Assembler::notZero, push_and_slow);
+
+#ifdef ASSERT
+ // Check header not unlocked (0b01).
+ Label not_unlocked;
+ testptr(reg_rax, markWord::unlocked_value);
+ jcc(Assembler::zero, not_unlocked);
+ stop("lightweight_unlock already unlocked");
+ bind(not_unlocked);
+#endif
+
+ // Try to unlock. Transition lock bits 0b00 => 0b01
+ movptr(tmp, reg_rax);
+ orptr(tmp, markWord::unlocked_value);
+ lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
+ jcc(Assembler::equal, unlocked);
+
+ bind(push_and_slow);
+ // Restore lock-stack and handle the unlock in runtime.
+ if (thread == reg_rax) {
+ // On x86_32 we may lose the thread.
+ get_thread(thread);
+ }
#ifdef ASSERT
- movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
- movptr(Address(thread, tmp), 0);
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+ movptr(Address(thread, top), obj);
#endif
+ addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+ jmp(slow);
+
+ bind(unlocked);
}
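
For the mark-word side of the same protocol, the new `lightweight_lock`/`lightweight_unlock` only ever flip the low lock bits with a CAS and divert anything carrying the monitor bit. A hedged standalone model of those transitions (bit values taken from the comments above; everything else is illustrative):

```
#include <atomic>
#include <cassert>
#include <cstdint>

constexpr uintptr_t kUnlockedBit = 0b01;   // markWord::unlocked_value
constexpr uintptr_t kMonitorBit  = 0b10;   // markWord::monitor_value

// Lock: expect an unlocked mark (low bits 0b01) and CAS in the locked form (0b00).
static bool try_lightweight_lock(std::atomic<uintptr_t>& mark) {
  uintptr_t observed = mark.load();
  if (observed & kMonitorBit) return false;            // inflated: slow path
  uintptr_t expected = observed | kUnlockedBit;
  uintptr_t locked   = observed & ~kUnlockedBit;
  return mark.compare_exchange_strong(expected, locked);
}

// Unlock: expect the locked mark (low bits 0b00) and CAS back the unlocked form.
static bool try_lightweight_unlock(std::atomic<uintptr_t>& mark) {
  uintptr_t observed = mark.load();
  if (observed & kMonitorBit) return false;            // inflated: slow path
  uintptr_t expected = observed & ~(kUnlockedBit | kMonitorBit);
  uintptr_t unlocked = expected | kUnlockedBit;
  return mark.compare_exchange_strong(expected, unlocked);
}

int main() {
  std::atomic<uintptr_t> mark{0x1000 | kUnlockedBit};   // neutral, unlocked mark
  assert(try_lightweight_lock(mark));
  assert(try_lightweight_unlock(mark));
  assert(mark.load() == (0x1000 | kUnlockedBit));
  return 0;
}
```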
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index 4b30168452796..4789b63decc6c 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -896,6 +896,8 @@ class MacroAssembler: public Assembler {
// Emit the CompiledIC call idiom
void ic_call(address entry, jint method_index = 0);
+ static int ic_check_size();
+ int ic_check(int end_alignment);
void emit_static_call_stub();
@@ -2031,8 +2033,8 @@ class MacroAssembler: public Assembler {
void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
- void lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
- void lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow);
+ void lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
+ void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
};
/**
diff --git a/src/hotspot/cpu/x86/peephole_x86_64.cpp b/src/hotspot/cpu/x86/peephole_x86_64.cpp
index 8c956aeb05393..92a29490edaf8 100644
--- a/src/hotspot/cpu/x86/peephole_x86_64.cpp
+++ b/src/hotspot/cpu/x86/peephole_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,8 @@
// lea d, [s1 + s2] and
// mov d, s1; shl d, s2 into
// lea d, [s1 << s2] with s2 = 1, 2, 3
-bool lea_coalesce_helper(Block* block, int block_index, PhaseCFG* cfg_, PhaseRegAlloc* ra_,
- MachNode* (*new_root)(), uint inst0_rule, bool imm) {
+static bool lea_coalesce_helper(Block* block, int block_index, PhaseCFG* cfg_, PhaseRegAlloc* ra_,
+ MachNode* (*new_root)(), uint inst0_rule, bool imm) {
MachNode* inst0 = block->get_node(block_index)->as_Mach();
assert(inst0->rule() == inst0_rule, "sanity");
@@ -136,7 +136,7 @@ bool lea_coalesce_helper(Block* block, int block_index, PhaseCFG* cfg_, PhaseReg
// This helper func takes a condition and returns the flags that need to be set for the condition
// It uses the same flags as the test instruction, so if the e.g. the overflow bit is required,
// this func returns clears_overflow, as that is what the test instruction does and what the downstream path expects
-juint map_condition_to_required_test_flags(Assembler::Condition condition) {
+static juint map_condition_to_required_test_flags(Assembler::Condition condition) {
switch (condition) {
case Assembler::Condition::zero: // Same value as equal
case Assembler::Condition::notZero: // Same value as notEqual
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
index 571160523cbe4..febc1b2c3b143 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
@@ -36,7 +36,6 @@
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
@@ -944,25 +943,18 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Register holder = rax;
+ Register data = rax;
Register receiver = rcx;
Register temp = rbx;
{
-
- Label missed;
- __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
- __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ jcc(Assembler::notEqual, missed);
+ __ ic_check(1 /* end_alignment */);
+ __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
-
- __ bind(missed);
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}
address c2i_entry = __ pc();
@@ -1449,23 +1441,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// as far as the interpreter and the compiler(s) are concerned.
- const Register ic_reg = rax;
const Register receiver = rcx;
- Label hit;
Label exception_pending;
__ verify_oop(receiver);
- __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
- __ jcc(Assembler::equal, hit);
-
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
// verified entry must be aligned for code patching.
- // and the first 5 bytes must be in the same cache line
- // if we align at 8 then we will be sure 5 bytes are in the same line
- __ align(8);
-
- __ bind(hit);
+ __ ic_check(8 /* end_alignment */);
int vep_offset = ((intptr_t)__ pc()) - start;
@@ -1713,8 +1694,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- // Load object header
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
}
__ bind(count_mon);
@@ -1872,9 +1851,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ dec_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, swap_reg, thread, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index cab50e85ec51c..c666f982d0f52 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
@@ -42,7 +41,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -1000,20 +998,14 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Label ok;
- Register holder = rax;
+ Register data = rax;
Register receiver = j_rarg0;
Register temp = rbx;
{
- __ load_klass(temp, receiver, rscratch1);
- __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ jcc(Assembler::equal, ok);
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- __ bind(ok);
+ __ ic_check(1 /* end_alignment */);
+ __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
@@ -1450,7 +1442,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, __ pc());
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1487,7 +1479,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, __ pc());
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1883,25 +1875,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// restoring them except rbp. rbp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
-
- const Register ic_reg = rax;
const Register receiver = j_rarg0;
- Label hit;
Label exception_pending;
- assert_different_registers(ic_reg, receiver, rscratch1, rscratch2);
+ assert_different_registers(receiver, rscratch1, rscratch2);
__ verify_oop(receiver);
- __ load_klass(rscratch1, receiver, rscratch2);
- __ cmpq(ic_reg, rscratch1);
- __ jcc(Assembler::equal, hit);
-
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- // Verified entry point must be aligned
- __ align(8);
-
- __ bind(hit);
+ __ ic_check(8 /* end_alignment */);
int vep_offset = ((intptr_t)__ pc()) - start;
@@ -2190,8 +2170,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- // Load object header
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
}
__ bind(count_mon);
@@ -2334,9 +2312,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ dec_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}
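
The two SharedRuntime hunks above replace an open-coded receiver-klass compare against CompiledICHolder with the shared MacroAssembler::ic_check() sequence reading from CompiledICData. In outline, an inline cache speculates on a single receiver class and branches to the IC-miss stub when the speculation fails. A minimal standalone sketch of that idea follows; the names (IcData, dispatch, foo_entry) are invented for the example and are not HotSpot types.

```cpp
// Illustrative sketch only -- not HotSpot code. It models, in spirit, what the
// ic_check() entry sequence does: compare the receiver's actual class against
// the class speculated by the inline cache, and fall back to a miss path.
#include <cstdio>

struct Klass { const char* name; };

struct IcData {                 // stands in for CompiledICData
  const Klass* speculated_klass;
  void (*speculated_entry)();   // stands in for the speculated method entry
};

struct Oop { const Klass* klass; };  // simplified receiver header

static void ic_miss_stub() { std::puts("IC miss: re-resolve call site"); }

static void foo_entry() { std::puts("fast path: speculated method"); }

// Hit -> call the speculated target directly; miss -> let the runtime fix
// the call site.
static void dispatch(const Oop* receiver, const IcData* data) {
  if (receiver->klass == data->speculated_klass) {
    data->speculated_entry();   // monomorphic fast path
  } else {
    ic_miss_stub();             // slow path
  }
}

int main() {
  Klass a{"A"}, b{"B"};
  IcData data{&a, foo_entry};
  Oop x{&a}, y{&b};
  dispatch(&x, &data);  // hit
  dispatch(&y, &data);  // miss
}
```
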
diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
index 3be83eed9d22f..bc1cbdbba26b5 100644
--- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
@@ -279,7 +279,7 @@ uint32_t _crc32c_pow_2k_table[TILL_CYCLE]; // because _crc32c_pow_2k_table[TILL_
// A. Kadatch and B. Jenkins / Everything we know about CRC but afraid to forget September 3, 2010 8
// Listing 1: Multiplication of normalized polynomials
// "a" and "b" occupy D least significant bits.
-uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
+static uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
uint32_t product = 0;
uint32_t b_pow_x_table[D + 1]; // b_pow_x_table[k] = (b * x**k) mod P
b_pow_x_table[0] = b;
@@ -303,7 +303,7 @@ uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
#undef P
// A. Kadatch and B. Jenkins / Everything we know about CRC but afraid to forget September 3, 2010 9
-void crc32c_init_pow_2k(void) {
+static void crc32c_init_pow_2k(void) {
// _crc32c_pow_2k_table(0) =
// x^(2^k) mod P(x) = x mod P(x) = x
// Since we are operating on a reflected values
@@ -318,7 +318,7 @@ void crc32c_init_pow_2k(void) {
}
// x^N mod P(x)
-uint32_t crc32c_f_pow_n(uint32_t n) {
+static uint32_t crc32c_f_pow_n(uint32_t n) {
// result = 1 (polynomial)
uint32_t one, result = 0x80000000, i = 0;
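
These crc32c helpers (and many similar hunks later in the patch) gain internal linkage. Marking translation-unit-local functions static keeps them out of the global symbol table, avoids clashes with same-named helpers in other files, and lets the compiler inline or discard them freely. A toy illustration of the pattern, not taken from the patch:

```cpp
#include <cstdio>

// Helper visible only inside this translation unit.
static unsigned rotl32(unsigned v, int s) {
  return (v << s) | (v >> (32 - s));
}

// The externally visible entry point keeps external linkage.
unsigned mix(unsigned v) {
  return rotl32(v, 13) ^ 0x9e3779b9u;
}

int main() {
  std::printf("%u\n", mix(1u));
  return 0;
}
```
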
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index cfc16acabc674..bf5e052e16702 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -770,6 +770,10 @@ class VM_Version : public Abstract_VM_Version {
return true;
}
+ constexpr static bool supports_recursive_lightweight_locking() {
+ return true;
+ }
+
// For AVX CPUs only. f16c support is disabled if UseAVX == 0.
static bool supports_float16() {
return supports_f16c() || supports_avx512vl();
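
The new capability query is constexpr, so it can be folded wherever the compiler sees it. As a hedged illustration only (HotSpot consumes the flag through its own shared locking code, not necessarily like this), a constexpr predicate can drive compile-time selection:

```cpp
// Sketch of using a constexpr capability flag; CpuFeatures and lock_path are
// invented names, not HotSpot's.
#include <cstdio>

struct CpuFeatures {
  constexpr static bool supports_recursive_lightweight_locking() { return true; }
};

template <typename Features>
void lock_path() {
  if constexpr (Features::supports_recursive_lightweight_locking()) {
    std::puts("emit recursive-capable fast path");
  } else {
    std::puts("emit plain fast path");
  }
}

int main() { lock_path<CpuFeatures>(); }
```
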
diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp
index 0e78e0274d7f2..398f2e37eb5cc 100644
--- a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp
@@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -176,21 +176,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#endif /* PRODUCT */
// Entry arguments:
- // rax: CompiledICHolder
+ // rax: CompiledICData
// rcx: Receiver
// Most registers are in use; we'll use rax, rbx, rcx, rdx, rsi, rdi
// (If we need to make rsi, rdi callee-save, do a push/pop here.)
const Register recv_klass_reg = rsi;
- const Register holder_klass_reg = rax; // declaring interface klass (DECC)
+ const Register holder_klass_reg = rax; // declaring interface klass (DEFC)
const Register resolved_klass_reg = rdi; // resolved interface klass (REFC)
const Register temp_reg = rdx;
const Register method = rbx;
- const Register icholder_reg = rax;
+ const Register icdata_reg = rax;
const Register receiver = rcx;
- __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ movptr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ movptr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ movptr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
Label L_no_such_interface;
diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
index f162a651183f9..158d6f9c6922b 100644
--- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
@@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -168,21 +168,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#endif // PRODUCT
// Entry arguments:
- // rax: CompiledICHolder
+ // rax: CompiledICData
// j_rarg0: Receiver
// Most registers are in use; we'll use rax, rbx, r10, r11
// (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
const Register recv_klass_reg = r10;
- const Register holder_klass_reg = rax; // declaring interface klass (DECC)
+ const Register holder_klass_reg = rax; // declaring interface klass (DEFC)
const Register resolved_klass_reg = r14; // resolved interface klass (REFC)
const Register temp_reg = r11;
const Register temp_reg2 = r13;
const Register method = rbx;
- const Register icholder_reg = rax;
+ const Register icdata_reg = rax;
- __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ movptr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ movptr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ movptr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
Label L_no_such_interface;
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index a31548eb8c3f9..6df02d280bcef 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1358,7 +1358,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
return offset;
}
-Assembler::Width widthForType(BasicType bt) {
+static Assembler::Width widthForType(BasicType bt) {
if (bt == T_BYTE) {
return Assembler::B;
} else if (bt == T_SHORT) {
diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad
index 9aa0051043575..2fe655a576778 100644
--- a/src/hotspot/cpu/x86/x86_32.ad
+++ b/src/hotspot/cpu/x86/x86_32.ad
@@ -504,7 +504,7 @@ void emit_cmpfp_fixup(MacroAssembler& _masm) {
__ bind(exit);
}
-void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
+static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
Label done;
__ movl(dst, -1);
__ jcc(Assembler::parity, done);
@@ -1383,24 +1383,12 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
MacroAssembler masm(&cbuf);
-#ifdef ASSERT
- uint insts_size = cbuf.insts_size();
-#endif
- masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
- masm.jump_cc(Assembler::notEqual,
- RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- /* WARNING these NOPs are critical so that verified entry point is properly
- aligned for patching by NativeJump::patch_verified_entry() */
- int nops_cnt = 2;
- if( !OptoBreakpoint ) // Leave space for int3
- nops_cnt += 1;
- masm.nop(nops_cnt);
-
- assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
+ masm.ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
- return OptoBreakpoint ? 11 : 12;
+ return MachNode::size(ra_); // too many variables; just compute it
+ // the hard way
}
@@ -1842,7 +1830,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, cbuf.insts()->mark_off());
} else {
// Emit stubs for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -13776,7 +13764,7 @@ instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eD
%}
instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr, eRegP thread) %{
- predicate(!Compile::current()->use_rtm());
+ predicate(LockingMode != LM_LIGHTWEIGHT && !Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box, TEMP thread);
ins_cost(300);
@@ -13790,6 +13778,7 @@ instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP
%}
instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
+ predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp, USE_KILL box);
ins_cost(300);
@@ -13800,6 +13789,32 @@ instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
ins_pipe(pipe_slow);
%}
+instruct cmpFastLockLightweight(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI eax_reg, eRegP tmp, eRegP thread) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastLock object box));
+ effect(TEMP eax_reg, TEMP tmp, USE_KILL box, TEMP thread);
+ ins_cost(300);
+ format %{ "FASTLOCK $object,$box\t! kills $box,$eax_reg,$tmp" %}
+ ins_encode %{
+ __ get_thread($thread$$Register);
+ __ fast_lock_lightweight($object$$Register, $box$$Register, $eax_reg$$Register, $tmp$$Register, $thread$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct cmpFastUnlockLightweight(eFlagsReg cr, eRegP object, eAXRegP eax_reg, eRegP tmp, eRegP thread) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastUnlock object eax_reg));
+ effect(TEMP tmp, USE_KILL eax_reg, TEMP thread);
+ ins_cost(300);
+ format %{ "FASTUNLOCK $object,$eax_reg\t! kills $eax_reg,$tmp" %}
+ ins_encode %{
+ __ get_thread($thread$$Register);
+ __ fast_unlock_lightweight($object$$Register, $eax_reg$$Register, $tmp$$Register, $thread$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct mask_all_evexL_LT32(kReg dst, eRegL src) %{
predicate(Matcher::vector_length(n) <= 32);
match(Set dst (MaskAll src));
diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index a248daaa1917b..a8136688cdedd 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -519,7 +519,7 @@ int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
}
// This could be in MacroAssembler but it's fairly C2 specific
-void emit_cmpfp_fixup(MacroAssembler& _masm) {
+static void emit_cmpfp_fixup(MacroAssembler& _masm) {
Label exit;
__ jccb(Assembler::noParity, exit);
__ pushf();
@@ -539,7 +539,7 @@ void emit_cmpfp_fixup(MacroAssembler& _masm) {
__ bind(exit);
}
-void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
+static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
Label done;
__ movl(dst, -1);
__ jcc(Assembler::parity, done);
@@ -558,10 +558,10 @@ void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
// je #
// |-jz -> a | b # a & b
// | -> a #
-void emit_fp_min_max(MacroAssembler& _masm, XMMRegister dst,
- XMMRegister a, XMMRegister b,
- XMMRegister xmmt, Register rt,
- bool min, bool single) {
+static void emit_fp_min_max(MacroAssembler& _masm, XMMRegister dst,
+ XMMRegister a, XMMRegister b,
+ XMMRegister xmmt, Register rt,
+ bool min, bool single) {
Label nan, zero, below, above, done;
@@ -1472,40 +1472,19 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
if (UseCompressedClassPointers) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
- st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
- st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
+ st->print_cr("\tcmpl rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
} else {
- st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
- "# Inline cache check");
+ st->print_cr("movq rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tcmpq rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
}
st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
- st->print_cr("\tnop\t# nops to align entry point");
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
MacroAssembler masm(&cbuf);
- uint insts_size = cbuf.insts_size();
- if (UseCompressedClassPointers) {
- masm.load_klass(rscratch1, j_rarg0, rscratch2);
- masm.cmpptr(rax, rscratch1);
- } else {
- masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
- }
-
- masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- /* WARNING these NOPs are critical so that verified entry point is properly
- 4 bytes aligned for patching by NativeJump::patch_verified_entry() */
- int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
- if (OptoBreakpoint) {
- // Leave space for int3
- nops_cnt -= 1;
- }
- nops_cnt &= 0x3; // Do not add nops if code is aligned.
- if (nops_cnt > 0)
- masm.nop(nops_cnt);
+ masm.ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@@ -1840,7 +1819,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call_offset);
} else {
// Emit stubs for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -12404,7 +12383,7 @@ instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp,
%}
instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
- predicate(!Compile::current()->use_rtm());
+ predicate(LockingMode != LM_LIGHTWEIGHT && !Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box);
ins_cost(300);
@@ -12417,6 +12396,7 @@ instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRe
%}
instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
+ predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp, USE_KILL box);
ins_cost(300);
@@ -12427,6 +12407,30 @@ instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
ins_pipe(pipe_slow);
%}
+instruct cmpFastLockLightweight(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI rax_reg, rRegP tmp) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastLock object box));
+ effect(TEMP rax_reg, TEMP tmp, USE_KILL box);
+ ins_cost(300);
+ format %{ "fastlock $object,$box\t! kills $box,$rax_reg,$tmp" %}
+ ins_encode %{
+ __ fast_lock_lightweight($object$$Register, $box$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct cmpFastUnlockLightweight(rFlagsReg cr, rRegP object, rax_RegP rax_reg, rRegP tmp) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastUnlock object rax_reg));
+ effect(TEMP tmp, USE_KILL rax_reg);
+ ins_cost(300);
+ format %{ "fastunlock $object,$rax_reg\t! kills $rax_reg,$tmp" %}
+ ins_encode %{
+ __ fast_unlock_lightweight($object$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
// ============================================================================
// Safepoint Instructions
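
The lightweight-locking rules above pair an inline fast path (fast_lock_lightweight / fast_unlock_lightweight) with a branch to the runtime slow path, selected by the LockingMode predicate. Conceptually, the fast path is a single atomic attempt on the object's lock state, and anything unexpected is punted to the runtime. A rough, self-contained sketch of that shape, not HotSpot's actual mark-word or lock-stack layout:

```cpp
// Conceptual sketch only: fast lock/unlock via one CAS, slow path otherwise.
#include <atomic>
#include <cstdio>

struct ObjHeader { std::atomic<unsigned> mark{0}; };  // 0 = unlocked, 1 = locked

static void slow_path_lock(ObjHeader&)   { std::puts("slow path: call runtime"); }
static void slow_path_unlock(ObjHeader&) { std::puts("slow path: call runtime"); }

static void fast_lock(ObjHeader& o) {
  unsigned expected = 0;
  // single CAS on the fast path; contention or an unexpected state goes slow
  if (!o.mark.compare_exchange_strong(expected, 1)) {
    slow_path_lock(o);
  }
}

static void fast_unlock(ObjHeader& o) {
  unsigned expected = 1;
  if (!o.mark.compare_exchange_strong(expected, 0)) {
    slow_path_unlock(o);
  }
}

int main() {
  ObjHeader o;
  fast_lock(o);    // fast path
  fast_unlock(o);  // fast path
}
```
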
diff --git a/src/hotspot/cpu/zero/compiledIC_zero.cpp b/src/hotspot/cpu/zero/compiledIC_zero.cpp
index b0564643af080..24153aeacc5e1 100644
--- a/src/hotspot/cpu/zero/compiledIC_zero.cpp
+++ b/src/hotspot/cpu/zero/compiledIC_zero.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
@@ -43,27 +42,27 @@
// ----------------------------------------------------------------------------
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
ShouldNotReachHere(); // Only needed for COMPILER2.
return nullptr;
}
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
ShouldNotReachHere(); // Only needed for COMPILER2.
return 0;
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
ShouldNotReachHere(); // Only needed for COMPILER2.
return 0;
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
@@ -71,7 +70,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code.
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
diff --git a/src/hotspot/cpu/zero/icBuffer_zero.cpp b/src/hotspot/cpu/zero/icBuffer_zero.cpp
deleted file mode 100644
index adde916a4c4ad..0000000000000
--- a/src/hotspot/cpu/zero/icBuffer_zero.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_zero.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- // NB set this once the functions below are implemented
- return 4;
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
- void* cached_oop,
- address entry_point) {
- // NB ic_stub_code_size() must return the size of the code we generate
- ShouldNotCallThis();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- // NB ic_stub_code_size() must return the size of the code we generate
- ShouldNotCallThis();
- return nullptr;
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- ShouldNotCallThis();
- return nullptr;
-}
diff --git a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
index 4244b5817db98..986cee685123b 100644
--- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
+++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
@@ -26,10 +26,8 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
-#include "oops/compiledICHolder.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index e701e0aef6082..5d5ea364b6652 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -29,7 +29,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/os/aix/os_perf_aix.cpp b/src/hotspot/os/aix/os_perf_aix.cpp
index e1719df48c331..b5ae1a6a725a5 100644
--- a/src/hotspot/os/aix/os_perf_aix.cpp
+++ b/src/hotspot/os/aix/os_perf_aix.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2022, IBM Corp.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, IBM Corp.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -87,6 +87,7 @@ static bool read_psinfo(const u_longlong_t& pid, psinfo_t& psinfo) {
}
len = fread(&psinfo, 1, sizeof(psinfo_t), fp);
+ fclose(fp);
return len == sizeof(psinfo_t);
}
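
The fix above adds the missing fclose(fp) so the stream is released on the success path as well as on errors. One way to make that kind of leak impossible is to tie the FILE* to an RAII owner; the names below (FileCloser, read_record) are invented for the example:

```cpp
#include <cstdio>
#include <memory>

// Deleter that closes the stream when the owner goes out of scope.
struct FileCloser { void operator()(FILE* f) const { if (f) std::fclose(f); } };
using FilePtr = std::unique_ptr<FILE, FileCloser>;

static bool read_record(const char* path, char* buf, std::size_t len) {
  FilePtr fp(std::fopen(path, "rb"));
  if (!fp) return false;
  std::size_t got = std::fread(buf, 1, len, fp.get());
  return got == len;            // fp is closed here on every return path
}

int main(int, char** argv) {
  char buf[16];
  // read the program's own binary, which always exists
  std::printf("read ok: %d\n", (int)read_record(argv[0], buf, sizeof buf));
}
```
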
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 42a0b9c083239..dbc18794cfec7 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -24,7 +24,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
@@ -1269,7 +1268,8 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
#endif // !__APPLE__
-int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
+static int _print_dll_info_cb(const char * name, address base_address,
+ address top_address, void * param) {
outputStream * out = (outputStream *) param;
out->print_cr(INTPTR_FORMAT " \t%s", (intptr_t)base_address, name);
return 0;
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index f02ca95be5593..d912f9f44a916 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index eaadb36731518..6a958f8903b8e 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -340,7 +340,7 @@ static const struct {
////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal and BREAK_SIGNAL support
-void jdk_misc_signal_init() {
+static void jdk_misc_signal_init() {
// Initialize signal structures
::memset((void*)pending_signals, 0, sizeof(pending_signals));
@@ -380,7 +380,7 @@ int os::signal_wait() {
////////////////////////////////////////////////////////////////////////////////
// signal chaining support
-struct sigaction* get_chained_signal_action(int sig) {
+static struct sigaction* get_chained_signal_action(int sig) {
struct sigaction *actp = nullptr;
if (libjsig_is_loaded) {
@@ -1245,7 +1245,7 @@ int os::get_signal_number(const char* signal_name) {
return -1;
}
-void set_signal_handler(int sig) {
+static void set_signal_handler(int sig) {
// Check for overwrite.
struct sigaction oldAct;
sigaction(sig, (struct sigaction*)nullptr, &oldAct);
@@ -1292,7 +1292,7 @@ void set_signal_handler(int sig) {
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
-void install_signal_handlers() {
+static void install_signal_handlers() {
// signal-chaining
typedef void (*signal_setting_t)();
signal_setting_t begin_signal_setting = nullptr;
@@ -1723,7 +1723,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
errno = old_errno;
}
-int SR_initialize() {
+static int SR_initialize() {
struct sigaction act;
char *s;
// Get signal number to use for suspend/resume
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 3613edfc7d9e6..f9c3f23f0a67b 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -27,7 +27,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index 5e0086521aad9..242042d4247aa 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -28,7 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
index fbd7c4eccd403..4750ed8805644 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
@@ -29,7 +29,6 @@
#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
index 37b92bc7ffd48..c73e83996ff57 100644
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@@ -351,7 +350,7 @@ frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
-intptr_t* _get_previous_fp() {
+static intptr_t* _get_previous_fp() {
#if defined(__clang__) || defined(__llvm__)
intptr_t **ebp;
__asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
diff --git a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
index 012f85ac0ff4a..0fc9484ce23ef 100644
--- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
+++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
@@ -27,7 +27,6 @@
#include "asm/assembler.inline.hpp"
#include "atomic_bsd_zero.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
index 4835eb9405a1b..3698896abb78a 100644
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
index 86e8ed25618c1..551270588438e 100644
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index b570e3b6d7f12..0b666f29c312b 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -28,7 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
index 282467bc9e096..3d923c03094ab 100644
--- a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
index 033ea14ead6a4..5aa65e705d9ed 100644
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
@@ -28,7 +28,6 @@
// no precompiled headers
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index b211330409d59..4dcaedf71da8c 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@@ -165,7 +164,7 @@ frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
-intptr_t* _get_previous_fp() {
+static intptr_t* _get_previous_fp() {
#if defined(__clang__)
intptr_t **ebp;
__asm__ __volatile__ ("mov %%" SPELL_REG_FP ", %0":"=r"(ebp):);
diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
index 1ce73588524c1..d593c46d15d91 100644
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
@@ -27,7 +27,6 @@
#include "asm/assembler.inline.hpp"
#include "atomic_linux_zero.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
index 46f718a9cd0f5..78e98609b6bdc 100644
--- a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
+++ b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
index 4e18334315a37..7e0814c014bec 100644
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/share/adlc/main.cpp b/src/hotspot/share/adlc/main.cpp
index 6d921cf5e7bee..4d1c5491044a7 100644
--- a/src/hotspot/share/adlc/main.cpp
+++ b/src/hotspot/share/adlc/main.cpp
@@ -216,7 +216,6 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "code/nativeInst.hpp");
AD.addInclude(AD._CPP_file, "code/vmreg.inline.hpp");
AD.addInclude(AD._CPP_file, "gc/shared/collectedHeap.inline.hpp");
- AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp");
AD.addInclude(AD._CPP_file, "oops/compressedOops.hpp");
AD.addInclude(AD._CPP_file, "oops/markWord.hpp");
AD.addInclude(AD._CPP_file, "oops/method.hpp");
diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp
index 7a0a31abf597f..5b1e113f15d10 100644
--- a/src/hotspot/share/asm/codeBuffer.cpp
+++ b/src/hotspot/share/asm/codeBuffer.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
+#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
diff --git a/src/hotspot/share/asm/codeBuffer.inline.hpp b/src/hotspot/share/asm/codeBuffer.inline.hpp
index 838447ad882dc..06ec9174b34bb 100644
--- a/src/hotspot/share/asm/codeBuffer.inline.hpp
+++ b/src/hotspot/share/asm/codeBuffer.inline.hpp
@@ -48,7 +48,7 @@ bool emit_shared_stubs_to_interp(CodeBuffer* cb, SharedStubToInterpRequests* sha
shared_stub_to_interp_requests->sort(by_shared_method);
MacroAssembler masm(cb);
for (int i = 0; i < shared_stub_to_interp_requests->length();) {
- address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
+ address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return false;
}
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index 6e5fb99242c8c..396c83c6ab976 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -523,7 +523,7 @@ inline bool BlockListBuilder::is_successor(BlockBegin* block, BlockBegin* sux) {
#ifndef PRODUCT
-int compare_depth_first(BlockBegin** a, BlockBegin** b) {
+static int compare_depth_first(BlockBegin** a, BlockBegin** b) {
return (*a)->depth_first_number() - (*b)->depth_first_number();
}
diff --git a/src/hotspot/share/c1/c1_LIRAssembler.cpp b/src/hotspot/share/c1/c1_LIRAssembler.cpp
index a601696d8df7e..51fb851d00c0e 100644
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp
@@ -606,13 +606,14 @@ void LIR_Assembler::emit_op0(LIR_Op0* op) {
Unimplemented();
break;
- case lir_std_entry:
+ case lir_std_entry: {
// init offsets
offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
- _masm->align(CodeEntryAlignment);
if (needs_icache(compilation()->method())) {
- check_icache();
+ int offset = check_icache();
+ offsets()->set_value(CodeOffsets::Entry, offset);
}
+ _masm->align(CodeEntryAlignment);
offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
_masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
if (needs_clinit_barrier_on_entry(compilation()->method())) {
@@ -621,6 +622,7 @@ void LIR_Assembler::emit_op0(LIR_Op0* op) {
build_frame();
offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
break;
+ }
case lir_osr_entry:
offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
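
The lir_std_entry case now records the unverified entry offset returned by check_icache() before aligning for the verified entry, and the case body gains braces because it declares a local (offset). A small reminder of why those braces are needed in C++, unrelated to HotSpot specifics:

```cpp
// Sketch: declaring an initialized local inside a switch case requires its own
// scope, otherwise jumping to a later label would bypass the initialization.
#include <cstdio>

void demo(int sel) {
  switch (sel) {
    case 0: {                 // braces give 'offset' a scope local to this case
      int offset = 42;
      std::printf("offset=%d\n", offset);
      break;
    }
    default:
      std::puts("other");
      break;
  }
}

int main() { demo(0); demo(1); }
```
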
diff --git a/src/hotspot/share/c1/c1_LinearScan.cpp b/src/hotspot/share/c1/c1_LinearScan.cpp
index 9e9195a0d60d0..a4d955e52a004 100644
--- a/src/hotspot/share/c1/c1_LinearScan.cpp
+++ b/src/hotspot/share/c1/c1_LinearScan.cpp
@@ -1446,12 +1446,12 @@ int LinearScan::interval_cmp(Interval** a, Interval** b) {
}
}
-#ifndef PRODUCT
-int interval_cmp(Interval* const& l, Interval* const& r) {
+#ifdef ASSERT
+static int interval_cmp(Interval* const& l, Interval* const& r) {
return l->from() - r->from();
}
-bool find_interval(Interval* interval, IntervalArray* intervals) {
+static bool find_interval(Interval* interval, IntervalArray* intervals) {
bool found;
int idx = intervals->find_sorted(interval, found);
@@ -2303,11 +2303,11 @@ void assert_no_register_values(GrowableArray<ScopeValue*>* values) {
}
}
-void assert_equal(Location l1, Location l2) {
+static void assert_equal(Location l1, Location l2) {
assert(l1.where() == l2.where() && l1.type() == l2.type() && l1.offset() == l2.offset(), "");
}
-void assert_equal(ScopeValue* v1, ScopeValue* v2) {
+static void assert_equal(ScopeValue* v1, ScopeValue* v2) {
if (v1->is_location()) {
assert(v2->is_location(), "");
assert_equal(((LocationValue*)v1)->location(), ((LocationValue*)v2)->location());
@@ -2328,12 +2328,12 @@ void assert_equal(ScopeValue* v1, ScopeValue* v2) {
}
}
-void assert_equal(MonitorValue* m1, MonitorValue* m2) {
+static void assert_equal(MonitorValue* m1, MonitorValue* m2) {
assert_equal(m1->owner(), m2->owner());
assert_equal(m1->basic_lock(), m2->basic_lock());
}
-void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
+static void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
assert(d1->scope() == d2->scope(), "not equal");
assert(d1->bci() == d2->bci(), "not equal");
@@ -2375,7 +2375,7 @@ void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
}
}
-void check_stack_depth(CodeEmitInfo* info, int stack_end) {
+static void check_stack_depth(CodeEmitInfo* info, int stack_end) {
if (info->stack()->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
switch (code) {
diff --git a/src/hotspot/share/c1/c1_MacroAssembler.hpp b/src/hotspot/share/c1/c1_MacroAssembler.hpp
index 6a8304bd405fa..1e193ce086961 100644
--- a/src/hotspot/share/c1/c1_MacroAssembler.hpp
+++ b/src/hotspot/share/c1/c1_MacroAssembler.hpp
@@ -38,7 +38,6 @@ class C1_MacroAssembler: public MacroAssembler {
//----------------------------------------------------
void explicit_null_check(Register base);
- void inline_cache_check(Register receiver, Register iCache);
void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
void remove_frame(int frame_size_in_bytes);
diff --git a/src/hotspot/share/c1/c1_Optimizer.cpp b/src/hotspot/share/c1/c1_Optimizer.cpp
index 6cd282d02ad7e..dd428a5895bc4 100644
--- a/src/hotspot/share/c1/c1_Optimizer.cpp
+++ b/src/hotspot/share/c1/c1_Optimizer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -335,7 +335,7 @@ void Optimizer::eliminate_conditional_expressions() {
}
// This removes others' relation to block, but doesn't empty block's lists
-void disconnect_from_graph(BlockBegin* block) {
+static void disconnect_from_graph(BlockBegin* block) {
for (int p = 0; p < block->number_of_preds(); p++) {
BlockBegin* pred = block->pred_at(p);
int idx;
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index 5a0bf2e61b5d0..f6e02594b5c7b 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -1664,9 +1664,9 @@ void FileMapInfo::close() {
/*
* Same as os::map_memory() but also pretouches if AlwaysPreTouch is enabled.
*/
-char* map_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec, MEMFLAGS flags = mtNone) {
+static char* map_memory(int fd, const char* file_name, size_t file_offset,
+ char *addr, size_t bytes, bool read_only,
+ bool allow_exec, MEMFLAGS flags = mtNone) {
char* mem = os::map_memory(fd, file_name, file_offset, addr, bytes,
AlwaysPreTouch ? false : read_only,
allow_exec, flags);
diff --git a/src/hotspot/share/classfile/altHashing.cpp b/src/hotspot/share/classfile/altHashing.cpp
index 158a8a232a7b4..1d43d6ebf1ed0 100644
--- a/src/hotspot/share/classfile/altHashing.cpp
+++ b/src/hotspot/share/classfile/altHashing.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -120,7 +120,7 @@ static void halfsiphash_init64(uint32_t v[4], uint64_t seed) {
v[1] ^= 0xee;
}
-uint32_t halfsiphash_finish32(uint32_t v[4], int rounds) {
+static uint32_t halfsiphash_finish32(uint32_t v[4], int rounds) {
v[2] ^= 0xff;
halfsiphash_rounds(v, rounds);
return (v[1] ^ v[3]);
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index 43aa82b67f85f..9ce49b71734b1 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -134,7 +134,8 @@ ClassPathEntry* ClassLoader::_last_module_path_entry = nullptr;
#endif
// helper routines
-bool string_starts_with(const char* str, const char* str_to_find) {
+#if INCLUDE_CDS
+static bool string_starts_with(const char* str, const char* str_to_find) {
size_t str_len = strlen(str);
size_t str_to_find_len = strlen(str_to_find);
if (str_to_find_len > str_len) {
@@ -142,6 +143,7 @@ bool string_starts_with(const char* str, const char* str_to_find) {
}
return (strncmp(str, str_to_find, str_to_find_len) == 0);
}
+#endif
static const char* get_jimage_version_string() {
static char version_string[10] = "";
@@ -1009,8 +1011,8 @@ const char* ClassLoader::file_name_for_class_name(const char* class_name,
return file_name;
}
-ClassPathEntry* find_first_module_cpe(ModuleEntry* mod_entry,
- const GrowableArray<ModuleEntry*>* const module_list) {
+static ClassPathEntry* find_first_module_cpe(ModuleEntry* mod_entry,
+ const GrowableArray<ModuleEntry*>* const module_list) {
int num_of_entries = module_list->length();
const Symbol* class_module_name = mod_entry->name();
@@ -1355,7 +1357,7 @@ void ClassLoader::initialize(TRAPS) {
setup_bootstrap_search_path(THREAD);
}
-char* lookup_vm_resource(JImageFile *jimage, const char *jimage_version, const char *path) {
+static char* lookup_vm_resource(JImageFile *jimage, const char *jimage_version, const char *path) {
jlong size;
JImageLocationRef location = (*JImageFindResource)(jimage, "java.base", jimage_version, path, &size);
if (location == 0)
diff --git a/src/hotspot/share/classfile/loaderConstraints.cpp b/src/hotspot/share/classfile/loaderConstraints.cpp
index e6021e00d3391..99d0c07ed42d4 100644
--- a/src/hotspot/share/classfile/loaderConstraints.cpp
+++ b/src/hotspot/share/classfile/loaderConstraints.cpp
@@ -296,8 +296,8 @@ void LoaderConstraintTable::purge_loader_constraints() {
_loader_constraint_table->unlink(&purge);
}
-void log_ldr_constraint_msg(Symbol* class_name, const char* reason,
- ClassLoaderData* loader1, ClassLoaderData* loader2) {
+static void log_ldr_constraint_msg(Symbol* class_name, const char* reason,
+ ClassLoaderData* loader1, ClassLoaderData* loader2) {
LogTarget(Info, class, loader, constraints) lt;
if (lt.is_enabled()) {
ResourceMark rm;
diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp
index bd4be93b86877..4664d9565d353 100644
--- a/src/hotspot/share/classfile/modules.cpp
+++ b/src/hotspot/share/classfile/modules.cpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -259,7 +259,7 @@ static void define_javabase_module(Handle module_handle, jstring version, jstrin
}
// Caller needs ResourceMark.
-void throw_dup_pkg_exception(const char* module_name, PackageEntry* package, TRAPS) {
+static void throw_dup_pkg_exception(const char* module_name, PackageEntry* package, TRAPS) {
const char* package_name = package->name()->as_C_string();
if (package->module()->is_named()) {
THROW_MSG(vmSymbols::java_lang_IllegalStateException(),
diff --git a/src/hotspot/share/classfile/placeholders.cpp b/src/hotspot/share/classfile/placeholders.cpp
index 1bb5f87870417..a6a86473ea794 100644
--- a/src/hotspot/share/classfile/placeholders.cpp
+++ b/src/hotspot/share/classfile/placeholders.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -198,8 +198,8 @@ void PlaceholderEntry::set_supername(Symbol* supername) {
// All threads examining the placeholder table must hold the
// SystemDictionary_lock, so we don't need special precautions
// on store ordering here.
-PlaceholderEntry* add_entry(Symbol* class_name, ClassLoaderData* loader_data,
- Symbol* supername){
+static PlaceholderEntry* add_entry(Symbol* class_name, ClassLoaderData* loader_data,
+ Symbol* supername){
assert_locked_or_safepoint(SystemDictionary_lock);
assert(class_name != nullptr, "adding nullptr obj");
@@ -213,7 +213,7 @@ PlaceholderEntry* add_entry(Symbol* class_name, ClassLoaderData* loader_data,
}
// Remove a placeholder object.
-void remove_entry(Symbol* class_name, ClassLoaderData* loader_data) {
+static void remove_entry(Symbol* class_name, ClassLoaderData* loader_data) {
assert_locked_or_safepoint(SystemDictionary_lock);
PlaceholderKey key(class_name, loader_data);
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index 9e96340d82b9c..be2971288ef12 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -122,7 +122,7 @@ volatile bool _alt_hash = false;
static bool _rehashed = false;
static uint64_t _alt_hash_seed = 0;
-unsigned int hash_string(const jchar* s, int len, bool useAlt) {
+static unsigned int hash_string(const jchar* s, int len, bool useAlt) {
return useAlt ?
AltHashing::halfsiphash_32(_alt_hash_seed, s, len) :
java_lang_String::hash_code(s, len);
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 82c20a962ce14..f2a88f00e0d9e 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -212,13 +212,13 @@ void SystemDictionary::set_platform_loader(ClassLoaderData *cld) {
// ----------------------------------------------------------------------------
// Parallel class loading check
-bool is_parallelCapable(Handle class_loader) {
+static bool is_parallelCapable(Handle class_loader) {
if (class_loader.is_null()) return true;
return java_lang_ClassLoader::parallelCapable(class_loader());
}
// ----------------------------------------------------------------------------
// ParallelDefineClass flag does not apply to bootclass loader
-bool is_parallelDefine(Handle class_loader) {
+static bool is_parallelDefine(Handle class_loader) {
if (class_loader.is_null()) return false;
if (AllowParallelDefineClass && java_lang_ClassLoader::parallelCapable(class_loader())) {
return true;
@@ -280,7 +280,7 @@ Symbol* SystemDictionary::class_name_symbol(const char* name, Symbol* exception,
#ifdef ASSERT
// Used to verify that class loading succeeded in adding k to the dictionary.
-void verify_dictionary_entry(Symbol* class_name, InstanceKlass* k) {
+static void verify_dictionary_entry(Symbol* class_name, InstanceKlass* k) {
MutexLocker mu(SystemDictionary_lock);
ClassLoaderData* loader_data = k->class_loader_data();
Dictionary* dictionary = loader_data->dictionary();
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index 297483526846c..44d7da5c4a4f1 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1348,7 +1348,7 @@ void SystemDictionaryShared::update_shared_entry(InstanceKlass* k, int id) {
info->_id = id;
}
-const char* class_loader_name_for_shared(Klass* k) {
+static const char* class_loader_name_for_shared(Klass* k) {
assert(k != nullptr, "Sanity");
assert(k->is_shared(), "Must be");
assert(k->is_instance_klass(), "Must be");
diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp
index b30a88e90cb48..d24e29c288d5c 100644
--- a/src/hotspot/share/code/codeBlob.cpp
+++ b/src/hotspot/share/code/codeBlob.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
@@ -649,11 +648,6 @@ void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const
st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
return;
}
- // the InlineCacheBuffer is using stubs generated into a buffer blob
- if (InlineCacheBuffer::contains(addr)) {
- st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
- return;
- }
VtableStub* v = VtableStubs::stub_containing(addr);
if (v != nullptr) {
st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index bf8c1d84e71a0..cdc9f3f50dc15 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -29,7 +29,6 @@
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
@@ -913,23 +912,6 @@ void CodeCache::verify_clean_inline_caches() {
#endif
}
-void CodeCache::verify_icholder_relocations() {
-#ifdef ASSERT
- // make sure that we aren't leaking icholders
- int count = 0;
- FOR_ALL_HEAPS(heap) {
- FOR_ALL_BLOBS(cb, *heap) {
- CompiledMethod *nm = cb->as_compiled_method_or_null();
- if (nm != nullptr) {
- count += nm->verify_icholder_relocations();
- }
- }
- }
- assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
- CompiledICHolder::live_count(), "must agree");
-#endif
-}
-
// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index 103268c8ffcd1..d1c91727bf124 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -294,7 +294,6 @@ class CodeCache : AllStatic {
}
static void verify_clean_inline_caches();
- static void verify_icholder_relocations();
// Deoptimization
private:
diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index c5063560ee570..250ef063a2a33 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -26,27 +26,19 @@
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/linkResolver.hpp"
-#include "memory/metadataFactory.hpp"
-#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
+#include "oops/compressedKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/symbol.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/continuationEntry.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/icache.hpp"
-#include "runtime/safepoint.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
#include "sanitizers/leak.hpp"
-#include "utilities/events.hpp"
// Every time a compiled IC is changed or its type is being accessed,
@@ -75,191 +67,175 @@ bool CompiledICLocker::is_safe(address code) {
return CompiledICProtectionBehaviour::current()->is_safe(cm);
}
-//-----------------------------------------------------------------------------
-// Low-level access to an inline cache. Private, since they might not be
-// MT-safe to use.
+CompiledICData::CompiledICData()
+ : _speculated_method(),
+ _speculated_klass(),
+ _itable_defc_klass(),
+ _itable_refc_klass(),
+ _is_initialized() {}
-void* CompiledIC::cached_value() const {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");
-
- if (!is_in_transition_state()) {
- void* data = get_data();
- // If we let the metadata value here be initialized to zero...
- assert(data != nullptr || Universe::non_oop_word() == nullptr,
- "no raw nulls in CompiledIC metadatas, because of patching races");
- return (data == (void*)Universe::non_oop_word()) ? nullptr : data;
+// Inline cache callsite info is initialized once, the first time the call site is resolved
+void CompiledICData::initialize(CallInfo* call_info, Klass* receiver_klass) {
+ _speculated_method = call_info->selected_method();
+ if (UseCompressedClassPointers) {
+ _speculated_klass = (uintptr_t)CompressedKlassPointers::encode_not_null(receiver_klass);
} else {
- return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
+ _speculated_klass = (uintptr_t)receiver_klass;
}
+ if (call_info->call_kind() == CallInfo::itable_call) {
+ _itable_defc_klass = call_info->resolved_method()->method_holder();
+ _itable_refc_klass = call_info->resolved_klass();
+ }
+ _is_initialized = true;
}
+bool CompiledICData::is_speculated_klass_unloaded() const {
+ return is_initialized() && _speculated_klass == 0;
+}
-void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
- assert(entry_point != nullptr, "must set legal entry point");
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- assert (!is_optimized() || cache == nullptr, "an optimized virtual call does not have a cached metadata");
- assert (cache == nullptr || cache != (Metadata*)badOopVal, "invalid metadata");
-
- assert(!is_icholder || is_icholder_entry(entry_point), "must be");
-
- // Don't use ic_destination for this test since that forwards
- // through ICBuffer instead of returning the actual current state of
- // the CompiledIC.
- if (is_icholder_entry(_call->destination())) {
- // When patching for the ICStub case the cached value isn't
- // overwritten until the ICStub copied into the CompiledIC during
- // the next safepoint. Make sure that the CompiledICHolder* is
- // marked for release at this point since it won't be identifiable
- // once the entry point is overwritten.
- InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
+void CompiledICData::clean_metadata() {
+ if (!is_initialized() || is_speculated_klass_unloaded()) {
+ return;
}
- if (TraceCompiledIC) {
- tty->print(" ");
- print_compiled_ic();
- tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
- if (!is_optimized()) {
- tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
- }
- if (is_icstub) {
- tty->print(" (icstub)");
- }
- tty->cr();
+ // GC cleaning doesn't need to change the state of the inline cache,
+ // only nuke stale speculated metadata if it gets unloaded. If the
+ // inline cache is monomorphic, the unverified entries will miss, and
+ // subsequent miss handlers will upgrade the callsite to megamorphic,
+ // which makes sense as it obviously is megamorphic then.
+ if (!speculated_klass()->is_loader_alive()) {
+ Atomic::store(&_speculated_klass, (uintptr_t)0);
+ Atomic::store(&_speculated_method, (Method*)nullptr);
}
-#ifdef ASSERT
- {
- CodeBlob* cb = CodeCache::find_blob(_call->instruction_address());
- assert(cb != nullptr && cb->is_compiled(), "must be compiled");
- }
-#endif
- _call->set_destination_mt_safe(entry_point);
+ assert(_speculated_method == nullptr || _speculated_method->method_holder()->is_loader_alive(),
+ "Speculated method is not unloaded despite class being unloaded");
+}
- if (is_optimized() || is_icstub) {
- // Optimized call sites don't have a cache value and ICStub call
- // sites only change the entry point. Changing the value in that
- // case could lead to MT safety issues.
- assert(cache == nullptr, "must be null");
+void CompiledICData::metadata_do(MetadataClosure* cl) {
+ if (!is_initialized()) {
return;
}
- if (cache == nullptr) cache = Universe::non_oop_word();
-
- set_data((intptr_t)cache);
-}
-
-
-void CompiledIC::set_ic_destination(ICStub* stub) {
- internal_set_ic_destination(stub->code_begin(), true, nullptr, false);
+ if (!is_speculated_klass_unloaded()) {
+ cl->do_metadata(_speculated_method);
+ cl->do_metadata(speculated_klass());
+ }
+ if (_itable_refc_klass != nullptr) {
+ cl->do_metadata(_itable_refc_klass);
+ }
+ if (_itable_defc_klass != nullptr) {
+ cl->do_metadata(_itable_defc_klass);
+ }
}
+Klass* CompiledICData::speculated_klass() const {
+ if (is_speculated_klass_unloaded()) {
+ return nullptr;
+ }
-
-address CompiledIC::ic_destination() const {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- if (!is_in_transition_state()) {
- return _call->destination();
+ if (UseCompressedClassPointers) {
+ return CompressedKlassPointers::decode_not_null((narrowKlass)_speculated_klass);
} else {
- return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
+ return (Klass*)_speculated_klass;
}
}
+//-----------------------------------------------------------------------------
+// High-level access to an inline cache. Guaranteed to be MT-safe.
-bool CompiledIC::is_in_transition_state() const {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- return InlineCacheBuffer::contains(_call->destination());;
+CompiledICData* CompiledIC::data() const {
+ return _data;
}
+CompiledICData* data_from_reloc_iter(RelocIterator* iter) {
+ assert(iter->type() == relocInfo::virtual_call_type, "wrong reloc. info");
+
+ virtual_call_Relocation* r = iter->virtual_call_reloc();
+ NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
+
+ return (CompiledICData*)value->data();
+}
-bool CompiledIC::is_icholder_call() const {
+CompiledIC::CompiledIC(RelocIterator* iter)
+ : _method(iter->code()),
+ _data(data_from_reloc_iter(iter)),
+ _call(nativeCall_at(iter->addr()))
+{
+ assert(_method != nullptr, "must pass compiled method");
+ assert(_method->contains(iter->addr()), "must be in compiled method");
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- return !_is_optimized && is_icholder_entry(ic_destination());
}
-// Returns native address of 'call' instruction in inline-cache. Used by
-// the InlineCacheBuffer when it needs to find the stub.
-address CompiledIC::stub_address() const {
- assert(is_in_transition_state(), "should only be called when we are in a transition state");
- return _call->destination();
+CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
+ address call_site = nativeCall_before(return_addr)->instruction_address();
+ return CompiledIC_at(nm, call_site);
}
-// Clears the IC stub if the compiled IC is in transition state
-void CompiledIC::clear_ic_stub() {
- if (is_in_transition_state()) {
- ICStub* stub = ICStub::from_destination_address(stub_address());
- stub->clear();
- }
+CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
+ RelocIterator iter(nm, call_site, call_site + 1);
+ iter.next();
+ return CompiledIC_at(&iter);
}
-//-----------------------------------------------------------------------------
-// High-level access to an inline cache. Guaranteed to be MT-safe.
+CompiledIC* CompiledIC_at(Relocation* call_reloc) {
+ address call_site = call_reloc->addr();
+ CompiledMethod* cm = CodeCache::find_blob(call_reloc->addr())->as_compiled_method();
+ return CompiledIC_at(cm, call_site);
+}
-void CompiledIC::initialize_from_iter(RelocIterator* iter) {
- assert(iter->addr() == _call->instruction_address(), "must find ic_call");
+CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
+ CompiledIC* c_ic = new CompiledIC(reloc_iter);
+ c_ic->verify();
+ return c_ic;
+}
- if (iter->type() == relocInfo::virtual_call_type) {
- virtual_call_Relocation* r = iter->virtual_call_reloc();
- _is_optimized = false;
- _value = _call->get_load_instruction(r);
- } else {
- assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
- _is_optimized = true;
- _value = nullptr;
+void CompiledIC::ensure_initialized(CallInfo* call_info, Klass* receiver_klass) {
+ if (!_data->is_initialized()) {
+ _data->initialize(call_info, receiver_klass);
}
}
-CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
- : _method(cm)
-{
- _call = _method->call_wrapper_at((address) call);
- address ic_call = _call->instruction_address();
-
- assert(ic_call != nullptr, "ic_call address must be set");
- assert(cm != nullptr, "must pass compiled method");
- assert(cm->contains(ic_call), "must be in compiled method");
-
- // Search for the ic_call at the given address.
- RelocIterator iter(cm, ic_call, ic_call+1);
- bool ret = iter.next();
- assert(ret == true, "relocInfo must exist at this address");
- assert(iter.addr() == ic_call, "must find ic_call");
-
- initialize_from_iter(&iter);
+void CompiledIC::set_to_clean() {
+ log_debug(inlinecache)("IC@" INTPTR_FORMAT ": set to clean", p2i(_call->instruction_address()));
+ _call->set_destination_mt_safe(SharedRuntime::get_resolve_virtual_call_stub());
}
-CompiledIC::CompiledIC(RelocIterator* iter)
- : _method(iter->code())
-{
- _call = _method->call_wrapper_at(iter->addr());
- address ic_call = _call->instruction_address();
+void CompiledIC::set_to_monomorphic() {
+ assert(data()->is_initialized(), "must be initialized");
+ Method* method = data()->speculated_method();
+ CompiledMethod* code = method->code();
+ address entry;
+ bool to_compiled = code != nullptr && code->is_in_use() && !code->is_unloading();
+
+ if (to_compiled) {
+ entry = code->entry_point();
+ } else {
+ entry = method->get_c2i_unverified_entry();
+ }
- CompiledMethod* nm = iter->code();
- assert(ic_call != nullptr, "ic_call address must be set");
- assert(nm != nullptr, "must pass compiled method");
- assert(nm->contains(ic_call), "must be in compiled method");
+ log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
+ p2i(_call->instruction_address()),
+ to_compiled ? "compiled" : "interpreter",
+ method->print_value_string());
- initialize_from_iter(iter);
+ _call->set_destination_mt_safe(entry);
}
-// This function may fail for two reasons: either due to running out of vtable
-// stubs, or due to running out of IC stubs in an attempted transition to a
-// transitional state. The needs_ic_stub_refill value will be set if the failure
-// was due to running out of IC stubs, in which case the caller will refill IC
-// stubs and retry.
-bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
- bool& needs_ic_stub_refill, TRAPS) {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
- assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
+void CompiledIC::set_to_megamorphic(CallInfo* call_info) {
+ assert(data()->is_initialized(), "must be initialized");
address entry;
- if (call_info->call_kind() == CallInfo::itable_call) {
- assert(bytecode == Bytecodes::_invokeinterface, "");
+ if (call_info->call_kind() == CallInfo::direct_call) {
+ // C1 sometimes compiles a callsite before the target method is loaded, resulting in
+ // dynamically bound callsites that should really be statically bound. However, the
+ // target method might not have a vtable or itable. We just wait for better code to arrive
+ return;
+ } else if (call_info->call_kind() == CallInfo::itable_call) {
int itable_index = call_info->itable_index();
entry = VtableStubs::find_itable_stub(itable_index);
if (entry == nullptr) {
- return false;
+ return;
}
#ifdef ASSERT
int index = call_info->resolved_method()->itable_index();
@@ -267,401 +243,151 @@ bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
InstanceKlass* k = call_info->resolved_method()->method_holder();
assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
- CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
- call_info->resolved_klass(), false);
- holder->claim();
- if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
- delete holder;
- needs_ic_stub_refill = true;
- return false;
- }
- // LSan appears unable to follow malloc-based memory consistently when embedded as an immediate
- // in generated machine code. So we have to ignore it.
- LSAN_IGNORE_OBJECT(holder);
} else {
- assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
+ assert(call_info->call_kind() == CallInfo::vtable_call, "what else?");
// Can be different than selected_method->vtable_index(), due to package-private etc.
int vtable_index = call_info->vtable_index();
assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
entry = VtableStubs::find_vtable_stub(vtable_index);
if (entry == nullptr) {
- return false;
+ return;
}
- if (!InlineCacheBuffer::create_transition_stub(this, nullptr, entry)) {
- needs_ic_stub_refill = true;
- return false;
- }
- }
-
- {
- ResourceMark rm;
- assert(call_info->selected_method() != nullptr, "Unexpected null selected method");
- log_trace(inlinecache)("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
- p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
}
- // We can't check this anymore. With lazy deopt we could have already
- // cleaned this IC entry before we even return. This is possible if
- // we ran out of space in the inline cache buffer trying to do the
- // set_next and we safepointed to free up space. This is a benign
- // race because the IC entry was complete when we safepointed so
- // cleaning it immediately is harmless.
- // assert(is_megamorphic(), "sanity check");
- return true;
-}
-
+ log_trace(inlinecache)("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
+ p2i(_call->instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
-// true if destination is megamorphic stub
-bool CompiledIC::is_megamorphic() const {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- assert(!is_optimized(), "an optimized call cannot be megamorphic");
-
- // Cannot rely on cached_value. It is either an interface or a method.
- return VtableStubs::entry_point(ic_destination()) != nullptr;
+ _call->set_destination_mt_safe(entry);
+ assert(is_megamorphic(), "sanity check");
}
-bool CompiledIC::is_call_to_compiled() const {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
-
- CodeBlob* cb = CodeCache::find_blob(ic_destination());
- bool is_monomorphic = (cb != nullptr && cb->is_compiled());
- // Check that the cached_value is a klass for non-optimized monomorphic calls
- // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
- // for calling directly to vep without using the inline cache (i.e., cached_value == nullptr).
- // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
- // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
-#ifdef ASSERT
- CodeBlob* caller = CodeCache::find_blob(instruction_address());
- bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
- assert( is_c1_or_jvmci_method ||
- !is_monomorphic ||
- is_optimized() ||
- (cached_metadata() != nullptr && cached_metadata()->is_klass()), "sanity check");
-#endif // ASSERT
- return is_monomorphic;
-}
+void CompiledIC::update(CallInfo* call_info, Klass* receiver_klass) {
+ // If this is the first time we fix the inline cache, we ensure it's initialized
+ ensure_initialized(call_info, receiver_klass);
+ if (is_megamorphic()) {
+ // Terminal state for the inline cache
+ return;
+ }
-bool CompiledIC::is_call_to_interpreted() const {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- // Call to interpreter if destination is either calling to a stub (if it
- // is optimized), or calling to an I2C blob
- bool is_call_to_interpreted = false;
- if (!is_optimized()) {
- CodeBlob* cb = CodeCache::find_blob(ic_destination());
- is_call_to_interpreted = (cb != nullptr && cb->is_adapter_blob());
- assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != nullptr), "sanity check");
+ if (is_speculated_klass(receiver_klass)) {
+ // If the speculated class matches the receiver klass, we can speculate that it will
+ // continue to be the case with a monomorphic inline cache
+ set_to_monomorphic();
} else {
- // Check if we are calling into our own codeblob (i.e., to a stub)
- address dest = ic_destination();
-#ifdef ASSERT
- {
- _call->verify_resolve_call(dest);
- }
-#endif /* ASSERT */
- is_call_to_interpreted = _call->is_call_to_interpreted(dest);
+ // If the dynamic type speculation fails, we try to transform to a megamorphic state
+ // for the inline cache using stubs to dispatch in tables
+ set_to_megamorphic(call_info);
}
- return is_call_to_interpreted;
}
-bool CompiledIC::set_to_clean(bool in_use) {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- if (TraceInlineCacheClearing) {
- tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
- print();
- }
- log_trace(inlinecache)("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
-
- address entry = _call->get_resolve_call_stub(is_optimized());
-
- bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
+bool CompiledIC::is_clean() const {
+ return destination() == SharedRuntime::get_resolve_virtual_call_stub();
+}
- if (safe_transition) {
- // Kill any leftover stub we might have too
- clear_ic_stub();
- if (is_optimized()) {
- set_ic_destination(entry);
- } else {
- set_ic_destination_and_value(entry, (void*)nullptr);
- }
- } else {
- // Unsafe transition - create stub.
- if (!InlineCacheBuffer::create_transition_stub(this, nullptr, entry)) {
- return false;
- }
- }
- // We can't check this anymore. With lazy deopt we could have already
- // cleaned this IC entry before we even return. This is possible if
- // we ran out of space in the inline cache buffer trying to do the
- // set_next and we safepointed to free up space. This is a benign
- // race because the IC entry was complete when we safepointed so
- // cleaning it immediately is harmless.
- // assert(is_clean(), "sanity check");
- return true;
+bool CompiledIC::is_monomorphic() const {
+ return !is_clean() && !is_megamorphic();
}
-bool CompiledIC::is_clean() const {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- bool is_clean = false;
- address dest = ic_destination();
- is_clean = dest == _call->get_resolve_call_stub(is_optimized());
- assert(!is_clean || is_optimized() || cached_value() == nullptr, "sanity check");
- return is_clean;
+bool CompiledIC::is_megamorphic() const {
+ return VtableStubs::entry_point(destination()) != nullptr;
}
-bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
- assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
- // Updating a cache to the wrong entry can cause bugs that are very hard
- // to track down - if cache entry gets invalid - we just clean it. In
- // this way it is always the same code path that is responsible for
- // updating and resolving an inline cache
- //
- // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
- // callsites. In addition ic_miss code will update a site to monomorphic if it determines
- // that an monomorphic call to the interpreter can now be monomorphic to compiled code.
- //
- // In both of these cases the only thing being modified is the jump/call target and these
- // transitions are mt_safe
-
- Thread *thread = Thread::current();
- if (info.to_interpreter()) {
- // Call to interpreter
- if (info.is_optimized() && is_optimized()) {
- assert(is_clean(), "unsafe IC path");
- // the call analysis (callee structure) specifies that the call is optimized
- // (either because of CHA or the static target is final)
- // At code generation time, this call has been emitted as static call
- // Call via stub
- assert(info.cached_metadata() != nullptr && info.cached_metadata()->is_method(), "sanity check");
- methodHandle method (thread, (Method*)info.cached_metadata());
- _call->set_to_interpreted(method, info);
-
- {
- ResourceMark rm(thread);
- log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
- p2i(instruction_address()),
- method->print_value_string());
- }
- } else {
- // Call via method-klass-holder
- CompiledICHolder* holder = info.claim_cached_icholder();
- if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
- delete holder;
- return false;
- }
- // LSan appears unable to follow malloc-based memory consistently when embedded as an
- // immediate in generated machine code. So we have to ignore it.
- LSAN_IGNORE_OBJECT(holder);
- {
- ResourceMark rm(thread);
- log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
- }
- }
- } else {
- // Call to compiled code
- bool static_bound = info.is_optimized() || (info.cached_metadata() == nullptr);
-#ifdef ASSERT
- CodeBlob* cb = CodeCache::find_blob(info.entry());
- assert (cb != nullptr && cb->is_compiled(), "must be compiled!");
-#endif /* ASSERT */
-
- // This is MT safe if we come from a clean-cache and go through a
- // non-verified entry point
- bool safe = SafepointSynchronize::is_at_safepoint() ||
- (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
-
- if (!safe) {
- if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
- return false;
- }
- } else {
- if (is_optimized()) {
- set_ic_destination(info.entry());
- } else {
- set_ic_destination_and_value(info.entry(), info.cached_metadata());
- }
- }
+bool CompiledIC::is_speculated_klass(Klass* receiver_klass) {
+ return data()->speculated_klass() == receiver_klass;
+}
- {
- ResourceMark rm(thread);
- assert(info.cached_metadata() == nullptr || info.cached_metadata()->is_klass(), "must be");
- log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass = %s) %s",
- p2i(instruction_address()),
- (info.cached_metadata() != nullptr) ? ((Klass*)info.cached_metadata())->print_value_string() : "nullptr",
- (safe) ? "" : " via stub");
- }
- }
- // We can't check this anymore. With lazy deopt we could have already
- // cleaned this IC entry before we even return. This is possible if
- // we ran out of space in the inline cache buffer trying to do the
- // set_next and we safepointed to free up space. This is a benign
- // race because the IC entry was complete when we safepointed so
- // cleaning it immediately is harmless.
- // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
- return true;
-}
-
-
-// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
-// static_bound: The call can be static bound. If it isn't also optimized, the property
-// wasn't provable at time of compilation. An optimized call will have any necessary
-// null check, while a static_bound won't. A static_bound (but not optimized) must
-// therefore use the unverified entry point.
-void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
- Klass* receiver_klass,
- bool is_optimized,
- bool static_bound,
- bool caller_is_nmethod,
- CompiledICInfo& info,
- TRAPS) {
- CompiledMethod* method_code = method->code();
-
- address entry = nullptr;
- if (method_code != nullptr && method_code->is_in_use() && !method_code->is_unloading()) {
- assert(method_code->is_compiled(), "must be compiled");
- // Call to compiled code
- //
- // Note: the following problem exists with Compiler1:
- // - at compile time we may or may not know if the destination is final
- // - if we know that the destination is final (is_optimized), we will emit
- // an optimized virtual call (no inline cache), and need a Method* to make
- // a call to the interpreter
- // - if we don't know if the destination is final, we emit a standard
- // virtual call, and use CompiledICHolder to call interpreted code
- // (no static call stub has been generated)
- // - In the case that we here notice the call is static bound we
- // convert the call into what looks to be an optimized virtual call,
- // but we must use the unverified entry point (since there will be no
- // null check on a call when the target isn't loaded).
- // This causes problems when verifying the IC because
- // it looks vanilla but is optimized. Code in is_call_to_interpreted
- // is aware of this and weakens its asserts.
- if (is_optimized) {
- entry = method_code->verified_entry_point();
- } else {
- entry = method_code->entry_point();
- }
- }
- if (entry != nullptr) {
- // Call to near compiled code.
- info.set_compiled_entry(entry, is_optimized ? nullptr : receiver_klass, is_optimized);
- } else {
- if (is_optimized) {
- // Use stub entry
- info.set_interpreter_entry(method()->get_c2i_entry(), method());
- } else {
- // Use icholder entry
- assert(method_code == nullptr || method_code->is_compiled(), "must be compiled");
- CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
- info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
- }
- }
- assert(info.is_optimized() == is_optimized, "must agree");
+// GC support
+void CompiledIC::clean_metadata() {
+ data()->clean_metadata();
}
+void CompiledIC::metadata_do(MetadataClosure* cl) {
+ data()->metadata_do(cl);
+}
-bool CompiledIC::is_icholder_entry(address entry) {
- CodeBlob* cb = CodeCache::find_blob(entry);
- if (cb == nullptr) {
- return false;
- }
- if (cb->is_adapter_blob()) {
- return true;
- } else if (cb->is_vtable_blob()) {
- return VtableStubs::is_icholder_entry(entry);
- }
- return false;
+#ifndef PRODUCT
+void CompiledIC::print() {
+ tty->print("Inline cache at " INTPTR_FORMAT ", calling " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
+ p2i(instruction_address()), p2i(destination()), p2i(data()));
+ tty->cr();
}
-bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
- // This call site might have become stale so inspect it carefully.
- address dest = cm->call_wrapper_at(call_site->addr())->destination();
- return is_icholder_entry(dest);
+void CompiledIC::verify() {
+ _call->verify();
}
+#endif
// ----------------------------------------------------------------------------
-bool CompiledStaticCall::set_to_clean(bool in_use) {
+void CompiledDirectCall::set_to_clean() {
// in_use is unused but needed to match template function in CompiledMethod
assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
// Reset call site
- set_destination_mt_safe(resolve_call_stub());
+ RelocIterator iter((nmethod*)nullptr, instruction_address(), instruction_address() + 1);
+ while (iter.next()) {
+ switch(iter.type()) {
+ case relocInfo::static_call_type:
+ _call->set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());
+ break;
+ case relocInfo::opt_virtual_call_type:
+ _call->set_destination_mt_safe(SharedRuntime::get_resolve_opt_virtual_call_stub());
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ assert(is_clean(), "should be clean after cleaning");
- // Do not reset stub here: It is too expensive to call find_stub.
- // Instead, rely on caller (nmethod::clear_inline_caches) to clear
- // both the call and its stub.
- return true;
+ log_debug(inlinecache)("DC@" INTPTR_FORMAT ": set to clean", p2i(_call->instruction_address()));
}
-bool CompiledStaticCall::is_clean() const {
- return destination() == resolve_call_stub();
-}
+void CompiledDirectCall::set(const methodHandle& callee_method) {
+ CompiledMethod* code = callee_method->code();
+ CompiledMethod* caller = CodeCache::find_compiled(instruction_address());
-bool CompiledStaticCall::is_call_to_compiled() const {
- return CodeCache::contains(destination());
-}
+ bool to_interp_cont_enter = caller->method()->is_continuation_enter_intrinsic() &&
+ ContinuationEntry::is_interpreted_call(instruction_address());
-bool CompiledDirectStaticCall::is_call_to_interpreted() const {
- // It is a call to interpreted, if it calls to a stub. Hence, the destination
- // must be in the stub part of the nmethod that contains the call
- CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
- return cm->stub_contains(destination());
-}
+ bool to_compiled = !to_interp_cont_enter && code != nullptr && code->is_in_use() && !code->is_unloading();
-void CompiledStaticCall::set_to_compiled(address entry) {
- {
- ResourceMark rm;
- log_trace(inlinecache)("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
- name(),
- p2i(instruction_address()),
- p2i(entry));
+ if (to_compiled) {
+ _call->set_destination_mt_safe(code->verified_entry_point());
+ assert(is_call_to_compiled(), "should be compiled after set to compiled");
+ } else {
+ // Patch call site to C2I adapter if code is deoptimized or unloaded.
+ // We also need to patch the static call stub to set the rmethod register
+ // to the callee_method so the c2i adapter knows how to build the frame
+ set_to_interpreted(callee_method, callee_method->get_c2i_entry());
+ assert(is_call_to_interpreted(), "should be interpreted after set to interpreted");
}
- // Call to compiled code
- assert(CodeCache::contains(entry), "wrong entry point");
- set_destination_mt_safe(entry);
+
+ log_trace(inlinecache)("DC@" INTPTR_FORMAT ": set to %s: %s: " INTPTR_FORMAT,
+ p2i(_call->instruction_address()),
+ to_compiled ? "compiled" : "interpreter",
+ callee_method->print_value_string(),
+ p2i(_call->destination()));
}
-void CompiledStaticCall::set(const StaticCallInfo& info) {
- assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
- // Updating a cache to the wrong entry can cause bugs that are very hard
- // to track down - if cache entry gets invalid - we just clean it. In
- // this way it is always the same code path that is responsible for
- // updating and resolving an inline cache
- assert(is_clean(), "do not update a call entry - use clean");
-
- if (info._to_interpreter) {
- // Call to interpreted code
- set_to_interpreted(info.callee(), info.entry());
- } else {
- set_to_compiled(info.entry());
- }
+bool CompiledDirectCall::is_clean() const {
+ return destination() == SharedRuntime::get_resolve_static_call_stub() ||
+ destination() == SharedRuntime::get_resolve_opt_virtual_call_stub();
}
-// Compute settings for a CompiledStaticCall. Since we might have to set
-// the stub when calling to the interpreter, we need to return arguments.
-void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
- CompiledMethod* m_code = m->code();
- info._callee = m;
- if (m_code != nullptr && m_code->is_in_use() && !m_code->is_unloading()) {
- info._to_interpreter = false;
- info._entry = m_code->verified_entry_point();
- } else {
- // Callee is interpreted code. In any case entering the interpreter
- // puts a converter-frame on the stack to save arguments.
- assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
- info._to_interpreter = true;
- info._entry = m()->get_c2i_entry();
- }
+bool CompiledDirectCall::is_call_to_interpreted() const {
+ // It is a call to interpreted, if it calls to a stub. Hence, the destination
+ // must be in the stub part of the nmethod that contains the call
+ CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
+ return cm->stub_contains(destination());
}
-void CompiledStaticCall::compute_entry_for_continuation_entry(const methodHandle& m, StaticCallInfo& info) {
- if (ContinuationEntry::is_interpreted_call(instruction_address())) {
- info._to_interpreter = true;
- info._entry = m()->get_c2i_entry();
- }
+bool CompiledDirectCall::is_call_to_compiled() const {
+ CompiledMethod* caller = CodeCache::find_compiled(instruction_address());
+ CodeBlob* dest_cb = CodeCache::find_blob(destination());
+ return !caller->stub_contains(destination()) && dest_cb->is_compiled();
}
-address CompiledDirectStaticCall::find_stub_for(address instruction) {
+address CompiledDirectCall::find_stub_for(address instruction) {
// Find reloc. information containing this call-site
RelocIterator iter((nmethod*)nullptr, instruction);
while (iter.next()) {
@@ -673,8 +399,6 @@ address CompiledDirectStaticCall::find_stub_for(address instruction) {
// from the CompiledIC implementation
case relocInfo::opt_virtual_call_type:
return iter.opt_virtual_call_reloc()->static_stub();
- case relocInfo::poll_type:
- case relocInfo::poll_return_type: // A safepoint can't overlap a call.
default:
ShouldNotReachHere();
}
@@ -683,36 +407,13 @@ address CompiledDirectStaticCall::find_stub_for(address instruction) {
return nullptr;
}
-address CompiledDirectStaticCall::find_stub() {
- return CompiledDirectStaticCall::find_stub_for(instruction_address());
+address CompiledDirectCall::find_stub() {
+ return find_stub_for(instruction_address());
}
-address CompiledDirectStaticCall::resolve_call_stub() const {
- return SharedRuntime::get_resolve_static_call_stub();
-}
-
-//-----------------------------------------------------------------------------
-// Non-product mode code
#ifndef PRODUCT
-
-void CompiledIC::verify() {
- _call->verify();
- assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
- || is_optimized() || is_megamorphic(), "sanity check");
-}
-
-void CompiledIC::print() {
- print_compiled_ic();
- tty->cr();
-}
-
-void CompiledIC::print_compiled_ic() {
- tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
- p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? nullptr : cached_value()));
-}
-
-void CompiledDirectStaticCall::print() {
- tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
+void CompiledDirectCall::print() {
+ tty->print("direct call at " INTPTR_FORMAT " to " INTPTR_FORMAT " -> ", p2i(instruction_address()), p2i(destination()));
if (is_clean()) {
tty->print("clean");
} else if (is_call_to_compiled()) {
@@ -723,9 +424,10 @@ void CompiledDirectStaticCall::print() {
tty->cr();
}
-void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, address entry,
- NativeMovConstReg* method_holder,
- NativeJump* jump) {
+void CompiledDirectCall::verify_mt_safe(const methodHandle& callee, address entry,
+ NativeMovConstReg* method_holder,
+ NativeJump* jump) {
+ _call->verify();
// A generated lambda form might be deleted from the Lambdaform
// cache in MethodTypeForm. If a jit compiled lambdaform method
// becomes not entrant and the cache access returns null, the new
@@ -743,4 +445,4 @@ void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, addres
|| old_method->is_old(), // may be race patching deoptimized nmethod due to redefinition.
"b) MT-unsafe modification of inline cache");
}
-#endif // !PRODUCT
+#endif
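For orientation, here is a minimal sketch of how a runtime miss handler could drive the simplified inline-cache API implemented above; `handle_ic_miss` and its parameters are hypothetical names, not the actual SharedRuntime entry points.

```
#include "code/compiledIC.hpp"

// Hypothetical miss handler: resolve the call once, then let update() pick the
// monomorphic or megamorphic shape for the call site.
static void handle_ic_miss(CompiledMethod* caller, address call_site,
                           CallInfo* call_info, Klass* receiver_klass) {
  CompiledICLocker ml(caller);                       // required around IC queries/patches
  CompiledIC* ic = CompiledIC_at(caller, call_site); // wraps the NativeCall + CompiledICData
  // The first resolution initializes the CompiledICData; later misses either keep the
  // site monomorphic (speculated klass still matches) or upgrade it to megamorphic.
  ic->update(call_info, receiver_klass);
}
```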
diff --git a/src/hotspot/share/code/compiledIC.hpp b/src/hotspot/share/code/compiledIC.hpp
index 17586fc57a05f..321bf280ed40a 100644
--- a/src/hotspot/share/code/compiledIC.hpp
+++ b/src/hotspot/share/code/compiledIC.hpp
@@ -27,42 +27,19 @@
#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
-#include "oops/compiledICHolder.hpp"
#include "runtime/safepointVerifiers.hpp"
//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
-// In order to make patching of the inline cache MT-safe, we only allow the following
-// transitions (when not at a safepoint):
-//
-//
-// [1] --<-- Clean -->--- [1]
-// / (null) \
-// / \ /-<-\
-// / [2] \ / \
-// Interpreted ---------> Monomorphic | [3]
-// (CompiledICHolder*) (Klass*) |
-// \ / \ /
-// [4] \ / [4] \->-/
-// \->- Megamorphic -<-/
-// (CompiledICHolder*)
-//
-// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
-//
-// The numbers in square brackets refer to the kind of transition:
-// [1]: Initial fixup. Receiver it found from debug information
-// [2]: Compilation of a method
-// [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
-// [4]: Inline cache miss. We go directly to megamorphic call.
-//
-// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
-// transition is made to a stub.
+// It's safe to transition from any state to any state. Typically an inline cache starts
+// in the clean state, meaning it will resolve the call when called. Then it typically
+// transitions to monomorphic, assuming the first dynamic receiver will be the only one
+// observed. If that speculation fails, we transition to megamorphic.
//
class CompiledIC;
class CompiledICProtectionBehaviour;
class CompiledMethod;
-class ICStub;
class CompiledICLocker: public StackObj {
CompiledMethod* _method;
@@ -77,237 +54,105 @@ class CompiledICLocker: public StackObj {
static bool is_safe(address code);
};
-class CompiledICInfo : public StackObj {
- private:
- address _entry; // entry point for call
- void* _cached_value; // Value of cached_value (either in stub or inline cache)
- bool _is_icholder; // Is the cached value a CompiledICHolder*
- bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound)
- bool _to_interpreter; // Call it to interpreter
- bool _release_icholder;
+// A CompiledICData is a helper object for the inline cache implementation.
+// It comprises:
+// (1) The first receiver klass and its selected method
+// (2) Itable call metadata
+
+class CompiledICData : public CHeapObj {
+ friend class VMStructs;
+ friend class JVMCIVMStructs;
+
+ Method* volatile _speculated_method;
+ uintptr_t volatile _speculated_klass;
+ Klass* _itable_defc_klass;
+ Klass* _itable_refc_klass;
+ bool _is_initialized;
+
+ bool is_speculated_klass_unloaded() const;
+
public:
- address entry() const { return _entry; }
- Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
- CompiledICHolder* claim_cached_icholder() {
- assert(_is_icholder, "");
- assert(_cached_value != nullptr, "must be non-null");
- _release_icholder = false;
- CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
- icholder->claim();
- return icholder;
- }
- bool is_optimized() const { return _is_optimized; }
- bool to_interpreter() const { return _to_interpreter; }
-
- void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
- _entry = entry;
- _cached_value = (void*)klass;
- _to_interpreter = false;
- _is_icholder = false;
- _is_optimized = is_optimized;
- _release_icholder = false;
- }
+ // Constructor
+ CompiledICData();
- void set_interpreter_entry(address entry, Method* method) {
- _entry = entry;
- _cached_value = (void*)method;
- _to_interpreter = true;
- _is_icholder = false;
- _is_optimized = true;
- _release_icholder = false;
- }
+ // accessors
+ Klass* speculated_klass() const;
+ Method* speculated_method() const { return _speculated_method; }
+ Klass* itable_defc_klass() const { return _itable_defc_klass; }
+ Klass* itable_refc_klass() const { return _itable_refc_klass; }
- void set_icholder_entry(address entry, CompiledICHolder* icholder) {
- _entry = entry;
- _cached_value = (void*)icholder;
- _to_interpreter = true;
- _is_icholder = true;
- _is_optimized = false;
- _release_icholder = true;
- }
+ static ByteSize speculated_method_offset() { return byte_offset_of(CompiledICData, _speculated_method); }
+ static ByteSize speculated_klass_offset() { return byte_offset_of(CompiledICData, _speculated_klass); }
- CompiledICInfo(): _entry(nullptr), _cached_value(nullptr), _is_icholder(false),
- _is_optimized(false), _to_interpreter(false), _release_icholder(false) {
- }
- ~CompiledICInfo() {
- // In rare cases the info is computed but not used, so release any
- // CompiledICHolder* that was created
- if (_release_icholder) {
- assert(_is_icholder, "must be");
- CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
- icholder->claim();
- delete icholder;
- }
- }
-};
+ static ByteSize itable_defc_klass_offset() { return byte_offset_of(CompiledICData, _itable_defc_klass); }
+ static ByteSize itable_refc_klass_offset() { return byte_offset_of(CompiledICData, _itable_refc_klass); }
-class NativeCallWrapper: public ResourceObj {
-public:
- virtual address destination() const = 0;
- virtual address instruction_address() const = 0;
- virtual address next_instruction_address() const = 0;
- virtual address return_address() const = 0;
- virtual address get_resolve_call_stub(bool is_optimized) const = 0;
- virtual void set_destination_mt_safe(address dest) = 0;
- virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) = 0;
- virtual void verify() const = 0;
- virtual void verify_resolve_call(address dest) const = 0;
-
- virtual bool is_call_to_interpreted(address dest) const = 0;
- virtual bool is_safe_for_patching() const = 0;
-
- virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const = 0;
-
- virtual void *get_data(NativeInstruction* instruction) const = 0;
- virtual void set_data(NativeInstruction* instruction, intptr_t data) = 0;
+ void initialize(CallInfo* call_info, Klass* receiver_klass);
+
+ bool is_initialized() const { return _is_initialized; }
+
+ // GC Support
+ void clean_metadata();
+ void metadata_do(MetadataClosure* cl);
};
class CompiledIC: public ResourceObj {
- friend class InlineCacheBuffer;
- friend class ICStub;
-
- private:
- NativeCallWrapper* _call;
- NativeInstruction* _value; // patchable value cell for this IC
- bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
+private:
CompiledMethod* _method;
+ CompiledICData* _data;
+ NativeCall* _call;
- CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
CompiledIC(RelocIterator* iter);
- void initialize_from_iter(RelocIterator* iter);
+ // CompiledICData wrappers
+ void ensure_initialized(CallInfo* call_info, Klass* receiver_klass);
+ bool is_speculated_klass(Klass* receiver_klass);
- static bool is_icholder_entry(address entry);
+ // Inline cache states
+ void set_to_monomorphic();
+ void set_to_megamorphic(CallInfo* call_info);
- // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
- // to change an inline-cache. These changes the underlying inline-cache directly. They *newer* make
- // changes to a transition stub.
- void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
- void set_ic_destination(ICStub* stub);
- void set_ic_destination(address entry_point) {
- assert(_is_optimized, "use set_ic_destination_and_value instead");
- internal_set_ic_destination(entry_point, false, nullptr, false);
- }
- // This only for use by ICStubs where the type of the value isn't known
- void set_ic_destination_and_value(address entry_point, void* value) {
- internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
- }
- void set_ic_destination_and_value(address entry_point, Metadata* value) {
- internal_set_ic_destination(entry_point, false, value, false);
- }
- void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
- internal_set_ic_destination(entry_point, false, value, true);
- }
-
- // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
- // associated with the inline cache.
- address stub_address() const;
- bool is_in_transition_state() const; // Use InlineCacheBuffer
-
- public:
+public:
// conversion (machine PC to CompiledIC*)
friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
friend CompiledIC* CompiledIC_at(Relocation* call_site);
friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
- static bool is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm);
-
- // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
- // to a transition stub, it will read the values from the transition stub.
- void* cached_value() const;
- CompiledICHolder* cached_icholder() const {
- assert(is_icholder_call(), "must be");
- return (CompiledICHolder*) cached_value();
- }
- Metadata* cached_metadata() const {
- assert(!is_icholder_call(), "must be");
- return (Metadata*) cached_value();
- }
-
- void* get_data() const {
- return _call->get_data(_value);
- }
-
- void set_data(intptr_t data) {
- _call->set_data(_value, data);
- }
-
- address ic_destination() const;
-
- bool is_optimized() const { return _is_optimized; }
+ CompiledICData* data() const;
// State
- bool is_clean() const;
+ bool is_clean() const;
+ bool is_monomorphic() const;
bool is_megamorphic() const;
- bool is_call_to_compiled() const;
- bool is_call_to_interpreted() const;
-
- bool is_icholder_call() const;
- address end_of_call() const { return _call->return_address(); }
+ address end_of_call() const { return _call->return_address(); }
- // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_ock
+ // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledICLocker
// so you are guaranteed that no patching takes place. The same goes for verify.
- //
- // Note: We do not provide any direct access to the stub code, to prevent parts of the code
- // to manipulate the inline cache in MT-unsafe ways.
- //
- // They all takes a TRAP argument, since they can cause a GC if the inline-cache buffer is full.
- //
- bool set_to_clean(bool in_use = true);
- bool set_to_monomorphic(CompiledICInfo& info);
- void clear_ic_stub();
-
- // Returns true if successful and false otherwise. The call can fail if memory
- // allocation in the code cache fails, or ic stub refill is required.
- bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, TRAPS);
-
- static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass,
- bool is_optimized, bool static_bound, bool caller_is_nmethod,
- CompiledICInfo& info, TRAPS);
+ void set_to_clean();
+ void update(CallInfo* call_info, Klass* receiver_klass);
+
+ // GC support
+ void clean_metadata();
+ void metadata_do(MetadataClosure* cl);
// Location
address instruction_address() const { return _call->instruction_address(); }
+ address destination() const { return _call->destination(); }
// Misc
void print() PRODUCT_RETURN;
- void print_compiled_ic() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
};
-inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
- CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
- c_ic->verify();
- return c_ic;
-}
-
-inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
- CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
- c_ic->verify();
- return c_ic;
-}
-
-inline CompiledIC* CompiledIC_at(Relocation* call_site) {
- assert(call_site->type() == relocInfo::virtual_call_type ||
- call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
- CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
- c_ic->verify();
- return c_ic;
-}
-
-inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
- assert(reloc_iter->type() == relocInfo::virtual_call_type ||
- reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
- CompiledIC* c_ic = new CompiledIC(reloc_iter);
- c_ic->verify();
- return c_ic;
-}
+CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
+CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
+CompiledIC* CompiledIC_at(Relocation* call_site);
+CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
//-----------------------------------------------------------------------------
-// The CompiledStaticCall represents a call to a static method in the compiled
-//
-// Transition diagram of a static call site is somewhat simpler than for an inlined cache:
+// The CompiledDirectCall represents a call to a method in the compiled code
//
//
// -----<----- Clean ----->-----
@@ -321,63 +166,7 @@ inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
//
//
-class StaticCallInfo {
- private:
- address _entry; // Entrypoint
- methodHandle _callee; // Callee (used when calling interpreter)
- bool _to_interpreter; // call to interpreted method (otherwise compiled)
-
- friend class CompiledStaticCall;
- friend class CompiledDirectStaticCall;
- friend class CompiledPltStaticCall;
- public:
- address entry() const { return _entry; }
- methodHandle callee() const { return _callee; }
-};
-
-class CompiledStaticCall : public ResourceObj {
- public:
- // Code
-
- // Returns null if CodeBuffer::expand fails
- static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr);
- static int to_interp_stub_size();
- static int to_trampoline_stub_size();
- static int reloc_to_interp_stub();
-
- // Compute entry point given a method
- static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info);
- void compute_entry_for_continuation_entry(const methodHandle& m, StaticCallInfo& info);
-
-public:
- // Clean static call (will force resolving on next use)
- virtual address destination() const = 0;
-
- // Clean static call (will force resolving on next use)
- bool set_to_clean(bool in_use = true);
-
- // Set state. The entry must be the same, as computed by compute_entry.
- // Computation and setting is split up, since the actions are separate during
- // a OptoRuntime::resolve_xxx.
- void set(const StaticCallInfo& info);
-
- // State
- bool is_clean() const;
- bool is_call_to_compiled() const;
- virtual bool is_call_to_interpreted() const = 0;
-
- virtual address instruction_address() const = 0;
- virtual address end_of_call() const = 0;
-protected:
- virtual address resolve_call_stub() const = 0;
- virtual void set_destination_mt_safe(address dest) = 0;
- virtual void set_to_interpreted(const methodHandle& callee, address entry) = 0;
- virtual const char* name() const = 0;
-
- void set_to_compiled(address entry);
-};
-
-class CompiledDirectStaticCall : public CompiledStaticCall {
+class CompiledDirectCall : public ResourceObj {
private:
friend class CompiledIC;
friend class DirectNativeCallWrapper;
@@ -392,22 +181,28 @@ class CompiledDirectStaticCall : public CompiledStaticCall {
NativeCall* _call;
- CompiledDirectStaticCall(NativeCall* call) : _call(call) {}
+ CompiledDirectCall(NativeCall* call) : _call(call) {}
public:
- static inline CompiledDirectStaticCall* before(address return_addr) {
- CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_before(return_addr));
+ // Returns null if CodeBuffer::expand fails
+ static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr);
+ static int to_interp_stub_size();
+ static int to_trampoline_stub_size();
+ static int reloc_to_interp_stub();
+
+ static inline CompiledDirectCall* before(address return_addr) {
+ CompiledDirectCall* st = new CompiledDirectCall(nativeCall_before(return_addr));
st->verify();
return st;
}
- static inline CompiledDirectStaticCall* at(address native_call) {
- CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_at(native_call));
+ static inline CompiledDirectCall* at(address native_call) {
+ CompiledDirectCall* st = new CompiledDirectCall(nativeCall_at(native_call));
st->verify();
return st;
}
- static inline CompiledDirectStaticCall* at(Relocation* call_site) {
+ static inline CompiledDirectCall* at(Relocation* call_site) {
return at(call_site->addr());
}
@@ -415,8 +210,15 @@ class CompiledDirectStaticCall : public CompiledStaticCall {
address destination() const { return _call->destination(); }
address end_of_call() const { return _call->return_address(); }
+ // Clean static call (will force resolving on next use)
+ void set_to_clean();
+
+ void set(const methodHandle& callee_method);
+
// State
- virtual bool is_call_to_interpreted() const;
+ bool is_clean() const;
+ bool is_call_to_interpreted() const;
+ bool is_call_to_compiled() const;
// Stub support
static address find_stub_for(address instruction);
@@ -426,10 +228,6 @@ class CompiledDirectStaticCall : public CompiledStaticCall {
// Misc.
void print() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
-
- protected:
- virtual address resolve_call_stub() const;
- virtual const char* name() const { return "CompiledDirectStaticCall"; }
};
#endif // SHARE_CODE_COMPILEDIC_HPP
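As a companion to the header above, a rough sketch of how GC/unloading code can now walk inline-cache metadata through `CompiledICData` instead of `CompiledICHolder`; the closure and the standalone function are illustrative assumptions, not the actual nmethod unloading code.

```
#include "code/compiledIC.hpp"
#include "code/relocInfo.hpp"

// Illustrative closure: count metadata entries still referenced by an IC.
class CountICMetadata : public MetadataClosure {
 public:
  int _count = 0;
  void do_metadata(Metadata* md) { if (md != nullptr) _count++; }
};

// Illustrative walk over the virtual call sites of one compiled method.
static void scan_ic_metadata(CompiledMethod* cm) {
  CompiledICLocker ml(cm);
  RelocIterator iter(cm);
  while (iter.next()) {
    if (iter.type() != relocInfo::virtual_call_type) continue;
    CompiledIC* ic = CompiledIC_at(&iter);
    ic->clean_metadata();         // drops the speculation if its klass was unloaded
    CountICMetadata cl;
    ic->metadata_do(&cl);         // visits speculated method/klass and itable klasses
  }
}
```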
diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp
index a26d4a98aba9c..6553d6f79344f 100644
--- a/src/hotspot/share/code/compiledMethod.cpp
+++ b/src/hotspot/share/code/compiledMethod.cpp
@@ -28,7 +28,6 @@
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
@@ -36,7 +35,6 @@
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
@@ -335,28 +333,6 @@ address CompiledMethod::oops_reloc_begin() const {
return low_boundary;
}
-int CompiledMethod::verify_icholder_relocations() {
- ResourceMark rm;
- int count = 0;
-
- RelocIterator iter(this);
- while(iter.next()) {
- if (iter.type() == relocInfo::virtual_call_type) {
- if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
- CompiledIC *ic = CompiledIC_at(&iter);
- if (TraceCompiledIC) {
- tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
- ic->print();
- }
- assert(ic->cached_icholder() != nullptr, "must be non-nullptr");
- count++;
- }
- }
- }
-
- return count;
-}
-
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
@@ -431,20 +407,6 @@ void CompiledMethod::clear_inline_caches() {
}
}
-// Clear IC callsites, releasing ICStubs of all compiled ICs
-// as well as any associated CompiledICHolders.
-void CompiledMethod::clear_ic_callsites() {
- assert(CompiledICLocker::is_safe(this), "mt unsafe call");
- ResourceMark rm;
- RelocIterator iter(this);
- while(iter.next()) {
- if (iter.type() == relocInfo::virtual_call_type) {
- CompiledIC* ic = CompiledIC_at(&iter);
- ic->set_to_clean(false);
- }
- }
-}
-
#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
@@ -466,70 +428,22 @@ class CheckClass : public MetadataClosure {
#endif // ASSERT
-bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
- if (ic->is_clean()) {
- return true;
- }
- if (ic->is_icholder_call()) {
- // The only exception is compiledICHolder metadata which may
- // yet be marked below. (We check this further below).
- CompiledICHolder* cichk_metdata = ic->cached_icholder();
-
- if (cichk_metdata->is_loader_alive()) {
- return true;
- }
- } else {
- Metadata* ic_metdata = ic->cached_metadata();
- if (ic_metdata != nullptr) {
- if (ic_metdata->is_klass()) {
- if (((Klass*)ic_metdata)->is_loader_alive()) {
- return true;
- }
- } else if (ic_metdata->is_method()) {
- Method* method = (Method*)ic_metdata;
- assert(!method->is_old(), "old method should have been cleaned");
- if (method->method_holder()->is_loader_alive()) {
- return true;
- }
- } else {
- ShouldNotReachHere();
- }
- } else {
- // This inline cache is a megamorphic vtable call. Those ICs never hold
- // any Metadata and should therefore never be cleaned by this function.
- return true;
- }
- }
-
- return ic->set_to_clean();
+static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
+ ic->clean_metadata();
}
// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
-template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
+template <typename CallsiteT>
+static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, CompiledMethod* from,
bool clean_all) {
- CodeBlob *cb = CodeCache::find_blob(addr);
- CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
- if (nm != nullptr) {
- // Clean inline caches pointing to bad nmethods
- if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
- if (!ic->set_to_clean(!from->is_unloading())) {
- return false;
- }
- assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
- }
+ CodeBlob* cb = CodeCache::find_blob(callsite->destination());
+ if (!cb->is_compiled()) {
+ return;
+ }
+ CompiledMethod* cm = cb->as_compiled_method();
+ if (clean_all || !cm->is_in_use() || cm->is_unloading() || cm->method()->code() != cm) {
+ callsite->set_to_clean();
}
- return true;
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
- bool clean_all) {
- return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
- bool clean_all) {
- return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}
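The rewritten helper above is deliberately duck-typed: it is instantiated with both `CompiledIC` (virtual call relocations) and `CompiledDirectCall` (static and opt-virtual relocations, see the switch further down in this file's diff), so the only requirement on `CallsiteT` is a `destination()` accessor and a `set_to_clean()` mutator. A standalone sketch of that implicit interface, expressed as a C++20 concept purely for illustration (the patch itself leaves it implicit in the template):

```cpp
// Illustration only: the implicit interface that clean_if_nmethod_is_unloaded()
// expects from its CallsiteT template parameter.
template <typename T>
concept CleanableCallsite = requires(T* callsite) {
  { callsite->destination() };   // where the call currently lands
  { callsite->set_to_clean() };  // reset so the next call re-resolves
};
```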
// Cleans caches in nmethods that point to either classes that are unloaded
@@ -539,7 +453,7 @@ static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
-bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
+void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
ResourceMark rm;
// Exception cache only needs to be called if unloading occurred
@@ -547,16 +461,13 @@ bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
clean_exception_cache();
}
- if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
- return false;
- }
+ cleanup_inline_caches_impl(unloading_occurred, false);
#ifdef ASSERT
// Check that the metadata embedded in the nmethod is alive
CheckClass check_class;
metadata_do(&check_class);
#endif
- return true;
}
void CompiledMethod::run_nmethod_entry_barrier() {
@@ -578,8 +489,7 @@ void CompiledMethod::run_nmethod_entry_barrier() {
void CompiledMethod::cleanup_inline_caches_whitebox() {
assert_locked_or_safepoint(CodeCache_lock);
CompiledICLocker ic_locker(this);
- guarantee(cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */),
- "Inline cache cleaning in a safepoint can't fail");
+ cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
}
address* CompiledMethod::orig_pc_addr(const frame* fr) {
@@ -587,7 +497,7 @@ address* CompiledMethod::orig_pc_addr(const frame* fr) {
}
// Called to clean up after class unloading for live nmethods
-bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
+void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
assert(CompiledICLocker::is_safe(this), "mt unsafe call");
ResourceMark rm;
@@ -602,26 +512,15 @@ bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool cl
if (unloading_occurred) {
// If class unloading occurred we first clear ICs where the cached metadata
// is referring to an unloaded klass or method.
- if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
- return false;
- }
+ clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
}
- if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
- return false;
- }
+ clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
break;
case relocInfo::opt_virtual_call_type:
- if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
- return false;
- }
- break;
-
case relocInfo::static_call_type:
- if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
- return false;
- }
+ clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
break;
case relocInfo::static_stub_type: {
@@ -672,8 +571,6 @@ bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool cl
break;
}
}
-
- return true;
}
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp
index afe0905266259..42d68bda55472 100644
--- a/src/hotspot/share/code/compiledMethod.hpp
+++ b/src/hotspot/share/code/compiledMethod.hpp
@@ -35,7 +35,7 @@ class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
-class CompiledStaticCall;
+class CompiledDirectCall;
class NativeCallWrapper;
class ScopeDesc;
class CompiledIC;
@@ -364,7 +364,7 @@ class CompiledMethod : public CodeBlob {
// Inline cache support for class unloading and nmethod unloading
private:
- bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
+ void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
address continuation_for_implicit_exception(address pc, bool for_div0_check);
@@ -373,13 +373,10 @@ class CompiledMethod : public CodeBlob {
void cleanup_inline_caches_whitebox();
virtual void clear_inline_caches();
- void clear_ic_callsites();
// Execute nmethod barrier code, as if entering through nmethod call.
void run_nmethod_entry_barrier();
- // Verify and count cached icholder relocations.
- int verify_icholder_relocations();
void verify_oop_relocations();
bool has_evol_metadata();
@@ -389,14 +386,8 @@ class CompiledMethod : public CodeBlob {
// corresponds to the given method as well.
virtual bool is_dependent_on_method(Method* dependee) = 0;
- virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
- virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
virtual address call_instruction_address(address pc) const = 0;
- virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
- virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
- virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;
-
Method* attached_method(address call_pc);
Method* attached_method_before_pc(address pc);
@@ -406,16 +397,13 @@ class CompiledMethod : public CodeBlob {
protected:
address oops_reloc_begin() const;
- private:
- bool static clean_ic_if_metadata_is_dead(CompiledIC *ic);
-
public:
// GC unloading support
// Cleans unloaded klasses and unloaded nmethods in inline caches
virtual bool is_unloading() = 0;
- bool unload_nmethod_caches(bool class_unloading_occurred);
+ void unload_nmethod_caches(bool class_unloading_occurred);
virtual void do_unloading(bool unloading_occurred) = 0;
private:
diff --git a/src/hotspot/share/code/icBuffer.cpp b/src/hotspot/share/code/icBuffer.cpp
deleted file mode 100644
index ec489eff9c882..0000000000000
--- a/src/hotspot/share/code/icBuffer.cpp
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeCache.hpp"
-#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
-#include "code/nmethod.hpp"
-#include "code/scopeDesc.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/linkResolver.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/javaThread.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/vmOperations.hpp"
-
-DEF_STUB_INTERFACE(ICStub);
-
-StubQueue* InlineCacheBuffer::_buffer = nullptr;
-
-CompiledICHolder* volatile InlineCacheBuffer::_pending_released = nullptr;
-volatile int InlineCacheBuffer::_pending_count = 0;
-
-#ifdef ASSERT
-ICRefillVerifier::ICRefillVerifier()
- : _refill_requested(false),
- _refill_remembered(false)
-{
- Thread* thread = Thread::current();
- assert(thread->missed_ic_stub_refill_verifier() == nullptr, "nesting not supported");
- thread->set_missed_ic_stub_refill_verifier(this);
-}
-
-ICRefillVerifier::~ICRefillVerifier() {
- assert(!_refill_requested || _refill_remembered,
- "Forgot to refill IC stubs after failed IC transition");
- Thread::current()->set_missed_ic_stub_refill_verifier(nullptr);
-}
-
-ICRefillVerifierMark::ICRefillVerifierMark(ICRefillVerifier* verifier) {
- Thread* thread = Thread::current();
- assert(thread->missed_ic_stub_refill_verifier() == nullptr, "nesting not supported");
- thread->set_missed_ic_stub_refill_verifier(verifier);
-}
-
-ICRefillVerifierMark::~ICRefillVerifierMark() {
- Thread::current()->set_missed_ic_stub_refill_verifier(nullptr);
-}
-
-static ICRefillVerifier* current_ic_refill_verifier() {
- Thread* current = Thread::current();
- ICRefillVerifier* verifier = current->missed_ic_stub_refill_verifier();
- assert(verifier != nullptr, "need a verifier for safety");
- return verifier;
-}
-#endif
-
-void ICStub::finalize() {
- if (!is_empty()) {
- ResourceMark rm;
- CompiledIC *ic = CompiledIC_at(CodeCache::find_compiled(ic_site()), ic_site());
- assert(CodeCache::find_compiled(ic->instruction_address()) != nullptr, "inline cache in non-compiled?");
-
- assert(this == ICStub::from_destination_address(ic->stub_address()), "wrong owner of ic buffer");
- ic->set_ic_destination_and_value(destination(), cached_value());
- }
-}
-
-
-address ICStub::destination() const {
- return InlineCacheBuffer::ic_buffer_entry_point(code_begin());
-}
-
-void* ICStub::cached_value() const {
- return InlineCacheBuffer::ic_buffer_cached_value(code_begin());
-}
-
-
-void ICStub::set_stub(CompiledIC *ic, void* cached_val, address dest_addr) {
- // We cannot store a pointer to the 'ic' object, since it is resource allocated. Instead we
- // store the location of the inline cache. Then we have enough information recreate the CompiledIC
- // object when we need to remove the stub.
- _ic_site = ic->instruction_address();
-
- // Assemble new stub
- InlineCacheBuffer::assemble_ic_buffer_code(code_begin(), cached_val, dest_addr);
- assert(destination() == dest_addr, "can recover destination");
- assert(cached_value() == cached_val, "can recover destination");
-}
-
-
-void ICStub::clear() {
- if (CompiledIC::is_icholder_entry(destination())) {
- InlineCacheBuffer::queue_for_release((CompiledICHolder*)cached_value());
- }
- _ic_site = nullptr;
-}
-
-
-#ifndef PRODUCT
-// anybody calling to this stub will trap
-
-void ICStub::verify() {
-}
-
-void ICStub::print() {
- tty->print_cr("ICStub: site: " INTPTR_FORMAT, p2i(_ic_site));
-}
-#endif
-
-//-----------------------------------------------------------------------------------------------
-// Implementation of InlineCacheBuffer
-
-
-void InlineCacheBuffer::initialize() {
- if (_buffer != nullptr) return; // already initialized
- _buffer = new StubQueue(new ICStubInterface, checked_cast<int>(InlineCacheBufferSize), InlineCacheBuffer_lock, "InlineCacheBuffer");
- assert (_buffer != nullptr, "cannot allocate InlineCacheBuffer");
-}
-
-
-void InlineCacheBuffer::refill_ic_stubs() {
-#ifdef ASSERT
- ICRefillVerifier* verifier = current_ic_refill_verifier();
- verifier->request_remembered();
-#endif
- // we ran out of inline cache buffer space; must enter safepoint.
- // We do this by forcing a safepoint
- VM_ICBufferFull ibf;
- VMThread::execute(&ibf);
-}
-
-bool InlineCacheBuffer::needs_update_inline_caches() {
- // Stub removal
- if (buffer()->number_of_stubs() > 0) {
- return true;
- }
-
- // Release pending CompiledICHolder
- if (pending_icholder_count() > 0) {
- return true;
- }
-
- return false;
-}
-
-void InlineCacheBuffer::update_inline_caches() {
- if (buffer()->number_of_stubs() > 0) {
- if (TraceICBuffer) {
- tty->print_cr("[updating inline caches with %d stubs]", buffer()->number_of_stubs());
- }
- buffer()->remove_all();
- }
- release_pending_icholders();
-}
-
-
-bool InlineCacheBuffer::contains(address instruction_address) {
- return buffer()->contains(instruction_address);
-}
-
-
-bool InlineCacheBuffer::is_empty() {
- return buffer()->number_of_stubs() == 0;
-}
-
-
-void InlineCacheBuffer_init() {
- InlineCacheBuffer::initialize();
-}
-
-bool InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_value, address entry) {
- assert(!SafepointSynchronize::is_at_safepoint(), "should not be called during a safepoint");
- assert(CompiledICLocker::is_safe(ic->instruction_address()), "mt unsafe call");
- if (TraceICBuffer) {
- tty->print_cr(" create transition stub for " INTPTR_FORMAT " destination " INTPTR_FORMAT " cached value " INTPTR_FORMAT,
- p2i(ic->instruction_address()), p2i(entry), p2i(cached_value));
- }
-
- // allocate and initialize new "out-of-line" inline-cache
- ICStub* ic_stub = (ICStub*) buffer()->request_committed(ic_stub_code_size());
- if (ic_stub == nullptr) {
-#ifdef ASSERT
- ICRefillVerifier* verifier = current_ic_refill_verifier();
- verifier->request_refill();
-#endif
- return false;
- }
-
-#ifdef ASSERT
- {
- ICStub* rev_stub = ICStub::from_destination_address(ic_stub->code_begin());
- assert(ic_stub == rev_stub,
- "ICStub mapping is reversible: stub=" PTR_FORMAT ", code=" PTR_FORMAT ", rev_stub=" PTR_FORMAT,
- p2i(ic_stub), p2i(ic_stub->code_begin()), p2i(rev_stub));
- }
-#endif
-
- // If an transition stub is already associate with the inline cache, then we remove the association.
- if (ic->is_in_transition_state()) {
- ICStub* old_stub = ICStub::from_destination_address(ic->stub_address());
- old_stub->clear();
- }
-
- ic_stub->set_stub(ic, cached_value, entry);
-
- // Update inline cache in nmethod to point to new "out-of-line" allocated inline cache
- ic->set_ic_destination(ic_stub);
- return true;
-}
-
-
-address InlineCacheBuffer::ic_destination_for(CompiledIC *ic) {
- ICStub* stub = ICStub::from_destination_address(ic->stub_address());
- return stub->destination();
-}
-
-
-void* InlineCacheBuffer::cached_value_for(CompiledIC *ic) {
- ICStub* stub = ICStub::from_destination_address(ic->stub_address());
- return stub->cached_value();
-}
-
-
-// Free CompiledICHolder*s that are no longer in use
-void InlineCacheBuffer::release_pending_icholders() {
- assert(SafepointSynchronize::is_at_safepoint(), "should only be called during a safepoint");
- CompiledICHolder* holder = Atomic::load(&_pending_released);
- _pending_released = nullptr;
- int count = 0;
- while (holder != nullptr) {
- CompiledICHolder* next = holder->next();
- delete holder;
- holder = next;
- count++;
- }
- assert(pending_icholder_count() == count, "wrong count");
- Atomic::store(&_pending_count, 0);
-}
-
-// Enqueue this icholder for release during the next safepoint. It's
-// not safe to free them until then since they might be visible to
-// another thread.
-void InlineCacheBuffer::queue_for_release(CompiledICHolder* icholder) {
- assert(icholder->next() == nullptr, "multiple enqueue?");
-
- CompiledICHolder* old = Atomic::load(&_pending_released);
- for (;;) {
- icholder->set_next(old);
- // The only reader runs at a safepoint serially so there is no need for a more strict atomic.
- CompiledICHolder* cur = Atomic::cmpxchg(&_pending_released, old, icholder, memory_order_relaxed);
- if (cur == old) {
- break;
- }
- old = cur;
- }
- Atomic::inc(&_pending_count, memory_order_relaxed);
-
- if (TraceICBuffer) {
- tty->print_cr("enqueueing icholder " INTPTR_FORMAT " to be freed", p2i(icholder));
- }
-}
-
-int InlineCacheBuffer::pending_icholder_count() {
- return Atomic::load(&_pending_count);
-}
diff --git a/src/hotspot/share/code/icBuffer.hpp b/src/hotspot/share/code/icBuffer.hpp
deleted file mode 100644
index f67080e6b5852..0000000000000
--- a/src/hotspot/share/code/icBuffer.hpp
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_CODE_ICBUFFER_HPP
-#define SHARE_CODE_ICBUFFER_HPP
-
-#include "asm/codeBuffer.hpp"
-#include "code/stubs.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/safepointVerifiers.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-class CompiledIC;
-class CompiledICHolder;
-
-//
-// For CompiledIC's:
-//
-// In cases where we do not have MT-safe state transformation,
-// we go to a transition state, using ICStubs. At a safepoint,
-// the inline caches are transferred from the transitional code:
-//
-// instruction_address --> 01 set xxx_oop, Ginline_cache_klass
-// 23 jump_to Gtemp, yyyy
-// 4 nop
-
-class ICStub: public Stub {
- private:
- int _size; // total size of the stub incl. code
- address _ic_site; // points at call instruction of owning ic-buffer
- /* stub code follows here */
- protected:
- friend class ICStubInterface;
- // This will be called only by ICStubInterface
- void initialize(int size) { _size = size; _ic_site = nullptr; }
- void finalize(); // called when a method is removed
-
- // General info
- int size() const { return _size; }
-
- // To be cautious, we want to make sure that each ICStub is in a separate instruction
- // cache line. This would allow for piggybacking on instruction cache coherency on
- // some architectures to order the updates to ICStub and setting the destination to
- // the ICStub. Note that cache line size might be larger than CodeEntryAlignment
- // that is normal alignment for CodeBlobs.
- static int alignment() { return DEFAULT_CACHE_LINE_SIZE; }
-
- // Aligning the code section is normally done for performance reasons, which is not
- // required for ICStubs, as these stubs are transitional. Setting code alignment
- // to CodeEntryAlignment would waste a lot of memory in ICBuffer. Aligning to
- // word size should be enough. This also offsets the costs of aligning the entire
- // ICStub to cache line (see above), as smaller code alignment would allow ICStub
- // to fit a _single_ cache line.
- static int code_alignment() { return HeapWordSize; }
-
- public:
- // Creation
- void set_stub(CompiledIC *ic, void* cached_value, address dest_addr);
-
- // Code info
- address code_begin() const { return align_up((address)this + sizeof(ICStub), code_alignment()); }
- address code_end() const { return (address)this + size(); }
-
- // Call site info
- address ic_site() const { return _ic_site; }
- void clear();
- bool is_empty() const { return _ic_site == nullptr; }
-
- // stub info
- address destination() const; // destination of jump instruction
- void* cached_value() const; // cached_value for stub
-
- // Debugging
- void verify() PRODUCT_RETURN;
- void print() PRODUCT_RETURN;
-
- // Creation
- static inline ICStub* from_destination_address(address destination_address) {
- ICStub* stub = (ICStub*) align_down(destination_address - sizeof(ICStub), alignment());
-#ifdef ASSERT
- stub->verify();
-#endif
- return stub;
- }
-};
-
-#ifdef ASSERT
-// The ICRefillVerifier class is a stack allocated RAII object used to
-// detect if a failed IC transition that required IC stub refilling has
-// been accidentally missed. It is up to the caller to in that case
-// refill IC stubs.
-class ICRefillVerifier: StackObj {
- bool _refill_requested;
- bool _refill_remembered;
-
- public:
- ICRefillVerifier();
- ~ICRefillVerifier();
-
- void request_refill() { _refill_requested = true; }
- void request_remembered() { _refill_remembered = true; }
-};
-
-// The ICRefillVerifierMark is used to set the thread's current
-// ICRefillVerifier to a provided one. This is useful in particular
-// when transitioning IC stubs in parallel and refilling from the
-// master thread invoking the IC stub transitioning code.
-class ICRefillVerifierMark: StackObj {
- public:
- ICRefillVerifierMark(ICRefillVerifier* verifier);
- ~ICRefillVerifierMark();
-};
-#else
-class ICRefillVerifier: StackObj {
- public:
- ICRefillVerifier() {}
-};
-class ICRefillVerifierMark: StackObj {
- public:
- ICRefillVerifierMark(ICRefillVerifier* verifier) {}
-};
-#endif
-
-class InlineCacheBuffer: public AllStatic {
- private:
- // friends
- friend class ICStub;
-
- static int ic_stub_code_size();
-
- static StubQueue* _buffer;
-
- static CompiledICHolder* volatile _pending_released;
- static volatile int _pending_count;
-
- static StubQueue* buffer() { return _buffer; }
-
- // Machine-dependent implementation of ICBuffer
- static void assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point);
- static address ic_buffer_entry_point (address code_begin);
- static void* ic_buffer_cached_value (address code_begin);
-
- public:
-
- // Initialization; must be called before first usage
- static void initialize();
-
- // Access
- static bool contains(address instruction_address);
-
- // removes the ICStubs after backpatching
- static bool needs_update_inline_caches();
- static void update_inline_caches();
- static void refill_ic_stubs();
-
- // for debugging
- static bool is_empty();
-
- static void release_pending_icholders();
- static void queue_for_release(CompiledICHolder* icholder);
- static int pending_icholder_count();
-
- // New interface
- static bool create_transition_stub(CompiledIC *ic, void* cached_value, address entry);
- static address ic_destination_for(CompiledIC *ic);
- static void* cached_value_for(CompiledIC *ic);
-};
-
-#endif // SHARE_CODE_ICBUFFER_HPP
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index cc2dcf21deebf..2755df3251396 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -640,6 +640,7 @@ nmethod::nmethod(
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps )
: CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
+ _compiled_ic_data(nullptr),
_is_unlinked(false),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
_native_basic_lock_sp_offset(basic_lock_sp_offset),
@@ -697,12 +698,12 @@ nmethod::nmethod(
clear_unloading_state();
+ finalize_relocations();
+
Universe::heap()->register_nmethod(this);
debug_only(Universe::heap()->verify_nmethod(this));
CodeCache::commit(this);
-
- finalize_relocations();
}
if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@@ -784,6 +785,7 @@ nmethod::nmethod(
#endif
)
: CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
+ _compiled_ic_data(nullptr),
_is_unlinked(false),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1)),
@@ -887,13 +889,13 @@ nmethod::nmethod(
}
#endif
+ finalize_relocations();
+
Universe::heap()->register_nmethod(this);
debug_only(Universe::heap()->verify_nmethod(this));
CodeCache::commit(this);
- finalize_relocations();
-
// Copy contents of ExceptionHandlerTable to nmethod
handler_table->copy_to(this);
nul_chk_table->copy_to(this);
@@ -1145,16 +1147,33 @@ static void install_post_call_nop_displacement(nmethod* nm, address pc) {
void nmethod::finalize_relocations() {
NoSafepointVerifier nsv;
+ GrowableArray<NativeMovConstReg*> virtual_call_data;
+
// Make sure that post call nops fill in nmethod offsets eagerly so
// we don't have to race with deoptimization
RelocIterator iter(this);
while (iter.next()) {
- if (iter.type() == relocInfo::post_call_nop_type) {
+ if (iter.type() == relocInfo::virtual_call_type) {
+ virtual_call_Relocation* r = iter.virtual_call_reloc();
+ NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
+ virtual_call_data.append(value);
+ } else if (iter.type() == relocInfo::post_call_nop_type) {
post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
address pc = reloc->addr();
install_post_call_nop_displacement(this, pc);
}
}
+
+ if (virtual_call_data.length() > 0) {
+ // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
+ _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
+ CompiledICData* next_data = _compiled_ic_data;
+
+ for (NativeMovConstReg* value : virtual_call_data) {
+ value->set_data((intptr_t)next_data);
+ next_data++;
+ }
+ }
}
void nmethod::make_deoptimized() {
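This hunk is the other half of the ICStub removal: instead of transition stubs, every virtual call site now receives a slot in one per-nmethod `CompiledICData` block, and its `NativeMovConstReg` is patched to hold that slot's address (the block is freed with `delete[]` in `purge()`, further down in this file's diff). A hedged, simplified sketch of the distribution pattern, with placeholder types standing in for the real HotSpot ones:

```cpp
// Simplified illustration of the slot-distribution pattern in
// finalize_relocations() above; Slot and Site are stand-ins for
// CompiledICData and NativeMovConstReg.
#include <cstdint>
#include <vector>

struct Slot { /* per-call-site inline cache state */ };

struct Site {
  intptr_t data = 0;
  void set_data(intptr_t d) { data = d; }  // mirrors NativeMovConstReg::set_data
};

Slot* distribute_slots(const std::vector<Site*>& sites) {
  Slot* block = new Slot[sites.size()];    // one contiguous block per nmethod
  Slot* next  = block;
  for (Site* site : sites) {
    site->set_data(reinterpret_cast<intptr_t>(next++));  // embed the slot address
  }
  return block;                            // owner releases it with delete[]
}
```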
@@ -1180,8 +1199,7 @@ void nmethod::make_deoptimized() {
while (iter.next()) {
switch (iter.type()) {
- case relocInfo::virtual_call_type:
- case relocInfo::opt_virtual_call_type: {
+ case relocInfo::virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
address pc = ic->end_of_call();
NativePostCallNop* nop = nativePostCallNop_at(pc);
@@ -1191,8 +1209,9 @@ void nmethod::make_deoptimized() {
assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
break;
}
- case relocInfo::static_call_type: {
- CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+ case relocInfo::static_call_type:
+ case relocInfo::opt_virtual_call_type: {
+ CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
address pc = csc->end_of_call();
NativePostCallNop* nop = nativePostCallNop_at(pc);
//tty->print_cr(" - static pc %p", pc);
@@ -1219,29 +1238,29 @@ void nmethod::verify_clean_inline_caches() {
RelocIterator iter(this, oops_reloc_begin());
while(iter.next()) {
switch(iter.type()) {
- case relocInfo::virtual_call_type:
- case relocInfo::opt_virtual_call_type: {
+ case relocInfo::virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
- CodeBlob *cb = CodeCache::find_blob(ic->ic_destination());
+ CodeBlob *cb = CodeCache::find_blob(ic->destination());
assert(cb != nullptr, "destination not in CodeBlob?");
nmethod* nm = cb->as_nmethod_or_null();
- if( nm != nullptr ) {
+ if (nm != nullptr) {
// Verify that inline caches pointing to bad nmethods are clean
- if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+ if (!nm->is_in_use() || nm->is_unloading()) {
assert(ic->is_clean(), "IC should be clean");
}
}
break;
}
- case relocInfo::static_call_type: {
- CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
- CodeBlob *cb = CodeCache::find_blob(csc->destination());
+ case relocInfo::static_call_type:
+ case relocInfo::opt_virtual_call_type: {
+ CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
+ CodeBlob *cb = CodeCache::find_blob(cdc->destination());
assert(cb != nullptr, "destination not in CodeBlob?");
nmethod* nm = cb->as_nmethod_or_null();
- if( nm != nullptr ) {
+ if (nm != nullptr) {
// Verify that inline caches pointing to bad nmethods are clean
- if (!nm->is_in_use() || (nm->method()->code() != nm)) {
- assert(csc->is_clean(), "IC should be clean");
+ if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
+ assert(cdc->is_clean(), "IC should be clean");
}
}
break;
@@ -1405,9 +1424,7 @@ bool nmethod::make_not_entrant() {
// For concurrent GCs, there must be a handshake between unlink and flush
void nmethod::unlink() {
if (_is_unlinked) {
- // Already unlinked. It can be invoked twice because concurrent code cache
- // unloading might need to restart when inline cache cleaning fails due to
- // running out of ICStubs, which can only be refilled at safepoints
+ // Already unlinked.
return;
}
@@ -1418,7 +1435,6 @@ void nmethod::unlink() {
// the Method, because it is only concurrently unlinked by
// the entry barrier, which acquires the per nmethod lock.
unlink_from_method();
- clear_ic_callsites();
if (is_osr_method()) {
invalidate_osr_method();
@@ -1463,10 +1479,11 @@ void nmethod::purge(bool free_code_cache_data, bool unregister_nmethod) {
ec = next;
}
+ delete[] _compiled_ic_data;
+
if (unregister_nmethod) {
Universe::heap()->unregister_nmethod(this);
}
-
CodeCache::unregister_old_nmethod(this);
CodeBlob::purge(free_code_cache_data, unregister_nmethod);
@@ -1604,16 +1621,7 @@ void nmethod::metadata_do(MetadataClosure* f) {
// Check compiledIC holders associated with this nmethod
ResourceMark rm;
CompiledIC *ic = CompiledIC_at(&iter);
- if (ic->is_icholder_call()) {
- CompiledICHolder* cichk = ic->cached_icholder();
- f->do_metadata(cichk->holder_metadata());
- f->do_metadata(cichk->holder_klass());
- } else {
- Metadata* ic_oop = ic->cached_metadata();
- if (ic_oop != nullptr) {
- f->do_metadata(ic_oop);
- }
- }
+ ic->metadata_do(f);
}
}
}
@@ -1750,8 +1758,7 @@ void nmethod::do_unloading(bool unloading_occurred) {
if (is_unloading()) {
unlink();
} else {
- guarantee(unload_nmethod_caches(unloading_occurred),
- "Should not need transition stubs");
+ unload_nmethod_caches(unloading_occurred);
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != nullptr) {
bs_nm->disarm(this);
@@ -2284,15 +2291,23 @@ void nmethod::verify() {
}
-void nmethod::verify_interrupt_point(address call_site) {
+void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
// Verify IC only when nmethod installation is finished.
if (!is_not_installed()) {
if (CompiledICLocker::is_safe(this)) {
- CompiledIC_at(this, call_site);
+ if (is_inline_cache) {
+ CompiledIC_at(this, call_site);
+ } else {
+ CompiledDirectCall::at(call_site);
+ }
} else {
CompiledICLocker ml_verify(this);
- CompiledIC_at(this, call_site);
+ if (is_inline_cache) {
+ CompiledIC_at(this, call_site);
+ } else {
+ CompiledDirectCall::at(call_site);
+ }
}
}
@@ -2316,15 +2331,15 @@ void nmethod::verify_scopes() {
address stub = nullptr;
switch (iter.type()) {
case relocInfo::virtual_call_type:
- verify_interrupt_point(iter.addr());
+ verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
break;
case relocInfo::opt_virtual_call_type:
stub = iter.opt_virtual_call_reloc()->static_stub();
- verify_interrupt_point(iter.addr());
+ verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
break;
case relocInfo::static_call_type:
stub = iter.static_call_reloc()->static_stub();
- //verify_interrupt_point(iter.addr());
+ verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
break;
case relocInfo::runtime_call_type:
case relocInfo::runtime_call_w_cp_type: {
@@ -3239,75 +3254,6 @@ void nmethod::print_code_comment_on(outputStream* st, int column, address begin,
#endif
-class DirectNativeCallWrapper: public NativeCallWrapper {
-private:
- NativeCall* _call;
-
-public:
- DirectNativeCallWrapper(NativeCall* call) : _call(call) {}
-
- virtual address destination() const { return _call->destination(); }
- virtual address instruction_address() const { return _call->instruction_address(); }
- virtual address next_instruction_address() const { return _call->next_instruction_address(); }
- virtual address return_address() const { return _call->return_address(); }
-
- virtual address get_resolve_call_stub(bool is_optimized) const {
- if (is_optimized) {
- return SharedRuntime::get_resolve_opt_virtual_call_stub();
- }
- return SharedRuntime::get_resolve_virtual_call_stub();
- }
-
- virtual void set_destination_mt_safe(address dest) {
- _call->set_destination_mt_safe(dest);
- }
-
- virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) {
- CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address());
- {
- csc->set_to_interpreted(method, info.entry());
- }
- }
-
- virtual void verify() const {
- // make sure code pattern is actually a call imm32 instruction
- _call->verify();
- _call->verify_alignment();
- }
-
- virtual void verify_resolve_call(address dest) const {
- CodeBlob* db = CodeCache::find_blob(dest);
- assert(db != nullptr && !db->is_adapter_blob(), "must use stub!");
- }
-
- virtual bool is_call_to_interpreted(address dest) const {
- CodeBlob* cb = CodeCache::find_blob(_call->instruction_address());
- return cb->contains(dest);
- }
-
- virtual bool is_safe_for_patching() const { return false; }
-
- virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const {
- return nativeMovConstReg_at(r->cached_value());
- }
-
- virtual void *get_data(NativeInstruction* instruction) const {
- return (void*)((NativeMovConstReg*) instruction)->data();
- }
-
- virtual void set_data(NativeInstruction* instruction, intptr_t data) {
- ((NativeMovConstReg*) instruction)->set_data(data);
- }
-};
-
-NativeCallWrapper* nmethod::call_wrapper_at(address call) const {
- return new DirectNativeCallWrapper((NativeCall*) call);
-}
-
-NativeCallWrapper* nmethod::call_wrapper_before(address return_pc) const {
- return new DirectNativeCallWrapper(nativeCall_before(return_pc));
-}
-
address nmethod::call_instruction_address(address pc) const {
if (NativeCall::is_call_before(pc)) {
NativeCall *ncall = nativeCall_before(pc);
@@ -3316,18 +3262,6 @@ address nmethod::call_instruction_address(address pc) const {
return nullptr;
}
-CompiledStaticCall* nmethod::compiledStaticCall_at(Relocation* call_site) const {
- return CompiledDirectStaticCall::at(call_site);
-}
-
-CompiledStaticCall* nmethod::compiledStaticCall_at(address call_site) const {
- return CompiledDirectStaticCall::at(call_site);
-}
-
-CompiledStaticCall* nmethod::compiledStaticCall_before(address return_addr) const {
- return CompiledDirectStaticCall::before(return_addr);
-}
-
#if defined(SUPPORT_DATA_STRUCTS)
void nmethod::print_value_on(outputStream* st) const {
st->print("nmethod");
@@ -3341,15 +3275,15 @@ void nmethod::print_calls(outputStream* st) {
RelocIterator iter(this);
while (iter.next()) {
switch (iter.type()) {
- case relocInfo::virtual_call_type:
- case relocInfo::opt_virtual_call_type: {
+ case relocInfo::virtual_call_type: {
CompiledICLocker ml_verify(this);
CompiledIC_at(&iter)->print();
break;
}
case relocInfo::static_call_type:
- st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
- CompiledDirectStaticCall::at(iter.reloc())->print();
+ case relocInfo::opt_virtual_call_type:
+ st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
+ CompiledDirectCall::at(iter.reloc())->print();
break;
default:
break;
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index 13cd2f799a76b..2993db21305ee 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -27,6 +27,7 @@
#include "code/compiledMethod.hpp"
+class CompiledICData;
class CompileTask;
class DepChange;
class DirectiveSet;
@@ -196,6 +197,7 @@ class nmethod : public CompiledMethod {
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
+ CompiledICData* _compiled_ic_data;
bool _is_unlinked;
// Shared fields for all nmethod's
@@ -604,7 +606,7 @@ class nmethod : public CompiledMethod {
// verify operations
void verify();
void verify_scopes();
- void verify_interrupt_point(address interrupt_point);
+ void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
// Disassemble this nmethod with additional debug information, e.g. information about blocks.
void decode2(outputStream* st) const;
@@ -699,14 +701,8 @@ class nmethod : public CompiledMethod {
virtual void metadata_do(MetadataClosure* f);
- NativeCallWrapper* call_wrapper_at(address call) const;
- NativeCallWrapper* call_wrapper_before(address return_pc) const;
address call_instruction_address(address pc) const;
- virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
- virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
- virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
-
virtual void make_deoptimized();
void finalize_relocations();
};
diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp
index bfb3db72d7265..ef90875767503 100644
--- a/src/hotspot/share/code/relocInfo.cpp
+++ b/src/hotspot/share/code/relocInfo.cpp
@@ -641,12 +641,10 @@ Method* virtual_call_Relocation::method_value() {
return (Method*)m;
}
-bool virtual_call_Relocation::clear_inline_cache() {
- // No stubs for ICs
- // Clean IC
+void virtual_call_Relocation::clear_inline_cache() {
ResourceMark rm;
CompiledIC* icache = CompiledIC_at(this);
- return icache->set_to_clean();
+ icache->set_to_clean();
}
@@ -669,18 +667,10 @@ Method* opt_virtual_call_Relocation::method_value() {
return (Method*)m;
}
-template <typename CompiledICorStaticCall>
-static bool set_to_clean_no_ic_refill(CompiledICorStaticCall* ic) {
- guarantee(ic->set_to_clean(), "Should not need transition stubs");
- return true;
-}
-
-bool opt_virtual_call_Relocation::clear_inline_cache() {
- // No stubs for ICs
- // Clean IC
+void opt_virtual_call_Relocation::clear_inline_cache() {
ResourceMark rm;
- CompiledIC* icache = CompiledIC_at(this);
- return set_to_clean_no_ic_refill(icache);
+ CompiledDirectCall* callsite = CompiledDirectCall::at(this);
+ callsite->set_to_clean();
}
address opt_virtual_call_Relocation::static_stub() {
@@ -717,10 +707,10 @@ void static_call_Relocation::unpack_data() {
_method_index = unpack_1_int();
}
-bool static_call_Relocation::clear_inline_cache() {
- // Safe call site info
- CompiledStaticCall* handler = this->code()->compiledStaticCall_at(this);
- return set_to_clean_no_ic_refill(handler);
+void static_call_Relocation::clear_inline_cache() {
+ ResourceMark rm;
+ CompiledDirectCall* callsite = CompiledDirectCall::at(this);
+ callsite->set_to_clean();
}
@@ -759,11 +749,10 @@ address trampoline_stub_Relocation::get_trampoline_for(address call, nmethod* co
return nullptr;
}
-bool static_stub_Relocation::clear_inline_cache() {
+void static_stub_Relocation::clear_inline_cache() {
// Call stub is only used when calling the interpreted code.
// It does not really need to be cleared, except that we want to clean out the methodoop.
- CompiledDirectStaticCall::set_stub_to_clean(this);
- return true;
+ CompiledDirectCall::set_stub_to_clean(this);
}
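With the ICStub transition path gone, clearing a call site can no longer run out of stub space, so the `clear_inline_cache()` hooks in this file lose their boolean success result (the matching header change follows below). A toy sketch of the contract change, using invented names rather than HotSpot types:

```cpp
// Toy illustration of the signature change only; not HotSpot code.
struct ToySite {
  void* target = nullptr;
  bool is_clean() const { return target == nullptr; }
};

// Old contract: returned bool and could fail when no transition stub was free.
//   bool clear_inline_cache(ToySite& site);
// New contract: always succeeds in place.
void clear_inline_cache(ToySite& site) {
  site.target = nullptr;  // the next call re-resolves through the runtime
}
```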
diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp
index 77e24708bb143..5f67f94bdadc5 100644
--- a/src/hotspot/share/code/relocInfo.hpp
+++ b/src/hotspot/share/code/relocInfo.hpp
@@ -862,7 +862,7 @@ class Relocation {
// all relocations are able to reassert their values
virtual void set_value(address x);
- virtual bool clear_inline_cache() { return true; }
+ virtual void clear_inline_cache() {}
// This method assumes that all virtual/static (inline) caches are cleared (since for static_call_type and
// ic_call_type is not always position dependent (depending on the state of the cache)). However, this is
@@ -1141,7 +1141,7 @@ class virtual_call_Relocation : public CallRelocation {
void pack_data_to(CodeSection* dest) override;
void unpack_data() override;
- bool clear_inline_cache() override;
+ void clear_inline_cache() override;
};
@@ -1170,7 +1170,7 @@ class opt_virtual_call_Relocation : public CallRelocation {
void pack_data_to(CodeSection* dest) override;
void unpack_data() override;
- bool clear_inline_cache() override;
+ void clear_inline_cache() override;
// find the matching static_stub
address static_stub();
@@ -1202,7 +1202,7 @@ class static_call_Relocation : public CallRelocation {
void pack_data_to(CodeSection* dest) override;
void unpack_data() override;
- bool clear_inline_cache() override;
+ void clear_inline_cache() override;
// find the matching static_stub
address static_stub();
@@ -1227,7 +1227,7 @@ class static_stub_Relocation : public Relocation {
static_stub_Relocation() : Relocation(relocInfo::static_stub_type) { }
public:
- bool clear_inline_cache() override;
+ void clear_inline_cache() override;
address static_call() { return _static_call; }
diff --git a/src/hotspot/share/code/vtableStubs.cpp b/src/hotspot/share/code/vtableStubs.cpp
index eed3dc8e7876a..5a54426d6a420 100644
--- a/src/hotspot/share/code/vtableStubs.cpp
+++ b/src/hotspot/share/code/vtableStubs.cpp
@@ -283,13 +283,6 @@ VtableStub* VtableStubs::entry_point(address pc) {
return (s == stub) ? s : nullptr;
}
-bool VtableStubs::is_icholder_entry(address pc) {
- assert(contains(pc), "must contain all vtable blobs");
- VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
- // itable stubs use CompiledICHolder.
- return stub->is_itable_stub();
-}
-
bool VtableStubs::contains(address pc) {
// simple solution for now - we may want to use
// a faster way if this function is called often
diff --git a/src/hotspot/share/code/vtableStubs.hpp b/src/hotspot/share/code/vtableStubs.hpp
index 7076e50f3e3d8..3993e1e72d5cf 100644
--- a/src/hotspot/share/code/vtableStubs.hpp
+++ b/src/hotspot/share/code/vtableStubs.hpp
@@ -107,7 +107,6 @@ class VtableStubs : AllStatic {
static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
static VtableStub* entry_point(address pc); // vtable stub entry point for a pc
- static bool is_icholder_entry(address pc); // is the blob containing pc (which must be a vtable blob) an icholder?
static bool contains(address pc); // is pc within any stub?
static VtableStub* stub_containing(address pc); // stub containing pc or nullptr
static void initialize();
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 1ade28278aedd..3a25112891484 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -582,7 +582,7 @@ CompilerCounters::CompilerCounters() {
// c2 uses explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if c2 is used, it should be always registered first.
// This function is called during vm initialization.
-void register_jfr_phasetype_serializer(CompilerType compiler_type) {
+static void register_jfr_phasetype_serializer(CompilerType compiler_type) {
ResourceMark rm;
static bool first_registration = true;
if (compiler_type == compiler_jvmci) {
diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp
index 089fc5a4d995e..a8eee10fac539 100644
--- a/src/hotspot/share/compiler/compilerOracle.cpp
+++ b/src/hotspot/share/compiler/compilerOracle.cpp
@@ -48,7 +48,7 @@ static const char* optiontype_names[] = {
#undef enum_of_types
};
-const char* optiontype2name(enum OptionType type) {
+static const char* optiontype2name(enum OptionType type) {
return optiontype_names[static_cast<int>(type)];
}
@@ -58,7 +58,7 @@ static enum OptionType option_types[] = {
#undef enum_of_options
};
-enum OptionType option2type(enum CompileCommand option) {
+static enum OptionType option2type(enum CompileCommand option) {
return option_types[static_cast<int>(option)];
}
@@ -68,7 +68,7 @@ static const char* option_names[] = {
#undef enum_of_options
};
-const char* option2name(enum CompileCommand option) {
+static const char* option2name(enum CompileCommand option) {
return option_names[static_cast<int>(option)];
}
@@ -108,7 +108,7 @@ static bool print_final_memstat_report = false;
// A filter for quick lookup if an option is set
static bool option_filter[static_cast<int>(CompileCommand::Unknown) + 1] = { 0 };
-void command_set_in_filter(enum CompileCommand option) {
+static void command_set_in_filter(enum CompileCommand option) {
assert(option != CompileCommand::Unknown, "sanity");
assert(option2type(option) != OptionType::Unknown, "sanity");
@@ -120,7 +120,7 @@ void command_set_in_filter(enum CompileCommand option) {
option_filter[static_cast<int>(option)] = true;
}
-bool has_command(enum CompileCommand option) {
+static bool has_command(enum CompileCommand option) {
return option_filter[static_cast<int>(option)];
}
@@ -547,7 +547,7 @@ enum OptionType CompilerOracle::parse_option_type(const char* type_str) {
return OptionType::Unknown;
}
-void print_tip() { // CMH Update info
+static void print_tip() { // CMH Update info
tty->cr();
tty->print_cr("Usage: '-XX:CompileCommand=