diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp index d445108098b86..60dd7579a6a0f 100644 --- a/src/hotspot/cpu/ppc/assembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp @@ -1816,6 +1816,7 @@ class Assembler : public AbstractAssembler { relocInfo::relocType rt = relocInfo::none); // helper function for b, bcxx + inline bool is_branch(address a); inline bool is_within_range_of_b(address a, address pc); inline bool is_within_range_of_bcxx(address a, address pc); diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp index 98c8b629844c9..9f9abdf62fe26 100644 --- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp @@ -454,6 +454,12 @@ inline void Assembler::bclrl( int boint, int biint, int bhint, relocInfo::relocT inline void Assembler::bcctr( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); } inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); } +inline bool Assembler::is_branch(address a) { + int32_t instr = *(int32_t*) a; + int op = inv_op_ppc(instr); + return op == b_op || op == bc_op; +} + // helper function for b inline bool Assembler::is_within_range_of_b(address a, address pc) { // Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file). diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp index ea4d76e200fc2..cb293f893bdee 100644 --- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp @@ -133,7 +133,9 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox } bind(done); - inc_held_monitor_count(Rmark /*tmp*/); + if (LockingMode == LM_LEGACY) { + inc_held_monitor_count(Rmark /*tmp*/); + } } @@ -179,7 +181,9 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb // Done bind(done); - dec_held_monitor_count(Rmark /*tmp*/); + if (LockingMode == LM_LEGACY) { + dec_held_monitor_count(Rmark /*tmp*/); + } } diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp index 76a8decdeeee6..47cafc45f33ea 100644 --- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp @@ -64,7 +64,8 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address return_pc = call_c(entry_point); - reset_last_Java_frame(); + // Last java sp can be null when the RT call was preempted + reset_last_Java_frame(false /* check_last_java_sp */); // Check for pending exceptions. { @@ -258,7 +259,8 @@ void Runtime1::initialize_pd() { } uint Runtime1::runtime_blob_current_thread_offset(frame f) { - Unimplemented(); + // On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames). 
+ ShouldNotCallThis(); return 0; } diff --git a/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp b/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp index 612f8c4e70272..951f36f46a426 100644 --- a/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp @@ -72,7 +72,9 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) { } inline void FreezeBase::prepare_freeze_interpreted_top_frame(const frame& f) { - Unimplemented(); + // nothing to do + DEBUG_ONLY( intptr_t* lspp = (intptr_t*) &(f.get_ijava_state()->top_frame_sp); ) + assert(*lspp == f.unextended_sp() - f.fp(), "should be " INTPTR_FORMAT " usp:" INTPTR_FORMAT " fp:" INTPTR_FORMAT, *lspp, p2i(f.unextended_sp()), p2i(f.fp())); } inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) { @@ -354,6 +356,7 @@ inline void Thaw::patch_caller_links(intptr_t* sp, intptr_t* bottom) { if (is_entry_frame) { callers_sp = _cont.entryFP(); } else { + assert(!Interpreter::contains(pc), "sp:" PTR_FORMAT " pc:" PTR_FORMAT, p2i(sp), p2i(pc)); CodeBlob* cb = CodeCache::find_blob_fast(pc); callers_sp = sp + cb->frame_size(); } @@ -484,8 +487,8 @@ inline frame ThawBase::new_entry_frame() { template frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) { assert(FKind::is_instance(hf), ""); - assert(is_aligned(caller.fp(), frame::frame_alignment), ""); - assert(is_aligned(caller.sp(), frame::frame_alignment), ""); + assert(is_aligned(caller.fp(), frame::frame_alignment), PTR_FORMAT, p2i(caller.fp())); + // caller.sp() can be unaligned. This is fixed below. if (FKind::interpreted) { // Note: we have to overlap with the caller, at least if it is interpreted, to match the // max_thawing_size calculation during freeze. See also comment above. 
@@ -514,7 +517,7 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& return f; } else { int fsize = FKind::size(hf); - int argsize = hf.compiled_frame_stack_argsize(); + int argsize = FKind::stack_argsize(hf); intptr_t* frame_sp = caller.sp() - fsize; if ((bottom && argsize > 0) || caller.is_interpreted_frame()) { @@ -548,13 +551,21 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c } inline intptr_t* ThawBase::possibly_adjust_frame(frame& top) { - Unimplemented(); - return nullptr; + // Nothing to do + return top.sp(); } inline intptr_t* ThawBase::push_cleanup_continuation() { - Unimplemented(); - return nullptr; + frame enterSpecial = new_entry_frame(); + frame::common_abi* enterSpecial_abi = (frame::common_abi*)enterSpecial.sp(); + + enterSpecial_abi->lr = (intptr_t)ContinuationEntry::cleanup_pc(); + + log_develop_trace(continuations, preempt)("push_cleanup_continuation enterSpecial sp: " INTPTR_FORMAT " cleanup pc: " INTPTR_FORMAT, + p2i(enterSpecial_abi), + p2i(ContinuationEntry::cleanup_pc())); + + return enterSpecial.sp(); } inline void ThawBase::patch_pd(frame& f, const frame& caller) { @@ -564,7 +575,7 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) { } inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) { - Unimplemented(); + assert(f.own_abi()->callers_sp == (uint64_t)caller_sp, "should have been fixed by patch_caller_links"); } // diff --git a/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp b/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp index 59e54119f6784..ae5a02451e220 100644 --- a/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp @@ -27,14 +27,18 @@ #include "runtime/continuationHelper.hpp" -template -static inline intptr_t** link_address(const frame& f) { - Unimplemented(); - return nullptr; -} - static inline void patch_return_pc_with_preempt_stub(frame& f) { - Unimplemented(); + if (f.is_runtime_frame()) { + // Patch the pc of the now old last Java frame (we already set the anchor to enterSpecial) + // so that when target goes back to Java it will actually return to the preempt cleanup stub. + frame::common_abi* abi = (frame::common_abi*)f.sp(); + abi->lr = (uint64_t)StubRoutines::cont_preempt_stub(); + } else { + // The target will check for preemption once it returns to the interpreter + // or the native wrapper code and will manually jump to the preempt stub. + JavaThread *thread = JavaThread::current(); + thread->set_preempt_alternate_return(StubRoutines::cont_preempt_stub()); + } } inline int ContinuationHelper::frame_align_words(int size) { diff --git a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp index 2beb7a84d341e..c390449dc33ff 100644 --- a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp @@ -53,7 +53,10 @@ inline void frame::setup(kind knd) { // The back link for compiled frames on the heap is not valid if (is_heap_frame()) { // fp for interpreted frames should have been derelativized and passed to the constructor - assert(is_compiled_frame(), ""); + assert(is_compiled_frame() + || is_native_frame() // native wrapper (nmethod) for j.l.Object::wait0 + || is_runtime_frame(), // e.g. Runtime1::monitorenter, SharedRuntime::complete_monitor_locking_C + "sp:" PTR_FORMAT " fp:" PTR_FORMAT " name:%s", p2i(_sp), p2i(_unextended_sp + _cb->frame_size()), _cb->name()); // The back link for compiled frames on the heap is invalid. 
_fp = _unextended_sp + _cb->frame_size(); } else { diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp index dd0a597ec8ddd..4c39f88ce5176 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp @@ -49,6 +49,14 @@ class InterpreterMacroAssembler: public MacroAssembler { virtual void check_and_handle_popframe(Register scratch_reg); virtual void check_and_handle_earlyret(Register scratch_reg); + void call_VM_preemptable(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); + void restore_after_resume(Register fp); + // R22 and R31 are preserved when a vthread gets preempted in the interpreter. + // The interpreter already assumes that these registers are nonvolatile across native calls. + bool nonvolatile_accross_vthread_preemtion(Register r) const { + return r->is_nonvolatile() && ((r == R22) || (r == R31)); + } + // Base routine for all dispatches. void dispatch_base(TosState state, address* table); @@ -182,7 +190,7 @@ class InterpreterMacroAssembler: public MacroAssembler { // Special call VM versions that check for exceptions and forward exception // via short cut (not via expensive forward exception stub). void check_and_forward_exception(Register Rscratch1, Register Rscratch2); - void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); + void call_VM(Register oop_result, address entry_point, bool check_exceptions = true, Label* last_java_pc = nullptr); void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp index aa77f0169ea1a..632eb97e85206 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -932,7 +932,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, // void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { if (LockingMode == LM_MONITOR) { - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); + call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); } else { // template code (for LM_LEGACY): // @@ -953,8 +953,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { const Register current_header = R9_ARG7; const Register tmp = R10_ARG8; - Label count_locking, done; - Label cas_failed, slow_case; + Label count_locking, done, slow_case, cas_failed; assert_different_registers(header, object_mark_addr, current_header, tmp); @@ -969,7 +968,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { if (LockingMode == LM_LIGHTWEIGHT) { lightweight_lock(monitor, object, header, tmp, slow_case); - b(count_locking); + b(done); } else if (LockingMode == LM_LEGACY) { // Load markWord from object into header. ld(header, oopDesc::mark_offset_in_bytes(), object); @@ -1035,12 +1034,15 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { // None of the above fast optimizations worked so we have to get into the // slow case of monitor enter. 
bind(slow_case); - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); - b(done); + call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); // } - align(32, 12); - bind(count_locking); - inc_held_monitor_count(current_header /*tmp*/); + + if (LockingMode == LM_LEGACY) { + b(done); + align(32, 12); + bind(count_locking); + inc_held_monitor_count(current_header /*tmp*/); + } bind(done); } } @@ -1137,7 +1139,9 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) { bind(free_slot); li(R0, 0); std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor); - dec_held_monitor_count(current_header /*tmp*/); + if (LockingMode == LM_LEGACY) { + dec_held_monitor_count(current_header /*tmp*/); + } bind(done); } } @@ -2133,10 +2137,10 @@ void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1, bind(Ldone); } -void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { +void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions, Label* last_java_pc) { save_interpreter_state(R11_scratch1); - MacroAssembler::call_VM(oop_result, entry_point, false); + MacroAssembler::call_VM(oop_result, entry_point, false /*check_exceptions*/, last_java_pc); restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true); @@ -2155,6 +2159,74 @@ void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point call_VM(oop_result, entry_point, check_exceptions); } +void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result, address entry_point, + Register arg_1, bool check_exceptions) { + if (!Continuations::enabled()) { + call_VM(oop_result, entry_point, arg_1, check_exceptions); + return; + } + + Label resume_pc, not_preempted; + + DEBUG_ONLY(ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread)); + DEBUG_ONLY(cmpdi(CCR0, R0, 0)); + asm_assert_eq("Should not have alternate return address set"); + + // Preserve 2 registers + assert(nonvolatile_accross_vthread_preemtion(R31) && nonvolatile_accross_vthread_preemtion(R22), ""); + ld(R3_ARG1, _abi0(callers_sp), R1_SP); // load FP + std(R31, _ijava_state_neg(lresult), R3_ARG1); + std(R22, _ijava_state_neg(fresult), R3_ARG1); + + // We set resume_pc as last java pc. It will be saved if the vthread gets preempted. + // Later execution will continue right there. 
+ mr_if_needed(R4_ARG2, arg_1); + push_cont_fastpath(); + call_VM(oop_result, entry_point, false /*check_exceptions*/, &resume_pc /* last_java_pc */); + pop_cont_fastpath(); + + // Jump to handler if the call was preempted + ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); + cmpdi(CCR0, R0, 0); + beq(CCR0, not_preempted); + mtlr(R0); + li(R0, 0); + std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); + blr(); + + bind(resume_pc); // Location to resume execution + restore_after_resume(noreg /* fp */); + bind(not_preempted); +} + +void InterpreterMacroAssembler::restore_after_resume(Register fp) { + if (!Continuations::enabled()) return; + + const address resume_adapter = TemplateInterpreter::cont_resume_interpreter_adapter(); + add_const_optimized(R31, R29_TOC, MacroAssembler::offset_to_global_toc(resume_adapter)); + mtctr(R31); + bctrl(); + // Restore registers that are preserved across vthread preemption + assert(nonvolatile_accross_vthread_preemtion(R31) && nonvolatile_accross_vthread_preemtion(R22), ""); + ld(R3_ARG1, _abi0(callers_sp), R1_SP); // load FP + ld(R31, _ijava_state_neg(lresult), R3_ARG1); + ld(R22, _ijava_state_neg(fresult), R3_ARG1); +#ifdef ASSERT + // Assert FP is in R11_scratch1 (see generate_cont_resume_interpreter_adapter()) + { + Label ok; + ld(R12_scratch2, 0, R1_SP); // load fp + cmpd(CCR0, R12_scratch2, R11_scratch1); + beq(CCR0, ok); + stop(FILE_AND_LINE ": FP is expected in R11_scratch1"); + bind(ok); + } +#endif + if (fp != noreg && fp != R11_scratch1) { + mr(fp, R11_scratch1); + } +} + void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index e11a31c00a567..8ce619e3bdcf9 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -31,6 +31,7 @@ #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" +#include "interpreter/interpreterRuntime.hpp" #include "memory/resourceArea.hpp" #include "nativeInst_ppc.hpp" #include "oops/compressedKlass.inline.hpp" @@ -115,7 +116,8 @@ void MacroAssembler::align_prefix() { // Issue instructions that calculate given TOC from global TOC. void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16, - bool add_relocation, bool emit_dummy_addr) { + bool add_relocation, bool emit_dummy_addr, + bool add_addr_to_reloc) { int offset = -1; if (emit_dummy_addr) { offset = -128; // dummy address @@ -129,7 +131,10 @@ void MacroAssembler::calculate_address_from_global_toc(Register dst, address add if (lo16) { if (add_relocation) { // Relocate at the addi to avoid confusion with a load from the method's TOC. - relocate(internal_word_Relocation::spec(addr)); + RelocationHolder rh = add_addr_to_reloc ? 
+ internal_word_Relocation::spec(addr) : + internal_word_Relocation::spec_for_immediate(); + relocate(rh); } addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset)); } @@ -714,6 +719,7 @@ address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, } } +#ifdef ASSERT void MacroAssembler::clobber_volatile_gprs(Register excluded_register) { const int magic_number = 0x42; @@ -729,6 +735,37 @@ void MacroAssembler::clobber_volatile_gprs(Register excluded_register) { } } +void MacroAssembler::clobber_nonvolatile_registers() { + BLOCK_COMMENT("clobber nonvolatile registers {"); + Register regs[] = { + R14, + R15, + // don't zap R16_thread + R17, + R18, + R19, + R20, + R21, + R22, + R23, + R24, + R25, + R26, + R27, + R28, + // don't zap R29_TOC + R30, + R31 + }; + Register bad = regs[0]; + load_const_optimized(bad, 0xbad0101babe11111); + for (uint32_t i = 1; i < (sizeof(regs) / sizeof(Register)); i++) { + mr(regs[i], bad); + } + BLOCK_COMMENT("} clobber nonvolatile registers"); +} +#endif // ASSERT + void MacroAssembler::clobber_carg_stack_slots(Register tmp) { const int magic_number = 0x43; @@ -1283,13 +1320,14 @@ int MacroAssembler::ic_check(int end_alignment) { void MacroAssembler::call_VM_base(Register oop_result, Register last_java_sp, address entry_point, - bool check_exceptions) { + bool check_exceptions, + Label* last_java_pc) { BLOCK_COMMENT("call_VM {"); // Determine last_java_sp register. if (!last_java_sp->is_valid()) { last_java_sp = R1_SP; } - set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1); + set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1, last_java_pc); // ARG1 must hold thread address. mr(R3_ARG1, R16_thread); @@ -1318,8 +1356,8 @@ void MacroAssembler::call_VM_leaf_base(address entry_point) { BLOCK_COMMENT("} call_VM_leaf"); } -void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { - call_VM_base(oop_result, noreg, entry_point, check_exceptions); +void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions, Label* last_java_pc) { + call_VM_base(oop_result, noreg, entry_point, check_exceptions, last_java_pc); } void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, @@ -2649,10 +2687,12 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register addi(recursions, recursions, 1); std(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp); - // flag == EQ indicates success, increment held monitor count + // flag == EQ indicates success, increment held monitor count if LM_LEGACY is enabled // flag == NE indicates failure bind(success); - inc_held_monitor_count(temp); + if (LockingMode == LM_LEGACY) { + inc_held_monitor_count(temp); + } #ifdef ASSERT // Check that unlocked label is reached with flag == EQ. Label flag_correct; @@ -2672,7 +2712,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe Register temp, Register displaced_header, Register current_header) { assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_unlock_lightweight"); assert_different_registers(oop, box, temp, displaced_header, current_header); - Label success, failure, object_has_monitor, notRecursive; + Label success, failure, object_has_monitor, not_recursive; if (LockingMode == LM_LEGACY) { // Find the lock address and load the displaced header from the stack. 
@@ -2718,7 +2758,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe ld(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header); addic_(displaced_header, displaced_header, -1); - blt(CCR0, notRecursive); // Not recursive if negative after decrement. + blt(CCR0, not_recursive); // Not recursive if negative after decrement. // Recursive unlock std(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header); @@ -2727,7 +2767,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe } b(success); - bind(notRecursive); + bind(not_recursive); // Set owner to null. // Release to satisfy the JMM @@ -2757,10 +2797,12 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe std(current_header, in_bytes(JavaThread::unlocked_inflated_monitor_offset()), R16_thread); b(failure); // flag == NE - // flag == EQ indicates success, decrement held monitor count + // flag == EQ indicates success, decrement held monitor count if LM_LEGACY is enabled // flag == NE indicates failure bind(success); - dec_held_monitor_count(temp); + if (LockingMode == LM_LEGACY) { + dec_held_monitor_count(temp); + } #ifdef ASSERT // Check that unlocked label is reached with flag == EQ. Label flag_correct; @@ -2779,6 +2821,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register box, Register tmp1, Register tmp2, Register tmp3) { assert_different_registers(obj, box, tmp1, tmp2, tmp3); + assert(UseObjectMonitorTable || tmp3 == noreg, "tmp3 not needed"); assert(flag == CCR0, "bad condition register"); // Handle inflated monitor. @@ -2801,8 +2844,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla bne(CCR0, slow_path); } - const Register mark = tmp1; - const Register t = tmp3; // Usage of R0 allowed! + Register mark = tmp1; { // Lightweight locking @@ -2820,15 +2862,15 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla // when the lock stack is empty because of the _bad_oop_sentinel field. // Check if recursive. - subi(t, top, oopSize); - ldx(t, R16_thread, t); - cmpd(CCR0, obj, t); + subi(R0, top, oopSize); + ldx(R0, R16_thread, R0); + cmpd(CCR0, obj, R0); beq(CCR0, push); // Check for monitor (0b10) or locked (0b00). ld(mark, oopDesc::mark_offset_in_bytes(), obj); - andi_(t, mark, markWord::lock_mask_in_place); - cmpldi(CCR0, t, markWord::unlocked_value); + andi_(R0, mark, markWord::lock_mask_in_place); + cmpldi(CCR0, R0, markWord::unlocked_value); bgt(CCR0, inflated); bne(CCR0, slow_path); @@ -2851,13 +2893,15 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla // mark contains the tagged ObjectMonitor*. const uintptr_t monitor_tag = markWord::monitor_value; - const Register monitor = mark; + const Register monitor = UseObjectMonitorTable ? tmp1 : noreg; const Register owner_addr = tmp2; + const Register thread_id = UseObjectMonitorTable ? tmp3 : tmp1; Label monitor_locked; if (!UseObjectMonitorTable) { // Compute owner address. 
addi(owner_addr, mark, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag); + mark = noreg; } else { Label monitor_found; Register cache_addr = tmp2; @@ -2867,8 +2911,8 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla const int num_unrolled = 2; for (int i = 0; i < num_unrolled; i++) { - ld(tmp3, 0, cache_addr); - cmpd(CCR0, tmp3, obj); + ld(R0, 0, cache_addr); + cmpd(CCR0, R0, obj); beq(CCR0, monitor_found); addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference())); } @@ -2879,13 +2923,13 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla bind(loop); // Check for match. - ld(tmp3, 0, cache_addr); - cmpd(CCR0, tmp3, obj); + ld(R0, 0, cache_addr); + cmpd(CCR0, R0, obj); beq(CCR0, monitor_found); // Search until null encountered, guaranteed _null_sentinel at end. addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference())); - cmpdi(CCR1, tmp3, 0); + cmpdi(CCR1, R0, 0); bne(CCR1, loop); // Cache Miss, CCR0.NE set from cmp above b(slow_path); @@ -2898,10 +2942,10 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla } // CAS owner (null => current thread id). - Register thread_id = tmp1; + assert_different_registers(thread_id, monitor, owner_addr, box, R0); ld(thread_id, in_bytes(JavaThread::lock_id_offset()), R16_thread); cmpxchgd(/*flag=*/CCR0, - /*current_value=*/t, + /*current_value=*/R0, /*compare_value=*/(intptr_t)0, /*exchange_value=*/thread_id, /*where=*/owner_addr, @@ -2910,7 +2954,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla beq(CCR0, monitor_locked); // Check if recursive. - cmpd(CCR0, t, thread_id); + cmpd(CCR0, R0, thread_id); bne(CCR0, slow_path); // Recursive. @@ -2933,7 +2977,6 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla } bind(locked); - inc_held_monitor_count(tmp1); #ifdef ASSERT // Check that locked label is reached with flag == EQ. @@ -3108,7 +3151,6 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f } bind(unlocked); - dec_held_monitor_count(t); #ifdef ASSERT // Check that unlocked label is reached with flag == EQ. @@ -3190,9 +3232,11 @@ void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Ja std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread); } -void MacroAssembler::reset_last_Java_frame(void) { - asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()), - R16_thread, "SP was not set, still zero"); +void MacroAssembler::reset_last_Java_frame(bool check_last_java_sp) { + if (check_last_java_sp) { + asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()), + R16_thread, "SP was not set, still zero"); + } BLOCK_COMMENT("reset_last_Java_frame {"); li(R0, 0); @@ -3205,14 +3249,14 @@ void MacroAssembler::reset_last_Java_frame(void) { BLOCK_COMMENT("} reset_last_Java_frame"); } -void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) { +void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, Label* jpc) { assert_different_registers(sp, tmp1); - // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via - // TOP_IJAVA_FRAME_ABI. - // FIXME: assert that we really have a TOP_IJAVA_FRAME here! - address entry = pc(); - load_const_optimized(tmp1, entry); + if (jpc == nullptr || jpc->is_bound()) { + load_const_optimized(tmp1, jpc == nullptr ? 
pc() : target(*jpc)); + } else { + load_const(tmp1, *jpc, R12_scratch2); + } set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1); } @@ -4482,9 +4526,9 @@ void MacroAssembler::asm_assert(bool check_equal, const char *msg) { #endif } +#ifdef ASSERT void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base, const char* msg) { -#ifdef ASSERT switch (size) { case 4: lwz(R0, mem_offset, mem_base); @@ -4498,8 +4542,8 @@ void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_of ShouldNotReachHere(); } asm_assert(check_equal, msg); -#endif // ASSERT } +#endif // ASSERT void MacroAssembler::verify_coop(Register coop, const char* msg) { if (!VerifyOops) { return; } @@ -4660,6 +4704,8 @@ void MacroAssembler::cache_wbsync(bool is_presync) { } void MacroAssembler::push_cont_fastpath() { + if (!Continuations::enabled()) return; + Label done; ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread); cmpld(CCR0, R1_SP, R0); @@ -4669,6 +4715,8 @@ void MacroAssembler::push_cont_fastpath() { } void MacroAssembler::pop_cont_fastpath() { + if (!Continuations::enabled()) return; + Label done; ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread); cmpld(CCR0, R1_SP, R0); @@ -4680,6 +4728,7 @@ void MacroAssembler::pop_cont_fastpath() { // Note: Must preserve CCR0 EQ (invariant). void MacroAssembler::inc_held_monitor_count(Register tmp) { + assert(LockingMode == LM_LEGACY, ""); ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); #ifdef ASSERT Label ok; @@ -4695,6 +4744,7 @@ void MacroAssembler::inc_held_monitor_count(Register tmp) { // Note: Must preserve CCR0 EQ (invariant). void MacroAssembler::dec_held_monitor_count(Register tmp) { + assert(LockingMode == LM_LEGACY, ""); ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); #ifdef ASSERT Label ok; diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp index 224e7bff99541..eee814afd70f9 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp @@ -115,7 +115,13 @@ class MacroAssembler: public Assembler { // Global TOC. void calculate_address_from_global_toc(Register dst, address addr, bool hi16 = true, bool lo16 = true, - bool add_relocation = true, bool emit_dummy_addr = false); + bool add_relocation = true, bool emit_dummy_addr = false, + bool add_addr_to_reloc = true); + void calculate_address_from_global_toc(Register dst, Label& addr, + bool hi16 = true, bool lo16 = true, + bool add_relocation = true, bool emit_dummy_addr = false) { + calculate_address_from_global_toc(dst, target(addr), hi16, lo16, add_relocation, emit_dummy_addr, false); + } inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) { calculate_address_from_global_toc(dst, addr, true, false); }; @@ -284,7 +290,10 @@ class MacroAssembler: public Assembler { // Clobbers all volatile, (non-floating-point) general-purpose registers for debugging purposes. // This is especially useful for making calls to the JRT in places in which this hasn't been done before; // e.g. with the introduction of LRBs (load reference barriers) for concurrent garbage collection. - void clobber_volatile_gprs(Register excluded_register = noreg); + void clobber_volatile_gprs(Register excluded_register = noreg) NOT_DEBUG_RETURN; + // Load bad values into registers that are nonvolatile according to the ABI except R16_thread and R29_TOC. 
+ // This is done after vthread preemption and before vthread resume. + void clobber_nonvolatile_registers() NOT_DEBUG_RETURN; void clobber_carg_stack_slots(Register tmp); void save_nonvolatile_gprs( Register dst_base, int offset); @@ -398,7 +407,8 @@ class MacroAssembler: public Assembler { // the entry point address entry_point, // flag which indicates if exception should be checked - bool check_exception = true + bool check_exception = true, + Label* last_java_pc = nullptr ); // Support for VM calls. This is the base routine called by the @@ -411,7 +421,7 @@ class MacroAssembler: public Assembler { // Call into the VM. // Passes the thread pointer (in R3_ARG1) as a prepended argument. // Makes sure oop return values are visible to the GC. - void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); + void call_VM(Register oop_result, address entry_point, bool check_exceptions = true, Label* last_java_pc = nullptr); void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true); @@ -695,8 +705,8 @@ class MacroAssembler: public Assembler { // Support for last Java frame (but use call_VM instead where possible): // access R16_thread->last_Java_sp. void set_last_Java_frame(Register last_java_sp, Register last_Java_pc); - void reset_last_Java_frame(void); - void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1); + void reset_last_Java_frame(bool check_last_java_sp = true); + void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, Label* jpc = nullptr); // Read vm result from thread: oop_result = R16_thread->result; void get_vm_result (Register oop_result); @@ -909,7 +919,7 @@ class MacroAssembler: public Assembler { private: void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base, - const char* msg); + const char* msg) NOT_DEBUG_RETURN; public: diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp index e9c6fd38f4593..f98ad592388e7 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp @@ -191,8 +191,18 @@ inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) { } inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) { - jint& stub_inst = *(jint*) branch; - stub_inst = patched_branch(target - branch, stub_inst, 0); + if (is_branch(branch)) { + jint& stub_inst = *(jint*) branch; + stub_inst = patched_branch(target - branch, stub_inst, 0); + } else if (is_calculate_address_from_global_toc_at(branch + BytesPerInstWord, branch)) { + const address inst1_addr = branch; + const address inst2_addr = branch + BytesPerInstWord; + patch_calculate_address_from_global_toc_at(inst2_addr, inst1_addr, target); + } else if (is_load_const_at(branch)) { + patch_const(branch, (long)target); + } else { + assert(false, "instruction at " PTR_FORMAT " not recognized", p2i(branch)); + } } // Relocation of conditional far branches. 
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp index f61328fc7360c..dbc43c45e09b0 100644 --- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp +++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp @@ -205,12 +205,14 @@ intptr_t NativeMovConstReg::data() const { // Therefore we use raw decoding. if (CompressedOops::is_null(no)) return 0; return cast_from_oop(CompressedOops::decode_raw(no)); - } else { - assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool"); - + } else if (MacroAssembler::is_load_const_from_method_toc_at(addr)) { address ctable = cb->content_begin(); int offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr); return *(intptr_t *)(ctable + offset); + } else { + assert(MacroAssembler::is_calculate_address_from_global_toc_at(addr, addr - BytesPerInstWord), + "must be calculate_address_from_global_toc"); + return (intptr_t) MacroAssembler::get_address_of_calculate_address_from_global_toc_at(addr, addr - BytesPerInstWord); } } diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad index f74dde0f97e6e..93d9f990ca8a8 100644 --- a/src/hotspot/cpu/ppc/ppc.ad +++ b/src/hotspot/cpu/ppc/ppc.ad @@ -12093,15 +12093,31 @@ instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp ins_pipe(pipe_class_compare); %} -instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR1 cr1) %{ - predicate(LockingMode == LM_LIGHTWEIGHT); +instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{ + predicate(LockingMode == LM_LIGHTWEIGHT && !UseObjectMonitorTable); match(Set crx (FastLock oop box)); - effect(TEMP tmp1, TEMP tmp2, KILL cr1); + effect(TEMP tmp1, TEMP tmp2); format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %} ins_encode %{ __ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register, - $tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0); + $tmp1$$Register, $tmp2$$Register, noreg /*tmp3*/); + // If locking was successful, crx should indicate 'EQ'. + // The compiler generates a branch to the runtime call to + // _complete_monitor_locking_Java for the case where crx is 'NE'. + %} + ins_pipe(pipe_class_compare); +%} + +instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, flagsRegCR1 cr1) %{ + predicate(LockingMode == LM_LIGHTWEIGHT && UseObjectMonitorTable); + match(Set crx (FastLock oop box)); + effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr1); + + format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %} + ins_encode %{ + __ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register, + $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); // If locking was successful, crx should indicate 'EQ'. // The compiler generates a branch to the runtime call to // _complete_monitor_locking_Java for the case where crx is 'NE'. 
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp index 80bc7946587ed..84102ef41e819 100644 --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp @@ -1602,6 +1602,7 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, #ifdef ASSERT __ load_const_optimized(tmp2, ContinuationEntry::cookie_value()); __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP); + __ std(tmp2, _abi0(cr), R1_SP); #endif //ASSERT __ li(zero, 0); @@ -1645,6 +1646,10 @@ static void continuation_enter_cleanup(MacroAssembler* masm) { __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread); __ cmpd(CCR0, R1_SP, tmp1); __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP"); + __ load_const_optimized(tmp1, ContinuationEntry::cookie_value()); + __ ld(tmp2, _abi0(cr), R1_SP); + __ cmpd(CCR0, tmp1, tmp2); + __ asm_assert_eq(FILE_AND_LINE ": cookie not found"); #endif __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP); @@ -1853,6 +1858,7 @@ static void gen_continuation_enter(MacroAssembler* masm, // --- Thawing path __ bind(L_thaw); + ContinuationEntry::_thaw_call_pc_offset = __ pc() - start; __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw())); __ mtctr(R0); __ bctrl(); @@ -1971,6 +1977,10 @@ static void gen_continuation_yield(MacroAssembler* masm, __ bctr(); } +void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) { + ::continuation_enter_cleanup(masm); +} + // --------------------------------------------------------------------------- // Generate a native wrapper for a given method. The method takes arguments // in the Java compiled code convention, marshals them to the native @@ -2191,9 +2201,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, intptr_t start_pc = (intptr_t)__ pc(); intptr_t vep_start_pc; intptr_t frame_done_pc; - intptr_t oopmap_pc; Label handle_pending_exception; + Label last_java_pc; Register r_callers_sp = R21; Register r_temp_1 = R22; @@ -2202,7 +2212,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, Register r_temp_4 = R25; Register r_temp_5 = R26; Register r_temp_6 = R27; - Register r_return_pc = R28; + Register r_last_java_pc = R28; Register r_carg1_jnienv = noreg; Register r_carg2_classorobject = noreg; @@ -2364,15 +2374,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, // We MUST NOT touch any outgoing regs from this point on. // So if we must call out we must push a new frame. - // Get current pc for oopmap, and load it patchable relative to global toc. - oopmap_pc = (intptr_t) __ pc(); - __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); - - // We use the same pc/oopMap repeatedly when we call out. - oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); - - // r_return_pc now has the pc loaded that we will use when we finally call - // to native. + // The last java pc will also be used as resume pc if this is the wrapper for wait0. + // For this purpose the precise location matters but not for oopmap lookup. + __ calculate_address_from_global_toc(r_last_java_pc, last_java_pc, true, true, true, true); // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 
assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); @@ -2400,7 +2404,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, // Try fastpath for locking. if (LockingMode == LM_LIGHTWEIGHT) { // fast_lock kills r_temp_1, r_temp_2, r_temp_3. - __ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); + Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg; + __ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg); } else { // fast_lock kills r_temp_1, r_temp_2, r_temp_3. __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); @@ -2417,9 +2422,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs); // Do the call. - __ set_last_Java_frame(R11_scratch1, r_return_pc); - assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register"); + __ set_last_Java_frame(R11_scratch1, r_last_java_pc); + assert(r_last_java_pc->is_nonvolatile(), "r_last_java_pc needs to be preserved across complete_monitor_locking_C call"); + // The following call will not be preempted. + // push_cont_fastpath forces freeze slow path in case we try to preempt where we will pin the + // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()). + __ push_cont_fastpath(); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); + __ pop_cont_fastpath(); __ reset_last_Java_frame(); RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs); @@ -2430,8 +2440,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, __ bind(locked); } - // Use that pc we placed in r_return_pc a while back as the current frame anchor. - __ set_last_Java_frame(R1_SP, r_return_pc); + __ set_last_Java_frame(R1_SP, r_last_java_pc); // Publish thread state // -------------------------------------------------------------------------- @@ -2491,8 +2500,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, break; } - Label after_transition; - // Publish thread state // -------------------------------------------------------------------------- @@ -2567,7 +2574,23 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, __ lwsync(); // Acquire safepoint and suspend state, release thread state. // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); __ stw(R0, thread_(thread_state)); - __ bind(after_transition); + + // Check preemption for Object.wait() + if (LockingMode != LM_LEGACY && method->is_object_wait0()) { + Label not_preempted; + __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); + __ cmpdi(CCR0, R0, 0); + __ beq(CCR0, not_preempted); + __ mtlr(R0); + __ li(R0, 0); + __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); + __ blr(); + __ bind(not_preempted); + } + __ bind(last_java_pc); + // We use the same pc/oopMap repeatedly when we call out above. + intptr_t oopmap_pc = (intptr_t) __ pc(); + oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); } // Reguard any pages if necessary. @@ -2649,7 +2672,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, // Clear "last Java frame" SP and PC.
// -------------------------------------------------------------------------- - __ reset_last_Java_frame(); + // Last java frame won't be set if we're resuming after preemption + bool maybe_preempted = LockingMode != LM_LEGACY && method->is_object_wait0(); + __ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */); // Unbox oop result, e.g. JNIHandles::resolve value. // -------------------------------------------------------------------------- @@ -2735,7 +2760,8 @@ uint SharedRuntime::out_preserve_stack_slots() { } VMReg SharedRuntime::thread_register() { - Unimplemented(); + // On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames). + ShouldNotCallThis(); return nullptr; } diff --git a/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp b/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp index cb4af1a3ff77b..07f1c9c1c6f16 100644 --- a/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp @@ -184,8 +184,9 @@ inline int StackChunkFrameStream::interpreter_frame_num_oops() const f.interpreted_frame_oop_map(&mask); return mask.num_oops() + 1 // for the mirror oop - + ((intptr_t*)f.interpreter_frame_monitor_begin() - - (intptr_t*)f.interpreter_frame_monitor_end())/BasicObjectLock::size(); + + (f.interpreter_frame_method()->is_native() ? 1 : 0) // temp oop slot + + pointer_delta_as_int((intptr_t*)f.interpreter_frame_monitor_begin(), + (intptr_t*)f.interpreter_frame_monitor_end())/BasicObjectLock::size(); } template<> diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index 206c161287fa2..fb235a9d7f792 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -4484,6 +4484,10 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { address start = __ pc(); + if (kind == Continuation::thaw_top) { + __ clobber_nonvolatile_registers(); // Except R16_thread and R29_TOC + } + if (return_barrier) { __ mr(nvtmp, R3_RET); __ fmr(nvftmp, F1_RET); // preserve possible return value from a method returning to the return barrier DEBUG_ONLY(__ ld_ptr(tmp1, _abi0(callers_sp), R1_SP);) @@ -4572,6 +4576,41 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception); } + address generate_cont_preempt_stub() { + if (!Continuations::enabled()) return nullptr; + StubCodeMark mark(this, "StubRoutines","Continuation preempt stub"); + address start = __ pc(); + + __ clobber_nonvolatile_registers(); // Except R16_thread and R29_TOC + + __ reset_last_Java_frame(false /*check_last_java_sp*/); + + // Set sp to enterSpecial frame, i.e. remove all frames copied into the heap. + __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread); + + Label preemption_cancelled; + __ lbz(R11_scratch1, in_bytes(JavaThread::preemption_cancelled_offset()), R16_thread); + __ cmpwi(CCR0, R11_scratch1, 0); + __ bne(CCR0, preemption_cancelled); + + // Remove enterSpecial frame from the stack and return to Continuation.run() to unmount. + SharedRuntime::continuation_enter_cleanup(_masm); + __ pop_frame(); + __ restore_LR(R11_scratch1); + __ blr(); + + // We acquired the monitor after freezing the frames so call thaw to continue execution. 
+ __ bind(preemption_cancelled); + __ li(R11_scratch1, 0); // false + __ stb(R11_scratch1, in_bytes(JavaThread::preemption_cancelled_offset()), R16_thread); + int simm16_offs = __ load_const_optimized(R11_scratch1, ContinuationEntry::thaw_call_pc_address(), R0, true); + __ ld(R11_scratch1, simm16_offs, R11_scratch1); + __ mtctr(R11_scratch1); + __ bctr(); + + return start; + } + // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); @@ -4647,6 +4686,7 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); + StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub(); } void generate_final_stubs() { diff --git a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp index b8668d6da3e88..e320349583ddc 100644 --- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp @@ -697,9 +697,15 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, } address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() { - return nullptr; -} + if (!Continuations::enabled()) return nullptr; + address start = __ pc(); + + __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2); + __ restore_interpreter_state(R11_scratch1, false, true /*restore_top_frame_sp*/); + __ blr(); + return start; +} // Helpers for commoning out cases in the various type of method entries. @@ -1202,7 +1208,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { const Register signature_handler_fd = R11_scratch1; const Register pending_exception = R0; const Register result_handler_addr = R31; - const Register native_method_fd = R11_scratch1; + const Register native_method_fd = R12_scratch2; // preferred in MacroAssembler::branch_to const Register access_flags = R22_tmp2; const Register active_handles = R11_scratch1; // R26_monitor saved to state. const Register sync_state = R12_scratch2; @@ -1216,10 +1222,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { Label exception_return_sync_check; Label stack_overflow_return; - // Generate new interpreter state and jump to stack_overflow_return in case of - // a stack overflow. - //generate_compute_interpreter_state(stack_overflow_return); - Register size_of_parameters = R22_tmp2; generate_fixed_frame(true, size_of_parameters, noreg /* unused */); @@ -1258,8 +1260,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // access_flags = method->access_flags(); // Load access flags. - assert(access_flags->is_nonvolatile(), - "access_flags must be in a non-volatile register"); + assert(__ nonvolatile_accross_vthread_preemtion(access_flags), + "access_flags not preserved"); // Type check. assert(4 == sizeof(AccessFlags), "unexpected field size"); __ lwz(access_flags, method_(access_flags)); @@ -1320,8 +1322,12 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // convenient and the slow signature handler can use this same frame // anchor. 
+ bool support_vthread_preemption = Continuations::enabled() && LockingMode != LM_LEGACY; + // We have a TOP_IJAVA_FRAME here, which belongs to us. - __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/); + Label last_java_pc; + Label *resume_pc = support_vthread_preemption ? &last_java_pc : nullptr; + __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R3_ARG1/*tmp*/, resume_pc); // Now the interpreter frame (and its call chain) have been // invalidated and flushed. We are now protected against eager @@ -1340,16 +1346,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { __ call_stub(signature_handler_fd); - // Remove the register parameter varargs slots we allocated in - // compute_interpreter_state. SP+16 ends up pointing to the ABI - // outgoing argument area. - // - // Not needed on PPC64. - //__ add(SP, SP, Argument::n_int_register_parameters_c*BytesPerWord); - - assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register"); + assert(__ nonvolatile_accross_vthread_preemtion(result_handler_addr), + "result_handler_addr not preserved"); // Save across call to native method. __ mr(result_handler_addr, R3_RET); + __ ld(R11_scratch1, _abi0(callers_sp), R1_SP); // load FP __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror. @@ -1363,12 +1364,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT); __ bfalse(CCR0, method_is_not_static); - __ ld(R11_scratch1, _abi0(callers_sp), R1_SP); - // Load mirror from interpreter frame. - __ ld(R12_scratch2, _ijava_state_neg(mirror), R11_scratch1); + // Load mirror from interpreter frame (FP in R11_scratch1) + __ ld(R21_tmp1, _ijava_state_neg(mirror), R11_scratch1); // R4_ARG2 = &state->_oop_temp; __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp)); - __ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); + __ std(R21_tmp1/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); BIND(method_is_not_static); } @@ -1402,7 +1402,18 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // Call the native method. Argument registers must not have been // overwritten since "__ call_stub(signature_handler);" (except for // ARG1 and ARG2 for static methods). + + if (support_vthread_preemption) { + // result_handler_addr is a nonvolatile register. Its value will be preserved across + // the native call but only if the call isn't preempted. To preserve its value even + // in the case of preemption we save it in the lresult slot. It is restored at + // resume_pc if, and only if the call was preempted. This works because only + // j.l.Object::wait calls are preempted which don't return a result. + __ std(result_handler_addr, _ijava_state_neg(lresult), R11_scratch1); + } + __ push_cont_fastpath(); __ call_c(native_method_fd); + __ pop_cont_fastpath(); __ li(R0, 0); __ ld(R11_scratch1, 0, R1_SP); @@ -1500,6 +1511,35 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { __ lwsync(); // Acquire safepoint and suspend state, release thread state. 
__ stw(R0/*thread_state*/, thread_(thread_state)); + if (support_vthread_preemption) { + // Check preemption for Object.wait() + Label not_preempted; + __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); + __ cmpdi(CCR0, R0, 0); + __ beq(CCR0, not_preempted); + __ mtlr(R0); + __ li(R0, 0); + __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); + __ blr(); + + // Execution will be resumed here when the vthread becomes runnable again. + __ bind(*resume_pc); + __ restore_after_resume(R11_scratch1 /* fp */); + // We saved the result handler before the call + __ ld(result_handler_addr, _ijava_state_neg(lresult), R11_scratch1); +#ifdef ASSERT + // Clobber result slots. Only native methods returning void can be preempted currently. + __ load_const(R3_RET, UCONST64(0xbad01001)); + __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1); + __ std(R3_RET, _ijava_state_neg(fresult), R11_scratch1); + // reset_last_Java_frame() below asserts that a last java sp is set + __ asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_sp_offset()), + R16_thread, FILE_AND_LINE ": Last java sp should not be set when resuming"); + __ std(R3_RET, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread); +#endif + __ bind(not_preempted); + } + if (CheckJNICalls) { // clear_pending_jni_exception_check __ load_const_optimized(R0, 0L); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp index 90ac9b946c0a0..1973fcb47d8f4 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp @@ -599,7 +599,6 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_returnBarrier_exception(); address generate_cont_preempt_stub(); - address generate_cont_resume_monitor_operation(); // Continuation point for throwing of implicit exceptions that are // not handled in the current activation.
Fabricates an exception diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp index 0b9263e373bc5..320ccaf81db12 100644 --- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp +++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp @@ -1154,8 +1154,8 @@ NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, fr // including metadata between f and its args const int argsize = ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top; - log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d", - frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize); + log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d", + frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize, callee_interpreted); // we'd rather not yield inside methods annotated with @JvmtiMountTransition assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), ""); @@ -1292,18 +1292,20 @@ NOINLINE freeze_result FreezeBase::recurse_freeze_native_frame(frame& f, frame& } intptr_t* const stack_frame_top = ContinuationHelper::NativeFrame::frame_top(f); - const int fsize = f.cb()->frame_size(); + // There are no stackargs but argsize must include the metadata + const int argsize = frame::metadata_words_at_top; + const int fsize = f.cb()->frame_size() + argsize; log_develop_trace(continuations)("recurse_freeze_native_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT, f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize)); - freeze_result result = recurse_freeze_java_frame(f, caller, fsize, 0); + freeze_result result = recurse_freeze_java_frame(f, caller, fsize, argsize); if (UNLIKELY(result > freeze_ok_bottom)) { return result; } assert(result == freeze_ok, "should have caller frame"); - DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0 /* argsize */, false /* is_bottom_frame */);) + DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, false /* is_bottom_frame */);) frame hf = new_heap_frame(f, caller); intptr_t* heap_frame_top = ContinuationHelper::NativeFrame::frame_top(hf); diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp index 1875fb74a70f3..df61feccf6f6a 100644 --- a/src/hotspot/share/runtime/frame.cpp +++ b/src/hotspot/share/runtime/frame.cpp @@ -500,7 +500,7 @@ jint frame::interpreter_frame_expression_stack_size() const { return (jint)stack_size; } -#ifdef ASSERT +#if defined(ASSERT) && !defined(PPC64) static address get_register_address_in_stub(const frame& stub_fr, VMReg reg) { RegisterMap map(nullptr, RegisterMap::UpdateMap::include, @@ -512,6 +512,10 @@ static address get_register_address_in_stub(const frame& stub_fr, VMReg reg) { #endif JavaThread** frame::saved_thread_address(const frame& f) { +#if defined(PPC64) + // The current thread (JavaThread*) is never stored on the stack + return nullptr; +#else CodeBlob* cb = f.cb(); assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame"); @@ -528,6 +532,7 @@ JavaThread** frame::saved_thread_address(const frame& f) { } assert(get_register_address_in_stub(f, SharedRuntime::thread_register()) == (address)thread_addr, "wrong thread address"); return thread_addr; +#endif } // (frame::interpreter_frame_sender_sp accessor is in frame_.cpp) diff --git 
diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp
index 1875fb74a70f3..df61feccf6f6a 100644
--- a/src/hotspot/share/runtime/frame.cpp
+++ b/src/hotspot/share/runtime/frame.cpp
@@ -500,7 +500,7 @@ jint frame::interpreter_frame_expression_stack_size() const {
   return (jint)stack_size;
 }
 
-#ifdef ASSERT
+#if defined(ASSERT) && !defined(PPC64)
 static address get_register_address_in_stub(const frame& stub_fr, VMReg reg) {
   RegisterMap map(nullptr,
                   RegisterMap::UpdateMap::include,
@@ -512,6 +512,10 @@ static address get_register_address_in_stub(const frame& stub_fr, VMReg reg) {
 #endif
 
 JavaThread** frame::saved_thread_address(const frame& f) {
+#if defined(PPC64)
+  // The current thread (JavaThread*) is never stored on the stack
+  return nullptr;
+#else
   CodeBlob* cb = f.cb();
   assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame");
 
@@ -528,6 +532,7 @@ JavaThread** frame::saved_thread_address(const frame& f) {
   }
   assert(get_register_address_in_stub(f, SharedRuntime::thread_register()) == (address)thread_addr, "wrong thread address");
   return thread_addr;
+#endif
 }
 
 // (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp)
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 8f759ba8ed1f1..c5cdbc537d564 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1363,7 +1363,7 @@ std::add_rvalue_reference_t<T> declval() noexcept;
 // handled.
 bool IEEE_subnormal_handling_OK();
 
-#if defined(AMD64) || defined (AARCH64) || defined (RISCV64)
+#if defined(AMD64) || defined (AARCH64) || defined(PPC64) || defined (RISCV64)
 #define LOOM_MONITOR_SUPPORT true
 #define LOOM_MONITOR_SUPPORT_ONLY(code) code
 #define NOT_LOOM_MONITOR_SUPPORT(code)
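The globalDefinitions.hpp hunk above adds PPC64 to the platforms that define LOOM_MONITOR_SUPPORT and the LOOM_MONITOR_SUPPORT_ONLY/NOT_LOOM_MONITOR_SUPPORT macro pair. A minimal sketch of how such a pair gates code at compile time; the platform test and the #else definitions below are assumptions for illustration, since only the supported-platform branch is visible in the diff:

    // Standalone sketch (not HotSpot code). The platform check and the #else
    // branch are assumed; only the supported-platform definitions appear in
    // the patch above.
    #include <iostream>

    #if defined(__x86_64__) || defined(__aarch64__) || defined(__PPC64__) || defined(__riscv)
    #define LOOM_MONITOR_SUPPORT_ONLY(code) code
    #define NOT_LOOM_MONITOR_SUPPORT(code)
    #else
    #define LOOM_MONITOR_SUPPORT_ONLY(code)
    #define NOT_LOOM_MONITOR_SUPPORT(code) code
    #endif

    int main() {
      LOOM_MONITOR_SUPPORT_ONLY(std::cout << "monitor support compiled in\n";)
      NOT_LOOM_MONITOR_SUPPORT(std::cout << "monitor support compiled out\n";)
    }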
diff --git a/test/hotspot/jtreg/serviceability/jvmti/vthread/StopThreadTest/StopThreadTest.java b/test/hotspot/jtreg/serviceability/jvmti/vthread/StopThreadTest/StopThreadTest.java
index b47a5ddcd42db..45b65a45cf1fc 100644
--- a/test/hotspot/jtreg/serviceability/jvmti/vthread/StopThreadTest/StopThreadTest.java
+++ b/test/hotspot/jtreg/serviceability/jvmti/vthread/StopThreadTest/StopThreadTest.java
@@ -278,6 +278,7 @@ static void C() {
     static boolean preemptableVirtualThread() {
         boolean legacyLockingMode = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class)
                                         .getVMOption("LockingMode").getValue().equals("1");
-        return is_virtual && !isBoundVThread && !legacyLockingMode && (Platform.isX64() || Platform.isAArch64() || Platform.isRISCV64());
+        return is_virtual && !isBoundVThread && !legacyLockingMode && (Platform.isX64() || Platform.isAArch64() ||
+                Platform.isRISCV64() || Platform.isPPC());
     }
 }
diff --git a/test/jdk/java/lang/Thread/virtual/MonitorEnterExit.java b/test/jdk/java/lang/Thread/virtual/MonitorEnterExit.java
index 35222fb41833d..f095fb2cf9843 100644
--- a/test/jdk/java/lang/Thread/virtual/MonitorEnterExit.java
+++ b/test/jdk/java/lang/Thread/virtual/MonitorEnterExit.java
@@ -24,7 +24,6 @@
 /*
  * @test id=default
  * @summary Test virtual thread with monitor enter/exit
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
  * @modules java.base/java.lang:+open jdk.management
  * @library /test/lib
  * @build LockingMode
@@ -33,7 +32,6 @@
 
 /*
  * @test id=LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -42,7 +40,6 @@
 
 /*
  * @test id=LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -51,7 +48,6 @@
 
 /*
  * @test id=Xint-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -60,7 +56,6 @@
 
 /*
  * @test id=Xint-LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -69,7 +64,6 @@
 
 /*
  * @test id=Xcomp-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -78,7 +72,6 @@
 
 /*
  * @test id=Xcomp-LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -87,7 +80,6 @@
 
 /*
  * @test id=Xcomp-TieredStopAtLevel1-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -104,7 +96,6 @@
 
 /*
  * @test id=Xcomp-noTieredCompilation-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -113,7 +104,6 @@
 
 /*
  * @test id=Xcomp-noTieredCompilation-LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
diff --git a/test/jdk/java/lang/Thread/virtual/MonitorWaitNotify.java b/test/jdk/java/lang/Thread/virtual/MonitorWaitNotify.java
index ff6d2650d7293..98bdc7fdedd00 100644
--- a/test/jdk/java/lang/Thread/virtual/MonitorWaitNotify.java
+++ b/test/jdk/java/lang/Thread/virtual/MonitorWaitNotify.java
@@ -24,7 +24,6 @@
 /*
  * @test id=default
  * @summary Test virtual threads using Object.wait/notifyAll
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -33,7 +32,6 @@
 
 /*
  * @test id=LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -42,7 +40,6 @@
 
 /*
  * @test id=LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -51,7 +48,6 @@
 
 /*
  * @test id=Xint-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -60,7 +56,6 @@
 
 /*
  * @test id=Xint-LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -69,7 +64,6 @@
 
 /*
  * @test id=Xcomp-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -78,7 +72,6 @@
 
 /*
  * @test id=Xcomp-LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -87,7 +80,6 @@
 
 /*
  * @test id=Xcomp-TieredStopAtLevel1-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -104,7 +96,6 @@
 
 /*
  * @test id=Xcomp-noTieredCompilation-LM_LEGACY
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -113,7 +104,6 @@
 
 /*
  * @test id=Xcomp-noTieredCompilation-LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
diff --git a/test/jdk/java/lang/Thread/virtual/Parking.java b/test/jdk/java/lang/Thread/virtual/Parking.java
index 25bae44700dbf..41f7f283bdd06 100644
--- a/test/jdk/java/lang/Thread/virtual/Parking.java
+++ b/test/jdk/java/lang/Thread/virtual/Parking.java
@@ -24,7 +24,6 @@
 /*
  * @test id=default
  * @summary Test virtual threads using park/unpark
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -33,7 +32,6 @@
 
 /*
  * @test id=Xint
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -42,7 +40,6 @@
 
 /*
  * @test id=Xcomp
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
@@ -51,7 +48,6 @@
 
 /*
  * @test id=Xcomp-noTieredCompilation
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @modules java.base/java.lang:+open jdk.management
 * @library /test/lib
 * @build LockingMode
diff --git a/test/jdk/java/lang/Thread/virtual/stress/LotsOfContendedMonitorEnter.java b/test/jdk/java/lang/Thread/virtual/stress/LotsOfContendedMonitorEnter.java
index 58fc888897ba6..86096156b9127 100644
--- a/test/jdk/java/lang/Thread/virtual/stress/LotsOfContendedMonitorEnter.java
+++ b/test/jdk/java/lang/Thread/virtual/stress/LotsOfContendedMonitorEnter.java
@@ -24,7 +24,6 @@
 /*
  * @test id=default
  * @summary Test virtual threads entering a lot of monitors with contention
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @requires vm.opt.LockingMode != 1
 * @library /test/lib
 * @run main/othervm LotsOfContendedMonitorEnter
@@ -32,7 +31,6 @@
 
 /*
  * @test id=LM_LIGHTWEIGHT
- * @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
 * @requires vm.opt.LockingMode != 1
 * @library /test/lib
 * @run main/othervm -XX:LockingMode=2 LotsOfContendedMonitorEnter
diff --git a/test/jdk/jdk/internal/vm/Continuation/Basic.java b/test/jdk/jdk/internal/vm/Continuation/Basic.java
index 5d5c2596e42e6..2ccd0e2860aad 100644
--- a/test/jdk/jdk/internal/vm/Continuation/Basic.java
+++ b/test/jdk/jdk/internal/vm/Continuation/Basic.java
@@ -280,7 +280,7 @@ static String barMany(long b,
 
     @Test
     public void testPinnedMonitor() {
-        if (Platform.isX64() || Platform.isAArch64() || Platform.isRISCV64()) return;
+        if (Platform.isX64() || Platform.isAArch64() || Platform.isPPC() || Platform.isRISCV64()) return;
 
         // Test pinning due to held monitor
         final AtomicReference res = new AtomicReference<>();
diff --git a/test/jdk/jdk/internal/vm/Continuation/Fuzz.java b/test/jdk/jdk/internal/vm/Continuation/Fuzz.java
index f33c4c7a0064f..c332e391327cc 100644
--- a/test/jdk/jdk/internal/vm/Continuation/Fuzz.java
+++ b/test/jdk/jdk/internal/vm/Continuation/Fuzz.java
@@ -473,7 +473,8 @@ void verifyResult(int result) {
     }
 
     boolean shouldPin() {
-        return traceHas(Op.PIN::contains) && (legacyLockingMode() || !(Platform.isX64() || Platform.isAArch64() || Platform.isRISCV64()));
+        return traceHas(Op.PIN::contains) &&
+               (legacyLockingMode() || !(Platform.isX64() || Platform.isAArch64() || Platform.isRISCV64() || Platform.isPPC()));
     }
 
     void verifyPin(boolean yieldResult) {