From 4439450340a9bd50c3210931ba29000c012c55de Mon Sep 17 00:00:00 2001 From: Frederic Parain Date: Tue, 10 Dec 2024 10:02:08 -0500 Subject: [PATCH 1/4] More flat arrays work and code cleanup --- .../gc/shared/barrierSetAssembler_aarch64.cpp | 17 +- .../gc/shared/barrierSetAssembler_aarch64.hpp | 2 - .../cpu/aarch64/macroAssembler_aarch64.cpp | 8 +- .../cpu/aarch64/macroAssembler_aarch64.hpp | 1 - .../cpu/aarch64/templateTable_aarch64.cpp | 46 +- .../x86/gc/shared/barrierSetAssembler_x86.cpp | 17 +- .../x86/gc/shared/barrierSetAssembler_x86.hpp | 2 - src/hotspot/cpu/x86/interp_masm_x86.cpp | 47 -- src/hotspot/cpu/x86/interp_masm_x86.hpp | 9 - src/hotspot/cpu/x86/macroAssembler_x86.cpp | 8 +- src/hotspot/cpu/x86/macroAssembler_x86.hpp | 3 - src/hotspot/cpu/x86/templateTable_x86.cpp | 41 +- src/hotspot/share/c1/c1_Runtime1.cpp | 13 +- src/hotspot/share/cds/archiveHeapWriter.cpp | 2 +- src/hotspot/share/ci/ciArrayKlass.cpp | 9 +- src/hotspot/share/ci/ciReplay.cpp | 2 +- .../share/classfile/fieldLayoutBuilder.cpp | 14 +- .../share/classfile/fieldLayoutBuilder.hpp | 2 +- .../share/gc/shared/barrierSetRuntime.cpp | 15 +- .../share/gc/shared/barrierSetRuntime.hpp | 6 +- src/hotspot/share/gc/shared/collectedHeap.cpp | 2 +- src/hotspot/share/gc/shared/memAllocator.cpp | 2 +- src/hotspot/share/include/jvm.h | 9 + .../share/interpreter/interpreterRuntime.cpp | 68 ++- .../share/interpreter/interpreterRuntime.hpp | 4 +- src/hotspot/share/memory/oopFactory.cpp | 39 +- src/hotspot/share/memory/oopFactory.hpp | 3 +- src/hotspot/share/oops/arrayOop.hpp | 10 +- src/hotspot/share/oops/fieldInfo.hpp | 10 +- src/hotspot/share/oops/flatArrayKlass.cpp | 288 +++++------ src/hotspot/share/oops/flatArrayKlass.hpp | 19 +- src/hotspot/share/oops/flatArrayOop.hpp | 6 +- .../share/oops/flatArrayOop.inline.hpp | 46 +- src/hotspot/share/oops/inlineKlass.cpp | 212 ++++++-- src/hotspot/share/oops/inlineKlass.hpp | 97 ++-- src/hotspot/share/oops/inlineKlass.inline.hpp | 20 +- 
src/hotspot/share/oops/instanceKlass.cpp | 2 +- src/hotspot/share/oops/instanceKlass.hpp | 9 +- src/hotspot/share/oops/klass.hpp | 4 +- src/hotspot/share/oops/layoutKind.hpp | 37 ++ src/hotspot/share/oops/markWord.cpp | 14 + src/hotspot/share/oops/markWord.hpp | 13 +- src/hotspot/share/oops/objArrayKlass.cpp | 1 - src/hotspot/share/opto/parse2.cpp | 20 +- src/hotspot/share/opto/runtime.cpp | 20 +- src/hotspot/share/opto/runtime.hpp | 3 +- src/hotspot/share/opto/type.cpp | 6 +- src/hotspot/share/prims/jni.cpp | 19 +- src/hotspot/share/prims/jvm.cpp | 66 ++- src/hotspot/share/prims/unsafe.cpp | 8 +- src/hotspot/share/runtime/deoptimization.cpp | 4 +- src/hotspot/share/runtime/javaCalls.cpp | 2 +- .../share/runtime/stubDeclarations.hpp | 1 + src/hotspot/share/services/heapDumper.cpp | 4 +- .../share/utilities/globalDefinitions.cpp | 6 +- .../share/utilities/globalDefinitions.hpp | 12 +- .../jdk/internal/value/ValueClass.java | 8 + .../include/classfile_constants.h.template | 2 +- .../share/native/libjava/ValueClass.c | 18 + test/hotspot/gtest/oops/test_markWord.cpp | 19 +- .../valhalla/inlinetypes/FlatArraysTest.java | 459 ++++++++++++++++++ .../valhalla/inlinetypes/InlineTypeArray.java | 3 +- 62 files changed, 1247 insertions(+), 612 deletions(-) create mode 100644 src/hotspot/share/oops/layoutKind.hpp create mode 100644 test/hotspot/jtreg/runtime/valhalla/inlinetypes/FlatArraysTest.java diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp index 8e6711f89a8..77d6efe3a48 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp @@ -138,19 +138,6 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators } } -void BarrierSetAssembler::value_copy(MacroAssembler* masm, DecoratorSet decorators, - Register src, Register dst, Register value_klass) { - // value_copy 
implementation is fairly complex, and there are not any - // "short-cuts" to be made from asm. What there is, appears to have the same - // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds - // of hand-rolled instructions... - if (decorators & IS_DEST_UNINITIALIZED) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, value_klass); - } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, value_klass); - } -} - void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet decorators, Register src, Register dst, Register inline_layout_info) { // flat_field_copy implementation is fairly complex, and there are not any @@ -158,9 +145,9 @@ void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet dec // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds // of hand-rolled instructions... if (decorators & IS_DEST_UNINITIALIZED) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized2), src, dst, inline_layout_info); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, inline_layout_info); } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy2), src, dst, inline_layout_info); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, inline_layout_info); } } diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp index bc5e7cd7826..7d19d096f78 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp @@ -100,8 +100,6 @@ class BarrierSetAssembler: public CHeapObj { virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, 
Register val, Register tmp1, Register tmp2, Register tmp3); - virtual void value_copy(MacroAssembler* masm, DecoratorSet decorators, - Register src, Register dst, Register value_klass); virtual void flat_field_copy(MacroAssembler* masm, DecoratorSet decorators, Register src, Register dst, Register inline_layout_info); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 5fd3040a718..883f6bddd8d 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -5408,12 +5408,6 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, } } -void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, - Register inline_klass) { - BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->value_copy(this, decorators, src, dst, inline_klass); -} - void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info) { BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); @@ -5451,7 +5445,7 @@ void MacroAssembler::data_for_value_array_index(Register array, Register array_k lslv(index, index, rscratch1); add(data, array, index); - add(data, data, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)); + add(data, data, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)); } void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index c04c7e6462e..4e54f01e4c7 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -926,7 +926,6 @@ class MacroAssembler: public Assembler { void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, Register tmp1, Register tmp2, Register 
tmp3); - void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass); void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info); // inline type data payload offsets... diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp index 948912f770a..e745db36a8f 100644 --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp @@ -828,7 +828,7 @@ void TemplateTable::aaload() __ b(done); __ bind(is_flat_array); - __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1); + __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), r0, r1); // Ensure the stores to copy the inline field contents are visible // before any subsequent store that publishes this reference. __ membar(Assembler::StoreStore); @@ -1182,7 +1182,11 @@ void TemplateTable::aastore() { if (EnableValhalla) { Label is_null_into_value_array_npe, store_null; - // No way to store null in flat null-free array + if (UseFlatArray) { + __ test_flat_array_oop(r3, r8, is_flat_array); + } + + // No way to store null in a null-free array __ test_null_free_array_oop(r3, r8, is_null_into_value_array_npe); __ b(store_null); @@ -1200,40 +1204,10 @@ void TemplateTable::aastore() { Label is_type_ok; __ bind(is_flat_array); // Store non-null value to flat - // Simplistic type check... - // r0 - value, r2 - index, r3 - array. - - // Profile the not-null value's klass. 
- // Load value class - __ load_klass(r1, r0); - - // Move element klass into r7 - __ ldr(r7, Address(r5, ArrayKlass::element_klass_offset())); - - // flat value array needs exact type match - // is "r1 == r7" (value subclass == array element superclass) - - __ cmp(r7, r1); - __ br(Assembler::EQ, is_type_ok); - - __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry)); - - __ bind(is_type_ok); - // r1: value's klass - // r3: array - // r5: array klass - __ test_klass_is_empty_inline_type(r1, r7, done); - - // calc dst for copy - __ ldrw(r7, at_tos_p1()); // index - __ data_for_value_array_index(r3, r5, r7, r7); - - // ...and src for copy - __ ldr(r6, at_tos()); // value - __ data_for_oop(r6, r6, r1); - - __ mov(r4, r1); // Shuffle arguments to avoid conflict with c_rarg1 - __ access_value_copy(IN_HEAP, r6, r7, r4); + __ ldr(r0, at_tos()); // value + __ ldr(r3, at_tos_p1()); // index + __ ldr(r2, at_tos_p2()); // array + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_store), r0, r2, r3); } // Pop stack arguments diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp index ca78ec91342..1a7d4a633b3 100644 --- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp @@ -200,19 +200,6 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators } } -void BarrierSetAssembler::value_copy(MacroAssembler* masm, DecoratorSet decorators, - Register src, Register dst, Register value_klass) { - // value_copy implementation is fairly complex, and there are not any - // "short-cuts" to be made from asm. What there is, appears to have the same - // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds - // of hand-rolled instructions... 
- if (decorators & IS_DEST_UNINITIALIZED) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, value_klass); - } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, value_klass); - } -} - void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet decorators, Register src, Register dst, Register inline_layout_info) { // flat_field_copy implementation is fairly complex, and there are not any @@ -220,9 +207,9 @@ void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet dec // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds // of hand-rolled instructions... if (decorators & IS_DEST_UNINITIALIZED) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized2), src, dst, inline_layout_info); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, inline_layout_info); } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy2), src, dst, inline_layout_info); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, inline_layout_info); } } diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp index 3c7db6d0d46..f9608aa771b 100644 --- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp @@ -48,8 +48,6 @@ class BarrierSetAssembler: public CHeapObj { virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); - virtual void value_copy(MacroAssembler* masm, DecoratorSet decorators, - Register src, Register dst, Register value_klass); virtual void flat_field_copy(MacroAssembler* masm, DecoratorSet decorators, Register src, Register dst, 
Register inline_layout_info); diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp index bd0917edce6..5d9c3de606f 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -1247,53 +1247,6 @@ void InterpreterMacroAssembler::read_flat_field(Register entry, Register tmp1, R bind(done); } -void InterpreterMacroAssembler::read_flat_element(Register array, Register index, - Register t1, Register t2, - Register obj) { - assert_different_registers(array, index, t1, t2); - Label alloc_failed, empty_value, done; - const Register array_klass = t2; - const Register elem_klass = t1; - const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi); - const Register dst_temp = LP64_ONLY(rscratch2) NOT_LP64(rdi); - - // load in array->klass()->element_klass() - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - load_klass(array_klass, array, tmp_load_klass); - movptr(elem_klass, Address(array_klass, ArrayKlass::element_klass_offset())); - - //check for empty value klass - test_klass_is_empty_inline_type(elem_klass, dst_temp, empty_value); - - // calc source into "array_klass" and free up some regs - const Register src = array_klass; - push(index); // preserve index reg in case alloc_failed - data_for_value_array_index(array, array_klass, index, src); - - allocate_instance(elem_klass, obj, alloc_temp, dst_temp, false, alloc_failed); - // Have an oop instance buffer, copy into it - store_ptr(0, obj); // preserve obj (overwrite index, no longer needed) - data_for_oop(obj, dst_temp, elem_klass); - access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, elem_klass); - pop(obj); - jmp(done); - - bind(empty_value); - get_empty_inline_type_oop(elem_klass, dst_temp, obj); - jmp(done); - - bind(alloc_failed); - pop(index); - if (array == c_rarg2) { - mov(elem_klass, array); - array = elem_klass; - } - call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), array, 
index); - - bind(done); -} - - // Lock object // // Args: diff --git a/src/hotspot/cpu/x86/interp_masm_x86.hpp b/src/hotspot/cpu/x86/interp_masm_x86.hpp index 072af52795b..5e6212915ea 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.hpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.hpp @@ -226,15 +226,6 @@ class InterpreterMacroAssembler: public MacroAssembler { Register tmp1, Register tmp2, Register obj = rax); - // Allocate value buffer in "obj" and read in flat element at the given index - // NOTES: - // - Return via "obj" must be rax - // - kills all given regs - // - 32 bits: kills rdi and rsi - void read_flat_element(Register array, Register index, - Register t1, Register t2, - Register obj = rax); - // Object locking void lock_object (Register lock_reg); void unlock_object(Register lock_reg); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index 1624bccff20..56d96e6d6a1 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -6056,12 +6056,6 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Ad } } -void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, - Register inline_klass) { - BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->value_copy(this, decorators, src, dst, inline_klass); -} - void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info) { BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); @@ -6099,7 +6093,7 @@ void MacroAssembler::data_for_value_array_index(Register array, Register array_k andl(rcx, Klass::_lh_log2_element_size_mask); shlptr(index); // index << rcx - lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT))); + lea(data, Address(array, index, Address::times_1, 
arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT))); } void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 41e879a42c7..71243455200 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -405,10 +405,7 @@ class MacroAssembler: public Assembler { void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); - void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass); void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info); - // We probably need the following for arrays: TODO FIXME - // void flat_element_copy(DecoratorSet decorators, Register src, Register dst, Register array); // inline type data payload offsets... void first_field_offset(Register inline_klass, Register offset); diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index 37b8b3928c3..c34d301d260 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -848,7 +848,8 @@ void TemplateTable::aaload() { IS_ARRAY); __ jmp(done); __ bind(is_flat_array); - __ read_flat_element(array, index, rbx, rcx, rax); + __ movptr(rbx, array); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), rbx, index); __ bind(done); } else { do_oop_load(_masm, @@ -1200,6 +1201,13 @@ void TemplateTable::aastore() { if (EnableValhalla) { Label is_null_into_value_array_npe, store_null; + // Move array class to rdi + __ load_klass(rdi, rdx, rscratch1); + if (UseFlatArray) { + __ movl(rbx, Address(rdi, Klass::layout_helper_offset())); + __ test_flat_array_layout(rbx, is_flat_array); + } + // No way to store null in null-free array __ test_null_free_array_oop(rdx, rbx, 
is_null_into_value_array_npe); __ jmp(store_null); @@ -1217,34 +1225,11 @@ void TemplateTable::aastore() { Label is_type_ok; __ bind(is_flat_array); // Store non-null value to flat - // Simplistic type check... - - // Profile the not-null value's klass. - __ load_klass(rbx, rax, rscratch1); - // Move element klass into rax - __ movptr(rax, Address(rdi, ArrayKlass::element_klass_offset())); - // flat value array needs exact type match - // is "rax == rbx" (value subclass == array element superclass) - __ cmpptr(rax, rbx); - __ jccb(Assembler::equal, is_type_ok); - - __ jump(RuntimeAddress(Interpreter::_throw_ArrayStoreException_entry)); - - __ bind(is_type_ok); - // rbx: value's klass - // rdx: array - // rdi: array klass - __ test_klass_is_empty_inline_type(rbx, rax, done); - - // calc dst for copy - __ movl(rax, at_tos_p1()); // index - __ data_for_value_array_index(rdx, rdi, rax, rax); - - // ...and src for copy - __ movptr(rcx, at_tos()); // value - __ data_for_oop(rcx, rcx, rbx); + __ movptr(rax, at_tos()); + __ movl(rcx, at_tos_p1()); // index + __ movptr(rdx, at_tos_p2()); // array - __ access_value_copy(IN_HEAP, rcx, rax, rbx); + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_store), rax, rdx, rcx); } // Pop stack arguments __ bind(done); diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp index a0ce448e2ed..2d4c8c90639 100644 --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -446,9 +446,16 @@ JRT_ENTRY(void, Runtime1::new_null_free_array(JavaThread* current, Klass* array_ Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass(); assert(elem_klass->is_inline_klass(), "must be"); + InlineKlass* vk = InlineKlass::cast(elem_klass); // Logically creates elements, ensure klass init elem_klass->initialize(CHECK); - arrayOop obj = oopFactory::new_valueArray(elem_klass, length, 
CHECK); + arrayOop obj= nullptr; + // Limitation here, only non-atomic layouts are supported + if (UseFlatArray && vk->has_non_atomic_layout()) { + obj = oopFactory::new_flatArray(elem_klass, length, LayoutKind::NON_ATOMIC_FLAT, CHECK); + } else { + obj = oopFactory::new_null_free_objArray(elem_klass, length, CHECK); + } current->set_vm_result(obj); // This is pretty rare but this runtime patch is stressful to deoptimization // if we deoptimize here so force a deopt to stress the path. @@ -509,7 +516,7 @@ JRT_ENTRY(void, Runtime1::load_flat_array(JavaThread* current, flatArrayOopDesc* NOT_PRODUCT(_load_flat_array_slowcase_cnt++;) assert(array->length() > 0 && index < array->length(), "already checked"); flatArrayHandle vah(current, array); - oop obj = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK); + oop obj = array->read_value_from_flat_array(index, CHECK); current->set_vm_result(obj); JRT_END @@ -525,7 +532,7 @@ JRT_ENTRY(void, Runtime1::store_flat_array(JavaThread* current, flatArrayOopDesc SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException()); } else { assert(array->klass()->is_flatArray_klass(), "should not be called"); - array->value_copy_to_index(value, index, LayoutKind::PAYLOAD); // Non atomic is currently the only layout supported by flat arrays + array->write_value_to_flat_array(value, index, CHECK); } JRT_END diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp index 24e298f4900..14c4fe758a3 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.cpp +++ b/src/hotspot/share/cds/archiveHeapWriter.cpp @@ -214,7 +214,7 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeapis_loaded()) { GUARDED_VM_ENTRY( EXCEPTION_CONTEXT; - Klass* ak = InlineKlass::cast(klass->get_Klass())->value_array_klass(THREAD); + Klass* ak = nullptr; + InlineKlass* vk = InlineKlass::cast(klass->get_Klass()); + if (UseFlatArray && vk->has_non_atomic_layout()) { + 
// Current limitation: returns only non-atomic flat arrays, atomic layout not supported here + ak = vk->flat_array_klass(LayoutKind::NON_ATOMIC_FLAT, THREAD); + } else { + ak = vk->null_free_reference_array(THREAD); + } if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; } else if (ak->is_flatArray_klass()) { diff --git a/src/hotspot/share/ci/ciReplay.cpp b/src/hotspot/share/ci/ciReplay.cpp index 99ec0d3b65d..d3b235ee0a0 100644 --- a/src/hotspot/share/ci/ciReplay.cpp +++ b/src/hotspot/share/ci/ciReplay.cpp @@ -1153,7 +1153,7 @@ class CompileReplay : public StackObj { } else if (field_signature[0] == JVM_SIGNATURE_ARRAY) { Klass* kelem = resolve_klass(field_signature + 1, CHECK_(true)); parse_klass(CHECK_(true)); // eat up the array class name - value = oopFactory::new_valueArray(kelem, length, CHECK_(true)); + value = oopFactory::new_flatArray(kelem, length, LayoutKind::NON_ATOMIC_FLAT, CHECK_(true)); // TODO FIXME fix the hard coded layout kind } else { report_error("unhandled array staticfield"); } diff --git a/src/hotspot/share/classfile/fieldLayoutBuilder.cpp b/src/hotspot/share/classfile/fieldLayoutBuilder.cpp index 0f40739ab14..dcab50cd2e9 100644 --- a/src/hotspot/share/classfile/fieldLayoutBuilder.cpp +++ b/src/hotspot/share/classfile/fieldLayoutBuilder.cpp @@ -61,7 +61,7 @@ static LayoutKind field_layout_selection(FieldInfo field_info, Arrayhas_non_atomic_layout() ? 
LayoutKind::NON_ATOMIC_FLAT : LayoutKind::REFERENCE; } } else { - if (NullableFieldFlattening && vk->has_nullable_layout()) { + if (NullableFieldFlattening && vk->has_nullable_atomic_layout()) { return LayoutKind::NULLABLE_ATOMIC_FLAT; } else { return LayoutKind::REFERENCE; @@ -80,7 +80,7 @@ static void get_size_and_alignment(InlineKlass* vk, LayoutKind kind, int* size, *alignment = *size; break; case LayoutKind::NULLABLE_ATOMIC_FLAT: - *size = vk->nullable_size_in_bytes(); + *size = vk->nullable_atomic_size_in_bytes(); *alignment = *size; break; default: @@ -1192,7 +1192,7 @@ void FieldLayoutBuilder::compute_inline_class_layout() { if (has_atomic_layout() && _payload_alignment < atomic_layout_size_in_bytes()) { required_alignment = atomic_layout_size_in_bytes(); } - if (has_nullable_layout() && _payload_alignment < nullable_layout_size_in_bytes()) { + if (has_nullable_atomic_layout() && _payload_alignment < nullable_layout_size_in_bytes()) { required_alignment = nullable_layout_size_in_bytes(); } int shift = first_field->offset() % required_alignment; @@ -1201,14 +1201,14 @@ void FieldLayoutBuilder::compute_inline_class_layout() { assert(_layout->first_field_block() != nullptr, "A concrete value class must have at least one (possible dummy) field"); _layout->shift_fields(shift); _first_field_offset = _layout->first_field_block()->offset(); - if (has_nullable_layout()) { + if (has_nullable_atomic_layout()) { assert(!_is_empty_inline_class, "Should not get here with empty values"); _null_marker_offset = _layout->find_null_marker()->offset(); } _payload_alignment = required_alignment; } else { _atomic_layout_size_in_bytes = -1; - if (has_nullable_layout() && !_is_empty_inline_class) { // empty values don't have a dedicated NULL_MARKER block + if (has_nullable_atomic_layout() && !_is_empty_inline_class) { // empty values don't have a dedicated NULL_MARKER block _layout->remove_null_marker(); } _nullable_layout_size_in_bytes = -1; @@ -1221,7 +1221,7 @@ void 
FieldLayoutBuilder::compute_inline_class_layout() { // If the inline class has a nullable layout, the layout used in heap allocated standalone // instances must also be the nullable layout, in order to be able to set the null marker to // non-null before copying the payload to other containers. - if (has_nullable_layout() && payload_layout_size_in_bytes() < nullable_layout_size_in_bytes()) { + if (has_nullable_atomic_layout() && payload_layout_size_in_bytes() < nullable_layout_size_in_bytes()) { _payload_size_in_bytes = nullable_layout_size_in_bytes(); } } @@ -1385,7 +1385,7 @@ void FieldLayoutBuilder::epilogue() { } else { st.print_cr("Atomic flat layout: -/-"); } - if (has_nullable_layout()) { + if (has_nullable_atomic_layout()) { st.print_cr("Nullable flat layout: %d/%d", _nullable_layout_size_in_bytes, _nullable_layout_size_in_bytes); } else { st.print_cr("Nullable flat layout: -/-"); diff --git a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp index 6658b945f68..a85c3f3548f 100644 --- a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp +++ b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp @@ -330,7 +330,7 @@ class FieldLayoutBuilder : public ResourceObj { int non_atomic_layout_alignment() const { return _non_atomic_layout_alignment; } bool has_atomic_layout() const { return _atomic_layout_size_in_bytes != -1; } int atomic_layout_size_in_bytes() const { return _atomic_layout_size_in_bytes; } - bool has_nullable_layout() const { return _nullable_layout_size_in_bytes != -1; } + bool has_nullable_atomic_layout() const { return _nullable_layout_size_in_bytes != -1; } int nullable_layout_size_in_bytes() const { return _nullable_layout_size_in_bytes; } int null_marker_offset() const { return _null_marker_offset; } bool is_empty_inline_class() const { return _is_empty_inline_class; } diff --git a/src/hotspot/share/gc/shared/barrierSetRuntime.cpp b/src/hotspot/share/gc/shared/barrierSetRuntime.cpp index 
ace47d8ffbc..a3d36bb3024 100644 --- a/src/hotspot/share/gc/shared/barrierSetRuntime.cpp +++ b/src/hotspot/share/gc/shared/barrierSetRuntime.cpp @@ -28,21 +28,10 @@ #include "runtime/interfaceSupport.inline.hpp" #include "utilities/macros.hpp" - -JRT_LEAF(void, BarrierSetRuntime::value_copy(void* src, void* dst, InlineKlass* md)) - assert(md->is_inline_klass(), "invariant"); - HeapAccess<>::value_copy(src, dst, md, LayoutKind::PAYLOAD); // FIXME Hard coded value for the transition -JRT_END - -JRT_LEAF(void, BarrierSetRuntime::value_copy2(void* src, void* dst, InlineLayoutInfo* li)) +JRT_LEAF(void, BarrierSetRuntime::value_copy(void* src, void* dst, InlineLayoutInfo* li)) HeapAccess<>::value_copy(src, dst, li->klass(), li->kind()); JRT_END -JRT_LEAF(void, BarrierSetRuntime::value_copy_is_dest_uninitialized(void* src, void* dst, InlineKlass* md)) - assert(md->is_inline_klass(), "invariant"); - HeapAccess::value_copy(src, dst, md, LayoutKind::PAYLOAD); // FIXME Hard coded value for the transition -JRT_END - -JRT_LEAF(void, BarrierSetRuntime::value_copy_is_dest_uninitialized2(void* src, void* dst, InlineLayoutInfo* li)) +JRT_LEAF(void, BarrierSetRuntime::value_copy_is_dest_uninitialized(void* src, void* dst, InlineLayoutInfo* li)) HeapAccess::value_copy(src, dst, li->klass(), li->kind()); JRT_END diff --git a/src/hotspot/share/gc/shared/barrierSetRuntime.hpp b/src/hotspot/share/gc/shared/barrierSetRuntime.hpp index 07c01ffadcf..8f623f37a00 100644 --- a/src/hotspot/share/gc/shared/barrierSetRuntime.hpp +++ b/src/hotspot/share/gc/shared/barrierSetRuntime.hpp @@ -37,10 +37,8 @@ class JavaThread; class BarrierSetRuntime: public AllStatic { public: // Template interpreter... 
- static void value_copy(void* src, void* dst, InlineKlass* md); - static void value_copy2(void* src, void* dst, InlineLayoutInfo* layout_info); - static void value_copy_is_dest_uninitialized(void* src, void* dst, InlineKlass* md); - static void value_copy_is_dest_uninitialized2(void* src, void* dst, InlineLayoutInfo* layout_info); + static void value_copy(void* src, void* dst, InlineLayoutInfo* layout_info); + static void value_copy_is_dest_uninitialized(void* src, void* dst, InlineLayoutInfo* layout_info); }; #endif // SHARE_GC_SHARED_BARRIERSETRUNTIME_HPP diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 82eaaf9a396..9ec30dd4860 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -405,7 +405,7 @@ void CollectedHeap::set_gc_cause(GCCause::Cause v) { // Returns the header size in words aligned to the requirements of the // array object type. static int int_array_header_size() { - size_t typesize_in_bytes = arrayOopDesc::header_size_in_bytes(); + size_t typesize_in_bytes = arrayOopDesc::header_size_in_bytes(T_INT); return (int)align_up(typesize_in_bytes, HeapWordSize)/HeapWordSize; } diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp index ffbab37b4a3..a763e09b415 100644 --- a/src/hotspot/share/gc/shared/memAllocator.cpp +++ b/src/hotspot/share/gc/shared/memAllocator.cpp @@ -404,7 +404,7 @@ oop ObjArrayAllocator::initialize(HeapWord* mem) const { void ObjArrayAllocator::mem_zap_start_padding(HeapWord* mem) const { const BasicType element_type = ArrayKlass::cast(_klass)->element_type(); const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type); - const size_t header_size_in_bytes = arrayOopDesc::header_size_in_bytes(); + const size_t header_size_in_bytes = arrayOopDesc::header_size_in_bytes(element_type); const address base = reinterpret_cast
<address>(mem) + base_offset_in_bytes; const address header_end = reinterpret_cast<address>
(mem) + header_size_in_bytes; diff --git a/src/hotspot/share/include/jvm.h b/src/hotspot/share/include/jvm.h index f60367a4857..9be81e31cf4 100644 --- a/src/hotspot/share/include/jvm.h +++ b/src/hotspot/share/include/jvm.h @@ -1125,6 +1125,15 @@ JVM_GetTemporaryDirectory(JNIEnv *env); JNIEXPORT jarray JNICALL JVM_NewNullRestrictedArray(JNIEnv *env, jclass elmClass, jint len); +JNIEXPORT jarray JNICALL +JVM_NewNullRestrictedAtomicArray(JNIEnv *env, jclass elmClass, jint len); + +JNIEXPORT jarray JNICALL +JVM_NewNullableAtomicArray(JNIEnv *env, jclass elmClass, jint len); + +JNIEXPORT jboolean JNICALL +JVM_IsFlatArray(JNIEnv *env, jobject obj); + JNIEXPORT jboolean JNICALL JVM_IsNullRestrictedArray(JNIEnv *env, jobject obj); diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index 51a7f28f57e..f8186be1dbd 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -302,7 +302,14 @@ JRT_ENTRY(void, InterpreterRuntime::read_flat_field(JavaThread* current, oopDesc InlineLayoutInfo* layout_info = holder->inline_layout_info_adr(entry->field_index()); InlineKlass* field_vklass = layout_info->klass(); - oop res = field_vklass->read_flat_field(obj_h(), entry->field_offset(), layout_info->kind(), CHECK); +#ifdef ASSERT + fieldDescriptor fd; + bool found = holder->find_field_from_offset(entry->field_offset(), false, &fd); + assert(found, "Field not found"); + assert(fd.is_flat(), "Field must be flat"); +#endif // ASSERT + + oop res = field_vklass->read_payload_from_addr(obj_h(), entry->field_offset(), layout_info->kind(), CHECK); current->set_vm_result(res); JRT_END @@ -315,14 +322,17 @@ JRT_ENTRY(void, InterpreterRuntime::read_nullable_flat_field(JavaThread* current int field_index = entry->field_index(); InlineLayoutInfo* li= holder->inline_layout_info_adr(field_index); - int nm_offset = li->null_marker_offset(); - if 
(obj_h()->byte_field_acquire(nm_offset) == 0) { - current->set_vm_result(nullptr); - } else { - InlineKlass* field_vklass = InlineKlass::cast(li->klass()); - oop res = field_vklass->read_flat_field(obj_h(), entry->field_offset(), LayoutKind::NULLABLE_ATOMIC_FLAT, CHECK); - current->set_vm_result(res); - } +#ifdef ASSERT + fieldDescriptor fd; + bool found = holder->find_field_from_offset(entry->field_offset(), false, &fd); + assert(found, "Field not found"); + assert(fd.is_flat(), "Field must be flat"); +#endif // ASSERT + + InlineKlass* field_vklass = InlineKlass::cast(li->klass()); + oop res = field_vklass->read_payload_from_addr(obj_h(), entry->field_offset(), li->kind(), CHECK); + current->set_vm_result(res); + JRT_END JRT_ENTRY(void, InterpreterRuntime::write_nullable_flat_field(JavaThread* current, oopDesc* obj, oopDesc* value, ResolvedFieldEntry* entry)) @@ -334,29 +344,7 @@ JRT_ENTRY(void, InterpreterRuntime::write_nullable_flat_field(JavaThread* curren InstanceKlass* holder = entry->field_holder(); InlineLayoutInfo* li = holder->inline_layout_info_adr(entry->field_index()); InlineKlass* vk = li->klass(); - assert(li->kind() == LayoutKind::NULLABLE_ATOMIC_FLAT, "Must be"); - int nm_offset = li->null_marker_offset(); - - if (val_h() == nullptr) { - if(li->klass()->nonstatic_oop_count() == 0) { - // No embedded oops, just reset the null marker - obj_h()->byte_field_put(nm_offset, (jbyte)0); - } else { - // Has embedded oops, using the reset value to rewrite all fields to null/zeros - assert(li->klass()->null_reset_value()->byte_field(vk->null_marker_offset()) == 0, "reset value must always have a null marker set to 0"); - vk->inline_copy_oop_to_payload(vk->null_reset_value(), ((char*)(oopDesc*)obj_h()) + entry->field_offset(), li->kind()); - } - return; - } - - assert(val_h()->klass() == vk, "Must match because flat fields are monomorphic"); - // The interpreter copies values with a bulk operation - // To avoid accidentally setting the null marker to "null" 
during - // the copying, the null marker is set to non zero in the source object - if (val_h()->byte_field(vk->null_marker_offset()) == 0) { - val_h()->byte_field_put(vk->null_marker_offset(), (jbyte)1); - } - vk->inline_copy_oop_to_payload(val_h(), ((char*)(oopDesc*)obj_h()) + entry->field_offset(), li->kind()); + vk->write_value_to_addr(val_h(), ((char*)(oopDesc*)obj_h()) + entry->field_offset(), li->kind(), true, CHECK); JRT_END JRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* current, BasicType type, jint size)) @@ -371,15 +359,17 @@ JRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* current, ConstantPool* current->set_vm_result(obj); JRT_END -JRT_ENTRY(void, InterpreterRuntime::value_array_load(JavaThread* current, arrayOopDesc* array, int index)) - flatArrayHandle vah(current, (flatArrayOop)array); - oop value_holder = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK); - current->set_vm_result(value_holder); +JRT_ENTRY(void, InterpreterRuntime::flat_array_load(JavaThread* current, arrayOopDesc* array, int index)) + assert(array->is_flatArray(), "Must be"); + flatArrayOop farray = (flatArrayOop)array; + oop res = farray->read_value_from_flat_array(index, CHECK); + current->set_vm_result(res); JRT_END -JRT_ENTRY(void, InterpreterRuntime::value_array_store(JavaThread* current, void* val, arrayOopDesc* array, int index)) - assert(val != nullptr, "can't store null into flat array"); - ((flatArrayOop)array)->value_copy_to_index(cast_to_oop(val), index, LayoutKind::PAYLOAD); // Non atomic is the only layout currently supported by flat arrays +JRT_ENTRY(void, InterpreterRuntime::flat_array_store(JavaThread* current, oopDesc* val, arrayOopDesc* array, int index)) + assert(array->is_flatArray(), "Must be"); + flatArrayOop farray = (flatArrayOop)array; + farray->write_value_to_flat_array(val, index, CHECK); JRT_END JRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* current, jint* first_size_address)) diff --git 
a/src/hotspot/share/interpreter/interpreterRuntime.hpp b/src/hotspot/share/interpreter/interpreterRuntime.hpp index a3c24132329..9f463573a66 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.hpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp @@ -67,8 +67,8 @@ class InterpreterRuntime: AllStatic { static void read_nullable_flat_field(JavaThread* current, oopDesc* object, ResolvedFieldEntry* entry); static void write_nullable_flat_field(JavaThread* current, oopDesc* object, oopDesc* value, ResolvedFieldEntry* entry); - static void value_array_load(JavaThread* current, arrayOopDesc* array, int index); - static void value_array_store(JavaThread* current, void* val, arrayOopDesc* array, int index); + static void flat_array_load(JavaThread* current, arrayOopDesc* array, int index); + static void flat_array_store(JavaThread* current, oopDesc* val, arrayOopDesc* array, int index); static jboolean is_substitutable(JavaThread* current, oopDesc* aobj, oopDesc* bobj); diff --git a/src/hotspot/share/memory/oopFactory.cpp b/src/hotspot/share/memory/oopFactory.cpp index 87624ff0e8f..6e662c7a9f4 100644 --- a/src/hotspot/share/memory/oopFactory.cpp +++ b/src/hotspot/share/memory/oopFactory.cpp @@ -120,22 +120,31 @@ objArrayOop oopFactory::new_objArray(Klass* klass, int length, TRAPS) { } } -arrayOop oopFactory::new_valueArray(Klass* k, int length, TRAPS) { +objArrayOop oopFactory::new_null_free_objArray(Klass* k, int length, TRAPS) { InlineKlass* klass = InlineKlass::cast(k); - // Request a flat array, but we might not actually get it...either way "null-free" are the aaload/aastore semantics - Klass* array_klass = klass->value_array_klass(CHECK_NULL); - assert(array_klass->is_null_free_array_klass(), "Expect a null-free array class here"); - - arrayOop oop; - if (array_klass->is_flatArray_klass()) { - oop = (arrayOop) FlatArrayKlass::cast(array_klass)->allocate(length, CHECK_NULL); - assert(oop == nullptr || oop->is_flatArray(), "sanity"); - assert(oop == 
nullptr || oop->klass()->is_flatArray_klass(), "sanity"); - } else { - oop = (arrayOop) ObjArrayKlass::cast(array_klass)->allocate(length, CHECK_NULL); - } - assert(oop == nullptr || oop->klass()->is_null_free_array_klass(), "sanity"); - assert(oop == nullptr || oop->is_null_free_array(), "sanity"); + ObjArrayKlass* array_klass = klass->null_free_reference_array(CHECK_NULL); + + assert(array_klass->is_objArray_klass(), "Must be"); + assert(array_klass->is_null_free_array_klass(), "Must be"); + + objArrayOop oop = array_klass->allocate(length, CHECK_NULL); + + assert(oop == nullptr || oop->is_objArray(), "Sanity"); + assert(oop == nullptr || oop->klass()->is_null_free_array_klass(), "Sanity"); + + return oop; +} + +flatArrayOop oopFactory::new_flatArray(Klass* k, int length, LayoutKind lk, TRAPS) { + InlineKlass* klass = InlineKlass::cast(k); + Klass* array_klass = klass->flat_array_klass(lk, CHECK_NULL); + + assert(array_klass->is_flatArray_klass(), "Must be"); + + flatArrayOop oop = FlatArrayKlass::cast(array_klass)->allocate(length, lk, CHECK_NULL); + assert(oop == nullptr || oop->is_flatArray(), "sanity"); + assert(oop == nullptr || oop->klass()->is_flatArray_klass(), "sanity"); + return oop; } diff --git a/src/hotspot/share/memory/oopFactory.hpp b/src/hotspot/share/memory/oopFactory.hpp index 7fb22bbfb8a..04e8e3fbc2c 100644 --- a/src/hotspot/share/memory/oopFactory.hpp +++ b/src/hotspot/share/memory/oopFactory.hpp @@ -63,7 +63,8 @@ class oopFactory: AllStatic { // // Method specifically null free and possibly flat if possible // i.e. flatArrayOop if flattening can be done, else "null free" objArrayOop - static arrayOop new_valueArray(Klass* klass, int length, TRAPS); + static flatArrayOop new_flatArray(Klass* klass, int length, LayoutKind lk, TRAPS); + static objArrayOop new_null_free_objArray(Klass* klass, int length, TRAPS); // Helper conversions from value to obj array... 
static objArrayHandle copy_flatArray_to_objArray(flatArrayHandle array, TRAPS); diff --git a/src/hotspot/share/oops/arrayOop.hpp b/src/hotspot/share/oops/arrayOop.hpp index 795b41eeb53..87827771659 100644 --- a/src/hotspot/share/oops/arrayOop.hpp +++ b/src/hotspot/share/oops/arrayOop.hpp @@ -56,8 +56,8 @@ class arrayOopDesc : public oopDesc { // Given a type, return true if elements of that type must be aligned to 64-bit. static bool element_type_should_be_aligned(BasicType type) { - if (EnableValhalla && type == T_PRIMITIVE_OBJECT) { - return true; //CMH: tighten the alignment when removing T_PRIMITIVE_OBJECT + if (EnableValhalla && type == T_FLAT_ELEMENT) { + return true; //CMH: tighten the alignment when removing T_FLAT_ELEMENT } #ifdef _LP64 if (type == T_OBJECT || type == T_ARRAY) { @@ -71,13 +71,13 @@ class arrayOopDesc : public oopDesc { // Header size computation. // The header is considered the oop part of this type plus the length. // This is not equivalent to sizeof(arrayOopDesc) which should not appear in the code. - static int header_size_in_bytes() { + static int header_size_in_bytes(BasicType etype) { int hs = length_offset_in_bytes() + (int)sizeof(int); #ifdef ASSERT // make sure it isn't called before UseCompressedOops is initialized. static int arrayoopdesc_hs = 0; if (arrayoopdesc_hs == 0) arrayoopdesc_hs = hs; - assert(arrayoopdesc_hs == hs, "header size can't change"); + // assert(arrayoopdesc_hs == hs, "header size can't change"); #endif // ASSERT return (int)hs; } @@ -92,7 +92,7 @@ class arrayOopDesc : public oopDesc { // Returns the offset of the first element. static int base_offset_in_bytes(BasicType type) { - int hs = header_size_in_bytes(); + int hs = header_size_in_bytes(type); return element_type_should_be_aligned(type) ? 
align_up(hs, BytesPerLong) : hs; } diff --git a/src/hotspot/share/oops/fieldInfo.hpp b/src/hotspot/share/oops/fieldInfo.hpp index 927f36a92a1..29b7067c808 100644 --- a/src/hotspot/share/oops/fieldInfo.hpp +++ b/src/hotspot/share/oops/fieldInfo.hpp @@ -26,6 +26,7 @@ #define SHARE_OOPS_FIELDINFO_HPP #include "memory/allocation.hpp" +#include "oops/layoutKind.hpp" #include "oops/typeArrayOop.hpp" #include "utilities/unsigned5.hpp" #include "utilities/vmEnums.hpp" @@ -34,15 +35,6 @@ static constexpr u4 flag_mask(int pos) { return (u4)1 << pos; } -enum LayoutKind { - REFERENCE = 0, // indirection to a heap allocated instance - PAYLOAD = 1, // layout used in heap allocated standalone instances, probably temporary for the transition - NON_ATOMIC_FLAT = 2, // flat, no guarantee of atomic updates, no null marker - ATOMIC_FLAT = 3, // flat, size compatible with atomic updates, alignment requirement is equal to the size - NULLABLE_ATOMIC_FLAT = 4, // flat, include a null marker, plus same properties as ATOMIC layout - UNKNOWN = 5 // used for uninitialized fields of type LayoutKind -}; - // Helper class for access to the underlying Array used to // store the compressed stream of FieldInfo template diff --git a/src/hotspot/share/oops/flatArrayKlass.cpp b/src/hotspot/share/oops/flatArrayKlass.cpp index 4569b8fae10..039ae51d73a 100644 --- a/src/hotspot/share/oops/flatArrayKlass.cpp +++ b/src/hotspot/share/oops/flatArrayKlass.cpp @@ -55,24 +55,43 @@ // Allocation... 
-FlatArrayKlass::FlatArrayKlass(Klass* element_klass, Symbol* name) : ArrayKlass(name, Kind) { +FlatArrayKlass::FlatArrayKlass(Klass* element_klass, Symbol* name, LayoutKind lk) : ArrayKlass(name, Kind) { assert(element_klass->is_inline_klass(), "Expected Inline"); + assert(lk == NON_ATOMIC_FLAT || lk == ATOMIC_FLAT || lk == NULLABLE_ATOMIC_FLAT, "Must be a flat layout"); set_element_klass(InlineKlass::cast(element_klass)); set_class_loader_data(element_klass->class_loader_data()); + set_layout_kind(lk); - set_layout_helper(array_layout_helper(InlineKlass::cast(element_klass))); + set_layout_helper(array_layout_helper(InlineKlass::cast(element_klass), lk)); assert(is_array_klass(), "sanity"); assert(is_flatArray_klass(), "sanity"); assert(is_null_free_array_klass(), "sanity"); #ifdef _LP64 - set_prototype_header(markWord::flat_array_prototype()); + set_prototype_header(markWord::flat_array_prototype(lk)); assert(prototype_header().is_flat_array(), "sanity"); #else + fatal("Not supported yet"); set_prototype_header(markWord::inline_type_prototype()); #endif +#ifdef ASSERT + switch(lk) { + case NON_ATOMIC_FLAT: + assert(layout_helper_is_null_free(layout_helper()), "Must be"); + assert(layout_helper_is_array(layout_helper()), "Must be"); + assert(layout_helper_is_flatArray(layout_helper()), "Must be"); + assert(layout_helper_element_type(layout_helper()) == T_FLAT_ELEMENT, "Must be"); + //assert(layout_helper_header_size(layout_helper()) == , "Must be"); + assert(prototype_header().is_null_free_array(), "Must be"); + assert(prototype_header().is_flat_array(), "Must be"); + break; + default: + break; + } +#endif // ASSERT + #ifndef PRODUCT if (PrintFlatArrayLayout) { print(); @@ -80,15 +99,7 @@ FlatArrayKlass::FlatArrayKlass(Klass* element_klass, Symbol* name) : ArrayKlass( #endif } -InlineKlass* FlatArrayKlass::element_klass() const { - return InlineKlass::cast(_element_klass); -} - -void FlatArrayKlass::set_element_klass(Klass* k) { - _element_klass = k; -} - 
-FlatArrayKlass* FlatArrayKlass::allocate_klass(Klass* eklass, TRAPS) { +FlatArrayKlass* FlatArrayKlass::allocate_klass(Klass* eklass, LayoutKind lk, TRAPS) { guarantee((!Universe::is_bootstrapping() || vmClasses::Object_klass_loaded()), "Really ?!"); assert(UseFlatArray, "Flatten array required"); assert(MultiArray_lock->holds_lock(THREAD), "must hold lock after bootstrapping"); @@ -96,12 +107,6 @@ FlatArrayKlass* FlatArrayKlass::allocate_klass(Klass* eklass, TRAPS) { InlineKlass* element_klass = InlineKlass::cast(eklass); assert(element_klass->must_be_atomic() || (!InlineArrayAtomicAccess), "Atomic by-default"); - /* - * MVT->LWorld, now need to allocate secondaries array types, just like objArrayKlass... - * ...so now we are trying out covariant array types, just copy objArrayKlass - * TODO refactor any remaining commonality - * - */ // Eagerly allocate the direct array supertype. Klass* super_klass = nullptr; Klass* element_super = element_klass->super(); @@ -121,7 +126,7 @@ FlatArrayKlass* FlatArrayKlass::allocate_klass(Klass* eklass, TRAPS) { Symbol* name = ArrayKlass::create_element_klass_array_name(element_klass, CHECK_NULL); ClassLoaderData* loader_data = element_klass->class_loader_data(); int size = ArrayKlass::static_size(FlatArrayKlass::header_size()); - FlatArrayKlass* vak = new (loader_data, size, THREAD) FlatArrayKlass(element_klass, name); + FlatArrayKlass* vak = new (loader_data, size, THREAD) FlatArrayKlass(element_klass, name, lk); ModuleEntry* module = vak->module(); assert(module != nullptr, "No module entry for array"); @@ -142,23 +147,22 @@ void FlatArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) { } // Oops allocation... 
-flatArrayOop FlatArrayKlass::allocate(int length, TRAPS) { +flatArrayOop FlatArrayKlass::allocate(int length, LayoutKind lk, TRAPS) { check_array_allocation_length(length, max_elements(), CHECK_NULL); int size = flatArrayOopDesc::object_size(layout_helper(), length); - return (flatArrayOop) Universe::heap()->array_allocate(this, size, length, true, THREAD); + flatArrayOop array = (flatArrayOop) Universe::heap()->array_allocate(this, size, length, true, CHECK_NULL); + // array->set_layout_kind(lk); + return array; } - oop FlatArrayKlass::multi_allocate(int rank, jint* last_size, TRAPS) { - // For flatArrays this is only called for the last dimension - assert(rank == 1, "just checking"); - int length = *last_size; - return allocate(length, THREAD); + // FlatArrays only have one dimension + ShouldNotReachHere(); } -jint FlatArrayKlass::array_layout_helper(InlineKlass* vk) { - BasicType etype = T_PRIMITIVE_OBJECT; - int esize = log2i_exact(round_up_power_of_2(vk->payload_size_in_bytes())); +jint FlatArrayKlass::array_layout_helper(InlineKlass* vk, LayoutKind lk) { + BasicType etype = T_FLAT_ELEMENT; + int esize = log2i_exact(round_up_power_of_2(vk->layout_size_in_bytes(lk))); int hsize = arrayOopDesc::base_offset_in_bytes(etype); int lh = Klass::array_layout_helper(_lh_array_tag_vt_value, true, hsize, etype, esize); @@ -189,7 +193,7 @@ size_t FlatArrayKlass::oop_size(oop obj) const { jint FlatArrayKlass::max_elements() const { // Check the max number of heap words limit first (because of int32_t in oopDesc_oop_size() etc) size_t max_size = max_jint; - max_size -= (arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) >> LogHeapWordSize); + max_size -= (arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) >> LogHeapWordSize); max_size = align_down(max_size, MinObjAlignment); max_size <<= LogHeapWordSize; // convert to max payload size in bytes max_size >>= layout_helper_log2_element_size(_layout_helper); // divide by element size (in bytes) = max elements @@ -216,112 
+220,126 @@ void FlatArrayKlass::copy_array(arrayOop s, int src_pos, assert(s->is_objArray() || s->is_flatArray(), "must be obj or flat array"); - // Check destination - if ((!d->is_flatArray()) && (!d->is_objArray())) { - THROW(vmSymbols::java_lang_ArrayStoreException()); - } - - // Check if all offsets and lengths are non negative - if (src_pos < 0 || dst_pos < 0 || length < 0) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); - } - // Check if the ranges are valid - if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) + // Check destination + if ((!d->is_flatArray()) && (!d->is_objArray())) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + + // Check if all offsets and lengths are non negative + if (src_pos < 0 || dst_pos < 0 || length < 0) { + THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + } + // Check if the ranges are valid + if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); - } - // Check zero copy - if (length == 0) - return; - - ArrayKlass* sk = ArrayKlass::cast(s->klass()); - ArrayKlass* dk = ArrayKlass::cast(d->klass()); - Klass* d_elem_klass = dk->element_klass(); - Klass* s_elem_klass = sk->element_klass(); - /**** CMH: compare and contrast impl, re-factor once we find edge cases... 
****/ - - if (sk->is_flatArray_klass()) { - assert(sk == this, "Unexpected call to copy_array"); - // Check subtype, all src homogeneous, so just once - if (!s_elem_klass->is_subtype_of(d_elem_klass)) { - THROW(vmSymbols::java_lang_ArrayStoreException()); - } - - flatArrayOop sa = flatArrayOop(s); - InlineKlass* s_elem_vklass = element_klass(); - - // flatArray-to-flatArray - if (dk->is_flatArray_klass()) { - // element types MUST be exact, subtype check would be dangerous - if (dk != this) { - THROW(vmSymbols::java_lang_ArrayStoreException()); - } - - flatArrayOop da = flatArrayOop(d); - address dst = (address) da->value_at_addr(dst_pos, layout_helper()); - address src = (address) sa->value_at_addr(src_pos, layout_helper()); - if (contains_oops()) { - int elem_incr = 1 << log2_element_size(); - address src_end = src + (length << log2_element_size()); - if (needs_backwards_copy(s, src_pos, d, dst_pos, length)) { - swap(src, src_end); - dst = dst + (length << log2_element_size()); - do { - src -= elem_incr; - dst -= elem_incr; - HeapAccess<>::value_copy(src, dst, s_elem_vklass, LayoutKind::PAYLOAD); // Temporary hack for the transition - } while (src > src_end); - } else { - address src_end = src + (length << log2_element_size()); - while (src < src_end) { - HeapAccess<>::value_copy(src, dst, s_elem_vklass, LayoutKind::PAYLOAD); // Temporary hack for the transition - src += elem_incr; - dst += elem_incr; - } - } - } else { - // we are basically a type array...don't bother limiting element copy - // it would have to be a lot wasted space to be worth value_store() calls, need a setting here ? 
- Copy::conjoint_memory_atomic(src, dst, (size_t)length << log2_element_size()); - } - } - else { // flatArray-to-objArray - assert(dk->is_objArray_klass(), "Expected objArray here"); - // Need to allocate each new src elem payload -> dst oop - objArrayHandle dh(THREAD, (objArrayOop)d); - flatArrayHandle sh(THREAD, sa); - int dst_end = dst_pos + length; - while (dst_pos < dst_end) { - oop o = flatArrayOopDesc::value_alloc_copy_from_index(sh, src_pos, CHECK); - dh->obj_at_put(dst_pos, o); - dst_pos++; - src_pos++; - } - } - } else { - assert(s->is_objArray(), "Expected objArray"); - objArrayOop sa = objArrayOop(s); - assert(d->is_flatArray(), "Excepted flatArray"); // objArray-to-flatArray - InlineKlass* d_elem_vklass = InlineKlass::cast(d_elem_klass); - flatArrayOop da = flatArrayOop(d); - - int src_end = src_pos + length; - int delem_incr = 1 << dk->log2_element_size(); - address dst = (address) da->value_at_addr(dst_pos, layout_helper()); - while (src_pos < src_end) { - oop se = sa->obj_at(src_pos); - if (se == nullptr) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - // Check exact type per element - if (se->klass() != d_elem_klass) { - THROW(vmSymbols::java_lang_ArrayStoreException()); - } - d_elem_vklass->inline_copy_oop_to_payload(se, dst, LayoutKind::PAYLOAD); // Temporary hack for the transition - dst += delem_incr; - src_pos++; - } - } + THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + } + // Check zero copy + if (length == 0) + return; + + ArrayKlass* sk = ArrayKlass::cast(s->klass()); + ArrayKlass* dk = ArrayKlass::cast(d->klass()); + Klass* d_elem_klass = dk->element_klass(); + Klass* s_elem_klass = sk->element_klass(); + /**** CMH: compare and contrast impl, re-factor once we find edge cases... 
****/ + + if (sk->is_flatArray_klass()) { + assert(sk == this, "Unexpected call to copy_array"); + FlatArrayKlass* fsk = FlatArrayKlass::cast(sk); + // Check subtype, all src homogeneous, so just once + if (!s_elem_klass->is_subtype_of(d_elem_klass)) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + + flatArrayOop sa = flatArrayOop(s); + InlineKlass* s_elem_vklass = element_klass(); + + // flatArray-to-flatArray + if (dk->is_flatArray_klass()) { + // element types MUST be exact, subtype check would be dangerous + if (d_elem_klass != this->element_klass()) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + + FlatArrayKlass* fdk = FlatArrayKlass::cast(dk); + InlineKlass* vk = InlineKlass::cast(s_elem_klass); + flatArrayOop da = flatArrayOop(d); + int src_incr = fsk->element_byte_size(); + int dst_incr = fdk->element_byte_size(); + + if (fsk->layout_kind() == fdk->layout_kind()) { + assert(src_incr == dst_incr, "Must be"); + if (needs_backwards_copy(sa, src_pos, da, dst_pos, length)) { + address dst = (address) da->value_at_addr(dst_pos + length - 1, fdk->layout_helper()); + address src = (address) sa->value_at_addr(src_pos + length - 1, fsk->layout_helper()); + for (int i = 0; i < length; i++) { + // because source and destination have the same layout, bypassing the InlineKlass copy methods + // and call AccessAPI directly + HeapAccess<>::value_copy(src, dst, vk, fsk->layout_kind()); + dst -= dst_incr; + src -= src_incr; + } + } else { + // source and destination share same layout, direct copy from array to array is possible + address dst = (address) da->value_at_addr(dst_pos, fdk->layout_helper()); + address src = (address) sa->value_at_addr(src_pos, fsk->layout_helper()); + for (int i = 0; i < length; i++) { + // because source and destination have the same layout, bypassing the InlineKlass copy methods + // and call AccessAPI directly + HeapAccess<>::value_copy(src, dst, vk, fsk->layout_kind()); + dst += dst_incr; + src += src_incr; + } + } + 
} else { + flatArrayHandle hd(THREAD, da); + flatArrayHandle hs(THREAD, sa); + // source and destination layouts mismatch, simpler solution is to copy through an intermediate buffer (heap instance) + bool need_null_check = fsk->layout_kind() == NULLABLE_ATOMIC_FLAT && fdk->layout_kind() != NULLABLE_ATOMIC_FLAT; + oop buffer = vk->allocate_instance(CHECK); + address dst = (address) hd->value_at_addr(dst_pos, fdk->layout_helper()); + address src = (address) hs->value_at_addr(src_pos, fsk->layout_helper()); + for (int i = 0; i < length; i++) { + if (need_null_check) { + if ( *(jboolean*)(src + vk->null_marker_offset_in_payload()) == 0) { + THROW(vmSymbols::java_lang_NullPointerException()); + } + } + vk->copy_payload_to_addr(src, vk->data_for_oop(buffer), fsk->layout_kind(), true); + if (vk->has_nullable_atomic_layout()) { + // Setting null marker to not zero for non-nullable source layouts + vk->mark_payload_as_non_null(vk->data_for_oop(buffer)); + } + vk->copy_payload_to_addr(vk->data_for_oop(buffer), dst, fdk->layout_kind(), true); + dst += dst_incr; + src += src_incr; + } + } + } else { // flatArray-to-objArray + assert(dk->is_objArray_klass(), "Expected objArray here"); + // Need to allocate each new src elem payload -> dst oop + objArrayHandle dh(THREAD, (objArrayOop)d); + flatArrayHandle sh(THREAD, sa); + InlineKlass* vk = InlineKlass::cast(s_elem_klass); + for (int i = 0; i < length; i++) { + oop o = sh->read_value_from_flat_array(src_pos + i, CHECK); + dh->obj_at_put(dst_pos + i, o); + } + } + } else { + assert(s->is_objArray(), "Expected objArray"); + objArrayOop sa = objArrayOop(s); + assert(d->is_flatArray(), "Expected flatArray"); // objArray-to-flatArray + InlineKlass* d_elem_vklass = InlineKlass::cast(d_elem_klass); + flatArrayOop da = flatArrayOop(d); + FlatArrayKlass* fdk = FlatArrayKlass::cast(da->klass()); + InlineKlass* vk = InlineKlass::cast(d_elem_klass); + + for (int i = 0; i < length; i++) { + da->write_value_to_flat_array(sa->obj_at(src_pos + 
i), dst_pos + i, CHECK); + } + } } ModuleEntry* FlatArrayKlass::module() const { diff --git a/src/hotspot/share/oops/flatArrayKlass.hpp b/src/hotspot/share/oops/flatArrayKlass.hpp index 786f51eeb15..2528c8624eb 100644 --- a/src/hotspot/share/oops/flatArrayKlass.hpp +++ b/src/hotspot/share/oops/flatArrayKlass.hpp @@ -41,14 +41,19 @@ class FlatArrayKlass : public ArrayKlass { private: // Constructor - FlatArrayKlass(Klass* element_klass, Symbol* name); + FlatArrayKlass(Klass* element_klass, Symbol* name, LayoutKind lk); + + LayoutKind _layout_kind; public: - FlatArrayKlass() {} + FlatArrayKlass() {} // Why? + + InlineKlass* element_klass() const { return InlineKlass::cast(_element_klass); } + void set_element_klass(Klass* k) { _element_klass = k; } - virtual InlineKlass* element_klass() const; - virtual void set_element_klass(Klass* k); + LayoutKind layout_kind() const { return _layout_kind; } + void set_layout_kind(LayoutKind lk) { _layout_kind = lk; } // Casting from Klass* static FlatArrayKlass* cast(Klass* k) { @@ -57,7 +62,7 @@ class FlatArrayKlass : public ArrayKlass { } // klass allocation - static FlatArrayKlass* allocate_klass(Klass* element_klass, TRAPS); + static FlatArrayKlass* allocate_klass(Klass* element_klass, LayoutKind lk, TRAPS); void initialize(TRAPS); @@ -85,7 +90,7 @@ class FlatArrayKlass : public ArrayKlass { virtual void metaspace_pointers_do(MetaspaceClosure* iter); - static jint array_layout_helper(InlineKlass* vklass); // layout helper for values + static jint array_layout_helper(InlineKlass* vklass, LayoutKind lk); // layout helper for values // sizing static int header_size() { return sizeof(FlatArrayKlass)/HeapWordSize; } @@ -96,7 +101,7 @@ class FlatArrayKlass : public ArrayKlass { size_t oop_size(oop obj) const; // Oop Allocation - flatArrayOop allocate(int length, TRAPS); + flatArrayOop allocate(int length, LayoutKind lk, TRAPS); oop multi_allocate(int rank, jint* sizes, TRAPS); // Naming diff --git 
a/src/hotspot/share/oops/flatArrayOop.hpp b/src/hotspot/share/oops/flatArrayOop.hpp index 7f17b8a0fe2..32874df1380 100644 --- a/src/hotspot/share/oops/flatArrayOop.hpp +++ b/src/hotspot/share/oops/flatArrayOop.hpp @@ -39,10 +39,8 @@ class flatArrayOopDesc : public arrayOopDesc { void* base() const; void* value_at_addr(int index, jint lh) const; - // Return a buffered element from index - static oop value_alloc_copy_from_index(flatArrayHandle vah, int index, TRAPS); - void value_copy_from_index(int index, oop dst, LayoutKind lk) const; - void value_copy_to_index(oop src, int index, LayoutKind lk) const; + inline oop read_value_from_flat_array( int index, TRAPS); + inline void write_value_to_flat_array(oop value, int index, TRAPS); // Sizing static size_t element_size(int lh, int nof_elements) { diff --git a/src/hotspot/share/oops/flatArrayOop.inline.hpp b/src/hotspot/share/oops/flatArrayOop.inline.hpp index 0336194d0af..cc432a1c989 100644 --- a/src/hotspot/share/oops/flatArrayOop.inline.hpp +++ b/src/hotspot/share/oops/flatArrayOop.inline.hpp @@ -31,7 +31,7 @@ #include "oops/oop.inline.hpp" #include "runtime/globals.hpp" -inline void* flatArrayOopDesc::base() const { return arrayOopDesc::base(T_PRIMITIVE_OBJECT); } +inline void* flatArrayOopDesc::base() const { return arrayOopDesc::base(T_FLAT_ELEMENT); } inline void* flatArrayOopDesc::value_at_addr(int index, jint lh) const { assert(is_within_bounds(index), "index out of bounds"); @@ -45,37 +45,25 @@ inline int flatArrayOopDesc::object_size() const { return object_size(klass()->layout_helper(), length()); } -inline oop flatArrayOopDesc::value_alloc_copy_from_index(flatArrayHandle vah, int index, TRAPS) { - FlatArrayKlass* vaklass = FlatArrayKlass::cast(vah->klass()); - InlineKlass* vklass = vaklass->element_klass(); - assert(vklass->is_initialized(), "Should be"); - if (vklass->is_empty_inline_type()) { - return vklass->default_value(); - } else { - oop buf = vklass->allocate_instance_buffer(CHECK_NULL); - 
vklass->inline_copy_payload_to_new_oop(vah->value_at_addr(index, vaklass->layout_helper()), - buf, LayoutKind::PAYLOAD); // temporary hack for the transition - return buf; - } -} - -inline void flatArrayOopDesc::value_copy_from_index(int index, oop dst, LayoutKind lk) const { - FlatArrayKlass* vaklass = FlatArrayKlass::cast(klass()); - InlineKlass* vklass = vaklass->element_klass(); - void* src = value_at_addr(index, vaklass->layout_helper()); - return vklass->inline_copy_payload_to_new_oop(src, dst, lk); +inline oop flatArrayOopDesc::read_value_from_flat_array(int index, TRAPS) { + // This method assumes that the validity of the index has already been checked + FlatArrayKlass* faklass = FlatArrayKlass::cast(klass()); + InlineKlass* vk = InlineKlass::cast(faklass->element_klass()); + int offset = ((char*)value_at_addr(index, faklass->layout_helper())) - ((char*)(oopDesc*)this); + oop res = vk->read_payload_from_addr(this, offset, faklass->layout_kind(), CHECK_NULL); + return res; } -inline void flatArrayOopDesc::value_copy_to_index(oop src, int index, LayoutKind lk) const { - FlatArrayKlass* vaklass = FlatArrayKlass::cast(klass()); - InlineKlass* vklass = vaklass->element_klass(); - if (vklass->is_empty_inline_type()) { - return; +inline void flatArrayOopDesc::write_value_to_flat_array(oop value, int index, TRAPS) { + // This method assumes that the validity of the index has already been checked + FlatArrayKlass* faklass = FlatArrayKlass::cast(klass()); + InlineKlass* vk = InlineKlass::cast(faklass->element_klass()); + if (value != nullptr) { + if (value->klass() != vk) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } } - void* dst = value_at_addr(index, vaklass->layout_helper()); - vklass->inline_copy_oop_to_payload(src, dst, lk); + vk->write_value_to_addr(value, value_at_addr(index, faklass->layout_helper()), faklass->layout_kind(), true, CHECK); } - - #endif // SHARE_VM_OOPS_FLATARRAYOOP_INLINE_HPP diff --git a/src/hotspot/share/oops/inlineKlass.cpp 
b/src/hotspot/share/oops/inlineKlass.cpp index dd27cc3c924..046b28cfaf5 100644 --- a/src/hotspot/share/oops/inlineKlass.cpp +++ b/src/hotspot/share/oops/inlineKlass.cpp @@ -140,8 +140,8 @@ int InlineKlass::layout_size_in_bytes(LayoutKind kind) const { return atomic_size_in_bytes(); break; case LayoutKind::NULLABLE_ATOMIC_FLAT: - assert(has_nullable_layout(), "Layout not available"); - return nullable_size_in_bytes(); + assert(has_nullable_atomic_layout(), "Layout not available"); + return nullable_atomic_size_in_bytes(); break; case PAYLOAD: return payload_size_in_bytes(); @@ -162,8 +162,8 @@ int InlineKlass::layout_alignment(LayoutKind kind) const { return atomic_size_in_bytes(); break; case LayoutKind::NULLABLE_ATOMIC_FLAT: - assert(has_nullable_layout(), "Layout not available"); - return nullable_size_in_bytes(); + assert(has_nullable_atomic_layout(), "Layout not available"); + return nullable_atomic_size_in_bytes(); break; case LayoutKind::PAYLOAD: return payload_alignment(); @@ -173,36 +173,113 @@ int InlineKlass::layout_alignment(LayoutKind kind) const { } } -oop InlineKlass::read_flat_field(oop obj, int offset, LayoutKind lk, TRAPS) { +bool InlineKlass::is_layout_supported(LayoutKind lk) { + switch(lk) { + case LayoutKind::NON_ATOMIC_FLAT: + return has_non_atomic_layout(); + break; + case LayoutKind::ATOMIC_FLAT: + return has_atomic_layout(); + break; + case LayoutKind::NULLABLE_ATOMIC_FLAT: + return has_nullable_atomic_layout(); + break; + case LayoutKind::PAYLOAD: + return true; + break; + default: + ShouldNotReachHere(); + } +} - if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) { - InstanceKlass* recv = InstanceKlass::cast(obj->klass()); - int nm_offset = offset + (null_marker_offset() - first_field_offset()); - jbyte nm = obj->byte_field(nm_offset); - if (nm == 0) { - return nullptr; +void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) { + assert(is_layout_supported(lk), "Unsupported layout"); + assert(lk != 
LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check"); + switch(lk) { + case NULLABLE_ATOMIC_FLAT: { + if (is_payload_marked_as_null((address)src)) { + if (!contains_oops()) { + mark_payload_as_null((address)dst); + return; + } + // copy null_reset value to dest + if (dest_is_initialized) { + HeapAccess<>::value_copy(data_for_oop(null_reset_value()), dst, this, lk); + } else { + HeapAccess::value_copy(data_for_oop(null_reset_value()), dst, this, lk); + } + } else { + // Copy has to be performed, even if this is an empty value, because of the null marker + mark_payload_as_non_null((address)src); + if (dest_is_initialized) { + HeapAccess<>::value_copy(src, dst, this, lk); + } else { + HeapAccess::value_copy(src, dst, this, lk); + } + } + } + break; + case PAYLOAD: + case ATOMIC_FLAT: + case NON_ATOMIC_FLAT: { + if (is_empty_inline_type()) return; // nothing to do + if (dest_is_initialized) { + HeapAccess<>::value_copy(src, dst, this, lk); + } else { + HeapAccess::value_copy(src, dst, this, lk); + } } + break; + default: + ShouldNotReachHere(); } - oop res = nullptr; - assert(is_initialized() || is_being_initialized()|| is_in_error_state(), - "Must be initialized, initializing or in a corner case of an escaped instance of a class that failed its initialization"); - if (is_empty_inline_type()) { - res = (instanceOop)default_value(); - } else { - Handle obj_h(THREAD, obj); - res = allocate_instance_buffer(CHECK_NULL); - inline_copy_payload_to_new_oop(((char*)(oopDesc*)obj_h()) + offset, res, lk); +} + +oop InlineKlass::read_payload_from_addr(oop src, int offset, LayoutKind lk, TRAPS) { + assert(src != nullptr, "Must be"); + assert(is_layout_supported(lk), "Unsupported layout"); + switch(lk) { + case NULLABLE_ATOMIC_FLAT: { + if (is_payload_marked_as_null((address)((char*)(oopDesc*)src + offset))) { + return nullptr; + } + } // Fallthrough + case PAYLOAD: + case ATOMIC_FLAT: + case NON_ATOMIC_FLAT: { + if (is_empty_inline_type()) { + return default_value(); 
+ } + Handle obj_h(THREAD, src); + oop res = allocate_instance_buffer(CHECK_NULL); + copy_payload_to_addr((void*)((char*)(oopDesc*)obj_h() + offset), data_for_oop(res), lk, false); + if (lk == NULLABLE_ATOMIC_FLAT) { + if(is_payload_marked_as_null(data_for_oop(res))) { + return nullptr; + } + } + return res; + } + break; + default: + ShouldNotReachHere(); } - assert(res != nullptr, "Must be set in one of two paths above"); - return res; } -void InlineKlass::write_flat_field(oop obj, int offset, oop value, bool is_null_free, LayoutKind lk, TRAPS) { - if (is_null_free && value == nullptr) { - THROW(vmSymbols::java_lang_NullPointerException()); +void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS) { + void* src_addr = nullptr; + if (src == nullptr) { + if (lk != NULLABLE_ATOMIC_FLAT) { + THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null"); + } + src_addr = data_for_oop(null_reset_value()); + } else { + src_addr = data_for_oop(src); + if (lk == NULLABLE_ATOMIC_FLAT) { + mark_payload_as_non_null((address)src_addr); + } } - assert(!is_null_free || (lk == LayoutKind::ATOMIC_FLAT || lk == LayoutKind::NON_ATOMIC_FLAT || lk == LayoutKind::REFERENCE || lk == LayoutKind::PAYLOAD), "Consistency check"); - inline_copy_oop_to_payload(value, ((char*)(oopDesc*)obj) + offset, lk); + copy_payload_to_addr(src_addr, dst, lk, dest_is_initialized); } // Arrays of... 
@@ -231,44 +308,77 @@ bool InlineKlass::flat_array() { return true; } -Klass* InlineKlass::value_array_klass(int n, TRAPS) { - if (Atomic::load_acquire(adr_value_array_klasses()) == nullptr) { +ObjArrayKlass* InlineKlass::null_free_reference_array(TRAPS) { + if (Atomic::load_acquire(adr_null_free_reference_array_klass()) == nullptr) { // Atomic creation of array_klasses RecursiveLocker rl(MultiArray_lock, THREAD); // Check if update has already taken place - if (value_array_klasses() == nullptr) { - ArrayKlass* k; - if (flat_array()) { - k = FlatArrayKlass::allocate_klass(this, CHECK_NULL); - } else { - k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, true, CHECK_NULL); + if (null_free_reference_array_klass() == nullptr) { + ObjArrayKlass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, true, CHECK_NULL); - } // use 'release' to pair with lock-free load - Atomic::release_store(adr_value_array_klasses(), k); + Atomic::release_store(adr_null_free_reference_array_klass(), k); } } - ArrayKlass* ak = value_array_klasses(); - return ak->array_klass(n, THREAD); + return null_free_reference_array_klass(); } -Klass* InlineKlass::value_array_klass_or_null(int n) { - // Need load-acquire for lock-free read - ArrayKlass* ak = Atomic::load_acquire(adr_value_array_klasses()); - if (ak == nullptr) { - return nullptr; - } else { - return ak->array_klass_or_null(n); + +// There's no reason for this method to have a TRAP argument +FlatArrayKlass* InlineKlass::flat_array_klass(LayoutKind lk, TRAPS) { + FlatArrayKlass* volatile* adr_flat_array_klass = nullptr; + switch(lk) { + case NON_ATOMIC_FLAT: + assert(has_non_atomic_layout(), "Must be"); + adr_flat_array_klass = adr_non_atomic_flat_array_klass(); + break; + case ATOMIC_FLAT: + assert(has_atomic_layout(), "Must be"); + adr_flat_array_klass = adr_atomic_flat_array_klass(); + break; + case NULLABLE_ATOMIC_FLAT: + assert(has_nullable_atomic_layout(), "Must be"); + adr_flat_array_klass 
= adr_nullable_atomic_flat_array_klass(); + break; + default: + ShouldNotReachHere(); } -} -Klass* InlineKlass::value_array_klass(TRAPS) { - return value_array_klass(1, THREAD); + if (Atomic::load_acquire(adr_flat_array_klass) == nullptr) { + // Atomic creation of array_klasses + RecursiveLocker rl(MultiArray_lock, THREAD); + + if (*adr_flat_array_klass == nullptr) { + FlatArrayKlass* k = FlatArrayKlass::allocate_klass(this, lk, CHECK_NULL); + Atomic::release_store(adr_flat_array_klass, k); + } + } + return *adr_flat_array_klass; } -Klass* InlineKlass::value_array_klass_or_null() { - return value_array_klass_or_null(1); +FlatArrayKlass* InlineKlass::flat_array_klass_or_null(LayoutKind lk) { + FlatArrayKlass* volatile* adr_flat_array_klass = nullptr; + switch(lk) { + case NON_ATOMIC_FLAT: + assert(has_non_atomic_layout(), "Must be"); + adr_flat_array_klass = adr_non_atomic_flat_array_klass(); + break; + case ATOMIC_FLAT: + assert(has_atomic_layout(), "Must be"); + adr_flat_array_klass = adr_atomic_flat_array_klass(); + break; + case NULLABLE_ATOMIC_FLAT: + assert(has_nullable_atomic_layout(), "Must be"); + adr_flat_array_klass = adr_nullable_atomic_flat_array_klass(); + break; + default: + ShouldNotReachHere(); + } + + // Need load-acquire for lock-free read + FlatArrayKlass* k = Atomic::load_acquire(adr_flat_array_klass); + return k; } // Inline type arguments are not passed by reference, instead each diff --git a/src/hotspot/share/oops/inlineKlass.hpp b/src/hotspot/share/oops/inlineKlass.hpp index 82d675bb6a6..e85e044a7af 100644 --- a/src/hotspot/share/oops/inlineKlass.hpp +++ b/src/hotspot/share/oops/inlineKlass.hpp @@ -87,13 +87,49 @@ class InlineKlass: public InstanceKlass { ArrayKlass* volatile* adr_value_array_klasses() const { assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); - return (ArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, 
_null_free_inline_array_klasses)); + return (ArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _value_array_klasses)); } ArrayKlass* value_array_klasses() const { return *adr_value_array_klasses(); } + FlatArrayKlass* volatile* adr_non_atomic_flat_array_klass() const { + assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); + return (FlatArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _non_atomic_flat_array_klass)); + } + + FlatArrayKlass* non_atomic_flat_array_klass() const { + return *adr_non_atomic_flat_array_klass(); + } + + FlatArrayKlass* volatile* adr_atomic_flat_array_klass() const { + assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); + return (FlatArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _atomic_flat_array_klass)); + } + + FlatArrayKlass* atomic_flat_array_klass() const { + return *adr_atomic_flat_array_klass(); + } + + FlatArrayKlass* volatile* adr_nullable_atomic_flat_array_klass() const { + assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); + return (FlatArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _nullable_atomic_flat_array_klass)); + } + + FlatArrayKlass* nullable_atomic_flat_array_klass() const { + return *adr_nullable_atomic_flat_array_klass(); + } + + ObjArrayKlass* volatile* adr_null_free_reference_array_klass() const { + assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); + return (ObjArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _null_free_reference_array_klass)); + } + + ObjArrayKlass* null_free_reference_array_klass() const { + return *adr_null_free_reference_array_klass(); + } + address 
adr_first_field_offset() const { assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); return ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _first_field_offset)); @@ -124,7 +160,7 @@ class InlineKlass: public InstanceKlass { return ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _atomic_size_in_bytes)); } - address adr_nullable_size_in_bytes() const { + address adr_nullable_atomic_size_in_bytes() const { assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); return ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _nullable_size_in_bytes)); } @@ -163,12 +199,30 @@ class InlineKlass: public InstanceKlass { int atomic_size_in_bytes() const { return *(int*)adr_atomic_size_in_bytes(); } void set_atomic_size_in_bytes(int size) { *(int*)adr_atomic_size_in_bytes() = size; } - bool has_nullable_layout() const { return nullable_size_in_bytes() != -1; } - int nullable_size_in_bytes() const { return *(int*)adr_nullable_size_in_bytes(); } - void set_nullable_size_in_bytes(int size) { *(int*)adr_nullable_size_in_bytes() = size; } + bool has_nullable_atomic_layout() const { return nullable_atomic_size_in_bytes() != -1; } + int nullable_atomic_size_in_bytes() const { return *(int*)adr_nullable_atomic_size_in_bytes(); } + void set_nullable_size_in_bytes(int size) { *(int*)adr_nullable_atomic_size_in_bytes() = size; } int null_marker_offset() const { return *(int*)adr_null_marker_offset(); } + int null_marker_offset_in_payload() const { return null_marker_offset() - first_field_offset(); } void set_null_marker_offset(int offset) { *(int*)adr_null_marker_offset() = offset; } + bool is_payload_marked_as_null(address payload) { + assert(has_nullable_atomic_layout(), " Must have"); + return *((jbyte*)payload + null_marker_offset_in_payload()) == 0; + } + + void mark_payload_as_non_null(address payload) { + 
assert(has_nullable_atomic_layout(), " Must have"); + *((jbyte*)payload + null_marker_offset_in_payload()) = 1; + } + + void mark_payload_as_null(address payload) { + assert(has_nullable_atomic_layout(), " Must have"); + *((jbyte*)payload + null_marker_offset_in_payload()) = 0; + } + + bool is_layout_supported(LayoutKind lk); + int layout_alignment(LayoutKind kind) const; int layout_size_in_bytes(LayoutKind kind) const; @@ -222,30 +276,19 @@ class InlineKlass: public InstanceKlass { // null free inline arrays... // - // null free inline array klass, akin to InstanceKlass::array_klass() - // Returns the array class for the n'th dimension - Klass* value_array_klass(int n, TRAPS); - Klass* value_array_klass_or_null(int n); - - // Returns the array class with this class as element type - Klass* value_array_klass(TRAPS); - Klass* value_array_klass_or_null(); - - - // General store methods - // - // Normally loads and store methods would be found in *Oops classes, but since values can be - // "in-lined" (flat layout) into containing oops, these methods reside here in InlineKlass. - // - // "inline_copy_*_to_new_*" assume new memory (i.e. IS_DEST_UNINITIALIZED for write barriers) + FlatArrayKlass* flat_array_klass(LayoutKind lk, TRAPS); + FlatArrayKlass* flat_array_klass_or_null(LayoutKind lk); + ObjArrayKlass* null_free_reference_array(TRAPS); - void inline_copy_payload_to_new_oop(void* src, oop dst, LayoutKind lk); - void inline_copy_oop_to_new_oop(oop src, oop dst, LayoutKind lk); - void inline_copy_oop_to_new_payload(oop src, void* dst, LayoutKind lk); - void inline_copy_oop_to_payload(oop src, void* dst, LayoutKind lk); + // Methods to copy payload between containers + // Methods taking a LayoutKind argument expect that both the source and the destination + // layouts are compatible with the one specified in argument (alignment, size, presence + // of a null marker). 
Reminder: the PAYLOAD layout, used in values buffered in heap, + // is compatible with all the other layouts. - oop read_flat_field(oop obj, int offset, LayoutKind lk, TRAPS); - void write_flat_field(oop obj, int offset, oop value, bool is_null_free, LayoutKind lk, TRAPS); + void write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS); + oop read_payload_from_addr(oop src, int offset, LayoutKind lk, TRAPS); + void copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized); // oop iterate raw inline type data pointer (where oop_addr may not be an oop, but backing/array-element) template diff --git a/src/hotspot/share/oops/inlineKlass.inline.hpp b/src/hotspot/share/oops/inlineKlass.inline.hpp index c9cb9331dda..c0201f8e7e5 100644 --- a/src/hotspot/share/oops/inlineKlass.inline.hpp +++ b/src/hotspot/share/oops/inlineKlass.inline.hpp @@ -24,8 +24,11 @@ #ifndef SHARE_VM_OOPS_INLINEKLASS_INLINE_HPP #define SHARE_VM_OOPS_INLINEKLASS_INLINE_HPP +#include "classfile/vmSymbols.hpp" #include "memory/iterator.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "oops/flatArrayKlass.hpp" +#include "oops/flatArrayOop.hpp" #include "oops/inlineKlass.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/oop.inline.hpp" @@ -55,23 +58,6 @@ inline address InlineKlass::data_for_oop(oop o) const { return ((address) (void*) o) + first_field_offset(); } -inline void InlineKlass::inline_copy_payload_to_new_oop(void* src, oop dst, LayoutKind lk) { - HeapAccess::value_copy(src, data_for_oop(dst), this, lk); -} - -inline void InlineKlass::inline_copy_oop_to_new_oop(oop src, oop dst, LayoutKind lk) { - HeapAccess::value_copy(data_for_oop(src), data_for_oop(dst), this, lk); -} - -inline void InlineKlass::inline_copy_oop_to_new_payload(oop src, void* dst, LayoutKind lk) { - HeapAccess::value_copy(data_for_oop(src), dst, this, lk); -} - -inline void InlineKlass::inline_copy_oop_to_payload(oop src, void* dst, LayoutKind lk) { 
- HeapAccess<>::value_copy(data_for_oop(src), dst, this, lk); -} - - template void InlineKlass::oop_iterate_specialized(const address oop_addr, OopClosureType* closure) { OopMapBlock* map = start_of_nonstatic_oop_maps(); diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index ab5c52adae6..d18c7a4d8e5 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -1357,7 +1357,7 @@ void InstanceKlass::initialize_impl(TRAPS) { THROW_OOP(e()); } vk->set_default_value(val); - if (vk->has_nullable_layout()) { + if (vk->has_nullable_atomic_layout()) { val = vk->allocate_instance(THREAD); if (HAS_PENDING_EXCEPTION) { Handle e(THREAD, PENDING_EXCEPTION); diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index 9a01a4b192c..1bb6135e6b3 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -146,7 +146,11 @@ class InlineKlassFixedBlock { address* _unpack_handler; int* _default_value_offset; int* _null_reset_value_offset; - ArrayKlass** _null_free_inline_array_klasses; + ArrayKlass** _value_array_klasses; // To be removed? 
+ FlatArrayKlass* _non_atomic_flat_array_klass; + FlatArrayKlass* _atomic_flat_array_klass; + FlatArrayKlass* _nullable_atomic_flat_array_klass; + ObjArrayKlass* _null_free_reference_array_klass; int _first_field_offset; int _payload_size_in_bytes; // size of payload layout int _payload_alignment; // alignment required for payload @@ -154,7 +158,8 @@ class InlineKlassFixedBlock { int _non_atomic_alignment; // alignment requirement for null-free non-atomic layout int _atomic_size_in_bytes; // size and alignment requirement for a null-free atomic layout, -1 if no atomic flat layout is possible int _nullable_size_in_bytes; // size and alignment requirement for a nullable layout (always atomic), -1 if no nullable flat layout is possible - int _null_marker_offset; + int _null_marker_offset; // expressed as an offset from the beginning of the object for a heap buffered value + // first_field_offset must be subtracted to get the offset from the beginning of the payload friend class InlineKlass; }; diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index 090af13f603..64668e057e5 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -499,7 +499,7 @@ class Klass : public Metadata { static BasicType layout_helper_element_type(jint lh) { assert(lh < (jint)_lh_neutral_value, "must be array"); int btvalue = (lh >> _lh_element_type_shift) & _lh_element_type_mask; - assert((btvalue >= T_BOOLEAN && btvalue <= T_OBJECT) || btvalue == T_PRIMITIVE_OBJECT, "sanity"); + assert((btvalue >= T_BOOLEAN && btvalue <= T_OBJECT) || btvalue == T_FLAT_ELEMENT, "sanity"); return (BasicType) btvalue; } @@ -520,7 +520,7 @@ class Klass : public Metadata { static int layout_helper_log2_element_size(jint lh) { assert(lh < (jint)_lh_neutral_value, "must be array"); int l2esz = (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; - assert(layout_helper_element_type(lh) == T_PRIMITIVE_OBJECT || l2esz <= LogBytesPerLong, + 
assert(layout_helper_element_type(lh) == T_FLAT_ELEMENT || l2esz <= LogBytesPerLong, "sanity. l2esz: 0x%x for lh: 0x%x", (uint)l2esz, (uint)lh); return l2esz; } diff --git a/src/hotspot/share/oops/layoutKind.hpp b/src/hotspot/share/oops/layoutKind.hpp new file mode 100644 index 00000000000..c5e39226102 --- /dev/null +++ b/src/hotspot/share/oops/layoutKind.hpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_OOPS_LAYOUTKIND_HPP +#define SHARE_OOPS_LAYOUTKIND_HPP + +enum LayoutKind { + REFERENCE = 0, // indirection to a heap allocated instance + PAYLOAD = 1, // layout used in heap allocated standalone instances, probably temporary for the transition + NON_ATOMIC_FLAT = 2, // flat, no guarantee of atomic updates, no null marker + ATOMIC_FLAT = 3, // flat, size compatible with atomic updates, alignment requirement is equal to the size + NULLABLE_ATOMIC_FLAT = 4, // flat, include a null marker, plus same properties as ATOMIC layout + UNKNOWN = 5 // used for uninitialized fields of type LayoutKind +}; + +#endif // SHARE_OOPS_LAYOUTKIND_HPP \ No newline at end of file diff --git a/src/hotspot/share/oops/markWord.cpp b/src/hotspot/share/oops/markWord.cpp index 2bbec570fa8..ee550bc6a0c 100644 --- a/src/hotspot/share/oops/markWord.cpp +++ b/src/hotspot/share/oops/markWord.cpp @@ -94,3 +94,17 @@ void markWord::print_on(outputStream* st, bool print_monitor_info) const { st->print(" age=%d)", age()); } } + +markWord markWord::flat_array_prototype(LayoutKind lk) { + switch(lk) { + case ATOMIC_FLAT: + case NON_ATOMIC_FLAT: + return markWord(null_free_flat_array_pattern); + break; + case NULLABLE_ATOMIC_FLAT: + return markWord(nullable_flat_array_pattern); + break; + default: + ShouldNotReachHere(); + } +} diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index e4a69819fef..882a4bea480 100644 --- a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -26,8 +26,10 @@ #define SHARE_OOPS_MARKWORD_HPP #include "metaprogramming/primitiveConversions.hpp" +#include "layoutKind.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/globals.hpp" +#include "utilities/vmEnums.hpp" #include @@ -218,7 +220,9 @@ class markWord { static const uintptr_t inline_type_pattern = inline_type_bit_in_place | unlocked_value; static const uintptr_t null_free_array_pattern = null_free_array_bit_in_place | 
unlocked_value; - static const uintptr_t flat_array_pattern = flat_array_bit_in_place | null_free_array_pattern; + static const uintptr_t null_free_flat_array_pattern = flat_array_bit_in_place | null_free_array_pattern; + static const uintptr_t nullable_flat_array_pattern = flat_array_bit_in_place; + // Has static klass prototype, used for decode/encode pointer static const uintptr_t static_prototype_mask = LP64_ONLY(right_n_bits(inline_type_bits + flat_array_bits + null_free_array_bits)) NOT_LP64(right_n_bits(inline_type_bits)); static const uintptr_t static_prototype_mask_in_place = static_prototype_mask << lock_bits; @@ -382,7 +386,8 @@ class markWord { #ifdef _LP64 // 64 bit encodings only bool is_flat_array() const { - return (mask_bits(value(), flat_array_mask_in_place) == flat_array_pattern); + return (mask_bits(value(), flat_array_mask_in_place) == null_free_flat_array_pattern) + || (mask_bits(value(), flat_array_mask_in_place) == nullable_flat_array_pattern); } bool is_null_free_array() const { @@ -409,9 +414,7 @@ class markWord { } #ifdef _LP64 // 64 bit encodings only - static markWord flat_array_prototype() { - return markWord(flat_array_pattern); - } + static markWord flat_array_prototype(LayoutKind lk); static markWord null_free_array_prototype() { return markWord(null_free_array_pattern); diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp index 5b4ba8d741d..d58c6d7326d 100644 --- a/src/hotspot/share/oops/objArrayKlass.cpp +++ b/src/hotspot/share/oops/objArrayKlass.cpp @@ -162,7 +162,6 @@ objArrayOop ObjArrayKlass::allocate(int length, TRAPS) { assert(dimension() == 1, "Can only populate the final dimension"); assert(element_klass()->is_inline_klass(), "Unexpected"); assert(!element_klass()->is_array_klass(), "ArrayKlass unexpected here"); - assert(!InlineKlass::cast(element_klass())->flat_array(), "Expected flatArrayOop allocation"); element_klass()->initialize(CHECK_NULL); // Populate default values... 
instanceOop value = (instanceOop) InlineKlass::cast(element_klass())->default_value(); diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index 35ca063cb76..22c4ef86514 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -305,11 +305,21 @@ void Parse::array_store(BasicType bt) { // ordered with other unknown and known flat array accesses. insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES)); - make_runtime_call(RC_LEAF, - OptoRuntime::store_unknown_inline_Type(), - CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_inline_C), - "store_unknown_inline", TypeRawPtr::BOTTOM, - val, casted_ary, idx); + Node* call = nullptr; + { + // Re-execute flat array store if runtime call triggers deoptimization + PreserveReexecuteState preexecs(this); + jvms()->set_bci(_bci); + jvms()->set_should_reexecute(true); + inc_sp(3); + kill_dead_locals(); + call = make_runtime_call(RC_NO_LEAF, + OptoRuntime::store_unknown_inline_Type(), + OptoRuntime::store_unknown_inline_Java(), + "store_unknown_inline", TypeRawPtr::BOTTOM, + val, casted_ary, idx); + } + make_slow_call_ex(call, env()->Throwable_klass(), false); insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES)); } diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 14e64281de4..1b7a6569f88 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -299,8 +299,10 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaT oop result; if (array_type->is_flatArray_klass()) { - Klass* elem_type = FlatArrayKlass::cast(array_type)->element_klass(); - result = oopFactory::new_valueArray(elem_type, len, THREAD); + Handle holder(current, array_type->klass_holder()); // keep the array klass alive + FlatArrayKlass* fak = FlatArrayKlass::cast(array_type); + Klass* elem_type = fak->element_klass(); + result = 
oopFactory::new_flatArray(elem_type, len, fak->layout_kind(), THREAD); } else if (array_type->is_typeArray_klass()) { // The oopFactory likes to work with the element type. // (We could bypass the oopFactory, since it doesn't add much value.) @@ -2039,8 +2041,7 @@ const TypeFunc *OptoRuntime::pack_inline_type_Type() { JRT_BLOCK_ENTRY(void, OptoRuntime::load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current)) JRT_BLOCK; - flatArrayHandle vah(current, array); - oop buffer = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, THREAD); + oop buffer = array->read_value_from_flat_array(index, THREAD); deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); current->set_vm_result(buffer); JRT_BLOCK_END; @@ -2063,11 +2064,14 @@ const TypeFunc* OptoRuntime::load_unknown_inline_Type() { return TypeFunc::make(domain, range); } -JRT_LEAF(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index)) -{ +JRT_BLOCK_ENTRY(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index, JavaThread* current)) + JRT_BLOCK; assert(buffer != nullptr, "can't store null into flat array"); - array->value_copy_to_index(buffer, index, LayoutKind::PAYLOAD); // Temporary hack for the transition -} + array->write_value_to_flat_array(buffer, index, THREAD); + if (HAS_PENDING_EXCEPTION) { + fatal("This entry must be changed to be a non-leaf entry because writing to a flat array can now throw an exception"); + } + JRT_BLOCK_END; JRT_END const TypeFunc* OptoRuntime::store_unknown_inline_Type() { diff --git a/src/hotspot/share/opto/runtime.hpp b/src/hotspot/share/opto/runtime.hpp index 4be426b5db9..0daa4672e60 100644 --- a/src/hotspot/share/opto/runtime.hpp +++ b/src/hotspot/share/opto/runtime.hpp @@ -192,7 +192,7 @@ class OptoRuntime : public AllStatic { public: static void load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current); - static void 
store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index); + static void store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index, JavaThread* current); static bool is_callee_saved_register(MachRegisterNumbers reg); @@ -226,6 +226,7 @@ class OptoRuntime : public AllStatic { static address slow_arraycopy_Java() { return _slow_arraycopy_Java; } static address register_finalizer_Java() { return _register_finalizer_Java; } static address load_unknown_inline_Java() { return _load_unknown_inline_Java; } + static address store_unknown_inline_Java() { return _store_unknown_inline_Java; } #if INCLUDE_JVMTI static address notify_jvmti_vthread_start() { return _notify_jvmti_vthread_start; } static address notify_jvmti_vthread_end() { return _notify_jvmti_vthread_end; } diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp index e527e44e8c7..3c65d0ad9df 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -659,7 +659,7 @@ void Type::Initialize_shared(Compile* current) { // Nobody should ask _array_body_type[T_NARROWOOP]. Use null as assert. 
TypeAryPtr::_array_body_type[T_NARROWOOP] = nullptr; TypeAryPtr::_array_body_type[T_OBJECT] = TypeAryPtr::OOPS; - TypeAryPtr::_array_body_type[T_PRIMITIVE_OBJECT] = TypeAryPtr::OOPS; + TypeAryPtr::_array_body_type[T_FLAT_ELEMENT] = TypeAryPtr::OOPS; TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays TypeAryPtr::_array_body_type[T_BYTE] = TypeAryPtr::BYTES; TypeAryPtr::_array_body_type[T_BOOLEAN] = TypeAryPtr::BYTES; // boolean[] is a byte array @@ -710,7 +710,7 @@ void Type::Initialize_shared(Compile* current) { _const_basic_type[T_DOUBLE] = Type::DOUBLE; _const_basic_type[T_OBJECT] = TypeInstPtr::BOTTOM; _const_basic_type[T_ARRAY] = TypeInstPtr::BOTTOM; // there is no separate bottom for arrays - _const_basic_type[T_PRIMITIVE_OBJECT] = TypeInstPtr::BOTTOM; + _const_basic_type[T_FLAT_ELEMENT] = TypeInstPtr::BOTTOM; _const_basic_type[T_VOID] = TypePtr::NULL_PTR; // reflection represents void this way _const_basic_type[T_ADDRESS] = TypeRawPtr::BOTTOM; // both interpreter return addresses & random raw ptrs _const_basic_type[T_CONFLICT] = Type::BOTTOM; // why not? 
@@ -727,7 +727,7 @@ void Type::Initialize_shared(Compile* current) { _zero_type[T_DOUBLE] = TypeD::ZERO; _zero_type[T_OBJECT] = TypePtr::NULL_PTR; _zero_type[T_ARRAY] = TypePtr::NULL_PTR; // null array is null oop - _zero_type[T_PRIMITIVE_OBJECT] = TypePtr::NULL_PTR; + _zero_type[T_FLAT_ELEMENT] = TypePtr::NULL_PTR; _zero_type[T_ADDRESS] = TypePtr::NULL_PTR; // raw pointers use the same null _zero_type[T_VOID] = Type::TOP; // the only void value is no value at all diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index d6a5f5101bb..f2f59765001 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -848,7 +848,7 @@ class JNI_ArgumentPusherArray : public JNI_ArgumentPusher { case T_DOUBLE: push_double((_ap++)->d); break; case T_ARRAY: case T_OBJECT: - case T_PRIMITIVE_OBJECT: push_object((_ap++)->l); break; + case T_FLAT_ELEMENT: push_object((_ap++)->l); break; default: ShouldNotReachHere(); } } @@ -1835,19 +1835,19 @@ JNI_ENTRY(jobject, jni_GetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID assert(k->is_instance_klass(), "Only instance can have flat fields"); InstanceKlass* ik = InstanceKlass::cast(k); fieldDescriptor fd; - ik->find_field_from_offset(offset, false, &fd); // performance bottleneck + bool found = ik->find_field_from_offset(offset, false, &fd); // performance bottleneck + assert(found, "Field not found"); InstanceKlass* holder = fd.field_holder(); + assert(holder->field_is_flat(fd.index()), "Must be"); InlineLayoutInfo* li = holder->inline_layout_info_adr(fd.index()); InlineKlass* field_vklass = li->klass(); - res = field_vklass->read_flat_field(o, ik->field_offset(fd.index()), li->kind(), CHECK_NULL); + res = field_vklass->read_payload_from_addr(o, ik->field_offset(fd.index()), li->kind(), CHECK_NULL); } jobject ret = JNIHandles::make_local(THREAD, res); HOTSPOT_JNI_GETOBJECTFIELD_RETURN(ret); return ret; JNI_END - - #define DEFINE_GETFIELD(Return,Fieldname,Result \ , EntryProbe, 
ReturnProbe) \ \ @@ -1944,7 +1944,9 @@ JNI_ENTRY_NO_PRESERVE(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldI InlineLayoutInfo* li = holder->inline_layout_info_adr(fd.index()); InlineKlass* vklass = li->klass(); oop v = JNIHandles::resolve_non_null(value); - vklass->write_flat_field(o, offset, v, fd.is_null_free_inline_type(), li->kind(), CHECK); + // vklass->write_flat_field(o, offset, v, fd.is_null_free_inline_type(), li->kind(), CHECK); + // write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS); + vklass->write_value_to_addr(v, ((char*)(oopDesc*)o) + offset, li->kind(), true, CHECK); } HOTSPOT_JNI_SETOBJECTFIELD_RETURN(); JNI_END @@ -2375,8 +2377,7 @@ JVM_ENTRY(jobject, jni_GetObjectArrayElement(JNIEnv *env, jobjectArray array, js if (arr->is_within_bounds(index)) { if (arr->is_flatArray()) { flatArrayOop a = flatArrayOop(JNIHandles::resolve_non_null(array)); - flatArrayHandle vah(thread, a); - res = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK_NULL); + res = a->read_value_from_flat_array(index, CHECK_NULL); assert(res != nullptr, "Must be set in one of two paths above"); } else { assert(arr->is_objArray(), "If not a valueArray.
must be an objArray"); @@ -2411,7 +2412,7 @@ JNI_ENTRY(void, jni_SetObjectArrayElement(JNIEnv *env, jobjectArray array, jsize FlatArrayKlass* vaklass = FlatArrayKlass::cast(a->klass()); InlineKlass* element_vklass = vaklass->element_klass(); if (v != nullptr && v->is_a(element_vklass)) { - a->value_copy_to_index(v, index, LayoutKind::PAYLOAD); // Temporary hack for the transition + a->write_value_to_flat_array(v, index, CHECK); } else { ResourceMark rm(THREAD); stringStream ss; diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index 1c912a92ef2..8508d1117e2 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -428,27 +428,79 @@ JVM_ENTRY(jstring, JVM_GetTemporaryDirectory(JNIEnv *env)) return (jstring) JNIHandles::make_local(THREAD, h()); JVM_END -JVM_ENTRY(jarray, JVM_NewNullRestrictedArray(JNIEnv *env, jclass elmClass, jint len)) +static void validate_array_arguments(Klass* elmClass, jint len, TRAPS) { if (len < 0) { - THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "Array length is negative"); + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Array length is negative"); + } + elmClass->initialize(CHECK); + if (elmClass->is_identity_class()) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Element class is not a value class"); + } + if (elmClass->is_abstract()) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Element class is abstract"); + } +} + +JVM_ENTRY(jarray, JVM_NewNullRestrictedArray(JNIEnv *env, jclass elmClass, jint len)) + oop mirror = JNIHandles::resolve_non_null(elmClass); + Klass* klass = java_lang_Class::as_Klass(mirror); + klass->initialize(CHECK_NULL); + validate_array_arguments(klass, len, CHECK_NULL); + InlineKlass* vk = InlineKlass::cast(klass); + if (!vk->is_implicitly_constructible()) { + THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "Element class is not implicitly constructible"); + } + oop array = 
nullptr; + if (UseFlatArray && vk->has_non_atomic_layout()) { + array = oopFactory::new_flatArray(vk, len, LayoutKind::NON_ATOMIC_FLAT, CHECK_NULL); + } else { + array = oopFactory::new_null_free_objArray(vk, len, CHECK_NULL); } + return (jarray) JNIHandles::make_local(THREAD, array); +JVM_END + +JVM_ENTRY(jarray, JVM_NewNullRestrictedAtomicArray(JNIEnv *env, jclass elmClass, jint len)) oop mirror = JNIHandles::resolve_non_null(elmClass); Klass* klass = java_lang_Class::as_Klass(mirror); klass->initialize(CHECK_NULL); - if (klass->is_identity_class()) { - THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "Element class is not a value class"); + validate_array_arguments(klass, len, CHECK_NULL); + InlineKlass* vk = InlineKlass::cast(klass); + if (!vk->is_implicitly_constructible()) { + THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "Element class is not implicitly constructible"); } - if (klass->is_abstract()) { - THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "Element class is abstract"); + oop array = nullptr; + if (UseFlatArray && vk->has_atomic_layout()) { + array = oopFactory::new_flatArray(vk, len, LayoutKind::ATOMIC_FLAT, CHECK_NULL); + } else if (UseFlatArray && vk->is_naturally_atomic()) { + array = oopFactory::new_flatArray(vk, len, LayoutKind::NON_ATOMIC_FLAT, CHECK_NULL); + } else { + array = oopFactory::new_null_free_objArray(vk, len, CHECK_NULL); } + return (jarray) JNIHandles::make_local(THREAD, array); +JVM_END + +JVM_ENTRY(jarray, JVM_NewNullableAtomicArray(JNIEnv *env, jclass elmClass, jint len)) + oop mirror = JNIHandles::resolve_non_null(elmClass); + Klass* klass = java_lang_Class::as_Klass(mirror); + klass->initialize(CHECK_NULL); + validate_array_arguments(klass, len, CHECK_NULL); InlineKlass* vk = InlineKlass::cast(klass); if (!vk->is_implicitly_constructible()) { THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "Element class is not implicitly constructible"); } - oop array = 
oopFactory::new_valueArray(vk, len, CHECK_NULL); + oop array = nullptr; + if (UseFlatArray && vk->has_nullable_atomic_layout()) { + array = oopFactory::new_flatArray(vk, len, LayoutKind::NULLABLE_ATOMIC_FLAT, CHECK_NULL); + } else { + array = oopFactory::new_objArray(vk, len, CHECK_NULL); + } return (jarray) JNIHandles::make_local(THREAD, array); JVM_END +JVM_ENTRY(jboolean, JVM_IsFlatArray(JNIEnv *env, jobject obj)) + arrayOop oop = arrayOop(JNIHandles::resolve_non_null(obj)); + return oop->is_flatArray(); +JVM_END JVM_ENTRY(jboolean, JVM_IsNullRestrictedArray(JNIEnv *env, jobject obj)) arrayOop oop = arrayOop(JNIHandles::resolve_non_null(obj)); diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp index 3fb2e754a24..47fcabee098 100644 --- a/src/hotspot/share/prims/unsafe.cpp +++ b/src/hotspot/share/prims/unsafe.cpp @@ -373,6 +373,7 @@ UNSAFE_ENTRY(jint, Unsafe_NullMarkerOffset(JNIEnv *env, jobject unsage, jobject oop f = JNIHandles::resolve_non_null(o); Klass* k = java_lang_Class::as_Klass(java_lang_reflect_Field::clazz(f)); int slot = java_lang_reflect_Field::slot(f); + InstanceKlass* ik = InstanceKlass::cast(k); fatal("Not supported yet"); } UNSAFE_END @@ -394,7 +395,7 @@ UNSAFE_ENTRY(jobject, Unsafe_GetValue(JNIEnv *env, jobject unsafe, jobject obj, InlineKlass* vk = InlineKlass::cast(k); assert_and_log_unsafe_value_access(base, offset, vk); Handle base_h(THREAD, base); - oop v = vk->read_flat_field(base_h(), offset, LayoutKind::PAYLOAD, CHECK_NULL); // TODO FIXME Hard coded layout kind to make the code compile, Unsafe must be upgraded to handle correct layout kind + oop v = vk->read_payload_from_addr(base_h(), offset, LayoutKind::PAYLOAD, CHECK_NULL);// TODO FIXME Hard coded layout kind to make the code compile, Unsafe must be upgraded to handle correct layout kind return JNIHandles::make_local(THREAD, v); } UNSAFE_END @@ -405,7 +406,8 @@ UNSAFE_ENTRY(void, Unsafe_PutValue(JNIEnv *env, jobject unsafe, jobject obj, jlo 
assert(!base->is_inline_type() || base->mark().is_larval_state(), "must be an object instance or a larval inline type"); assert_and_log_unsafe_value_access(base, offset, vk); oop v = JNIHandles::resolve(value); - vk->write_flat_field(base, offset, v, true /*null free*/, LayoutKind::PAYLOAD, CHECK); // TODO FIXME Hard coded layout kind to make the code compile, Unsafe must be upgraded to handle correct layout kind + // TODO FIXME: problem below, with new APIs, null checking depends on LayoutKind, but Unsafe APIs are not able to communicate the right layout kind yet + vk->write_value_to_addr(v, ((char*)(oopDesc*)base) + offset, LayoutKind::PAYLOAD, true, CHECK);// TODO FIXME Hard coded layout kind to make the code compile, Unsafe must be upgraded to handle correct layout kind } UNSAFE_END UNSAFE_ENTRY(jobject, Unsafe_MakePrivateBuffer(JNIEnv *env, jobject unsafe, jobject value)) { @@ -414,7 +416,7 @@ UNSAFE_ENTRY(jobject, Unsafe_MakePrivateBuffer(JNIEnv *env, jobject unsafe, jobj Handle vh(THREAD, v); InlineKlass* vk = InlineKlass::cast(v->klass()); instanceOop new_value = vk->allocate_instance_buffer(CHECK_NULL); - vk->inline_copy_oop_to_new_oop(vh(), new_value, LayoutKind::PAYLOAD); // FIXME temporary hack for the transition + vk->copy_payload_to_addr(vk->data_for_oop(vh()), vk->data_for_oop(new_value), LayoutKind::PAYLOAD, false); markWord mark = new_value->mark(); new_value->set_mark(mark.enter_larval_state()); return JNIHandles::make_local(THREAD, new_value); diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 59cadcde989..ca5a4f19275 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -1287,7 +1287,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* } else if (k->is_flatArray_klass()) { FlatArrayKlass* ak = FlatArrayKlass::cast(k); // Inline type array must be zeroed because not all memory is reassigned - obj = 
ak->allocate(sv->field_size(), THREAD); + obj = ak->allocate(sv->field_size(), ak->layout_kind(), THREAD); } else if (k->is_typeArray_klass()) { TypeArrayKlass* ak = TypeArrayKlass::cast(k); assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length"); @@ -1630,7 +1630,7 @@ void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_ma InlineKlass* vk = vak->element_klass(); assert(vk->flat_array(), "should only be used for flat inline type arrays"); // Adjust offset to omit oop header - int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset(); + int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - InlineKlass::cast(vk)->first_field_offset(); // Initialize all elements of the flat inline type array for (int i = 0; i < sv->field_size(); i++) { ScopeValue* val = sv->field_at(i); diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp index 9e9628646e8..1754931b56e 100644 --- a/src/hotspot/share/runtime/javaCalls.cpp +++ b/src/hotspot/share/runtime/javaCalls.cpp @@ -154,7 +154,7 @@ static BasicType runtime_type_from(JavaValue* result) { #ifndef _LP64 case T_OBJECT : // fall through case T_ARRAY : // fall through - case T_PRIMITIVE_OBJECT: // fall through + case T_FLAT_ELEMENT: // fall through #endif case T_BYTE : // fall through case T_VOID : return T_INT; diff --git a/src/hotspot/share/runtime/stubDeclarations.hpp b/src/hotspot/share/runtime/stubDeclarations.hpp index 1edb75ea4eb..d7fc5f2d2e6 100644 --- a/src/hotspot/share/runtime/stubDeclarations.hpp +++ b/src/hotspot/share/runtime/stubDeclarations.hpp @@ -169,6 +169,7 @@ do_stub(slow_arraycopy, 0, false, false) \ do_stub(register_finalizer, 0, false, false) \ do_stub(load_unknown_inline, 0, true, false) \ + do_stub(store_unknown_inline, 0, true, false) \ #else #define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub) diff --git 
a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp index 4dfbbccb3f6..98ab0536113 100644 --- a/src/hotspot/share/services/heapDumper.cpp +++ b/src/hotspot/share/services/heapDumper.cpp @@ -1505,11 +1505,11 @@ int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayO int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) { BasicType type = ArrayKlass::cast(array->klass())->element_type(); - assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_PRIMITIVE_OBJECT, "invalid array element type"); + assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type"); int type_size; if (type == T_OBJECT) { type_size = sizeof(address); - } else if (type == T_PRIMITIVE_OBJECT) { + } else if (type == T_FLAT_ELEMENT) { // TODO: FIXME fatal("Not supported yet"); // FIXME: JDK-8325678 } else { diff --git a/src/hotspot/share/utilities/globalDefinitions.cpp b/src/hotspot/share/utilities/globalDefinitions.cpp index 791492f4d4a..c847b085a3c 100644 --- a/src/hotspot/share/utilities/globalDefinitions.cpp +++ b/src/hotspot/share/utilities/globalDefinitions.cpp @@ -198,7 +198,7 @@ void basic_types_init() { } _type2aelembytes[T_OBJECT] = heapOopSize; _type2aelembytes[T_ARRAY] = heapOopSize; - _type2aelembytes[T_PRIMITIVE_OBJECT] = heapOopSize; + _type2aelembytes[T_FLAT_ELEMENT] = heapOopSize; } @@ -210,7 +210,7 @@ char type2char_tab[T_CONFLICT+1] = { JVM_SIGNATURE_BYTE, JVM_SIGNATURE_SHORT, JVM_SIGNATURE_INT, JVM_SIGNATURE_LONG, JVM_SIGNATURE_CLASS, JVM_SIGNATURE_ARRAY, - JVM_SIGNATURE_PRIMITIVE_OBJECT, JVM_SIGNATURE_VOID, + JVM_SIGNATURE_FLAT_ELEMENT, JVM_SIGNATURE_VOID, 0, 0, 0, 0, 0 }; @@ -325,7 +325,7 @@ int _type2aelembytes[T_CONFLICT+1] = { T_LONG_aelem_bytes, // T_LONG = 11, T_OBJECT_aelem_bytes, // T_OBJECT = 12, T_ARRAY_aelem_bytes, // T_ARRAY = 13, - T_PRIMITIVE_OBJECT_aelem_bytes, // T_PRIMITIVE_OBJECT = 14, + 
T_FLAT_ELEMENT_aelem_bytes, // T_FLAT_ELEMENT = 14, 0, // T_VOID = 15, T_OBJECT_aelem_bytes, // T_ADDRESS = 16, T_NARROWOOP_aelem_bytes, // T_NARROWOOP= 17, diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp index a6419402482..add7b12fae9 100644 --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -724,7 +724,7 @@ enum BasicType : u1 { // types in their own right. T_OBJECT = 12, T_ARRAY = 13, - T_PRIMITIVE_OBJECT = 14, // Not a true BasicType, only use in headers of flat arrays + T_FLAT_ELEMENT = 14, // Not a true BasicType, only use in headers of flat arrays T_VOID = 15, T_ADDRESS = 16, T_NARROWOOP = 17, @@ -745,7 +745,7 @@ enum BasicType : u1 { F(JVM_SIGNATURE_LONG, T_LONG, N) \ F(JVM_SIGNATURE_CLASS, T_OBJECT, N) \ F(JVM_SIGNATURE_ARRAY, T_ARRAY, N) \ - F(JVM_SIGNATURE_PRIMITIVE_OBJECT, T_PRIMITIVE_OBJECT, N) \ + F(JVM_SIGNATURE_FLAT_ELEMENT, T_FLAT_ELEMENT, N) \ F(JVM_SIGNATURE_VOID, T_VOID, N) \ /*end*/ @@ -775,7 +775,7 @@ inline bool is_double_word_type(BasicType t) { } inline bool is_reference_type(BasicType t, bool include_narrow_oop = false) { - return (t == T_OBJECT || t == T_ARRAY || t == T_PRIMITIVE_OBJECT || (include_narrow_oop && t == T_NARROWOOP)); + return (t == T_OBJECT || t == T_ARRAY || t == T_FLAT_ELEMENT || (include_narrow_oop && t == T_NARROWOOP)); } inline bool is_integral_type(BasicType t) { @@ -834,7 +834,7 @@ enum BasicTypeSize { T_NARROWOOP_size = 1, T_NARROWKLASS_size = 1, T_VOID_size = 0, - T_PRIMITIVE_OBJECT_size = 1 + T_FLAT_ELEMENT_size = 1 }; // this works on valid parameter types but not T_VOID, T_CONFLICT, etc.
@@ -864,11 +864,11 @@ enum ArrayElementSize { #ifdef _LP64 T_OBJECT_aelem_bytes = 8, T_ARRAY_aelem_bytes = 8, - T_PRIMITIVE_OBJECT_aelem_bytes = 8, + T_FLAT_ELEMENT_aelem_bytes = 8, #else T_OBJECT_aelem_bytes = 4, T_ARRAY_aelem_bytes = 4, - T_PRIMITIVE_OBJECT_aelem_bytes = 4, + T_FLAT_ELEMENT_aelem_bytes = 4, #endif T_NARROWOOP_aelem_bytes = 4, T_NARROWKLASS_aelem_bytes = 4, diff --git a/src/java.base/share/classes/jdk/internal/value/ValueClass.java b/src/java.base/share/classes/jdk/internal/value/ValueClass.java index 3fd75ea1f29..3abb107855f 100644 --- a/src/java.base/share/classes/jdk/internal/value/ValueClass.java +++ b/src/java.base/share/classes/jdk/internal/value/ValueClass.java @@ -119,6 +119,14 @@ public static Object[] newArrayInstance(CheckedType componentType, int length) { public static native Object[] newNullRestrictedArray(Class componentType, int length); + public static native Object[] newNullRestrictedAtomicArray(Class componentType, + int length); + + public static native Object[] newNullableAtomicArray(Class componentType, + int length); + + public static native boolean isFlatArray(Object array); + /** * {@return true if the given array is a null-restricted array} */ diff --git a/src/java.base/share/native/include/classfile_constants.h.template b/src/java.base/share/native/include/classfile_constants.h.template index 40a528a9a74..dd82153d11f 100644 --- a/src/java.base/share/native/include/classfile_constants.h.template +++ b/src/java.base/share/native/include/classfile_constants.h.template @@ -154,7 +154,7 @@ enum { JVM_SIGNATURE_BYTE = 'B', JVM_SIGNATURE_CHAR = 'C', JVM_SIGNATURE_CLASS = 'L', - JVM_SIGNATURE_PRIMITIVE_OBJECT = 'Q', + JVM_SIGNATURE_FLAT_ELEMENT = 'Q', JVM_SIGNATURE_ENDCLASS = ';', JVM_SIGNATURE_ENUM = 'E', JVM_SIGNATURE_FLOAT = 'F', diff --git a/src/java.base/share/native/libjava/ValueClass.c b/src/java.base/share/native/libjava/ValueClass.c index 9d89d770e78..f74f13de153 100644 --- 
a/src/java.base/share/native/libjava/ValueClass.c +++ b/src/java.base/share/native/libjava/ValueClass.c @@ -39,6 +39,24 @@ Java_jdk_internal_value_ValueClass_newNullRestrictedArray(JNIEnv *env, jclass cl return JVM_NewNullRestrictedArray(env, elmClass, len); } +JNIEXPORT jarray JNICALL +Java_jdk_internal_value_ValueClass_newNullRestrictedAtomicArray(JNIEnv *env, jclass cls, jclass elmClass, jint len) +{ + return JVM_NewNullRestrictedAtomicArray(env, elmClass, len); +} + +JNIEXPORT jarray JNICALL +Java_jdk_internal_value_ValueClass_newNullableAtomicArray(JNIEnv *env, jclass cls, jclass elmClass, jint len) +{ + return JVM_NewNullableAtomicArray(env, elmClass, len); +} + +JNIEXPORT jboolean JNICALL +Java_jdk_internal_value_ValueClass_isFlatArray(JNIEnv *env, jclass cls, jobject obj) +{ + return JVM_IsFlatArray(env, obj); +} + JNIEXPORT jboolean JNICALL Java_jdk_internal_value_ValueClass_isNullRestrictedArray(JNIEnv *env, jclass cls, jobject obj) { diff --git a/test/hotspot/gtest/oops/test_markWord.cpp b/test/hotspot/gtest/oops/test_markWord.cpp index 966708d695c..2a833f2a5e1 100644 --- a/test/hotspot/gtest/oops/test_markWord.cpp +++ b/test/hotspot/gtest/oops/test_markWord.cpp @@ -201,8 +201,23 @@ static void assert_flat_array_type(markWord mark) { EXPECT_TRUE(mark.is_null_free_array()); } -TEST_VM(markWord, flat_array_prototype) { - markWord mark = markWord::flat_array_prototype(); +TEST_VM(markWord, null_free_flat_array_prototype) { + markWord mark = markWord::flat_array_prototype(LayoutKind::NON_ATOMIC_FLAT); + assert_unlocked_state(mark); + EXPECT_TRUE(mark.is_neutral()); + + assert_flat_array_type(mark); + + EXPECT_TRUE(mark.has_no_hash()); + EXPECT_FALSE(mark.is_marked()); + EXPECT_TRUE(mark.decode_pointer() == NULL); + + assert_copy_set_hash(mark); + assert_flat_array_type(mark); +} + +TEST_VM(markWord, nullable_flat_array_prototype) { + markWord mark = markWord::flat_array_prototype(LayoutKind::NULLABLE_ATOMIC_FLAT); assert_unlocked_state(mark); 
EXPECT_TRUE(mark.is_neutral()); diff --git a/test/hotspot/jtreg/runtime/valhalla/inlinetypes/FlatArraysTest.java b/test/hotspot/jtreg/runtime/valhalla/inlinetypes/FlatArraysTest.java new file mode 100644 index 00000000000..adf9b100765 --- /dev/null +++ b/test/hotspot/jtreg/runtime/valhalla/inlinetypes/FlatArraysTest.java @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package runtime.valhalla.inlinetypes; + +import jdk.internal.value.ValueClass; +import jdk.internal.vm.annotation.ImplicitlyConstructible; +import jdk.internal.vm.annotation.LooselyConsistentValue; +import jdk.internal.vm.annotation.NullRestricted; +import java.lang.management.ManagementFactory; +import java.lang.management.RuntimeMXBean; +import java.lang.reflect.Array; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.List; + + + +import static jdk.test.lib.Asserts.*; + +/* + * @test FlatArraysTest + * @summary Plain array test for Inline Types + * @modules java.base/jdk.internal.value + * java.base/jdk.internal.vm.annotation + * @library /test/lib + * @enablePreview + * @compile --source 24 FlatArraysTest.java + * @run main/othervm -XX:FlatArrayElementMaxSize=-1 -XX:InlineFieldMaxFlatSize=-1 -XX:+AtomicFieldFlattening -XX:+NullableFieldFlattening runtime.valhalla.inlinetypes.FlatArraysTest + * @run main/othervm -XX:FlatArrayElementMaxSize=0 -XX:+AtomicFieldFlattening -XX:+NullableFieldFlattening runtime.valhalla.inlinetypes.FlatArraysTest + */ +public class FlatArraysTest { + static final int ARRAY_SIZE = 100; + + @ImplicitlyConstructible + @LooselyConsistentValue + static value class SmallValue { + byte b; + short s; + + SmallValue() { b = 0 ; s = 0; } + SmallValue(byte b0, short s0) { b = b0; s = s0; } + + public static Object getTestValue() { return new SmallValue(Byte.MIN_VALUE, Short.MIN_VALUE); } + + public static boolean expectingFlatNullRestrictedArray() { return true; } + public static boolean expectingFlatNullRestrictedAtomicArray() { return true; } + public static boolean expectingFlatNullableAtomicArray() { return true; } + } + + @ImplicitlyConstructible + @LooselyConsistentValue + static value class MediumValue { + int x; + int y; + + MediumValue() { + x = 0; + y = 0; + } + MediumValue(int x0, int y0) { + x = x0; + y = y0; + } + + 
public static Object getTestValue() { + return new MediumValue(Integer.MIN_VALUE, Integer.MIN_VALUE); + } + + public static boolean expectingFlatNullRestrictedArray() { return true; } + public static boolean expectingFlatNullRestrictedAtomicArray() { return true; } + public static boolean expectingFlatNullableAtomicArray() { return false; } + } + + @ImplicitlyConstructible + @LooselyConsistentValue + static value class BigValue { + long x; + long y; + long z; + + BigValue() { + x = 0; + y = 0; + z = 0; + } + BigValue(long x0, long y0, long z0) { + x = x0; + y = y0; + z = z0; + } + + public static Object getTestValue() { + return new BigValue(Long.MIN_VALUE, Long.MIN_VALUE, Long.MIN_VALUE); + } + + public static boolean expectingFlatNullRestrictedArray() { return true; } + public static boolean expectingFlatNullRestrictedAtomicArray() { return false; } + public static boolean expectingFlatNullableAtomicArray() { return false; } + } + + static void testNullFreeArray(Object[] array, Object value) { + testErrorCases(array); + assertNotNull(value, "Test needs a not null value"); + // Test 1 : check initial element value is not null + for (int i = 0 ; i < array.length; i++) { + assertNotNull(array[i], "Initial value must not be null"); + } + // Test 2 : try to write null + for (int i = 0 ; i < array.length; i++) { + try { + array[i] = null; + throw new RuntimeException("Missing NullPointerException"); + } catch (NullPointerException e) { } + } + // Test 3 : overwrite initial value with new value + for (int i = 0 ; i < array.length; i++) { + array[i] = value; + } + for (int i = 0 ; i < array.length; i++) { + assertEquals(array[i], value); + } + } + + static void testNullableArray(Object[] array, Object value) { + testErrorCases(array); + assertNotNull(value, "Test needs a not null value"); + // Test 1 : check that initial element value is null + System.gc(); + System.out.println("Test 1"); + for (int i = 0 ; i < array.length; i++) { + assertNull(array[i], "Initial value 
should be null"); + } + // Test 2 : write new value to all elements + System.gc(); + System.out.println("Test 2a"); + for (int i = 0 ; i < array.length; i++) { + array[i] = value; + assertEquals(array[i], value, "Value mismatch"); + } + System.gc(); + System.out.println("Test 2b"); + for (int i = 0 ; i < array.length; i++) { + assertEquals(array[i], value, "Value mismatch"); + } + // Test 3 : write null to all elements + System.gc(); + System.out.println("Test 3a"); + for (int i = 0 ; i < array.length; i++) { + array[i] = null; + } + System.gc(); + System.out.println("Test 3b"); + for (int i = 0 ; i < array.length; i++) { + assertNull(array[i], "Value mismatch"); + } + // Test 4 : write alternate null / not null values + System.gc(); + System.out.println("Test 4a"); + for (int i = 0 ; i < array.length; i++) { + if (i%2 == 0) { + array[i] = null; + } else { + array[i] = value; + } + } + System.gc(); + System.out.println("Test 4b"); + for (int i = 0 ; i < array.length; i++) { + if (i%2 == 0) { + assertNull(array[i], "Value mismatch"); + } else { + assertEquals(array[i], value, "Value mismatch"); + } + } + } + + static void testErrorCases(Object[] array) { + try { + Object o = array[-1]; + throw new RuntimeException("Missing IndexOutOfBoundsException"); + } catch(IndexOutOfBoundsException e) { } + + try { + Object o = array[array.length]; + throw new RuntimeException("Missing IndexOutOfBoundsException"); + } catch(IndexOutOfBoundsException e) { } + + assertTrue(array.getClass().getComponentType() != String.class, "Must be for the test"); + assertTrue(array.length > 0, "Must be for the test"); + try { + array[0] = new String("Bad"); + throw new RuntimeException("Missing ArrayStoreException"); + } catch (ArrayStoreException e) { } + } + + static void testArrayCopy() { + + Object[] objArray = new Object[ARRAY_SIZE]; + for (int i = 0; i < ARRAY_SIZE; i++) { + objArray[i] = SmallValue.getTestValue(); + } + SmallValue[] nonAtomicArray = 
(SmallValue[])ValueClass.newNullRestrictedArray(SmallValue.class, ARRAY_SIZE); + SmallValue[] atomicArray = (SmallValue[])ValueClass.newNullRestrictedAtomicArray(SmallValue.class, ARRAY_SIZE); + SmallValue[] nullableArray = (SmallValue[])ValueClass.newNullableAtomicArray(SmallValue.class, ARRAY_SIZE); + + // obj -> non-atomic + testArrayCopyInternal(objArray, nonAtomicArray); + + // obj -> atomic + testArrayCopyInternal(objArray, atomicArray); + + // obj -> nullable + testArrayCopyInternal(objArray, nullableArray); + + objArray[45] = null; + // obj with null -> non-atomic => NPE + try { + testArrayCopyInternal(objArray, nonAtomicArray); + throw new RuntimeException("Missing NullPointerException"); + } catch (NullPointerException e) { } + + // obj with null -> atomic => NPE + try { + testArrayCopyInternal(objArray, atomicArray); + throw new RuntimeException("Missing NullPointerException"); + } catch (NullPointerException e) { } + + // obj with null -> nullable + try { + testArrayCopyInternal(objArray, nullableArray); + } catch (NullPointerException e) { + throw new RuntimeException("Unexpected NullPointerException"); + } + + objArray[45] = new String("bad"); + // obj with wrong type value -> non-atomic => ASE + try { + testArrayCopyInternal(objArray, nonAtomicArray); + throw new RuntimeException("Missing ArrayStoreException"); + } catch (ArrayStoreException e) { } + + // obj with wrong type value -> atomic => ASE + try { + testArrayCopyInternal(objArray, atomicArray); + throw new RuntimeException("Missing ArrayStoreException"); + } catch (ArrayStoreException e) { } + + // obj with wrong type value -> nullable => ASE + try { + testArrayCopyInternal(objArray, nullableArray); + throw new RuntimeException("Missing ArrayStoreException"); + } catch (ArrayStoreException e) { } + + // Reset all arrays + objArray = new Object[ARRAY_SIZE]; + nonAtomicArray = (SmallValue[])ValueClass.newNullRestrictedArray(SmallValue.class, ARRAY_SIZE); + atomicArray = 
(SmallValue[])ValueClass.newNullRestrictedAtomicArray(SmallValue.class, ARRAY_SIZE); + nullableArray = (SmallValue[])ValueClass.newNullableAtomicArray(SmallValue.class, ARRAY_SIZE); + + // non-atomic -> obj + testArrayCopyInternal(nonAtomicArray, objArray); + + // non-atomic -> non-atomic + SmallValue[] nonAtomicArray2 = (SmallValue[])ValueClass.newNullRestrictedArray(SmallValue.class, ARRAY_SIZE); + testArrayCopyInternal(nonAtomicArray, nonAtomicArray2); + + // non-atomic -> non-atomic same array + testArrayCopyInternal(nonAtomicArray, nonAtomicArray); + + // non-atomic -> atomic + testArrayCopyInternal(nonAtomicArray, atomicArray); + + // non-atomic -> nullable + testArrayCopyInternal(nonAtomicArray, nullableArray); + + // Reset all arrays + objArray = new Object[ARRAY_SIZE]; + nonAtomicArray = (SmallValue[])ValueClass.newNullRestrictedArray(SmallValue.class, ARRAY_SIZE); + atomicArray = (SmallValue[])ValueClass.newNullRestrictedAtomicArray(SmallValue.class, ARRAY_SIZE); + nullableArray = (SmallValue[])ValueClass.newNullableAtomicArray(SmallValue.class, ARRAY_SIZE); + + for (int i = 0 ; i < ARRAY_SIZE; i++) { + atomicArray[i] = (SmallValue)SmallValue.getTestValue(); + } + + // atomic -> obj + testArrayCopyInternal(atomicArray, objArray); + + // atomic -> non-atomic + testArrayCopyInternal(atomicArray, nonAtomicArray); + + // atomic -> atomic + SmallValue[] atomicArray2 = (SmallValue[])ValueClass.newNullRestrictedAtomicArray(SmallValue.class, ARRAY_SIZE); + testArrayCopyInternal(atomicArray, atomicArray2); + + // atomic -> atomic same array + testArrayCopyInternal(atomicArray, atomicArray); + + // atomic -> nullable + testArrayCopyInternal(atomicArray, nullableArray); + + // Reset all arrays + objArray = new Object[ARRAY_SIZE]; + nonAtomicArray = (SmallValue[])ValueClass.newNullRestrictedArray(SmallValue.class, ARRAY_SIZE); + atomicArray = (SmallValue[])ValueClass.newNullRestrictedAtomicArray(SmallValue.class, ARRAY_SIZE); + nullableArray = 
(SmallValue[])ValueClass.newNullableAtomicArray(SmallValue.class, ARRAY_SIZE); + + for (int i = 0 ; i < ARRAY_SIZE; i++) { + nullableArray[i] = (SmallValue)SmallValue.getTestValue(); + } + + // nullable -> obj + testArrayCopyInternal(nullableArray, objArray); + + // nullable -> non-atomic + testArrayCopyInternal(nullableArray, nonAtomicArray); + + // nullable -> atomic + testArrayCopyInternal(nullableArray, atomicArray); + + // nullable -> nullable + SmallValue[] nullableArray2 = (SmallValue[])ValueClass.newNullableAtomicArray(SmallValue.class, ARRAY_SIZE); + testArrayCopyInternal(nullableArray, nullableArray2); + + // nullable -> nullable same array + testArrayCopyInternal(nullableArray, nullableArray); + + nullableArray[45] = null; + + // nullable with null -> obj + testArrayCopyInternal(nullableArray, objArray); + + // nullable with null -> non-atomic => NPE + try { + testArrayCopyInternal(nullableArray, nonAtomicArray); + throw new RuntimeException("Missing NullPointerException"); + } catch (NullPointerException e) { } + + // nullable with null -> atomic => NPE + try { + testArrayCopyInternal(nullableArray, atomicArray); + throw new RuntimeException("Missing NullPointerException"); + } catch (NullPointerException e) { } + + // nullable with null -> nullable + nullableArray2 = (SmallValue[])ValueClass.newNullableAtomicArray(SmallValue.class, ARRAY_SIZE); + testArrayCopyInternal(nullableArray, nullableArray2); + + // nullable with null -> nullable same array + testArrayCopyInternal(nullableArray, nullableArray); + } + + static void testArrayCopyInternal(Object[] src, Object[] dst) { + // When using this method for cases that should trigger a NPE or an ASE, + // it is recommended to put the faulty value at index 45 in the src array + assertTrue(src.length >= ARRAY_SIZE, "Must be for the test"); + assertTrue(dst.length >= ARRAY_SIZE, "Must be for the test"); + // Test 1 : good copy without indexes overlap + System.arraycopy(src, 3, dst, 51, 40); + for (int i = 0; i 
< 40; i++) { + assertEquals(src[3+i], dst[51+i], "Mismatch after copying"); + } + // Test 2 : good copy with indexes overlap + System.arraycopy(src, 42, dst, 53, 45); + if (src != dst) { // Verification doesn't make sense if src and dst are the same + for (int i = 0; i < 45; i++) { + assertEquals(src[42+i], dst[53+i], "Mismatch after copying"); + } + } + // Test 3 : IOOB errors + try { + System.arraycopy(src, -1, dst, 3, 10); + throw new RuntimeException("Missing IndexOutOfBoundsException"); + } catch(IndexOutOfBoundsException e) { } + try { + System.arraycopy(src, src.length - 5, dst, 3, 10); + throw new RuntimeException("Missing IndexOutOfBoundsException"); + } catch(IndexOutOfBoundsException e) { } + try { + System.arraycopy(src, 10, dst, -1, 10); + throw new RuntimeException("Missing IndexOutOfBoundsException"); + } catch(IndexOutOfBoundsException e) { } + try { + System.arraycopy(src, 10, dst, dst.length - 5, 10); + throw new RuntimeException("Missing IndexOutOfBoundsException"); + } catch(IndexOutOfBoundsException e) { } + } + + static void testArrayAccesses() throws NoSuchMethodException, InstantiationException, + IllegalAccessException, InvocationTargetException { + RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean(); + List arguments = runtimeMxBean.getInputArguments(); + boolean useFlatArray = !arguments.contains("-XX:FlatArrayElementMaxSize=0"); + System.out.println("UseFlatArray: " + useFlatArray); + Class[] valueClasses = {SmallValue.class, MediumValue.class, BigValue.class}; + for (Class c: valueClasses) { + System.out.println("Testing class " + c.getName()); + Method gtv = c.getMethod("getTestValue", null); + Object o = gtv.invoke(null, null); + assertNotNull(o); + + System.out.println("Regular reference array"); + Object[] array = (Object[])Array.newInstance(c, ARRAY_SIZE); + assertFalse(ValueClass.isFlatArray(array)); + testNullableArray(array, o); + + System.out.println("NonAtomic NullRestricted array"); + array = 
ValueClass.newNullRestrictedArray(c, ARRAY_SIZE); + Method ef = c.getMethod("expectingFlatNullRestrictedArray", null); + boolean expectFlat = (Boolean)ef.invoke(null, null); + assertTrue(ValueClass.isFlatArray(array) == (useFlatArray && expectFlat)); + testNullFreeArray(array, o); + + System.out.println("NullRestricted Atomic array"); + array = ValueClass.newNullRestrictedAtomicArray(c, ARRAY_SIZE); + ef = c.getMethod("expectingFlatNullRestrictedAtomicArray", null); + expectFlat = (Boolean)ef.invoke(null, null); + assertTrue(ValueClass.isFlatArray(array) == (useFlatArray && expectFlat)); + testNullFreeArray(array, o); + + System.out.println("Nullable Atomic array"); + array = ValueClass.newNullableAtomicArray(c, ARRAY_SIZE); + ef = c.getMethod("expectingFlatNullableAtomicArray", null); + expectFlat = (Boolean)ef.invoke(null, null); + assertTrue(ValueClass.isFlatArray(array) == (useFlatArray && expectFlat)); + testNullableArray(array, o); + } + } + + public static void main(String[] args) throws NoSuchMethodException, InstantiationException, + IllegalAccessException, InvocationTargetException { + testArrayAccesses(); + testArrayCopy(); + } + + } diff --git a/test/hotspot/jtreg/runtime/valhalla/inlinetypes/InlineTypeArray.java b/test/hotspot/jtreg/runtime/valhalla/inlinetypes/InlineTypeArray.java index f4b85c82afc..f86772e5404 100644 --- a/test/hotspot/jtreg/runtime/valhalla/inlinetypes/InlineTypeArray.java +++ b/test/hotspot/jtreg/runtime/valhalla/inlinetypes/InlineTypeArray.java @@ -27,7 +27,6 @@ import jdk.internal.vm.annotation.ImplicitlyConstructible; import jdk.internal.vm.annotation.LooselyConsistentValue; import jdk.internal.vm.annotation.NullRestricted; - import java.lang.reflect.Array; import java.util.Arrays; import java.util.ArrayList; @@ -43,7 +42,7 @@ * @library /test/lib * @enablePreview * @compile --source 24 InlineTypeArray.java Point.java Long8Inline.java Person.java - * @run main/othervm -XX:FlatArrayElementMaxSize=-1 
runtime.valhalla.inlinetypes.InlineTypeArray + * @run main/othervm -XX:FlatArrayElementMaxSize=-1 -XX:InlineFieldMaxFlatSize=-1 runtime.valhalla.inlinetypes.InlineTypeArray * @run main/othervm -XX:FlatArrayElementMaxSize=0 runtime.valhalla.inlinetypes.InlineTypeArray * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:ForceNonTearable=* runtime.valhalla.inlinetypes.InlineTypeArray */ From 57ffb3827ab0a70bc8a3c8128ffd7afe475a8c22 Mon Sep 17 00:00:00 2001 From: Frederic Parain Date: Tue, 10 Dec 2024 15:34:29 -0500 Subject: [PATCH 2/4] CDS fixes and various cleanup --- .../share/classfile/fieldLayoutBuilder.hpp | 2 +- src/hotspot/share/oops/flatArrayKlass.cpp | 2 - .../share/oops/flatArrayOop.inline.hpp | 1 + src/hotspot/share/oops/inlineKlass.cpp | 49 ++++++++++++++++--- src/hotspot/share/oops/inlineKlass.hpp | 9 ---- src/hotspot/share/oops/inlineKlass.inline.hpp | 3 -- src/hotspot/share/oops/instanceKlass.hpp | 1 - src/hotspot/share/oops/layoutKind.hpp | 4 +- src/hotspot/share/prims/jni.cpp | 2 - .../include/classfile_constants.h.template | 2 +- 10 files changed, 46 insertions(+), 29 deletions(-) diff --git a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp index 0351ca66162..09f067d674d 100644 --- a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp +++ b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp @@ -326,7 +326,7 @@ class FieldLayoutBuilder : public ResourceObj { int non_atomic_layout_alignment() const { return _non_atomic_layout_alignment; } bool has_atomic_layout() const { return _atomic_layout_size_in_bytes != -1; } int atomic_layout_size_in_bytes() const { return _atomic_layout_size_in_bytes; } - bool has_nullable_atomic_layout() const { return _nullable_layout_size_in_bytes != -1; } + bool has_nullable_atomic_layout() const { return _nullable_layout_size_in_bytes != -1; } int nullable_layout_size_in_bytes() const { return _nullable_layout_size_in_bytes; } int null_marker_offset() const { return 
_null_marker_offset; } bool is_empty_inline_class() const { return _is_empty_inline_class; } diff --git a/src/hotspot/share/oops/flatArrayKlass.cpp b/src/hotspot/share/oops/flatArrayKlass.cpp index 039ae51d73a..39bb2ba0491 100644 --- a/src/hotspot/share/oops/flatArrayKlass.cpp +++ b/src/hotspot/share/oops/flatArrayKlass.cpp @@ -83,7 +83,6 @@ FlatArrayKlass::FlatArrayKlass(Klass* element_klass, Symbol* name, LayoutKind lk assert(layout_helper_is_array(layout_helper()), "Must be"); assert(layout_helper_is_flatArray(layout_helper()), "Must be"); assert(layout_helper_element_type(layout_helper()) == T_FLAT_ELEMENT, "Must be"); - //assert(layout_helper_header_size(layout_helper()) == , "Must be"); assert(prototype_header().is_null_free_array(), "Must be"); assert(prototype_header().is_flat_array(), "Must be"); break; @@ -151,7 +150,6 @@ flatArrayOop FlatArrayKlass::allocate(int length, LayoutKind lk, TRAPS) { check_array_allocation_length(length, max_elements(), CHECK_NULL); int size = flatArrayOopDesc::object_size(layout_helper(), length); flatArrayOop array = (flatArrayOop) Universe::heap()->array_allocate(this, size, length, true, CHECK_NULL); - // array->set_layout_kind(lk); return array; } diff --git a/src/hotspot/share/oops/flatArrayOop.inline.hpp b/src/hotspot/share/oops/flatArrayOop.inline.hpp index cc432a1c989..337b24eba0e 100644 --- a/src/hotspot/share/oops/flatArrayOop.inline.hpp +++ b/src/hotspot/share/oops/flatArrayOop.inline.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_VM_OOPS_FLATARRAYOOP_INLINE_HPP #define SHARE_VM_OOPS_FLATARRAYOOP_INLINE_HPP +#include "classfile/vmSymbols.hpp" #include "oops/access.inline.hpp" #include "oops/flatArrayOop.hpp" #include "oops/inlineKlass.inline.hpp" diff --git a/src/hotspot/share/oops/inlineKlass.cpp b/src/hotspot/share/oops/inlineKlass.cpp index 046b28cfaf5..43a439cf8f9 100644 --- a/src/hotspot/share/oops/inlineKlass.cpp +++ b/src/hotspot/share/oops/inlineKlass.cpp @@ -72,7 +72,10 @@ void InlineKlass::init_fixed_block() { 
*((address*)adr_pack_handler_jobject()) = nullptr; *((address*)adr_unpack_handler()) = nullptr; assert(pack_handler() == nullptr, "pack handler not null"); - *((address*)adr_value_array_klasses()) = nullptr; + *((address*)adr_non_atomic_flat_array_klass()) = nullptr; + *((address*)adr_atomic_flat_array_klass()) = nullptr; + *((address*)adr_nullable_atomic_flat_array_klass()) = nullptr; + *((address*)adr_null_free_reference_array_klass()) = nullptr; set_default_value_offset(0); set_null_reset_value_offset(0); set_first_field_offset(-1); @@ -677,7 +680,10 @@ void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) { InstanceKlass::metaspace_pointers_do(it); InlineKlass* this_ptr = this; - it->push((Klass**)adr_value_array_klasses()); + it->push((Klass**)adr_non_atomic_flat_array_klass()); + it->push((Klass**)adr_atomic_flat_array_klass()); + it->push((Klass**)adr_nullable_atomic_flat_array_klass()); + it->push((Klass**)adr_null_free_reference_array_klass()); } void InlineKlass::remove_unshareable_info() { @@ -693,22 +699,49 @@ void InlineKlass::remove_unshareable_info() { *((address*)adr_pack_handler_jobject()) = nullptr; *((address*)adr_unpack_handler()) = nullptr; assert(pack_handler() == nullptr, "pack handler not null"); - if (value_array_klasses() != nullptr) { - value_array_klasses()->remove_unshareable_info(); + if (non_atomic_flat_array_klass() != nullptr) { + non_atomic_flat_array_klass()->remove_unshareable_info(); + } + if (atomic_flat_array_klass() != nullptr) { + atomic_flat_array_klass()->remove_unshareable_info(); + } + if (nullable_atomic_flat_array_klass() != nullptr) { + nullable_atomic_flat_array_klass()->remove_unshareable_info(); + } + if (null_free_reference_array_klass() != nullptr) { + null_free_reference_array_klass()->remove_unshareable_info(); } } void InlineKlass::remove_java_mirror() { InstanceKlass::remove_java_mirror(); - if (value_array_klasses() != nullptr) { - value_array_klasses()->remove_java_mirror(); + if 
(non_atomic_flat_array_klass() != nullptr) { + non_atomic_flat_array_klass()->remove_java_mirror(); + } + if (atomic_flat_array_klass() != nullptr) { + atomic_flat_array_klass()->remove_java_mirror(); + } + if (nullable_atomic_flat_array_klass() != nullptr) { + nullable_atomic_flat_array_klass()->remove_java_mirror(); + } + if (null_free_reference_array_klass() != nullptr) { + null_free_reference_array_klass()->remove_java_mirror(); } } void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) { InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK); - if (value_array_klasses() != nullptr) { - value_array_klasses()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK); + if (non_atomic_flat_array_klass() != nullptr) { + non_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK); + } + if (atomic_flat_array_klass() != nullptr) { + atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK); + } + if (nullable_atomic_flat_array_klass() != nullptr) { + nullable_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK); + } + if (null_free_reference_array_klass() != nullptr) { + null_free_reference_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK); } } diff --git a/src/hotspot/share/oops/inlineKlass.hpp b/src/hotspot/share/oops/inlineKlass.hpp index e85e044a7af..cf8a3ac1659 100644 --- a/src/hotspot/share/oops/inlineKlass.hpp +++ b/src/hotspot/share/oops/inlineKlass.hpp @@ -85,15 +85,6 @@ class InlineKlass: public InstanceKlass { return ((address)_adr_inlineklass_fixed_block) + in_bytes(null_reset_value_offset_offset()); } - ArrayKlass* volatile* adr_value_array_klasses() const { - 
assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); - return (ArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _value_array_klasses)); - } - - ArrayKlass* value_array_klasses() const { - return *adr_value_array_klasses(); - } - FlatArrayKlass* volatile* adr_non_atomic_flat_array_klass() const { assert(_adr_inlineklass_fixed_block != nullptr, "Should have been initialized"); return (FlatArrayKlass* volatile*) ((address)_adr_inlineklass_fixed_block) + in_bytes(byte_offset_of(InlineKlassFixedBlock, _non_atomic_flat_array_klass)); diff --git a/src/hotspot/share/oops/inlineKlass.inline.hpp b/src/hotspot/share/oops/inlineKlass.inline.hpp index c0201f8e7e5..c45d084ed70 100644 --- a/src/hotspot/share/oops/inlineKlass.inline.hpp +++ b/src/hotspot/share/oops/inlineKlass.inline.hpp @@ -24,11 +24,8 @@ #ifndef SHARE_VM_OOPS_INLINEKLASS_INLINE_HPP #define SHARE_VM_OOPS_INLINEKLASS_INLINE_HPP -#include "classfile/vmSymbols.hpp" #include "memory/iterator.hpp" -#include "runtime/fieldDescriptor.inline.hpp" #include "oops/flatArrayKlass.hpp" -#include "oops/flatArrayOop.hpp" #include "oops/inlineKlass.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/oop.inline.hpp" diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index 1b6612b569d..6560756ca8d 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -146,7 +146,6 @@ class InlineKlassFixedBlock { address* _unpack_handler; int* _default_value_offset; int* _null_reset_value_offset; - ArrayKlass** _value_array_klasses; // To be removed? 
FlatArrayKlass* _non_atomic_flat_array_klass; FlatArrayKlass* _atomic_flat_array_klass; FlatArrayKlass* _nullable_atomic_flat_array_klass; diff --git a/src/hotspot/share/oops/layoutKind.hpp b/src/hotspot/share/oops/layoutKind.hpp index c5e39226102..9356fe9f213 100644 --- a/src/hotspot/share/oops/layoutKind.hpp +++ b/src/hotspot/share/oops/layoutKind.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,4 +34,4 @@ enum LayoutKind { UNKNOWN = 5 // used for uninitialized fields of type LayoutKind }; -#endif // SHARE_OOPS_LAYOUTKIND_HPP \ No newline at end of file +#endif // SHARE_OOPS_LAYOUTKIND_HPP diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index f2f59765001..5cef7c193e2 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -1944,8 +1944,6 @@ JNI_ENTRY_NO_PRESERVE(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldI InlineLayoutInfo* li = holder->inline_layout_info_adr(fd.index()); InlineKlass* vklass = li->klass(); oop v = JNIHandles::resolve_non_null(value); - // vklass->write_flat_field(o, offset, v, fd.is_null_free_inline_type(), li->kind(), CHECK); - // write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS); vklass->write_value_to_addr(v, ((char*)(oopDesc*)obj) + offset, li->kind(), true, CHECK); } HOTSPOT_JNI_SETOBJECTFIELD_RETURN(); diff --git a/src/java.base/share/native/include/classfile_constants.h.template b/src/java.base/share/native/include/classfile_constants.h.template index dd82153d11f..9f56fe2ad6b 100644 --- a/src/java.base/share/native/include/classfile_constants.h.template +++ b/src/java.base/share/native/include/classfile_constants.h.template @@ -154,7 +154,7 @@ enum { 
JVM_SIGNATURE_BYTE = 'B', JVM_SIGNATURE_CHAR = 'C', JVM_SIGNATURE_CLASS = 'L', - JVM_SIGNATURE_FLAT_ELEMENT = 'Q', + JVM_SIGNATURE_FLAT_ELEMENT = 'Q', JVM_SIGNATURE_ENDCLASS = ';', JVM_SIGNATURE_ENUM = 'E', JVM_SIGNATURE_FLOAT = 'F', From 5fe9f5f545363acae5ce3c840498b8c09cc3120f Mon Sep 17 00:00:00 2001 From: Frederic Parain Date: Thu, 12 Dec 2024 14:55:53 -0500 Subject: [PATCH 3/4] Small cleanups --- src/hotspot/share/oops/flatArrayKlass.cpp | 2 +- src/hotspot/share/utilities/globalDefinitions.hpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/oops/flatArrayKlass.cpp b/src/hotspot/share/oops/flatArrayKlass.cpp index 39bb2ba0491..7858b5266cb 100644 --- a/src/hotspot/share/oops/flatArrayKlass.cpp +++ b/src/hotspot/share/oops/flatArrayKlass.cpp @@ -300,7 +300,7 @@ void FlatArrayKlass::copy_array(arrayOop s, int src_pos, address src = (address) hs->value_at_addr(src_pos, fsk->layout_helper()); for (int i = 0; i < length; i++) { if (need_null_check) { - if ( *(jboolean*)(src + vk->null_marker_offset_in_payload()) == 0) { + if (vk->is_payload_marked_as_null(src)) { THROW(vmSymbols::java_lang_NullPointerException()); } } diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp index aad2080bb23..a42771d336e 100644 --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -1366,6 +1366,7 @@ template bool primitive_equals(const K& k0, const K& k1) { // which is actually not 100% correct, but works for the current set of C1/C2 // implementation and test cases. #define UseFlatArray (EnableValhalla && (FlatArrayElementMaxSize != 0)) + template int primitive_compare(const K& k0, const K& k1) { return ((k0 < k1) ? -1 : (k0 == k1) ? 
0 : 1); } From 048b88c45e17c0b36c8a6334b97659bb1bd28a8d Mon Sep 17 00:00:00 2001 From: Frederic Parain Date: Fri, 13 Dec 2024 12:41:38 -0500 Subject: [PATCH 4/4] Fix bad register usage causing mdp corruption --- src/hotspot/cpu/x86/templateTable_x86.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index c34d301d260..6c52ba6ed35 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -848,8 +848,8 @@ void TemplateTable::aaload() { IS_ARRAY); __ jmp(done); __ bind(is_flat_array); - __ movptr(rbx, array); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), rbx, index); + __ movptr(rcx, array); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), rcx, index); __ bind(done); } else { do_oop_load(_masm,