diff --git a/.github/actions/build-jtreg/action.yml b/.github/actions/build-jtreg/action.yml
new file mode 100644
index 00000000000..3e5ced8a7f8
--- /dev/null
+++ b/.github/actions/build-jtreg/action.yml
@@ -0,0 +1,84 @@
+#
+# Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+name: 'Build JTReg'
+description: 'Build JTReg'
+
+runs:
+ using: composite
+ steps:
+ - name: 'Get JTReg version configuration'
+ id: version
+ uses: ./.github/actions/config
+ with:
+ var: JTREG_VERSION
+
+ - name: 'Check cache for already built JTReg'
+ id: get-cached
+ uses: actions/cache@v4
+ with:
+ path: jtreg/installed
+ key: jtreg-${{ steps.version.outputs.value }}
+
+ - name: 'Checkout the JTReg source'
+ uses: actions/checkout@v4
+ with:
+ repository: openjdk/jtreg
+ ref: jtreg-${{ steps.version.outputs.value }}
+ path: jtreg/src
+ if: (steps.get-cached.outputs.cache-hit != 'true')
+
+ - name: 'Build JTReg'
+ run: |
+ # Try building JTReg several times, backing off exponentially on failure.
+ # ~500 seconds in total should be enough to capture most of the transient
+ # failures.
+ for I in `seq 0 8`; do
+ rm -rf build/images/jtreg
+ bash make/build.sh --jdk "$JAVA_HOME_17_X64" && break
+ S=$(( 2 ** $I ))
+ echo "Failure. Waiting $S seconds before retrying"
+ sleep $S
+ done
+
+ # Check if build was successful
+ if [ ! -d build/images/jtreg ]; then
+ echo "Build failed"
+ exit 1;
+ fi
+
+ # Move files to the proper locations
+ mkdir ../installed
+ mv build/images/jtreg/* ../installed
+ working-directory: jtreg/src
+ shell: bash
+ if: (steps.get-cached.outputs.cache-hit != 'true')
+
+ - name: 'Upload JTReg artifact'
+ uses: actions/upload-artifact@v4
+ with:
+ name: bundles-jtreg-${{ steps.version.outputs.value }}
+ path: jtreg/installed
+ retention-days: 1
diff --git a/.github/actions/get-jtreg/action.yml b/.github/actions/get-jtreg/action.yml
index faedcc18807..78a3a4c9edd 100644
--- a/.github/actions/get-jtreg/action.yml
+++ b/.github/actions/get-jtreg/action.yml
@@ -24,7 +24,7 @@
#
name: 'Get JTReg'
-description: 'Download JTReg from cache or source location'
+description: 'Get JTReg'
outputs:
path:
description: 'Path to the installed JTReg'
@@ -39,36 +39,12 @@ runs:
with:
var: JTREG_VERSION
- - name: 'Check cache for JTReg'
- id: get-cached-jtreg
- uses: actions/cache@v4
+ - name: 'Download JTReg artifact'
+ id: download-jtreg
+ uses: actions/download-artifact@v4
with:
+ name: bundles-jtreg-${{ steps.version.outputs.value }}
path: jtreg/installed
- key: jtreg-${{ steps.version.outputs.value }}
-
- - name: 'Checkout the JTReg source'
- uses: actions/checkout@v4
- with:
- repository: openjdk/jtreg
- ref: jtreg-${{ steps.version.outputs.value }}
- path: jtreg/src
- if: steps.get-cached-jtreg.outputs.cache-hit != 'true'
-
- - name: 'Build JTReg'
- run: |
- # If runner architecture is x64 set JAVA_HOME_17_X64 otherwise set to JAVA_HOME_17_arm64
- if [[ '${{ runner.arch }}' == 'X64' ]]; then
- JDK="$JAVA_HOME_17_X64"
- else
- JDK="$JAVA_HOME_17_arm64"
- fi
- # Build JTReg and move files to the proper locations
- bash make/build.sh --jdk "$JDK"
- mkdir ../installed
- mv build/images/jtreg/* ../installed
- working-directory: jtreg/src
- shell: bash
- if: steps.get-cached-jtreg.outputs.cache-hit != 'true'
- name: 'Export path to where JTReg is installed'
id: path-name
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index d5958853701..00f64d2aedf 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -54,8 +54,8 @@ jobs:
### Determine platforms to include
###
- select:
- name: 'Select platforms'
+ prepare:
+ name: 'Prepare the run'
runs-on: ubuntu-22.04
env:
# List of platforms to exclude by default
@@ -73,7 +73,19 @@ jobs:
docs: ${{ steps.include.outputs.docs }}
steps:
- # This function must be inlined in main.yml, or we'd be forced to checkout the repo
+ - name: 'Checkout the scripts'
+ uses: actions/checkout@v4
+ with:
+ sparse-checkout: |
+ .github
+ make/conf/github-actions.conf
+
+ - name: 'Build JTReg'
+ id: jtreg
+ uses: ./.github/actions/build-jtreg
+
+ # TODO: Now that we are checking out the repo scripts, we can put the following code
+ # into a separate file
- name: 'Check what jobs to run'
id: include
run: |
@@ -149,18 +161,18 @@ jobs:
build-linux-x64:
name: linux-x64
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x64
gcc-major-version: '10'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.linux-x64 == 'true'
+ if: needs.prepare.outputs.linux-x64 == 'true'
build-linux-x86-hs:
name: linux-x86-hs
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x86
@@ -174,11 +186,11 @@ jobs:
extra-conf-options: '--with-target-bits=32 --enable-fallback-linker --enable-libffi-bundling'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.linux-x86-hs == 'true'
+ if: needs.prepare.outputs.linux-x86-hs == 'true'
build-linux-x64-hs-nopch:
name: linux-x64-hs-nopch
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x64
@@ -188,11 +200,11 @@ jobs:
extra-conf-options: '--disable-precompiled-headers'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.linux-x64-variants == 'true'
+ if: needs.prepare.outputs.linux-x64-variants == 'true'
build-linux-x64-hs-zero:
name: linux-x64-hs-zero
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x64
@@ -202,11 +214,11 @@ jobs:
extra-conf-options: '--with-jvm-variants=zero --disable-precompiled-headers'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.linux-x64-variants == 'true'
+ if: needs.prepare.outputs.linux-x64-variants == 'true'
build-linux-x64-hs-minimal:
name: linux-x64-hs-minimal
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x64
@@ -216,11 +228,11 @@ jobs:
extra-conf-options: '--with-jvm-variants=minimal --disable-precompiled-headers'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.linux-x64-variants == 'true'
+ if: needs.prepare.outputs.linux-x64-variants == 'true'
build-linux-x64-hs-optimized:
name: linux-x64-hs-optimized
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x64
@@ -231,32 +243,31 @@ jobs:
extra-conf-options: '--with-debug-level=optimized --disable-precompiled-headers'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.linux-x64-variants == 'true'
+ if: needs.prepare.outputs.linux-x64-variants == 'true'
build-linux-cross-compile:
name: linux-cross-compile
- needs:
- - select
+ needs: prepare
uses: ./.github/workflows/build-cross-compile.yml
with:
gcc-major-version: '10'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.linux-cross-compile == 'true'
+ if: needs.prepare.outputs.linux-cross-compile == 'true'
build-alpine-linux-x64:
name: alpine-linux-x64
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-alpine-linux.yml
with:
platform: alpine-linux-x64
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.alpine-linux-x64 == 'true'
+ if: needs.prepare.outputs.alpine-linux-x64 == 'true'
build-macos-x64:
name: macos-x64
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-x64
@@ -264,11 +275,11 @@ jobs:
xcode-toolset-version: '14.3.1'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.macos-x64 == 'true'
+ if: needs.prepare.outputs.macos-x64 == 'true'
build-macos-aarch64:
name: macos-aarch64
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-aarch64
@@ -276,11 +287,11 @@ jobs:
xcode-toolset-version: '14.3.1'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.macos-aarch64 == 'true'
+ if: needs.prepare.outputs.macos-aarch64 == 'true'
build-windows-x64:
name: windows-x64
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-x64
@@ -288,11 +299,11 @@ jobs:
msvc-toolset-architecture: 'x86.x64'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.windows-x64 == 'true'
+ if: needs.prepare.outputs.windows-x64 == 'true'
build-windows-aarch64:
name: windows-aarch64
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-aarch64
@@ -302,11 +313,11 @@ jobs:
extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.windows-aarch64 == 'true'
+ if: needs.prepare.outputs.windows-aarch64 == 'true'
build-docs:
name: docs
- needs: select
+ needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x64
@@ -318,7 +329,7 @@ jobs:
gcc-major-version: '10'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
- if: needs.select.outputs.docs == 'true'
+ if: needs.prepare.outputs.docs == 'true'
###
### Test jobs
@@ -363,48 +374,3 @@ jobs:
platform: windows-x64
bootjdk-platform: windows-x64
runs-on: windows-2019
-
- # Remove bundles so they are not misconstrued as binary distributions from the JDK project
- remove-bundles:
- name: 'Remove bundle artifacts'
- runs-on: ubuntu-22.04
- if: always()
- needs:
- - build-linux-x64
- - build-linux-x86-hs
- - build-linux-x64-hs-nopch
- - build-linux-x64-hs-zero
- - build-linux-x64-hs-minimal
- - build-linux-x64-hs-optimized
- - build-linux-cross-compile
- - build-alpine-linux-x64
- - build-macos-x64
- - build-macos-aarch64
- - build-windows-x64
- - build-windows-aarch64
- - test-linux-x64
- - test-macos-x64
- - test-macos-aarch64
- - test-windows-x64
-
- steps:
- - name: 'Remove bundle artifacts'
- run: |
- # Find and remove all bundle artifacts
- # See: https://docs.github.com/en/rest/actions/artifacts?apiVersion=2022-11-28
- ALL_ARTIFACT_IDS="$(curl -sL \
- -H 'Accept: application/vnd.github+json' \
- -H 'Authorization: Bearer ${{ github.token }}' \
- -H 'X-GitHub-Api-Version: 2022-11-28' \
- '${{ github.api_url }}/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts?per_page=100')"
- BUNDLE_ARTIFACT_IDS="$(echo "$ALL_ARTIFACT_IDS" | jq -r -c '.artifacts | map(select(.name|startswith("bundles-"))) | .[].id')"
- for id in $BUNDLE_ARTIFACT_IDS; do
- echo "Removing $id"
- curl -sL \
- -X DELETE \
- -H 'Accept: application/vnd.github+json' \
- -H 'Authorization: Bearer ${{ github.token }}' \
- -H 'X-GitHub-Api-Version: 2022-11-28' \
- "${{ github.api_url }}/repos/${{ github.repository }}/actions/artifacts/$id" \
- || echo "Failed to remove bundle"
- done
diff --git a/doc/building.html b/doc/building.html
index c91d876246c..63af224584a 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -2016,10 +2016,18 @@ <h4 id="spaces-in-path">Spaces in Path</h4>
 have <a href="https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/fsutil-8dot3name">short
 paths</a>. You can run <code>fsutil file setshortname</code>
 in
-<code>cmd</code> on certain directories, such as
-<code>Microsoft Visual Studio</code> or <code>Windows Kits</code>, to
-assign arbitrary short paths so <code>configure</code> can access
-them.</p>
+<code>cmd</code> on directories to assign arbitrary short paths so
+<code>configure</code> can access them. If the result says "Access
+denied", it may be that there are processes running in that directory;
+in this case, you can reboot Windows in safe mode and run the command on
+those directories again.</p>
+<p>The only directories required to have short paths are
+<code>Microsoft Visual Studio</code> and <code>Windows Kits</code>; the
+rest of the "contains space" warnings from <code>configure</code>, such
+as <code>IntelliJ IDEA</code>, can be ignored. You can choose any short
+name; once it is set, <code>configure</code>'s tools like
+<code>cygpath</code> can convert the directory with spaces to your
+chosen short name and pass it to the build system.</p>
 <h3 id="getting-help">Getting Help</h3>
 <p>If none of the suggestions in this document helps you, or if you find
 what you believe is a bug in the build system, please contact the Build
diff --git a/doc/building.md b/doc/building.md
index 47ad9e7c72b..466e8d7edf8 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -1800,9 +1800,17 @@ temporarily.
On Windows, when configuring, `fixpath.sh` may report that some directory names
have spaces. Usually, it assumes those directories have [short
paths](https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/fsutil-8dot3name).
-You can run `fsutil file setshortname` in `cmd` on certain directories, such as
-`Microsoft Visual Studio` or `Windows Kits`, to assign arbitrary short paths so
-`configure` can access them.
+You can run `fsutil file setshortname` in `cmd` on directories to assign
+arbitrary short paths so `configure` can access them. If the result says "Access
+denied", it may be that there are processes running in that directory; in this
+case, you can reboot Windows in safe mode and run the command on those directories
+again.
+
+The only directories required to have short paths are `Microsoft Visual Studio`
+and `Windows Kits`; the rest of the "contains space" warnings from `configure`,
+such as `IntelliJ IDEA`, can be ignored. You can choose any short name; once it
+is set, `configure`'s tools like `cygpath` can convert the directory with spaces
+to your chosen short name and pass it to the build system.
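+
+For example, this hypothetical elevated `cmd` session (the short name
+`MSVS~1` is an arbitrary choice, not a required value) assigns a short path
+to the Visual Studio directory and then verifies it with `dir /x`:
+
+    fsutil file setshortname "C:\Program Files (x86)\Microsoft Visual Studio" MSVS~1
+    dir /x "C:\Program Files (x86)"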
### Getting Help
diff --git a/doc/testing.html b/doc/testing.html
index b74661b3924..6285fab1682 100644
--- a/doc/testing.html
+++ b/doc/testing.html
@@ -72,6 +72,9 @@ <li><a href="#testing-the-jdk">Testing the JDK</a>
 <li><a href="#non-us-locale">Non-US
 locale</a></li>
 <li><a href="#pkcs11-tests">PKCS11 Tests</a></li>
+<li><a
+href="#testing-with-alternative-security-providers">Testing with
+alternative security providers</a></li>
 <li><a href="#client-ui-tests">Client UI
 Tests</a></li>
@@ -586,6 +589,18 @@ <h3 id="pkcs11-tests">PKCS11 Tests</h3>
 JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"</code></pre>
 <p>For more notes about the PKCS11 tests, please refer to
 test/jdk/sun/security/pkcs11/README.</p>
+<h3 id="testing-with-alternative-security-providers">Testing with
+alternative security providers</h3>
+<p>Some security tests use a hardcoded provider for
+<code>KeyFactory</code>, <code>Cipher</code>,
+<code>KeyPairGenerator</code>, <code>KeyGenerator</code>,
+<code>AlgorithmParameterGenerator</code>, <code>KeyAgreement</code>,
+<code>Mac</code>, <code>MessageDigest</code>, <code>SecureRandom</code>,
+<code>Signature</code>, <code>AlgorithmParameters</code>,
+<code>Configuration</code>, <code>Policy</code>, or
+<code>SecretKeyFactory</code> objects. Specify the
+<code>-Dtest.provider.name=NAME</code> property to use a different
+provider for the service(s).</p>
 <h2 id="client-ui-tests">Client UI Tests</h2>
 <h3 id="system-key-shortcuts">System key shortcuts</h3>
 <p>Some Client UI tests use key sequences which may be reserved by the
diff --git a/doc/testing.md b/doc/testing.md
index cdc9bbd2182..351690c5e60 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -603,6 +603,15 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
+### Testing with alternative security providers
+
+Some security tests use a hardcoded provider for `KeyFactory`, `Cipher`,
+`KeyPairGenerator`, `KeyGenerator`, `AlgorithmParameterGenerator`,
+`KeyAgreement`, `Mac`, `MessageDigest`, `SecureRandom`, `Signature`,
+`AlgorithmParameters`, `Configuration`, `Policy`, or `SecretKeyFactory` objects.
+Specify the `-Dtest.provider.name=NAME` property to use a different provider for
+the service(s).
+
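+For example, a hypothetical invocation (the test selection and the
+`SunJCE` provider name here are placeholders, following the same
+`JTREG="JAVA_OPTIONS=..."` pattern as the PKCS11 example above) could
+look like:
+
+    $ make test TEST="jtreg:sun/security" \
+        JTREG="JAVA_OPTIONS=-Dtest.provider.name=SunJCE"
+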
### Client UI Tests
#### System key shortcuts
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 45494b859b7..bfd55394b2f 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -853,11 +853,7 @@ define SetupRunJtregTestBody
endif
ifneq ($$(findstring -XX:+UseZGC, $$(JTREG_ALL_OPTIONS)), )
- ifneq ($$(findstring -XX:-ZGenerational, $$(JTREG_ALL_OPTIONS)), )
- JTREG_AUTO_PROBLEM_LISTS += ProblemList-zgc.txt
- else
- JTREG_AUTO_PROBLEM_LISTS += ProblemList-generational-zgc.txt
- endif
+ JTREG_AUTO_PROBLEM_LISTS += ProblemList-zgc.txt
endif
ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
diff --git a/make/autoconf/lib-hsdis.m4 b/make/autoconf/lib-hsdis.m4
index bd78768d03e..a4d2c5f81f3 100644
--- a/make/autoconf/lib-hsdis.m4
+++ b/make/autoconf/lib-hsdis.m4
@@ -266,8 +266,10 @@ AC_DEFUN([LIB_SETUP_HSDIS_BINUTILS],
HSDIS_CFLAGS="-DLIBARCH_$OPENJDK_TARGET_CPU_LEGACY_LIB"
elif test "x$BINUTILS_INSTALL_DIR" != x; then
disasm_header="\"$BINUTILS_INSTALL_DIR/include/dis-asm.h\""
- if test -e $BINUTILS_INSTALL_DIR/lib/libbfd.a && \
- test -e $BINUTILS_INSTALL_DIR/lib/libopcodes.a && \
+ if (test -e $BINUTILS_INSTALL_DIR/lib/libbfd.a || \
+ test -e $BINUTILS_INSTALL_DIR/lib64/libbfd.a) && \
+ (test -e $BINUTILS_INSTALL_DIR/lib/libopcodes.a || \
+ test -e $BINUTILS_INSTALL_DIR/lib64/libopcodes.a) && \
(test -e $BINUTILS_INSTALL_DIR/lib/libiberty.a || \
test -e $BINUTILS_INSTALL_DIR/lib64/libiberty.a || \
test -e $BINUTILS_INSTALL_DIR/lib32/libiberty.a); then
@@ -275,7 +277,19 @@ AC_DEFUN([LIB_SETUP_HSDIS_BINUTILS],
# libiberty ignores --libdir and may be installed in $BINUTILS_INSTALL_DIR/lib, $BINUTILS_INSTALL_DIR/lib32
# or $BINUTILS_INSTALL_DIR/lib64, depending on system setup
+ LIBOPCODES_LIB=""
+ LIBBFD_LIB=""
LIBIBERTY_LIB=""
+ if test -e $BINUTILS_INSTALL_DIR/lib/libbfd.a; then
+ LIBBFD_LIB="$BINUTILS_INSTALL_DIR/lib/libbfd.a"
+ else
+ LIBBFD_LIB="$BINUTILS_INSTALL_DIR/lib64/libbfd.a"
+ fi
+ if test -e $BINUTILS_INSTALL_DIR/lib/libopcodes.a; then
+ LIBOPCODES_LIB="$BINUTILS_INSTALL_DIR/lib/libopcodes.a"
+ else
+ LIBOPCODES_LIB="$BINUTILS_INSTALL_DIR/lib64/libopcodes.a"
+ fi
if test -e $BINUTILS_INSTALL_DIR/lib/libiberty.a; then
LIBIBERTY_LIB="$BINUTILS_INSTALL_DIR/lib/libiberty.a"
elif test -e $BINUTILS_INSTALL_DIR/lib32/libiberty.a; then
@@ -283,7 +297,7 @@ AC_DEFUN([LIB_SETUP_HSDIS_BINUTILS],
else
LIBIBERTY_LIB="$BINUTILS_INSTALL_DIR/lib64/libiberty.a"
fi
- HSDIS_LIBS="$BINUTILS_INSTALL_DIR/lib/libbfd.a $BINUTILS_INSTALL_DIR/lib/libopcodes.a $LIBIBERTY_LIB"
+ HSDIS_LIBS="$LIBBFD_LIB $LIBOPCODES_LIB $LIBIBERTY_LIB"
# If we have libsframe add it.
if test -e $BINUTILS_INSTALL_DIR/lib/libsframe.a; then
HSDIS_LIBS="$HSDIS_LIBS $BINUTILS_INSTALL_DIR/lib/libsframe.a"
diff --git a/make/hotspot/gensrc/GensrcAdlc.gmk b/make/hotspot/gensrc/GensrcAdlc.gmk
index ddb2c3e33e5..ce3f2684026 100644
--- a/make/hotspot/gensrc/GensrcAdlc.gmk
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk
@@ -193,8 +193,6 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ifeq ($(call check-jvm-feature, zgc), true)
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
- $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU).ad \
- $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU_ARCH).ad \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU_ARCH).ad \
)))
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index c4c030810fc..b94031515f7 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -150,7 +150,6 @@ endif
ifneq ($(call check-jvm-feature, zgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
JVM_EXCLUDE_PATTERNS += gc/z
- JVM_EXCLUDE_PATTERNS += gc/x
endif
ifneq ($(call check-jvm-feature, shenandoahgc), true)
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index ebd83027151..3d1be91e9b2 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -990,10 +990,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ decode_heap_oop(dest->as_register());
}
- if (!(UseZGC && !ZGenerational)) {
- // Load barrier has not yet been applied, so ZGC can't verify the oop here
- __ verify_oop(dest->as_register());
- }
+ __ verify_oop(dest->as_register());
}
}
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
index 62831ee72ba..b29be7213ba 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
@@ -217,7 +217,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
ldr(tmpReg, Address(tmp, ObjectMonitor::cxq_offset()));
orr(rscratch1, rscratch1, tmpReg);
@@ -538,7 +538,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Regi
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset()));
ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset()));
orr(rscratch1, rscratch1, t3_t);
diff --git a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp
deleted file mode 100644
index 5c891e8c170..00000000000
--- a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/codeBlob.hpp"
-#include "code/vmreg.inline.hpp"
-#include "gc/x/xBarrier.inline.hpp"
-#include "gc/x/xBarrierSet.hpp"
-#include "gc/x/xBarrierSetAssembler.hpp"
-#include "gc/x/xBarrierSetRuntime.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "utilities/macros.hpp"
-#ifdef COMPILER1
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "gc/x/c1/xBarrierSetC1.hpp"
-#endif // COMPILER1
-#ifdef COMPILER2
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#endif // COMPILER2
-
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) /* nothing */
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::load_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register dst,
- Address src,
- Register tmp1,
- Register tmp2) {
- if (!XBarrierSet::barrier_needed(decorators, type)) {
- // Barrier not needed
- BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
- return;
- }
-
- assert_different_registers(rscratch1, rscratch2, src.base());
- assert_different_registers(rscratch1, rscratch2, dst);
-
- Label done;
-
- // Load bad mask into scratch register.
- __ ldr(rscratch1, address_bad_mask_from_thread(rthread));
- __ lea(rscratch2, src);
- __ ldr(dst, src);
-
- // Test reference against bad mask. If mask bad, then we need to fix it up.
- __ tst(dst, rscratch1);
- __ br(Assembler::EQ, done);
-
- __ enter(/*strip_ret_addr*/true);
-
- __ push_call_clobbered_registers_except(RegSet::of(dst));
-
- if (c_rarg0 != dst) {
- __ mov(c_rarg0, dst);
- }
- __ mov(c_rarg1, rscratch2);
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
-
- // Make sure dst has the return value.
- if (dst != r0) {
- __ mov(dst, r0);
- }
-
- __ pop_call_clobbered_registers_except(RegSet::of(dst));
- __ leave();
-
- __ bind(done);
-}
-
-#ifdef ASSERT
-
-void XBarrierSetAssembler::store_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Address dst,
- Register val,
- Register tmp1,
- Register tmp2,
- Register tmp3) {
- // Verify value
- if (is_reference_type(type)) {
- // Note that src could be noreg, which means we
- // are storing null and can skip verification.
- if (val != noreg) {
- Label done;
-
- // tmp1, tmp2 and tmp3 are often set to noreg.
- RegSet savedRegs = RegSet::of(rscratch1);
- __ push(savedRegs, sp);
-
- __ ldr(rscratch1, address_bad_mask_from_thread(rthread));
- __ tst(val, rscratch1);
- __ br(Assembler::EQ, done);
- __ stop("Verify oop store failed");
- __ should_not_reach_here();
- __ bind(done);
- __ pop(savedRegs, sp);
- }
- }
-
- // Store value
- BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg);
-}
-
-#endif // ASSERT
-
-void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
- DecoratorSet decorators,
- bool is_oop,
- Register src,
- Register dst,
- Register count,
- RegSet saved_regs) {
- if (!is_oop) {
- // Barrier not needed
- return;
- }
-
- BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
-
- assert_different_registers(src, count, rscratch1);
-
- __ push(saved_regs, sp);
-
- if (count == c_rarg0) {
- if (src == c_rarg1) {
- // exactly backwards!!
- __ mov(rscratch1, c_rarg0);
- __ mov(c_rarg0, c_rarg1);
- __ mov(c_rarg1, rscratch1);
- } else {
- __ mov(c_rarg1, count);
- __ mov(c_rarg0, src);
- }
- } else {
- __ mov(c_rarg0, src);
- __ mov(c_rarg1, count);
- }
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
-
- __ pop(saved_regs, sp);
-
- BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
-}
-
-void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
- Register jni_env,
- Register robj,
- Register tmp,
- Label& slowpath) {
- BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
-
- assert_different_registers(jni_env, robj, tmp);
-
- // Resolve jobject
- BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
-
- // The Address offset is too large to direct load - -784. Our range is +127, -128.
- __ mov(tmp, (int64_t)(in_bytes(XThreadLocalData::address_bad_mask_offset()) -
- in_bytes(JavaThread::jni_environment_offset())));
-
- // Load address bad mask
- __ add(tmp, jni_env, tmp);
- __ ldr(tmp, Address(tmp));
-
- // Check address bad mask
- __ tst(robj, tmp);
- __ br(Assembler::NE, slowpath);
-
- BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
-}
-
-#ifdef COMPILER1
-
-#undef __
-#define __ ce->masm()->
-
-void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const {
- assert_different_registers(rscratch1, rthread, ref->as_register());
-
- __ ldr(rscratch1, address_bad_mask_from_thread(rthread));
- __ tst(ref->as_register(), rscratch1);
-}
-
-void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const {
- // Stub entry
- __ bind(*stub->entry());
-
- Register ref = stub->ref()->as_register();
- Register ref_addr = noreg;
- Register tmp = noreg;
-
- if (stub->tmp()->is_valid()) {
- // Load address into tmp register
- ce->leal(stub->ref_addr(), stub->tmp());
- ref_addr = tmp = stub->tmp()->as_pointer_register();
- } else {
- // Address already in register
- ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
- }
-
- assert_different_registers(ref, ref_addr, noreg);
-
- // Save r0 unless it is the result or tmp register
- // Set up SP to accommodate parameters and maybe r0..
- if (ref != r0 && tmp != r0) {
- __ sub(sp, sp, 32);
- __ str(r0, Address(sp, 16));
- } else {
- __ sub(sp, sp, 16);
- }
-
- // Setup arguments and call runtime stub
- ce->store_parameter(ref_addr, 1);
- ce->store_parameter(ref, 0);
-
- __ far_call(stub->runtime_stub());
-
- // Verify result
- __ verify_oop(r0);
-
- // Move result into place
- if (ref != r0) {
- __ mov(ref, r0);
- }
-
- // Restore r0 unless it is the result or tmp register
- if (ref != r0 && tmp != r0) {
- __ ldr(r0, Address(sp, 16));
- __ add(sp, sp, 32);
- } else {
- __ add(sp, sp, 16);
- }
-
- // Stub exit
- __ b(*stub->continuation());
-}
-
-#undef __
-#define __ sasm->
-
-void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const {
- __ prologue("zgc_load_barrier stub", false);
-
- __ push_call_clobbered_registers_except(RegSet::of(r0));
-
- // Setup arguments
- __ load_parameter(0, c_rarg0);
- __ load_parameter(1, c_rarg1);
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
-
- __ pop_call_clobbered_registers_except(RegSet::of(r0));
-
- __ epilogue();
-}
-#endif // COMPILER1
-
-#ifdef COMPILER2
-
-OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
- if (!OptoReg::is_reg(opto_reg)) {
- return OptoReg::Bad;
- }
-
- const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
- if (vm_reg->is_FloatRegister()) {
- return opto_reg & ~1;
- }
-
- return opto_reg;
-}
-
-#undef __
-#define __ _masm->
-
-class XSaveLiveRegisters {
-private:
- MacroAssembler* const _masm;
- RegSet _gp_regs;
- FloatRegSet _fp_regs;
- PRegSet _p_regs;
-
-public:
- void initialize(XLoadBarrierStubC2* stub) {
- // Record registers that needs to be saved/restored
- RegMaskIterator rmi(stub->live());
- while (rmi.has_next()) {
- const OptoReg::Name opto_reg = rmi.next();
- if (OptoReg::is_reg(opto_reg)) {
- const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
- if (vm_reg->is_Register()) {
- _gp_regs += RegSet::of(vm_reg->as_Register());
- } else if (vm_reg->is_FloatRegister()) {
- _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
- } else if (vm_reg->is_PRegister()) {
- _p_regs += PRegSet::of(vm_reg->as_PRegister());
- } else {
- fatal("Unknown register type");
- }
- }
- }
-
- // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
- _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref());
- }
-
- XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
- _masm(masm),
- _gp_regs(),
- _fp_regs(),
- _p_regs() {
-
- // Figure out what registers to save/restore
- initialize(stub);
-
- // Save registers
- __ push(_gp_regs, sp);
- __ push_fp(_fp_regs, sp);
- __ push_p(_p_regs, sp);
- }
-
- ~XSaveLiveRegisters() {
- // Restore registers
- __ pop_p(_p_regs, sp);
- __ pop_fp(_fp_regs, sp);
-
- // External runtime call may clobber ptrue reg
- __ reinitialize_ptrue();
-
- __ pop(_gp_regs, sp);
- }
-};
-
-#undef __
-#define __ _masm->
-
-class XSetupArguments {
-private:
- MacroAssembler* const _masm;
- const Register _ref;
- const Address _ref_addr;
-
-public:
- XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
- _masm(masm),
- _ref(stub->ref()),
- _ref_addr(stub->ref_addr()) {
-
- // Setup arguments
- if (_ref_addr.base() == noreg) {
- // No self healing
- if (_ref != c_rarg0) {
- __ mov(c_rarg0, _ref);
- }
- __ mov(c_rarg1, 0);
- } else {
- // Self healing
- if (_ref == c_rarg0) {
- // _ref is already at correct place
- __ lea(c_rarg1, _ref_addr);
- } else if (_ref != c_rarg1) {
- // _ref is in wrong place, but not in c_rarg1, so fix it first
- __ lea(c_rarg1, _ref_addr);
- __ mov(c_rarg0, _ref);
- } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
- assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
- __ mov(c_rarg0, _ref);
- __ lea(c_rarg1, _ref_addr);
- } else {
- assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
- if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) {
- __ mov(rscratch2, c_rarg1);
- __ lea(c_rarg1, _ref_addr);
- __ mov(c_rarg0, rscratch2);
- } else {
- ShouldNotReachHere();
- }
- }
- }
- }
-
- ~XSetupArguments() {
- // Transfer result
- if (_ref != r0) {
- __ mov(_ref, r0);
- }
- }
-};
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
- BLOCK_COMMENT("XLoadBarrierStubC2");
-
- // Stub entry
- __ bind(*stub->entry());
-
- {
- XSaveLiveRegisters save_live_registers(masm, stub);
- XSetupArguments setup_arguments(masm, stub);
- __ mov(rscratch1, stub->slow_path());
- __ blr(rscratch1);
- }
- // Stub exit
- __ b(*stub->continuation());
-}
-
-#endif // COMPILER2
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
- // Check if mask is good.
- // verifies that XAddressBadMask & r0 == 0
- __ ldr(tmp2, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(tmp1, obj, tmp2);
- __ cbnz(tmp1, error);
-
- BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
-}
-
-#undef __
diff --git a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp
deleted file mode 100644
index 8c1e9521757..00000000000
--- a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
-#define CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
-
-#include "code/vmreg.hpp"
-#include "oops/accessDecorators.hpp"
-#ifdef COMPILER2
-#include "opto/optoreg.hpp"
-#endif // COMPILER2
-
-#ifdef COMPILER1
-class LIR_Assembler;
-class LIR_Opr;
-class StubAssembler;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class Node;
-#endif // COMPILER2
-
-#ifdef COMPILER1
-class XLoadBarrierStubC1;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class XLoadBarrierStubC2;
-#endif // COMPILER2
-
-class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
-public:
- virtual void load_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register dst,
- Address src,
- Register tmp1,
- Register tmp2);
-
-#ifdef ASSERT
- virtual void store_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Address dst,
- Register val,
- Register tmp1,
- Register tmp2,
- Register tmp3);
-#endif // ASSERT
-
- virtual void arraycopy_prologue(MacroAssembler* masm,
- DecoratorSet decorators,
- bool is_oop,
- Register src,
- Register dst,
- Register count,
- RegSet saved_regs);
-
- virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
- Register jni_env,
- Register robj,
- Register tmp,
- Label& slowpath);
-
- virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
-
-#ifdef COMPILER1
- void generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const;
-
- void generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const;
-
- void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const;
-#endif // COMPILER1
-
-#ifdef COMPILER2
- OptoReg::Name refine_register(const Node* node,
- OptoReg::Name opto_reg);
-
- void generate_c2_load_barrier_stub(MacroAssembler* masm,
- XLoadBarrierStubC2* stub) const;
-#endif // COMPILER2
-
- void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
-};
-
-#endif // CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp
deleted file mode 100644
index a9c53da3d01..00000000000
--- a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/powerOfTwo.hpp"
-
-#ifdef LINUX
-#include <sys/mman.h>
-#endif // LINUX
-
-//
-// The heap can have three different layouts, depending on the max heap size.
-//
-// Address Space & Pointer Layout 1
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000014000000000 (20TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000010000000000 (16TB)
-// . .
-// +--------------------------------+ 0x00000c0000000000 (12TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000080000000000 (8TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000040000000000 (4TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 6 5 2 1 0
-// +--------------------+----+-----------------------------------------------+
-// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
-// +--------------------+----+-----------------------------------------------+
-// | | |
-// | | * 41-0 Object Offset (42-bits, 4TB address space)
-// | |
-// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
-// | 0010 = Marked1 (Address view 8-12TB)
-// | 0100 = Remapped (Address view 16-20TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-46 Fixed (18-bits, always zero)
-//
-//
-// Address Space & Pointer Layout 2
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000280000000000 (40TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000200000000000 (32TB)
-// . .
-// +--------------------------------+ 0x0000180000000000 (24TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000100000000000 (16TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000080000000000 (8TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 7 6 3 2 0
-// +------------------+-----+------------------------------------------------+
-// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
-// +-------------------+----+------------------------------------------------+
-// | | |
-// | | * 42-0 Object Offset (43-bits, 8TB address space)
-// | |
-// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB)
-// | 0010 = Marked1 (Address view 16-24TB)
-// | 0100 = Remapped (Address view 32-40TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-47 Fixed (17-bits, always zero)
-//
-//
-// Address Space & Pointer Layout 3
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000500000000000 (80TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000400000000000 (64TB)
-// . .
-// +--------------------------------+ 0x0000300000000000 (48TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000200000000000 (32TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000100000000000 (16TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 8 7 4 3 0
-// +------------------+----+-------------------------------------------------+
-// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
-// +------------------+----+-------------------------------------------------+
-// | | |
-// | | * 43-0 Object Offset (44-bits, 16TB address space)
-// | |
-// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB)
-// | 0010 = Marked1 (Address view 32-48TB)
-// | 0100 = Remapped (Address view 64-80TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-48 Fixed (16-bits, always zero)
-//
-
-// Default value if probing is not implemented for a certain platform
-// Max address bit is restricted by implicit assumptions in the code, for instance
-// the bit layout of XForwardingEntry or Partial array entry (see XMarkStackEntry) in mark stack
-static const size_t DEFAULT_MAX_ADDRESS_BIT = 46;
-// Minimum value returned, if probing fails
-static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
-
-static size_t probe_valid_max_address_bit() {
-#ifdef LINUX
- size_t max_address_bit = 0;
- const size_t page_size = os::vm_page_size();
- for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
- const uintptr_t base_addr = ((uintptr_t) 1U) << i;
- if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
- // msync succeeded, the address is valid, and maybe even already mapped.
- max_address_bit = i;
- break;
- }
- if (errno != ENOMEM) {
- // Some error occurred. This should never happen, but msync
- // has some undefined behavior, hence ignore this bit.
-#ifdef ASSERT
- fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
-#else // ASSERT
- log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
-#endif // ASSERT
- continue;
- }
- // Since msync failed with ENOMEM, the page might not be mapped.
- // Try to map it, to see if the address is valid.
- void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
- if (result_addr != MAP_FAILED) {
- munmap(result_addr, page_size);
- }
- if ((uintptr_t) result_addr == base_addr) {
- // address is valid
- max_address_bit = i;
- break;
- }
- }
- if (max_address_bit == 0) {
- // probing failed, allocate a very high page and take that bit as the maximum
- const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
- void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
- if (result_addr != MAP_FAILED) {
- max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
- munmap(result_addr, page_size);
- }
- }
- log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
- return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
-#else // LINUX
- return DEFAULT_MAX_ADDRESS_BIT;
-#endif // LINUX
-}
-
-size_t XPlatformAddressOffsetBits() {
- const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
- const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
- const size_t min_address_offset_bits = max_address_offset_bits - 2;
- const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
- const size_t address_offset_bits = log2i_exact(address_offset);
- return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
-}
-
-size_t XPlatformAddressMetadataShift() {
- return XPlatformAddressOffsetBits();
-}
diff --git a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp
deleted file mode 100644
index 870b0d74d57..00000000000
--- a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
-#define CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
-
-const size_t XPlatformHeapViews = 3;
-const size_t XPlatformCacheLineSize = 64;
-
-size_t XPlatformAddressOffsetBits();
-size_t XPlatformAddressMetadataShift();
-
-#endif // CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad
deleted file mode 100644
index 6e401724baa..00000000000
--- a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad
+++ /dev/null
@@ -1,249 +0,0 @@
-//
-// Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-source_hpp %{
-
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-
-%}
-
-source %{
-
-static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
- if (barrier_data == XLoadBarrierElided) {
- return;
- }
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
- __ ldr(tmp, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(tmp, tmp, ref);
- __ cbnz(tmp, *stub->entry());
- __ bind(*stub->continuation());
-}
-
-static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
- __ b(*stub->entry());
- __ bind(*stub->continuation());
-}
-
-%}
-
-// Load Pointer
-instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
-%{
- match(Set dst (LoadP mem));
- predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0));
- effect(TEMP dst, KILL cr);
-
- ins_cost(4 * INSN_COST);
-
- format %{ "ldr $dst, $mem" %}
-
- ins_encode %{
- Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
- if (ref_addr.getMode() == Address::base_plus_offset) {
- // Fix up any out-of-range offsets.
- assert_different_registers(rscratch1, as_Register($mem$$base));
- assert_different_registers(rscratch1, $dst$$Register);
- ref_addr = __ legitimize_address(ref_addr, 8, rscratch1);
- }
- __ ldr($dst$$Register, ref_addr);
- x_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
- %}
-
- ins_pipe(iload_reg_mem);
-%}
-
-// Load Pointer Volatile
-instruct xLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
-%{
- match(Set dst (LoadP mem));
- predicate(UseZGC && !ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
- effect(TEMP dst, KILL cr);
-
- ins_cost(VOLATILE_REF_COST);
-
- format %{ "ldar $dst, $mem\t" %}
-
- ins_encode %{
- __ ldar($dst$$Register, $mem$$Register);
- x_load_barrier(masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
- %}
-
- ins_pipe(pipe_serial);
-%}
-
-instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(KILL cr, TEMP_DEF res);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $mem, $oldval, $newval\n\t"
- "cset $res, EQ" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, rscratch2);
- __ cbz(rscratch1, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong));
- effect(KILL cr, TEMP_DEF res);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $mem, $oldval, $newval\n\t"
- "cset $res, EQ" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, rscratch2);
- __ cbz(rscratch1, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(TEMP_DEF res, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, $res$$Register);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, $res$$Register);
- __ cbz(rscratch1, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, $res$$Register);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(TEMP_DEF res, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, $res$$Register);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, $res$$Register);
- __ cbz(rscratch1, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, $res$$Register);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
- match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
- effect(TEMP_DEF prev, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "atomic_xchg $prev, $newv, [$mem]" %}
-
- ins_encode %{
- __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
- x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
- %}
-
- ins_pipe(pipe_serial);
-%}
-
-instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
- match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0));
- effect(TEMP_DEF prev, KILL cr);
-
- ins_cost(VOLATILE_REF_COST);
-
- format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
-
- ins_encode %{
- __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
- x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
- %}
- ins_pipe(pipe_serial);
-%}
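
Note: every removed aarch64 instruct above follows the same fast-path shape: perform the atomic access, AND the observed reference against the thread-local bad mask from XThreadLocalData, and branch to x_load_barrier_slow_path (then retry the operation with the healed reference) only when a masked bit is set. A self-contained C++ sketch of that predicate — bit positions and values are illustrative, not the real ZGC pointer layout:

    #include <cstdint>

    // A reference needs the slow path iff any of its color bits is also set
    // in the per-thread bad mask; otherwise the barrier is skipped entirely.
    static bool needs_slow_path(uintptr_t ref, uintptr_t address_bad_mask) {
      return (ref & address_bad_mask) != 0;
    }

    int main() {
      const uintptr_t marked0  = uintptr_t(1) << 42;  // illustrative color bit
      const uintptr_t marked1  = uintptr_t(1) << 43;  // illustrative color bit
      const uintptr_t bad_mask = marked0;             // assume Marked0 is currently bad
      uintptr_t stale_ref = marked0 | 0x1000;         // would be healed, then retried
      uintptr_t good_ref  = marked1 | 0x1000;         // fast path, no stub call
      return (needs_slow_path(stale_ref, bad_mask) &&
              !needs_slow_path(good_ref, bad_mask)) ? 0 : 1;
    }
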
diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
index 088f92a0157..47abaae3d5b 100644
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
@@ -104,7 +104,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr);
ins_cost(4 * INSN_COST);
@@ -130,7 +130,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
- predicate(UseZGC && ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr);
ins_cost(VOLATILE_REF_COST);
@@ -149,7 +149,7 @@ instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg
// Store Pointer
instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr);
@@ -166,7 +166,7 @@ instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
// Store Pointer Volatile
instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr);
@@ -183,7 +183,7 @@ instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -207,7 +207,7 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -231,7 +231,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -254,7 +254,7 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -277,7 +277,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP prev, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -295,7 +295,7 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP prev, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
index 9c20e3737c8..800e7718921 100644
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -38,7 +38,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
-define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
+define_pd_global(uintx, CodeCacheSegmentSize, 64);
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 9835fb5aca1..252f4232115 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -4782,23 +4782,6 @@ void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, R
mov(tmp1, v0, D, 1);
}
-SkipIfEqual::SkipIfEqual(
- MacroAssembler* masm, const bool* flag_addr, bool value) {
- _masm = masm;
- uint64_t offset;
- _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
- _masm->ldrb(rscratch1, Address(rscratch1, offset));
- if (value) {
- _masm->cbnzw(rscratch1, _label);
- } else {
- _masm->cbzw(rscratch1, _label);
- }
-}
-
-SkipIfEqual::~SkipIfEqual() {
- _masm->bind(_label);
-}
-
void MacroAssembler::addptr(const Address &dst, int32_t src) {
Address adr;
switch(dst.getMode()) {
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index e49f0c49ef6..48fb3c2b071 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -1652,24 +1652,6 @@ class MacroAssembler: public Assembler {
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and its
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */
-class SkipIfEqual {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
- ~SkipIfEqual();
-};
-
struct tableswitch {
Register _reg;
int _insn_index; jint _first_key; jint _last_key;
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index d71162ac568..f18cec16488 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -259,6 +259,9 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}
+ if (FLAG_IS_DEFAULT(CodeEntryAlignment)) {
+ FLAG_SET_DEFAULT(CodeEntryAlignment, 32);
+ }
}
if (UseCryptoPmullForCRC32 && (!VM_Version::supports_pmull() || !VM_Version::supports_sha3() || !VM_Version::supports_crc32())) {
diff --git a/src/hotspot/cpu/arm/globals_arm.hpp b/src/hotspot/cpu/arm/globals_arm.hpp
index 084d10beea1..9c4b8500e18 100644
--- a/src/hotspot/cpu/arm/globals_arm.hpp
+++ b/src/hotspot/cpu/arm/globals_arm.hpp
@@ -36,7 +36,7 @@ define_pd_global(bool, TrapBasedNullChecks, false); // Not needed
define_pd_global(bool, DelayCompilerStubsGeneration, false); // No need - only few compiler's stubs
-define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
+define_pd_global(uintx, CodeCacheSegmentSize, 64);
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(intx, OptoLoopAlignment, 16);
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp
index d445108098b..b2711ac43b0 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp
@@ -2502,19 +2502,19 @@ class Assembler : public AbstractAssembler {
// load the constant are emitted beforehand. Store instructions need a
// tmp reg if the constant is not encodable as immediate.
// Size unpredictable.
- void ld( Register d, RegisterOrConstant roc, Register s1 = noreg);
- void lwa( Register d, RegisterOrConstant roc, Register s1 = noreg);
- void lwz( Register d, RegisterOrConstant roc, Register s1 = noreg);
- void lha( Register d, RegisterOrConstant roc, Register s1 = noreg);
- void lhz( Register d, RegisterOrConstant roc, Register s1 = noreg);
- void lbz( Register d, RegisterOrConstant roc, Register s1 = noreg);
- void std( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
- void stw( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
- void sth( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
- void stb( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
- void add( Register d, Register s, RegisterOrConstant roc);
- void add( Register d, RegisterOrConstant roc, Register s) { add(d, s, roc); }
- void sub( Register d, Register s, RegisterOrConstant roc);
+ void ld( Register d, RegisterOrConstant roc, Register s1 = noreg);
+ void lwa(Register d, RegisterOrConstant roc, Register s1 = noreg);
+ void lwz(Register d, RegisterOrConstant roc, Register s1 = noreg);
+ void lha(Register d, RegisterOrConstant roc, Register s1 = noreg);
+ void lhz(Register d, RegisterOrConstant roc, Register s1 = noreg);
+ void lbz(Register d, RegisterOrConstant roc, Register s1 = noreg);
+ void std(Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+ void stw(Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+ void sth(Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+ void stb(Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+ void add(Register d, Register s, RegisterOrConstant roc);
+ void add(Register d, RegisterOrConstant roc, Register s) { add(d, s, roc); }
+ void sub(Register d, Register s, RegisterOrConstant roc);
void xorr(Register d, Register s, RegisterOrConstant roc);
void xorr(Register d, RegisterOrConstant roc, Register s) { xorr(d, s, roc); }
void cmpw(ConditionRegister d, Register s, RegisterOrConstant roc);
@@ -2522,6 +2522,17 @@ class Assembler : public AbstractAssembler {
// Load pointer d from s1+roc.
void ld_ptr(Register d, RegisterOrConstant roc, Register s1 = noreg) { ld(d, roc, s1); }
+ void ld( Register d, Address &a);
+ void lwa(Register d, Address &a);
+ void lwz(Register d, Address &a);
+ void lha(Register d, Address &a);
+ void lhz(Register d, Address &a);
+ void lbz(Register d, Address &a);
+ void std(Register d, Address &a, Register tmp = noreg);
+ void stw(Register d, Address &a, Register tmp = noreg);
+ void sth(Register d, Address &a, Register tmp = noreg);
+ void stb(Register d, Address &a, Register tmp = noreg);
+
// Emit several instructions to load a 64 bit constant. This issues a fixed
// instruction pattern so that the constant can be patched later on.
enum {
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
index 98c8b629844..b0eaaccf0d0 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
@@ -338,28 +338,46 @@ inline void Assembler::insrwi( Register a, Register s, int n, int b)
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
inline void Assembler::lwzx( Register d, Register s1, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lwz( Register d, Address &a) {
+ lwz(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base());
+}
inline void Assembler::lwz( Register d, int si16, Register s1) { emit_int32(LWZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lwzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LWZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lwa( Register d, Address &a) {
+ lwa(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base());
+}
inline void Assembler::lwa( Register d, int si16, Register s1) { emit_int32(LWA_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::lwbrx( Register d, Register s1, Register s2) { emit_int32(LWBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lhz( Register d, Address &a) {
+ lhz(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base());
+}
inline void Assembler::lhz( Register d, int si16, Register s1) { emit_int32(LHZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lhzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::lhbrx( Register d, Register s1, Register s2) { emit_int32(LHBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lha( Register d, Address &a) {
+ lha(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base());
+}
inline void Assembler::lha( Register d, int si16, Register s1) { emit_int32(LHA_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lhau( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::lbzx( Register d, Register s1, Register s2) { emit_int32(LBZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lbz( Register d, Address &a) {
+ lbz(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base());
+}
inline void Assembler::lbz( Register d, int si16, Register s1) { emit_int32(LBZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lbzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LBZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+inline void Assembler::ld( Register d, Address &a) {
+ ld(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base());
+}
inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(LD_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::ld( Register d, ByteSize si16, Register s1) { assert(in_bytes(si16) < 0x7fff, "overflow"); ld(d, in_bytes(si16), s1); }
inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
@@ -371,19 +389,31 @@ inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_by
// PPC 1, section 3.3.3 Fixed-Point Store Instructions
inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stw( Register d, Address &a, Register tmp) {
+ stw(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base(), tmp);
+}
inline void Assembler::stw( Register d, int si16, Register s1) { emit_int32(STW_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::stwu( Register d, int si16, Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::stwbrx( Register d, Register s1, Register s2) { emit_int32(STWBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::sth( Register d, Address &a, Register tmp) {
+ sth(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base(), tmp);
+}
inline void Assembler::sth( Register d, int si16, Register s1) { emit_int32(STH_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::sthu( Register d, int si16, Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::sthbrx( Register d, Register s1, Register s2) { emit_int32(STHBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stb( Register d, Address &a, Register tmp) {
+ stb(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base(), tmp);
+}
inline void Assembler::stb( Register d, int si16, Register s1) { emit_int32(STB_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::stbu( Register d, int si16, Register s1) { emit_int32(STBU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
+inline void Assembler::std( Register d, Address &a, Register tmp) {
+ std(d, a.index() != noreg ? RegisterOrConstant(a.index()) : RegisterOrConstant(a.disp()), a.base(), tmp);
+}
inline void Assembler::std( Register d, int si16, Register s1) { emit_int32(STD_OPCODE | rs(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stdu( Register d, int si16, Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16) | rta0mem(s1));}
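
The Address-taking overloads added above all reduce to the same dispatch: at these call sites an Address carries either an index register or a displacement (never both), and the overload forwards whichever is present, together with the base register, to the pre-existing RegisterOrConstant form. A minimal stand-alone model of that dispatch, with hypothetical Reg/Address types standing in for the real HotSpot ones:

    #include <cstdio>

    struct Reg { int id; };
    static const Reg noreg{-1};

    struct Address {
      Reg base_; Reg index_; int disp_;
      Reg base()  const { return base_;  }
      Reg index() const { return index_; }
      int disp()  const { return disp_;  }
    };

    // stand-ins for the two pre-existing ld() encodings
    static void ld_indexed(Reg d, Reg index, Reg base) { std::printf("ldx r%d, r%d, r%d\n", d.id, base.id, index.id); }
    static void ld_disp(Reg d, int disp, Reg base)     { std::printf("ld r%d, %d(r%d)\n", d.id, disp, base.id); }

    void ld(Reg d, const Address& a) {
      if (a.index().id != noreg.id) ld_indexed(d, a.index(), a.base());  // register + register form
      else                          ld_disp(d, a.disp(), a.base());      // displacement form
    }

    int main() {
      ld(Reg{0}, Address{Reg{1}, noreg, 16});   // displacement form
      ld(Reg{0}, Address{Reg{1}, Reg{2}, 0});   // indexed form
    }

This is what lets the C1 call sites in the next hunk shrink from __ std(R0, a.disp(), a.base()) to __ std(R0, a).
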
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index 57e5f65d2f9..36e1ac82334 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -168,9 +168,9 @@ void LIR_Assembler::osr_entry() {
mo = frame_map()->address_for_monitor_object(i);
assert(ml.index() == noreg && mo.index() == noreg, "sanity");
__ ld(R0, slot_offset + 0, OSR_buf);
- __ std(R0, ml.disp(), ml.base());
+ __ std(R0, ml);
__ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
- __ std(R0, mo.disp(), mo.base());
+ __ std(R0, mo);
}
if (use_OSR_bias) {
@@ -601,7 +601,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
__ fcmpu(CCR0, rsrc, rsrc);
if (dst_in_memory) {
__ li(R0, 0); // 0 in case of NAN
- __ std(R0, addr.disp(), addr.base());
+ __ std(R0, addr);
} else {
__ li(dst->as_register(), 0);
}
@@ -625,7 +625,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
__ fcmpu(CCR0, rsrc, rsrc);
if (dst_in_memory) {
__ li(R0, 0); // 0 in case of NAN
- __ std(R0, addr.disp(), addr.base());
+ __ std(R0, addr);
} else {
__ li(dst->as_register_lo(), 0);
}
@@ -893,20 +893,20 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
int value = c->as_jint_bits();
__ load_const_optimized(src_reg, value);
Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
- __ stw(src_reg, addr.disp(), addr.base());
+ __ stw(src_reg, addr);
break;
}
case T_ADDRESS: {
int value = c->as_jint_bits();
__ load_const_optimized(src_reg, value);
Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
- __ std(src_reg, addr.disp(), addr.base());
+ __ std(src_reg, addr);
break;
}
case T_OBJECT: {
jobject2reg(c->as_jobject(), src_reg);
Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
- __ std(src_reg, addr.disp(), addr.base());
+ __ std(src_reg, addr);
break;
}
case T_LONG:
@@ -914,7 +914,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
int value = c->as_jlong_bits();
__ load_const_optimized(src_reg, value);
Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
- __ std(src_reg, addr.disp(), addr.base());
+ __ std(src_reg, addr);
break;
}
default:
@@ -1090,24 +1090,24 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
case T_FLOAT: {
Address from = frame_map()->address_for_slot(src->single_stack_ix());
Address to = frame_map()->address_for_slot(dest->single_stack_ix());
- __ lwz(tmp, from.disp(), from.base());
- __ stw(tmp, to.disp(), to.base());
+ __ lwz(tmp, from);
+ __ stw(tmp, to);
break;
}
case T_ADDRESS:
case T_OBJECT: {
Address from = frame_map()->address_for_slot(src->single_stack_ix());
Address to = frame_map()->address_for_slot(dest->single_stack_ix());
- __ ld(tmp, from.disp(), from.base());
- __ std(tmp, to.disp(), to.base());
+ __ ld(tmp, from);
+ __ std(tmp, to);
break;
}
case T_LONG:
case T_DOUBLE: {
Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
- __ ld(tmp, from.disp(), from.base());
- __ std(tmp, to.disp(), to.base());
+ __ ld(tmp, from);
+ __ std(tmp, to);
break;
}
diff --git a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp
deleted file mode 100644
index ca826e47352..00000000000
--- a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp
+++ /dev/null
@@ -1,585 +0,0 @@
-/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2021, 2024 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "asm/register.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/codeBlob.hpp"
-#include "code/vmreg.inline.hpp"
-#include "gc/x/xBarrier.inline.hpp"
-#include "gc/x/xBarrierSet.hpp"
-#include "gc/x/xBarrierSetAssembler.hpp"
-#include "gc/x/xBarrierSetRuntime.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-#include "memory/resourceArea.hpp"
-#include "register_ppc.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-#ifdef COMPILER1
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "gc/x/c1/xBarrierSetC1.hpp"
-#endif // COMPILER1
-#ifdef COMPILER2
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#endif // COMPILER2
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
- Register base, RegisterOrConstant ind_or_offs, Register dst,
- Register tmp1, Register tmp2,
- MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
- __ block_comment("load_at (zgc) {");
-
- // Check whether a special gc barrier is required for this particular load
- // (e.g. whether it's a reference load or not)
- if (!XBarrierSet::barrier_needed(decorators, type)) {
- BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
- tmp1, tmp2, preservation_level, L_handle_null);
- return;
- }
-
- if (ind_or_offs.is_register()) {
- assert_different_registers(base, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
- assert_different_registers(dst, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
- } else {
- assert_different_registers(base, tmp1, tmp2, R0, noreg);
- assert_different_registers(dst, tmp1, tmp2, R0, noreg);
- }
-
- /* ==== Load the pointer using the standard implementation for the actual heap access
- and the decompression of compressed pointers ==== */
- // Result of 'load_at' (standard implementation) will be written back to 'dst'.
- // As 'base' is required for the C-call, it must be reserved in case of a register clash.
- Register saved_base = base;
- if (base == dst) {
- __ mr(tmp2, base);
- saved_base = tmp2;
- }
-
- BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
- tmp1, noreg, preservation_level, L_handle_null);
-
- /* ==== Check whether pointer is dirty ==== */
- Label skip_barrier;
-
- // Load bad mask into scratch register.
- __ ld(tmp1, (intptr_t) XThreadLocalData::address_bad_mask_offset(), R16_thread);
-
- // The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits.
- // A pointer is classified as dirty if any of the color bits that also match the bad mask is set.
- // Conversely, it follows that the logical AND of the bad mask and the pointer must be zero
- // if the pointer is not dirty.
- // Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true.
- __ and_(tmp1, tmp1, dst);
- __ beq(CCR0, skip_barrier);
-
- /* ==== Invoke barrier ==== */
- int nbytes_save = 0;
-
- const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
- const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
- const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;
-
- const bool preserve_R3 = dst != R3_ARG1;
-
- if (needs_frame) {
- if (preserve_gp_registers) {
- nbytes_save = (preserve_fp_registers
- ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
- : MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
- nbytes_save -= preserve_R3 ? 0 : BytesPerWord;
- __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
- }
-
- __ save_LR(tmp1);
- __ push_frame_reg_args(nbytes_save, tmp1);
- }
-
- // Setup arguments
- if (saved_base != R3_ARG1) {
- __ mr_if_needed(R3_ARG1, dst);
- __ add(R4_ARG2, ind_or_offs, saved_base);
- } else if (dst != R4_ARG2) {
- __ add(R4_ARG2, ind_or_offs, saved_base);
- __ mr(R3_ARG1, dst);
- } else {
- __ add(R0, ind_or_offs, saved_base);
- __ mr(R3_ARG1, dst);
- __ mr(R4_ARG2, R0);
- }
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
-
- Register result = R3_RET;
- if (needs_frame) {
- __ pop_frame();
- __ restore_LR(tmp1);
-
- if (preserve_R3) {
- __ mr(R0, R3_RET);
- result = R0;
- }
-
- if (preserve_gp_registers) {
- __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
- }
- }
- __ mr_if_needed(dst, result);
-
- __ bind(skip_barrier);
- __ block_comment("} load_at (zgc)");
-}
-
-#ifdef ASSERT
-// The Z store barrier only verifies the pointers it is operating on and is thus purely a debugging measure.
-void XBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
- Register base, RegisterOrConstant ind_or_offs, Register val,
- Register tmp1, Register tmp2, Register tmp3,
- MacroAssembler::PreservationLevel preservation_level) {
- __ block_comment("store_at (zgc) {");
-
- // If the 'val' register is 'noreg', the to-be-stored value is a null pointer.
- if (is_reference_type(type) && val != noreg) {
- __ ld(tmp1, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
- __ and_(tmp1, tmp1, val);
- __ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier");
- }
-
- // Store value
- BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level);
-
- __ block_comment("} store_at (zgc)");
-}
-#endif // ASSERT
-
-void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type,
- Register src, Register dst, Register count,
- Register preserve1, Register preserve2) {
- __ block_comment("arraycopy_prologue (zgc) {");
-
- /* ==== Check whether a special gc barrier is required for this particular load ==== */
- if (!is_reference_type(component_type)) {
- return;
- }
-
- Label skip_barrier;
-
- // Fast path: Array is of length zero
- __ cmpdi(CCR0, count, 0);
- __ beq(CCR0, skip_barrier);
-
- /* ==== Ensure register sanity ==== */
- Register tmp_R11 = R11_scratch1;
-
- assert_different_registers(src, dst, count, tmp_R11, noreg);
- if (preserve1 != noreg) {
- // Not technically required, but passing the same register twice is unlikely to be intended.
- assert_different_registers(preserve1, preserve2);
- }
-
- /* ==== Invoke barrier (slowpath) ==== */
- int nbytes_save = 0;
-
- {
- assert(!noreg->is_volatile(), "sanity");
-
- if (preserve1->is_volatile()) {
- __ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP);
- }
-
- if (preserve2->is_volatile() && preserve1 != preserve2) {
- __ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP);
- }
-
- __ std(src, -BytesPerWord * ++nbytes_save, R1_SP);
- __ std(dst, -BytesPerWord * ++nbytes_save, R1_SP);
- __ std(count, -BytesPerWord * ++nbytes_save, R1_SP);
-
- __ save_LR(tmp_R11);
- __ push_frame_reg_args(nbytes_save, tmp_R11);
- }
-
- // XBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count)
- if (count == R3_ARG1) {
- if (src == R4_ARG2) {
- // Arguments are provided in reverse order
- __ mr(tmp_R11, count);
- __ mr(R3_ARG1, src);
- __ mr(R4_ARG2, tmp_R11);
- } else {
- __ mr(R4_ARG2, count);
- __ mr(R3_ARG1, src);
- }
- } else {
- __ mr_if_needed(R3_ARG1, src);
- __ mr_if_needed(R4_ARG2, count);
- }
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr());
-
- __ pop_frame();
- __ restore_LR(tmp_R11);
-
- {
- __ ld(count, -BytesPerWord * nbytes_save--, R1_SP);
- __ ld(dst, -BytesPerWord * nbytes_save--, R1_SP);
- __ ld(src, -BytesPerWord * nbytes_save--, R1_SP);
-
- if (preserve2->is_volatile() && preserve1 != preserve2) {
- __ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP);
- }
-
- if (preserve1->is_volatile()) {
- __ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP);
- }
- }
-
- __ bind(skip_barrier);
-
- __ block_comment("} arraycopy_prologue (zgc)");
-}
-
-void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
- Register obj, Register tmp, Label& slowpath) {
- __ block_comment("try_resolve_jobject_in_native (zgc) {");
-
- assert_different_registers(jni_env, obj, tmp);
-
- // Resolve the pointer using the standard implementation for weak tag handling and pointer verification.
- BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);
-
- // Check whether pointer is dirty.
- __ ld(tmp,
- in_bytes(XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()),
- jni_env);
-
- __ and_(tmp, obj, tmp);
- __ bne(CCR0, slowpath);
-
- __ block_comment("} try_resolve_jobject_in_native (zgc)");
-}
-
-#undef __
-
-#ifdef COMPILER1
-#define __ ce->masm()->
-
-// Code emitted by LIR node "LIR_OpXLoadBarrierTest" which in turn is emitted by XBarrierSetC1::load_barrier.
-// The actual compare and branch instructions are represented as stand-alone LIR nodes.
-void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const {
- __ block_comment("load_barrier_test (zgc) {");
-
- __ ld(R0, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
- __ andr(R0, R0, ref->as_pointer_register());
- __ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0);
-
- __ block_comment("} load_barrier_test (zgc)");
-}
-
-// Code emitted by code stub "XLoadBarrierStubC1" which in turn is emitted by XBarrierSetC1::load_barrier.
-// Invokes the runtime stub which is defined just below.
-void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const {
- __ block_comment("c1_load_barrier_stub (zgc) {");
-
- __ bind(*stub->entry());
-
- /* ==== Determine relevant data registers and ensure register sanity ==== */
- Register ref = stub->ref()->as_register();
- Register ref_addr = noreg;
-
- // Determine reference address
- if (stub->tmp()->is_valid()) {
- // 'tmp' register is given, so address might have an index or a displacement.
- ce->leal(stub->ref_addr(), stub->tmp());
- ref_addr = stub->tmp()->as_pointer_register();
- } else {
- // 'tmp' register is not given, so address must have neither an index nor a displacement.
- // The address' base register is thus usable as-is.
- assert(stub->ref_addr()->as_address_ptr()->disp() == 0, "illegal displacement");
- assert(!stub->ref_addr()->as_address_ptr()->index()->is_valid(), "illegal index");
-
- ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
- }
-
- assert_different_registers(ref, ref_addr, R0, noreg);
-
- /* ==== Invoke stub ==== */
- // Pass arguments via stack. The stack pointer will be bumped by the stub.
- __ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP);
- __ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP);
-
- __ load_const_optimized(R0, stub->runtime_stub());
- __ call_stub(R0);
-
- // The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address.
- __ mr_if_needed(ref, R0);
- __ b(*stub->continuation());
-
- __ block_comment("} c1_load_barrier_stub (zgc)");
-}
-
-#undef __
-#define __ sasm->
-
-// Code emitted by runtime code stub which in turn is emitted by XBarrierSetC1::generate_c1_runtime_stubs.
-void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const {
- __ block_comment("c1_load_barrier_runtime_stub (zgc) {");
-
- const int stack_parameters = 2;
- const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_parameters) * BytesPerWord;
-
- __ save_volatile_gprs(R1_SP, -nbytes_save);
- __ save_LR(R0);
-
- // Load arguments back again from the stack.
- __ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref
- __ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr
-
- __ push_frame_reg_args(nbytes_save, R0);
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
-
- __ verify_oop(R3_RET, "Bad pointer after barrier invocation");
- __ mr(R0, R3_RET);
-
- __ pop_frame();
- __ restore_LR(R3_RET);
- __ restore_volatile_gprs(R1_SP, -nbytes_save);
-
- __ blr();
-
- __ block_comment("} c1_load_barrier_runtime_stub (zgc)");
-}
-
-#undef __
-#endif // COMPILER1
-
-#ifdef COMPILER2
-
-OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const {
- if (!OptoReg::is_reg(opto_reg)) {
- return OptoReg::Bad;
- }
-
- VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
- if ((vm_reg->is_Register() || vm_reg->is_FloatRegister()) && (opto_reg & 1) != 0) {
- return OptoReg::Bad;
- }
-
- return opto_reg;
-}
-
-#define __ _masm->
-
-class XSaveLiveRegisters {
- MacroAssembler* _masm;
- RegMask _reg_mask;
- Register _result_reg;
- int _frame_size;
-
- public:
- XSaveLiveRegisters(MacroAssembler *masm, XLoadBarrierStubC2 *stub)
- : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) {
-
- const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
- _frame_size = align_up(register_save_size, frame::alignment_in_bytes)
- + frame::native_abi_reg_args_size;
-
- __ save_LR_CR(R0);
- __ push_frame(_frame_size, R0);
-
- iterate_over_register_mask(ACTION_SAVE, _frame_size);
- }
-
- ~XSaveLiveRegisters() {
- iterate_over_register_mask(ACTION_RESTORE, _frame_size);
-
- __ addi(R1_SP, R1_SP, _frame_size);
- __ restore_LR_CR(R0);
- }
-
- private:
- enum IterationAction : int {
- ACTION_SAVE,
- ACTION_RESTORE,
- ACTION_COUNT_ONLY
- };
-
- int iterate_over_register_mask(IterationAction action, int offset = 0) {
- int reg_save_index = 0;
- RegMaskIterator live_regs_iterator(_reg_mask);
-
- while (live_regs_iterator.has_next()) {
- const OptoReg::Name opto_reg = live_regs_iterator.next();
-
- // Filter out stack slots (spilled registers, i.e., stack-allocated registers).
- if (!OptoReg::is_reg(opto_reg)) {
- continue;
- }
-
- const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
- if (vm_reg->is_Register()) {
- Register std_reg = vm_reg->as_Register();
-
- // '_result_reg' will hold the end result of the operation. Its content must thus not be preserved.
- if (std_reg == _result_reg) {
- continue;
- }
-
- if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) {
- reg_save_index++;
-
- if (action == ACTION_SAVE) {
- _masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
- } else if (action == ACTION_RESTORE) {
- _masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
- } else {
- assert(action == ACTION_COUNT_ONLY, "Sanity");
- }
- }
- } else if (vm_reg->is_FloatRegister()) {
- FloatRegister fp_reg = vm_reg->as_FloatRegister();
- if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) {
- reg_save_index++;
-
- if (action == ACTION_SAVE) {
- _masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
- } else if (action == ACTION_RESTORE) {
- _masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
- } else {
- assert(action == ACTION_COUNT_ONLY, "Sanity");
- }
- }
- } else if (vm_reg->is_ConditionRegister()) {
- // NOP. Condition registers are covered by save_LR_CR
- } else if (vm_reg->is_VectorSRegister()) {
- assert(SuperwordUseVSX, "or should not reach here");
- VectorSRegister vs_reg = vm_reg->as_VectorSRegister();
- if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
- reg_save_index += 2;
-
- Register spill_addr = R0;
- if (action == ACTION_SAVE) {
- _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
- _masm->stxvd2x(vs_reg, spill_addr);
- } else if (action == ACTION_RESTORE) {
- _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
- _masm->lxvd2x(vs_reg, spill_addr);
- } else {
- assert(action == ACTION_COUNT_ONLY, "Sanity");
- }
- }
- } else {
- if (vm_reg->is_SpecialRegister()) {
- fatal("Special registers are unsupported. Found register %s", vm_reg->name());
- } else {
- fatal("Register type is not known");
- }
- }
- }
-
- return reg_save_index;
- }
-};
-
-#undef __
-#define __ _masm->
-
-class XSetupArguments {
- MacroAssembler* const _masm;
- const Register _ref;
- const Address _ref_addr;
-
- public:
- XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
- _masm(masm),
- _ref(stub->ref()),
- _ref_addr(stub->ref_addr()) {
-
- // Desired register/argument configuration:
- // _ref: R3_ARG1
- // _ref_addr: R4_ARG2
-
- // '_ref_addr' can be unspecified. In that case, the barrier will not heal the reference.
- if (_ref_addr.base() == noreg) {
- assert_different_registers(_ref, R0, noreg);
-
- __ mr_if_needed(R3_ARG1, _ref);
- __ li(R4_ARG2, 0);
- } else {
- assert_different_registers(_ref, _ref_addr.base(), R0, noreg);
- assert(!_ref_addr.index()->is_valid(), "reference addresses must not contain an index component");
-
- if (_ref != R4_ARG2) {
- // Calculate address first as the address' base register might clash with R4_ARG2
- __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp());
- __ mr_if_needed(R3_ARG1, _ref);
- } else if (_ref_addr.base() != R3_ARG1) {
- __ mr(R3_ARG1, _ref);
- __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); // Clobbering _ref
- } else {
- // Arguments are provided in inverse order (i.e. _ref == R4_ARG2, _ref_addr == R3_ARG1)
- __ mr(R0, _ref);
- __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp());
- __ mr(R3_ARG1, R0);
- }
- }
- }
-};
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
- __ block_comment("generate_c2_load_barrier_stub (zgc) {");
-
- __ bind(*stub->entry());
-
- Register ref = stub->ref();
- Address ref_addr = stub->ref_addr();
-
- assert_different_registers(ref, ref_addr.base());
-
- {
- XSaveLiveRegisters save_live_registers(masm, stub);
- XSetupArguments setup_arguments(masm, stub);
-
- __ call_VM_leaf(stub->slow_path());
- __ mr_if_needed(ref, R3_RET);
- }
-
- __ b(*stub->continuation());
-
- __ block_comment("} generate_c2_load_barrier_stub (zgc)");
-}
-
-#undef __
-#endif // COMPILER2
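
For reference, the XSetupArguments constructor above is a two-register parallel move: ref must end up in R3_ARG1 and the computed address in R4_ARG2, and a scratch register is needed only when the two values start out fully swapped. A compilable model of the three cases (the register names are illustrative stand-ins, and the printed "mr" lines merely trace the move order):

    #include <cstdio>

    enum Reg { R3_ARG1, R4_ARG2, R0_SCRATCH, R5_OTHER };

    static void mr(Reg dst, Reg src) { std::printf("mr %d, %d\n", dst, src); }

    // ref must end up in R3_ARG1, addr_base in R4_ARG2.
    void setup_arguments(Reg ref, Reg addr_base) {
      if (ref != R4_ARG2) {
        mr(R4_ARG2, addr_base);   // address first: ref is safe wherever it is
        mr(R3_ARG1, ref);
      } else if (addr_base != R3_ARG1) {
        mr(R3_ARG1, ref);         // ref first: addr_base does not live in R3_ARG1
        mr(R4_ARG2, addr_base);   // clobbers only the old copy of ref
      } else {
        mr(R0_SCRATCH, ref);      // full swap: break the cycle via the scratch reg
        mr(R4_ARG2, addr_base);
        mr(R3_ARG1, R0_SCRATCH);
      }
    }

    int main() { setup_arguments(R4_ARG2, R3_ARG1); }  // the fully swapped case
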
diff --git a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp
deleted file mode 100644
index 8dfd4524dfe..00000000000
--- a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2021, 2022 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
-#define CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
-
-#include "code/vmreg.hpp"
-#include "oops/accessDecorators.hpp"
-#ifdef COMPILER2
-#include "opto/optoreg.hpp"
-#endif // COMPILER2
-
-#ifdef COMPILER1
-class LIR_Assembler;
-class LIR_Opr;
-class StubAssembler;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class Node;
-#endif // COMPILER2
-
-#ifdef COMPILER1
-class XLoadBarrierStubC1;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class XLoadBarrierStubC2;
-#endif // COMPILER2
-
-class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
-public:
- virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
- Register base, RegisterOrConstant ind_or_offs, Register dst,
- Register tmp1, Register tmp2,
- MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr);
-
-#ifdef ASSERT
- virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
- Register base, RegisterOrConstant ind_or_offs, Register val,
- Register tmp1, Register tmp2, Register tmp3,
- MacroAssembler::PreservationLevel preservation_level);
-#endif // ASSERT
-
- virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
- Register src, Register dst, Register count,
- Register preserve1, Register preserve2);
-
- virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
- Register obj, Register tmp, Label& slowpath);
-
- virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
-
-#ifdef COMPILER1
- void generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const;
-
- void generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const;
-
- void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const;
-#endif // COMPILER1
-
-#ifdef COMPILER2
- OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
-
- void generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const;
-#endif // COMPILER2
-};
-
-#endif // CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
diff --git a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp
deleted file mode 100644
index 3218a765fc7..00000000000
--- a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2021 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/powerOfTwo.hpp"
-#include
-
-#ifdef LINUX
-#include <sys/mman.h>
-#endif // LINUX
-
-//
-// The overall memory layouts across different Power platforms are similar and only differ with regard to
-// the position of the highest addressable bit; the position of the metadata bits and the size of the actual
-// addressable heap address space are adjusted accordingly.
-//
-// The following memory schema shows an exemplary layout in which bit '45' is the highest addressable bit.
-// It is assumed that this virtual memory address space layout is predominant on the power platform.
-//
-// Standard Address Space & Pointer Layout
-// ---------------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127 TiB - 1)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000140000000000 (20 TiB)
-// | Remapped View |
-// +--------------------------------+ 0x0000100000000000 (16 TiB)
-// . .
-// +--------------------------------+ 0x00000c0000000000 (12 TiB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000080000000000 (8 TiB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000040000000000 (4 TiB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 6 5 2 1 0
-// +--------------------+----+-----------------------------------------------+
-// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
-// +--------------------+----+-----------------------------------------------+
-// | | |
-// | | * 41-0 Object Offset (42-bits, 4TB address space)
-// | |
-// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
-// | 0010 = Marked1 (Address view 8-12TB)
-// | 0100 = Remapped (Address view 16-20TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-46 Fixed (18-bits, always zero)
-//
-
-// Maximum value as per spec (Power ISA v2.07): 2 ^ 60 bytes, i.e. 1 EiB (exbibyte)
-static const unsigned int MAXIMUM_MAX_ADDRESS_BIT = 60;
-
-// Most modern Power processors provide an address space in which no more than
-// 45 bits are addressable, that is an address space of 32 TiB in size.
-static const unsigned int DEFAULT_MAX_ADDRESS_BIT = 45;
-
-// Minimum value returned, if probing fails: 64 GiB
-static const unsigned int MINIMUM_MAX_ADDRESS_BIT = 36;
-
-// Determines the highest addressable bit of the virtual address space (depends on platform)
-// by trying to interact with memory in that address range,
-// i.e. by syncing existing mappings (msync) or by temporarily mapping the memory area (mmap).
-// If one of those operations succeeds, it is proven that the targeted memory area is within the virtual address space.
-//
-// To reduce the number of required system calls to a bare minimum, the DEFAULT_MAX_ADDRESS_BIT is intentionally set
-// lower than what the ABI would theoretically permit.
-// Such an avoidance strategy, however, might impose unnecessary limits on processors that exceed this default.
-// If DEFAULT_MAX_ADDRESS_BIT is addressable, the next higher bit will be tested as well to ensure that
-// this assumption does not artificially restrict the available memory.
-static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit) {
- assert(init_bit >= min_bit, "Sanity");
- assert(init_bit <= MAXIMUM_MAX_ADDRESS_BIT, "Test bit is outside the assumed address space range");
-
-#ifdef LINUX
- unsigned int max_valid_address_bit = 0;
- void* last_allocatable_address = nullptr;
-
- const size_t page_size = os::vm_page_size();
-
- for (size_t i = init_bit; i >= min_bit; --i) {
- void* base_addr = (void*) (((unsigned long) 1U) << i);
-
- /* ==== Try msync-ing already mapped memory page ==== */
- if (msync(base_addr, page_size, MS_ASYNC) == 0) {
- // The page of the given address was synced by the Linux kernel and must thus be both mapped and valid.
- max_valid_address_bit = i;
- break;
- }
- if (errno != ENOMEM) {
- // An unexpected error occurred, i.e. an error not indicating that the targeted memory page is unmapped,
- // but pointing out another type of issue.
- // Even though this should never happen, those issues may come up due to undefined behavior.
-#ifdef ASSERT
- fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
-#else // ASSERT
- log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
-#endif // ASSERT
- continue;
- }
-
- /* ==== Try mapping memory page on our own ==== */
- last_allocatable_address = mmap(base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
- if (last_allocatable_address != MAP_FAILED) {
- munmap(last_allocatable_address, page_size);
- }
-
- if (last_allocatable_address == base_addr) {
- // As the Linux kernel mapped exactly the page we have requested, the address must be valid.
- max_valid_address_bit = i;
- break;
- }
-
- log_info_p(gc, init)("Probe failed for bit '%zu'", i);
- }
-
- if (max_valid_address_bit == 0) {
- // Probing did not bring up any usable address bit.
- // As an alternative, the VM evaluates the address returned by mmap as it is expected that the reserved page
- // will be close to the probed address that was out-of-range.
- // As per mmap(2), "the kernel [will take] [the address] as a hint about where to
- // place the mapping; on Linux, the mapping will be created at a nearby page boundary".
- // It should thus be a "close enough" approximation to the real virtual memory address space limit.
- //
- // This recovery strategy is only applied in production builds.
- // In debug builds, an assertion in 'XPlatformAddressOffsetBits' will bail out the VM to indicate that
- // the assumed address space is no longer up-to-date.
- if (last_allocatable_address != MAP_FAILED) {
- const unsigned int bitpos = BitsPerSize_t - count_leading_zeros((size_t) last_allocatable_address) - 1;
- log_info_p(gc, init)("Did not find any valid addresses within the range, using address '%u' instead", bitpos);
- return bitpos;
- }
-
-#ifdef ASSERT
- fatal("Available address space can not be determined");
-#else // ASSERT
- log_warning_p(gc)("Cannot determine available address space. Falling back to default value.");
- return DEFAULT_MAX_ADDRESS_BIT;
-#endif // ASSERT
- } else {
- if (max_valid_address_bit == init_bit) {
- // A usable address bit has been found immediately.
- // To ensure that the entire virtual address space is exploited, the next higher bit will be tested as well.
- log_info_p(gc, init)("Hit valid address '%u' on first try, retrying with next higher bit", max_valid_address_bit);
- return MAX2(max_valid_address_bit, probe_valid_max_address_bit(init_bit + 1, init_bit + 1));
- }
- }
-
- log_info_p(gc, init)("Found valid address '%u'", max_valid_address_bit);
- return max_valid_address_bit;
-#else // LINUX
- return DEFAULT_MAX_ADDRESS_BIT;
-#endif // LINUX
-}
-
-size_t XPlatformAddressOffsetBits() {
- const static unsigned int valid_max_address_offset_bits =
- probe_valid_max_address_bit(DEFAULT_MAX_ADDRESS_BIT, MINIMUM_MAX_ADDRESS_BIT) + 1;
- assert(valid_max_address_offset_bits >= MINIMUM_MAX_ADDRESS_BIT,
- "Highest addressable bit is outside the assumed address space range");
-
- const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
- const size_t min_address_offset_bits = max_address_offset_bits - 2;
- const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
- const size_t address_offset_bits = log2i_exact(address_offset);
-
- return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
-}
-
-size_t XPlatformAddressMetadataShift() {
- return XPlatformAddressOffsetBits();
-}
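
The probing loop deleted above (and its RISC-V twin further down) walks candidate high bits with msync, falling back to an mmap hint. A minimal user-space sketch of the same algorithm, outside HotSpot (sysconf stands in for os::vm_page_size(); logging omitted):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    // Sketch only: mirrors the deleted probe, Linux-specific.
    static size_t probe_max_address_bit(size_t init_bit, size_t min_bit) {
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      for (size_t i = init_bit; i > min_bit; i--) {
        void* const base = (void*)((uintptr_t)1 << i);
        if (msync(base, page, MS_ASYNC) == 0) {
          return i;                 // address valid, maybe already mapped
        }
        if (errno != ENOMEM) {
          continue;                 // unexpected errno: skip this bit
        }
        // ENOMEM: likely unmapped; try to map the page ourselves.
        void* const res = mmap(base, page, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (res != MAP_FAILED) {
          munmap(res, page);
          if (res == base) {
            return i;               // kernel honored the hint: bit is usable
          }
        }
      }
      return min_bit;               // fall back to the assumed minimum
    }

    int main() {
      printf("max valid address bit: %zu\n", probe_max_address_bit(47, 36));
    }
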
diff --git a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp
deleted file mode 100644
index be88b05b02a..00000000000
--- a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2021 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef CPU_PPC_GC_X_XGLOBALS_PPC_HPP
-#define CPU_PPC_GC_X_XGLOBALS_PPC_HPP
-
-#include "globalDefinitions_ppc.hpp"
-
-const size_t XPlatformHeapViews = 3;
-const size_t XPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE;
-
-size_t XPlatformAddressOffsetBits();
-size_t XPlatformAddressMetadataShift();
-
-#endif // CPU_PPC_GC_X_XGLOBALS_PPC_HPP
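
XPlatformHeapViews == 3 above counts the Marked0/Marked1/Remapped address views shown in the layout diagrams of xGlobals_riscv.cpp further down; one metadata nibble in the colored pointer selects the view. A toy sketch of the color test, using the 42-bit-offset layout from those diagrams (mask value illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int offset_bits = 42;                  // layout 1: 4TB offsets
      const uint64_t marked0  = 0b0001ull << offset_bits;
      const uint64_t remapped = 0b0100ull << offset_bits;
      // A pointer colored "remapped" tested against a bad mask that
      // currently flags marked0 pointers: no barrier needed.
      const uint64_t oop = remapped | 0x12345;
      const uint64_t bad_mask = marked0;           // simplified: real mask is phase-dependent
      std::printf("needs barrier: %s\n", (oop & bad_mask) ? "yes" : "no");
    }
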
diff --git a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
deleted file mode 100644
index b206b6593fb..00000000000
--- a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
+++ /dev/null
@@ -1,298 +0,0 @@
-//
-// Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2021 SAP SE. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-source_hpp %{
-
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-
-%}
-
-source %{
-
-static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
- Register tmp, uint8_t barrier_data) {
- if (barrier_data == XLoadBarrierElided) {
- return;
- }
-
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
- __ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
- __ and_(tmp, tmp, ref);
- __ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
- __ bind(*stub->continuation());
-}
-
-static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
- Register tmp) {
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
- __ b(*stub->entry());
- __ bind(*stub->continuation());
-}
-
-static void x_compare_and_swap(MacroAssembler* masm, const MachNode* node,
- Register res, Register mem, Register oldval, Register newval,
- Register tmp_xchg, Register tmp_mask,
- bool weak, bool acquire) {
- // z-specific load barrier requires strong CAS operations.
- // Weak CAS operations are thus only emitted if the barrier is elided.
- __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
- MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
- weak && node->barrier_data() == XLoadBarrierElided);
-
- if (node->barrier_data() != XLoadBarrierElided) {
- Label skip_barrier;
-
- __ ld(tmp_mask, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
- __ and_(tmp_mask, tmp_mask, tmp_xchg);
- __ beq(CCR0, skip_barrier);
-
- // CAS must have failed because pointer in memory is bad.
- x_load_barrier_slow_path(masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
-
- __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
- MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, weak);
-
- __ bind(skip_barrier);
- }
-
- if (acquire) {
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- // Uses the isync instruction as an acquire barrier.
- // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
- __ isync();
- } else {
- __ sync();
- }
- }
-}
-
-static void x_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
- Register res, Register mem, Register oldval, Register newval, Register tmp,
- bool weak, bool acquire) {
- // z-specific load barrier requires strong CAS operations.
- // Weak CAS operations are thus only emitted if the barrier is elided.
- __ cmpxchgd(CCR0, res, oldval, newval, mem,
- MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
- weak && node->barrier_data() == XLoadBarrierElided);
-
- if (node->barrier_data() != XLoadBarrierElided) {
- Label skip_barrier;
- __ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
- __ and_(tmp, tmp, res);
- __ beq(CCR0, skip_barrier);
-
- x_load_barrier_slow_path(masm, node, Address(mem), res, tmp);
-
- __ cmpxchgd(CCR0, res, oldval, newval, mem,
- MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, weak);
-
- __ bind(skip_barrier);
- }
-
- if (acquire) {
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- // Uses the isync instruction as an acquire barrier.
- // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
- __ isync();
- } else {
- __ sync();
- }
- }
-}
-
-%}
-
-instruct xLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
-%{
- match(Set dst (LoadP mem));
- effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
- ins_cost(MEMORY_REF_COST);
-
- predicate((UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0)
- && (n->as_Load()->is_unordered() || followed_by_acquire(n)));
-
- format %{ "LD $dst, $mem" %}
- ins_encode %{
- assert($mem$$index == 0, "sanity");
- __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
- x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
- %}
- ins_pipe(pipe_class_default);
-%}
-
-// Load Pointer Volatile
-instruct xLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
-%{
- match(Set dst (LoadP mem));
- effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
- ins_cost(3 * MEMORY_REF_COST);
-
- // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
- predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0);
-
- format %{ "LD acq $dst, $mem" %}
- ins_encode %{
- __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
- x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
-
- // Uses the isync instruction as an acquire barrier.
- // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
- __ isync();
- %}
- ins_pipe(pipe_class_default);
-%}
-
-instruct xCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
- iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
-
- predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
- && (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst));
-
- format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
- ins_encode %{
- x_compare_and_swap(masm, this,
- $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
- $tmp_xchg$$Register, $tmp_mask$$Register,
- false /* weak */, false /* acquire */);
- %}
- ins_pipe(pipe_class_default);
-%}
-
-instruct xCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
- iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
-
- predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
- && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
-
- format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
- ins_encode %{
- x_compare_and_swap(masm, this,
- $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
- $tmp_xchg$$Register, $tmp_mask$$Register,
- false /* weak */, true /* acquire */);
- %}
- ins_pipe(pipe_class_default);
-%}
-
-instruct xCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
- iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
-
- predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
- && ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst);
-
- format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
- ins_encode %{
- x_compare_and_swap(masm, this,
- $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
- $tmp_xchg$$Register, $tmp_mask$$Register,
- true /* weak */, false /* acquire */);
- %}
- ins_pipe(pipe_class_default);
-%}
-
-instruct xCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
- iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
-
- predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
- && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
-
- format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
- ins_encode %{
- x_compare_and_swap(masm, this,
- $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
- $tmp_xchg$$Register, $tmp_mask$$Register,
- true /* weak */, true /* acquire */);
- %}
- ins_pipe(pipe_class_default);
-%}
-
-instruct xCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
- iRegPdst tmp, flagsRegCR0 cr0) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- effect(TEMP_DEF res, TEMP tmp, KILL cr0);
-
- predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
- && (
- ((CompareAndSwapNode*)n)->order() != MemNode::acquire
- && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst
- ));
-
- format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
- ins_encode %{
- x_compare_and_exchange(masm, this,
- $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
- false /* weak */, false /* acquire */);
- %}
- ins_pipe(pipe_class_default);
-%}
-
-instruct xCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
- iRegPdst tmp, flagsRegCR0 cr0) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- effect(TEMP_DEF res, TEMP tmp, KILL cr0);
-
- predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
- && (
- ((CompareAndSwapNode*)n)->order() == MemNode::acquire
- || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst
- ));
-
- format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
- ins_encode %{
- x_compare_and_exchange(masm, this,
- $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
- false /* weak */, true /* acquire */);
- %}
- ins_pipe(pipe_class_default);
-%}
-
-instruct xGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0) %{
- match(Set res (GetAndSetP mem newval));
- effect(TEMP_DEF res, TEMP tmp, KILL cr0);
-
- predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0);
-
- format %{ "GetAndSetP $res, $mem, $newval" %}
- ins_encode %{
- __ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
- x_load_barrier(masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ isync();
- } else {
- __ sync();
- }
- %}
- ins_pipe(pipe_class_default);
-%}
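
Reduced to C, the fast path that the deleted x_load_barrier helper emitted is just a mask test with an out-of-line heal; a sketch with illustrative names (not HotSpot API):

    #include <cstdint>
    #include <cstdio>

    static uintptr_t heal(uintptr_t ref) {          // stand-in for the stub call
      return ref & ~(1ull << 42);                   // pretend bit 42 is the stale color
    }

    static uintptr_t load_barrier(uintptr_t ref, uintptr_t bad_mask) {
      if ((ref & bad_mask) != 0) {                  // the and_/bne_far pair above
        ref = heal(ref);                            // XLoadBarrierStubC2 slow path
      }
      return ref;
    }

    int main() {
      const uintptr_t bad_mask = 1ull << 42;
      std::printf("healed: 0x%llx\n",
                  (unsigned long long)load_barrier((1ull << 42) | 5, bad_mask));
    }
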
diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
index 8a65022126e..b9ea67dabe3 100644
--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
@@ -610,14 +610,14 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
// Resolve global handle
__ ld(dst, 0, dst);
- __ ld(tmp, load_bad_mask.disp(), load_bad_mask.base());
+ __ ld(tmp, load_bad_mask);
__ b(check_color);
__ bind(weak_tagged);
// Resolve weak handle
__ ld(dst, 0, dst);
- __ ld(tmp, mark_bad_mask.disp(), mark_bad_mask.base());
+ __ ld(tmp, mark_bad_mask);
__ bind(check_color);
__ and_(tmp, tmp, dst);
diff --git a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad
index bb696a4738f..97b49bc1b02 100644
--- a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad
+++ b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad
@@ -143,7 +143,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
effect(TEMP_DEF dst, KILL cr0);
ins_cost(MEMORY_REF_COST);
- predicate((UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0)
+ predicate((UseZGC && n->as_Load()->barrier_data() != 0)
&& (n->as_Load()->is_unordered() || followed_by_acquire(n)));
format %{ "LD $dst, $mem" %}
@@ -163,7 +163,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
ins_cost(3 * MEMORY_REF_COST);
// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
- predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Load()->barrier_data() != 0);
format %{ "LD acq $dst, $mem" %}
ins_encode %{
@@ -181,7 +181,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
// Store Pointer
instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0)
%{
- predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr0);
ins_cost(2 * MEMORY_REF_COST);
@@ -195,7 +195,7 @@ instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0)
instruct zStorePNull(memoryAlg4 mem, immP_0 zero, iRegPdst tmp, flagsRegCR0 cr0)
%{
- predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem zero));
effect(TEMP tmp, KILL cr0);
ins_cost(MEMORY_REF_COST);
@@ -213,7 +213,7 @@ instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
- predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
+ predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst));
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
@@ -232,7 +232,7 @@ instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
- predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
+ predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
@@ -250,7 +250,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
- predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
+ predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (
((CompareAndSwapNode*)n)->order() != MemNode::acquire
&& ((CompareAndSwapNode*)n)->order() != MemNode::seqcst
@@ -270,7 +270,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
- predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
+ predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (
((CompareAndSwapNode*)n)->order() == MemNode::acquire
|| ((CompareAndSwapNode*)n)->order() == MemNode::seqcst
@@ -289,7 +289,7 @@ instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp,
match(Set res (GetAndSetP mem newval));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
- predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
format %{ "GetAndSetP $res, $mem, $newval" %}
ins_encode %{
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index f036caa0675..c7adbfb52f0 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -2736,7 +2736,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
ld(temp, in_bytes(ObjectMonitor::EntryList_offset()), current_header);
ld(displaced_header, in_bytes(ObjectMonitor::cxq_offset()), current_header);
orr(temp, temp, displaced_header); // Will be 0 if both are 0.
@@ -3083,7 +3083,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
ld(t, in_bytes(ObjectMonitor::EntryList_offset()), monitor);
ld(t2, in_bytes(ObjectMonitor::cxq_offset()), monitor);
orr(t, t, t2);
@@ -4619,23 +4619,6 @@ void MacroAssembler::zap_from_to(Register low, int before, Register high, int af
#endif // !PRODUCT
-void SkipIfEqualZero::skip_to_label_if_equal_zero(MacroAssembler* masm, Register temp,
- const bool* flag_addr, Label& label) {
- int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
- assert(sizeof(bool) == 1, "PowerPC ABI");
- masm->lbz(temp, simm16_offset, temp);
- masm->cmpwi(CCR0, temp, 0);
- masm->beq(CCR0, label);
-}
-
-SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
- skip_to_label_if_equal_zero(masm, temp, flag_addr, _label);
-}
-
-SkipIfEqualZero::~SkipIfEqualZero() {
- _masm->bind(_label);
-}
-
void MacroAssembler::cache_wb(Address line) {
assert(line.index() == noreg, "index should be noreg");
assert(line.disp() == 0, "displacement should be 0");
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index 224e7bff995..f0e7c644535 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -960,23 +960,4 @@ class MacroAssembler: public Assembler {
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};
-// class SkipIfEqualZero:
-//
-// Instantiating this class will result in assembly code being output that will
- // jump around any code emitted between the creation of the instance and its
-// automatic destruction at the end of a scope block, depending on the value of
-// the flag passed to the constructor, which will be checked at run-time.
-class SkipIfEqualZero : public StackObj {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- // 'Temp' is a temp register that this object can use (and trash).
- explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
- static void skip_to_label_if_equal_zero(MacroAssembler*, Register temp,
- const bool* flag_addr, Label& label);
- ~SkipIfEqualZero();
-};
-
#endif // CPU_PPC_MACROASSEMBLER_PPC_HPP
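
SkipIfEqualZero, removed above and in macroAssembler_ppc.cpp, is the RAII emit-around pattern: emit a forward branch in the constructor, bind its target in the destructor, so everything emitted inside the scope is skipped when the flag is zero. A runnable toy of the same pattern against a fake instruction buffer (nothing here is HotSpot API):

    #include <cstdio>
    #include <vector>

    struct Asm {
      std::vector<int> code;                           // toy instruction stream
      size_t emit(int insn) { code.push_back(insn); return code.size() - 1; }
    };

    struct SkipIfZeroGuard {
      Asm& a;
      size_t patch_site;
      explicit SkipIfZeroGuard(Asm& a_) : a(a_) {
        patch_site = a.emit(-1);                       // placeholder branch target
      }
      ~SkipIfZeroGuard() {
        a.code[patch_site] = (int)a.code.size();       // "bind" label past the scope
      }
    };

    int main() {
      Asm a;
      {
        SkipIfZeroGuard g(a);                          // emits the forward branch
        a.emit(42);                                    // body, skipped when flag == 0
      }                                                // destructor binds the label
      std::printf("branch target = %d\n", a.code[0]);  // prints 2
    }
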
diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
index 206c161287f..b3ace8898ad 100644
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
@@ -49,7 +49,6 @@
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_ZGC
-#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#endif
@@ -1976,7 +1975,7 @@ class StubGenerator: public StubCodeGenerator {
generate_conjoint_int_copy_core(aligned);
} else {
#if INCLUDE_ZGC
- if (UseZGC && ZGenerational) {
+ if (UseZGC) {
ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs;
zbs->generate_conjoint_oop_copy(_masm, dest_uninitialized);
} else
@@ -2019,7 +2018,7 @@ class StubGenerator: public StubCodeGenerator {
generate_disjoint_int_copy_core(aligned);
} else {
#if INCLUDE_ZGC
- if (UseZGC && ZGenerational) {
+ if (UseZGC) {
ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs;
zbs->generate_disjoint_oop_copy(_masm, dest_uninitialized);
} else
@@ -2137,7 +2136,7 @@ class StubGenerator: public StubCodeGenerator {
} else {
__ bind(store_null);
#if INCLUDE_ZGC
- if (UseZGC && ZGenerational) {
+ if (UseZGC) {
__ store_heap_oop(R10_oop, R8_offset, R4_to, R11_scratch1, R12_tmp, noreg,
MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS,
dest_uninitialized ? IS_DEST_UNINITIALIZED : 0);
@@ -2153,7 +2152,7 @@ class StubGenerator: public StubCodeGenerator {
// ======== loop entry is here ========
__ bind(load_element);
#if INCLUDE_ZGC
- if (UseZGC && ZGenerational) {
+ if (UseZGC) {
__ load_heap_oop(R10_oop, R8_offset, R3_from,
R11_scratch1, R12_tmp,
MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS,
diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp
index 23046419460..7334ec675e3 100644
--- a/src/hotspot/cpu/riscv/assembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp
@@ -1962,6 +1962,13 @@ enum Nf {
INSN(vbrev8_v, 0b1010111, 0b010, 0b01000, 0b010010); // reverse bits in every byte of element
 INSN(vrev8_v, 0b1010111, 0b010, 0b01001, 0b010010); // reverse bytes in every element
+ // Vector AES instructions (Zvkned extension)
+ INSN(vaesem_vv, 0b1110111, 0b010, 0b00010, 0b101000);
+ INSN(vaesef_vv, 0b1110111, 0b010, 0b00011, 0b101000);
+
+ INSN(vaesdm_vv, 0b1110111, 0b010, 0b00000, 0b101000);
+ INSN(vaesdf_vv, 0b1110111, 0b010, 0b00001, 0b101000);
+
INSN(vclz_v, 0b1010111, 0b010, 0b01100, 0b010010); // count leading zeros
INSN(vctz_v, 0b1010111, 0b010, 0b01101, 0b010010); // count trailing zeros
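
For the new Zvkned definitions, the INSN arguments are (opcode, funct3, fixed vs1 field, funct6); under the standard OP-V layout those compose as below — a sketch, with the vm bit hard-wired to 1 since these vector-crypto ops are unmasked (field placement assumed from the vector base format):

    #include <cstdint>
    #include <cstdio>

    // funct6 | vm | vs2 | vs1(fixed) | funct3 | vd | opcode
    static uint32_t encode_vv(uint32_t funct6, uint32_t vs2, uint32_t vs1_fixed,
                              uint32_t funct3, uint32_t vd, uint32_t opcode) {
      return (funct6 << 26) | (1u << 25) | (vs2 << 20) | (vs1_fixed << 15) |
             (funct3 << 12) | (vd << 7) | opcode;
    }

    int main() {
      // vaesem.vv v1, v2: funct6 = 0b101000, vs1 field fixed at 0b00010
      std::printf("vaesem.vv v1, v2 -> 0x%08x\n",
                  encode_vv(0b101000, 2, 0b00010, 0b010, 1, 0b1110111));
    }
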
diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
index c5764bcebf7..0be1252f57f 100644
--- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
@@ -43,9 +43,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
__ relocate(safepoint_pc.rspec(), [&] {
- int32_t offset;
- __ la(t0, safepoint_pc.target(), offset);
- __ addi(t0, t0, offset);
+ __ la(t0, safepoint_pc.target());
});
__ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
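
The same two-line-to-one simplification repeats in c1_LIRAssembler_riscv.cpp and c2_CodeStubs_riscv.cpp below: la used to hand the low 12 bits back for the caller to add, and now folds the add in itself. The split it hides is the usual auipc hi20/lo12 decomposition; a sketch of the arithmetic (offset value illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t offset = 0x12345;              // assumed pc-relative delta
      const int64_t hi20 = (offset + 0x800) >> 12; // auipc immediate
      const int64_t lo12 = offset - (hi20 << 12);  // addi immediate (signed 12-bit)
      std::printf("hi20=0x%llx lo12=%lld sum=0x%llx\n",
                  (long long)hi20, (long long)lo12,
                  (long long)((hi20 << 12) + lo12));
    }
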
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index 828f70e4dec..21bf089118b 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -838,10 +838,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ decode_heap_oop(dest->as_register());
}
- if (!(UseZGC && !ZGenerational)) {
- // Load barrier has not yet been applied, so ZGC can't verify the oop here
- __ verify_oop(dest->as_register());
- }
+ __ verify_oop(dest->as_register());
}
}
@@ -1406,9 +1403,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
int pc_for_athrow_offset = __ offset();
InternalAddress pc_for_athrow(__ pc());
__ relocate(pc_for_athrow.rspec(), [&] {
- int32_t offset;
- __ la(exceptionPC->as_register(), pc_for_athrow.target(), offset);
- __ addi(exceptionPC->as_register(), exceptionPC->as_register(), offset);
+ __ la(exceptionPC->as_register(), pc_for_athrow.target());
});
add_call_info(pc_for_athrow_offset, info); // for exception handler
diff --git a/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp
index db18525b89c..781f95ab73e 100644
--- a/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp
@@ -45,9 +45,7 @@ void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
__ bind(entry());
InternalAddress safepoint_pc(__ pc() - __ offset() + _safepoint_offset);
__ relocate(safepoint_pc.rspec(), [&] {
- int32_t offset;
- __ la(t0, safepoint_pc.target(), offset);
- __ addi(t0, t0, offset);
+ __ la(t0, safepoint_pc.target());
});
__ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
__ far_jump(callback_addr);
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
index 75f87e35adf..0ffdcbca723 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
@@ -234,7 +234,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
ld(t0, Address(tmp, ObjectMonitor::EntryList_offset()));
ld(tmp1Reg, Address(tmp, ObjectMonitor::cxq_offset()));
orr(t0, t0, tmp1Reg);
@@ -566,7 +566,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset()));
ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset()));
orr(t0, t0, tmp3_t);
diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
deleted file mode 100644
index eb8d4c44b88..00000000000
--- a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/codeBlob.hpp"
-#include "code/vmreg.inline.hpp"
-#include "gc/x/xBarrier.inline.hpp"
-#include "gc/x/xBarrierSet.hpp"
-#include "gc/x/xBarrierSetAssembler.hpp"
-#include "gc/x/xBarrierSetRuntime.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "utilities/macros.hpp"
-#ifdef COMPILER1
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "gc/x/c1/xBarrierSetC1.hpp"
-#endif // COMPILER1
-#ifdef COMPILER2
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#endif // COMPILER2
-
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) /* nothing */
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::load_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register dst,
- Address src,
- Register tmp1,
- Register tmp2) {
- if (!XBarrierSet::barrier_needed(decorators, type)) {
- // Barrier not needed
- BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
- return;
- }
-
- assert_different_registers(t1, src.base());
- assert_different_registers(t0, t1, dst);
-
- Label done;
-
- // Load bad mask into temp register.
- __ la(t0, src);
- __ ld(t1, address_bad_mask_from_thread(xthread));
- __ ld(dst, Address(t0));
-
- // Test reference against bad mask. If mask bad, then we need to fix it up.
- __ andr(t1, dst, t1);
- __ beqz(t1, done);
-
- __ enter();
-
- __ push_call_clobbered_registers_except(RegSet::of(dst));
-
- if (c_rarg0 != dst) {
- __ mv(c_rarg0, dst);
- }
-
- __ mv(c_rarg1, t0);
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
-
- // Make sure dst has the return value.
- if (dst != x10) {
- __ mv(dst, x10);
- }
-
- __ pop_call_clobbered_registers_except(RegSet::of(dst));
- __ leave();
-
- __ bind(done);
-}
-
-#ifdef ASSERT
-
-void XBarrierSetAssembler::store_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Address dst,
- Register val,
- Register tmp1,
- Register tmp2,
- Register tmp3) {
- // Verify value
- if (is_reference_type(type)) {
- // Note that src could be noreg, which means we
- // are storing null and can skip verification.
- if (val != noreg) {
- Label done;
-
- // tmp1, tmp2 and tmp3 are often set to noreg.
- RegSet savedRegs = RegSet::of(t0);
- __ push_reg(savedRegs, sp);
-
- __ ld(t0, address_bad_mask_from_thread(xthread));
- __ andr(t0, val, t0);
- __ beqz(t0, done);
- __ stop("Verify oop store failed");
- __ should_not_reach_here();
- __ bind(done);
- __ pop_reg(savedRegs, sp);
- }
- }
-
- // Store value
- BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg);
-}
-
-#endif // ASSERT
-
-void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
- DecoratorSet decorators,
- bool is_oop,
- Register src,
- Register dst,
- Register count,
- RegSet saved_regs) {
- if (!is_oop) {
- // Barrier not needed
- return;
- }
-
- BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
-
- assert_different_registers(src, count, t0);
-
- __ push_reg(saved_regs, sp);
-
- if (count == c_rarg0 && src == c_rarg1) {
- // exactly backwards!!
- __ xorr(c_rarg0, c_rarg0, c_rarg1);
- __ xorr(c_rarg1, c_rarg0, c_rarg1);
- __ xorr(c_rarg0, c_rarg0, c_rarg1);
- } else {
- __ mv(c_rarg0, src);
- __ mv(c_rarg1, count);
- }
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
-
- __ pop_reg(saved_regs, sp);
-
- BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
-}
-
-void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
- Register jni_env,
- Register robj,
- Register tmp,
- Label& slowpath) {
- BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
-
- assert_different_registers(jni_env, robj, tmp);
-
- // Resolve jobject
- BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
-
- // Compute the offset of address bad mask from the field of jni_environment
- long int bad_mask_relative_offset = (long int) (in_bytes(XThreadLocalData::address_bad_mask_offset()) -
- in_bytes(JavaThread::jni_environment_offset()));
-
- // Load the address bad mask
- __ ld(tmp, Address(jni_env, bad_mask_relative_offset));
-
- // Check address bad mask
- __ andr(tmp, robj, tmp);
- __ bnez(tmp, slowpath);
-
- BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
-}
-
-#ifdef COMPILER2
-
-OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
- if (!OptoReg::is_reg(opto_reg)) {
- return OptoReg::Bad;
- }
-
- const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
- if (vm_reg->is_FloatRegister()) {
- return opto_reg & ~1;
- }
-
- return opto_reg;
-}
-
-#undef __
-#define __ _masm->
-
-class XSaveLiveRegisters {
-private:
- MacroAssembler* const _masm;
- RegSet _gp_regs;
- FloatRegSet _fp_regs;
- VectorRegSet _vp_regs;
-
-public:
- void initialize(XLoadBarrierStubC2* stub) {
- // Record registers that needs to be saved/restored
- RegMaskIterator rmi(stub->live());
- while (rmi.has_next()) {
- const OptoReg::Name opto_reg = rmi.next();
- if (OptoReg::is_reg(opto_reg)) {
- const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
- if (vm_reg->is_Register()) {
- _gp_regs += RegSet::of(vm_reg->as_Register());
- } else if (vm_reg->is_FloatRegister()) {
- _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
- } else if (vm_reg->is_VectorRegister()) {
- const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1));
- _vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister());
- } else {
- fatal("Unknown register type");
- }
- }
- }
-
- // Remove C-ABI SOE registers, tmp regs and _ref register that will be updated
- _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref());
- }
-
- XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
- _masm(masm),
- _gp_regs(),
- _fp_regs(),
- _vp_regs() {
- // Figure out what registers to save/restore
- initialize(stub);
-
- // Save registers
- __ push_reg(_gp_regs, sp);
- __ push_fp(_fp_regs, sp);
- __ push_v(_vp_regs, sp);
- }
-
- ~XSaveLiveRegisters() {
- // Restore registers
- __ pop_v(_vp_regs, sp);
- __ pop_fp(_fp_regs, sp);
- __ pop_reg(_gp_regs, sp);
- }
-};
-
-class XSetupArguments {
-private:
- MacroAssembler* const _masm;
- const Register _ref;
- const Address _ref_addr;
-
-public:
- XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
- _masm(masm),
- _ref(stub->ref()),
- _ref_addr(stub->ref_addr()) {
-
- // Setup arguments
- if (_ref_addr.base() == noreg) {
- // No self healing
- if (_ref != c_rarg0) {
- __ mv(c_rarg0, _ref);
- }
- __ mv(c_rarg1, zr);
- } else {
- // Self healing
- if (_ref == c_rarg0) {
- // _ref is already at correct place
- __ la(c_rarg1, _ref_addr);
- } else if (_ref != c_rarg1) {
- // _ref is in wrong place, but not in c_rarg1, so fix it first
- __ la(c_rarg1, _ref_addr);
- __ mv(c_rarg0, _ref);
- } else if (_ref_addr.base() != c_rarg0) {
- assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
- __ mv(c_rarg0, _ref);
- __ la(c_rarg1, _ref_addr);
- } else {
- assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
- if (_ref_addr.base() == c_rarg0) {
- __ mv(t1, c_rarg1);
- __ la(c_rarg1, _ref_addr);
- __ mv(c_rarg0, t1);
- } else {
- ShouldNotReachHere();
- }
- }
- }
- }
-
- ~XSetupArguments() {
- // Transfer result
- if (_ref != x10) {
- __ mv(_ref, x10);
- }
- }
-};
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
- BLOCK_COMMENT("XLoadBarrierStubC2");
-
- // Stub entry
- __ bind(*stub->entry());
-
- {
- XSaveLiveRegisters save_live_registers(masm, stub);
- XSetupArguments setup_arguments(masm, stub);
-
- __ mv(t1, stub->slow_path());
- __ jalr(t1);
- }
-
- // Stub exit
- __ j(*stub->continuation());
-}
-
-#endif // COMPILER2
-
-#ifdef COMPILER1
-#undef __
-#define __ ce->masm()->
-
-void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const {
- assert_different_registers(xthread, ref->as_register(), t1);
- __ ld(t1, address_bad_mask_from_thread(xthread));
- __ andr(t1, t1, ref->as_register());
-}
-
-void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const {
- // Stub entry
- __ bind(*stub->entry());
-
- Register ref = stub->ref()->as_register();
- Register ref_addr = noreg;
- Register tmp = noreg;
-
- if (stub->tmp()->is_valid()) {
- // Load address into tmp register
- ce->leal(stub->ref_addr(), stub->tmp());
- ref_addr = tmp = stub->tmp()->as_pointer_register();
- } else {
- // Address already in register
- ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
- }
-
- assert_different_registers(ref, ref_addr, noreg);
-
- // Save x10 unless it is the result or tmp register
- // Set up SP to accommodate parameters and maybe x10.
- if (ref != x10 && tmp != x10) {
- __ sub(sp, sp, 32);
- __ sd(x10, Address(sp, 16));
- } else {
- __ sub(sp, sp, 16);
- }
-
- // Setup arguments and call runtime stub
- ce->store_parameter(ref_addr, 1);
- ce->store_parameter(ref, 0);
-
- __ far_call(stub->runtime_stub());
-
- // Verify result
- __ verify_oop(x10);
-
-
- // Move result into place
- if (ref != x10) {
- __ mv(ref, x10);
- }
-
- // Restore x10 unless it is the result or tmp register
- if (ref != x10 && tmp != x10) {
- __ ld(x10, Address(sp, 16));
- __ add(sp, sp, 32);
- } else {
- __ add(sp, sp, 16);
- }
-
- // Stub exit
- __ j(*stub->continuation());
-}
-
-#undef __
-#define __ sasm->
-
-void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const {
- __ prologue("zgc_load_barrier stub", false);
-
- __ push_call_clobbered_registers_except(RegSet::of(x10));
-
- // Setup arguments
- __ load_parameter(0, c_rarg0);
- __ load_parameter(1, c_rarg1);
-
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
-
- __ pop_call_clobbered_registers_except(RegSet::of(x10));
-
- __ epilogue();
-}
-
-#endif // COMPILER1
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
- // Check if mask is good.
- // verifies that XAddressBadMask & obj == 0
- __ ld(tmp2, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(tmp1, obj, tmp2);
- __ bnez(tmp1, error);
-
- BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
-}
-
-#undef __
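
One detail worth noting from the deleted arraycopy_prologue: the "exactly backwards!!" case swaps c_rarg0 and c_rarg1 without a scratch register via the classic three-XOR trick; in plain C++:

    #include <cstdio>

    int main() {
      unsigned a = 0x1111, b = 0x2222;
      a ^= b; b ^= a; a ^= b;                 // after: a == 0x2222, b == 0x1111
      std::printf("a=0x%x b=0x%x\n", a, b);
    }
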
diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp
deleted file mode 100644
index cbf5077999b..00000000000
--- a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
-#define CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
-
-#include "code/vmreg.hpp"
-#include "oops/accessDecorators.hpp"
-#ifdef COMPILER2
-#include "opto/optoreg.hpp"
-#endif // COMPILER2
-
-#ifdef COMPILER1
-class LIR_Assembler;
-class LIR_Opr;
-class StubAssembler;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class Node;
-#endif // COMPILER2
-
-#ifdef COMPILER1
-class XLoadBarrierStubC1;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class XLoadBarrierStubC2;
-#endif // COMPILER2
-
-class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
-public:
- virtual void load_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register dst,
- Address src,
- Register tmp1,
- Register tmp2);
-
-#ifdef ASSERT
- virtual void store_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Address dst,
- Register val,
- Register tmp1,
- Register tmp2,
- Register tmp3);
-#endif // ASSERT
-
- virtual void arraycopy_prologue(MacroAssembler* masm,
- DecoratorSet decorators,
- bool is_oop,
- Register src,
- Register dst,
- Register count,
- RegSet saved_regs);
-
- virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
- Register jni_env,
- Register robj,
- Register tmp,
- Label& slowpath);
-
- virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
-
-#ifdef COMPILER1
- void generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const;
-
- void generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const;
-
- void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const;
-#endif // COMPILER1
-
-#ifdef COMPILER2
- OptoReg::Name refine_register(const Node* node,
- OptoReg::Name opto_reg);
-
- void generate_c2_load_barrier_stub(MacroAssembler* masm,
- XLoadBarrierStubC2* stub) const;
-#endif // COMPILER2
-
- void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
-};
-
-#endif // CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
diff --git a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp
deleted file mode 100644
index 602dab56747..00000000000
--- a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/powerOfTwo.hpp"
-
-#ifdef LINUX
-#include <sys/mman.h>
-#endif // LINUX
-
-//
-// The heap can have three different layouts, depending on the max heap size.
-//
-// Address Space & Pointer Layout 1
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000014000000000 (20TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000010000000000 (16TB)
-// . .
-// +--------------------------------+ 0x00000c0000000000 (12TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000080000000000 (8TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000040000000000 (4TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 6 5 2 1 0
-// +--------------------+----+-----------------------------------------------+
-// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
-// +--------------------+----+-----------------------------------------------+
-// | | |
-// | | * 41-0 Object Offset (42-bits, 4TB address space)
-// | |
-// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
-// | 0010 = Marked1 (Address view 8-12TB)
-// | 0100 = Remapped (Address view 16-20TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-46 Fixed (18-bits, always zero)
-//
-//
-// Address Space & Pointer Layout 2
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000280000000000 (40TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000200000000000 (32TB)
-// . .
-// +--------------------------------+ 0x0000180000000000 (24TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000100000000000 (16TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000080000000000 (8TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 7 6 3 2 0
-// +------------------+-----+------------------------------------------------+
-// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
-// +-------------------+----+------------------------------------------------+
-// | | |
-// | | * 42-0 Object Offset (43-bits, 8TB address space)
-// | |
-// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB)
-// | 0010 = Marked1 (Address view 16-24TB)
-// | 0100 = Remapped (Address view 32-40TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-47 Fixed (17-bits, always zero)
-//
-//
-// Address Space & Pointer Layout 3
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000500000000000 (80TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000400000000000 (64TB)
-// . .
-// +--------------------------------+ 0x0000300000000000 (48TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000200000000000 (32TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000100000000000 (16TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 8 7 4 3 0
-// +------------------+----+-------------------------------------------------+
-// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
-// +------------------+----+-------------------------------------------------+
-// | | |
-// | | * 43-0 Object Offset (44-bits, 16TB address space)
-// | |
-// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB)
-// | 0010 = Marked1 (Address view 32-48TB)
-// | 0100 = Remapped (Address view 64-80TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-48 Fixed (16-bits, always zero)
-//
-
-// Default value if probing is not implemented for a certain platform: 128TB
-static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
-// Minimum value returned, if probing fails: 64GB
-static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
-
-static size_t probe_valid_max_address_bit() {
-#ifdef LINUX
- size_t max_address_bit = 0;
- const size_t page_size = os::vm_page_size();
- for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
- const uintptr_t base_addr = ((uintptr_t) 1U) << i;
- if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
- // msync succeeded, the address is valid, and maybe even already mapped.
- max_address_bit = i;
- break;
- }
- if (errno != ENOMEM) {
- // Some error occurred. This should never happen, but msync
- // has some undefined behavior, hence ignore this bit.
-#ifdef ASSERT
- fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
-#else // ASSERT
- log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
-#endif // ASSERT
- continue;
- }
- // Since msync failed with ENOMEM, the page might not be mapped.
- // Try to map it, to see if the address is valid.
- void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
- if (result_addr != MAP_FAILED) {
- munmap(result_addr, page_size);
- }
- if ((uintptr_t) result_addr == base_addr) {
- // address is valid
- max_address_bit = i;
- break;
- }
- }
- if (max_address_bit == 0) {
- // probing failed, allocate a very high page and take that bit as the maximum
- const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
- void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
- if (result_addr != MAP_FAILED) {
- max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
- munmap(result_addr, page_size);
- }
- }
- log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
- return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
-#else // LINUX
- return DEFAULT_MAX_ADDRESS_BIT;
-#endif // LINUX
-}
-
-size_t XPlatformAddressOffsetBits() {
- const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
- const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
- const size_t min_address_offset_bits = max_address_offset_bits - 2;
- const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
- const size_t address_offset_bits = log2i_exact(address_offset);
- return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
-}
-
-size_t XPlatformAddressMetadataShift() {
- return XPlatformAddressOffsetBits();
-}
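
A worked instance of the clamp in the deleted XPlatformAddressOffsetBits, assuming the probe reported bit 47 (so 48 usable offset bits) and taking XVirtualToPhysicalRatio as 16 for illustration:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t log2_exact(size_t v) {      // v is a power of two here
      size_t n = 0;
      while ((v >> n) != 1) n++;
      return n;
    }

    int main() {
      const size_t valid_max_offset_bits = 47 + 1;
      const size_t max_bits = valid_max_offset_bits - 3;   // 45
      const size_t min_bits = max_bits - 2;                // 43
      const size_t heap = (size_t)16 << 30;                // assume 16G MaxHeapSize
      const size_t address_offset = heap * 16;             // already a power of two
      const size_t bits = log2_exact(address_offset);      // 38
      std::printf("%zu clamped to [%zu, %zu] -> %zu\n",
                  bits, min_bits, max_bits, std::clamp(bits, min_bits, max_bits));
    }
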
diff --git a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp
deleted file mode 100644
index 836dc7aac0d..00000000000
--- a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
-#define CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
-
-const size_t XPlatformHeapViews = 3;
-const size_t XPlatformCacheLineSize = 64;
-
-size_t XPlatformAddressOffsetBits();
-size_t XPlatformAddressMetadataShift();
-
-#endif // CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
diff --git a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad b/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
deleted file mode 100644
index b93b7066425..00000000000
--- a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
+++ /dev/null
@@ -1,229 +0,0 @@
-//
-// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-source_hpp %{
-
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-
-%}
-
-source %{
-
-static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
- if (barrier_data == XLoadBarrierElided) {
- return;
- }
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
- __ ld(tmp, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(tmp, tmp, ref);
- __ bnez(tmp, *stub->entry(), true /* far */);
- __ bind(*stub->continuation());
-}
-
-static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
- __ j(*stub->entry());
- __ bind(*stub->continuation());
-}
-
-%}
-
-// Load Pointer
-instruct xLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
-%{
- match(Set dst (LoadP mem));
- predicate(UseZGC && !ZGenerational && (n->as_Load()->barrier_data() != 0));
- effect(TEMP dst, TEMP tmp, KILL cr);
-
- ins_cost(4 * DEFAULT_COST);
-
- format %{ "ld $dst, $mem, #@zLoadP" %}
-
- ins_encode %{
- const Address ref_addr (as_Register($mem$$base), $mem$$disp);
- __ ld($dst$$Register, ref_addr);
- x_load_barrier(masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data());
- %}
-
- ins_pipe(iload_reg_mem);
-%}
-
-instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(TEMP_DEF res, TEMP tmp, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapP\n\t"
- "mv $res, $res == $oldval" %}
-
- ins_encode %{
- Label failed;
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::relaxed /* acquire */, Assembler::rl /* release */, $tmp$$Register);
- __ sub(t0, $tmp$$Register, $oldval$$Register);
- __ seqz($res$$Register, t0);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(t0, t0, $tmp$$Register);
- __ beqz(t0, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
- true /* result_as_bool */);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong));
- effect(TEMP_DEF res, TEMP tmp, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapPAcq\n\t"
- "mv $res, $res == $oldval" %}
-
- ins_encode %{
- Label failed;
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::aq /* acquire */, Assembler::rl /* release */, $tmp$$Register);
- __ sub(t0, $tmp$$Register, $oldval$$Register);
- __ seqz($res$$Register, t0);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(t0, t0, $tmp$$Register);
- __ beqz(t0, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
- true /* result_as_bool */);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(TEMP_DEF res, TEMP tmp, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(t0, t0, $res$$Register);
- __ beqz(t0, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(TEMP_DEF res, TEMP tmp, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
- if (barrier_data() != XLoadBarrierElided) {
- Label good;
- __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
- __ andr(t0, t0, $res$$Register);
- __ beqz(t0, good);
- x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
- Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
- match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
- effect(TEMP_DEF prev, TEMP tmp, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}
-
- ins_encode %{
- __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
- x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
- %}
-
- ins_pipe(pipe_serial);
-%}
-
-instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
- match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0));
- effect(TEMP_DEF prev, TEMP tmp, KILL cr);
-
- ins_cost(VOLATILE_REF_COST);
-
- format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}
-
- ins_encode %{
- __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
- x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
- %}
- ins_pipe(pipe_serial);
-%}
diff --git a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
index 24669f45eb4..8e33d514f46 100644
--- a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
+++ b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
@@ -94,7 +94,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
- predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, TEMP tmp, KILL cr);
ins_cost(4 * DEFAULT_COST);
@@ -113,7 +113,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
// Store Pointer
instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp1, TEMP tmp2, KILL cr);
@@ -131,7 +131,7 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -154,7 +154,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -176,7 +176,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval,
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -198,7 +198,7 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval,
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -219,7 +219,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP_DEF prev, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -237,7 +237,7 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rF
instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP_DEF prev, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
diff --git a/src/hotspot/cpu/riscv/globals_riscv.hpp b/src/hotspot/cpu/riscv/globals_riscv.hpp
index dd31de14704..7bd0200f118 100644
--- a/src/hotspot/cpu/riscv/globals_riscv.hpp
+++ b/src/hotspot/cpu/riscv/globals_riscv.hpp
@@ -112,11 +112,11 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(bool, UseZicbom, false, EXPERIMENTAL, "Use Zicbom instructions") \
product(bool, UseZicbop, false, EXPERIMENTAL, "Use Zicbop instructions") \
product(bool, UseZicboz, false, EXPERIMENTAL, "Use Zicboz instructions") \
- product(bool, UseZtso, false, EXPERIMENTAL, "Assume Ztso memory model") \
product(bool, UseZihintpause, false, EXPERIMENTAL, \
"Use Zihintpause instructions") \
+ product(bool, UseZtso, false, EXPERIMENTAL, "Assume Ztso memory model") \
product(bool, UseZvbb, false, EXPERIMENTAL, "Use Zvbb instructions") \
- product(bool, UseZvfh, false, EXPERIMENTAL, "Use Zvfh instructions") \
+ product(bool, UseZvfh, false, "Use Zvfh instructions") \
product(bool, UseZvkn, false, EXPERIMENTAL, \
"Use Zvkn group extension, Zvkned, Zvknhb, Zvkb, Zvkt") \
product(bool, UseRVVForBigIntegerShiftIntrinsics, true, \
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index fd75bde7655..f383557e43f 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -193,9 +193,7 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, i
void InterpreterMacroAssembler::get_dispatch() {
ExternalAddress target((address)Interpreter::dispatch_table());
relocate(target.rspec(), [&] {
- int32_t offset;
- la(xdispatch, target.target(), offset);
- addi(xdispatch, xdispatch, offset);
+ la(xdispatch, target.target());
});
}
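
For context: a pc-relative address on RISC-V is materialized as an auipc carrying the upper 20 bits plus a 12-bit low part. The call sites above used to apply that low part by hand; the one-argument la() overload (which the patch itself now calls) folds it in. A minimal before/after sketch, assuming a MacroAssembler* masm in scope, illustrative only:

    // Before: the caller splits the address and applies the low 12 bits itself.
    int32_t offset;
    masm->la(xdispatch, target.target(), offset);  // auipc + low part returned in offset
    masm->addi(xdispatch, xdispatch, offset);      // apply the low 12 bits

    // After: la() performs the full materialization internally.
    masm->la(xdispatch, target.target());
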
diff --git a/src/hotspot/cpu/riscv/jniFastGetField_riscv.cpp b/src/hotspot/cpu/riscv/jniFastGetField_riscv.cpp
index f7d702c6310..2b4a8b87e54 100644
--- a/src/hotspot/cpu/riscv/jniFastGetField_riscv.cpp
+++ b/src/hotspot/cpu/riscv/jniFastGetField_riscv.cpp
@@ -75,9 +75,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
Address target(SafepointSynchronize::safepoint_counter_addr());
__ relocate(target.rspec(), [&] {
- int32_t offset;
- __ la(rcounter_addr, target.target(), offset);
- __ addi(rcounter_addr, rcounter_addr, offset);
+ __ la(rcounter_addr, target.target());
});
Label slow;
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 7101f7d726e..b58590e790f 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -530,7 +530,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
movptr(t0, (address) b);
}
- // call indirectly to solve generation ordering problem
+ // Call indirectly to solve generation ordering problem
RuntimeAddress target(StubRoutines::verify_oop_subroutine_entry_address());
relocate(target.rspec(), [&] {
int32_t offset;
@@ -575,7 +575,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
movptr(t0, (address) b);
}
- // call indirectly to solve generation ordering problem
+ // Call indirectly to solve generation ordering problem
RuntimeAddress target(StubRoutines::verify_oop_subroutine_entry_address());
relocate(target.rspec(), [&] {
int32_t offset;
@@ -2569,27 +2569,6 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
}
}
-SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value) {
- int32_t offset = 0;
- _masm = masm;
- ExternalAddress target((address)flag_addr);
- _masm->relocate(target.rspec(), [&] {
- int32_t offset;
- _masm->la(t0, target.target(), offset);
- _masm->lbu(t0, Address(t0, offset));
- });
- if (value) {
- _masm->bnez(t0, _label);
- } else {
- _masm->beqz(t0, _label);
- }
-}
-
-SkipIfEqual::~SkipIfEqual() {
- _masm->bind(_label);
- _masm = nullptr;
-}
-
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
ld(dst, Address(xmethod, Method::const_offset()));
@@ -4210,7 +4189,7 @@ void MacroAssembler::read_polling_page(Register r, int32_t offset, relocInfo::re
});
}
-void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
+void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
#ifdef ASSERT
{
ThreadInVMfromUnknown tiv;
@@ -4511,14 +4490,15 @@ void MacroAssembler::decrementw(const Address dst, int32_t value, Register tmp1,
sw(tmp1, adr);
}
-void MacroAssembler::cmpptr(Register src1, Address src2, Label& equal) {
- assert_different_registers(src1, t0);
+void MacroAssembler::cmpptr(Register src1, const Address &src2, Label& equal, Register tmp) {
+ assert_different_registers(src1, tmp);
+ assert(src2.getMode() == Address::literal, "must be applied to a literal address");
relocate(src2.rspec(), [&] {
int32_t offset;
- la(t0, src2.target(), offset);
- ld(t0, Address(t0, offset));
+ la(tmp, src2.target(), offset);
+ ld(tmp, Address(tmp, offset));
});
- beq(src1, t0, equal);
+ beq(src1, tmp, equal);
}
void MacroAssembler::load_method_holder_cld(Register result, Register method) {
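
A hypothetical call site for the new cmpptr signature; since tmp defaults to t0, existing callers compile unchanged, while a caller whose t0 is live can now pass a different scratch register (sketch only, flag_addr is made up):

    Label L_equal;
    ExternalAddress flag_addr((address)some_global);  // hypothetical literal address
    // Compare x10 with the pointer stored at flag_addr, scratching t1 instead
    // of the default t0; branches to L_equal when they match.
    __ cmpptr(x10, flag_addr, L_equal, t1);
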
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index fda3badf350..b248db39933 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -1327,7 +1327,7 @@ class MacroAssembler: public Assembler {
void decrement(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
void decrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
- void cmpptr(Register src1, Address src2, Label& equal);
+ void cmpptr(Register src1, const Address &src2, Label& equal, Register tmp = t0);
void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = nullptr, Label* L_slow_path = nullptr);
void load_method_holder_cld(Register result, Register method);
@@ -1794,22 +1794,4 @@ class MacroAssembler: public Assembler {
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and it's
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */
-class SkipIfEqual {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
- ~SkipIfEqual();
-};
-
#endif // CPU_RISCV_MACROASSEMBLER_RISCV_HPP
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index cec6a5f9760..29c96112ead 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -2616,19 +2616,18 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti
__ reset_last_Java_frame(false);
// check for pending exceptions
Label pending;
- __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
- __ bnez(t0, pending);
+ __ ld(t1, Address(xthread, Thread::pending_exception_offset()));
+ __ bnez(t1, pending);
// get the returned Method*
__ get_vm_result_2(xmethod, xthread);
__ sd(xmethod, Address(sp, reg_saver.reg_offset_in_bytes(xmethod)));
- // x10 is where we want to jump, overwrite t0 which is saved and temporary
- __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t0)));
+ // x10 is where we want to jump, overwrite t1 which is saved and temporary
+ __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t1)));
reg_saver.restore_live_registers(masm);
// We are back to the original state on entry and ready to go.
- __ mv(t1, t0);
__ jr(t1);
// Pending exception after the safepoint
diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
index a4744dfc05c..bce0c8f1f3d 100644
--- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
@@ -946,7 +946,7 @@ class StubGenerator: public StubCodeGenerator {
// The size of copy32_loop body increases significantly with ZGC GC barriers.
// Need conditional far branches to reach a point beyond the loop in this case.
- bool is_far = UseZGC && ZGenerational;
+ bool is_far = UseZGC;
__ beqz(count, done, is_far);
__ slli(cnt, count, exact_log2(granularity));
@@ -2276,6 +2276,174 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
}
+ void generate_aes_loadkeys(const Register &key, VectorRegister *working_vregs, int rounds) {
+ const int step = 16;
+ for (int i = 0; i < rounds; i++) {
+ __ vle32_v(working_vregs[i], key);
+ // The round keys are stored as a little-endian int array, while the
+ // vaes* instructions operate on big-endian data, so byte-swap each
+ // key element here with the vrev8.v instruction.
+ __ vrev8_v(working_vregs[i], working_vregs[i]);
+ __ addi(key, key, step);
+ }
+ }
+
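
Why the swap works: with SEW=32, vrev8.v reverses the bytes inside every 32-bit element, i.e. a per-element byte swap. A scalar analogue of one element, illustrative only:

    #include <cstdint>

    // Scalar equivalent of what vrev8.v does to each 32-bit element:
    // reverse the four bytes of one round-key word.
    static inline uint32_t bswap32(uint32_t x) {
      return ((x & 0x000000FFu) << 24) |
             ((x & 0x0000FF00u) <<  8) |
             ((x & 0x00FF0000u) >>  8) |
             ((x & 0xFF000000u) >> 24);
    }
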
+ void generate_aes_encrypt(const VectorRegister &res, VectorRegister *working_vregs, int rounds) {
+ assert(rounds <= 15, "rounds should be less than or equal to working_vregs size");
+
+ __ vxor_vv(res, res, working_vregs[0]);
+ for (int i = 1; i < rounds - 1; i++) {
+ __ vaesem_vv(res, working_vregs[i]);
+ }
+ __ vaesef_vv(res, working_vregs[rounds - 1]);
+ }
+
+ // Arguments:
+ //
+ // Inputs:
+ // c_rarg0 - source byte array address
+ // c_rarg1 - destination byte array address
+ // c_rarg2 - K (key) in little endian int array
+ //
+ address generate_aescrypt_encryptBlock() {
+ assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
+
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
+
+ Label L_aes128, L_aes192;
+
+ const Register from = c_rarg0; // source array address
+ const Register to = c_rarg1; // destination array address
+ const Register key = c_rarg2; // key array address
+ const Register keylen = c_rarg3;
+
+ VectorRegister working_vregs[] = {
+ v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18
+ };
+ const VectorRegister res = v19;
+
+ address start = __ pc();
+ __ enter();
+
+ __ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+
+ __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
+ __ vle32_v(res, from);
+
+ __ mv(t2, 52);
+ __ blt(keylen, t2, L_aes128);
+ __ beq(keylen, t2, L_aes192);
+ // Otherwise fall through to the biggest case (256-bit key size)
+
+ // Note: the following function performs key += 15*16
+ generate_aes_loadkeys(key, working_vregs, 15);
+ generate_aes_encrypt(res, working_vregs, 15);
+ __ vse32_v(res, to);
+ __ mv(c_rarg0, 0);
+ __ leave();
+ __ ret();
+
+ __ bind(L_aes192);
+ // Note: the following function performs key += 13*16
+ generate_aes_loadkeys(key, working_vregs, 13);
+ generate_aes_encrypt(res, working_vregs, 13);
+ __ vse32_v(res, to);
+ __ mv(c_rarg0, 0);
+ __ leave();
+ __ ret();
+
+ __ bind(L_aes128);
+ // Note: the following function performs key += 11*16
+ generate_aes_loadkeys(key, working_vregs, 11);
+ generate_aes_encrypt(res, working_vregs, 11);
+ __ vse32_v(res, to);
+ __ mv(c_rarg0, 0);
+ __ leave();
+ __ ret();
+
+ return start;
+ }
+
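
The keylen dispatch above leans on the layout of the expanded key schedule: AES with r rounds stores 4 * (r + 1) ints, so AES-128/192/256 yield 44/52/60 ints and 11/13/15 round keys. A sketch of the mapping implemented by the mv t2, 52 / blt / beq sequence (hypothetical helper, not patch code):

    // Number of round keys for a given expanded-key length in ints.
    static int aes_round_keys(int keylen_ints) {
      if (keylen_ints < 52)  return 11;  // AES-128: 44 ints, 10 rounds
      if (keylen_ints == 52) return 13;  // AES-192: 52 ints, 12 rounds
      return 15;                         // AES-256: 60 ints, 14 rounds
    }
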
+ void generate_aes_decrypt(const VectorRegister &res, VectorRegister *working_vregs, int rounds) {
+ assert(rounds <= 15, "rounds should be less than or equal to working_vregs size");
+
+ __ vxor_vv(res, res, working_vregs[rounds - 1]);
+ for (int i = rounds - 2; i > 0; i--) {
+ __ vaesdm_vv(res, working_vregs[i]);
+ }
+ __ vaesdf_vv(res, working_vregs[0]);
+ }
+
+ // Arguments:
+ //
+ // Inputs:
+ // c_rarg0 - source byte array address
+ // c_rarg1 - destination byte array address
+ // c_rarg2 - K (key) in little endian int array
+ //
+ address generate_aescrypt_decryptBlock() {
+ assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
+
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
+
+ Label L_aes128, L_aes192;
+
+ const Register from = c_rarg0; // source array address
+ const Register to = c_rarg1; // destination array address
+ const Register key = c_rarg2; // key array address
+ const Register keylen = c_rarg3;
+
+ VectorRegister working_vregs[] = {
+ v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18
+ };
+ const VectorRegister res = v19;
+
+ address start = __ pc();
+ __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+ __ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+
+ __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
+ __ vle32_v(res, from);
+
+ __ mv(t2, 52);
+ __ blt(keylen, t2, L_aes128);
+ __ beq(keylen, t2, L_aes192);
+ // Otherwise fall through to the biggest case (256-bit key size)
+
+ // Note: the following function performs key += 15*16
+ generate_aes_loadkeys(key, working_vregs, 15);
+ generate_aes_decrypt(res, working_vregs, 15);
+ __ vse32_v(res, to);
+ __ mv(c_rarg0, 0);
+ __ leave();
+ __ ret();
+
+ __ bind(L_aes192);
+ // Note: the following function performs key += 13*16
+ generate_aes_loadkeys(key, working_vregs, 13);
+ generate_aes_decrypt(res, working_vregs, 13);
+ __ vse32_v(res, to);
+ __ mv(c_rarg0, 0);
+ __ leave();
+ __ ret();
+
+ __ bind(L_aes128);
+ // Note: the following function performs key += 11*16
+ generate_aes_loadkeys(key, working_vregs, 11);
+ generate_aes_decrypt(res, working_vregs, 11);
+ __ vse32_v(res, to);
+ __ mv(c_rarg0, 0);
+ __ leave();
+ __ ret();
+
+ return start;
+ }
+
// code for comparing 16 bytes of strings with same encoding
void compare_string_16_bytes_same(Label &DIFF1, Label &DIFF2) {
const Register result = x10, str1 = x11, cnt1 = x12, str2 = x13, tmp1 = x28, tmp2 = x29, tmp4 = x7, tmp5 = x31;
@@ -6294,6 +6462,11 @@ static const int64_t right_3_bits = right_n_bits(3);
StubRoutines::_montgomerySquare = g.generate_square();
}
+ if (UseAESIntrinsics) {
+ StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
+ StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
+ }
+
if (UsePoly1305Intrinsics) {
StubRoutines::_poly1305_processBlocks = generate_poly1305_processBlocks();
}
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index 0c20f0e3f92..9f37774e297 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -2465,7 +2465,7 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
// take the time to call into the VM.
Label L1;
assert_different_registers(cache, index, x10);
- ExternalAddress target((address) JvmtiExport::get_field_access_count_addr());
+ ExternalAddress target(JvmtiExport::get_field_access_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
@@ -2676,7 +2676,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
// we take the time to call into the VM.
Label L1;
assert_different_registers(cache, index, x10);
- ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr());
+ ExternalAddress target(JvmtiExport::get_field_modification_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
@@ -2969,7 +2969,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
// Check to see if a field modification watch has been set before
// we take the time to call into the VM.
Label L2;
- ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr());
+ ExternalAddress target(JvmtiExport::get_field_modification_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
@@ -3101,7 +3101,7 @@ void TemplateTable::fast_accessfield(TosState state) {
// Check to see if a field access watch has been set before we
// take the time to call into the VM.
Label L1;
- ExternalAddress target((address)JvmtiExport::get_field_access_count_addr());
+ ExternalAddress target(JvmtiExport::get_field_access_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
index e9c6226f446..c32d2af9939 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -122,17 +122,6 @@ void VM_Version::common_initialize() {
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 0);
}
- if (UseAES || UseAESIntrinsics) {
- if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
- warning("AES instructions are not available on this CPU");
- FLAG_SET_DEFAULT(UseAES, false);
- }
- if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
- warning("AES intrinsics are not available on this CPU");
- FLAG_SET_DEFAULT(UseAESIntrinsics, false);
- }
- }
-
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
@@ -429,6 +418,23 @@ void VM_Version::c2_initialize() {
if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA3Intrinsics || UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA, false);
}
+
+ // AES
+ if (UseZvkn) {
+ UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
+ UseAESIntrinsics =
+ UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
+ if (UseAESIntrinsics && !UseAES) {
+ warning("UseAESIntrinsics enabled, but UseAES not, enabling");
+ UseAES = true;
+ }
+ } else if (UseAESIntrinsics || UseAES) {
+ if (!FLAG_IS_DEFAULT(UseAESIntrinsics) || !FLAG_IS_DEFAULT(UseAES)) {
+ warning("AES intrinsics require Zvkn extension (not available on this CPU).");
+ }
+ FLAG_SET_DEFAULT(UseAES, false);
+ FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+ }
}
#endif // COMPILER2
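
Restating the new AES flag resolution as a standalone model (hypothetical; the real code goes through HotSpot's FLAG_IS_DEFAULT / FLAG_SET_DEFAULT machinery): with Zvkn, both flags default on and an explicit +UseAESIntrinsics drags UseAES along with a warning; without Zvkn, an explicit request warns once and both end up off.

    #include <cstdio>

    struct Flag { bool value; bool is_default; };

    static void resolve_aes(bool zvkn, Flag& aes, Flag& intr) {
      if (zvkn) {
        aes.value  = aes.value || aes.is_default;
        intr.value = intr.value || (aes.value && intr.is_default);
        if (intr.value && !aes.value) {
          aes.value = true;  // intrinsics imply the base flag (warns in the real code)
        }
      } else if (aes.value || intr.value) {
        if (!aes.is_default || !intr.is_default) {
          std::printf("warning: AES intrinsics require the Zvkn extension\n");
        }
        aes.value = intr.value = false;
      }
    }
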
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index 8bf11816219..d2e860aa320 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -131,9 +131,19 @@ void LIR_Assembler::osr_entry() {
// copied into place by code emitted in the IR.
Register OSR_buf = osrBufferPointer()->as_register();
- { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
- int monitor_offset = BytesPerWord * method()->max_locals() +
- (2 * BytesPerWord) * (number_of_locks - 1);
+ {
+ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
+
+ const int locals_space = BytesPerWord * method()->max_locals();
+ int monitor_offset = locals_space + (2 * BytesPerWord) * (number_of_locks - 1);
+ bool large_offset = !Immediate::is_simm20(monitor_offset + BytesPerWord) && number_of_locks > 0;
+
+ if (large_offset) {
+ // z_lg can only handle a displacement that fits in a 20-bit signed integer
+ __ z_algfi(OSR_buf, locals_space);
+ monitor_offset -= locals_space;
+ }
+
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
@@ -147,6 +157,10 @@ void LIR_Assembler::osr_entry() {
__ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf);
__ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i));
}
+
+ if (large_offset) {
+ __ z_slgfi(OSR_buf, locals_space);
+ }
}
}
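
Background for the guard: the z/Architecture long-displacement forms used by z_lg carry a 20-bit signed displacement, roughly +/-512 KiB. Immediate::is_simm20 is the real helper; a self-contained sketch of the same range check, illustrative only:

    #include <cstdint>

    // True iff x fits a 20-bit signed immediate: [-2^19, 2^19 - 1].
    static inline bool is_simm20(int64_t x) {
      return x >= -(int64_t(1) << 19) && x <= (int64_t(1) << 19) - 1;
    }
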
diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp
index d00b6c3e2cc..5e80817aaba 100644
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp
@@ -2131,18 +2131,6 @@ void InterpreterMacroAssembler::notify_method_exit(bool native_method,
if (!native_method) pop(state);
bind(jvmti_post_done);
}
-
-#if 0
- // Dtrace currently not supported on z/Architecture.
- {
- SkipIfEqual skip(this, &DTraceMethodProbes, false);
- push(state);
- get_method(c_rarg1);
- call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
- r15_thread, c_rarg1);
- pop(state);
- }
-#endif
}
void InterpreterMacroAssembler::skip_if_jvmti_mode(Label &Lskip, Register Rscratch) {
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 6bfe5125959..6b739553b15 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -3667,7 +3667,7 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
// We need a full fence after clearing owner to avoid stranding.
z_fence();
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
z_brne(check_succ);
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
@@ -6016,21 +6016,6 @@ void MacroAssembler::zap_from_to(Register low, Register high, Register val, Regi
}
#endif // !PRODUCT
-SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
- _masm = masm;
- _masm->load_absolute_address(_rscratch, (address)flag_addr);
- _masm->load_and_test_int(_rscratch, Address(_rscratch));
- if (value) {
- _masm->z_brne(_label); // Skip if true, i.e. != 0.
- } else {
- _masm->z_bre(_label); // Skip if false, i.e. == 0.
- }
-}
-
-SkipIfEqual::~SkipIfEqual() {
- _masm->bind(_label);
-}
-
// Implements lightweight-locking.
// - obj: the object to be locked, contents preserved.
// - temp1, temp2: temporary registers, contents destroyed.
@@ -6510,7 +6495,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis
// We need a full fence after clearing owner to avoid stranding.
z_fence();
- // Check if the entry lists are empty.
+ // Check if the entry lists are empty (EntryList first - by convention).
load_and_test_long(tmp2, EntryList_address);
z_brne(check_succ);
load_and_test_long(tmp2, cxq_address);
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
index 5d3a4c29940..061817a1289 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
@@ -1064,24 +1064,6 @@ class MacroAssembler: public Assembler {
};
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and it's
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */
-class SkipIfEqual {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register _rscratch);
- ~SkipIfEqual();
-};
-
#ifdef ASSERT
// Return false (e.g. important for our impl. of virtual calls).
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index 0b021bbbf5e..e4ab99bf1c3 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -557,6 +557,14 @@ bool Assembler::needs_rex2(Register reg1, Register reg2, Register reg3) {
return rex2;
}
+#ifndef PRODUCT
+bool Assembler::needs_evex(XMMRegister reg1, XMMRegister reg2, XMMRegister reg3) {
+ return (reg1->is_valid() && reg1->encoding() >= 16) ||
+ (reg2->is_valid() && reg2->encoding() >= 16) ||
+ (reg3->is_valid() && reg3->encoding() >= 16);
+}
+#endif
+
bool Assembler::needs_eevex(Register reg1, Register reg2, Register reg3) {
return needs_rex2(reg1, reg2, reg3);
}
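
needs_evex answers a single question: does any operand sit in xmm16-xmm31, registers only the EVEX prefix can encode? It is assert-only (hence #ifndef PRODUCT) and shows up in the new checks below in this shape, taken from the additions in this patch:

    // High XMM registers force EVEX, which for byte/word ops requires AVX512BW.
    assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
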
@@ -3525,7 +3533,7 @@ void Assembler::vmaskmovpd(Address dst, XMMRegister src, XMMRegister mask, int v
// Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64)
void Assembler::evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
@@ -3542,7 +3550,7 @@ void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
}
void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -3562,7 +3570,7 @@ void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
}
void Assembler::evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
assert(src != xnoreg, "sanity");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
@@ -3577,13 +3585,18 @@ void Assembler::evmovdqub(Address dst, KRegister mask, XMMRegister src, bool mer
emit_operand(src, dst, 0);
}
+void Assembler::evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) {
+ // Unmasked instruction
+ evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
+}
+
void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
// Unmasked instruction
evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
}
void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -3603,7 +3616,7 @@ void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
}
void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
assert(src != xnoreg, "sanity");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
@@ -3618,6 +3631,19 @@ void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool mer
emit_operand(src, dst, 0);
}
+void Assembler::evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ attributes.set_is_evex_instruction();
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
+ emit_int16(0x6F, (0xC0 | encode));
+}
+
+
void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
// Unmasked instruction
evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
@@ -4805,6 +4831,7 @@ void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int ve
// In this context, kdst is written the mask used to process the equal components
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -4812,7 +4839,8 @@ void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int
}
void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -4824,7 +4852,8 @@ void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vect
}
void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -4837,16 +4866,34 @@ void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Addre
emit_operand(as_Register(dst_enc), src, 0);
}
+void Assembler::evpcmpub(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
+ assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int24(0x3E, (0xC0 | encode), vcc);
+}
+
void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int24(0x3E, (0xC0 | encode), vcc);
}
+void Assembler::evpcmpud(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int24(0x1E, (0xC0 | encode), vcc);
+}
+
void Assembler::evpcmpuq(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
- assert(VM_Version::supports_avx512vl(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -4854,7 +4901,8 @@ void Assembler::evpcmpuq(KRegister kdst, XMMRegister nds, XMMRegister src, Compa
}
void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -4868,6 +4916,7 @@ void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, Compariso
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
@@ -4879,7 +4928,8 @@ void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vect
}
void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
- assert(VM_Version::supports_avx512vlbw(), "");
+ assert(VM_Version::supports_avx512bw(), "");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -8353,6 +8403,161 @@ void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector
emit_operand(dst, src, 0);
}
+void Assembler::vpaddsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xEC, (0xC0 | encode));
+}
+
+void Assembler::vpaddsb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xEC);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpaddsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xED, (0xC0 | encode));
+}
+
+void Assembler::vpaddsw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xED);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpaddusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDC, (0xC0 | encode));
+}
+
+void Assembler::vpaddusb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDC);
+ emit_operand(dst, src, 0);
+}
+
+
+void Assembler::vpaddusw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDD, (0xC0 | encode));
+}
+
+void Assembler::vpaddusw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDD);
+ emit_operand(dst, src, 0);
+}
+
+
+void Assembler::vpsubsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xE8, (0xC0 | encode));
+}
+
+void Assembler::vpsubsb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xE8);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpsubsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xE9, (0xC0 | encode));
+}
+
+void Assembler::vpsubsw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xE9);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xD8, (0xC0 | encode));
+}
+
+void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xD8);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpsubusw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xD9, (0xC0 | encode));
+}
+
+void Assembler::vpsubusw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xD9);
+ emit_operand(dst, src, 0);
+}
+
+
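
These new emitters cover the VEX/EVEX forms of the saturating add/subtract family (paddsb/paddsw, paddusb/paddusw, psubsb/psubsw, psubusb/psubusw): lanes clamp to the type's range instead of wrapping. A scalar analogue of one signed-byte lane, illustrative only:

    #include <cstdint>

    // One vpaddsb lane: signed 8-bit add clamped to [-128, 127].
    static inline int8_t adds_i8(int8_t a, int8_t b) {
      int sum = int(a) + int(b);
      if (sum > INT8_MAX) return INT8_MAX;
      if (sum < INT8_MIN) return INT8_MIN;
      return (int8_t)sum;
    }
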
void Assembler::psubb(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
@@ -8382,13 +8587,6 @@ void Assembler::psubq(XMMRegister dst, XMMRegister src) {
emit_int8((0xC0 | encode));
}
-void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
- assert(UseAVX > 0, "requires some form of AVX");
- InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
- int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int16((unsigned char)0xD8, (0xC0 | encode));
-}
-
void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(UseAVX > 0, "requires some form of AVX");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
@@ -8518,6 +8716,15 @@ void Assembler::vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int
emit_int16((unsigned char)0xF4, (0xC0 | encode));
}
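+// VPMULDQ: signed multiply of the even-numbered dword lanes, widening to qword
+// results; the unsigned counterpart VPMULUDQ (0xF4) is emitted just above.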
+void Assembler::vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
+ (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_rex_vex_w_reverted();
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x28, (0xC0 | encode));
+}
+
void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
assert(UseAVX > 0, "requires some form of AVX");
InstructionMark im(this);
@@ -8565,14 +8772,6 @@ void Assembler::vpminsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int v
emit_int16(0x38, (0xC0 | encode));
}
-void Assembler::vpminub(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
- assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
- (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
- InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
- int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int16(0xDA, (0xC0 | encode));
-}
-
void Assembler::pminsw(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sse2(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
@@ -8718,66 +8917,406 @@ void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int ve
emit_int16(0x5F, (0xC0 | encode));
}
-// Shift packed integers left by specified number of bits.
-void Assembler::psllw(XMMRegister dst, int shift) {
- NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
- // XMM6 is for /6 encoding: 66 0F 71 /6 ib
- int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
-}
-
-void Assembler::pslld(XMMRegister dst, int shift) {
- NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
- // XMM6 is for /6 encoding: 66 0F 72 /6 ib
- int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+void Assembler::vpminub(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDA, (0xC0 | encode));
}
-void Assembler::psllq(XMMRegister dst, int shift) {
- NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
- // XMM6 is for /6 encoding: 66 0F 73 /6 ib
- int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
+void Assembler::vpminub(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDA);
+ emit_operand(dst, src, 0);
}
-void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
- NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
- int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int16((unsigned char)0xF1, (0xC0 | encode));
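+// EVEX masked forms: with merge == true, destination lanes whose mask bit is
+// clear keep their old value (merge-masking); otherwise the default zeroing
+// semantics apply, which is why reset_is_clear_context() is called only on
+// the merge path.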
+void Assembler::evpminub(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDA, (0xC0 | encode));
}
-void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
- NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
- int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int16((unsigned char)0xF2, (0xC0 | encode));
+void Assembler::evpminub(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDA);
+ emit_operand(dst, src, 0);
}
-void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
- NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
- attributes.set_rex_vex_w_reverted();
- int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int16((unsigned char)0xF3, (0xC0 | encode));
+void Assembler::vpminuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3A, (0xC0 | encode));
}
-void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
- assert(UseAVX > 0, "requires some form of AVX");
+void Assembler::vpminuw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
+ InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
- // XMM6 is for /6 encoding: 66 0F 71 /6 ib
- int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
- emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3A);
+ emit_operand(dst, src, 0);
}
-void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
- assert(UseAVX > 0, "requires some form of AVX");
- NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
- // XMM6 is for /6 encoding: 66 0F 72 /6 ib
+void Assembler::evpminuw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3A, (0xC0 | encode));
+}
+
+void Assembler::evpminuw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3A);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpminud(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3B, (0xC0 | encode));
+}
+
+void Assembler::vpminud(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3B);
+ emit_operand(dst, src, 0);
+}
+
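+// Dword and qword unsigned min/max are AVX-512F operations, so the masked
+// forms below only assert supports_evex() (plus AVX512VL under 512 bits),
+// unlike the byte/word forms above, which additionally require AVX512BW.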
+void Assembler::evpminud(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3B, (0xC0 | encode));
+}
+
+void Assembler::evpminud(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3B);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpminuq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3B, (0xC0 | encode));
+}
+
+void Assembler::evpminuq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3B);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpmaxub(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
+ (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDE, (0xC0 | encode));
+}
+
+void Assembler::vpmaxub(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
+ (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDE);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpmaxub(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDE, (0xC0 | encode));
+}
+
+void Assembler::evpmaxub(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDE);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpmaxuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
+ (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3E, (0xC0 | encode));
+}
+
+void Assembler::vpmaxuw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
+ (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
+ assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3E);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpmaxuw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3E, (0xC0 | encode));
+}
+
+void Assembler::evpmaxuw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3E);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vpmaxud(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(UseAVX > 0, "");
+ assert((vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3F, (0xC0 | encode));
+}
+
+void Assembler::vpmaxud(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+ assert(UseAVX > 0, "");
+ assert((vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3F);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpmaxud(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3F, (0xC0 | encode));
+}
+
+void Assembler::evpmaxud(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3F);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpmaxuq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x3F, (0xC0 | encode));
+}
+
+void Assembler::evpmaxuq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ assert(VM_Version::supports_evex(), "");
+ assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x3F);
+ emit_operand(dst, src, 0);
+}
+
+// Shift packed integers left by specified number of bits.
+void Assembler::psllw(XMMRegister dst, int shift) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ // XMM6 is for /6 encoding: 66 0F 71 /6 ib
+ int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
+}
+
+void Assembler::pslld(XMMRegister dst, int shift) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ // XMM6 is for /6 encoding: 66 0F 72 /6 ib
+ int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+}
+
+void Assembler::psllq(XMMRegister dst, int shift) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ // XMM6 is for /6 encoding: 66 0F 73 /6 ib
+ int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
+}
+
+void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xF1, (0xC0 | encode));
+}
+
+void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xF2, (0xC0 | encode));
+}
+
+void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_rex_vex_w_reverted();
+ int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xF3, (0xC0 | encode));
+}
+
+void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
+ assert(UseAVX > 0, "requires some form of AVX");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+ // XMM6 is for /6 encoding: 66 0F 71 /6 ib
+ int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
+}
+
+void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
+ assert(UseAVX > 0, "requires some form of AVX");
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ // XMM6 is for /6 encoding: 66 0F 72 /6 ib
int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}
@@ -10421,6 +10960,223 @@ void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Addres
emit_operand(dst, src, 0);
}
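+// Masked EVEX forms of the saturating add/subtract family: opcodes 0xEC/0xED
+// are the signed adds, 0xDC/0xDD the unsigned adds, and 0xE8/0xE9 resp.
+// 0xD8/0xD9 the matching signed/unsigned subtracts.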
+void Assembler::evpaddsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xEC, (0xC0 | encode));
+}
+
+void Assembler::evpaddsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xEC);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpaddsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xED, (0xC0 | encode));
+}
+
+void Assembler::evpaddsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xED);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpaddusb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDC, (0xC0 | encode));
+}
+
+void Assembler::evpaddusb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDC);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpaddusw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xDD, (0xC0 | encode));
+}
+
+void Assembler::evpaddusw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xDD);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpsubsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xE8, (0xC0 | encode));
+}
+
+void Assembler::evpsubsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xE8);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpsubsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xE9, (0xC0 | encode));
+}
+
+void Assembler::evpsubsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xE9);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpsubusb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xD8, (0xC0 | encode));
+}
+
+void Assembler::evpsubusb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xD8);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::evpsubusw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xD9, (0xC0 | encode));
+}
+
+void Assembler::evpsubusw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xD9);
+ emit_operand(dst, src, 0);
+}
+
void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
@@ -10504,6 +11260,18 @@ void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, Addre
emit_operand(dst, src, 0);
}
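+// Masked VPMULHW: stores the high 16 bits of each signed word product.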
+void Assembler::evpmulhw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_embedded_opmask_register_specifier(mask);
+ if (merge) {
+ attributes.reset_is_clear_context();
+ }
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+ emit_int16((unsigned char)0xE5, (0xC0 | encode));
+}
+
void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
assert(VM_Version::supports_evex(), "");
assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
@@ -16172,3 +16940,28 @@ void Assembler::evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int16(0x7D, (0xC0 | encode));
}
+
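+// VPERMT2W/D/Q: indices taken from nds select elements out of the two-register
+// table {dst, src}, and the result overwrites dst. The B/W forms share opcode
+// 0x7D and the D/Q forms 0x7E, with EVEX.W picking the wider element size.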
+void Assembler::evpermt2w(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(vector_len <= AVX_256bit ? VM_Version::supports_avx512vlbw() : VM_Version::supports_avx512bw(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x7D, (0xC0 | encode));
+}
+
+void Assembler::evpermt2d(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x7E, (0xC0 | encode));
+}
+
+void Assembler::evpermt2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int16(0x7E, (0xC0 | encode));
+}
+
diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp
index 36dfafc8b5d..420c28254d5 100644
--- a/src/hotspot/cpu/x86/assembler_x86.hpp
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp
@@ -780,6 +780,7 @@ class Assembler : public AbstractAssembler {
bool needs_eevex(Register reg1, Register reg2 = noreg, Register reg3 = noreg);
bool needs_eevex(int enc1, int enc2 = -1, int enc3 = -1);
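+ // Debug-only helper used in asserts: true when any of the given XMM operands
+ // can only be encoded with EVEX (e.g. xmm16-xmm31), so asserts can demand
+ // AVX512VL/BW support only when EVEX encoding is actually unavoidable.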
+ NOT_PRODUCT(bool needs_evex(XMMRegister reg1, XMMRegister reg2 = xnoreg, XMMRegister reg3 = xnoreg);)
void rex_prefix(Address adr, XMMRegister xreg,
VexSimdPrefix pre, VexOpcode opc, bool rex_w);
@@ -1756,6 +1757,7 @@ class Assembler : public AbstractAssembler {
void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
+ void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
void evmovdquw(XMMRegister dst, Address src, int vector_len);
void evmovdquw(Address dst, XMMRegister src, int vector_len);
void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
@@ -1969,6 +1971,9 @@ class Assembler : public AbstractAssembler {
void evpermi2ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpermi2pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void evpermt2w(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void evpermt2d(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void evpermt2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void pause();
@@ -1992,9 +1997,12 @@ class Assembler : public AbstractAssembler {
void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
+ void evpcmpub(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
+
void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);
+ void evpcmpud(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
void evpcmpuq(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
void pcmpeqw(XMMRegister dst, XMMRegister src);
@@ -2678,6 +2686,40 @@ class Assembler : public AbstractAssembler {
void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ // Saturating packed instructions.
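+ // These clamp to the element type's range instead of wrapping; e.g. for
+ // unsigned bytes, 0xF0 + 0x20 saturates to 0xFF where plain paddb wraps to 0x10.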
+ void vpaddsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vpaddsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vpaddusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vpaddusw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void evpaddsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpaddsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpaddusb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpaddusw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void vpsubsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vpsubsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vpsubusw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void evpsubsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpsubsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpsubusb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpsubusw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void vpaddsb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vpaddsw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vpaddusb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vpaddusw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void evpaddsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpaddsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpaddusb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpaddusw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void vpsubsb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vpsubsw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vpsubusb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vpsubusw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void evpsubsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpsubsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpsubusb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpsubusw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+
// Leaf level assembler routines for masked operations.
void evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
void evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
@@ -2703,6 +2745,7 @@ class Assembler : public AbstractAssembler {
void evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
void evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
void evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpmulhw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
void evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
void evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
void evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
@@ -2821,7 +2864,6 @@ class Assembler : public AbstractAssembler {
void psubw(XMMRegister dst, XMMRegister src);
void psubd(XMMRegister dst, XMMRegister src);
void psubq(XMMRegister dst, XMMRegister src);
- void vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
@@ -2839,6 +2881,7 @@ class Assembler : public AbstractAssembler {
void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void evpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
@@ -2847,7 +2890,6 @@ class Assembler : public AbstractAssembler {
// Minimum of packed integers
void pminsb(XMMRegister dst, XMMRegister src);
void vpminsb(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
- void vpminub(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
void pminsw(XMMRegister dst, XMMRegister src);
void vpminsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
void pminsd(XMMRegister dst, XMMRegister src);
@@ -2871,6 +2913,38 @@ class Assembler : public AbstractAssembler {
void maxpd(XMMRegister dst, XMMRegister src);
void vmaxpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
+ // Unsigned maximum packed integers.
+ void vpmaxub(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
+ void vpmaxuw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
+ void vpmaxud(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
+ void vpmaxub(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
+ void vpmaxuw(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
+ void vpmaxud(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
+ void evpmaxub(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpmaxuw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpmaxud(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpmaxuq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpmaxub(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpmaxuw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpmaxud(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpmaxuq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+
+ // Unsigned minimum packed integers.
+ void vpminub(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
+ void vpminuw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
+ void vpminud(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
+ void vpminub(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
+ void vpminuw(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
+ void vpminud(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
+ void evpminub(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpminuw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpminud(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpminuq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpminub(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpminuw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpminud(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpminuq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+
// Shift left packed integers
void psllw(XMMRegister dst, int shift);
void pslld(XMMRegister dst, int shift);
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index 6d9812c11ae..64265a96909 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -1333,10 +1333,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
}
#endif
- if (!(UseZGC && !ZGenerational)) {
- // Load barrier has not yet been applied, so ZGC can't verify the oop here
- __ verify_oop(dest->as_register());
- }
+ __ verify_oop(dest->as_register());
}
}
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 879f33ede2d..61c8036b1ce 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -477,9 +477,9 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
- movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
- orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+ // Check if the entry lists are empty (EntryList first - by convention).
+ movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+ orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
jccb(Assembler::zero, LSuccess); // If so we are done.
// Check if there is a successor.
@@ -806,9 +806,9 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax,
// StoreLoad achieves this.
membar(StoreLoad);
- // Check if the entry lists are empty.
- movptr(reg_rax, cxq_address);
- orptr(reg_rax, EntryList_address);
+ // Check if the entry lists are empty (EntryList first - by convention).
+ movptr(reg_rax, EntryList_address);
+ orptr(reg_rax, cxq_address);
jccb(Assembler::zero, unlocked); // If so we are done.
// Check if there is a successor.
@@ -939,6 +939,72 @@ void C2_MacroAssembler::pminmax(int opcode, BasicType elem_bt, XMMRegister dst,
}
}
+void C2_MacroAssembler::vpuminmax(int opcode, BasicType elem_bt, XMMRegister dst,
+ XMMRegister src1, Address src2, int vlen_enc) {
+ assert(opcode == Op_UMinV || opcode == Op_UMaxV, "sanity");
+ if (opcode == Op_UMinV) {
+ switch(elem_bt) {
+ case T_BYTE: vpminub(dst, src1, src2, vlen_enc); break;
+ case T_SHORT: vpminuw(dst, src1, src2, vlen_enc); break;
+ case T_INT: vpminud(dst, src1, src2, vlen_enc); break;
+ case T_LONG: evpminuq(dst, k0, src1, src2, false, vlen_enc); break;
+ default: fatal("Unsupported type %s", type2name(elem_bt)); break;
+ }
+ } else {
+ assert(opcode == Op_UMaxV, "required");
+ switch(elem_bt) {
+ case T_BYTE: vpmaxub(dst, src1, src2, vlen_enc); break;
+ case T_SHORT: vpmaxuw(dst, src1, src2, vlen_enc); break;
+ case T_INT: vpmaxud(dst, src1, src2, vlen_enc); break;
+ case T_LONG: evpmaxuq(dst, k0, src1, src2, false, vlen_enc); break;
+ default: fatal("Unsupported type %s", type2name(elem_bt)); break;
+ }
+ }
+}
+
+void C2_MacroAssembler::vpuminmaxq(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc) {
+ // T1 = -1
+ vpcmpeqq(xtmp1, xtmp1, xtmp1, vlen_enc);
+ // T1 = -1 << 63
+ vpsllq(xtmp1, xtmp1, 63, vlen_enc);
+ // Convert SRC2 to signed value i.e. T2 = T1 + SRC2
+ vpaddq(xtmp2, xtmp1, src2, vlen_enc);
+ // Convert SRC1 to signed value i.e. T1 = T1 + SRC1
+ vpaddq(xtmp1, xtmp1, src1, vlen_enc);
+ // Mask = T2 > T1
+ vpcmpgtq(xtmp1, xtmp2, xtmp1, vlen_enc);
+ if (opcode == Op_UMaxV) {
+ // Res = Mask ? Src2 : Src1
+ vpblendvb(dst, src1, src2, xtmp1, vlen_enc);
+ } else {
+ // Res = Mask ? Src1 : Src2
+ vpblendvb(dst, src2, src1, xtmp1, vlen_enc);
+ }
+}
+
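A scalar sketch may help here: vpuminmaxq works because AVX2 has no unsigned 64-bit compare, so both operands are biased by 2^63 into the signed domain where vpcmpgtq applies. The helper below is illustrative only (hypothetical name, not part of this patch):

```cpp
#include <cassert>
#include <cstdint>

// Scalar model of the vpuminmaxq bias trick: adding 2^63 maps unsigned
// order onto signed order, so a signed compare can drive the blend.
static uint64_t umax_via_signed(uint64_t a, uint64_t b) {
  const uint64_t bias = UINT64_C(1) << 63;      // T1 = -1 << 63 in the vector code
  int64_t sa = static_cast<int64_t>(a + bias);  // T1 = T1 + SRC1
  int64_t sb = static_cast<int64_t>(b + bias);  // T2 = T1 + SRC2
  return sb > sa ? b : a;                       // Mask = T2 > T1; Res = Mask ? Src2 : Src1
}

int main() {
  assert(umax_via_signed(5, 3) == 5);
  assert(umax_via_signed(UINT64_C(0xFFFFFFFFFFFFFFFF), 1) == UINT64_C(0xFFFFFFFFFFFFFFFF));
  return 0;
}
```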
+void C2_MacroAssembler::vpuminmax(int opcode, BasicType elem_bt, XMMRegister dst,
+ XMMRegister src1, XMMRegister src2, int vlen_enc) {
+ assert(opcode == Op_UMinV || opcode == Op_UMaxV, "sanity");
+ if (opcode == Op_UMinV) {
+ switch(elem_bt) {
+ case T_BYTE: vpminub(dst, src1, src2, vlen_enc); break;
+ case T_SHORT: vpminuw(dst, src1, src2, vlen_enc); break;
+ case T_INT: vpminud(dst, src1, src2, vlen_enc); break;
+ case T_LONG: evpminuq(dst, k0, src1, src2, false, vlen_enc); break;
+ default: fatal("Unsupported type %s", type2name(elem_bt)); break;
+ }
+ } else {
+ assert(opcode == Op_UMaxV, "required");
+ switch(elem_bt) {
+ case T_BYTE: vpmaxub(dst, src1, src2, vlen_enc); break;
+ case T_SHORT: vpmaxuw(dst, src1, src2, vlen_enc); break;
+ case T_INT: vpmaxud(dst, src1, src2, vlen_enc); break;
+ case T_LONG: evpmaxuq(dst, k0, src1, src2, false, vlen_enc); break;
+ default: fatal("Unsupported type %s", type2name(elem_bt)); break;
+ }
+ }
+}
+
void C2_MacroAssembler::vpminmax(int opcode, BasicType elem_bt,
XMMRegister dst, XMMRegister src1, XMMRegister src2,
int vlen_enc) {
@@ -2362,6 +2428,10 @@ void C2_MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, X
MacroAssembler::evmovdqu(type, kmask, dst, src, merge, vector_len);
}
+void C2_MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
+ MacroAssembler::evmovdqu(type, kmask, dst, src, merge, vector_len);
+}
+
void C2_MacroAssembler::vmovmask(BasicType elem_bt, XMMRegister dst, Address src, XMMRegister mask,
int vec_enc) {
switch(elem_bt) {
@@ -2660,7 +2730,6 @@ void C2_MacroAssembler::vectortest(BasicType bt, XMMRegister src1, XMMRegister s
}
void C2_MacroAssembler::vpadd(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc) {
- assert(UseAVX >= 2, "required");
#ifdef ASSERT
bool is_bw = ((elem_bt == T_BYTE) || (elem_bt == T_SHORT));
bool is_bw_supported = VM_Version::supports_avx512bw();
@@ -4634,7 +4703,126 @@ void C2_MacroAssembler::evmasked_op(int ideal_opc, BasicType eType, KRegister ma
case Op_RotateLeftV:
evrold(eType, dst, mask, src1, imm8, merge, vlen_enc); break;
default:
- fatal("Unsupported masked operation"); break;
+ fatal("Unsupported operation %s", NodeClassNames[ideal_opc]);
+ break;
+ }
+}
+
+void C2_MacroAssembler::evmasked_saturating_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, bool is_unsigned, bool merge, int vlen_enc) {
+ if (is_unsigned) {
+ evmasked_saturating_unsigned_op(ideal_opc, elem_bt, mask, dst, src1, src2, merge, vlen_enc);
+ } else {
+ evmasked_saturating_signed_op(ideal_opc, elem_bt, mask, dst, src1, src2, merge, vlen_enc);
+ }
+}
+
+void C2_MacroAssembler::evmasked_saturating_signed_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst,
+ XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
+ switch (elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddsb(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubsb(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddsw(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubsw(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
+ }
+}
+
+void C2_MacroAssembler::evmasked_saturating_unsigned_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst,
+ XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
+ switch (elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddusb(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubusb(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddusw(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubusw(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
+ }
+}
+
+void C2_MacroAssembler::evmasked_saturating_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1,
+ Address src2, bool is_unsigned, bool merge, int vlen_enc) {
+ if (is_unsigned) {
+ evmasked_saturating_unsigned_op(ideal_opc, elem_bt, mask, dst, src1, src2, merge, vlen_enc);
+ } else {
+ evmasked_saturating_signed_op(ideal_opc, elem_bt, mask, dst, src1, src2, merge, vlen_enc);
+ }
+}
+
+void C2_MacroAssembler::evmasked_saturating_signed_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst,
+ XMMRegister src1, Address src2, bool merge, int vlen_enc) {
+ switch (elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddsb(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubsb(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddsw(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubsw(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
+ }
+}
+
+void C2_MacroAssembler::evmasked_saturating_unsigned_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst,
+ XMMRegister src1, Address src2, bool merge, int vlen_enc) {
+ switch (elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddusb(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubusb(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ evpaddusw(dst, mask, src1, src2, merge, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ evpsubusw(dst, mask, src1, src2, merge, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
}
}
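For reference, the per-lane semantics selected by these unsigned dispatchers (evpaddusb/evpsubusb and friends) clamp rather than wrap. A scalar sketch with hypothetical helper names, not HotSpot code:

```cpp
#include <cassert>
#include <cstdint>

// Scalar reference for one byte lane of vpaddusb/vpsubusb: results
// clamp to the unsigned range instead of wrapping.
static uint8_t sat_addu8(uint8_t a, uint8_t b) {
  unsigned s = unsigned(a) + unsigned(b);
  return s > 0xFF ? 0xFF : uint8_t(s);
}

static uint8_t sat_subu8(uint8_t a, uint8_t b) {
  return a < b ? 0 : uint8_t(a - b);
}

int main() {
  assert(sat_addu8(200, 100) == 255);  // would wrap to 44 without saturation
  assert(sat_subu8(10, 42) == 0);      // would wrap to 224 without saturation
  return 0;
}
```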
@@ -4724,6 +4912,10 @@ void C2_MacroAssembler::evmasked_op(int ideal_opc, BasicType eType, KRegister ma
evpmaxs(eType, dst, mask, src1, src2, merge, vlen_enc); break;
case Op_MinV:
evpmins(eType, dst, mask, src1, src2, merge, vlen_enc); break;
+ case Op_UMinV:
+ evpminu(eType, dst, mask, src1, src2, merge, vlen_enc); break;
+ case Op_UMaxV:
+ evpmaxu(eType, dst, mask, src1, src2, merge, vlen_enc); break;
case Op_XorV:
evxor(eType, dst, mask, src1, src2, merge, vlen_enc); break;
case Op_OrV:
@@ -4731,7 +4923,8 @@ void C2_MacroAssembler::evmasked_op(int ideal_opc, BasicType eType, KRegister ma
case Op_AndV:
evand(eType, dst, mask, src1, src2, merge, vlen_enc); break;
default:
- fatal("Unsupported masked operation"); break;
+ fatal("Unsupported operation %s", NodeClassNames[ideal_opc]);
+ break;
}
}
@@ -4784,6 +4977,10 @@ void C2_MacroAssembler::evmasked_op(int ideal_opc, BasicType eType, KRegister ma
evpmaxs(eType, dst, mask, src1, src2, merge, vlen_enc); break;
case Op_MinV:
evpmins(eType, dst, mask, src1, src2, merge, vlen_enc); break;
+ case Op_UMaxV:
+ evpmaxu(eType, dst, mask, src1, src2, merge, vlen_enc); break;
+ case Op_UMinV:
+ evpminu(eType, dst, mask, src1, src2, merge, vlen_enc); break;
case Op_XorV:
evxor(eType, dst, mask, src1, src2, merge, vlen_enc); break;
case Op_OrV:
@@ -4791,7 +4988,8 @@ void C2_MacroAssembler::evmasked_op(int ideal_opc, BasicType eType, KRegister ma
case Op_AndV:
evand(eType, dst, mask, src1, src2, merge, vlen_enc); break;
default:
- fatal("Unsupported masked operation"); break;
+ fatal("Unsupported operation %s", NodeClassNames[ideal_opc]);
+ break;
}
}
@@ -6479,6 +6677,369 @@ void C2_MacroAssembler::vector_rearrange_int_float(BasicType bt, XMMRegister dst
}
}
+void C2_MacroAssembler::vector_saturating_op(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc) {
+ switch(elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddsb(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubsb(dst, src1, src2, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddsw(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubsw(dst, src1, src2, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
+ }
+}
+
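The signed forms just dispatched to (vpaddsb/vpsubsb et al.) clamp to the two's complement range instead. A scalar sketch of one byte lane, with a hypothetical helper name:

```cpp
#include <cassert>
#include <cstdint>

// Scalar reference for one byte lane of vpaddsb: the sum clamps to
// [-128, 127] instead of wrapping around.
static int8_t sat_adds8(int8_t a, int8_t b) {
  int s = int(a) + int(b);
  if (s > INT8_MAX) return INT8_MAX;
  if (s < INT8_MIN) return INT8_MIN;
  return int8_t(s);
}

int main() {
  assert(sat_adds8(100, 100) == 127);     // wraps to -56 with plain addition
  assert(sat_adds8(-100, -100) == -128);
  return 0;
}
```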
+void C2_MacroAssembler::vector_saturating_unsigned_op(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc) {
+ switch(elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddusb(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubusb(dst, src1, src2, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddusw(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubusw(dst, src1, src2, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
+ }
+}
+
+void C2_MacroAssembler::vector_sub_dq_saturating_unsigned_avx(BasicType elem_bt, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc) {
+ // For unsigned subtraction, overflow happens when magnitude of second input is greater than first input.
+ // overflow_mask = Inp1 <u Inp2, computed in the signed domain as (Inp1 + MIN_VALUE) <s (Inp2 + MIN_VALUE)
+ vpgenmin_value(elem_bt, xtmp1, xtmp1, vlen_enc, true);
+ vpadd(elem_bt, xtmp2, src1, xtmp1, vlen_enc);
+ vpadd(elem_bt, xtmp1, src2, xtmp1, vlen_enc);
+
+ vpcmpgt(elem_bt, xtmp2, xtmp1, xtmp2, vlen_enc);
+
+ // Res = INP1 - INP2 (non-commutative and non-associative)
+ vpsub(elem_bt, dst, src1, src2, vlen_enc);
+ // Res = Mask ? Zero : Res
+ vpxor(xtmp1, xtmp1, xtmp1, vlen_enc);
+ vpblendvb(dst, dst, xtmp1, xtmp2, vlen_enc);
+}
+
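A scalar model of the underflow detection above: lacking an unsigned compare on pre-AVX-512 targets, the code biases both inputs by MIN_VALUE and reuses the signed greater-than, then zeroes the underflowed lanes. Illustrative only, with a hypothetical helper name:

```cpp
#include <cassert>
#include <cstdint>

// Scalar model of the AVX unsigned saturating subtract for one 32-bit
// lane: detect underflow with a bias-to-signed compare, then force
// underflowed lanes to zero (the vpblendvb in the vector code).
static uint32_t sat_subu32(uint32_t a, uint32_t b) {
  const uint32_t bias = UINT32_C(1) << 31;                  // vpgenmin_value produces MIN_VALUE
  bool underflow = int32_t(b + bias) > int32_t(a + bias);   // Mask = T2 > T1
  uint32_t res = a - b;                                     // Res = INP1 - INP2
  return underflow ? 0 : res;                               // Res = Mask ? Zero : Res
}

int main() {
  assert(sat_subu32(7, 9) == 0);
  assert(sat_subu32(9, 7) == 2);
  return 0;
}
```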
+void C2_MacroAssembler::vector_add_dq_saturating_unsigned_evex(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp, int vlen_enc) {
+ // Unsigned value ranges comprise only +ve numbers, thus there exists only an upper bound saturation.
+ // Res = SRC1 + SRC2 wraps around iff Res <u SRC1, so an unsigned compare yields the overflow mask directly.
+ vpadd(elem_bt, dst, src1, src2, vlen_enc);
+ evpcmpu(elem_bt, ktmp, dst, src1, Assembler::lt, vlen_enc);
+ // Compose an all-ones vector, i.e. the unsigned MAX_VALUE saturating value.
+ vpcmpeqd(xtmp1, xtmp1, xtmp1, vlen_enc);
+ // Res = Mask ? MAX_VALUE : Res
+ evpblend(elem_bt, dst, ktmp, dst, xtmp1, true, vlen_enc);
+}
+
+void C2_MacroAssembler::vector_addsub_dq_saturating_evex(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister xtmp1, XMMRegister xtmp2,
+ KRegister ktmp1, KRegister ktmp2, int vlen_enc) {
+ assert(elem_bt == T_INT || elem_bt == T_LONG, "");
+ // Overflow detection based on Hacker's delight section 2-13.
+ if (ideal_opc == Op_SaturatingAddV) {
+ // res = src1 + src2
+ vpadd(elem_bt, dst, src1, src2, vlen_enc);
+ // Overflow occurs if result polarity does not comply with equivalent polarity inputs.
+ // overflow = (((res ^ src1) & (res ^ src2)) >>> 31(I)/63(L)) == 1
+ vpxor(xtmp1, dst, src1, vlen_enc);
+ vpxor(xtmp2, dst, src2, vlen_enc);
+ vpand(xtmp2, xtmp1, xtmp2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ // res = src1 - src2
+ vpsub(elem_bt, dst, src1, src2, vlen_enc);
+ // Overflow occurs when both inputs have opposite polarity and
+ // result polarity does not comply with first input polarity.
+ // overflow = ((src1 ^ src2) & (res ^ src1) >>> 31(I)/63(L)) == 1;
+ vpxor(xtmp1, src1, src2, vlen_enc);
+ vpxor(xtmp2, dst, src1, vlen_enc);
+ vpand(xtmp2, xtmp1, xtmp2, vlen_enc);
+ }
+
+ // Compute overflow detection mask.
+ evpmov_vec_to_mask(elem_bt, ktmp1, xtmp2, xtmp2, xtmp1, vlen_enc);
+ // Note: xtmp1 holds -1 in all its lanes after the above call.
+
+ // Compute mask based on first input polarity.
+ evpmov_vec_to_mask(elem_bt, ktmp2, src1, xtmp2, xtmp1, vlen_enc, true);
+
+ vpgenmax_value(elem_bt, xtmp2, xtmp1, vlen_enc, true);
+ vpgenmin_value(elem_bt, xtmp1, xtmp1, vlen_enc);
+
+ // Compose a vector of saturating (MAX/MIN) values, where lanes corresponding to
+ // set bits in the first input polarity mask hold the min value.
+ evpblend(elem_bt, xtmp2, ktmp2, xtmp2, xtmp1, true, vlen_enc);
+ // Blend destination lanes with saturated values using overflow detection mask.
+ evpblend(elem_bt, dst, ktmp1, dst, xtmp2, true, vlen_enc);
+}
+
+
+void C2_MacroAssembler::vector_addsub_dq_saturating_avx(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister xtmp1, XMMRegister xtmp2,
+ XMMRegister xtmp3, XMMRegister xtmp4, int vlen_enc) {
+ assert(elem_bt == T_INT || elem_bt == T_LONG, "");
+ // Addition/subtraction happens over the two's complement representation of numbers and is agnostic to signedness.
+ // Overflow detection based on Hacker's delight section 2-13.
+ if (ideal_opc == Op_SaturatingAddV) {
+ // res = src1 + src2
+ vpadd(elem_bt, dst, src1, src2, vlen_enc);
+ // Overflow occurs if result polarity does not comply with equivalent polarity inputs.
+ // overflow = (((res ^ src1) & (res ^ src2)) >>> 31(I)/63(L)) == 1
+ vpxor(xtmp1, dst, src1, vlen_enc);
+ vpxor(xtmp2, dst, src2, vlen_enc);
+ vpand(xtmp2, xtmp1, xtmp2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ // res = src1 - src2
+ vpsub(elem_bt, dst, src1, src2, vlen_enc);
+ // Overflow occurs when both inputs have opposite polarity and
+ // result polarity does not comply with first input polarity.
+ // overflow = ((src1 ^ src2) & (res ^ src1) >>> 31(I)/63(L)) == 1;
+ vpxor(xtmp1, src1, src2, vlen_enc);
+ vpxor(xtmp2, dst, src1, vlen_enc);
+ vpand(xtmp2, xtmp1, xtmp2, vlen_enc);
+ }
+
+ // Sign-extend to compute overflow detection mask.
+ vpsign_extend_dq(elem_bt, xtmp3, xtmp2, vlen_enc);
+
+ vpcmpeqd(xtmp1, xtmp1, xtmp1, vlen_enc);
+ vpgenmax_value(elem_bt, xtmp2, xtmp1, vlen_enc);
+ vpgenmin_value(elem_bt, xtmp1, xtmp1, vlen_enc);
+
+ // Compose saturating min/max vector using first input polarity mask.
+ vpsign_extend_dq(elem_bt, xtmp4, src1, vlen_enc);
+ vpblendvb(xtmp1, xtmp2, xtmp1, xtmp4, vlen_enc);
+
+ // Blend result with saturating vector using overflow detection mask.
+ vpblendvb(dst, dst, xtmp1, xtmp3, vlen_enc);
+}
+
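Both the EVEX and AVX saturating add/sub paths lean on the Hacker's Delight section 2-13 overflow predicates. The scalar model below (hypothetical helper names) shows the sign bit of the computed expression acting as the overflow mask, with the saturating value chosen from the first input's polarity, exactly as the blends above do:

```cpp
#include <cassert>
#include <cstdint>

// Scalar model of the per-lane overflow tests for one 32-bit lane.
static int32_t sat_adds32(int32_t a, int32_t b) {
  int32_t res = int32_t(uint32_t(a) + uint32_t(b));   // wrapping add
  bool overflow = ((res ^ a) & (res ^ b)) < 0;        // same-polarity inputs, result flips
  int32_t sat = a < 0 ? INT32_MIN : INT32_MAX;        // saturation from first input polarity
  return overflow ? sat : res;
}

static int32_t sat_subs32(int32_t a, int32_t b) {
  int32_t res = int32_t(uint32_t(a) - uint32_t(b));   // wrapping subtract
  bool overflow = ((a ^ b) & (res ^ a)) < 0;          // opposite-polarity inputs, result flips
  int32_t sat = a < 0 ? INT32_MIN : INT32_MAX;
  return overflow ? sat : res;
}

int main() {
  assert(sat_adds32(INT32_MAX, 1) == INT32_MAX);
  assert(sat_adds32(INT32_MIN, -1) == INT32_MIN);
  assert(sat_subs32(INT32_MIN, 1) == INT32_MIN);
  assert(sat_subs32(5, 3) == 2);
  return 0;
}
```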
+void C2_MacroAssembler::vector_saturating_op(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, Address src2, int vlen_enc) {
+ switch(elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddsb(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubsb(dst, src1, src2, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddsw(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubsw(dst, src1, src2, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
+ }
+}
+
+void C2_MacroAssembler::vector_saturating_unsigned_op(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, Address src2, int vlen_enc) {
+ switch(elem_bt) {
+ case T_BYTE:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddusb(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubusb(dst, src1, src2, vlen_enc);
+ }
+ break;
+ case T_SHORT:
+ if (ideal_opc == Op_SaturatingAddV) {
+ vpaddusw(dst, src1, src2, vlen_enc);
+ } else {
+ assert(ideal_opc == Op_SaturatingSubV, "");
+ vpsubusw(dst, src1, src2, vlen_enc);
+ }
+ break;
+ default:
+ fatal("Unsupported type %s", type2name(elem_bt));
+ break;
+ }
+}
+
void C2_MacroAssembler::select_from_two_vectors_evex(BasicType elem_bt, XMMRegister dst, XMMRegister src1,
XMMRegister src2, int vlen_enc) {
switch(elem_bt) {
@@ -6505,3 +7066,19 @@ void C2_MacroAssembler::select_from_two_vectors_evex(BasicType elem_bt, XMMRegis
break;
}
}
+
+void C2_MacroAssembler::vector_saturating_op(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, bool is_unsigned, int vlen_enc) {
+ if (is_unsigned) {
+ vector_saturating_unsigned_op(ideal_opc, elem_bt, dst, src1, src2, vlen_enc);
+ } else {
+ vector_saturating_op(ideal_opc, elem_bt, dst, src1, src2, vlen_enc);
+ }
+}
+
+void C2_MacroAssembler::vector_saturating_op(int ideal_opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, Address src2, bool is_unsigned, int vlen_enc) {
+ if (is_unsigned) {
+ vector_saturating_unsigned_op(ideal_opc, elem_bt, dst, src1, src2, vlen_enc);
+ } else {
+ vector_saturating_op(ideal_opc, elem_bt, dst, src1, src2, vlen_enc);
+ }
+}
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
index 5744fedcc64..3a36fd75e3f 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
@@ -56,10 +56,21 @@
XMMRegister dst, XMMRegister src1, XMMRegister src2,
int vlen_enc);
+ void vpuminmax(int opcode, BasicType elem_bt,
+ XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ int vlen_enc);
+
+ void vpuminmax(int opcode, BasicType elem_bt,
+ XMMRegister dst, XMMRegister src1, Address src2,
+ int vlen_enc);
+
void vminmax_fp(int opcode, BasicType elem_bt,
XMMRegister dst, XMMRegister a, XMMRegister b,
XMMRegister tmp, XMMRegister atmp, XMMRegister btmp,
int vlen_enc);
+
+ void vpuminmaxq(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc);
+
void evminmax_fp(int opcode, BasicType elem_bt,
XMMRegister dst, XMMRegister a, XMMRegister b,
KRegister ktmp, XMMRegister atmp, XMMRegister btmp,
@@ -105,6 +116,7 @@
void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
+ void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
// extract
void extract(BasicType typ, Register dst, XMMRegister src, int idx);
@@ -505,6 +517,70 @@
void vgather8b_offset(BasicType elem_bt, XMMRegister dst, Register base, Register idx_base,
Register offset, Register rtmp, int vlen_enc);
+ void vector_saturating_op(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, bool is_unsigned, int vlen_enc);
+
+ void vector_saturating_op(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, Address src2, bool is_unsigned, int vlen_enc);
+
+ void vector_saturating_op(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc);
+
+ void vector_saturating_op(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, Address src2, int vlen_enc);
+
+ void vector_saturating_unsigned_op(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc);
+
+ void vector_saturating_unsigned_op(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, Address src2, int vlen_enc);
+
+ void vector_sub_dq_saturating_unsigned_evex(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, KRegister ktmp, int vlen_enc);
+
+ void vector_sub_dq_saturating_unsigned_avx(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc);
+
+ void vector_add_dq_saturating_unsigned_evex(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp, int vlen_enc);
+
+ void vector_add_dq_saturating_unsigned_avx(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, int vlen_enc);
+
+ void vector_addsub_dq_saturating_avx(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, int vlen_enc);
+
+ void vector_addsub_dq_saturating_evex(int opc, BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, int vlen_enc);
+
+ void evpmovd2m_emu(KRegister ktmp, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc, bool xtmp2_hold_M1 = false);
+
+ void evpmovq2m_emu(KRegister ktmp, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc, bool xtmp2_hold_M1 = false);
+
+ void vpsign_extend_dq(BasicType etype, XMMRegister dst, XMMRegister src, int vlen_enc);
+
+ void vpgenmin_value(BasicType etype, XMMRegister dst, XMMRegister allones, int vlen_enc, bool compute_allones = false);
+
+ void vpgenmax_value(BasicType etype, XMMRegister dst, XMMRegister allones, int vlen_enc, bool compute_allones = false);
+
+ void evpcmpu(BasicType etype, KRegister kmask, XMMRegister src1, XMMRegister src2, Assembler::ComparisonPredicate cond, int vlen_enc);
+
+ void vpcmpgt(BasicType etype, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc);
+
+ void evpmov_vec_to_mask(BasicType etype, KRegister ktmp, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
+ int vlen_enc, bool xtmp2_hold_M1 = false);
+
+ void evmasked_saturating_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ bool is_unsigned, bool merge, int vlen_enc);
+
+ void evmasked_saturating_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1, Address src2,
+ bool is_unsigned, bool merge, int vlen_enc);
+
+ void evmasked_saturating_signed_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ bool merge, int vlen_enc);
+
+ void evmasked_saturating_signed_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1, Address src2,
+ bool merge, int vlen_enc);
+
+ void evmasked_saturating_unsigned_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, bool merge, int vlen_enc);
+
+ void evmasked_saturating_unsigned_op(int ideal_opc, BasicType elem_bt, KRegister mask, XMMRegister dst, XMMRegister src1,
+ Address src2, bool merge, int vlen_enc);
+
void select_from_two_vectors_evex(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc);
#endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP
diff --git a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp
deleted file mode 100644
index a7dc34b17b1..00000000000
--- a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/codeBlob.hpp"
-#include "code/vmreg.inline.hpp"
-#include "gc/x/xBarrier.inline.hpp"
-#include "gc/x/xBarrierSet.hpp"
-#include "gc/x/xBarrierSetAssembler.hpp"
-#include "gc/x/xBarrierSetRuntime.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "utilities/macros.hpp"
-#ifdef COMPILER1
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "gc/x/c1/xBarrierSetC1.hpp"
-#endif // COMPILER1
-#ifdef COMPILER2
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#endif // COMPILER2
-
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) /* nothing */
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-
-#undef __
-#define __ masm->
-
-static void call_vm(MacroAssembler* masm,
- address entry_point,
- Register arg0,
- Register arg1) {
- // Setup arguments
- if (arg1 == c_rarg0) {
- if (arg0 == c_rarg1) {
- __ xchgptr(c_rarg1, c_rarg0);
- } else {
- __ movptr(c_rarg1, arg1);
- __ movptr(c_rarg0, arg0);
- }
- } else {
- if (arg0 != c_rarg0) {
- __ movptr(c_rarg0, arg0);
- }
- if (arg1 != c_rarg1) {
- __ movptr(c_rarg1, arg1);
- }
- }
-
- // Call VM
- __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
-}
-
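The now-deleted call_vm shuffled arguments carefully because two plain moves would clobber an input when the registers are crossed; the xchgptr handles exactly that case. A scalar analogue of the hazard, illustrative only:

```cpp
#include <cassert>
#include <utility>

// With crossed registers (arg0 living in c_rarg1 and arg1 in c_rarg0),
// two plain moves lose one value before it is read; a swap preserves both.
int main() {
  int c_rarg0 = 7, c_rarg1 = 9;   // crossed: we want c_rarg0 == 9, c_rarg1 == 7
  // naive: c_rarg0 = c_rarg1; c_rarg1 = c_rarg0;  // would leave both equal to 9
  std::swap(c_rarg0, c_rarg1);    // what xchgptr does in the deleted code
  assert(c_rarg0 == 9 && c_rarg1 == 7);
  return 0;
}
```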
-void XBarrierSetAssembler::load_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register dst,
- Address src,
- Register tmp1,
- Register tmp_thread) {
- if (!XBarrierSet::barrier_needed(decorators, type)) {
- // Barrier not needed
- BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
- return;
- }
-
- BLOCK_COMMENT("XBarrierSetAssembler::load_at {");
-
- // Allocate scratch register
- Register scratch = tmp1;
- if (tmp1 == noreg) {
- scratch = r12;
- __ push(scratch);
- }
-
- assert_different_registers(dst, scratch);
-
- Label done;
-
- //
- // Fast Path
- //
-
- // Load address
- __ lea(scratch, src);
-
- // Load oop at address
- __ movptr(dst, Address(scratch, 0));
-
- // Test address bad mask
- __ testptr(dst, address_bad_mask_from_thread(r15_thread));
- __ jcc(Assembler::zero, done);
-
- //
- // Slow path
- //
-
- // Save registers
- __ push(rax);
- __ push(rcx);
- __ push(rdx);
- __ push(rdi);
- __ push(rsi);
- __ push(r8);
- __ push(r9);
- __ push(r10);
- __ push(r11);
-
- // We may end up here from generate_native_wrapper, then the method may have
- // floats as arguments, and we must spill them before calling the VM runtime
- // leaf. From the interpreter all floats are passed on the stack.
- assert(Argument::n_float_register_parameters_j == 8, "Assumption");
- const int xmm_size = wordSize * 2;
- const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
- __ subptr(rsp, xmm_spill_size);
- __ movdqu(Address(rsp, xmm_size * 7), xmm7);
- __ movdqu(Address(rsp, xmm_size * 6), xmm6);
- __ movdqu(Address(rsp, xmm_size * 5), xmm5);
- __ movdqu(Address(rsp, xmm_size * 4), xmm4);
- __ movdqu(Address(rsp, xmm_size * 3), xmm3);
- __ movdqu(Address(rsp, xmm_size * 2), xmm2);
- __ movdqu(Address(rsp, xmm_size * 1), xmm1);
- __ movdqu(Address(rsp, xmm_size * 0), xmm0);
-
- // Call VM
- call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
-
- __ movdqu(xmm0, Address(rsp, xmm_size * 0));
- __ movdqu(xmm1, Address(rsp, xmm_size * 1));
- __ movdqu(xmm2, Address(rsp, xmm_size * 2));
- __ movdqu(xmm3, Address(rsp, xmm_size * 3));
- __ movdqu(xmm4, Address(rsp, xmm_size * 4));
- __ movdqu(xmm5, Address(rsp, xmm_size * 5));
- __ movdqu(xmm6, Address(rsp, xmm_size * 6));
- __ movdqu(xmm7, Address(rsp, xmm_size * 7));
- __ addptr(rsp, xmm_spill_size);
-
- __ pop(r11);
- __ pop(r10);
- __ pop(r9);
- __ pop(r8);
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rdx);
- __ pop(rcx);
-
- if (dst == rax) {
- __ addptr(rsp, wordSize);
- } else {
- __ movptr(dst, rax);
- __ pop(rax);
- }
-
- __ bind(done);
-
- // Restore scratch register
- if (tmp1 == noreg) {
- __ pop(scratch);
- }
-
- BLOCK_COMMENT("} XBarrierSetAssembler::load_at");
-}
-
-#ifdef ASSERT
-
-void XBarrierSetAssembler::store_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Address dst,
- Register src,
- Register tmp1,
- Register tmp2,
- Register tmp3) {
- BLOCK_COMMENT("XBarrierSetAssembler::store_at {");
-
- // Verify oop store
- if (is_reference_type(type)) {
- // Note that src could be noreg, which means we
- // are storing null and can skip verification.
- if (src != noreg) {
- Label done;
- __ testptr(src, address_bad_mask_from_thread(r15_thread));
- __ jcc(Assembler::zero, done);
- __ stop("Verify oop store failed");
- __ should_not_reach_here();
- __ bind(done);
- }
- }
-
- // Store value
- BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
-
- BLOCK_COMMENT("} XBarrierSetAssembler::store_at");
-}
-
-#endif // ASSERT
-
-void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register src,
- Register dst,
- Register count) {
- if (!XBarrierSet::barrier_needed(decorators, type)) {
- // Barrier not needed
- return;
- }
-
- BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
-
- // Save registers
- __ pusha();
-
- // Call VM
- call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
-
- // Restore registers
- __ popa();
-
- BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
-}
-
-void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
- Register jni_env,
- Register obj,
- Register tmp,
- Label& slowpath) {
- BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
-
- // Resolve jobject
- BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
-
- // Test address bad mask
- __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
- __ jcc(Assembler::notZero, slowpath);
-
- BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
-}
-
-#ifdef COMPILER1
-
-#undef __
-#define __ ce->masm()->
-
-void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const {
- __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
-}
-
-void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const {
- // Stub entry
- __ bind(*stub->entry());
-
- Register ref = stub->ref()->as_register();
- Register ref_addr = noreg;
- Register tmp = noreg;
-
- if (stub->tmp()->is_valid()) {
- // Load address into tmp register
- ce->leal(stub->ref_addr(), stub->tmp());
- ref_addr = tmp = stub->tmp()->as_pointer_register();
- } else {
- // Address already in register
- ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
- }
-
- assert_different_registers(ref, ref_addr, noreg);
-
- // Save rax unless it is the result or tmp register
- if (ref != rax && tmp != rax) {
- __ push(rax);
- }
-
- // Setup arguments and call runtime stub
- __ subptr(rsp, 2 * BytesPerWord);
- ce->store_parameter(ref_addr, 1);
- ce->store_parameter(ref, 0);
- __ call(RuntimeAddress(stub->runtime_stub()));
- __ addptr(rsp, 2 * BytesPerWord);
-
- // Verify result
- __ verify_oop(rax);
-
- // Move result into place
- if (ref != rax) {
- __ movptr(ref, rax);
- }
-
- // Restore rax unless it is the result or tmp register
- if (ref != rax && tmp != rax) {
- __ pop(rax);
- }
-
- // Stub exit
- __ jmp(*stub->continuation());
-}
-
-#undef __
-#define __ sasm->
-
-void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const {
- // Enter and save registers
- __ enter();
- __ save_live_registers_no_oop_map(true /* save_fpu_registers */);
-
- // Setup arguments
- __ load_parameter(1, c_rarg1);
- __ load_parameter(0, c_rarg0);
-
- // Call VM
- __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
-
- // Restore registers and return
- __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
- __ leave();
- __ ret(0);
-}
-
-#endif // COMPILER1
-
-#ifdef COMPILER2
-
-OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
- if (!OptoReg::is_reg(opto_reg)) {
- return OptoReg::Bad;
- }
-
- const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
- if (vm_reg->is_XMMRegister()) {
- opto_reg &= ~15;
- switch (node->ideal_reg()) {
- case Op_VecX:
- opto_reg |= 2;
- break;
- case Op_VecY:
- opto_reg |= 4;
- break;
- case Op_VecZ:
- opto_reg |= 8;
- break;
- default:
- opto_reg |= 1;
- break;
- }
- }
-
- return opto_reg;
-}
-
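This deleted refine_register encodes the live width of an XMM register into the low four bits of the opto_reg, and xmm_slot_size below decodes that back to bytes. A small model of the round trip (illustrative, not HotSpot code):

```cpp
#include <cassert>

// Model of the removed XMM liveness encoding: refine_register stores the
// live width in the low 4 bits, xmm_slot_size recovers bytes via << 3.
static int xmm_slot_size(int opto_reg) { return (opto_reg & 15) << 3; }

int main() {
  const int base = 32 & ~15;                 // some XMM opto_reg with low bits cleared
  assert(xmm_slot_size(base | 1) == 8);      // default  -> Op_VecD,  8 bytes
  assert(xmm_slot_size(base | 2) == 16);     // Op_VecX -> 16 bytes
  assert(xmm_slot_size(base | 4) == 32);     // Op_VecY -> 32 bytes
  assert(xmm_slot_size(base | 8) == 64);     // Op_VecZ -> 64 bytes
  return 0;
}
```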
-// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
-extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
- int stack_offset, int reg, uint ireg, outputStream* st);
-
-#undef __
-#define __ _masm->
-
-class XSaveLiveRegisters {
-private:
- struct XMMRegisterData {
- XMMRegister _reg;
- int _size;
-
- // Used by GrowableArray::find()
- bool operator == (const XMMRegisterData& other) {
- return _reg == other._reg;
- }
- };
-
- MacroAssembler* const _masm;
- GrowableArray<Register> _gp_registers;
- GrowableArray<KRegister> _opmask_registers;
- GrowableArray<XMMRegisterData> _xmm_registers;
- int _spill_size;
- int _spill_offset;
-
- static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
- if (left->_size == right->_size) {
- return 0;
- }
-
- return (left->_size < right->_size) ? -1 : 1;
- }
-
- static int xmm_slot_size(OptoReg::Name opto_reg) {
- // The low order 4 bytes denote what size of the XMM register is live
- return (opto_reg & 15) << 3;
- }
-
- static uint xmm_ideal_reg_for_size(int reg_size) {
- switch (reg_size) {
- case 8:
- return Op_VecD;
- case 16:
- return Op_VecX;
- case 32:
- return Op_VecY;
- case 64:
- return Op_VecZ;
- default:
- fatal("Invalid register size %d", reg_size);
- return 0;
- }
- }
-
- bool xmm_needs_vzeroupper() const {
- return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
- }
-
- void xmm_register_save(const XMMRegisterData& reg_data) {
- const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
- const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
- _spill_offset -= reg_data._size;
- C2_MacroAssembler c2_masm(__ code());
- vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
- }
-
- void xmm_register_restore(const XMMRegisterData& reg_data) {
- const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
- const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
- C2_MacroAssembler c2_masm(__ code());
- vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
- _spill_offset += reg_data._size;
- }
-
- void gp_register_save(Register reg) {
- _spill_offset -= 8;
- __ movq(Address(rsp, _spill_offset), reg);
- }
-
- void opmask_register_save(KRegister reg) {
- _spill_offset -= 8;
- __ kmov(Address(rsp, _spill_offset), reg);
- }
-
- void gp_register_restore(Register reg) {
- __ movq(reg, Address(rsp, _spill_offset));
- _spill_offset += 8;
- }
-
- void opmask_register_restore(KRegister reg) {
- __ kmov(reg, Address(rsp, _spill_offset));
- _spill_offset += 8;
- }
-
- void initialize(XLoadBarrierStubC2* stub) {
- // Create mask of caller saved registers that need to
- // be saved/restored if live
- RegMask caller_saved;
- caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
- caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));
-
- if (UseAPX) {
- caller_saved.Insert(OptoReg::as_OptoReg(r16->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r17->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r18->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r19->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r20->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r21->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r22->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r23->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r24->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r25->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r26->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r27->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r28->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r29->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r30->as_VMReg()));
- caller_saved.Insert(OptoReg::as_OptoReg(r31->as_VMReg()));
- }
-
- // Create mask of live registers
- RegMask live = stub->live();
- if (stub->tmp() != noreg) {
- live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
- }
-
- int gp_spill_size = 0;
- int opmask_spill_size = 0;
- int xmm_spill_size = 0;
-
- // Record registers that needs to be saved/restored
- RegMaskIterator rmi(live);
- while (rmi.has_next()) {
- const OptoReg::Name opto_reg = rmi.next();
- const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
-
- if (vm_reg->is_Register()) {
- if (caller_saved.Member(opto_reg)) {
- _gp_registers.append(vm_reg->as_Register());
- gp_spill_size += 8;
- }
- } else if (vm_reg->is_KRegister()) {
- // All opmask registers are caller saved, thus spill the ones
- // which are live.
- if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
- _opmask_registers.append(vm_reg->as_KRegister());
- opmask_spill_size += 8;
- }
- } else if (vm_reg->is_XMMRegister()) {
- // We encode in the low order 4 bits of the opto_reg, how large part of the register is live
- const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
- const int reg_size = xmm_slot_size(opto_reg);
- const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
- const int reg_index = _xmm_registers.find(reg_data);
- if (reg_index == -1) {
- // Not previously appended
- _xmm_registers.append(reg_data);
- xmm_spill_size += reg_size;
- } else {
- // Previously appended, update size
- const int reg_size_prev = _xmm_registers.at(reg_index)._size;
- if (reg_size > reg_size_prev) {
- _xmm_registers.at_put(reg_index, reg_data);
- xmm_spill_size += reg_size - reg_size_prev;
- }
- }
- } else {
- fatal("Unexpected register type");
- }
- }
-
- // Sort by size, largest first
- _xmm_registers.sort(xmm_compare_register_size);
-
- // On Windows, the caller reserves stack space for spilling register arguments
- const int arg_spill_size = frame::arg_reg_save_area_bytes;
-
- // Stack pointer must be 16 bytes aligned for the call
- _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
- }
-
-public:
- XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
- _masm(masm),
- _gp_registers(),
- _opmask_registers(),
- _xmm_registers(),
- _spill_size(0),
- _spill_offset(0) {
-
- //
- // Stack layout after registers have been spilled:
- //
- // | ... | original rsp, 16 bytes aligned
- // ------------------
- // | zmm0 high |
- // | ... |
- // | zmm0 low | 16 bytes aligned
- // | ... |
- // | ymm1 high |
- // | ... |
- // | ymm1 low | 16 bytes aligned
- // | ... |
- // | xmmN high |
- // | ... |
- // | xmmN low | 8 bytes aligned
- // | reg0 | 8 bytes aligned
- // | reg1 |
- // | ... |
- // | regN | new rsp, if 16 bytes aligned
- // | | else new rsp, 16 bytes aligned
- // ------------------
- //
-
- // Figure out what registers to save/restore
- initialize(stub);
-
- // Allocate stack space
- if (_spill_size > 0) {
- __ subptr(rsp, _spill_size);
- }
-
- // Save XMM/YMM/ZMM registers
- for (int i = 0; i < _xmm_registers.length(); i++) {
- xmm_register_save(_xmm_registers.at(i));
- }
-
- if (xmm_needs_vzeroupper()) {
- __ vzeroupper();
- }
-
- // Save general purpose registers
- for (int i = 0; i < _gp_registers.length(); i++) {
- gp_register_save(_gp_registers.at(i));
- }
-
- // Save opmask registers
- for (int i = 0; i < _opmask_registers.length(); i++) {
- opmask_register_save(_opmask_registers.at(i));
- }
- }
-
- ~XSaveLiveRegisters() {
- // Restore opmask registers
- for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
- opmask_register_restore(_opmask_registers.at(i));
- }
-
- // Restore general purpose registers
- for (int i = _gp_registers.length() - 1; i >= 0; i--) {
- gp_register_restore(_gp_registers.at(i));
- }
-
- __ vzeroupper();
-
- // Restore XMM/YMM/ZMM registers
- for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
- xmm_register_restore(_xmm_registers.at(i));
- }
-
- // Free stack space
- if (_spill_size > 0) {
- __ addptr(rsp, _spill_size);
- }
- }
-};
-
-class XSetupArguments {
-private:
- MacroAssembler* const _masm;
- const Register _ref;
- const Address _ref_addr;
-
-public:
- XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
- _masm(masm),
- _ref(stub->ref()),
- _ref_addr(stub->ref_addr()) {
-
- // Setup arguments
- if (_ref_addr.base() == noreg) {
- // No self healing
- if (_ref != c_rarg0) {
- __ movq(c_rarg0, _ref);
- }
- __ xorq(c_rarg1, c_rarg1);
- } else {
- // Self healing
- if (_ref == c_rarg0) {
- __ lea(c_rarg1, _ref_addr);
- } else if (_ref != c_rarg1) {
- __ lea(c_rarg1, _ref_addr);
- __ movq(c_rarg0, _ref);
- } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
- __ movq(c_rarg0, _ref);
- __ lea(c_rarg1, _ref_addr);
- } else {
- __ xchgq(c_rarg0, c_rarg1);
- if (_ref_addr.base() == c_rarg0) {
- __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
- } else if (_ref_addr.index() == c_rarg0) {
- __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
- } else {
- ShouldNotReachHere();
- }
- }
- }
- }
-
- ~XSetupArguments() {
- // Transfer result
- if (_ref != rax) {
- __ movq(_ref, rax);
- }
- }
-};
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
- BLOCK_COMMENT("XLoadBarrierStubC2");
-
- // Stub entry
- __ bind(*stub->entry());
-
- {
- XSaveLiveRegisters save_live_registers(masm, stub);
- XSetupArguments setup_arguments(masm, stub);
- __ call(RuntimeAddress(stub->slow_path()));
- }
-
- // Stub exit
- __ jmp(*stub->continuation());
-}
-
-#endif // COMPILER2
-
-#undef __
-#define __ masm->
-
-void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
- // Check if metadata bits indicate a bad oop
- __ testptr(obj, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
- __ jcc(Assembler::notZero, error);
- BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
-}
-
-#undef __
diff --git a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp
deleted file mode 100644
index 52034ab786e..00000000000
--- a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
-#define CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
-
-#include "code/vmreg.hpp"
-#include "oops/accessDecorators.hpp"
-#ifdef COMPILER2
-#include "opto/optoreg.hpp"
-#endif // COMPILER2
-
-class MacroAssembler;
-
-#ifdef COMPILER1
-class LIR_Assembler;
-class LIR_Opr;
-class StubAssembler;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class Node;
-#endif // COMPILER2
-
-#ifdef COMPILER1
-class XLoadBarrierStubC1;
-#endif // COMPILER1
-
-#ifdef COMPILER2
-class XLoadBarrierStubC2;
-#endif // COMPILER2
-
-class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
-public:
- virtual void load_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register dst,
- Address src,
- Register tmp1,
- Register tmp_thread);
-
-#ifdef ASSERT
- virtual void store_at(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Address dst,
- Register src,
- Register tmp1,
- Register tmp2,
- Register tmp3);
-#endif // ASSERT
-
- virtual void arraycopy_prologue(MacroAssembler* masm,
- DecoratorSet decorators,
- BasicType type,
- Register src,
- Register dst,
- Register count);
-
- virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
- Register jni_env,
- Register obj,
- Register tmp,
- Label& slowpath);
-
-#ifdef COMPILER1
- void generate_c1_load_barrier_test(LIR_Assembler* ce,
- LIR_Opr ref) const;
-
- void generate_c1_load_barrier_stub(LIR_Assembler* ce,
- XLoadBarrierStubC1* stub) const;
-
- void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
- DecoratorSet decorators) const;
-#endif // COMPILER1
-
-#ifdef COMPILER2
- OptoReg::Name refine_register(const Node* node,
- OptoReg::Name opto_reg);
-
- void generate_c2_load_barrier_stub(MacroAssembler* masm,
- XLoadBarrierStubC2* stub) const;
-#endif // COMPILER2
-
- void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
-};
-
-#endif // CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
diff --git a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp
deleted file mode 100644
index baa99ddd60d..00000000000
--- a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/powerOfTwo.hpp"
-
-//
-// The heap can have three different layouts, depending on the max heap size.
-//
-// Address Space & Pointer Layout 1
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000014000000000 (20TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000010000000000 (16TB)
-// . .
-// +--------------------------------+ 0x00000c0000000000 (12TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000080000000000 (8TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000040000000000 (4TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 6 5 2 1 0
-// +--------------------+----+-----------------------------------------------+
-// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
-// +--------------------+----+-----------------------------------------------+
-// | | |
-// | | * 41-0 Object Offset (42-bits, 4TB address space)
-// | |
-// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
-// | 0010 = Marked1 (Address view 8-12TB)
-// | 0100 = Remapped (Address view 16-20TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-46 Fixed (18-bits, always zero)
-//
-//
-// Address Space & Pointer Layout 2
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000280000000000 (40TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000200000000000 (32TB)
-// . .
-// +--------------------------------+ 0x0000180000000000 (24TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000100000000000 (16TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000080000000000 (8TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 7 6 3 2 0
-// +------------------+-----+------------------------------------------------+
-// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
-// +-------------------+----+------------------------------------------------+
-// | | |
-// | | * 42-0 Object Offset (43-bits, 8TB address space)
-// | |
-// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB)
-// | 0010 = Marked1 (Address view 16-24TB)
-// | 0100 = Remapped (Address view 32-40TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-47 Fixed (17-bits, always zero)
-//
-//
-// Address Space & Pointer Layout 3
-// --------------------------------
-//
-// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
-// . .
-// . .
-// . .
-// +--------------------------------+ 0x0000500000000000 (80TB)
-// | Remapped View |
-// +--------------------------------+ 0x0000400000000000 (64TB)
-// . .
-// +--------------------------------+ 0x0000300000000000 (48TB)
-// | Marked1 View |
-// +--------------------------------+ 0x0000200000000000 (32TB)
-// | Marked0 View |
-// +--------------------------------+ 0x0000100000000000 (16TB)
-// . .
-// +--------------------------------+ 0x0000000000000000
-//
-// 6 4 4 4 4
-// 3 8 7 4 3 0
-// +------------------+----+-------------------------------------------------+
-// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
-// +------------------+----+-------------------------------------------------+
-// | | |
-// | | * 43-0 Object Offset (44-bits, 16TB address space)
-// | |
-// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB)
-// | 0010 = Marked1 (Address view 32-48TB)
-// | 0100 = Remapped (Address view 64-80TB)
-// | 1000 = Finalizable (Address view N/A)
-// |
-// * 63-48 Fixed (16-bits, always zero)
-//
-
-size_t XPlatformAddressOffsetBits() {
- const size_t min_address_offset_bits = 42; // 4TB
- const size_t max_address_offset_bits = 44; // 16TB
- const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
- const size_t address_offset_bits = log2i_exact(address_offset);
- return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
-}
-
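For the record, the deleted XPlatformAddressOffsetBits clamps the log2 of the reserved address range to [42, 44] bits, matching the three layouts pictured above. A worked model, assuming the 16:1 XVirtualToPhysicalRatio defined outside this diff (an assumption):

```cpp
#include <cassert>
#include <cstdint>

// Model of the removed heuristic: reserved range = max heap size times
// the virtual-to-physical ratio, rounded up to a power of two, with its
// log2 clamped to [42, 44] bits (4TB..16TB of offset space).
static int offset_bits(uint64_t max_heap, int ratio) {
  uint64_t range = max_heap * uint64_t(ratio);
  int bits = 0;
  while ((UINT64_C(1) << bits) < range) bits++;  // round_up_power_of_2 + log2i_exact
  if (bits < 42) bits = 42;
  if (bits > 44) bits = 44;
  return bits;
}

int main() {
  assert(offset_bits(UINT64_C(32) << 30, 16) == 42);  // 32GB heap -> 512GB range, clamped up to 42
  assert(offset_bits(UINT64_C(1) << 40, 16) == 44);   // 1TB heap -> 16TB range -> 44 bits
  return 0;
}
```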
-size_t XPlatformAddressMetadataShift() {
- return XPlatformAddressOffsetBits();
-}
diff --git a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp
deleted file mode 100644
index dd00d4ddadc..00000000000
--- a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef CPU_X86_GC_X_XGLOBALS_X86_HPP
-#define CPU_X86_GC_X_XGLOBALS_X86_HPP
-
-const size_t XPlatformHeapViews = 3;
-const size_t XPlatformCacheLineSize = 64;
-
-size_t XPlatformAddressOffsetBits();
-size_t XPlatformAddressMetadataShift();
-
-#endif // CPU_X86_GC_X_XGLOBALS_X86_HPP
diff --git a/src/hotspot/cpu/x86/gc/x/x_x86_64.ad b/src/hotspot/cpu/x86/gc/x/x_x86_64.ad
deleted file mode 100644
index ba4b3cb6df0..00000000000
--- a/src/hotspot/cpu/x86/gc/x/x_x86_64.ad
+++ /dev/null
@@ -1,156 +0,0 @@
-//
-// Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-source_hpp %{
-
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-
-%}
-
-source %{
-
-#include "c2_intelJccErratum_x86.hpp"
-
-static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
- if (barrier_data == XLoadBarrierElided) {
- return;
- }
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
- {
- IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
- __ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
- __ jcc(Assembler::notZero, *stub->entry());
- }
- __ bind(*stub->continuation());
-}
-
-static void x_load_barrier_cmpxchg(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
- XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
- {
- IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
- __ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
- __ jcc(Assembler::zero, good);
- }
- {
- IntelJccErratumAlignment intel_alignment(masm, 5 /* jcc_size */);
- __ jmp(*stub->entry());
- }
- __ bind(*stub->continuation());
-}
-
-static void x_cmpxchg_common(MacroAssembler* masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
- // Compare value (oldval) is in rax
- const Address mem = Address(mem_reg, 0);
-
- if (node->barrier_data() != XLoadBarrierElided) {
- __ movptr(tmp, rax);
- }
-
- __ lock();
- __ cmpxchgptr(newval, mem);
-
- if (node->barrier_data() != XLoadBarrierElided) {
- Label good;
- x_load_barrier_cmpxchg(masm, node, mem, rax, tmp, good);
- __ movptr(rax, tmp);
- __ lock();
- __ cmpxchgptr(newval, mem);
- __ bind(good);
- }
-}
-
-%}
-
-// Load Pointer
-instruct xLoadP(rRegP dst, memory mem, rFlagsReg cr)
-%{
- predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0);
- match(Set dst (LoadP mem));
- effect(KILL cr, TEMP dst);
-
- ins_cost(125);
-
- format %{ "movq $dst, $mem" %}
-
- ins_encode %{
- __ movptr($dst$$Register, $mem$$Address);
- x_load_barrier(masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
- %}
-
- ins_pipe(ialu_reg_mem);
-%}
-
-instruct xCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
- match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(KILL cr, TEMP tmp);
-
- format %{ "lock\n\t"
- "cmpxchgq $newval, $mem" %}
-
- ins_encode %{
- precond($oldval$$Register == rax);
- x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
- %}
-
- ins_pipe(pipe_cmpxchg);
-%}
-
-instruct xCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
- effect(KILL cr, KILL oldval, TEMP tmp);
-
- format %{ "lock\n\t"
- "cmpxchgq $newval, $mem\n\t"
- "setcc $res \t# emits sete + movzbl or setzue for APX" %}
-
- ins_encode %{
- precond($oldval$$Register == rax);
- x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
- if (barrier_data() != XLoadBarrierElided) {
- __ cmpptr($tmp$$Register, rax);
- }
- __ setcc(Assembler::equal, $res$$Register);
- %}
-
- ins_pipe(pipe_cmpxchg);
-%}
-
-instruct xXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{
- match(Set newval (GetAndSetP mem newval));
- predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0);
- effect(KILL cr);
-
- format %{ "xchgq $newval, $mem" %}
-
- ins_encode %{
- __ xchgptr($newval$$Register, Address($mem$$Register, 0));
- x_load_barrier(masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
- %}
-
- ins_pipe(pipe_cmpxchg);
-%}
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
index bc51a2b4468..3795b1fc176 100644
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
@@ -363,8 +363,12 @@ static void emit_store_fast_path_check_c2(MacroAssembler* masm, Address ref_addr
}
static bool is_c2_compilation() {
+#ifdef COMPILER2
CompileTask* task = ciEnv::current()->task();
return task != nullptr && is_c2_compile(task->comp_level());
+#else
+ return false;
+#endif
}
void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm,
diff --git a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
index f55ad70e861..9555cadd022 100644
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
@@ -115,7 +115,7 @@ operand no_rax_RegP()
// Load Pointer
instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Load()->barrier_data() != 0);
match(Set dst (LoadP mem));
effect(TEMP dst, KILL cr);
@@ -134,7 +134,7 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
// Load Pointer and Null Check
instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero)
%{
- predicate(UseZGC && ZGenerational && n->in(1)->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && n->in(1)->as_Load()->barrier_data() != 0);
match(Set cr (CmpP (LoadP op) zero));
ins_cost(500); // XXX
@@ -150,7 +150,7 @@ instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero)
// Store Pointer
instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr);
@@ -166,7 +166,7 @@ instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
// Store Null Pointer
instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem zero));
effect(TEMP tmp, KILL cr);
@@ -185,7 +185,7 @@ instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP tmp, KILL cr);
format %{ "lock\n\t"
@@ -208,7 +208,7 @@ instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_R
instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP tmp, KILL oldval, KILL cr);
format %{ "lock\n\t"
@@ -230,7 +230,7 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_
instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{
match(Set newval (GetAndSetP mem newval));
- predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP tmp, KILL cr);
format %{ "xchgq $newval, $mem" %}
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index 1a69b4c1ad7..55c4e29b8a3 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -9314,6 +9314,30 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
bind(done);
}
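+// Register-to-register form of the masked unaligned vector move, dispatched
+// on the element type; the subword flavors (evmovdqub/evmovdquw) need the
+// AVX512BW encodings, while the dword/qword flavors only need AVX512F.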
+void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
+ switch(type) {
+ case T_BYTE:
+ case T_BOOLEAN:
+ evmovdqub(dst, kmask, src, merge, vector_len);
+ break;
+ case T_CHAR:
+ case T_SHORT:
+ evmovdquw(dst, kmask, src, merge, vector_len);
+ break;
+ case T_INT:
+ case T_FLOAT:
+ evmovdqul(dst, kmask, src, merge, vector_len);
+ break;
+ case T_LONG:
+ case T_DOUBLE:
+ evmovdquq(dst, kmask, src, merge, vector_len);
+ break;
+ default:
+ fatal("Unexpected type argument %s", type2name(type));
+ break;
+ }
+}
+
void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
switch(type) {
@@ -9505,6 +9529,66 @@ void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMM
}
}
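+// Unsigned vector min/max, dispatched on the element type. The two
+// Address-source forms come first, followed by the register-source forms.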
+void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ switch(type) {
+ case T_BYTE:
+ evpminub(dst, mask, nds, src, merge, vector_len); break;
+ case T_SHORT:
+ evpminuw(dst, mask, nds, src, merge, vector_len); break;
+ case T_INT:
+ evpminud(dst, mask, nds, src, merge, vector_len); break;
+ case T_LONG:
+ evpminuq(dst, mask, nds, src, merge, vector_len); break;
+ default:
+ fatal("Unexpected type argument %s", type2name(type)); break;
+ }
+}
+
+void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+ switch(type) {
+ case T_BYTE:
+ evpmaxub(dst, mask, nds, src, merge, vector_len); break;
+ case T_SHORT:
+ evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
+ case T_INT:
+ evpmaxud(dst, mask, nds, src, merge, vector_len); break;
+ case T_LONG:
+ evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
+ default:
+ fatal("Unexpected type argument %s", type2name(type)); break;
+ }
+}
+
+void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ switch(type) {
+ case T_BYTE:
+ evpminub(dst, mask, nds, src, merge, vector_len); break;
+ case T_SHORT:
+ evpminuw(dst, mask, nds, src, merge, vector_len); break;
+ case T_INT:
+ evpminud(dst, mask, nds, src, merge, vector_len); break;
+ case T_LONG:
+ evpminuq(dst, mask, nds, src, merge, vector_len); break;
+ default:
+ fatal("Unexpected type argument %s", type2name(type)); break;
+ }
+}
+
+void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+ switch(type) {
+ case T_BYTE:
+ evpmaxub(dst, mask, nds, src, merge, vector_len); break;
+ case T_SHORT:
+ evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
+ case T_INT:
+ evpmaxud(dst, mask, nds, src, merge, vector_len); break;
+ case T_LONG:
+ evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
+ default:
+ fatal("Unexpected type argument %s", type2name(type)); break;
+ }
+}
+
void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
switch(type) {
case T_BYTE:
@@ -10213,17 +10297,6 @@ Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond)
ShouldNotReachHere(); return Assembler::overflow;
}
-SkipIfEqual::SkipIfEqual(
- MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) {
- _masm = masm;
- _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch);
- _masm->jcc(Assembler::equal, _label);
-}
-
-SkipIfEqual::~SkipIfEqual() {
- _masm->bind(_label);
-}
-
// 32-bit Windows has its own fast-path implementation
// of get_thread
#if !defined(WIN32) || defined(_LP64)
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index e6de99eb207..d508feed93c 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -1282,6 +1282,7 @@ class MacroAssembler: public Assembler {
// AVX512 Unaligned
void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
+ void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
@@ -1295,6 +1296,7 @@ class MacroAssembler: public Assembler {
void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
+ void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
@@ -1505,6 +1507,8 @@ class MacroAssembler: public Assembler {
void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
+ void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
+
void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
@@ -1514,9 +1518,13 @@ class MacroAssembler: public Assembler {
void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
+ void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
+ void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
+
void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
+ using Assembler::evpsllw;
void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
if (!is_varshift) {
Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
@@ -1561,6 +1569,7 @@ class MacroAssembler: public Assembler {
Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
}
}
+ using Assembler::evpsraw;
void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
if (!is_varshift) {
Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
@@ -1568,6 +1577,7 @@ class MacroAssembler: public Assembler {
Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
}
}
+ using Assembler::evpsrad;
void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
if (!is_varshift) {
Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
@@ -1589,6 +1599,11 @@ class MacroAssembler: public Assembler {
void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
+ void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+ void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
+
void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
@@ -2162,22 +2177,4 @@ class MacroAssembler: public Assembler {
#endif
};
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and it's
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */
-class SkipIfEqual {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
- ~SkipIfEqual();
-};
-
#endif // CPU_X86_MACROASSEMBLER_X86_HPP
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
index e23c83ed197..93b1618024e 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -4032,6 +4032,8 @@ void StubGenerator::generate_compiler_stubs() {
generate_chacha_stubs();
+ generate_sha3_stubs();
+
#ifdef COMPILER2
if ((UseAVX == 2) && EnableX86ECoreOpts) {
generate_string_indexof(StubRoutines::_string_indexof_array);
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp
index 7280e9fbe95..c6fa31c5213 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp
@@ -497,6 +497,10 @@ class StubGenerator: public StubCodeGenerator {
address generate_intpoly_montgomeryMult_P256();
address generate_intpoly_assign();
+ // SHA3 stubs
+ void generate_sha3_stubs();
+ address generate_sha3_implCompress(bool multiBlock, const char *name);
+
// BASE64 stubs
address base64_shuffle_addr();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp
new file mode 100644
index 00000000000..49c39226708
--- /dev/null
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "macroAssembler_x86.hpp"
+#include "stubGenerator_x86_64.hpp"
+
+#define __ _masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif // PRODUCT
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+// Constants
+ATTRIBUTE_ALIGNED(64) static const uint64_t round_consts_arr[24] = {
+ 0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL,
+ 0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L,
+ 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008AL,
+ 0x0000000000000088L, 0x0000000080008009L, 0x000000008000000AL,
+ 0x000000008000808BL, 0x800000000000008BL, 0x8000000000008089L,
+ 0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L,
+ 0x000000000000800AL, 0x800000008000000AL, 0x8000000080008081L,
+ 0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L
+ };
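+
+// Reference sketch (not part of the stub): the table above is the standard
+// FIPS 202 iota round-constant sequence, which can be regenerated with the
+// usual degree-8 LFSR (feedback polynomial x^8 + x^6 + x^5 + x^4 + 1).
+// Handy as a cross-check when editing the table; the function name below is
+// illustrative only.
+//
+//   static uint64_t keccak_round_constant(int round) {
+//     uint64_t rc = 0;
+//     uint8_t lfsr = 1;
+//     // LFSR output bit j of this round's window (j = t - 7*round) lands at
+//     // bit position 2^j - 1; the output is the low bit, read before stepping.
+//     for (int t = 0; t < 7 * (round + 1); t++) {
+//       if (t >= 7 * round && (lfsr & 1) != 0) {
+//         rc |= 1ULL << ((1 << (t - 7 * round)) - 1);
+//       }
+//       lfsr = ((lfsr & 0x80) != 0) ? (uint8_t)((lfsr << 1) ^ 0x71)
+//                                   : (uint8_t)(lfsr << 1);
+//     }
+//     return rc;
+//   }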
+
+ATTRIBUTE_ALIGNED(64) static const uint64_t permsAndRots[] = {
+ // permutation in combined rho and pi
+ 9, 2, 11, 0, 1, 2, 3, 4, // step 1 and 3
+ 8, 1, 9, 2, 11, 4, 12, 0, // step 2
+ 9, 2, 10, 3, 11, 4, 12, 0, // step 4
+ 8, 9, 2, 3, 4, 5, 6, 7, // step 5
+ 0, 8, 9, 10, 15, 0, 0, 0, // step 6
+ 4, 5, 8, 9, 6, 7, 10, 11, // step 7 and 8
+ 0, 1, 2, 3, 13, 0, 0, 0, // step 9
+ 2, 3, 0, 1, 11, 0, 0, 0, // step 10
+ 4, 5, 6, 7, 14, 0, 0, 0, // step 11
+ 14, 15, 12, 13, 4, 0, 0, 0, // step 12
+ // size of rotations (after step 5)
+ 1, 6, 62, 55, 28, 20, 27, 36,
+ 3, 45, 10, 15, 25, 8, 39, 41,
+ 44, 43, 21, 18, 2, 61, 56, 14,
+ // rotation of row elements
+ 12, 8, 9, 10, 11, 5, 6, 7,
+ 9, 10, 11, 12, 8, 5, 6, 7
+};
+
+static address round_constsAddr() {
+ return (address) round_consts_arr;
+}
+
+static address permsAndRotsAddr() {
+ return (address) permsAndRots;
+}
+
+void StubGenerator::generate_sha3_stubs() {
+ if (UseSHA3Intrinsics) {
+    StubRoutines::_sha3_implCompress = generate_sha3_implCompress(false, "sha3_implCompress");
+ StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(true, "sha3_implCompressMB");
+ }
+}
+
+// Arguments:
+//
+// Inputs:
+// c_rarg0 - byte[] source+offset
+// c_rarg1 - long[] SHA3.state
+// c_rarg2 - int block_size
+// c_rarg3 - int offset
+// c_rarg4 - int limit
+//
+address StubGenerator::generate_sha3_implCompress(bool multiBlock, const char *name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+ address start = __ pc();
+
+ const Register buf = c_rarg0;
+ const Register state = c_rarg1;
+ const Register block_size = c_rarg2;
+ const Register ofs = c_rarg3;
+#ifndef _WIN64
+ const Register limit = c_rarg4;
+#else
+ const Address limit_mem(rbp, 6 * wordSize);
+ const Register limit = r12;
+#endif
+
+ const Register permsAndRots = r10;
+ const Register round_consts = r11;
+ const Register constant2use = r13;
+ const Register roundsLeft = r14;
+
+ Label sha3_loop;
+ Label rounds24_loop, block104, block136, block144, block168;
+
+ __ enter();
+
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+
+#ifdef _WIN64
+ // on win64, fill limit from stack position
+ __ movptr(limit, limit_mem);
+#endif
+
+ __ lea(permsAndRots, ExternalAddress(permsAndRotsAddr()));
+ __ lea(round_consts, ExternalAddress(round_constsAddr()));
+
+ // set up the masks
+ __ movl(rax, 0x1F);
+ __ kmovwl(k5, rax);
+ __ kshiftrwl(k4, k5, 1);
+ __ kshiftrwl(k3, k5, 2);
+ __ kshiftrwl(k2, k5, 3);
+ __ kshiftrwl(k1, k5, 4);
+
+ // load the state
+ __ evmovdquq(xmm0, k5, Address(state, 0), false, Assembler::AVX_512bit);
+ __ evmovdquq(xmm1, k5, Address(state, 40), false, Assembler::AVX_512bit);
+ __ evmovdquq(xmm2, k5, Address(state, 80), false, Assembler::AVX_512bit);
+ __ evmovdquq(xmm3, k5, Address(state, 120), false, Assembler::AVX_512bit);
+ __ evmovdquq(xmm4, k5, Address(state, 160), false, Assembler::AVX_512bit);
+
+ // load the permutation and rotation constants
+ __ evmovdquq(xmm17, Address(permsAndRots, 0), Assembler::AVX_512bit);
+ __ evmovdquq(xmm18, Address(permsAndRots, 64), Assembler::AVX_512bit);
+ __ evmovdquq(xmm19, Address(permsAndRots, 128), Assembler::AVX_512bit);
+ __ evmovdquq(xmm20, Address(permsAndRots, 192), Assembler::AVX_512bit);
+ __ evmovdquq(xmm21, Address(permsAndRots, 256), Assembler::AVX_512bit);
+ __ evmovdquq(xmm22, Address(permsAndRots, 320), Assembler::AVX_512bit);
+ __ evmovdquq(xmm23, Address(permsAndRots, 384), Assembler::AVX_512bit);
+ __ evmovdquq(xmm24, Address(permsAndRots, 448), Assembler::AVX_512bit);
+ __ evmovdquq(xmm25, Address(permsAndRots, 512), Assembler::AVX_512bit);
+ __ evmovdquq(xmm26, Address(permsAndRots, 576), Assembler::AVX_512bit);
+ __ evmovdquq(xmm27, Address(permsAndRots, 640), Assembler::AVX_512bit);
+ __ evmovdquq(xmm28, Address(permsAndRots, 704), Assembler::AVX_512bit);
+ __ evmovdquq(xmm29, Address(permsAndRots, 768), Assembler::AVX_512bit);
+ __ evmovdquq(xmm30, Address(permsAndRots, 832), Assembler::AVX_512bit);
+ __ evmovdquq(xmm31, Address(permsAndRots, 896), Assembler::AVX_512bit);
+
+ __ BIND(sha3_loop);
+
+  // there will be 24 Keccak rounds
+ __ movl(roundsLeft, 24);
+ // load round_constants base
+ __ movptr(constant2use, round_consts);
+
+ // load input: 72, 104, 136, 144 or 168 bytes
+ // i.e. 5+4, 2*5+3, 3*5+2, 3*5+3 or 4*5+1 longs
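+  // (a SHA3 block, i.e. the rate, is 200 - 2 * digest_size bytes: e.g.
+  // SHA3-256 has a 32-byte digest, so its block size is 200 - 64 = 136)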
+ __ evpxorq(xmm0, k5, xmm0, Address(buf, 0), true, Assembler::AVX_512bit);
+
+ // if(blockSize == 72) SHA3-512
+ __ cmpl(block_size, 72);
+ __ jcc(Assembler::notEqual, block104);
+ __ evpxorq(xmm1, k4, xmm1, Address(buf, 40), true, Assembler::AVX_512bit);
+ __ jmp(rounds24_loop);
+
+ // if(blockSize == 104) SHA3-384
+ __ BIND(block104);
+ __ cmpl(block_size, 104);
+ __ jcc(Assembler::notEqual, block136);
+ __ evpxorq(xmm1, k5, xmm1, Address(buf, 40), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm2, k3, xmm2, Address(buf, 80), true, Assembler::AVX_512bit);
+ __ jmp(rounds24_loop);
+
+ // if(blockSize == 136) SHA3-256 and SHAKE256
+ __ BIND(block136);
+ __ cmpl(block_size, 136);
+ __ jcc(Assembler::notEqual, block144);
+ __ evpxorq(xmm1, k5, xmm1, Address(buf, 40), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm2, k5, xmm2, Address(buf, 80), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm3, k2, xmm3, Address(buf, 120), true, Assembler::AVX_512bit);
+ __ jmp(rounds24_loop);
+
+ // if(blockSize == 144) SHA3-224
+ __ BIND(block144);
+ __ cmpl(block_size, 144);
+ __ jcc(Assembler::notEqual, block168);
+ __ evpxorq(xmm1, k5, xmm1, Address(buf, 40), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm2, k5, xmm2, Address(buf, 80), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm3, k3, xmm3, Address(buf, 120), true, Assembler::AVX_512bit);
+ __ jmp(rounds24_loop);
+
+ // if(blockSize == 168) SHAKE128
+ __ BIND(block168);
+ __ evpxorq(xmm1, k5, xmm1, Address(buf, 40), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm2, k5, xmm2, Address(buf, 80), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm3, k5, xmm3, Address(buf, 120), true, Assembler::AVX_512bit);
+ __ evpxorq(xmm4, k1, xmm4, Address(buf, 160), true, Assembler::AVX_512bit);
+
+  // The 24 rounds of the Keccak transformation.
+ // The implementation closely follows the Java version, with the state
+ // array "rows" in the lowest 5 64-bit slots of zmm0 - zmm4, i.e.
+ // each row of the SHA3 specification is located in one zmm register.
+ __ BIND(rounds24_loop);
+ __ subl(roundsLeft, 1);
+
+ __ evmovdquw(xmm5, xmm0, Assembler::AVX_512bit);
+ // vpternlogq(x, 150, y, z) does x = x ^ y ^ z
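+  // (the immediate 150 == 0x96 is the truth table of three-input XOR,
+  // i.e. the parity of the three input bits)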
+ __ vpternlogq(xmm5, 150, xmm1, xmm2, Assembler::AVX_512bit);
+ __ vpternlogq(xmm5, 150, xmm3, xmm4, Assembler::AVX_512bit);
+ // Now the "c row", i.e. c0-c4 are in zmm5.
+ // Rotate each element of the c row by one bit to zmm6, call the
+ // rotated version c'.
+ __ evprolq(xmm6, xmm5, 1, Assembler::AVX_512bit);
+ // Rotate elementwise the c row so that c4 becomes c0,
+ // c0 becomes c1, etc.
+ __ evpermt2q(xmm5, xmm30, xmm5, Assembler::AVX_512bit);
+  // Rotate elementwise the c' row so that c'0 becomes c'4,
+ // c'1 becomes c'0, etc.
+ __ evpermt2q(xmm6, xmm31, xmm6, Assembler::AVX_512bit);
+ __ vpternlogq(xmm0, 150, xmm5, xmm6, Assembler::AVX_512bit);
+ __ vpternlogq(xmm1, 150, xmm5, xmm6, Assembler::AVX_512bit);
+ __ vpternlogq(xmm2, 150, xmm5, xmm6, Assembler::AVX_512bit);
+ __ vpternlogq(xmm3, 150, xmm5, xmm6, Assembler::AVX_512bit);
+ __ vpternlogq(xmm4, 150, xmm5, xmm6, Assembler::AVX_512bit);
+ // Now the theta mapping has been finished.
+
+ // Do the cyclical permutation of the 24 moving state elements
+ // and the required rotations within each element (the combined
+  // rho and pi steps).
+ __ evpermt2q(xmm4, xmm17, xmm3, Assembler::AVX_512bit);
+ __ evpermt2q(xmm3, xmm18, xmm2, Assembler::AVX_512bit);
+ __ evpermt2q(xmm2, xmm17, xmm1, Assembler::AVX_512bit);
+ __ evpermt2q(xmm1, xmm19, xmm0, Assembler::AVX_512bit);
+ __ evpermt2q(xmm4, xmm20, xmm2, Assembler::AVX_512bit);
+  // The 24 moving elements are now in zmm1, zmm3 and zmm4;
+  // do the rotations now.
+ __ evprolvq(xmm1, xmm1, xmm27, Assembler::AVX_512bit);
+ __ evprolvq(xmm3, xmm3, xmm28, Assembler::AVX_512bit);
+ __ evprolvq(xmm4, xmm4, xmm29, Assembler::AVX_512bit);
+ __ evmovdquw(xmm2, xmm1, Assembler::AVX_512bit);
+ __ evmovdquw(xmm5, xmm3, Assembler::AVX_512bit);
+ __ evpermt2q(xmm0, xmm21, xmm4, Assembler::AVX_512bit);
+ __ evpermt2q(xmm1, xmm22, xmm3, Assembler::AVX_512bit);
+ __ evpermt2q(xmm5, xmm22, xmm2, Assembler::AVX_512bit);
+ __ evmovdquw(xmm3, xmm1, Assembler::AVX_512bit);
+ __ evmovdquw(xmm2, xmm5, Assembler::AVX_512bit);
+ __ evpermt2q(xmm1, xmm23, xmm4, Assembler::AVX_512bit);
+ __ evpermt2q(xmm2, xmm24, xmm4, Assembler::AVX_512bit);
+ __ evpermt2q(xmm3, xmm25, xmm4, Assembler::AVX_512bit);
+ __ evpermt2q(xmm4, xmm26, xmm5, Assembler::AVX_512bit);
+  // The combined rho and pi steps are done.
+
+ // Do the chi step (the same operation on all 5 rows).
+ // vpternlogq(x, 180, y, z) does x = x ^ (y & ~z).
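+  // With xmm5 holding each row rotated so that lane x contains a[x+1], and
+  // xmm6 rotated once more so that lane x contains a[x+2], this computes the
+  // spec's a[x] ^= ~a[x+1] & a[x+2] lane-wise (the immediate 180 == 0xB4).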
+ __ evpermt2q(xmm5, xmm31, xmm0, Assembler::AVX_512bit);
+ __ evpermt2q(xmm6, xmm31, xmm5, Assembler::AVX_512bit);
+ __ vpternlogq(xmm0, 180, xmm6, xmm5, Assembler::AVX_512bit);
+
+ __ evpermt2q(xmm5, xmm31, xmm1, Assembler::AVX_512bit);
+ __ evpermt2q(xmm6, xmm31, xmm5, Assembler::AVX_512bit);
+ __ vpternlogq(xmm1, 180, xmm6, xmm5, Assembler::AVX_512bit);
+
+  // xor the round constant into a0 (the lowest 64 bits of zmm0)
+ __ evpxorq(xmm0, k1, xmm0, Address(constant2use, 0), true, Assembler::AVX_512bit);
+ __ addptr(constant2use, 8);
+
+ __ evpermt2q(xmm5, xmm31, xmm2, Assembler::AVX_512bit);
+ __ evpermt2q(xmm6, xmm31, xmm5, Assembler::AVX_512bit);
+ __ vpternlogq(xmm2, 180, xmm6, xmm5, Assembler::AVX_512bit);
+
+ __ evpermt2q(xmm5, xmm31, xmm3, Assembler::AVX_512bit);
+ __ evpermt2q(xmm6, xmm31, xmm5, Assembler::AVX_512bit);
+ __ vpternlogq(xmm3, 180, xmm6, xmm5, Assembler::AVX_512bit);
+
+ __ evpermt2q(xmm5, xmm31, xmm4, Assembler::AVX_512bit);
+ __ evpermt2q(xmm6, xmm31, xmm5, Assembler::AVX_512bit);
+ __ vpternlogq(xmm4, 180, xmm6, xmm5, Assembler::AVX_512bit);
+ __ cmpl(roundsLeft, 0);
+ __ jcc(Assembler::notEqual, rounds24_loop);
+
+ if (multiBlock) {
+ __ addptr(buf, block_size);
+ __ addl(ofs, block_size);
+ __ cmpl(ofs, limit);
+ __ jcc(Assembler::lessEqual, sha3_loop);
+ __ movq(rax, ofs); // return ofs
+ } else {
+ __ xorq(rax, rax); // return 0
+ }
+
+ // store the state
+ __ evmovdquq(Address(state, 0), k5, xmm0, true, Assembler::AVX_512bit);
+ __ evmovdquq(Address(state, 40), k5, xmm1, true, Assembler::AVX_512bit);
+ __ evmovdquq(Address(state, 80), k5, xmm2, true, Assembler::AVX_512bit);
+ __ evmovdquq(Address(state, 120), k5, xmm3, true, Assembler::AVX_512bit);
+ __ evmovdquq(Address(state, 160), k5, xmm4, true, Assembler::AVX_512bit);
+
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ ret(0);
+
+ return start;
+}
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index 63347c51d60..f8c5de551cd 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -1316,9 +1316,16 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
- if (UseSHA3Intrinsics) {
- warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
- FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
+#ifdef _LP64
+ if (supports_evex() && supports_avx512bw()) {
+ if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
+ UseSHA3Intrinsics = true;
+ }
+ } else
+#endif
+ if (UseSHA3Intrinsics) {
+ warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
+ FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
}
if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 43c959bb917..cbe05429fc4 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1765,6 +1765,12 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
return false;
}
break;
+ case Op_UMinV:
+ case Op_UMaxV:
+ if (UseAVX == 0) {
+ return false;
+ }
+ break;
case Op_MaxV:
case Op_MinV:
if (UseSSE < 4 && is_integral_type(bt)) {
@@ -1935,6 +1941,15 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
return false;
}
break;
+ case Op_SaturatingAddV:
+ case Op_SaturatingSubV:
+ if (UseAVX < 1) {
+ return false; // Implementation limitation
+ }
+ if (is_subword_type(bt) && size_in_bits == 512 && !VM_Version::supports_avx512bw()) {
+ return false;
+ }
+ break;
case Op_SelectFromTwoVector:
if (size_in_bits < 128 || (size_in_bits < 512 && !VM_Version::supports_avx512vl())) {
return false;
@@ -2125,6 +2140,8 @@ bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType
case Op_MaxV:
case Op_MinV:
+ case Op_UMinV:
+ case Op_UMaxV:
if (is_subword_type(bt) && !VM_Version::supports_avx512bw()) {
return false; // Implementation limitation
}
@@ -2132,6 +2149,15 @@ bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType
return false; // Implementation limitation
}
return true;
+ case Op_SaturatingAddV:
+ case Op_SaturatingSubV:
+ if (!is_subword_type(bt)) {
+ return false;
+ }
+ if (size_in_bits < 128 || !VM_Version::supports_avx512bw()) {
+ return false; // Implementation limitation
+ }
+ return true;
case Op_VectorMaskCmp:
if (is_subword_type(bt) && !VM_Version::supports_avx512bw()) {
@@ -6492,6 +6518,80 @@ instruct evminmaxFP_reg_eavx(vec dst, vec a, vec b, vec atmp, vec btmp, kReg ktm
ins_pipe( pipe_slow );
%}
+// ------------------------------ Unsigned vector Min/Max ----------------------
+
+instruct vector_uminmax_reg(vec dst, vec a, vec b) %{
+ predicate(VM_Version::supports_avx512vl() || Matcher::vector_element_basic_type(n) != T_LONG);
+ match(Set dst (UMinV a b));
+ match(Set dst (UMaxV a b));
+ format %{ "vector_uminmax $dst,$a,$b\t!" %}
+ ins_encode %{
+ int opcode = this->ideal_Opcode();
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ assert(is_integral_type(elem_bt), "");
+ __ vpuminmax(opcode, elem_bt, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, vlen_enc);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vector_uminmax_mem(vec dst, vec a, memory b) %{
+ predicate(VM_Version::supports_avx512vl() || Matcher::vector_element_basic_type(n) != T_LONG);
+ match(Set dst (UMinV a (LoadVector b)));
+ match(Set dst (UMaxV a (LoadVector b)));
+ format %{ "vector_uminmax $dst,$a,$b\t!" %}
+ ins_encode %{
+ int opcode = this->ideal_Opcode();
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ assert(is_integral_type(elem_bt), "");
+ __ vpuminmax(opcode, elem_bt, $dst$$XMMRegister, $a$$XMMRegister, $b$$Address, vlen_enc);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vector_uminmaxq_reg(vec dst, vec a, vec b, vec xtmp1, vec xtmp2) %{
+ predicate(!VM_Version::supports_avx512vl() && Matcher::vector_element_basic_type(n) == T_LONG);
+ match(Set dst (UMinV a b));
+ match(Set dst (UMaxV a b));
+ effect(TEMP xtmp1, TEMP xtmp2);
+ format %{ "vector_uminmaxq $dst,$a,$b\t! using xtmp1 and xtmp2 as TEMP" %}
+ ins_encode %{
+ int opcode = this->ideal_Opcode();
+ int vlen_enc = vector_length_encoding(this);
+ __ vpuminmaxq(opcode, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, vlen_enc);
+ %}
+ ins_pipe( pipe_slow );
+%}
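+
+// AVX2 and below have no unsigned 64-bit min/max instruction, so the long
+// rule above presumably biases both operands by 0x8000000000000000 into the
+// temporaries, performs a signed vpcmpgtq, and blends the original lanes
+// with the resulting mask.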
+
+instruct vector_uminmax_reg_masked(vec dst, vec src2, kReg mask) %{
+  match(Set dst (UMinV (Binary dst src2) mask));
+  match(Set dst (UMaxV (Binary dst src2) mask));
+  format %{ "vector_uminmax_masked $dst, $dst, $src2, $mask\t! umin/max masked operation" %}
+  ins_encode %{
+    int vlen_enc = vector_length_encoding(this);
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    int opc = this->ideal_Opcode();
+    __ evmasked_op(opc, bt, $mask$$KRegister, $dst$$XMMRegister,
+                   $dst$$XMMRegister, $src2$$XMMRegister, true, vlen_enc);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vector_uminmax_mem_masked(vec dst, memory src2, kReg mask) %{
+  match(Set dst (UMinV (Binary dst (LoadVector src2)) mask));
+  match(Set dst (UMaxV (Binary dst (LoadVector src2)) mask));
+  format %{ "vector_uminmax_masked $dst, $dst, $src2, $mask\t! umin/max masked operation" %}
+  ins_encode %{
+    int vlen_enc = vector_length_encoding(this);
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    int opc = this->ideal_Opcode();
+    __ evmasked_op(opc, bt, $mask$$KRegister, $dst$$XMMRegister,
+                   $dst$$XMMRegister, $src2$$Address, true, vlen_enc);
+  %}
+  ins_pipe( pipe_slow );
+%}
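+
+// These rules back the Vector API's unsigned lanewise operators
+// (VectorOperators.UMIN/UMAX), including their mask-accepting forms
+// handled by the two *_masked rules above.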
+
// --------------------------------- Signum/CopySign ---------------------------
instruct signumF_reg(regF dst, regF zero, regF one, rFlagsReg cr) %{
@@ -10484,6 +10584,236 @@ instruct DoubleClassCheck_reg_reg_vfpclass(rRegI dst, regD src, kReg ktmp, rFlag
ins_pipe(pipe_slow);
%}
+instruct vector_addsub_saturating_subword_reg(vec dst, vec src1, vec src2)
+%{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && !n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV src1 src2));
+ match(Set dst (SaturatingSubV src1 src2));
+ format %{ "vector_addsub_saturating_subword $dst, $src1, $src2" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_saturating_op(this->ideal_Opcode(), elem_bt, $dst$$XMMRegister,
+ $src1$$XMMRegister, $src2$$XMMRegister, false, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
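+
+// The subword rules map directly onto the x86 saturating-arithmetic
+// instructions (vpadds[bw]/vpsubs[bw] here, and the unsigned
+// vpaddus[bw]/vpsubus[bw] counterparts in the rule below); presumably
+// vector_saturating_op only selects the opcode from elem_bt and the
+// unsigned flag.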
+
+instruct vector_addsub_saturating_unsigned_subword_reg(vec dst, vec src1, vec src2)
+%{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV src1 src2));
+ match(Set dst (SaturatingSubV src1 src2));
+ format %{ "vector_addsub_saturating_unsigned_subword $dst, $src1, $src2" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_saturating_op(this->ideal_Opcode(), elem_bt, $dst$$XMMRegister,
+ $src1$$XMMRegister, $src2$$XMMRegister, true, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vector_addsub_saturating_reg_evex(vec dst, vec src1, vec src2, vec xtmp1, vec xtmp2, kReg ktmp1, kReg ktmp2)
+%{
+ predicate(!is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && !n->as_SaturatingVector()->is_unsigned() &&
+ (Matcher::vector_length_in_bytes(n) == 64 || VM_Version::supports_avx512vl()));
+ match(Set dst (SaturatingAddV src1 src2));
+ match(Set dst (SaturatingSubV src1 src2));
+ effect(TEMP dst, TEMP xtmp1, TEMP xtmp2, TEMP ktmp1, TEMP ktmp2);
+ format %{ "vector_addsub_saturating_evex $dst, $src1, $src2 \t! using $xtmp1, $xtmp2, $ktmp1 and $ktmp2 as TEMP" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_addsub_dq_saturating_evex(this->ideal_Opcode(), elem_bt, $dst$$XMMRegister,
+ $src1$$XMMRegister, $src2$$XMMRegister,
+ $xtmp1$$XMMRegister, $xtmp2$$XMMRegister,
+ $ktmp1$$KRegister, $ktmp2$$KRegister, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vector_addsub_saturating_reg_avx(vec dst, vec src1, vec src2, vec xtmp1, vec xtmp2, vec xtmp3, vec xtmp4)
+%{
+ predicate(!is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && !n->as_SaturatingVector()->is_unsigned() &&
+ Matcher::vector_length_in_bytes(n) <= 32 && !VM_Version::supports_avx512vl());
+ match(Set dst (SaturatingAddV src1 src2));
+ match(Set dst (SaturatingSubV src1 src2));
+ effect(TEMP dst, TEMP xtmp1, TEMP xtmp2, TEMP xtmp3, TEMP xtmp4);
+ format %{ "vector_addsub_saturating_avx $dst, $src1, $src2 \t! using $xtmp1, $xtmp2, $xtmp3 and $xtmp4 as TEMP" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_addsub_dq_saturating_avx(this->ideal_Opcode(), elem_bt, $dst$$XMMRegister, $src1$$XMMRegister,
+ $src2$$XMMRegister, $xtmp1$$XMMRegister, $xtmp2$$XMMRegister,
+ $xtmp3$$XMMRegister, $xtmp4$$XMMRegister, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
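+
+// For reference, the lane-wise semantics the two rules above emulate for
+// int/long lanes (which, unlike the subword cases, have no native saturating
+// instructions) correspond to this scalar sketch (illustrative name;
+// __builtin_add_overflow assumes a GCC/Clang-style compiler):
+//
+//   static int64_t sat_add_s64(int64_t a, int64_t b) {
+//     int64_t r;
+//     if (__builtin_add_overflow(a, b, &r)) {
+//       // signed overflow implies both operands share a sign; clamp that way
+//       return a < 0 ? INT64_MIN : INT64_MAX;
+//     }
+//     return r;
+//   }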
+
+instruct vector_add_saturating_unsigned_reg_evex(vec dst, vec src1, vec src2, vec xtmp1, vec xtmp2, kReg ktmp)
+%{
+ predicate(!is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned() &&
+ (Matcher::vector_length_in_bytes(n) == 64 || VM_Version::supports_avx512vl()));
+ match(Set dst (SaturatingAddV src1 src2));
+ effect(TEMP dst, TEMP xtmp1, TEMP xtmp2, TEMP ktmp);
+ format %{ "vector_add_saturating_unsigned_evex $dst, $src1, $src2 \t! using $xtmp1, $xtmp2 and $ktmp as TEMP" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_add_dq_saturating_unsigned_evex(elem_bt, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister,
+ $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $ktmp$$KRegister, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vector_add_saturating_unsigned_reg_avx(vec dst, vec src1, vec src2, vec xtmp1, vec xtmp2, vec xtmp3)
+%{
+ predicate(!is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned() &&
+ Matcher::vector_length_in_bytes(n) <= 32 && !VM_Version::supports_avx512vl());
+ match(Set dst (SaturatingAddV src1 src2));
+ effect(TEMP dst, TEMP xtmp1, TEMP xtmp2, TEMP xtmp3);
+ format %{ "vector_add_saturating_unsigned_avx $dst, $src1, $src2 \t! using $xtmp1, $xtmp2 and $xtmp3 as TEMP" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_add_dq_saturating_unsigned_avx(elem_bt, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister,
+ $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $xtmp3$$XMMRegister, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vector_sub_saturating_unsigned_reg_evex(vec dst, vec src1, vec src2, kReg ktmp)
+%{
+ predicate(!is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned() &&
+ (Matcher::vector_length_in_bytes(n) == 64 || VM_Version::supports_avx512vl()));
+ match(Set dst (SaturatingSubV src1 src2));
+ effect(TEMP ktmp);
+ format %{ "vector_sub_saturating_unsigned_evex $dst, $src1, $src2 \t! using $ktmp as TEMP" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_sub_dq_saturating_unsigned_evex(elem_bt, $dst$$XMMRegister, $src1$$XMMRegister,
+ $src2$$XMMRegister, $ktmp$$KRegister, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vector_sub_saturating_unsigned_reg_avx(vec dst, vec src1, vec src2, vec xtmp1, vec xtmp2)
+%{
+ predicate(!is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned() &&
+ Matcher::vector_length_in_bytes(n) <= 32 && !VM_Version::supports_avx512vl());
+ match(Set dst (SaturatingSubV src1 src2));
+ effect(TEMP dst, TEMP xtmp1, TEMP xtmp2);
+ format %{ "vector_sub_saturating_unsigned_avx $dst, $src1, $src2 \t! using $xtmp1 and $xtmp2 as TEMP" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_sub_dq_saturating_unsigned_avx(elem_bt, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister,
+ $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
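+
+// The unsigned variants clamp at the type bounds instead: lane-wise,
+// saturating unsigned subtraction is (a < b) ? 0 : a - b, which the evex
+// rule above presumably expresses with one unsigned compare into $ktmp
+// followed by a masked subtract.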
+
+instruct vector_addsub_saturating_subword_mem(vec dst, vec src1, memory src2)
+%{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && !n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV src1 (LoadVector src2)));
+ match(Set dst (SaturatingSubV src1 (LoadVector src2)));
+ format %{ "vector_addsub_saturating_subword $dst, $src1, $src2" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_saturating_op(this->ideal_Opcode(), elem_bt, $dst$$XMMRegister,
+ $src1$$XMMRegister, $src2$$Address, false, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vector_addsub_saturating_unsigned_subword_mem(vec dst, vec src1, memory src2)
+%{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV src1 (LoadVector src2)));
+ match(Set dst (SaturatingSubV src1 (LoadVector src2)));
+ format %{ "vector_addsub_saturating_unsigned_subword $dst, $src1, $src2" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ vector_saturating_op(this->ideal_Opcode(), elem_bt, $dst$$XMMRegister,
+ $src1$$XMMRegister, $src2$$Address, true, vlen_enc);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vector_addsub_saturating_subword_masked_reg(vec dst, vec src, kReg mask) %{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && !n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV (Binary dst src) mask));
+ match(Set dst (SaturatingSubV (Binary dst src) mask));
+ format %{ "vector_addsub_saturating_subword_masked $dst, $mask, $src" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ evmasked_saturating_op(this->ideal_Opcode(), elem_bt, $mask$$KRegister, $dst$$XMMRegister,
+ $dst$$XMMRegister, $src$$XMMRegister, false, true, vlen_enc);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vector_addsub_saturating_unsigned_subword_masked_reg(vec dst, vec src, kReg mask) %{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV (Binary dst src) mask));
+ match(Set dst (SaturatingSubV (Binary dst src) mask));
+ format %{ "vector_addsub_saturating_unsigned_subword_masked $dst, $mask, $src" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ evmasked_saturating_op(this->ideal_Opcode(), elem_bt, $mask$$KRegister, $dst$$XMMRegister,
+ $dst$$XMMRegister, $src$$XMMRegister, true, true, vlen_enc);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vector_addsub_saturating_subword_masked_mem(vec dst, memory src, kReg mask) %{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && !n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV (Binary dst (LoadVector src)) mask));
+ match(Set dst (SaturatingSubV (Binary dst (LoadVector src)) mask));
+ format %{ "vector_addsub_saturating_subword_masked $dst, $mask, $src" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ evmasked_saturating_op(this->ideal_Opcode(), elem_bt, $mask$$KRegister, $dst$$XMMRegister,
+ $dst$$XMMRegister, $src$$Address, false, true, vlen_enc);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vector_addsub_saturating_unsigned_subword_masked_mem(vec dst, memory src, kReg mask) %{
+ predicate(is_subword_type(Matcher::vector_element_basic_type(n)) &&
+ n->is_SaturatingVector() && n->as_SaturatingVector()->is_unsigned());
+ match(Set dst (SaturatingAddV (Binary dst (LoadVector src)) mask));
+ match(Set dst (SaturatingSubV (Binary dst (LoadVector src)) mask));
+ format %{ "vector_addsub_saturating_unsigned_subword_masked $dst, $mask, $src" %}
+ ins_encode %{
+ int vlen_enc = vector_length_encoding(this);
+ BasicType elem_bt = Matcher::vector_element_basic_type(this);
+ __ evmasked_saturating_op(this->ideal_Opcode(), elem_bt, $mask$$KRegister, $dst$$XMMRegister,
+ $dst$$XMMRegister, $src$$Address, true, true, vlen_enc);
+ %}
+ ins_pipe( pipe_slow );
+%}
instruct vector_selectfrom_twovectors_reg_evex(vec index, vec src1, vec src2)
%{
diff --git a/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp b/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp
deleted file mode 100644
index b0e23a1716a..00000000000
--- a/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xNUMA.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-void XNUMA::pd_initialize() {
- _enabled = false;
-}
-
-uint32_t XNUMA::count() {
- return 1;
-}
-
-uint32_t XNUMA::id() {
- return 0;
-}
-
-uint32_t XNUMA::memory_id(uintptr_t addr) {
- // NUMA support not enabled, assume everything belongs to node zero
- return 0;
-}
diff --git a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp
deleted file mode 100644
index 2c64c3788d3..00000000000
--- a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/x/xErrno.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "gc/x/xLargePages.inline.hpp"
-#include "gc/x/xPhysicalMemory.inline.hpp"
-#include "gc/x/xPhysicalMemoryBacking_bsd.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-
-#include <mach/mach.h>
-#include <mach/mach_vm.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-
-// The backing is represented by a reserved virtual address space, in which
-// we commit and uncommit physical memory. Multi-mapping the different heap
-// views is done by simply remapping the backing memory using mach_vm_remap().
-
-static int vm_flags_superpage() {
- if (!XLargePages::is_explicit()) {
- return 0;
- }
-
- const int page_size_in_megabytes = XGranuleSize >> 20;
- return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
-}
-
-static XErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
- mach_vm_address_t remap_addr = to_addr;
- vm_prot_t remap_cur_prot;
- vm_prot_t remap_max_prot;
-
- // Remap memory to an additional location
- const kern_return_t res = mach_vm_remap(mach_task_self(),
- &remap_addr,
- size,
- 0 /* mask */,
- VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
- mach_task_self(),
- from_addr,
- FALSE /* copy */,
- &remap_cur_prot,
- &remap_max_prot,
- VM_INHERIT_COPY);
-
- return (res == KERN_SUCCESS) ? XErrno(0) : XErrno(EINVAL);
-}
-
-XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
- _base(0),
- _initialized(false) {
-
- // Reserve address space for backing memory
- _base = (uintptr_t)os::reserve_memory(max_capacity);
- if (_base == 0) {
- // Failed
- log_error_pd(gc)("Failed to reserve address space for backing memory");
- return;
- }
-
- // Successfully initialized
- _initialized = true;
-}
-
-bool XPhysicalMemoryBacking::is_initialized() const {
- return _initialized;
-}
-
-void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
- // Does nothing
-}
-
-bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
- assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
- assert(is_aligned(length, os::vm_page_size()), "Invalid length");
-
- log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- offset / M, (offset + length) / M, length / M);
-
- const uintptr_t addr = _base + offset;
- const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- if (res == MAP_FAILED) {
- XErrno err;
- log_error(gc)("Failed to commit memory (%s)", err.to_string());
- return false;
- }
-
- // Success
- return true;
-}
-
-size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
- // Try to commit the whole region
- if (commit_inner(offset, length)) {
- // Success
- return length;
- }
-
- // Failed, try to commit as much as possible
- size_t start = offset;
- size_t end = offset + length;
-
- for (;;) {
- length = align_down((end - start) / 2, XGranuleSize);
- if (length == 0) {
- // Done, don't commit more
- return start - offset;
- }
-
- if (commit_inner(start, length)) {
- // Success, try commit more
- start += length;
- } else {
- // Failed, try commit less
- end -= length;
- }
- }
-}
-
-size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
- assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
- assert(is_aligned(length, os::vm_page_size()), "Invalid length");
-
- log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- offset / M, (offset + length) / M, length / M);
-
- const uintptr_t start = _base + offset;
- const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
- if (res == MAP_FAILED) {
- XErrno err;
- log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
- return 0;
- }
-
- return length;
-}
-
-void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
- const XErrno err = mremap(_base + offset, addr, size);
- if (err) {
- fatal("Failed to remap memory (%s)", err.to_string());
- }
-}
-
-void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
- // Note that we must keep the address space reservation intact and just detach
- // the backing memory. For this reason we map a new anonymous, non-accessible
- // and non-reserved page over the mapping instead of actually unmapping.
- const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
- if (res == MAP_FAILED) {
- XErrno err;
- fatal("Failed to map memory (%s)", err.to_string());
- }
-}
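
For reference, the commit() fallback removed above follows a simple bisection strategy: try the whole region first, then granule-aligned halves, committing as much as possible before giving up. A minimal standalone sketch of that strategy, with hypothetical names (`granule` stands in for XGranuleSize, `try_commit` for commit_inner):

```cpp
#include <cstddef>
#include <functional>

// Sketch of the bisection fallback used by commit() above: try the whole
// range, then repeatedly halve (rounded down to whole granules) until
// nothing more fits. Returns the number of bytes actually committed.
static size_t commit_as_much_as_possible(size_t offset, size_t length,
                                         size_t granule,
                                         const std::function<bool(size_t, size_t)>& try_commit) {
  if (try_commit(offset, length)) {
    return length; // Whole region committed
  }

  size_t start = offset;
  size_t end = offset + length;

  for (;;) {
    // Round down to a whole number of granules
    const size_t half = ((end - start) / 2) / granule * granule;
    if (half == 0) {
      return start - offset; // Done, don't commit more
    }
    if (try_commit(start, half)) {
      start += half; // Success, try to commit more
    } else {
      end -= half;   // Failed, try to commit less
    }
  }
}
```
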
diff --git a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp
deleted file mode 100644
index 8b4747026ff..00000000000
--- a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
-#define OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
-
-class XPhysicalMemoryBacking {
-private:
- uintptr_t _base;
- bool _initialized;
-
- bool commit_inner(size_t offset, size_t length) const;
-
-public:
- XPhysicalMemoryBacking(size_t max_capacity);
-
- bool is_initialized() const;
-
- void warn_commit_limits(size_t max_capacity) const;
-
- size_t commit(size_t offset, size_t length) const;
- size_t uncommit(size_t offset, size_t length) const;
-
- void map(uintptr_t addr, size_t size, uintptr_t offset) const;
- void unmap(uintptr_t addr, size_t size) const;
-};
-
-#endif // OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
diff --git a/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp b/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp
deleted file mode 100644
index 6ad956b1e63..00000000000
--- a/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xLargePages.hpp"
-#include "runtime/globals.hpp"
-
-void XLargePages::pd_initialize() {
- if (UseLargePages) {
- if (UseTransparentHugePages) {
- _state = Transparent;
- } else {
- _state = Explicit;
- }
- } else {
- _state = Disabled;
- }
-}
diff --git a/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp b/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp
deleted file mode 100644
index 96c0f2f92db..00000000000
--- a/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/x/xArray.inline.hpp"
-#include "gc/x/xErrno.hpp"
-#include "gc/x/xMountPoint_linux.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-#include <stdio.h>
-#include <unistd.h>
-
-// Mount information, see proc(5) for more details.
-#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"
-
-XMountPoint::XMountPoint(const char* filesystem, const char** preferred_mountpoints) {
- if (AllocateHeapAt != nullptr) {
- // Use specified path
- _path = os::strdup(AllocateHeapAt, mtGC);
- } else {
- // Find suitable path
- _path = find_mountpoint(filesystem, preferred_mountpoints);
- }
-}
-
-XMountPoint::~XMountPoint() {
- os::free(_path);
- _path = nullptr;
-}
-
-char* XMountPoint::get_mountpoint(const char* line, const char* filesystem) const {
- char* line_mountpoint = nullptr;
- char* line_filesystem = nullptr;
-
- // Parse line and return a newly allocated string containing the mount point if
- // the line contains a matching filesystem and the mount point is accessible by
- // the current user.
- // sscanf, using %m, will return malloced memory. Need raw ::free, not os::free.
- if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
- strcmp(line_filesystem, filesystem) != 0 ||
- access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
- // Not a matching or accessible filesystem
- ALLOW_C_FUNCTION(::free, ::free(line_mountpoint);)
- line_mountpoint = nullptr;
- }
-
- ALLOW_C_FUNCTION(::free, ::free(line_filesystem);)
-
- return line_mountpoint;
-}
-
-void XMountPoint::get_mountpoints(const char* filesystem, XArray<char*>* mountpoints) const {
- FILE* fd = os::fopen(PROC_SELF_MOUNTINFO, "r");
- if (fd == nullptr) {
- XErrno err;
- log_error_p(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
- return;
- }
-
- char* line = nullptr;
- size_t length = 0;
-
- while (getline(&line, &length, fd) != -1) {
- char* const mountpoint = get_mountpoint(line, filesystem);
- if (mountpoint != nullptr) {
- mountpoints->append(mountpoint);
- }
- }
-
- // readline will return malloced memory. Need raw ::free, not os::free.
- ALLOW_C_FUNCTION(::free, ::free(line);)
- fclose(fd);
-}
-
-void XMountPoint::free_mountpoints(XArray<char*>* mountpoints) const {
- XArrayIterator<char*> iter(mountpoints);
- for (char* mountpoint; iter.next(&mountpoint);) {
- ALLOW_C_FUNCTION(::free, ::free(mountpoint);) // *not* os::free
- }
- mountpoints->clear();
-}
-
-char* XMountPoint::find_preferred_mountpoint(const char* filesystem,
- XArray<char*>* mountpoints,
- const char** preferred_mountpoints) const {
- // Find preferred mount point
- XArrayIterator<char*> iter1(mountpoints);
- for (char* mountpoint; iter1.next(&mountpoint);) {
- for (const char** preferred = preferred_mountpoints; *preferred != nullptr; preferred++) {
- if (!strcmp(mountpoint, *preferred)) {
- // Preferred mount point found
- return os::strdup(mountpoint, mtGC);
- }
- }
- }
-
- // Preferred mount point not found
- log_error_p(gc)("More than one %s filesystem found:", filesystem);
- XArrayIterator<char*> iter2(mountpoints);
- for (char* mountpoint; iter2.next(&mountpoint);) {
- log_error_p(gc)(" %s", mountpoint);
- }
-
- return nullptr;
-}
-
-char* XMountPoint::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
- char* path = nullptr;
- XArray<char*> mountpoints;
-
- get_mountpoints(filesystem, &mountpoints);
-
- if (mountpoints.length() == 0) {
- // No mount point found
- log_error_p(gc)("Failed to find an accessible %s filesystem", filesystem);
- } else if (mountpoints.length() == 1) {
- // One mount point found
- path = os::strdup(mountpoints.at(0), mtGC);
- } else {
- // More than one mount point found
- path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
- }
-
- free_mountpoints(&mountpoints);
-
- return path;
-}
-
-const char* XMountPoint::get() const {
- return _path;
-}
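
For reference, the mountinfo scanning removed above can be sketched as a standalone program. It reuses the same sscanf pattern as get_mountpoint(); the `wanted` filesystem type is an assumption for illustration:

```cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// Standalone sketch of the /proc/self/mountinfo parsing done above: list
// every accessible mount point of a given filesystem type. With glibc,
// the %m conversion makes sscanf malloc the output strings.
int main() {
  const char* wanted = "tmpfs"; // assumption: filesystem type to look for
  FILE* f = fopen("/proc/self/mountinfo", "r");
  if (f == nullptr) {
    perror("fopen");
    return 1;
  }

  char* line = nullptr;
  size_t cap = 0;
  while (getline(&line, &cap, f) != -1) {
    char* mountpoint = nullptr;
    char* filesystem = nullptr;
    if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms",
               &mountpoint, &filesystem) == 2 &&
        strcmp(filesystem, wanted) == 0 &&
        access(mountpoint, R_OK | W_OK | X_OK) == 0) {
      printf("%s\n", mountpoint);
    }
    free(mountpoint);  // free(nullptr) is a no-op
    free(filesystem);
  }

  free(line); // getline's buffer is malloced, needs raw free
  fclose(f);
  return 0;
}
```
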
diff --git a/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp b/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp
deleted file mode 100644
index e0ca126e066..00000000000
--- a/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
-#define OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
-
-#include "gc/x/xArray.hpp"
-#include "memory/allocation.hpp"
-
-class XMountPoint : public StackObj {
-private:
- char* _path;
-
- char* get_mountpoint(const char* line,
- const char* filesystem) const;
- void get_mountpoints(const char* filesystem,
- XArray<char*>* mountpoints) const;
- void free_mountpoints(XArray<char*>* mountpoints) const;
- char* find_preferred_mountpoint(const char* filesystem,
- XArray<char*>* mountpoints,
- const char** preferred_mountpoints) const;
- char* find_mountpoint(const char* filesystem,
- const char** preferred_mountpoints) const;
-
-public:
- XMountPoint(const char* filesystem, const char** preferred_mountpoints);
- ~XMountPoint();
-
- const char* get() const;
-};
-
-#endif // OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
diff --git a/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp b/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp
deleted file mode 100644
index 0cc557dde6e..00000000000
--- a/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "gc/x/xCPU.inline.hpp"
-#include "gc/x/xErrno.hpp"
-#include "gc/x/xNUMA.hpp"
-#include "gc/x/xSyscall_linux.hpp"
-#include "os_linux.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/debug.hpp"
-
-void XNUMA::pd_initialize() {
- _enabled = UseNUMA;
-}
-
-uint32_t XNUMA::count() {
- if (!_enabled) {
- // NUMA support not enabled
- return 1;
- }
-
- return os::Linux::numa_max_node() + 1;
-}
-
-uint32_t XNUMA::id() {
- if (!_enabled) {
- // NUMA support not enabled
- return 0;
- }
-
- return os::Linux::get_node_by_cpu(XCPU::id());
-}
-
-uint32_t XNUMA::memory_id(uintptr_t addr) {
- if (!_enabled) {
- // NUMA support not enabled, assume everything belongs to node zero
- return 0;
- }
-
- uint32_t id = (uint32_t)-1;
-
- if (XSyscall::get_mempolicy((int*)&id, nullptr, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
- XErrno err;
- fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
- }
-
- assert(id < count(), "Invalid NUMA id");
-
- return id;
-}
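
For reference, the memory_id() lookup removed above boils down to one raw syscall. A minimal sketch, assuming a Linux host and mirroring the MPOL_F_* fallback defines from xSyscall_linux.hpp:

```cpp
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

// Flags for get_mempolicy(), mirrored the same way xSyscall_linux.hpp does
#ifndef MPOL_F_NODE
#define MPOL_F_NODE (1 << 0)
#endif
#ifndef MPOL_F_ADDR
#define MPOL_F_ADDR (1 << 1)
#endif

// Ask the kernel which NUMA node backs a given address, using the same
// raw get_mempolicy(2) syscall as XNUMA::memory_id() above.
static int numa_node_of(void* addr) {
  int node = -1;
  if (syscall(SYS_get_mempolicy, &node, nullptr, 0, addr,
              MPOL_F_NODE | MPOL_F_ADDR) == -1) {
    return -1; // Query failed
  }
  return node;
}

int main() {
  // Touch a page so it is actually backed by memory before querying
  int* p = (int*)malloc(4096);
  *p = 42;
  printf("node: %d\n", numa_node_of(p));
  free(p);
  return 0;
}
```
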
diff --git a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp
deleted file mode 100644
index 35625f613d3..00000000000
--- a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp
+++ /dev/null
@@ -1,724 +0,0 @@
-/*
- * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/x/xArray.inline.hpp"
-#include "gc/x/xErrno.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "gc/x/xLargePages.inline.hpp"
-#include "gc/x/xMountPoint_linux.hpp"
-#include "gc/x/xNUMA.inline.hpp"
-#include "gc/x/xPhysicalMemoryBacking_linux.hpp"
-#include "gc/x/xSyscall_linux.hpp"
-#include "logging/log.hpp"
-#include "os_linux.hpp"
-#include "runtime/init.hpp"
-#include "runtime/os.hpp"
-#include "runtime/safefetch.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/growableArray.hpp"
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-//
-// Support for building on older Linux systems
-//
-
-// memfd_create(2) flags
-#ifndef MFD_CLOEXEC
-#define MFD_CLOEXEC 0x0001U
-#endif
-#ifndef MFD_HUGETLB
-#define MFD_HUGETLB 0x0004U
-#endif
-#ifndef MFD_HUGE_2MB
-#define MFD_HUGE_2MB 0x54000000U
-#endif
-
-// open(2) flags
-#ifndef O_CLOEXEC
-#define O_CLOEXEC 02000000
-#endif
-#ifndef O_TMPFILE
-#define O_TMPFILE (020000000 | O_DIRECTORY)
-#endif
-
-// fallocate(2) flags
-#ifndef FALLOC_FL_KEEP_SIZE
-#define FALLOC_FL_KEEP_SIZE 0x01
-#endif
-#ifndef FALLOC_FL_PUNCH_HOLE
-#define FALLOC_FL_PUNCH_HOLE 0x02
-#endif
-
-// Filesystem types, see statfs(2)
-#ifndef TMPFS_MAGIC
-#define TMPFS_MAGIC 0x01021994
-#endif
-#ifndef HUGETLBFS_MAGIC
-#define HUGETLBFS_MAGIC 0x958458f6
-#endif
-
-// Filesystem names
-#define XFILESYSTEM_TMPFS "tmpfs"
-#define XFILESYSTEM_HUGETLBFS "hugetlbfs"
-
-// Proc file entry for max map count
-#define XFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
-
-// Sysfs file for transparent huge page on tmpfs
-#define XFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
-
-// Java heap filename
-#define XFILENAME_HEAP "java_heap"
-
-// Preferred tmpfs mount points, ordered by priority
-static const char* z_preferred_tmpfs_mountpoints[] = {
- "/dev/shm",
- "/run/shm",
- nullptr
-};
-
-// Preferred hugetlbfs mount points, ordered by priority
-static const char* z_preferred_hugetlbfs_mountpoints[] = {
- "/dev/hugepages",
- "/hugepages",
- nullptr
-};
-
-static int z_fallocate_hugetlbfs_attempts = 3;
-static bool z_fallocate_supported = true;
-
-XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
- _fd(-1),
- _filesystem(0),
- _block_size(0),
- _available(0),
- _initialized(false) {
-
- // Create backing file
- _fd = create_fd(XFILENAME_HEAP);
- if (_fd == -1) {
- return;
- }
-
- // Truncate backing file
- while (ftruncate(_fd, max_capacity) == -1) {
- if (errno != EINTR) {
- XErrno err;
- log_error_p(gc)("Failed to truncate backing file (%s)", err.to_string());
- return;
- }
- }
-
- // Get filesystem statistics
- struct statfs buf;
- if (fstatfs(_fd, &buf) == -1) {
- XErrno err;
- log_error_p(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
- return;
- }
-
- _filesystem = buf.f_type;
- _block_size = buf.f_bsize;
- _available = buf.f_bavail * _block_size;
-
- log_info_p(gc, init)("Heap Backing Filesystem: %s (" UINT64_FORMAT_X ")",
- is_tmpfs() ? XFILESYSTEM_TMPFS : is_hugetlbfs() ? XFILESYSTEM_HUGETLBFS : "other", _filesystem);
-
- // Make sure the filesystem type matches requested large page type
- if (XLargePages::is_transparent() && !is_tmpfs()) {
- log_error_p(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
- XFILESYSTEM_TMPFS);
- return;
- }
-
- if (XLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
- log_error_p(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
- XFILESYSTEM_TMPFS);
- return;
- }
-
- if (XLargePages::is_explicit() && !is_hugetlbfs()) {
- log_error_p(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
- "when using a %s filesystem", XFILESYSTEM_HUGETLBFS);
- return;
- }
-
- if (!XLargePages::is_explicit() && is_hugetlbfs()) {
- log_error_p(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
- XFILESYSTEM_HUGETLBFS);
- return;
- }
-
- // Make sure the filesystem block size is compatible
- if (XGranuleSize % _block_size != 0) {
- log_error_p(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")",
- _block_size);
- return;
- }
-
- if (is_hugetlbfs() && _block_size != XGranuleSize) {
- log_error_p(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
- XFILESYSTEM_HUGETLBFS, _block_size, XGranuleSize);
- return;
- }
-
- // Successfully initialized
- _initialized = true;
-}
-
-int XPhysicalMemoryBacking::create_mem_fd(const char* name) const {
- assert(XGranuleSize == 2 * M, "Granule size must match MFD_HUGE_2MB");
-
- // Create file name
- char filename[PATH_MAX];
- snprintf(filename, sizeof(filename), "%s%s", name, XLargePages::is_explicit() ? ".hugetlb" : "");
-
- // Create file
- const int extra_flags = XLargePages::is_explicit() ? (MFD_HUGETLB | MFD_HUGE_2MB) : 0;
- const int fd = XSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags);
- if (fd == -1) {
- XErrno err;
- log_debug_p(gc, init)("Failed to create memfd file (%s)",
- (XLargePages::is_explicit() && (err == EINVAL || err == ENODEV)) ?
- "Hugepages (2M) not available" : err.to_string());
- return -1;
- }
-
- log_info_p(gc, init)("Heap Backing File: /memfd:%s", filename);
-
- return fd;
-}
-
-int XPhysicalMemoryBacking::create_file_fd(const char* name) const {
- const char* const filesystem = XLargePages::is_explicit()
- ? XFILESYSTEM_HUGETLBFS
- : XFILESYSTEM_TMPFS;
- const char** const preferred_mountpoints = XLargePages::is_explicit()
- ? z_preferred_hugetlbfs_mountpoints
- : z_preferred_tmpfs_mountpoints;
-
- // Find mountpoint
- XMountPoint mountpoint(filesystem, preferred_mountpoints);
- if (mountpoint.get() == nullptr) {
- log_error_p(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem);
- return -1;
- }
-
- // Try to create an anonymous file using the O_TMPFILE flag. Note that this
- // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
- const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
- if (fd_anon == -1) {
- XErrno err;
- log_debug_p(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(),
- (err == EINVAL ? "Not supported" : err.to_string()));
- } else {
- // Get inode number for anonymous file
- struct stat stat_buf;
- if (fstat(fd_anon, &stat_buf) == -1) {
- XErrno err;
- log_error_pd(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
- return -1;
- }
-
- log_info_p(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino);
-
- return fd_anon;
- }
-
- log_debug_p(gc, init)("Falling back to open/unlink");
-
- // Create file name
- char filename[PATH_MAX];
- snprintf(filename, sizeof(filename), "%s/%s.%d", mountpoint.get(), name, os::current_process_id());
-
- // Create file
- const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
- if (fd == -1) {
- XErrno err;
- log_error_p(gc)("Failed to create file %s (%s)", filename, err.to_string());
- return -1;
- }
-
- // Unlink file
- if (unlink(filename) == -1) {
- XErrno err;
- log_error_p(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
- return -1;
- }
-
- log_info_p(gc, init)("Heap Backing File: %s", filename);
-
- return fd;
-}
-
-int XPhysicalMemoryBacking::create_fd(const char* name) const {
- if (AllocateHeapAt == nullptr) {
- // If the path is not explicitly specified, then we first try to create a memfd file
- // instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
- // not be supported at all (requires kernel >= 3.17), or it might not support large
- // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
- // file on an accessible tmpfs or hugetlbfs mount point.
- const int fd = create_mem_fd(name);
- if (fd != -1) {
- return fd;
- }
-
- log_debug_p(gc)("Falling back to searching for an accessible mount point");
- }
-
- return create_file_fd(name);
-}
-
-bool XPhysicalMemoryBacking::is_initialized() const {
- return _initialized;
-}
-
-void XPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
- // Note that the available space on a tmpfs or a hugetlbfs filesystem
- // will be zero if no size limit was specified when it was mounted.
- if (_available == 0) {
- // No size limit set, skip check
- log_info_p(gc, init)("Available space on backing filesystem: N/A");
- return;
- }
-
- log_info_p(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M);
-
- // Warn if the filesystem doesn't currently have enough space available to hold
- // the max heap size. The max heap size will be capped if we later hit this limit
- // when trying to expand the heap.
- if (_available < max_capacity) {
- log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
- log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
- log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
- "(available", max_capacity / M);
- log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
- "size could", _available / M);
- log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
- }
-}
-
-void XPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
- const char* const filename = XFILENAME_PROC_MAX_MAP_COUNT;
- FILE* const file = os::fopen(filename, "r");
- if (file == nullptr) {
- // Failed to open file, skip check
- log_debug_p(gc, init)("Failed to open %s", filename);
- return;
- }
-
- size_t actual_max_map_count = 0;
- const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
- fclose(file);
- if (result != 1) {
- // Failed to read file, skip check
- log_debug_p(gc, init)("Failed to read %s", filename);
- return;
- }
-
- // The required max map count is impossible to calculate exactly since subsystems
- // other than ZGC are also creating memory mappings, and we have no control over that.
- // However, ZGC tends to create the most mappings and dominate the total count.
- // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
- // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
- const size_t required_max_map_count = (max_capacity / XGranuleSize) * 3 * 1.2;
- if (actual_max_map_count < required_max_map_count) {
- log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
- log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
- log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
- max_capacity / M, filename);
- log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
- "with the current", required_max_map_count, actual_max_map_count);
- log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
- }
-}
-
-void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
- // Warn if available space is too low
- warn_available_space(max_capacity);
-
- // Warn if max map count is too low
- warn_max_map_count(max_capacity);
-}
-
-bool XPhysicalMemoryBacking::is_tmpfs() const {
- return _filesystem == TMPFS_MAGIC;
-}
-
-bool XPhysicalMemoryBacking::is_hugetlbfs() const {
- return _filesystem == HUGETLBFS_MAGIC;
-}
-
-bool XPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
- // If the shmem_enabled file exists and is readable then we
- // know the kernel supports transparent huge pages for tmpfs.
- return access(XFILENAME_SHMEM_ENABLED, R_OK) == 0;
-}
-
-XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
- // On hugetlbfs, mapping a file segment will fail immediately, without
- // the need to touch the mapped pages first, if there aren't enough huge
- // pages available to back the mapping.
- void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
- if (addr == MAP_FAILED) {
- // Failed
- return errno;
- }
-
- // Once mapped, the huge pages are only reserved. We need to touch them
- // to associate them with the file segment. Note that we can not punch
- // hole in file segments which only have reserved pages.
- if (touch) {
- char* const start = (char*)addr;
- char* const end = start + length;
- os::pretouch_memory(start, end, _block_size);
- }
-
- // Unmap again. From now on, the huge pages that were mapped are allocated
- // to this file. There's no risk of getting a SIGBUS when mapping and
- // touching these pages again.
- if (munmap(addr, length) == -1) {
- // Failed
- return errno;
- }
-
- // Success
- return 0;
-}
-
-static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) {
- char* const start = (char*)addr;
- char* const end = start + length;
-
- // Touching a mapping that can't be backed by memory will generate a
- // SIGBUS. By using SafeFetch32 any SIGBUS will be safely caught and
- // handled. On tmpfs, doing a fetch (rather than a store) is enough
- // to cause backing pages to be allocated (there's no zero-page to
- // worry about).
- for (char *p = start; p < end; p += page_size) {
- if (SafeFetch32((int*)p, -1) == -1) {
- // Failed
- return false;
- }
- }
-
- // Success
- return true;
-}
-
-XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const {
- // On tmpfs, we need to touch the mapped pages to figure out
- // if there are enough pages available to back the mapping.
- void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
- if (addr == MAP_FAILED) {
- // Failed
- return errno;
- }
-
- // Advise mapping to use transparent huge pages
- os::realign_memory((char*)addr, length, XGranuleSize);
-
- // Touch the mapping (safely) to make sure it's backed by memory
- const bool backed = safe_touch_mapping(addr, length, _block_size);
-
- // Unmap again. If successfully touched, the backing memory will
- // be allocated to this file. There's no risk of getting a SIGBUS
- // when mapping and touching these pages again.
- if (munmap(addr, length) == -1) {
- // Failed
- return errno;
- }
-
- // Success
- return backed ? 0 : ENOMEM;
-}
-
-XErrno XPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
- uint8_t data = 0;
-
- // Allocate backing memory by writing to each block
- for (size_t pos = offset; pos < offset + length; pos += _block_size) {
- if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
- // Failed
- return errno;
- }
- }
-
- // Success
- return 0;
-}
-
-XErrno XPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
- // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
- // since Linux 4.3. When fallocate(2) is not supported we emulate it using
- // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
- // (for tmpfs without transparent huge pages and other filesystem types).
- if (XLargePages::is_explicit()) {
- return fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */);
- } else if (XLargePages::is_transparent()) {
- return fallocate_compat_mmap_tmpfs(offset, length);
- } else {
- return fallocate_compat_pwrite(offset, length);
- }
-}
-
-XErrno XPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
- const int mode = 0; // Allocate
- const int res = XSyscall::fallocate(_fd, mode, offset, length);
- if (res == -1) {
- // Failed
- return errno;
- }
-
- // Success
- return 0;
-}
-
-XErrno XPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
- // Using compat mode is more efficient when allocating space on hugetlbfs.
- // Note that allocating huge pages this way will only reserve them, and not
- // associate them with segments of the file. We must guarantee that we at
- // some point touch these segments, otherwise we can not punch hole in them.
- // Also note that we need to use compat mode when using transparent huge pages,
- // since we need to use madvise(2) on the mapping before the page is allocated.
- if (z_fallocate_supported && !XLargePages::is_enabled()) {
- const XErrno err = fallocate_fill_hole_syscall(offset, length);
- if (!err) {
- // Success
- return 0;
- }
-
- if (err != ENOSYS && err != EOPNOTSUPP) {
- // Failed
- return err;
- }
-
- // Not supported
- log_debug_p(gc)("Falling back to fallocate() compatibility mode");
- z_fallocate_supported = false;
- }
-
- return fallocate_fill_hole_compat(offset, length);
-}
-
-XErrno XPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
- if (XLargePages::is_explicit()) {
- // We can only punch hole in pages that have been touched. Non-touched
- // pages are only reserved, and not associated with any specific file
- // segment. We don't know which pages have been previously touched, so
- // we always touch them here to guarantee that we can punch hole.
- const XErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */);
- if (err) {
- // Failed
- return err;
- }
- }
-
- const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
- if (XSyscall::fallocate(_fd, mode, offset, length) == -1) {
- // Failed
- return errno;
- }
-
- // Success
- return 0;
-}
-
-XErrno XPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
- // Try first half
- const size_t offset0 = offset;
- const size_t length0 = align_up(length / 2, _block_size);
- const XErrno err0 = fallocate(punch_hole, offset0, length0);
- if (err0) {
- return err0;
- }
-
- // Try second half
- const size_t offset1 = offset0 + length0;
- const size_t length1 = length - length0;
- const XErrno err1 = fallocate(punch_hole, offset1, length1);
- if (err1) {
- return err1;
- }
-
- // Success
- return 0;
-}
-
-XErrno XPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
- assert(is_aligned(offset, _block_size), "Invalid offset");
- assert(is_aligned(length, _block_size), "Invalid length");
-
- const XErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
- if (err == EINTR && length > _block_size) {
- // Calling fallocate(2) with a large length can take a long time to
- // complete. When running profilers, such as VTune, this syscall will
- // be constantly interrupted by signals. Expanding the file in smaller
- // steps avoids this problem.
- return split_and_fallocate(punch_hole, offset, length);
- }
-
- return err;
-}
-
-bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
- log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- offset / M, (offset + length) / M, length / M);
-
-retry:
- const XErrno err = fallocate(false /* punch_hole */, offset, length);
- if (err) {
- if (err == ENOSPC && !is_init_completed() && XLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) {
- // If we fail to allocate during initialization, due to lack of space on
- // the hugetlbfs filesystem, then we wait and retry a few times before
- // giving up. Otherwise there is a risk that running JVMs back-to-back
- // will fail, since there is a delay between process termination and the
- // huge pages owned by that process being returned to the huge page pool
- // and made available for new allocations.
- log_debug_p(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
-
- // Wait and retry in one second, in the hope that huge pages will be
- // available by then.
- sleep(1);
- goto retry;
- }
-
- // Failed
- log_error_p(gc)("Failed to commit memory (%s)", err.to_string());
- return false;
- }
-
- // Success
- return true;
-}
-
-static int offset_to_node(size_t offset) {
- const GrowableArray<int>* mapping = os::Linux::numa_nindex_to_node();
- const size_t nindex = (offset >> XGranuleSizeShift) % mapping->length();
- return mapping->at((int)nindex);
-}
-
-size_t XPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
- size_t committed = 0;
-
- // Commit one granule at a time, so that each granule
- // can be allocated from a different preferred node.
- while (committed < length) {
- const size_t granule_offset = offset + committed;
-
- // Setup NUMA policy to allocate memory from a preferred node
- os::Linux::numa_set_preferred(offset_to_node(granule_offset));
-
- if (!commit_inner(granule_offset, XGranuleSize)) {
- // Failed
- break;
- }
-
- committed += XGranuleSize;
- }
-
- // Restore NUMA policy
- os::Linux::numa_set_preferred(-1);
-
- return committed;
-}
-
-size_t XPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
- // Try to commit the whole region
- if (commit_inner(offset, length)) {
- // Success
- return length;
- }
-
- // Failed, try to commit as much as possible
- size_t start = offset;
- size_t end = offset + length;
-
- for (;;) {
- length = align_down((end - start) / 2, XGranuleSize);
- if (length < XGranuleSize) {
- // Done, don't commit more
- return start - offset;
- }
-
- if (commit_inner(start, length)) {
- // Success, try commit more
- start += length;
- } else {
- // Failed, try commit less
- end -= length;
- }
- }
-}
-
-size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
- if (XNUMA::is_enabled() && !XLargePages::is_explicit()) {
- // To get granule-level NUMA interleaving when using non-large pages,
- // we must explicitly interleave the memory at commit/fallocate time.
- return commit_numa_interleaved(offset, length);
- }
-
- return commit_default(offset, length);
-}
-
-size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
- log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- offset / M, (offset + length) / M, length / M);
-
- const XErrno err = fallocate(true /* punch_hole */, offset, length);
- if (err) {
- log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
- return 0;
- }
-
- return length;
-}
-
-void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
- const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset);
- if (res == MAP_FAILED) {
- XErrno err;
- fatal("Failed to map memory (%s)", err.to_string());
- }
-}
-
-void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
- // Note that we must keep the address space reservation intact and just detach
- // the backing memory. For this reason we map a new anonymous, non-accessible
- // and non-reserved page over the mapping instead of actually unmapping.
- const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
- if (res == MAP_FAILED) {
- XErrno err;
- fatal("Failed to map memory (%s)", err.to_string());
- }
-}
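
For reference, the commit/uncommit mechanism removed above condenses into a short standalone sketch: an anonymous memfd backs the heap, fallocate(2) fills holes to commit and punches holes to uncommit, and views are mapped with mmap(MAP_SHARED). The 2M granule and file name are assumptions for illustration; raw syscalls are used, as in xSyscall_linux.cpp:

```cpp
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef FALLOC_FL_KEEP_SIZE
#define FALLOC_FL_KEEP_SIZE 0x01
#endif
#ifndef FALLOC_FL_PUNCH_HOLE
#define FALLOC_FL_PUNCH_HOLE 0x02
#endif

int main() {
  const size_t granule = 2 * 1024 * 1024; // assumption: 2M granule

  // Anonymous file backing the "heap"
  const int fd = (int)syscall(SYS_memfd_create, "java_heap", MFD_CLOEXEC);
  if (fd == -1) { perror("memfd_create"); return 1; }
  if (ftruncate(fd, granule) == -1) { perror("ftruncate"); return 1; }

  // Commit: allocate backing pages for the first granule
  if (syscall(SYS_fallocate, fd, 0, (off_t)0, (off_t)granule) == -1) {
    perror("fallocate"); return 1;
  }

  // Map a view of the committed range and use it
  char* view = (char*)mmap(nullptr, granule, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
  if (view == MAP_FAILED) { perror("mmap"); return 1; }
  view[0] = 1;
  munmap(view, granule);

  // Uncommit: punch a hole so the pages go back to the kernel, while
  // the file's size (and thus the offset range) stays valid
  if (syscall(SYS_fallocate, fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
              (off_t)0, (off_t)granule) == -1) {
    perror("fallocate(PUNCH_HOLE)"); return 1;
  }

  close(fd);
  return 0;
}
```
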
diff --git a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp
deleted file mode 100644
index 253a3f87ef4..00000000000
--- a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
-#define OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
-
-class XErrno;
-
-class XPhysicalMemoryBacking {
-private:
- int _fd;
- size_t _size;
- uint64_t _filesystem;
- size_t _block_size;
- size_t _available;
- bool _initialized;
-
- void warn_available_space(size_t max_capacity) const;
- void warn_max_map_count(size_t max_capacity) const;
-
- int create_mem_fd(const char* name) const;
- int create_file_fd(const char* name) const;
- int create_fd(const char* name) const;
-
- bool is_tmpfs() const;
- bool is_hugetlbfs() const;
- bool tmpfs_supports_transparent_huge_pages() const;
-
- XErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
- XErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
- XErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
- XErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
- XErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
- XErrno fallocate_fill_hole(size_t offset, size_t length) const;
- XErrno fallocate_punch_hole(size_t offset, size_t length) const;
- XErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
- XErrno fallocate(bool punch_hole, size_t offset, size_t length) const;
-
- bool commit_inner(size_t offset, size_t length) const;
- size_t commit_numa_interleaved(size_t offset, size_t length) const;
- size_t commit_default(size_t offset, size_t length) const;
-
-public:
- XPhysicalMemoryBacking(size_t max_capacity);
-
- bool is_initialized() const;
-
- void warn_commit_limits(size_t max_capacity) const;
-
- size_t commit(size_t offset, size_t length) const;
- size_t uncommit(size_t offset, size_t length) const;
-
- void map(uintptr_t addr, size_t size, uintptr_t offset) const;
- void unmap(uintptr_t addr, size_t size) const;
-};
-
-#endif // OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
diff --git a/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp b/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp
deleted file mode 100644
index 6035eaae61b..00000000000
--- a/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xSyscall_linux.hpp"
-#include OS_CPU_HEADER(gc/x/xSyscall)
-
-#include <unistd.h>
-
-int XSyscall::memfd_create(const char *name, unsigned int flags) {
- return syscall(SYS_memfd_create, name, flags);
-}
-
-int XSyscall::fallocate(int fd, int mode, size_t offset, size_t length) {
- return syscall(SYS_fallocate, fd, mode, offset, length);
-}
-
-long XSyscall::get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags) {
- return syscall(SYS_get_mempolicy, mode, nodemask, maxnode, addr, flags);
-}
diff --git a/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp b/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp
deleted file mode 100644
index f16d2b2ffdc..00000000000
--- a/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
-#define OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
-
-#include "memory/allStatic.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// Flags for get_mempolicy()
-#ifndef MPOL_F_NODE
-#define MPOL_F_NODE (1<<0)
-#endif
-#ifndef MPOL_F_ADDR
-#define MPOL_F_ADDR (1<<1)
-#endif
-
-class XSyscall : public AllStatic {
-public:
- static int memfd_create(const char* name, unsigned int flags);
- static int fallocate(int fd, int mode, size_t offset, size_t length);
- static long get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags);
-};
-
-#endif // OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
diff --git a/src/hotspot/os/posix/gc/x/xArguments_posix.cpp b/src/hotspot/os/posix/gc/x/xArguments_posix.cpp
deleted file mode 100644
index 6df0a9bd074..00000000000
--- a/src/hotspot/os/posix/gc/x/xArguments_posix.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xArguments.hpp"
-
-bool XArguments::is_os_supported() {
- return true;
-}
diff --git a/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp b/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp
deleted file mode 100644
index acf71e98901..00000000000
--- a/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xInitialize.hpp"
-
-void XInitialize::pd_initialize() {
- // Does nothing
-}
diff --git a/src/hotspot/os/posix/gc/x/xUtils_posix.cpp b/src/hotspot/os/posix/gc/x/xUtils_posix.cpp
deleted file mode 100644
index eee3e5cfbe6..00000000000
--- a/src/hotspot/os/posix/gc/x/xUtils_posix.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xUtils.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-#include <stdlib.h>
-
-uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) {
- void* res = nullptr;
-
- // Use raw posix_memalign as long as we have no wrapper for it
- ALLOW_C_FUNCTION(::posix_memalign, int rc = posix_memalign(&res, alignment, size);)
- if (rc != 0) {
- fatal("posix_memalign() failed");
- }
-
- memset(res, 0, size);
-
- return (uintptr_t)res;
-}
diff --git a/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp b/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp
deleted file mode 100644
index e2422eb0978..00000000000
--- a/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xAddress.inline.hpp"
-#include "gc/x/xVirtualMemory.hpp"
-#include "logging/log.hpp"
-
-#include <sys/mman.h>
-#include <sys/types.h>
-
-void XVirtualMemoryManager::pd_initialize_before_reserve() {
- // Does nothing
-}
-
-void XVirtualMemoryManager::pd_initialize_after_reserve() {
- // Does nothing
-}
-
-bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
- const uintptr_t res = (uintptr_t)mmap((void*)addr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
- if (res == (uintptr_t)MAP_FAILED) {
- // Failed to reserve memory
- return false;
- }
-
- if (res != addr) {
- // Failed to reserve memory at the requested address
- munmap((void*)res, size);
- return false;
- }
-
- // Success
- return true;
-}
-
-void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
- const int res = munmap((void*)addr, size);
- assert(res == 0, "Failed to unmap memory");
-}
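
For reference, the reserve/commit/detach life cycle spread across the deleted files can be sketched in one place: reserve with PROT_NONE and MAP_NORESERVE (pd_reserve above), commit by mapping over the reservation with MAP_FIXED, and detach by mapping an inaccessible page back over the range rather than unmapping it, so the reservation stays intact. A minimal sketch:

```cpp
#include <stdio.h>
#include <sys/mman.h>

int main() {
  const size_t size = 16 * 1024 * 1024;

  // Reserve: no access, no swap reservation
  void* base = mmap(nullptr, size, PROT_NONE,
                    MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) { perror("reserve"); return 1; }

  // Commit the first 4K: overwrite the reservation with a usable mapping
  void* p = mmap(base, 4096, PROT_READ | PROT_WRITE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (p == MAP_FAILED) { perror("commit"); return 1; }
  *(char*)p = 1;

  // Detach: map PROT_NONE over the range instead of munmap, so no other
  // mapping can move into this part of the reserved address space
  if (mmap(base, 4096, PROT_NONE,
           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
           -1, 0) == MAP_FAILED) {
    perror("detach"); return 1;
  }

  munmap(base, size); // Unreserve (pd_unreserve above)
  return 0;
}
```
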
diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp
index 4eb46169878..17bf63092c2 100644
--- a/src/hotspot/os/posix/perfMemory_posix.cpp
+++ b/src/hotspot/os/posix/perfMemory_posix.cpp
@@ -1086,7 +1086,7 @@ static char* mmap_create_shared(size_t size) {
static void unmap_shared(char* addr, size_t bytes) {
int res;
if (MemTracker::enabled()) {
- ThreadCritical tc;
+ NmtVirtualMemoryLocker ml;
res = ::munmap(addr, bytes);
if (res == 0) {
MemTracker::record_virtual_memory_release((address)addr, bytes);
diff --git a/src/hotspot/os/windows/attachListener_windows.cpp b/src/hotspot/os/windows/attachListener_windows.cpp
index 3f6ca941c20..bfa377d52cf 100644
--- a/src/hotspot/os/windows/attachListener_windows.cpp
+++ b/src/hotspot/os/windows/attachListener_windows.cpp
@@ -32,17 +32,24 @@
#include <signal.h> // SIGBREAK
#include <stdio.h>
-// The AttachListener thread services a queue of operations. It blocks in the dequeue
-// function until an operation is enqueued. A client enqueues an operation by creating
+// The AttachListener thread services a queue of operation requests. It blocks in the dequeue
+// function until a request is enqueued. A client enqueues a request by creating
// a thread in this process using the Win32 CreateRemoteThread function. That thread
// executes a small stub generated by the client. The stub invokes the
-// JVM_EnqueueOperation function which checks the operation parameters and enqueues
-// the operation to the queue serviced by the attach listener. The thread created by
+// JVM_EnqueueOperation or JVM_EnqueueOperation_v2 function, which checks the operation parameters
+// and enqueues the operation request to the queue. The thread created by
// the client is a native thread and is restricted to a single page of stack. To keep
-// it simple operations are pre-allocated at initialization time. An enqueue thus
-// takes a preallocated operation, populates the operation parameters, adds it to
+// it simple, operation requests are pre-allocated at initialization time. An enqueue thus
+// takes a preallocated request, populates the operation parameters, adds it to
// queue and wakes up the attach listener.
//
+// Differences between Attach API v1 and v2:
+// In v1 (jdk6+) the client calls the JVM_EnqueueOperation function and passes all operation parameters
+// as arguments of the function.
+// In v2 (jdk24+) the client calls the JVM_EnqueueOperation_v2 function and passes only the pipe name.
+// The attach listener connects to the pipe (in read/write mode) and reads all operation parameters
+// (the same way as other platform implementations read them using sockets).
+//
// When an operation has completed the attach listener is required to send the
// operation result and any result data to the client. In this implementation the
// client is a pipe server. In the enqueue operation it provides the name of pipe
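The v1/v2 split described in the comment block above can be made concrete with a small client-side sketch. This is illustrative only: the pipe name, payload, and buffer sizes are invented, the CreateRemoteThread injection step is elided, and only JVM_EnqueueOperation_v2 comes from the patch; the actual wire format is whatever the shared attach code reads.

```cpp
#include <windows.h>

int main() {
  const char* name = "\\\\.\\pipe\\example_attach_pipe";

  // The client is the pipe *server*; the VM-side listener connects to it.
  HANDLE pipe = CreateNamedPipeA(name, PIPE_ACCESS_DUPLEX,
                                 PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
                                 1, 4096, 4096, 0, nullptr);
  if (pipe == INVALID_HANDLE_VALUE) return 1;

  // ...inject a stub via CreateRemoteThread that calls
  // JVM_EnqueueOperation_v2(name) inside the target VM (elided)...

  if (ConnectNamedPipe(pipe, nullptr) || GetLastError() == ERROR_PIPE_CONNECTED) {
    DWORD n;
    const char request[] = "properties";  // illustrative payload, not the JDK wire format
    WriteFile(pipe, request, (DWORD)sizeof(request), &n, nullptr);  // send the operation
    char reply[4096];
    ReadFile(pipe, reply, (DWORD)sizeof(reply), &n, nullptr);       // result code + output
  }
  CloseHandle(pipe);
  return 0;
}
```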
@@ -55,8 +62,154 @@
// this wasn't worth worrying about.
-// forward reference
-class Win32AttachOperation;
+class PipeChannel : public AttachOperation::RequestReader, public AttachOperation::ReplyWriter {
+private:
+ HANDLE _hPipe;
+public:
+ PipeChannel() : _hPipe(INVALID_HANDLE_VALUE) {}
+ ~PipeChannel() {
+ close();
+ }
+
+ bool opened() const {
+ return _hPipe != INVALID_HANDLE_VALUE;
+ }
+
+ bool open(const char* pipe, bool write_only) {
+ _hPipe = ::CreateFile(pipe,
+ GENERIC_WRITE | (write_only ? 0 : GENERIC_READ),
+ 0, // no sharing
+ nullptr, // default security attributes
+ OPEN_EXISTING, // opens existing pipe
+ 0, // default attributes
+ nullptr); // no template file
+ if (_hPipe == INVALID_HANDLE_VALUE) {
+ log_error(attach)("could not open (%d) pipe %s", GetLastError(), pipe);
+ return false;
+ }
+ return true;
+ }
+
+ void close() {
+ if (opened()) {
+ CloseHandle(_hPipe);
+ _hPipe = INVALID_HANDLE_VALUE;
+ }
+ }
+
+ // RequestReader
+ int read(void* buffer, int size) override {
+ assert(opened(), "must be");
+ DWORD nread;
+ BOOL fSuccess = ReadFile(_hPipe,
+ buffer,
+ (DWORD)size,
+ &nread,
+ nullptr); // not overlapped
+ return fSuccess ? (int)nread : -1;
+ }
+
+ // ReplyWriter
+ int write(const void* buffer, int size) override {
+ assert(opened(), "must be");
+ DWORD written;
+ BOOL fSuccess = WriteFile(_hPipe,
+ buffer,
+ (DWORD)size,
+ &written,
+ nullptr); // not overlapped
+ return fSuccess ? (int)written : -1;
+ }
+
+ void flush() override {
+ assert(opened(), "must be");
+ FlushFileBuffers(_hPipe);
+ }
+};
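Note that PipeChannel::write returns however many bytes WriteFile accepted, which can be fewer than requested; the retry loop the old write_pipe carried presumably now lives in the shared AttachOperation reply code. A minimal sketch of such a loop against the same contract (`Writer` is a stand-in for AttachOperation::ReplyWriter, not HotSpot code):

```cpp
// Writer mirrors the write(buffer, size) contract shown above.
struct Writer {
  virtual int write(const void* buffer, int size) = 0;  // bytes written, or -1
  virtual ~Writer() = default;
};

static bool write_fully(Writer* w, const char* buf, int len) {
  while (len > 0) {
    const int n = w->write(buf, len);
    if (n < 0) {
      return false;  // hard error (WriteFile returned FALSE)
    }
    buf += n;        // a short write just advances and retries
    len -= n;
  }
  return true;
}
```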
+
+class Win32AttachOperation: public AttachOperation {
+public:
+ enum {
+ pipe_name_max = 256 // maximum pipe name
+ };
+
+private:
+ PipeChannel _pipe;
+
+public:
+ // for v1 pipe must be write-only
+ void open_pipe(const char* pipe_name, bool write_only) {
+ _pipe.open(pipe_name, write_only);
+ }
+
+ bool read_request() {
+ return AttachOperation::read_request(&_pipe);
+ }
+
+public:
+ void complete(jint result, bufferedStream* result_stream) override;
+};
+
+
+// Win32AttachOperationRequest is an element of the AttachOperation request list.
+class Win32AttachOperationRequest {
+private:
+ AttachAPIVersion _ver;
+ char _name[AttachOperation::name_length_max + 1];
+ char _arg[AttachOperation::arg_count_max][AttachOperation::arg_length_max + 1];
+ char _pipe[Win32AttachOperation::pipe_name_max + 1];
+
+ Win32AttachOperationRequest* _next;
+
+ void set_value(char* dst, const char* str, size_t dst_size) {
+ if (str != nullptr) {
+ assert(strlen(str) < dst_size, "exceeds maximum length");
+ strncpy(dst, str, dst_size - 1);
+ dst[dst_size - 1] = '\0';
+ } else {
+ strcpy(dst, "");
+ }
+ }
+
+public:
+ void set(AttachAPIVersion ver, const char* pipename,
+ const char* cmd = nullptr,
+ const char* arg0 = nullptr,
+ const char* arg1 = nullptr,
+ const char* arg2 = nullptr) {
+ _ver = ver;
+ set_value(_name, cmd, sizeof(_name));
+ set_value(_arg[0], arg0, sizeof(_arg[0]));
+ set_value(_arg[1], arg1, sizeof(_arg[1]));
+ set_value(_arg[2], arg2, sizeof(_arg[2]));
+ set_value(_pipe, pipename, sizeof(_pipe));
+ }
+ AttachAPIVersion ver() const {
+ return _ver;
+ }
+ const char* cmd() const {
+ return _name;
+ }
+ const char* arg(int i) const {
+ return (i >= 0 && i < AttachOperation::arg_count_max) ? _arg[i] : nullptr;
+ }
+ const char* pipe() const {
+ return _pipe;
+ }
+
+ Win32AttachOperationRequest* next() const {
+ return _next;
+ }
+ void set_next(Win32AttachOperationRequest* next) {
+ _next = next;
+ }
+
+ // no-arg constructor, as requests are preallocated
+ Win32AttachOperationRequest() {
+ set(ATTACH_API_V1, "");
+ set_next(nullptr);
+ }
+};
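Because the enqueuing thread runs on a single page of stack, enqueue must not allocate; requests recirculate through an intrusive singly-linked free list. A generic sketch of that discipline (`Node`, `take` and `give_back` are invented names; the real code uses Win32AttachOperationRequest with the next()/set_next() accessors above):

```cpp
struct Node {
  Node* next = nullptr;
};

static Node* take(Node*& avail) {
  Node* n = avail;               // pop the head of the available list
  if (n != nullptr) {
    avail = n->next;
  }
  return n;                      // nullptr: pool exhausted => ATTACH_ERROR_RESOURCE
}

static void give_back(Node*& avail, Node* n) {
  n->next = avail;               // push back for reuse; no allocation anywhere
  avail = n;
}
```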
class Win32AttachListener: AllStatic {
@@ -69,18 +222,18 @@ class Win32AttachListener: AllStatic {
static HANDLE _mutex;
// head of preallocated operations list
- static Win32AttachOperation* _avail;
+ static Win32AttachOperationRequest* _avail;
// head and tail of enqueue operations list
- static Win32AttachOperation* _head;
- static Win32AttachOperation* _tail;
+ static Win32AttachOperationRequest* _head;
+ static Win32AttachOperationRequest* _tail;
- static Win32AttachOperation* head() { return _head; }
- static void set_head(Win32AttachOperation* head) { _head = head; }
+ static Win32AttachOperationRequest* head() { return _head; }
+ static void set_head(Win32AttachOperationRequest* head) { _head = head; }
- static Win32AttachOperation* tail() { return _tail; }
- static void set_tail(Win32AttachOperation* tail) { _tail = tail; }
+ static Win32AttachOperationRequest* tail() { return _tail; }
+ static void set_tail(Win32AttachOperationRequest* tail) { _tail = tail; }
// A semaphore is used for communication about enqueued operations.
@@ -101,11 +254,12 @@ class Win32AttachListener: AllStatic {
static int init();
static HANDLE mutex() { return _mutex; }
- static Win32AttachOperation* available() { return _avail; }
- static void set_available(Win32AttachOperation* avail) { _avail = avail; }
+ static Win32AttachOperationRequest* available() { return _avail; }
+ static void set_available(Win32AttachOperationRequest* avail) { _avail = avail; }
// enqueue an operation to the end of the list
- static int enqueue(char* cmd, char* arg1, char* arg2, char* arg3, char* pipename);
+ static int enqueue(AttachAPIVersion ver, const char* cmd,
+ const char* arg1, const char* arg2, const char* arg3, const char* pipename);
// dequeue an operation from the head of the list
static Win32AttachOperation* dequeue();
@@ -114,48 +268,9 @@ class Win32AttachListener: AllStatic {
// statics
HANDLE Win32AttachListener::_mutex;
HANDLE Win32AttachListener::_enqueued_ops_semaphore;
-Win32AttachOperation* Win32AttachListener::_avail;
-Win32AttachOperation* Win32AttachListener::_head;
-Win32AttachOperation* Win32AttachListener::_tail;
-
-
-// Win32AttachOperation is an AttachOperation that additionally encapsulates the name
-// of a pipe which is used to send the operation reply/output to the client.
-// Win32AttachOperation can also be linked in a list.
-
-class Win32AttachOperation: public AttachOperation {
- private:
- friend class Win32AttachListener;
-
- enum {
- pipe_name_max = 256 // maximum pipe name
- };
-
- char _pipe[pipe_name_max + 1];
-
- const char* pipe() const { return _pipe; }
- void set_pipe(const char* pipe) {
- assert(strlen(pipe) <= pipe_name_max, "exceeds maximum length of pipe name");
- os::snprintf(_pipe, sizeof(_pipe), "%s", pipe);
- }
-
- HANDLE open_pipe();
- static BOOL write_pipe(HANDLE hPipe, char* buf, int len);
-
- Win32AttachOperation* _next;
-
- Win32AttachOperation* next() const { return _next; }
- void set_next(Win32AttachOperation* next) { _next = next; }
-
- // noarg constructor as operation is preallocated
- Win32AttachOperation() : AttachOperation("") {
- set_pipe("");
- set_next(nullptr);
- }
-
- public:
- void complete(jint result, bufferedStream* result_stream);
-};
+Win32AttachOperationRequest* Win32AttachListener::_avail;
+Win32AttachOperationRequest* Win32AttachListener::_head;
+Win32AttachOperationRequest* Win32AttachListener::_tail;
// Preallocate the maximum number of operations that can be enqueued.
@@ -171,18 +286,24 @@ int Win32AttachListener::init() {
set_available(nullptr);
   for (int i=0; i<max_enqueued_operations; i++) {
-    Win32AttachOperation* op = new Win32AttachOperation();
+    Win32AttachOperationRequest* op = new Win32AttachOperationRequest();
     op->set_next(available());
     set_available(op);
   }
+ AttachListener::set_supported_version(ATTACH_API_V2);
+
return 0;
}
// Enqueue an operation. This is called from a native thread that is not attached to VM.
// Also we need to be careful not to execute anything that results in more than a 4k stack.
//
-int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2, char* pipename) {
+int Win32AttachListener::enqueue(AttachAPIVersion ver, const char* cmd,
+ const char* arg0, const char* arg1, const char* arg2, const char* pipename) {
+
+ log_debug(attach)("AttachListener::enqueue, ver = %d, cmd = %s", (int)ver, cmd);
+
// wait up to 10 seconds for listener to be up and running
int sleep_count = 0;
while (!AttachListener::is_initialized()) {
@@ -210,7 +331,7 @@ int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2,
}
// try to get an operation from the available list
- Win32AttachOperation* op = available();
+ Win32AttachOperationRequest* op = available();
if (op != nullptr) {
set_available(op->next());
@@ -223,11 +344,7 @@ int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2,
}
set_tail(op);
- op->set_name(cmd);
- op->set_arg(0, arg0);
- op->set_arg(1, arg1);
- op->set_arg(2, arg2);
- op->set_pipe(pipename);
+ op->set(ver, pipename, cmd, arg0, arg1, arg2);
// Increment number of enqueued operations.
// Side effect: Semaphore will be signaled and will release
@@ -236,6 +353,7 @@ int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2,
::ReleaseSemaphore(enqueued_ops_semaphore(), 1, nullptr);
guarantee(not_exceeding_semaphore_maximum_count, "invariant");
}
+
::ReleaseMutex(mutex());
return (op != nullptr) ? 0 : ATTACH_ERROR_RESOURCE;
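The enqueue/dequeue pair here is a classic bounded producer/consumer built from a Win32 mutex plus a counting semaphore: the semaphore counts enqueued requests, the mutex guards the lists. A self-contained sketch of the same handshake (names invented; the real code caps the count at max_enqueued_operations and checks the ReleaseSemaphore result with a guarantee()):

```cpp
#include <windows.h>

// g_mutex guards the request lists; g_sem counts enqueued requests.
static HANDLE g_mutex = CreateMutexA(nullptr, FALSE, nullptr);
static HANDLE g_sem   = CreateSemaphoreA(nullptr, 0, 4 /*max count*/, nullptr);

void producer_signal() {
  WaitForSingleObject(g_mutex, INFINITE);
  // ...link a request onto the tail of the queue...
  ReleaseSemaphore(g_sem, 1, nullptr);   // fails if already at max count
  ReleaseMutex(g_mutex);
}

void consumer_wait() {
  WaitForSingleObject(g_sem, INFINITE);  // blocks until something is enqueued
  WaitForSingleObject(g_mutex, INFINITE);
  // ...unlink a request from the head of the queue...
  ReleaseMutex(g_mutex);
}
```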
@@ -255,107 +373,63 @@ Win32AttachOperation* Win32AttachListener::dequeue() {
guarantee(res != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
guarantee(res == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", res);
+ Win32AttachOperation* op = nullptr;
+ Win32AttachOperationRequest* request = head();
+ if (request != nullptr) {
+ log_debug(attach)("AttachListener::dequeue, got request, ver = %d, cmd = %s", request->ver(), request->cmd());
- Win32AttachOperation* op = head();
- if (op != nullptr) {
- set_head(op->next());
+ set_head(request->next());
if (head() == nullptr) { // list is empty
set_tail(nullptr);
}
+
+ switch (request->ver()) {
+ case ATTACH_API_V1:
+ op = new Win32AttachOperation();
+ op->set_name(request->cmd());
+ for (int i = 0; i < AttachOperation::arg_count_max; i++) {
+ op->append_arg(request->arg(i));
+ }
+ op->open_pipe(request->pipe(), true/*write-only*/);
+ break;
+ case ATTACH_API_V2:
+ op = new Win32AttachOperation();
+ op->open_pipe(request->pipe(), false/*read-write*/);
+ if (!op->read_request()) {
+ log_error(attach)("AttachListener::dequeue, reading request ERROR");
+ delete op;
+ op = nullptr;
+ }
+ break;
+ default:
+ log_error(attach)("AttachListener::dequeue, unsupported version: %d", request->ver(), request->cmd());
+ break;
+ }
}
+ // put the request back on the available list
+ if (request != nullptr) {
+ request->set_next(Win32AttachListener::available());
+ Win32AttachListener::set_available(request);
+ }
+
::ReleaseMutex(mutex());
if (op != nullptr) {
+ log_debug(attach)("AttachListener::dequeue, return op: %s", op->name());
return op;
}
}
}
-
-// open the pipe to the client
-HANDLE Win32AttachOperation::open_pipe() {
- HANDLE hPipe = ::CreateFile( pipe(), // pipe name
- GENERIC_WRITE, // write only
- 0, // no sharing
- nullptr, // default security attributes
- OPEN_EXISTING, // opens existing pipe
- 0, // default attributes
- nullptr); // no template file
- return hPipe;
-}
-
-// write to the pipe
-BOOL Win32AttachOperation::write_pipe(HANDLE hPipe, char* buf, int len) {
- do {
- DWORD nwrote;
-
- BOOL fSuccess = WriteFile( hPipe, // pipe handle
- (LPCVOID)buf, // message
- (DWORD)len, // message length
- &nwrote, // bytes written
- nullptr); // not overlapped
- if (!fSuccess) {
- return fSuccess;
- }
- buf += nwrote;
- len -= nwrote;
- } while (len > 0);
- return TRUE;
-}
-
-// Complete the operation:
-// - open the pipe to the client
-// - write the operation result (a jint)
-// - write the operation output (the result stream)
-//
void Win32AttachOperation::complete(jint result, bufferedStream* result_stream) {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- HANDLE hPipe = open_pipe();
- int lastError = (int)::GetLastError();
- if (hPipe != INVALID_HANDLE_VALUE) {
- BOOL fSuccess;
-
- char msg[32];
- os::snprintf(msg, sizeof(msg), "%d\n", result);
- msg[sizeof(msg) - 1] = '\0';
-
- fSuccess = write_pipe(hPipe, msg, (int)strlen(msg));
- if (fSuccess) {
- fSuccess = write_pipe(hPipe, (char*)result_stream->base(), (int)(result_stream->size()));
- }
- lastError = (int)::GetLastError();
-
- // Need to flush buffers
- FlushFileBuffers(hPipe);
- CloseHandle(hPipe);
+ write_reply(&_pipe, result, result_stream);
- if (fSuccess) {
- log_debug(attach)("wrote result of attach operation %s to pipe %s", name(), pipe());
- } else {
- log_error(attach)("failure (%d) writing result of operation %s to pipe %s", lastError, name(), pipe());
- }
- } else {
- log_error(attach)("could not open (%d) pipe %s to send result of operation %s", lastError, pipe(), name());
- }
-
- DWORD res = ::WaitForSingleObject(Win32AttachListener::mutex(), INFINITE);
- assert(res != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
- assert(res == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", res);
-
- if (res == WAIT_OBJECT_0) {
-
- // put the operation back on the available list
- set_next(Win32AttachListener::available());
- Win32AttachListener::set_available(this);
-
- ::ReleaseMutex(Win32AttachListener::mutex());
- }
+ delete this;
}
-// AttachOperation functions
+// AttachListener functions
AttachOperation* AttachListener::dequeue() {
JavaThread* thread = JavaThread::current();
@@ -404,8 +478,12 @@ void AttachListener::pd_detachall() {
// Native thread started by remote client executes this.
extern "C" {
JNIEXPORT jint JNICALL
- JVM_EnqueueOperation(char* cmd, char* arg0, char* arg1, char* arg2, char* pipename) {
- return (jint)Win32AttachListener::enqueue(cmd, arg0, arg1, arg2, pipename);
- }
+ JVM_EnqueueOperation(char* cmd, char* arg0, char* arg1, char* arg2, char* pipename) {
+ return (jint)Win32AttachListener::enqueue(ATTACH_API_V1, cmd, arg0, arg1, arg2, pipename);
+ }
+ JNIEXPORT jint JNICALL
+ JVM_EnqueueOperation_v2(char* pipename) {
+ return (jint)Win32AttachListener::enqueue(ATTACH_API_V2, "", "", "", "", pipename);
+ }
} // extern
diff --git a/src/hotspot/os/windows/gc/x/xArguments_windows.cpp b/src/hotspot/os/windows/gc/x/xArguments_windows.cpp
deleted file mode 100644
index fc5f7eccb91..00000000000
--- a/src/hotspot/os/windows/gc/x/xArguments_windows.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xArguments.hpp"
-#include "gc/x/xSyscall_windows.hpp"
-
-bool XArguments::is_os_supported() {
- return XSyscall::is_supported();
-}
diff --git a/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp b/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp
deleted file mode 100644
index 99f64328033..00000000000
--- a/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xInitialize.hpp"
-#include "gc/x/xSyscall_windows.hpp"
-
-void XInitialize::pd_initialize() {
- XSyscall::initialize();
-}
diff --git a/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp b/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp
deleted file mode 100644
index 20b3c4911fc..00000000000
--- a/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/x/xLargePages.hpp"
-#include "gc/x/xSyscall_windows.hpp"
-#include "runtime/globals.hpp"
-
-void XLargePages::pd_initialize() {
- if (UseLargePages) {
- if (XSyscall::is_large_pages_supported()) {
- _state = Explicit;
- return;
- }
- log_info_p(gc, init)("Shared large pages not supported on this OS version");
- }
-
- _state = Disabled;
-}
diff --git a/src/hotspot/os/windows/gc/x/xMapper_windows.cpp b/src/hotspot/os/windows/gc/x/xMapper_windows.cpp
deleted file mode 100644
index e69b6ec56e2..00000000000
--- a/src/hotspot/os/windows/gc/x/xMapper_windows.cpp
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xMapper_windows.hpp"
-#include "gc/x/xSyscall_windows.hpp"
-#include "logging/log.hpp"
-#include "utilities/debug.hpp"
-
-#include <windows.h>
-
-// Memory reservation, commit, views, and placeholders.
-//
-// To be able to up-front reserve address space for the heap views, and later
-// multi-map the heap views to the same physical memory, without ever losing the
-// reservation of the reserved address space, we use "placeholders".
-//
-// These placeholders block out the address space from being used by other parts
-// of the process. To commit memory in this address space, the placeholder must
-// be replaced by anonymous memory, or replaced by mapping a view against a
-// paging file mapping. We use the latter to support multi-mapping.
-//
-// We want to be able to dynamically commit and uncommit the physical memory of
-// the heap (and also unmap ZPages), in granules of ZGranuleSize bytes. There is
-// no way to grow and shrink the committed memory of a paging file mapping.
-// Therefore, we create multiple granule-sized page file mappings. The memory is
-// committed by creating a page file mapping, map a view against it, commit the
-// memory, unmap the view. The memory will stay committed until all views are
-// unmapped, and the paging file mapping handle is closed.
-//
-// When replacing a placeholder address space reservation with a mapped view
-// against a paging file mapping, the virtual address space must exactly match
-// an existing placeholder's address and size. Therefore we only deal with
-// granule-sized placeholders at this layer. Higher layers that keep track of
-// reserved available address space can (and will) coalesce placeholders, but
-// they will be split before being used.
-
-#define fatal_error(msg, addr, size) \
- fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \
- (addr), (size) / M, GetLastError())
-
-uintptr_t XMapper::reserve(uintptr_t addr, size_t size) {
- void* const res = XSyscall::VirtualAlloc2(
- GetCurrentProcess(), // Process
- (void*)addr, // BaseAddress
- size, // Size
- MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType
- PAGE_NOACCESS, // PageProtection
- nullptr, // ExtendedParameters
- 0 // ParameterCount
- );
-
- // Caller responsible for error handling
- return (uintptr_t)res;
-}
-
-void XMapper::unreserve(uintptr_t addr, size_t size) {
- const bool res = XSyscall::VirtualFreeEx(
- GetCurrentProcess(), // hProcess
- (void*)addr, // lpAddress
- size, // dwSize
- MEM_RELEASE // dwFreeType
- );
-
- if (!res) {
- fatal_error("Failed to unreserve memory", addr, size);
- }
-}
-
-HANDLE XMapper::create_paging_file_mapping(size_t size) {
- // Create mapping with SEC_RESERVE instead of SEC_COMMIT.
- //
- // We use MapViewOfFile3 for two different reasons:
- // 1) When committing memory for the created paging file
-// 2) When mapping a view of the memory created in (1)
- //
-// The non-platform code is only set up to deal with out-of-memory
- // errors in (1). By using SEC_RESERVE, we prevent MapViewOfFile3
- // from failing because of "commit limit" checks. To actually commit
- // memory in (1), a call to VirtualAlloc2 is done.
-
- HANDLE const res = XSyscall::CreateFileMappingW(
- INVALID_HANDLE_VALUE, // hFile
- nullptr, // lpFileMappingAttribute
- PAGE_READWRITE | SEC_RESERVE, // flProtect
- size >> 32, // dwMaximumSizeHigh
- size & 0xFFFFFFFF, // dwMaximumSizeLow
- nullptr // lpName
- );
-
- // Caller responsible for error handling
- return res;
-}
-
-bool XMapper::commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size) {
- const uintptr_t addr = map_view_no_placeholder(file_handle, file_offset, size);
- if (addr == 0) {
- log_error(gc)("Failed to map view of paging file mapping (%d)", GetLastError());
- return false;
- }
-
- const uintptr_t res = commit(addr, size);
- if (res != addr) {
- log_error(gc)("Failed to commit memory (%d)", GetLastError());
- }
-
- unmap_view_no_placeholder(addr, size);
-
- return res == addr;
-}
-
-uintptr_t XMapper::map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size) {
- void* const res = XSyscall::MapViewOfFile3(
- file_handle, // FileMapping
- GetCurrentProcess(), // ProcessHandle
- nullptr, // BaseAddress
- file_offset, // Offset
- size, // ViewSize
- 0, // AllocationType
- PAGE_NOACCESS, // PageProtection
- nullptr, // ExtendedParameters
- 0 // ParameterCount
- );
-
- // Caller responsible for error handling
- return (uintptr_t)res;
-}
-
-void XMapper::unmap_view_no_placeholder(uintptr_t addr, size_t size) {
- const bool res = XSyscall::UnmapViewOfFile2(
- GetCurrentProcess(), // ProcessHandle
- (void*)addr, // BaseAddress
- 0 // UnmapFlags
- );
-
- if (!res) {
- fatal_error("Failed to unmap memory", addr, size);
- }
-}
-
-uintptr_t XMapper::commit(uintptr_t addr, size_t size) {
- void* const res = XSyscall::VirtualAlloc2(
- GetCurrentProcess(), // Process
- (void*)addr, // BaseAddress
- size, // Size
- MEM_COMMIT, // AllocationType
- PAGE_NOACCESS, // PageProtection
- nullptr, // ExtendedParameters
- 0 // ParameterCount
- );
-
- // Caller responsible for error handling
- return (uintptr_t)res;
-}
-
-HANDLE XMapper::create_and_commit_paging_file_mapping(size_t size) {
- HANDLE const file_handle = create_paging_file_mapping(size);
- if (file_handle == 0) {
- log_error(gc)("Failed to create paging file mapping (%d)", GetLastError());
- return 0;
- }
-
- const bool res = commit_paging_file_mapping(file_handle, 0 /* file_offset */, size);
- if (!res) {
- close_paging_file_mapping(file_handle);
- return 0;
- }
-
- return file_handle;
-}
-
-void XMapper::close_paging_file_mapping(HANDLE file_handle) {
- const bool res = CloseHandle(
- file_handle // hObject
- );
-
- if (!res) {
- fatal("Failed to close paging file handle (%d)", GetLastError());
- }
-}
-
-HANDLE XMapper::create_shared_awe_section() {
- MEM_EXTENDED_PARAMETER parameter = { 0 };
- parameter.Type = MemSectionExtendedParameterUserPhysicalFlags;
- parameter.ULong64 = 0;
-
- HANDLE section = XSyscall::CreateFileMapping2(
- INVALID_HANDLE_VALUE, // File
- nullptr, // SecurityAttributes
- SECTION_MAP_READ | SECTION_MAP_WRITE, // DesiredAccess
- PAGE_READWRITE, // PageProtection
- SEC_RESERVE | SEC_LARGE_PAGES, // AllocationAttributes
- 0, // MaximumSize
- nullptr, // Name
- &parameter, // ExtendedParameters
- 1 // ParameterCount
- );
-
- if (section == nullptr) {
- fatal("Could not create shared AWE section (%d)", GetLastError());
- }
-
- return section;
-}
-
-uintptr_t XMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) {
- MEM_EXTENDED_PARAMETER parameter = { 0 };
- parameter.Type = MemExtendedParameterUserPhysicalHandle;
- parameter.Handle = awe_section;
-
- void* const res = XSyscall::VirtualAlloc2(
- GetCurrentProcess(), // Process
- (void*)addr, // BaseAddress
- size, // Size
- MEM_RESERVE | MEM_PHYSICAL, // AllocationType
- PAGE_READWRITE, // PageProtection
- &parameter, // ExtendedParameters
- 1 // ParameterCount
- );
-
- // Caller responsible for error handling
- return (uintptr_t)res;
-}
-
-void XMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) {
- bool res = VirtualFree(
- (void*)addr, // lpAddress
- 0, // dwSize
- MEM_RELEASE // dwFreeType
- );
-
- if (!res) {
- fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
- addr, size / M, GetLastError());
- }
-}
-
-void XMapper::split_placeholder(uintptr_t addr, size_t size) {
- const bool res = VirtualFree(
- (void*)addr, // lpAddress
- size, // dwSize
- MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType
- );
-
- if (!res) {
- fatal_error("Failed to split placeholder", addr, size);
- }
-}
-
-void XMapper::coalesce_placeholders(uintptr_t addr, size_t size) {
- const bool res = VirtualFree(
- (void*)addr, // lpAddress
- size, // dwSize
- MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType
- );
-
- if (!res) {
- fatal_error("Failed to coalesce placeholders", addr, size);
- }
-}
-
-void XMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) {
- void* const res = XSyscall::MapViewOfFile3(
- file_handle, // FileMapping
- GetCurrentProcess(), // ProcessHandle
- (void*)addr, // BaseAddress
- file_offset, // Offset
- size, // ViewSize
- MEM_REPLACE_PLACEHOLDER, // AllocationType
- PAGE_READWRITE, // PageProtection
- nullptr, // ExtendedParameters
- 0 // ParameterCount
- );
-
- if (res == nullptr) {
- fatal_error("Failed to map memory", addr, size);
- }
-}
-
-void XMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) {
- const bool res = XSyscall::UnmapViewOfFile2(
- GetCurrentProcess(), // ProcessHandle
- (void*)addr, // BaseAddress
- MEM_PRESERVE_PLACEHOLDER // UnmapFlags
- );
-
- if (!res) {
- fatal_error("Failed to unmap memory", addr, size);
- }
-}
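For reference, the placeholder protocol described in the comment at the top of this deleted file boils down to four Win32 calls, available since Windows 10 1803 (a matching SDK is required). A standalone sketch, with error handling elided and `granule` standing in for XGranuleSize:

```cpp
#include <windows.h>

// Reserve a 2-granule placeholder, split it, replace the first granule with a
// mapped view, then unmap while reinstating the placeholder.
void placeholder_demo(size_t granule) {
  HANDLE section = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr,
                                      PAGE_READWRITE, 0, (DWORD)granule, nullptr);
  void* base = VirtualAlloc2(GetCurrentProcess(), nullptr, 2 * granule,
                             MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                             PAGE_NOACCESS, nullptr, 0);
  // A view must replace an *entire* placeholder, so split off one granule.
  VirtualFree(base, granule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
  void* view = MapViewOfFile3(section, GetCurrentProcess(), base, 0, granule,
                              MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
  // Unmapping with MEM_PRESERVE_PLACEHOLDER puts the placeholder back, so the
  // address range stays reserved for future mappings.
  UnmapViewOfFile2(GetCurrentProcess(), view, MEM_PRESERVE_PLACEHOLDER);
  CloseHandle(section);
}
```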
diff --git a/src/hotspot/os/windows/gc/x/xMapper_windows.hpp b/src/hotspot/os/windows/gc/x/xMapper_windows.hpp
deleted file mode 100644
index 0f266d3fab7..00000000000
--- a/src/hotspot/os/windows/gc/x/xMapper_windows.hpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
-#define OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
-
-#include "memory/allStatic.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-#include <windows.h>
-
-class XMapper : public AllStatic {
-private:
- // Create paging file mapping
- static HANDLE create_paging_file_mapping(size_t size);
-
- // Commit paging file mapping
- static bool commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size);
-
- // Map a view anywhere without a placeholder
- static uintptr_t map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size);
-
- // Unmap a view without preserving a placeholder
- static void unmap_view_no_placeholder(uintptr_t addr, size_t size);
-
- // Commit memory covering the given virtual address range
- static uintptr_t commit(uintptr_t addr, size_t size);
-
-public:
- // Reserve memory with a placeholder
- static uintptr_t reserve(uintptr_t addr, size_t size);
-
- // Unreserve memory
- static void unreserve(uintptr_t addr, size_t size);
-
- // Create and commit paging file mapping
- static HANDLE create_and_commit_paging_file_mapping(size_t size);
-
- // Close paging file mapping
- static void close_paging_file_mapping(HANDLE file_handle);
-
- // Create a shared AWE section
- static HANDLE create_shared_awe_section();
-
- // Reserve memory attached to the shared AWE section
- static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size);
-
- // Unreserve memory attached to a shared AWE section
- static void unreserve_for_shared_awe(uintptr_t addr, size_t size);
-
- // Split a placeholder
- //
- // A view can only replace an entire placeholder, so placeholders need to be
- // split and coalesced to be the exact size of the new views.
- // [addr, addr + size) needs to be a proper sub-placeholder of an existing
- // placeholder.
- static void split_placeholder(uintptr_t addr, size_t size);
-
- // Coalesce a placeholder
- //
- // [addr, addr + size) is the new placeholder. A sub-placeholder needs to
- // exist within that range.
- static void coalesce_placeholders(uintptr_t addr, size_t size);
-
- // Map a view of the file handle and replace the placeholder covering the
- // given virtual address range
- static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size);
-
- // Unmap the view and reinstate a placeholder covering the given virtual
- // address range
- static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size);
-};
-
-#endif // OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
diff --git a/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp b/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp
deleted file mode 100644
index 47a84df962e..00000000000
--- a/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xNUMA.hpp"
-
-void XNUMA::pd_initialize() {
- _enabled = false;
-}
-
-uint32_t XNUMA::count() {
- return 1;
-}
-
-uint32_t XNUMA::id() {
- return 0;
-}
-
-uint32_t XNUMA::memory_id(uintptr_t addr) {
- // NUMA support not enabled, assume everything belongs to node zero
- return 0;
-}
diff --git a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp
deleted file mode 100644
index 92d47dfb7c8..00000000000
--- a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "gc/x/xGranuleMap.inline.hpp"
-#include "gc/x/xLargePages.inline.hpp"
-#include "gc/x/xMapper_windows.hpp"
-#include "gc/x/xPhysicalMemoryBacking_windows.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/debug.hpp"
-
-class XPhysicalMemoryBackingImpl : public CHeapObj<mtGC> {
-public:
- virtual size_t commit(size_t offset, size_t size) = 0;
- virtual size_t uncommit(size_t offset, size_t size) = 0;
- virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0;
- virtual void unmap(uintptr_t addr, size_t size) const = 0;
-};
-
-// Implements small pages (paged) support using placeholder reservation.
-//
-// The backing commits and uncommits physical memory, that can be
-// multi-mapped into the virtual address space. To support fine-grained
-// committing and uncommitting, each XGranuleSize'd chunk is mapped to
-// a separate paging file mapping.
-
-class XPhysicalMemoryBackingSmallPages : public XPhysicalMemoryBackingImpl {
-private:
- XGranuleMap<HANDLE> _handles;
-
- HANDLE get_handle(uintptr_t offset) const {
- HANDLE const handle = _handles.get(offset);
- assert(handle != 0, "Should be set");
- return handle;
- }
-
- void put_handle(uintptr_t offset, HANDLE handle) {
- assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
- assert(_handles.get(offset) == 0, "Should be cleared");
- _handles.put(offset, handle);
- }
-
- void clear_handle(uintptr_t offset) {
- assert(_handles.get(offset) != 0, "Should be set");
- _handles.put(offset, 0);
- }
-
-public:
- XPhysicalMemoryBackingSmallPages(size_t max_capacity) :
- XPhysicalMemoryBackingImpl(),
- _handles(max_capacity) {}
-
- size_t commit(size_t offset, size_t size) {
- for (size_t i = 0; i < size; i += XGranuleSize) {
- HANDLE const handle = XMapper::create_and_commit_paging_file_mapping(XGranuleSize);
- if (handle == 0) {
- return i;
- }
-
- put_handle(offset + i, handle);
- }
-
- return size;
- }
-
- size_t uncommit(size_t offset, size_t size) {
- for (size_t i = 0; i < size; i += XGranuleSize) {
- HANDLE const handle = get_handle(offset + i);
- clear_handle(offset + i);
- XMapper::close_paging_file_mapping(handle);
- }
-
- return size;
- }
-
- void map(uintptr_t addr, size_t size, size_t offset) const {
- assert(is_aligned(offset, XGranuleSize), "Misaligned");
- assert(is_aligned(addr, XGranuleSize), "Misaligned");
- assert(is_aligned(size, XGranuleSize), "Misaligned");
-
- for (size_t i = 0; i < size; i += XGranuleSize) {
- HANDLE const handle = get_handle(offset + i);
- XMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, XGranuleSize);
- }
- }
-
- void unmap(uintptr_t addr, size_t size) const {
- assert(is_aligned(addr, XGranuleSize), "Misaligned");
- assert(is_aligned(size, XGranuleSize), "Misaligned");
-
- for (size_t i = 0; i < size; i += XGranuleSize) {
- XMapper::unmap_view_preserve_placeholder(addr + i, XGranuleSize);
- }
- }
-};
-
-// Implements Large Pages (locked) support using shared AWE physical memory.
-//
-// Shared AWE physical memory also works with small pages, but it has
-// a few drawbacks that makes it a no-go to use it at this point:
-//
-// 1) It seems to use 8 bytes of committed memory per *reserved* memory.
-// Given our scheme to use a large address space range this turns out to
-// use too much memory.
-//
-// 2) It requires memory locking privileges, even for small pages. This
-// has always been a requirement for large pages, and would be an extra
-// restriction for usage with small pages.
-//
-// Note: The large pages size is tied to our XGranuleSize.
-
-extern HANDLE XAWESection;
-
-class XPhysicalMemoryBackingLargePages : public XPhysicalMemoryBackingImpl {
-private:
- ULONG_PTR* const _page_array;
-
- static ULONG_PTR* alloc_page_array(size_t max_capacity) {
- const size_t npages = max_capacity / XGranuleSize;
- const size_t array_size = npages * sizeof(ULONG_PTR);
-
- return (ULONG_PTR*)os::malloc(array_size, mtGC);
- }
-
-public:
- XPhysicalMemoryBackingLargePages(size_t max_capacity) :
- XPhysicalMemoryBackingImpl(),
- _page_array(alloc_page_array(max_capacity)) {}
-
- size_t commit(size_t offset, size_t size) {
- const size_t index = offset >> XGranuleSizeShift;
- const size_t npages = size >> XGranuleSizeShift;
-
- size_t npages_res = npages;
- const bool res = AllocateUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]);
- if (!res) {
- fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
- size / M, offset, GetLastError());
- } else {
- log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset);
- }
-
- // AllocateUserPhysicalPages might not be able to allocate the requested amount of memory.
- // The number of pages actually allocated is written to npages_res.
- return npages_res << XGranuleSizeShift;
- }
-
- size_t uncommit(size_t offset, size_t size) {
- const size_t index = offset >> XGranuleSizeShift;
- const size_t npages = size >> XGranuleSizeShift;
-
- size_t npages_res = npages;
- const bool res = FreeUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]);
- if (!res) {
- fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
- size, offset, GetLastError());
- }
-
- return npages_res << XGranuleSizeShift;
- }
-
- void map(uintptr_t addr, size_t size, size_t offset) const {
- const size_t npages = size >> XGranuleSizeShift;
- const size_t index = offset >> XGranuleSizeShift;
-
- const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]);
- if (!res) {
- fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
- addr, size / M, offset, GetLastError());
- }
- }
-
- void unmap(uintptr_t addr, size_t size) const {
- const size_t npages = size >> XGranuleSizeShift;
-
- const bool res = MapUserPhysicalPages((char*)addr, npages, nullptr);
- if (!res) {
- fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
- addr, size / M, GetLastError());
- }
- }
-};
-
-static XPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) {
- if (XLargePages::is_enabled()) {
- return new XPhysicalMemoryBackingLargePages(max_capacity);
- }
-
- return new XPhysicalMemoryBackingSmallPages(max_capacity);
-}
-
-XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
- _impl(select_impl(max_capacity)) {}
-
-bool XPhysicalMemoryBacking::is_initialized() const {
- return true;
-}
-
-void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
- // Does nothing
-}
-
-size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) {
- log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- offset / M, (offset + length) / M, length / M);
-
- return _impl->commit(offset, length);
-}
-
-size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
- log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- offset / M, (offset + length) / M, length / M);
-
- return _impl->uncommit(offset, length);
-}
-
-void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const {
- assert(is_aligned(offset, XGranuleSize), "Misaligned: " PTR_FORMAT, offset);
- assert(is_aligned(addr, XGranuleSize), "Misaligned: " PTR_FORMAT, addr);
- assert(is_aligned(size, XGranuleSize), "Misaligned: " PTR_FORMAT, size);
-
- _impl->map(addr, size, offset);
-}
-
-void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
- assert(is_aligned(addr, XGranuleSize), "Misaligned");
- assert(is_aligned(size, XGranuleSize), "Misaligned");
-
- _impl->unmap(addr, size);
-}
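The deleted large-pages backing is built on AWE. A standalone sketch of the classic process-handle AWE flow it relies on (the deleted code used the shared-section variant via CreateFileMapping2; AllocateUserPhysicalPages requires SeLockMemoryPrivilege, and the 4 KiB page size plus all names here are illustrative):

```cpp
#include <windows.h>

// Error handling and cleanup on the failure paths are elided for brevity.
bool awe_demo(size_t npages) {
  ULONG_PTR count = npages;
  ULONG_PTR* pfns = new ULONG_PTR[npages];

  // Allocate physical pages once, then reserve an AWE-capable virtual range
  // to map them into.
  if (!AllocateUserPhysicalPages(GetCurrentProcess(), &count, pfns)) return false;
  void* addr = VirtualAlloc(nullptr, npages * 4096,
                            MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE);

  if (!MapUserPhysicalPages(addr, count, pfns)) return false;    // map the view
  if (!MapUserPhysicalPages(addr, count, nullptr)) return false; // unmap; pages stay allocated

  FreeUserPhysicalPages(GetCurrentProcess(), &count, pfns);      // the real uncommit
  delete[] pfns;
  return true;
}
```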
diff --git a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp
deleted file mode 100644
index d6e123f21e5..00000000000
--- a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP
-#define OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP
-
-#include "utilities/globalDefinitions.hpp"
-
-#include <windows.h>
-
-class XPhysicalMemoryBackingImpl;
-
-class XPhysicalMemoryBacking {
-private:
- XPhysicalMemoryBackingImpl* _impl;
-
-public:
- XPhysicalMemoryBacking(size_t max_capacity);
-
- bool is_initialized() const;
-
- void warn_commit_limits(size_t max_capacity) const;
-
- size_t commit(size_t offset, size_t length);
- size_t uncommit(size_t offset, size_t length);
-
- void map(uintptr_t addr, size_t size, size_t offset) const;
- void unmap(uintptr_t addr, size_t size) const;
-};
-
-#endif // OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP
diff --git a/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp b/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp
deleted file mode 100644
index f22966a5489..00000000000
--- a/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gcLogPrecious.hpp"
-#include "gc/x/xSyscall_windows.hpp"
-#include "runtime/java.hpp"
-#include "runtime/os.hpp"
-
-XSyscall::CreateFileMappingWFn XSyscall::CreateFileMappingW;
-XSyscall::CreateFileMapping2Fn XSyscall::CreateFileMapping2;
-XSyscall::VirtualAlloc2Fn XSyscall::VirtualAlloc2;
-XSyscall::VirtualFreeExFn XSyscall::VirtualFreeEx;
-XSyscall::MapViewOfFile3Fn XSyscall::MapViewOfFile3;
-XSyscall::UnmapViewOfFile2Fn XSyscall::UnmapViewOfFile2;
-
-static void* lookup_kernelbase_library() {
- const char* const name = "KernelBase";
- char ebuf[1024];
- void* const handle = os::dll_load(name, ebuf, sizeof(ebuf));
- if (handle == nullptr) {
- log_error_p(gc)("Failed to load library: %s", name);
- }
- return handle;
-}
-
-static void* lookup_kernelbase_symbol(const char* name) {
- static void* const handle = lookup_kernelbase_library();
- if (handle == nullptr) {
- return nullptr;
- }
- return os::dll_lookup(handle, name);
-}
-
-static bool has_kernelbase_symbol(const char* name) {
- return lookup_kernelbase_symbol(name) != nullptr;
-}
-
-template <typename Fn>
-static void install_kernelbase_symbol(Fn*& fn, const char* name) {
- fn = reinterpret_cast(lookup_kernelbase_symbol(name));
-}
-
-template <typename Fn>
-static void install_kernelbase_1803_symbol_or_exit(Fn*& fn, const char* name) {
- install_kernelbase_symbol(fn, name);
- if (fn == nullptr) {
- log_error_p(gc)("Failed to lookup symbol: %s", name);
- vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
- }
-}
-
-void XSyscall::initialize() {
- // Required
- install_kernelbase_1803_symbol_or_exit(CreateFileMappingW, "CreateFileMappingW");
- install_kernelbase_1803_symbol_or_exit(VirtualAlloc2, "VirtualAlloc2");
- install_kernelbase_1803_symbol_or_exit(VirtualFreeEx, "VirtualFreeEx");
- install_kernelbase_1803_symbol_or_exit(MapViewOfFile3, "MapViewOfFile3");
- install_kernelbase_1803_symbol_or_exit(UnmapViewOfFile2, "UnmapViewOfFile2");
-
- // Optional - for large pages support
- install_kernelbase_symbol(CreateFileMapping2, "CreateFileMapping2");
-}
-
-bool XSyscall::is_supported() {
- // Available in Windows version 1803 and later
- return has_kernelbase_symbol("VirtualAlloc2");
-}
-
-bool XSyscall::is_large_pages_supported() {
- // Available in Windows version 1809 and later
- return has_kernelbase_symbol("CreateFileMapping2");
-}
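The deleted XSyscall version probing is ordinary dynamic symbol lookup against KernelBase; HotSpot wraps it in os::dll_load/os::dll_lookup. A minimal sketch with the raw Win32 loader calls (function and variable names invented):

```cpp
#include <windows.h>

typedef PVOID (WINAPI* VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
                                        MEM_EXTENDED_PARAMETER*, ULONG);

// Probe for an API that only exists on Windows 10 1803+; a null result means
// "feature unsupported", not a hard failure.
VirtualAlloc2Fn lookup_virtual_alloc2() {
  HMODULE kb = GetModuleHandleA("KernelBase.dll");  // always loaded in-process
  return kb ? (VirtualAlloc2Fn)GetProcAddress(kb, "VirtualAlloc2") : nullptr;
}
```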
diff --git a/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp b/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp
deleted file mode 100644
index 89ba2573b10..00000000000
--- a/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP
-#define OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP
-
-#include "utilities/globalDefinitions.hpp"
-
-#include <windows.h>
-#include <memoryapi.h>
-
-class XSyscall {
-private:
- typedef HANDLE (*CreateFileMappingWFn)(HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR);
- typedef HANDLE (*CreateFileMapping2Fn)(HANDLE, LPSECURITY_ATTRIBUTES, ULONG, ULONG, ULONG, ULONG64, PCWSTR, PMEM_EXTENDED_PARAMETER, ULONG);
- typedef PVOID (*VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
- typedef BOOL (*VirtualFreeExFn)(HANDLE, LPVOID, SIZE_T, DWORD);
- typedef PVOID (*MapViewOfFile3Fn)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
- typedef BOOL (*UnmapViewOfFile2Fn)(HANDLE, PVOID, ULONG);
-
-public:
- static CreateFileMappingWFn CreateFileMappingW;
- static CreateFileMapping2Fn CreateFileMapping2;
- static VirtualAlloc2Fn VirtualAlloc2;
- static VirtualFreeExFn VirtualFreeEx;
- static MapViewOfFile3Fn MapViewOfFile3;
- static UnmapViewOfFile2Fn UnmapViewOfFile2;
-
- static void initialize();
-
- static bool is_supported();
- static bool is_large_pages_supported();
-};
-
-#endif // OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP
diff --git a/src/hotspot/os/windows/gc/x/xUtils_windows.cpp b/src/hotspot/os/windows/gc/x/xUtils_windows.cpp
deleted file mode 100644
index 788da80834a..00000000000
--- a/src/hotspot/os/windows/gc/x/xUtils_windows.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xUtils.hpp"
-#include "utilities/debug.hpp"
-
-#include <malloc.h>
-
-uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) {
- void* const res = _aligned_malloc(size, alignment);
-
- if (res == nullptr) {
- fatal("_aligned_malloc failed");
- }
-
- memset(res, 0, size);
-
- return (uintptr_t)res;
-}
diff --git a/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp b/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp
deleted file mode 100644
index a54f1e3cbae..00000000000
--- a/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xAddress.inline.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "gc/x/xLargePages.inline.hpp"
-#include "gc/x/xMapper_windows.hpp"
-#include "gc/x/xSyscall_windows.hpp"
-#include "gc/x/xVirtualMemory.inline.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-
-class XVirtualMemoryManagerImpl : public CHeapObj<mtGC> {
-public:
- virtual void initialize_before_reserve() {}
- virtual void initialize_after_reserve(XMemoryManager* manager) {}
- virtual bool reserve(uintptr_t addr, size_t size) = 0;
- virtual void unreserve(uintptr_t addr, size_t size) = 0;
-};
-
-// Implements small pages (paged) support using placeholder reservation.
-class XVirtualMemoryManagerSmallPages : public XVirtualMemoryManagerImpl {
-private:
- class PlaceholderCallbacks : public AllStatic {
- public:
- static void split_placeholder(uintptr_t start, size_t size) {
- XMapper::split_placeholder(XAddress::marked0(start), size);
- XMapper::split_placeholder(XAddress::marked1(start), size);
- XMapper::split_placeholder(XAddress::remapped(start), size);
- }
-
- static void coalesce_placeholders(uintptr_t start, size_t size) {
- XMapper::coalesce_placeholders(XAddress::marked0(start), size);
- XMapper::coalesce_placeholders(XAddress::marked1(start), size);
- XMapper::coalesce_placeholders(XAddress::remapped(start), size);
- }
-
- static void split_into_placeholder_granules(uintptr_t start, size_t size) {
- for (uintptr_t addr = start; addr < start + size; addr += XGranuleSize) {
- split_placeholder(addr, XGranuleSize);
- }
- }
-
- static void coalesce_into_one_placeholder(uintptr_t start, size_t size) {
- assert(is_aligned(size, XGranuleSize), "Must be granule aligned");
-
- if (size > XGranuleSize) {
- coalesce_placeholders(start, size);
- }
- }
-
- static void create_callback(const XMemory* area) {
- assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
- coalesce_into_one_placeholder(area->start(), area->size());
- }
-
- static void destroy_callback(const XMemory* area) {
- assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
- // Don't try split the last granule - VirtualFree will fail
- split_into_placeholder_granules(area->start(), area->size() - XGranuleSize);
- }
-
- static void shrink_from_front_callback(const XMemory* area, size_t size) {
- assert(is_aligned(size, XGranuleSize), "Must be granule aligned");
- split_into_placeholder_granules(area->start(), size);
- }
-
- static void shrink_from_back_callback(const XMemory* area, size_t size) {
- assert(is_aligned(size, XGranuleSize), "Must be granule aligned");
- // Don't try split the last granule - VirtualFree will fail
- split_into_placeholder_granules(area->end() - size, size - XGranuleSize);
- }
-
- static void grow_from_front_callback(const XMemory* area, size_t size) {
- assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
- coalesce_into_one_placeholder(area->start() - size, area->size() + size);
- }
-
- static void grow_from_back_callback(const XMemory* area, size_t size) {
- assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
- coalesce_into_one_placeholder(area->start(), area->size() + size);
- }
-
- static void register_with(XMemoryManager* manager) {
- // Each reserved virtual memory address area registered in _manager is
- // exactly covered by a single placeholder. Callbacks are installed so
- // that whenever a memory area changes, the corresponding placeholder
- // is adjusted.
- //
- // The create and grow callbacks are called when virtual memory is
- // returned to the memory manager. The new memory area is then covered
- // by a new single placeholder.
- //
- // The destroy and shrink callbacks are called when virtual memory is
- // allocated from the memory manager. The memory area is then is split
- // into granule-sized placeholders.
- //
- // See comment in zMapper_windows.cpp explaining why placeholders are
- // split into XGranuleSize sized placeholders.
-
- XMemoryManager::Callbacks callbacks;
-
- callbacks._create = &create_callback;
- callbacks._destroy = &destroy_callback;
- callbacks._shrink_from_front = &shrink_from_front_callback;
- callbacks._shrink_from_back = &shrink_from_back_callback;
- callbacks._grow_from_front = &grow_from_front_callback;
- callbacks._grow_from_back = &grow_from_back_callback;
-
- manager->register_callbacks(callbacks);
- }
- };
-
- virtual void initialize_after_reserve(XMemoryManager* manager) {
- PlaceholderCallbacks::register_with(manager);
- }
-
- virtual bool reserve(uintptr_t addr, size_t size) {
- const uintptr_t res = XMapper::reserve(addr, size);
-
- assert(res == addr || res == 0, "Should not reserve other memory than requested");
- return res == addr;
- }
-
- virtual void unreserve(uintptr_t addr, size_t size) {
- XMapper::unreserve(addr, size);
- }
-};
-
-// Implements Large Pages (locked) support using shared AWE physical memory.
-
-// XPhysicalMemory layer needs access to the section
-HANDLE XAWESection;
-
-class XVirtualMemoryManagerLargePages : public XVirtualMemoryManagerImpl {
-private:
- virtual void initialize_before_reserve() {
- XAWESection = XMapper::create_shared_awe_section();
- }
-
- virtual bool reserve(uintptr_t addr, size_t size) {
- const uintptr_t res = XMapper::reserve_for_shared_awe(XAWESection, addr, size);
-
- assert(res == addr || res == 0, "Should not reserve other memory than requested");
- return res == addr;
- }
-
- virtual void unreserve(uintptr_t addr, size_t size) {
- XMapper::unreserve_for_shared_awe(addr, size);
- }
-};
-
-static XVirtualMemoryManagerImpl* _impl = nullptr;
-
-void XVirtualMemoryManager::pd_initialize_before_reserve() {
- if (XLargePages::is_enabled()) {
- _impl = new XVirtualMemoryManagerLargePages();
- } else {
- _impl = new XVirtualMemoryManagerSmallPages();
- }
- _impl->initialize_before_reserve();
-}
-
-void XVirtualMemoryManager::pd_initialize_after_reserve() {
- _impl->initialize_after_reserve(&_manager);
-}
-
-bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
- return _impl->reserve(addr, size);
-}
-
-void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
- _impl->unreserve(addr, size);
-}
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 4dafef0c098..71efb57e0f2 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -3825,7 +3825,8 @@ bool os::pd_release_memory(char* addr, size_t bytes) {
if (err != nullptr) {
log_warning(os)("bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
#ifdef ASSERT
- os::print_memory_mappings((char*)start, bytes, tty);
+ fileStream fs(stdout);
+ os::print_memory_mappings((char*)start, bytes, &fs);
assert(false, "bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
#endif
return false;
diff --git a/src/hotspot/os/windows/perfMemory_windows.cpp b/src/hotspot/os/windows/perfMemory_windows.cpp
index 06b057315cb..959be982fab 100644
--- a/src/hotspot/os/windows/perfMemory_windows.cpp
+++ b/src/hotspot/os/windows/perfMemory_windows.cpp
@@ -1803,7 +1803,7 @@ void PerfMemory::detach(char* addr, size_t bytes) {
if (MemTracker::enabled()) {
// it does not go through os api, the operation has to record from here
- ThreadCritical tc;
+ NmtVirtualMemoryLocker ml;
remove_file_mapping(addr);
MemTracker::record_virtual_memory_release((address)addr, bytes);
} else {
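The switch from ThreadCritical to NmtVirtualMemoryLocker scopes the NMT bookkeeping under the lock dedicated to virtual-memory tracking rather than the global critical section. Both are RAII guards; a generic stand-in for the idiom (std::mutex replaces the VM-internal lock, names hypothetical):

    #include <mutex>

    static std::mutex nmt_virtual_memory_mutex; // stand-in for the VM lock

    // Constructor acquires, destructor releases, so every early return in
    // the guarded scope still unlocks correctly.
    struct ScopedNmtLocker {
      ScopedNmtLocker()  { nmt_virtual_memory_mutex.lock(); }
      ~ScopedNmtLocker() { nmt_virtual_memory_mutex.unlock(); }
    };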
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index b45d38650c0..f83aa603062 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -462,12 +462,6 @@ void os::print_tos_pc(outputStream *st, const void *context) {
address pc = os::Posix::ucontext_get_pc(uc);
print_instructions(st, pc);
st->cr();
-
- // Try to decode the instructions.
- st->print_cr("Decoded instructions: (pc=" PTR_FORMAT ")", p2i(pc));
- st->print("");
- // TODO: PPC port Disassembler::decode(pc, 16, 16, st);
- st->cr();
}
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
diff --git a/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp
deleted file mode 100644
index b4c49f477a6..00000000000
--- a/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP
-#define OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP
-
-#include <sys/syscall.h>
-
-//
-// Support for building on older Linux systems
-//
-
-#ifndef SYS_memfd_create
-#define SYS_memfd_create 279
-#endif
-#ifndef SYS_fallocate
-#define SYS_fallocate 47
-#endif
-
-#endif // OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP
diff --git a/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp b/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp
deleted file mode 100644
index 22d51cd58f5..00000000000
--- a/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2021 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP
-#define OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP
-
-#include <sys/syscall.h>
-
-//
-// Support for building on older Linux systems
-//
-
-
-#ifndef SYS_memfd_create
-#define SYS_memfd_create 360
-#endif
-#ifndef SYS_fallocate
-#define SYS_fallocate 309
-#endif
-
-#endif // OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP
diff --git a/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp
deleted file mode 100644
index bfd49b0bf4e..00000000000
--- a/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP
-#define OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP
-
-#include <sys/syscall.h>
-
-//
-// Support for building on older Linux systems
-//
-
-#ifndef SYS_memfd_create
-#define SYS_memfd_create 279
-#endif
-#ifndef SYS_fallocate
-#define SYS_fallocate 47
-#endif
-
-#endif // OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP
diff --git a/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp b/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
index df4a2e347cc..2020e2fdb24 100644
--- a/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
@@ -74,7 +74,7 @@
#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
-#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
@@ -178,6 +178,9 @@ void RiscvHwprobe::add_features_from_query_result() {
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZFH)) {
VM_Version::ext_Zfh.enable_feature();
}
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVFH)) {
+ VM_Version::ext_Zvfh.enable_feature();
+ }
if (is_valid(RISCV_HWPROBE_KEY_CPUPERF_0)) {
VM_Version::unaligned_access.enable_feature(
query[RISCV_HWPROBE_KEY_CPUPERF_0].value & RISCV_HWPROBE_MISALIGNED_MASK);
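The (1 << 31) to (1ULL << 31) change matters because 1 << 31 is evaluated in signed int; converting the result to the 64-bit type used for hwprobe values sign-extends it, so the mask becomes 0xFFFFFFFF80000000 and spuriously overlaps the ZFA/ZTSO/ZACAS bits above it. A self-contained illustration of the promotion (outputs are what common two's-complement targets produce):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t bad  = 1 << 31;    // int arithmetic, then sign-extension to 64 bits
      uint64_t good = 1ULL << 31; // unsigned 64-bit arithmetic from the start
      printf("bad  = 0x%016llx\n", (unsigned long long)bad);  // 0xffffffff80000000
      printf("good = 0x%016llx\n", (unsigned long long)good); // 0x0000000080000000
      return 0;
    }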
diff --git a/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp
deleted file mode 100644
index 2709b373b28..00000000000
--- a/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
-#define OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
-
-#include <sys/syscall.h>
-
-//
-// Support for building on older Linux systems
-//
-
-#ifndef SYS_memfd_create
-#define SYS_memfd_create 319
-#endif
-#ifndef SYS_fallocate
-#define SYS_fallocate 285
-#endif
-
-#endif // OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
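All of the deleted xSyscall headers served one purpose: let the (now removed) single-generation ZGC build on systems whose libc headers predate memfd_create and fallocate by supplying the raw syscall numbers. The idiom they enabled looks roughly like this (sketch; 319 is the x86-64 number taken from the deleted header):

    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef SYS_memfd_create
    #define SYS_memfd_create 319 // x86-64 fallback, as in the deleted header
    #endif

    // Invoke memfd_create(2) via syscall(2) so the code compiles and links
    // even when libc is too old to provide the memfd_create() wrapper.
    static int memfd_create_compat(const char* name, unsigned int flags) {
      return (int)syscall(SYS_memfd_create, name, flags);
    }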
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index 0d5d07fc8a8..8fdcbe63c7e 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -544,6 +544,21 @@ void os::print_context(outputStream *st, const void *context) {
st->print(", ERR=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_ERR]);
st->cr();
st->print(" TRAPNO=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_TRAPNO]);
+ // Add XMM registers + MXCSR. Note that C2 uses XMM to spill GPR values including pointers.
+ st->cr();
+ st->cr();
+ // Sanity check: fpregs should point into the context.
+ if ((address)uc->uc_mcontext.fpregs < (address)uc ||
+ pointer_delta(uc->uc_mcontext.fpregs, uc, 1) >= sizeof(ucontext_t)) {
+ st->print_cr("bad uc->uc_mcontext.fpregs: " INTPTR_FORMAT " (uc: " INTPTR_FORMAT ")",
+ p2i(uc->uc_mcontext.fpregs), p2i(uc));
+ } else {
+ for (int i = 0; i < 16; ++i) {
+ const int64_t* xmm_val_addr = (int64_t*)&(uc->uc_mcontext.fpregs->_xmm[i]);
+ st->print_cr("XMM[%d]=" INTPTR_FORMAT " " INTPTR_FORMAT, i, xmm_val_addr[1], xmm_val_addr[0]);
+ }
+ st->print(" MXCSR=" UINT32_FORMAT_X_0, uc->uc_mcontext.fpregs->mxcsr);
+ }
#else
st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
index 62022d780a2..de59a74cc24 100644
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
@@ -437,6 +437,15 @@ void os::print_context(outputStream *st, const void *context) {
st->cr();
st->print( "RIP=" INTPTR_FORMAT, uc->Rip);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
+ // Add XMM registers + MXCSR. Note that C2 uses XMM to spill GPR values including pointers.
+ st->cr();
+ st->cr();
+ for (int i = 0; i < 16; ++i) {
+ const uint64_t *xmm = ((const uint64_t*)&(uc->Xmm0)) + 2 * i;
+ st->print_cr("XMM[%d]=" INTPTR_FORMAT " " INTPTR_FORMAT,
+ i, xmm[1], xmm[0]);
+ }
+ st->print(" MXCSR=" UINT32_FORMAT_X_0, uc->MxCsr);
#else
st->print( "EAX=" INTPTR_FORMAT, uc->Eax);
st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
diff --git a/src/hotspot/share/adlc/formsopt.cpp b/src/hotspot/share/adlc/formsopt.cpp
index e1e4ed96c2e..5de8974e2c0 100644
--- a/src/hotspot/share/adlc/formsopt.cpp
+++ b/src/hotspot/share/adlc/formsopt.cpp
@@ -171,9 +171,13 @@ int RegisterForm::RegMask_Size() {
// on the stack (stack registers) up to some interesting limit. Methods
// that need more parameters will NOT be compiled. On Intel, the limit
// is something like 90+ parameters.
- // Add a few (3 words == 96 bits) for incoming & outgoing arguments to calls.
- // Round up to the next doubleword size.
- return (words_for_regs + 3 + 1) & ~1;
+ // - Add a few (3 words == 96 bits) for incoming & outgoing arguments to
+ // calls.
+ // - Round up to the next doubleword size.
+ // - Add one more word to accommodate a reasonable number of stack locations
+ // in the register mask regardless of how much slack is created by rounding.
+ // This was found necessary after adding 16 new registers for APX.
+ return (words_for_regs + 3 + 1 + 1) & ~1;
}
void RegisterForm::dump() { // Debug printer
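The sizing arithmetic is easier to see with numbers: three words are reserved for call arguments, one extra word keeps room for stack locations even when rounding produces no slack (needed once APX added 16 registers), and the final +1 with & ~1 rounds up to an even word count. A small sketch of the same computation:

    // RegMask sizing, as in RegisterForm::RegMask_Size() above.
    static int regmask_words(int words_for_regs) {
      const int arg_words  = 3; // incoming & outgoing call arguments
      const int extra_word = 1; // headroom for stack locations (APX growth)
      return (words_for_regs + arg_words + extra_word + 1) & ~1; // round up to even
    }
    // regmask_words(10) == 14, regmask_words(11) == 16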
diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp
index e7dd00fa390..dfa414ef564 100644
--- a/src/hotspot/share/adlc/formssel.cpp
+++ b/src/hotspot/share/adlc/formssel.cpp
@@ -3957,15 +3957,15 @@ void MatchNode::count_commutative_op(int& count) {
"AndI","AndL",
"MaxI","MinI","MaxF","MinF","MaxD","MinD",
"MulI","MulL","MulF","MulD",
- "OrI","OrL",
- "XorI","XorL"
+ "OrI","OrL", "XorI","XorL",
+ "UMax","UMin"
};
static const char *commut_vector_op_list[] = {
"AddVB", "AddVS", "AddVI", "AddVL", "AddVF", "AddVD",
"MulVB", "MulVS", "MulVI", "MulVL", "MulVF", "MulVD",
"AndV", "OrV", "XorV",
- "MaxV", "MinV"
+ "MaxV", "MinV", "UMax","UMin"
};
if (_lChild && _rChild && (_lChild->_lChild || _rChild->_lChild)) {
@@ -4339,7 +4339,7 @@ bool MatchRule::is_vector() const {
"NegVF","NegVD","NegVI","NegVL",
"SqrtVD","SqrtVF",
"AndV" ,"XorV" ,"OrV",
- "MaxV", "MinV",
+ "MaxV", "MinV", "UMinV", "UMaxV",
"CompressV", "ExpandV", "CompressM", "CompressBitsV", "ExpandBitsV",
"AddReductionVI", "AddReductionVL",
"AddReductionVF", "AddReductionVD",
@@ -4362,7 +4362,7 @@ bool MatchRule::is_vector() const {
"VectorUCastB2X", "VectorUCastS2X", "VectorUCastI2X",
"VectorMaskWrapper","VectorMaskCmp","VectorReinterpret","LoadVectorMasked","StoreVectorMasked",
"FmaVD","FmaVF","PopCountVI","PopCountVL","PopulateIndex","VectorLongToMask",
- "CountLeadingZerosV", "CountTrailingZerosV", "SignumVF", "SignumVD",
+ "CountLeadingZerosV", "CountTrailingZerosV", "SignumVF", "SignumVD", "SaturatingAddV", "SaturatingSubV",
// Next are vector mask ops.
"MaskAll", "AndVMask", "OrVMask", "XorVMask", "VectorMaskCast",
"RoundVF", "RoundVD",
diff --git a/src/hotspot/share/cds/archiveHeapLoader.cpp b/src/hotspot/share/cds/archiveHeapLoader.cpp
index 0e7ef08064c..01831cf0f3e 100644
--- a/src/hotspot/share/cds/archiveHeapLoader.cpp
+++ b/src/hotspot/share/cds/archiveHeapLoader.cpp
@@ -142,15 +142,22 @@ class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
class PatchUncompressedEmbeddedPointers: public BitMapClosure {
oop* _start;
+ intptr_t _delta;
public:
- PatchUncompressedEmbeddedPointers(oop* start) : _start(start) {}
+ PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
+ _start(start),
+ _delta(runtime_offset) {}
+
+ PatchUncompressedEmbeddedPointers(oop* start) :
+ _start(start),
+ _delta(ArchiveHeapLoader::mapped_heap_delta()) {}
bool do_bit(size_t offset) {
oop* p = _start + offset;
intptr_t dumptime_oop = (intptr_t)((void*)*p);
assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
- intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();
+ intptr_t runtime_oop = dumptime_oop + _delta;
     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
return true;
}
@@ -221,10 +228,6 @@ void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loa
}
bool ArchiveHeapLoader::can_load() {
- if (!UseCompressedOops) {
- // Pointer relocation for uncompressed oops is unimplemented.
- return false;
- }
return Universe::heap()->can_load_archived_objects();
}
@@ -312,13 +315,18 @@ bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiv
uintptr_t oopmap = bitmap_base + r->oopmap_offset();
BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
- PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
- bm.iterate(&patcher);
+ if (UseCompressedOops) {
+ PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
+ bm.iterate(&patcher);
+ } else {
+ PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region->_runtime_offset);
+ bm.iterate(&patcher);
+ }
return true;
}
bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
- assert(UseCompressedOops, "loaded heap for !UseCompressedOops is unimplemented");
+ assert(can_load(), "loaded heap for must be supported");
init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
LoadedArchiveHeapRegion loaded_region;
@@ -358,8 +366,12 @@ class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
}
}
virtual void do_oop(oop* p) {
- // Uncompressed oops are not supported by loaded heaps.
- Unimplemented();
+ oop v = *p;
+    if (v != nullptr) {
+      uintptr_t u = cast_from_oop<uintptr_t>(v);
+ ArchiveHeapLoader::assert_in_loaded_heap(u);
+ guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
+ }
}
};
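The uncompressed-oops loading path above is plain delta relocation: every oop slot recorded in the bitmap holds a dump-time address, and adding the run-time offset rebases it. A toy model of the same operation over a flat array (the bitmap walk and RawAccess store are simplified away; names hypothetical):

    #include <cstddef>
    #include <cstdint>

    // Rebase each recorded pointer slot by 'delta', as
    // PatchUncompressedEmbeddedPointers::do_bit() does per bitmap hit.
    static void patch_embedded_pointers(uintptr_t* heap_words,
                                        const size_t* slot_indices, size_t n,
                                        intptr_t delta) {
      for (size_t i = 0; i < n; i++) {
        uintptr_t dumptime = heap_words[slot_indices[i]];
        // null slots were filtered out at dump time, matching the assert above
        heap_words[slot_indices[i]] = dumptime + (uintptr_t)delta;
      }
    }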
diff --git a/src/hotspot/share/cds/archiveHeapLoader.hpp b/src/hotspot/share/cds/archiveHeapLoader.hpp
index 700135a3816..8b9fab91aa3 100644
--- a/src/hotspot/share/cds/archiveHeapLoader.hpp
+++ b/src/hotspot/share/cds/archiveHeapLoader.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -146,6 +146,7 @@ class ArchiveHeapLoader : AllStatic {
inline static oop decode_from_archive_impl(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
class PatchLoadedRegionPointers;
+ class PatchUncompressedLoadedRegionPointers;
public:
diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp
index 5915424c4fe..9e8a46e105e 100644
--- a/src/hotspot/share/cds/cdsConfig.cpp
+++ b/src/hotspot/share/cds/cdsConfig.cpp
@@ -236,7 +236,9 @@ void CDSConfig::init_shared_archive_paths() {
}
void CDSConfig::check_internal_module_property(const char* key, const char* value) {
- if (Arguments::is_internal_module_property(key) && !Arguments::is_module_path_property(key)) {
+ if (Arguments::is_internal_module_property(key) &&
+ !Arguments::is_module_path_property(key) &&
+ !Arguments::is_add_modules_property(key)) {
stop_using_optimized_module_handling();
log_info(cds)("optimized module handling: disabled due to incompatible property: %s=%s", key, value);
}
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index 715fce5f3fc..33a81a81da0 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -934,7 +934,7 @@ void FileMapInfo::extract_module_paths(const char* runtime_path, GrowableArray<const char*>* module_paths) {
-  module_paths->sort(ClassLoaderExt::compare_module_path_by_name);
+ module_paths->sort(ClassLoaderExt::compare_module_names);
}
bool FileMapInfo::check_module_paths() {
@@ -2054,8 +2054,7 @@ void FileMapInfo::map_or_load_heap_region() {
success = ArchiveHeapLoader::load_heap_region(this);
} else {
if (!UseCompressedOops && !ArchiveHeapLoader::can_map()) {
- // TODO - remove implicit knowledge of G1
- log_info(cds)("Cannot use CDS heap data. UseG1GC is required for -XX:-UseCompressedOops");
+ log_info(cds)("Cannot use CDS heap data. Selected GC not compatible -XX:-UseCompressedOops");
} else {
log_info(cds)("Cannot use CDS heap data. UseEpsilonGC, UseG1GC, UseSerialGC, UseParallelGC, or UseShenandoahGC are required.");
}
@@ -2135,7 +2134,7 @@ address FileMapInfo::heap_region_requested_address() {
assert(CDSConfig::is_using_archive(), "runtime only");
FileMapRegion* r = region_at(MetaspaceShared::hp);
assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
- assert(ArchiveHeapLoader::can_map(), "cannot be used by ArchiveHeapLoader::can_load() mode");
+ assert(ArchiveHeapLoader::can_use(), "GC must support mapping or loading");
if (UseCompressedOops) {
// We can avoid relocation if each region's offset from the runtime CompressedOops::base()
// is the same as its offset from the CompressedOops::base() during dumptime.
diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp
index efd7a906a46..2a43fd2dd4f 100644
--- a/src/hotspot/share/cds/metaspaceShared.cpp
+++ b/src/hotspot/share/cds/metaspaceShared.cpp
@@ -403,6 +403,7 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
soc->do_tag(--tag);
CDS_JAVA_HEAP_ONLY(Modules::serialize(soc);)
+ CDS_JAVA_HEAP_ONLY(Modules::serialize_addmods_names(soc);)
CDS_JAVA_HEAP_ONLY(ClassLoaderDataShared::serialize(soc);)
LambdaFormInvokers::serialize(soc);
@@ -502,6 +503,8 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
LambdaFormInvokers::dump_static_archive_invokers();
// Write module name into archive
CDS_JAVA_HEAP_ONLY(Modules::dump_main_module_name();)
+ // Write module names from --add-modules into archive
+ CDS_JAVA_HEAP_ONLY(Modules::dump_addmods_names();)
// Write the other data to the output array.
DumpRegion* ro_region = ArchiveBuilder::current()->ro_region();
char* start = ro_region->top();
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 155ce032400..6bf8f44553d 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -1597,6 +1597,8 @@ void ciEnv::dump_replay_data_helper(outputStream* out) {
NoSafepointVerifier no_safepoint;
ResourceMark rm;
+ assert(this->task() != nullptr, "task must not be null");
+
dump_replay_data_version(out);
#if INCLUDE_JVMTI
out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables);
@@ -1617,9 +1619,7 @@ void ciEnv::dump_replay_data_helper(outputStream* out) {
objects->at(i)->dump_replay_data(out);
}
- if (this->task() != nullptr) {
- dump_compile_data(out);
- }
+ dump_compile_data(out);
out->flush();
}
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index dd7cecdd9c2..93dd0af65e7 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -926,6 +926,7 @@ class AnnotationCollector : public ResourceObj{
_method_ForceInline,
_method_DontInline,
_method_ChangesCurrentThread,
+ _method_JvmtiHideEvents,
_method_JvmtiMountTransition,
_method_InjectedProfile,
_method_LambdaForm_Compiled,
@@ -1830,6 +1831,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _method_ChangesCurrentThread;
}
+ case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_JvmtiHideEvents_signature): {
+ if (_location != _in_method) break; // only allow for methods
+ if (!privileged) break; // only allow in privileged code
+ return _method_JvmtiHideEvents;
+ }
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_JvmtiMountTransition_signature): {
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
@@ -1917,6 +1923,8 @@ void MethodAnnotationCollector::apply_to(const methodHandle& m) {
m->set_dont_inline();
if (has_annotation(_method_ChangesCurrentThread))
m->set_changes_current_thread();
+ if (has_annotation(_method_JvmtiHideEvents))
+ m->set_jvmti_hide_events();
if (has_annotation(_method_JvmtiMountTransition))
m->set_jvmti_mount_transition();
if (has_annotation(_method_InjectedProfile))
diff --git a/src/hotspot/share/classfile/classLoaderExt.cpp b/src/hotspot/share/classfile/classLoaderExt.cpp
index 16981669deb..b9e420899c2 100644
--- a/src/hotspot/share/classfile/classLoaderExt.cpp
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp
@@ -90,7 +90,7 @@ void ClassLoaderExt::setup_app_search_path(JavaThread* current) {
os::free(app_class_path);
}
-int ClassLoaderExt::compare_module_path_by_name(const char** p1, const char** p2) {
+int ClassLoaderExt::compare_module_names(const char** p1, const char** p2) {
return strcmp(*p1, *p2);
}
@@ -121,7 +121,7 @@ void ClassLoaderExt::process_module_table(JavaThread* current, ModuleEntryTable*
// Sort the module paths before storing into CDS archive for simpler
// checking at runtime.
- module_paths->sort(compare_module_path_by_name);
+ module_paths->sort(compare_module_names);
for (int i = 0; i < module_paths->length(); i++) {
ClassLoader::setup_module_search_path(current, module_paths->at(i));
diff --git a/src/hotspot/share/classfile/classLoaderExt.hpp b/src/hotspot/share/classfile/classLoaderExt.hpp
index c3c0b00d55e..1f1b38cd312 100644
--- a/src/hotspot/share/classfile/classLoaderExt.hpp
+++ b/src/hotspot/share/classfile/classLoaderExt.hpp
@@ -72,7 +72,7 @@ class ClassLoaderExt: public ClassLoader { // AllStatic
static void setup_search_paths(JavaThread* current);
static void setup_module_paths(JavaThread* current);
   static void extract_jar_files_from_path(const char* path, GrowableArray<const char*>* module_paths);
- static int compare_module_path_by_name(const char** p1, const char** p2);
+ static int compare_module_names(const char** p1, const char** p2);
static char* read_manifest(JavaThread* current, ClassPathEntry* entry, jint *manifest_size) {
// Remove all the new-line continuations (which wrap long lines at 72 characters, see
diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp
index dee67ce1dde..94e407d045f 100644
--- a/src/hotspot/share/classfile/modules.cpp
+++ b/src/hotspot/share/classfile/modules.cpp
@@ -30,6 +30,7 @@
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
+#include "classfile/classLoaderExt.hpp"
#include "classfile/javaAssertions.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaClasses.inline.hpp"
@@ -560,6 +561,7 @@ void Modules::verify_archived_modules() {
}
char* Modules::_archived_main_module_name = nullptr;
+char* Modules::_archived_addmods_names = nullptr;
void Modules::dump_main_module_name() {
const char* module_name = Arguments::get_property("jdk.module.main");
@@ -600,6 +602,100 @@ void Modules::serialize(SerializeClosure* soc) {
}
}
+void Modules::dump_addmods_names() {
+ const char* addmods_names = get_addmods_names_as_sorted_string();
+ if (addmods_names != nullptr) {
+ _archived_addmods_names = ArchiveBuilder::current()->ro_strdup(addmods_names);
+ }
+ ArchivePtrMarker::mark_pointer(&_archived_addmods_names);
+}
+
+void Modules::serialize_addmods_names(SerializeClosure* soc) {
+ soc->do_ptr(&_archived_addmods_names);
+ if (soc->reading()) {
+ bool disable = false;
+ if (_archived_addmods_names[0] != '\0') {
+ if (Arguments::addmods_count() == 0) {
+ log_info(cds)("--add-modules module name(s) found in archive but not specified during runtime: %s",
+ _archived_addmods_names);
+ disable = true;
+ } else {
+ const char* addmods_names = get_addmods_names_as_sorted_string();
+ if (strcmp((const char*)_archived_addmods_names, addmods_names) != 0) {
+ log_info(cds)("Mismatched --add-modules module name(s).");
+ log_info(cds)(" dump time: %s runtime: %s", _archived_addmods_names, addmods_names);
+ disable = true;
+ }
+ }
+ } else {
+ if (Arguments::addmods_count() > 0) {
+ log_info(cds)("--add-modules module name(s) specified during runtime but not found in archive: %s",
+ get_addmods_names_as_sorted_string());
+ disable = true;
+ }
+ }
+ if (disable) {
+ log_info(cds)("Disabling optimized module handling");
+ CDSConfig::stop_using_optimized_module_handling();
+ }
+ log_info(cds)("optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled");
+ log_info(cds)("full module graph: %s", CDSConfig::is_using_full_module_graph() ? "enabled" : "disabled");
+ }
+}
+
+const char* Modules::get_addmods_names_as_sorted_string() {
+ ResourceMark rm;
+ const int max_digits = 3;
+ const int extra_symbols_count = 2; // includes '.', '\0'
+ size_t prop_len = strlen("jdk.module.addmods") + max_digits + extra_symbols_count;
+ char* prop_name = resource_allocate_bytes(prop_len);
+  GrowableArray<const char*> list;
+ for (unsigned int i = 0; i < Arguments::addmods_count(); i++) {
+ jio_snprintf(prop_name, prop_len, "jdk.module.addmods.%d", i);
+ const char* prop_value = Arguments::get_property(prop_name);
+ char* p = resource_allocate_bytes(strlen(prop_value) + 1);
+ strcpy(p, prop_value);
+ while (*p == ',') p++; // skip leading commas
+ while (*p) {
+ char* next = strchr(p, ',');
+ if (next == nullptr) {
+ // no more commas, p is the last element
+ list.append(p);
+ break;
+ } else {
+ *next = 0;
+ list.append(p);
+ p = next + 1;
+ }
+ }
+ }
+
+ // Example:
+ // --add-modules=java.compiler --add-modules=java.base,java.base,,
+ //
+ // list[0] = "java.compiler"
+ // list[1] = "java.base"
+ // list[2] = "java.base"
+ // list[3] = ""
+ // list[4] = ""
+ list.sort(ClassLoaderExt::compare_module_names);
+
+ const char* prefix = "";
+ stringStream st;
+ const char* last_string = ""; // This also filters out all empty strings
+ for (int i = 0; i < list.length(); i++) {
+ const char* m = list.at(i);
+ if (strcmp(m, last_string) != 0) { // filter out duplicates
+ st.print("%s%s", prefix, m);
+ last_string = m;
+ prefix = "\n";
+ }
+ }
+
+  return (const char*)os::strdup(st.as_string()); // Example: "java.base\njava.compiler"
+}
+
void Modules::define_archived_modules(Handle h_platform_loader, Handle h_system_loader, TRAPS) {
assert(CDSConfig::is_using_full_module_graph(), "must be");
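get_addmods_names_as_sorted_string() canonicalizes the --add-modules input (split each property on commas, drop empties, sort, deduplicate, join with '\n') so the dump-time and run-time module sets compare with a single strcmp in serialize_addmods_names(). A rough standalone equivalent, using std:: containers in place of GrowableArray and stringStream:

    #include <algorithm>
    #include <sstream>
    #include <string>
    #include <vector>

    // Canonicalize comma-separated module lists: split, drop empties,
    // sort, deduplicate, then join with '\n'.
    static std::string canonical_addmods(const std::vector<std::string>& props) {
      std::vector<std::string> mods;
      for (const std::string& prop : props) {
        std::stringstream ss(prop);
        std::string m;
        while (std::getline(ss, m, ',')) {
          if (!m.empty()) mods.push_back(m);
        }
      }
      std::sort(mods.begin(), mods.end());
      mods.erase(std::unique(mods.begin(), mods.end()), mods.end());
      std::string out;
      for (size_t i = 0; i < mods.size(); i++) {
        if (i > 0) out += '\n';
        out += mods[i];
      }
      return out;
    }
    // canonical_addmods({"java.compiler", "java.base,java.base,,"})
    //   == "java.base\njava.compiler"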
diff --git a/src/hotspot/share/classfile/modules.hpp b/src/hotspot/share/classfile/modules.hpp
index 3866f0d6f9b..3ef6f57a57b 100644
--- a/src/hotspot/share/classfile/modules.hpp
+++ b/src/hotspot/share/classfile/modules.hpp
@@ -61,9 +61,13 @@ class Modules : AllStatic {
static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_main_module_name() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
+ static void dump_addmods_names() NOT_CDS_JAVA_HEAP_RETURN;
+ static void serialize_addmods_names(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
+ static const char* get_addmods_names_as_sorted_string() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
#if INCLUDE_CDS_JAVA_HEAP
static char* _archived_main_module_name;
+ static char* _archived_addmods_names;
#endif
// Provides the java.lang.Module for the unnamed module defined
diff --git a/src/hotspot/share/classfile/vmIntrinsics.hpp b/src/hotspot/share/classfile/vmIntrinsics.hpp
index 54912a5ded7..68121c56c32 100644
--- a/src/hotspot/share/classfile/vmIntrinsics.hpp
+++ b/src/hotspot/share/classfile/vmIntrinsics.hpp
@@ -611,7 +611,6 @@ class methodHandle;
do_intrinsic(_notifyJvmtiVThreadEnd, java_lang_VirtualThread, notifyJvmtiEnd_name, void_method_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadMount, java_lang_VirtualThread, notifyJvmtiMount_name, bool_void_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadUnmount, java_lang_VirtualThread, notifyJvmtiUnmount_name, bool_void_signature, F_RN) \
- do_intrinsic(_notifyJvmtiVThreadHideFrames, java_lang_VirtualThread, notifyJvmtiHideFrames_name, bool_void_signature, F_SN) \
do_intrinsic(_notifyJvmtiVThreadDisableSuspend, java_lang_VirtualThread, notifyJvmtiDisableSuspend_name, bool_void_signature, F_SN) \
\
/* support for UnsafeConstants */ \
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 58e1551e20c..d8018cd0c8a 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -306,6 +306,7 @@ class SerializeClosure;
template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
\
template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
+ template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \
template(jdk_internal_vm_annotation_JvmtiMountTransition_signature, "Ljdk/internal/vm/annotation/JvmtiMountTransition;") \
\
/* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \
@@ -397,7 +398,6 @@ class SerializeClosure;
template(notifyJvmtiEnd_name, "notifyJvmtiEnd") \
template(notifyJvmtiMount_name, "notifyJvmtiMount") \
template(notifyJvmtiUnmount_name, "notifyJvmtiUnmount") \
- template(notifyJvmtiHideFrames_name, "notifyJvmtiHideFrames") \
template(notifyJvmtiDisableSuspend_name, "notifyJvmtiDisableSuspend") \
template(doYield_name, "doYield") \
template(enter_name, "enter") \
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
index f22d1c22ac8..b2b522c1435 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -128,7 +128,7 @@ class EpsilonHeap : public CollectedHeap {
bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
// Support for loading objects from CDS archive into the heap
- bool can_load_archived_objects() const override { return UseCompressedOops; }
+ bool can_load_archived_objects() const override { return true; }
HeapWord* allocate_loaded_archive_space(size_t size) override;
void print_on(outputStream* st) const override;
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
index d90121b0eaf..01d4e6bb04d 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/shared/pretouchTask.hpp"
+#include "gc/shared/spaceDecorator.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index a6601611c9b..22d1296507d 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -246,7 +246,7 @@ class ParallelScavengeHeap : public CollectedHeap {
}
// Support for loading objects from CDS archive into the heap
- bool can_load_archived_objects() const override { return UseCompressedOops; }
+ bool can_load_archived_objects() const override { return true; }
HeapWord* allocate_loaded_archive_space(size_t size) override;
void complete_loaded_archive_space(MemRegion archive_space) override;
diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
index 2715ab90768..52ea4dc042c 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -31,6 +31,7 @@
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
+#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index 1ab7b2af7ed..66ce20295f5 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -60,6 +60,7 @@
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
+#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index 7df2143ccbb..7701cea313b 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -51,6 +51,7 @@
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
+#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
diff --git a/src/hotspot/share/gc/parallel/psYoungGen.cpp b/src/hotspot/share/gc/parallel/psYoungGen.cpp
index 7b0d3b21507..dd9619e4546 100644
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/genArguments.hpp"
+#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
diff --git a/src/hotspot/share/gc/serial/serialHeap.hpp b/src/hotspot/share/gc/serial/serialHeap.hpp
index 750bb322b2a..d787d216e37 100644
--- a/src/hotspot/share/gc/serial/serialHeap.hpp
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp
@@ -291,7 +291,7 @@ class SerialHeap : public CollectedHeap {
void safepoint_synchronize_end() override;
// Support for loading objects from CDS archive into the heap
- bool can_load_archived_objects() const override { return UseCompressedOops; }
+ bool can_load_archived_objects() const override { return true; }
HeapWord* allocate_loaded_archive_space(size_t size) override;
void complete_loaded_archive_space(MemRegion archive_space) override;
diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.hpp
index 76681aa8986..368312af06b 100644
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp
@@ -33,7 +33,6 @@
EPSILONGC_ONLY(f(EpsilonBarrierSet)) \
G1GC_ONLY(f(G1BarrierSet)) \
SHENANDOAHGC_ONLY(f(ShenandoahBarrierSet)) \
- ZGC_ONLY(f(XBarrierSet)) \
ZGC_ONLY(f(ZBarrierSet))
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
index 9523428821b..001b5b00372 100644
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
@@ -40,7 +40,6 @@
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#endif
#if INCLUDE_ZGC
-#include "gc/x/xBarrierSet.inline.hpp"
#include "gc/z/zBarrierSet.inline.hpp"
#endif
diff --git a/src/hotspot/share/gc/shared/gcConfig.cpp b/src/hotspot/share/gc/shared/gcConfig.cpp
index 506b368d6cf..8eb265b54d9 100644
--- a/src/hotspot/share/gc/shared/gcConfig.cpp
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp
@@ -44,7 +44,7 @@
#include "gc/shenandoah/shenandoahArguments.hpp"
#endif
#if INCLUDE_ZGC
-#include "gc/z/shared/zSharedArguments.hpp"
+#include "gc/z/zArguments.hpp"
#endif
struct IncludedGC {
@@ -62,7 +62,7 @@ struct IncludedGC {
PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
SERIALGC_ONLY(static SerialArguments serialArguments;)
SHENANDOAHGC_ONLY(static ShenandoahArguments shenandoahArguments;)
- ZGC_ONLY(static ZSharedArguments zArguments;)
+ ZGC_ONLY(static ZArguments zArguments;)
// Table of included GCs, for translating between command
// line flag, CollectedHeap::Name and GCArguments instance.
diff --git a/src/hotspot/share/gc/shared/gcConfiguration.cpp b/src/hotspot/share/gc/shared/gcConfiguration.cpp
index 2e8d3eb2a51..824e119e696 100644
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp
@@ -43,11 +43,7 @@ GCName GCConfiguration::young_collector() const {
}
if (UseZGC) {
- if (ZGenerational) {
- return ZMinor;
- } else {
- return NA;
- }
+ return ZMinor;
}
if (UseShenandoahGC) {
@@ -66,12 +62,8 @@ GCName GCConfiguration::old_collector() const {
return ParallelOld;
}
- if (UseZGC) {
- if (ZGenerational) {
- return ZMajor;
- } else {
- return Z;
- }
+  if (UseZGC) {
+ return ZMajor;
}
if (UseShenandoahGC) {
diff --git a/src/hotspot/share/gc/shared/gcName.hpp b/src/hotspot/share/gc/shared/gcName.hpp
index 3d2dd350ac1..b9b87c231ca 100644
--- a/src/hotspot/share/gc/shared/gcName.hpp
+++ b/src/hotspot/share/gc/shared/gcName.hpp
@@ -37,7 +37,6 @@ enum GCName {
G1Full,
ZMinor,
ZMajor,
- Z, // Support for the legacy, single-gen mode
Shenandoah,
NA,
GCNameEndSentinel
@@ -56,7 +55,6 @@ class GCNameHelper {
case G1Full: return "G1Full";
case ZMinor: return "ZGC Minor";
case ZMajor: return "ZGC Major";
- case Z: return "Z";
case Shenandoah: return "Shenandoah";
case NA: return "N/A";
default: ShouldNotReachHere(); return nullptr;
diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp
index 34bc638c9ba..9086c25ee48 100644
--- a/src/hotspot/share/gc/shared/gc_globals.hpp
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp
@@ -43,7 +43,7 @@
#include "gc/shenandoah/shenandoah_globals.hpp"
#endif
#if INCLUDE_ZGC
-#include "gc/z/shared/z_shared_globals.hpp"
+#include "gc/z/z_globals.hpp"
#endif
#define GC_FLAGS(develop, \
@@ -93,7 +93,7 @@
range, \
constraint)) \
\
- ZGC_ONLY(GC_Z_SHARED_FLAGS( \
+ ZGC_ONLY(GC_Z_FLAGS( \
develop, \
develop_pd, \
product, \
@@ -118,9 +118,6 @@
product(bool, UseZGC, false, \
"Use the Z garbage collector") \
\
- product(bool, ZGenerational, true, \
- "Use the generational version of ZGC") \
- \
product(bool, UseShenandoahGC, false, \
"Use the Shenandoah garbage collector") \
\
diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
index c4acb0b1ed6..bba9c9e099f 100644
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
@@ -46,7 +46,7 @@
#include "gc/shenandoah/vmStructs_shenandoah.hpp"
#endif
#if INCLUDE_ZGC
-#include "gc/z/shared/vmStructs_z_shared.hpp"
+#include "gc/z/vmStructs_z.hpp"
#endif
#define VM_STRUCTS_GC(nonstatic_field, \
@@ -69,7 +69,7 @@
SHENANDOAHGC_ONLY(VM_STRUCTS_SHENANDOAH(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
- ZGC_ONLY(VM_STRUCTS_Z_SHARED(nonstatic_field, \
+ ZGC_ONLY(VM_STRUCTS_Z(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
\
@@ -91,6 +91,7 @@
nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \
nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \
\
+ static_field(CollectedHeap, _lab_alignment_reserve, size_t) \
nonstatic_field(CollectedHeap, _reserved, MemRegion) \
nonstatic_field(CollectedHeap, _is_stw_gc_active, bool) \
nonstatic_field(CollectedHeap, _total_collections, unsigned int) \
@@ -120,7 +121,7 @@
SHENANDOAHGC_ONLY(VM_TYPES_SHENANDOAH(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
- ZGC_ONLY(VM_TYPES_Z_SHARED(declare_type, \
+ ZGC_ONLY(VM_TYPES_Z(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
\
@@ -174,7 +175,7 @@
declare_constant_with_value)) \
SHENANDOAHGC_ONLY(VM_INT_CONSTANTS_SHENANDOAH(declare_constant, \
declare_constant_with_value)) \
- ZGC_ONLY(VM_INT_CONSTANTS_Z_SHARED(declare_constant, \
+ ZGC_ONLY(VM_INT_CONSTANTS_Z(declare_constant, \
declare_constant_with_value)) \
\
/********************************************/ \
@@ -198,6 +199,6 @@
declare_constant(CollectedHeap::G1) \
#define VM_LONG_CONSTANTS_GC(declare_constant) \
- ZGC_ONLY(VM_LONG_CONSTANTS_Z_SHARED(declare_constant))
+ ZGC_ONLY(VM_LONG_CONSTANTS_Z(declare_constant))
#endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
index 2291015edc6..58527e808e4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
@@ -25,18 +25,78 @@
#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
#include "code/nmethod.hpp"
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shenandoah/shenandoahGenerationType.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "memory/iterator.hpp"
-#include "oops/accessDecorators.hpp"
-#include "runtime/handshake.hpp"
+#include "runtime/javaThread.hpp"
class BarrierSetNMethod;
class ShenandoahBarrierSet;
class ShenandoahHeap;
class ShenandoahMarkingContext;
-class ShenandoahHeapRegionSet;
-class Thread;
+class ShenandoahReferenceProcessor;
-class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {
+//
+// ========= Super
+//
+
+class ShenandoahSuperClosure : public MetadataVisitingOopIterateClosure {
+protected:
+ ShenandoahHeap* const _heap;
+
+public:
+ inline ShenandoahSuperClosure();
+ inline ShenandoahSuperClosure(ShenandoahReferenceProcessor* rp);
+ inline void do_nmethod(nmethod* nm);
+};
+
+//
+// ========= Marking
+//
+
+class ShenandoahMarkRefsSuperClosure : public ShenandoahSuperClosure {
+private:
+ ShenandoahObjToScanQueue* _queue;
+ ShenandoahMarkingContext* const _mark_context;
+ bool _weak;
+
+protected:
+  template <class T, ShenandoahGenerationType GENERATION>
+ void work(T *p);
+
+public:
+ inline ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp);
+
+ bool is_weak() const {
+ return _weak;
+ }
+
+ void set_weak(bool weak) {
+ _weak = weak;
+ }
+
+ virtual void do_nmethod(nmethod* nm) {
+ assert(!is_weak(), "Can't handle weak marking of nmethods");
+ ShenandoahSuperClosure::do_nmethod(nm);
+ }
+};
+
+template <ShenandoahGenerationType GENERATION>
+class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) { work<T, GENERATION>(p); }
+
+public:
+ ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {}
+
+ virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+ virtual void do_oop(oop* p) { do_oop_work(p); }
+};
+
+class ShenandoahForwardedIsAliveClosure : public BoolObjectClosure {
private:
ShenandoahMarkingContext* const _mark_context;
public:
@@ -44,7 +104,7 @@ class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {
inline bool do_object_b(oop obj);
};
-class ShenandoahIsAliveClosure: public BoolObjectClosure {
+class ShenandoahIsAliveClosure : public BoolObjectClosure {
private:
ShenandoahMarkingContext* const _mark_context;
public:
@@ -63,27 +123,29 @@ class ShenandoahIsAliveSelector : public StackObj {
class ShenandoahKeepAliveClosure : public OopClosure {
private:
ShenandoahBarrierSet* const _bs;
-public:
- inline ShenandoahKeepAliveClosure();
- inline void do_oop(oop* p);
- inline void do_oop(narrowOop* p);
-private:
   template <typename T>
void do_oop_work(T* p);
-};
-class ShenandoahOopClosureBase : public MetadataVisitingOopIterateClosure {
public:
- inline void do_nmethod(nmethod* nm);
+ inline ShenandoahKeepAliveClosure();
+ inline void do_oop(oop* p) { do_oop_work(p); }
+ inline void do_oop(narrowOop* p) { do_oop_work(p); }
};
-template
-class ShenandoahEvacuateUpdateRootClosureBase : public ShenandoahOopClosureBase {
+
+//
+// ========= Evacuating + Roots
+//
+
+template <bool CONCURRENT, bool STABLE_THREAD>
+class ShenandoahEvacuateUpdateRootClosureBase : public ShenandoahSuperClosure {
protected:
- ShenandoahHeap* const _heap;
Thread* const _thread;
public:
- inline ShenandoahEvacuateUpdateRootClosureBase();
+ inline ShenandoahEvacuateUpdateRootClosureBase() :
+ ShenandoahSuperClosure(),
+ _thread(STABLE_THREAD ? Thread::current() : nullptr) {}
+
inline void do_oop(oop* p);
inline void do_oop(narrowOop* p);
protected:
@@ -91,10 +153,11 @@ class ShenandoahEvacuateUpdateRootClosureBase : public ShenandoahOopClosureBase
inline void do_oop_work(T* p);
};
-using ShenandoahEvacuateUpdateMetadataClosure = ShenandoahEvacuateUpdateRootClosureBase;
-using ShenandoahEvacuateUpdateRootsClosure = ShenandoahEvacuateUpdateRootClosureBase;
+using ShenandoahEvacuateUpdateMetadataClosure = ShenandoahEvacuateUpdateRootClosureBase;
+using ShenandoahEvacuateUpdateRootsClosure = ShenandoahEvacuateUpdateRootClosureBase;
using ShenandoahContextEvacuateUpdateRootsClosure = ShenandoahEvacuateUpdateRootClosureBase;
+
 template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
class ShenandoahCleanUpdateWeakOopsClosure : public OopClosure {
private:
@@ -107,7 +170,7 @@ class ShenandoahCleanUpdateWeakOopsClosure : public OopClosure {
inline void do_oop(narrowOop* p);
};
-class ShenandoahNMethodAndDisarmClosure: public NMethodToOopClosure {
+class ShenandoahNMethodAndDisarmClosure : public NMethodToOopClosure {
private:
BarrierSetNMethod* const _bs;
@@ -116,6 +179,51 @@ class ShenandoahNMethodAndDisarmClosure: public NMethodToOopClosure {
inline void do_nmethod(nmethod* nm);
};
+
+//
+// ========= Update References
+//
+
+template <ShenandoahGenerationType GENERATION>
+class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+ inline void work(T* p);
+
+public:
+ ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp);
+
+ virtual void do_oop(narrowOop* p) { work(p); }
+ virtual void do_oop(oop* p) { work(p); }
+};
+
+class ShenandoahUpdateRefsSuperClosure : public ShenandoahSuperClosure {};
+
+class ShenandoahNonConcUpdateRefsClosure : public ShenandoahUpdateRefsSuperClosure {
+private:
+  template <class T>
+ inline void work(T* p);
+
+public:
+ virtual void do_oop(narrowOop* p) { work(p); }
+ virtual void do_oop(oop* p) { work(p); }
+};
+
+class ShenandoahConcUpdateRefsClosure : public ShenandoahUpdateRefsSuperClosure {
+private:
+  template <class T>
+ inline void work(T* p);
+
+public:
+ virtual void do_oop(narrowOop* p) { work(p); }
+ virtual void do_oop(oop* p) { work(p); }
+};
+
+
+//
+// ========= Utilities
+//
+
#ifdef ASSERT
class ShenandoahAssertNotForwardedClosure : public OopClosure {
private:
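The closure rework above replaces runtime booleans (concurrent, atomic, stable thread) with template parameters, so each instantiation compiles its branch away instead of testing a flag per oop. A minimal sketch of the technique (names hypothetical; the GCC/Clang __atomic builtin stands in for ShenandoahHeap::atomic_update_oop):

    #include <cstdint>

    // 'if (CONCURRENT)' is resolved at compile time in each instantiation,
    // so the per-slot hot path contains only the chosen store.
    template <bool CONCURRENT>
    struct SlotUpdateClosure {
      void do_slot(uintptr_t* p, uintptr_t new_value) {
        if (CONCURRENT) {
          __atomic_store_n(p, new_value, __ATOMIC_RELEASE); // racy slot
        } else {
          *p = new_value; // at a safepoint a plain store is enough
        }
      }
    };

    using ConcurrentSlotUpdate = SlotUpdateClosure<true>;
    using STWSlotUpdate        = SlotUpdateClosure<false>;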
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
index 53921be8d20..edfb62b4046 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
@@ -33,15 +33,46 @@
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
+#include "gc/shenandoah/shenandoahMark.inline.hpp"
+#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
-ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
- _mark_context(ShenandoahHeap::heap()->marking_context()) {
+//
+// ========= Super
+//
+
+ShenandoahSuperClosure::ShenandoahSuperClosure() :
+ MetadataVisitingOopIterateClosure(), _heap(ShenandoahHeap::heap()) {}
+
+ShenandoahSuperClosure::ShenandoahSuperClosure(ShenandoahReferenceProcessor* rp) :
+ MetadataVisitingOopIterateClosure(rp), _heap(ShenandoahHeap::heap()) {}
+
+void ShenandoahSuperClosure::do_nmethod(nmethod* nm) {
+ nm->run_nmethod_entry_barrier();
+}
+
+//
+// ========= Marking
+//
+
+ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
+ ShenandoahSuperClosure(rp),
+ _queue(q),
+ _mark_context(ShenandoahHeap::heap()->marking_context()),
+ _weak(false) {}
+
+template <class T, StringDedupMode STRING_DEDUP>
+inline void ShenandoahMarkRefsSuperClosure::work(T* p) {
+ ShenandoahMark::mark_through_ref<T, STRING_DEDUP>(p, _queue, _mark_context, _weak);
}
+ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
+ _mark_context(ShenandoahHeap::heap()->marking_context()) {}
+
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
if (CompressedOops::is_null(obj)) {
return false;
@@ -52,8 +83,7 @@ bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
}
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
- _mark_context(ShenandoahHeap::heap()->marking_context()) {
-}
+ _mark_context(ShenandoahHeap::heap()->marking_context()) {}
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
if (CompressedOops::is_null(obj)) {
@@ -69,21 +99,8 @@ BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
}
-void ShenandoahOopClosureBase::do_nmethod(nmethod* nm) {
- nm->run_nmethod_entry_barrier();
-}
-
ShenandoahKeepAliveClosure::ShenandoahKeepAliveClosure() :
- _bs(ShenandoahBarrierSet::barrier_set()) {
-}
-
-void ShenandoahKeepAliveClosure::do_oop(oop* p) {
- do_oop_work(p);
-}
-
-void ShenandoahKeepAliveClosure::do_oop(narrowOop* p) {
- do_oop_work(p);
-}
+ _bs(ShenandoahBarrierSet::barrier_set()) {}
template <typename T>
void ShenandoahKeepAliveClosure::do_oop_work(T* p) {
@@ -97,14 +114,14 @@ void ShenandoahKeepAliveClosure::do_oop_work(T* p) {
}
}
-template <bool concurrent, bool stable_thread>
-ShenandoahEvacuateUpdateRootClosureBase<concurrent, stable_thread>::ShenandoahEvacuateUpdateRootClosureBase() :
- _heap(ShenandoahHeap::heap()), _thread(stable_thread ? Thread::current() : nullptr) {
-}
-template <bool concurrent, bool stable_thread>
-void ShenandoahEvacuateUpdateRootClosureBase<concurrent, stable_thread>::do_oop(oop* p) {
- if (concurrent) {
+//
+// ========= Evacuating + Roots
+//
+
+template <bool CONCURRENT, bool STABLE_THREAD>
+void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop(oop* p) {
+ if (CONCURRENT) {
ShenandoahEvacOOMScope scope;
do_oop_work(p);
} else {
@@ -112,9 +129,9 @@ void ShenandoahEvacuateUpdateRootClosureBase<concurrent, stable_thread>::do_oop(
}
}
-template <bool concurrent, bool stable_thread>
-void ShenandoahEvacuateUpdateRootClosureBase<concurrent, stable_thread>::do_oop(narrowOop* p) {
- if (concurrent) {
+template <bool CONCURRENT, bool STABLE_THREAD>
+void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop(narrowOop* p) {
+ if (CONCURRENT) {
ShenandoahEvacOOMScope scope;
do_oop_work(p);
} else {
@@ -122,9 +139,9 @@ void ShenandoahEvacuateUpdateRootClosureBase<concurrent, stable_thread>::do_oop(
}
}
-template <bool atomic, bool stable_thread>
+template <bool CONCURRENT, bool STABLE_THREAD>
template <class T>
-void ShenandoahEvacuateUpdateRootClosureBase<atomic, stable_thread>::do_oop_work(T* p) {
+void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop_work(T* p) {
assert(_heap->is_concurrent_weak_root_in_progress() ||
_heap->is_concurrent_strong_root_in_progress(),
"Only do this in root processing phase");
@@ -137,12 +154,12 @@ void ShenandoahEvacuateUpdateRootClosureBase<atomic, stable_thread>::do_oop_work
shenandoah_assert_marked(p, obj);
oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
if (resolved == obj) {
- Thread* thr = stable_thread ? _thread : Thread::current();
+ Thread* thr = STABLE_THREAD ? _thread : Thread::current();
assert(thr == Thread::current(), "Wrong thread");
resolved = _heap->evacuate_object(obj, thr);
}
- if (atomic) {
+ if (CONCURRENT) {
ShenandoahHeap::atomic_update_oop(resolved, p, o);
} else {
RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
@@ -192,6 +209,42 @@ void ShenandoahNMethodAndDisarmClosure::do_nmethod(nmethod* nm) {
_bs->disarm(nm);
}
+
+//
+// ========= Update References
+//
+
+template <StringDedupMode STRING_DEDUP>
+ShenandoahMarkUpdateRefsClosure<STRING_DEDUP>::ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
+ ShenandoahMarkRefsSuperClosure(q, rp) {
+ assert(_heap->is_stw_gc_in_progress(), "Can only be used for STW GC");
+}
+
+template <StringDedupMode STRING_DEDUP>
+template <class T>
+inline void ShenandoahMarkUpdateRefsClosure<STRING_DEDUP>::work(T* p) {
+ // Update the location
+ _heap->non_conc_update_with_forwarded(p);
+
+ // ...then do the usual thing
+ ShenandoahMarkRefsSuperClosure::work<T, STRING_DEDUP>(p);
+}
+
+template <class T>
+inline void ShenandoahNonConcUpdateRefsClosure::work(T* p) {
+ _heap->non_conc_update_with_forwarded(p);
+}
+
+template <class T>
+inline void ShenandoahConcUpdateRefsClosure::work(T* p) {
+ _heap->conc_update_with_forwarded(p);
+}
+
+
+//
+// ========= Utilities
+//
+
#ifdef ASSERT
template <class T>
void ShenandoahAssertNotForwardedClosure::do_oop_work(T* p) {
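
Editor's note: the new `ShenandoahMarkUpdateRefsClosure::work` above is a thin layering: fix the slot with its forwarded value, then fall through to ordinary marking. A minimal model of that two-step shape, using a plain map as a stand-in for the forwarding table (none of these helpers are HotSpot API):

```cpp
#include <cstdio>
#include <unordered_map>

std::unordered_map<int, int> forwarding;  // old object id -> forwarded id

void mark(int obj) { std::printf("mark %d\n", obj); }

void update_with_forwarded(int* slot) {
  auto it = forwarding.find(*slot);
  if (it != forwarding.end()) {
    *slot = it->second;        // update the location...
  }
}

void mark_update_refs_work(int* slot) {
  update_with_forwarded(slot); // ...then do the usual thing
  mark(*slot);
}

int main() {
  forwarding[1] = 7;
  int root = 1;
  mark_update_refs_work(&root);
  std::printf("root slot now holds %d\n", root);  // prints 7
}
```
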
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 6da2d2e83f7..d801dda372e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -29,13 +29,13 @@
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
index 75cdb99e177..0e2ef4144ad 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -34,7 +34,6 @@
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
index 81d07a414cd..2249c38455f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -32,7 +32,6 @@
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index de7d81d0f43..c2d94353d54 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -31,6 +31,7 @@
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
@@ -44,7 +45,6 @@
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
index 6195f445f7b..719abde4b16 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
@@ -29,7 +29,6 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahGC.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index e94f43b8886..07914947ead 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -53,7 +53,6 @@
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index a3e0b9397da..7e616f925d0 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -543,7 +543,7 @@ class ShenandoahHeap : public CollectedHeap, public ShenandoahSpaceInfo {
// ---------- CDS archive support
- bool can_load_archived_objects() const override { return UseCompressedOops; }
+ bool can_load_archived_objects() const override { return true; }
HeapWord* allocate_loaded_archive_space(size_t size) override;
void complete_loaded_archive_space(MemRegion archive_space) override;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
index 4c45f4a7659..775b84a8966 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
@@ -28,19 +28,11 @@
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
-ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- MetadataVisitingOopIterateClosure(rp),
- _queue(q),
- _mark_context(ShenandoahHeap::heap()->marking_context()),
- _weak(false)
-{ }
-
ShenandoahMark::ShenandoahMark() :
_task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
index 99995c469eb..2eca17bde27 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
@@ -30,9 +30,9 @@
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
index 5eb0b277b5e..75687302ca5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
@@ -28,7 +28,6 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/continuation.hpp"
#include "runtime/safepointVerifiers.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
deleted file mode 100644
index a2869b6ead6..00000000000
--- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
-#define SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
-
-#include "gc/shared/stringdedup/stringDedup.hpp"
-#include "gc/shenandoah/shenandoahClosures.inline.hpp"
-#include "gc/shenandoah/shenandoahGenerationType.hpp"
-#include "gc/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc/shenandoah/shenandoahTaskqueue.hpp"
-#include "gc/shenandoah/shenandoahUtils.hpp"
-#include "memory/iterator.hpp"
-#include "runtime/javaThread.hpp"
-
-class ShenandoahMarkRefsSuperClosure : public MetadataVisitingOopIterateClosure {
-private:
- ShenandoahObjToScanQueue* _queue;
- ShenandoahMarkingContext* const _mark_context;
- bool _weak;
-
-protected:
- template <class T, StringDedupMode STRING_DEDUP>
- void work(T *p);
-
-public:
- ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp);
-
- bool is_weak() const {
- return _weak;
- }
-
- void set_weak(bool weak) {
- _weak = weak;
- }
-
- virtual void do_nmethod(nmethod* nm) {
- assert(!is_weak(), "Can't handle weak marking of nmethods");
- nm->run_nmethod_entry_barrier();
- }
-};
-
-template <StringDedupMode STRING_DEDUP>
-class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure {
-private:
- ShenandoahHeap* const _heap;
-
- template <class T>
- inline void work(T* p);
-
-public:
- ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- ShenandoahMarkRefsSuperClosure(q, rp),
- _heap(ShenandoahHeap::heap()) {
- assert(_heap->is_stw_gc_in_progress(), "Can only be used for STW GC");
- }
-
- virtual void do_oop(narrowOop* p) { work(p); }
- virtual void do_oop(oop* p) { work(p); }
-};
-
-template <StringDedupMode STRING_DEDUP>
-class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure {
-private:
- template <class T>
- inline void do_oop_work(T* p) { work<T, STRING_DEDUP>(p); }
-
-public:
- ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- ShenandoahMarkRefsSuperClosure(q, rp) {};
-
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
- virtual void do_oop(oop* p) { do_oop_work(p); }
-};
-
-class ShenandoahUpdateRefsSuperClosure : public ShenandoahOopClosureBase {
-protected:
- ShenandoahHeap* _heap;
-
-public:
- ShenandoahUpdateRefsSuperClosure() : _heap(ShenandoahHeap::heap()) {}
-};
-
-class ShenandoahNonConcUpdateRefsClosure : public ShenandoahUpdateRefsSuperClosure {
-private:
- template <class T>
- inline void work(T* p);
-
-public:
- ShenandoahNonConcUpdateRefsClosure() : ShenandoahUpdateRefsSuperClosure() {}
-
- virtual void do_oop(narrowOop* p) { work(p); }
- virtual void do_oop(oop* p) { work(p); }
-};
-
-class ShenandoahConcUpdateRefsClosure : public ShenandoahUpdateRefsSuperClosure {
-private:
- template <class T>
- inline void work(T* p);
-
-public:
- ShenandoahConcUpdateRefsClosure() : ShenandoahUpdateRefsSuperClosure() {}
-
- virtual void do_oop(narrowOop* p) { work(p); }
- virtual void do_oop(oop* p) { work(p); }
-};
-
-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
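
Editor's note: the deleted header threaded a non-type `StringDedupMode` parameter from the closure class down into a member-template call (`work<T, STRING_DEDUP>`). A self-contained sketch of that forwarding pattern, with illustrative names:

```cpp
#include <cstdio>

enum StringDedupMode { NO_DEDUP, ENQUEUE_DEDUP };

struct MarkRefsSuper {
  template <class T, StringDedupMode DEDUP>
  void work(T* p) {
    if (DEDUP == ENQUEUE_DEDUP) {
      std::printf("enqueue candidate for dedup\n");
    }
    std::printf("mark through ref %p\n", (void*)p);
  }
};

template <StringDedupMode DEDUP>
struct MarkRefsClosure : MarkRefsSuper {
  // Class-level DEDUP is forwarded into the superclass member template.
  template <class T>
  void do_oop_work(T* p) { this->template work<T, DEDUP>(p); }
};

int main() {
  long slot = 0;
  MarkRefsClosure<NO_DEDUP> plain;
  MarkRefsClosure<ENQUEUE_DEDUP> dedup;
  plain.do_oop_work(&slot);
  dedup.do_oop_work(&slot);
}
```
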
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp
deleted file mode 100644
index e614893aab9..00000000000
--- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
-#define SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
-
-#include "gc/shenandoah/shenandoahOopClosures.hpp"
-
-#include "gc/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc/shenandoah/shenandoahMark.inline.hpp"
-
-template <class T, StringDedupMode STRING_DEDUP>
-inline void ShenandoahMarkRefsSuperClosure::work(T* p) {
- ShenandoahMark::mark_through_ref<T, STRING_DEDUP>(p, _queue, _mark_context, _weak);
-}
-
-template <StringDedupMode STRING_DEDUP>
-template <class T>
-inline void ShenandoahMarkUpdateRefsClosure<STRING_DEDUP>::work(T* p) {
- // Update the location
- _heap->non_conc_update_with_forwarded(p);
-
- // ...then do the usual thing
- ShenandoahMarkRefsSuperClosure::work<T, STRING_DEDUP>(p);
-}
-
-template <class T>
-inline void ShenandoahNonConcUpdateRefsClosure::work(T* p) {
- _heap->non_conc_update_with_forwarded(p);
-}
-
-template <class T>
-inline void ShenandoahConcUpdateRefsClosure::work(T* p) {
- _heap->conc_update_with_forwarded(p);
-}
-
-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
index 42c8d0ad271..fe1d6d69bd8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
@@ -26,7 +26,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/workerThread.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
index 83e897994cb..2e581d918d4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
@@ -31,7 +31,6 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahGenerationType.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index 9d2782502fe..2dd6018ecd4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -28,8 +28,6 @@
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc/shenandoah/shenandoahMark.inline.hpp"
-#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "interpreter/oopMapCache.hpp"
diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
deleted file mode 100644
index 6f64392cefc..00000000000
--- a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "c1/c1_LIR.hpp"
-#include "c1/c1_LIRGenerator.hpp"
-#include "c1/c1_CodeStubs.hpp"
-#include "gc/x/c1/xBarrierSetC1.hpp"
-#include "gc/x/xBarrierSet.hpp"
-#include "gc/x/xBarrierSetAssembler.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-#include "utilities/macros.hpp"
-
-XLoadBarrierStubC1::XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
- _decorators(access.decorators()),
- _ref_addr(access.resolved_addr()),
- _ref(ref),
- _tmp(LIR_OprFact::illegalOpr),
- _runtime_stub(runtime_stub) {
-
- assert(_ref_addr->is_address(), "Must be an address");
- assert(_ref->is_register(), "Must be a register");
-
- // Allocate tmp register if needed
- if (_ref_addr->as_address_ptr()->index()->is_valid() ||
- _ref_addr->as_address_ptr()->disp() != 0) {
- // Has index or displacement, need tmp register to load address into
- _tmp = access.gen()->new_pointer_register();
- }
-
- FrameMap* f = Compilation::current()->frame_map();
- f->update_reserved_argument_area_size(2 * BytesPerWord);
-}
-
-DecoratorSet XLoadBarrierStubC1::decorators() const {
- return _decorators;
-}
-
-LIR_Opr XLoadBarrierStubC1::ref() const {
- return _ref;
-}
-
-LIR_Opr XLoadBarrierStubC1::ref_addr() const {
- return _ref_addr;
-}
-
-LIR_Opr XLoadBarrierStubC1::tmp() const {
- return _tmp;
-}
-
-address XLoadBarrierStubC1::runtime_stub() const {
- return _runtime_stub;
-}
-
-void XLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
- visitor->do_slow_case();
- visitor->do_input(_ref_addr);
- visitor->do_output(_ref);
- if (_tmp->is_valid()) {
- visitor->do_temp(_tmp);
- }
-}
-
-void XLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
- XBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
-}
-
-#ifndef PRODUCT
-void XLoadBarrierStubC1::print_name(outputStream* out) const {
- out->print("XLoadBarrierStubC1");
-}
-#endif // PRODUCT
-
-class LIR_OpXLoadBarrierTest : public LIR_Op {
-private:
- LIR_Opr _opr;
-
-public:
- LIR_OpXLoadBarrierTest(LIR_Opr opr) :
- LIR_Op(lir_xloadbarrier_test, LIR_OprFact::illegalOpr, nullptr),
- _opr(opr) {}
-
- virtual void visit(LIR_OpVisitState* state) {
- state->do_input(_opr);
- }
-
- virtual void emit_code(LIR_Assembler* ce) {
- XBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
- }
-
- virtual void print_instr(outputStream* out) const {
- _opr->print(out);
- out->print(" ");
- }
-
-#ifndef PRODUCT
- virtual const char* name() const {
- return "lir_z_load_barrier_test";
- }
-#endif // PRODUCT
-};
-
-static bool barrier_needed(LIRAccess& access) {
- return XBarrierSet::barrier_needed(access.decorators(), access.type());
-}
-
-XBarrierSetC1::XBarrierSetC1() :
- _load_barrier_on_oop_field_preloaded_runtime_stub(nullptr),
- _load_barrier_on_weak_oop_field_preloaded_runtime_stub(nullptr) {}
-
-address XBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
- assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
- //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
-
- if ((decorators & ON_WEAK_OOP_REF) != 0) {
- return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
- } else {
- return _load_barrier_on_oop_field_preloaded_runtime_stub;
- }
-}
-
-#ifdef ASSERT
-#define __ access.gen()->lir(__FILE__, __LINE__)->
-#else
-#define __ access.gen()->lir()->
-#endif
-
-void XBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
- // Fast path
- __ append(new LIR_OpXLoadBarrierTest(result));
-
- // Slow path
- const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
- CodeStub* const stub = new XLoadBarrierStubC1(access, result, runtime_stub);
- __ branch(lir_cond_notEqual, stub);
- __ branch_destination(stub->continuation());
-}
-
-LIR_Opr XBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
- // We must resolve in register when patching. This is to avoid
- // having a patch area in the load barrier stub, since the call
- // into the runtime to patch will not have the proper oop map.
- const bool patch_before_barrier = barrier_needed(access) && (access.decorators() & C1_NEEDS_PATCHING) != 0;
- return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
-}
-
-#undef __
-
-void XBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
- BarrierSetC1::load_at_resolved(access, result);
-
- if (barrier_needed(access)) {
- load_barrier(access, result);
- }
-}
-
-static void pre_load_barrier(LIRAccess& access) {
- DecoratorSet decorators = access.decorators();
-
- // Downgrade access to MO_UNORDERED
- decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
-
- // Remove ACCESS_WRITE
- decorators = (decorators & ~ACCESS_WRITE);
-
- // Generate synthetic load at
- access.gen()->access_load_at(decorators,
- access.type(),
- access.base().item(),
- access.offset().opr(),
- access.gen()->new_register(access.type()),
- nullptr /* patch_emit_info */,
- nullptr /* load_emit_info */);
-}
-
-LIR_Opr XBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
- if (barrier_needed(access)) {
- pre_load_barrier(access);
- }
-
- return BarrierSetC1::atomic_xchg_at_resolved(access, value);
-}
-
-LIR_Opr XBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
- if (barrier_needed(access)) {
- pre_load_barrier(access);
- }
-
- return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
-}
-
-class XLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
-private:
- const DecoratorSet _decorators;
-
-public:
- XLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
- _decorators(decorators) {}
-
- virtual OopMapSet* generate_code(StubAssembler* sasm) {
- XBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
- return nullptr;
- }
-};
-
-static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
- XLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
- CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl);
- return code_blob->code_begin();
-}
-
-void XBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
- _load_barrier_on_oop_field_preloaded_runtime_stub =
- generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
- _load_barrier_on_weak_oop_field_preloaded_runtime_stub =
- generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
-}
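
Editor's note: the deleted C1 code emits a load barrier as an inline fast-path test plus an out-of-line stub that heals the slot. A conceptual sketch of that shape, with the bad-bit mask standing in for the `LIR_OpXLoadBarrierTest` node (values and helpers here are invented for illustration):

```cpp
#include <cstdint>
#include <cstdio>

static uintptr_t bad_mask = 0x4;  // flipped by the GC between phases

uintptr_t slow_path_heal(uintptr_t* addr, uintptr_t ref) {
  uintptr_t healed = ref & ~bad_mask;  // "remap" the reference
  *addr = healed;                      // store back: later loads stay fast
  return healed;
}

uintptr_t load_with_barrier(uintptr_t* addr) {
  uintptr_t ref = *addr;               // plain load (fast path)
  if (ref & bad_mask) {                // barrier test, rarely taken
    ref = slow_path_heal(addr, ref);   // out-of-line stub
  }
  return ref;
}

int main() {
  uintptr_t slot = 0x1004;             // a "bad" colored pointer
  std::printf("first load:  %#zx\n", (size_t)load_with_barrier(&slot));
  std::printf("second load: %#zx\n", (size_t)load_with_barrier(&slot));
}
```
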
diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp
deleted file mode 100644
index 26c2e142cdf..00000000000
--- a/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_C1_XBARRIERSETC1_HPP
-#define SHARE_GC_X_C1_XBARRIERSETC1_HPP
-
-#include "c1/c1_CodeStubs.hpp"
-#include "c1/c1_IR.hpp"
-#include "c1/c1_LIR.hpp"
-#include "gc/shared/c1/barrierSetC1.hpp"
-#include "oops/accessDecorators.hpp"
-
-class XLoadBarrierStubC1 : public CodeStub {
-private:
- DecoratorSet _decorators;
- LIR_Opr _ref_addr;
- LIR_Opr _ref;
- LIR_Opr _tmp;
- address _runtime_stub;
-
-public:
- XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
-
- DecoratorSet decorators() const;
- LIR_Opr ref() const;
- LIR_Opr ref_addr() const;
- LIR_Opr tmp() const;
- address runtime_stub() const;
-
- virtual void emit_code(LIR_Assembler* ce);
- virtual void visit(LIR_OpVisitState* visitor);
-
-#ifndef PRODUCT
- virtual void print_name(outputStream* out) const;
-#endif // PRODUCT
-};
-
-class XBarrierSetC1 : public BarrierSetC1 {
-private:
- address _load_barrier_on_oop_field_preloaded_runtime_stub;
- address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
-
- address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
- void load_barrier(LIRAccess& access, LIR_Opr result) const;
-
-protected:
- virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);
- virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
- virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
- virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
-
-public:
- XBarrierSetC1();
-
- virtual void generate_c1_runtime_stubs(BufferBlob* blob);
-};
-
-#endif // SHARE_GC_X_C1_XBARRIERSETC1_HPP
diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
deleted file mode 100644
index d006b37e7d2..00000000000
--- a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "classfile/javaClasses.hpp"
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#include "gc/x/xBarrierSet.hpp"
-#include "gc/x/xBarrierSetAssembler.hpp"
-#include "gc/x/xBarrierSetRuntime.hpp"
-#include "opto/arraycopynode.hpp"
-#include "opto/addnode.hpp"
-#include "opto/block.hpp"
-#include "opto/compile.hpp"
-#include "opto/graphKit.hpp"
-#include "opto/machnode.hpp"
-#include "opto/macro.hpp"
-#include "opto/memnode.hpp"
-#include "opto/node.hpp"
-#include "opto/output.hpp"
-#include "opto/regalloc.hpp"
-#include "opto/rootnode.hpp"
-#include "opto/runtime.hpp"
-#include "opto/type.hpp"
-#include "utilities/growableArray.hpp"
-#include "utilities/macros.hpp"
-
-class XBarrierSetC2State : public ArenaObj {
-private:
- GrowableArray<XLoadBarrierStubC2*>* _stubs;
- Node_Array _live;
-
-public:
- XBarrierSetC2State(Arena* arena) :
- _stubs(new (arena) GrowableArray<XLoadBarrierStubC2*>(arena, 8, 0, nullptr)),
- _live(arena) {}
-
- GrowableArray<XLoadBarrierStubC2*>* stubs() {
- return _stubs;
- }
-
- RegMask* live(const Node* node) {
- if (!node->is_Mach()) {
- // Don't need liveness for non-MachNodes
- return nullptr;
- }
-
- const MachNode* const mach = node->as_Mach();
- if (mach->barrier_data() == XLoadBarrierElided) {
- // Don't need liveness data for nodes without barriers
- return nullptr;
- }
-
- RegMask* live = (RegMask*)_live[node->_idx];
- if (live == nullptr) {
- live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask();
- _live.map(node->_idx, (Node*)live);
- }
-
- return live;
- }
-};
-
-static XBarrierSetC2State* barrier_set_state() {
- return reinterpret_cast<XBarrierSetC2State*>(Compile::current()->barrier_set_state());
-}
-
-XLoadBarrierStubC2* XLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
- XLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) XLoadBarrierStubC2(node, ref_addr, ref, tmp, barrier_data);
- if (!Compile::current()->output()->in_scratch_emit_size()) {
- barrier_set_state()->stubs()->append(stub);
- }
-
- return stub;
-}
-
-XLoadBarrierStubC2::XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) :
- _node(node),
- _ref_addr(ref_addr),
- _ref(ref),
- _tmp(tmp),
- _barrier_data(barrier_data),
- _entry(),
- _continuation() {
- assert_different_registers(ref, ref_addr.base());
- assert_different_registers(ref, ref_addr.index());
-}
-
-Address XLoadBarrierStubC2::ref_addr() const {
- return _ref_addr;
-}
-
-Register XLoadBarrierStubC2::ref() const {
- return _ref;
-}
-
-Register XLoadBarrierStubC2::tmp() const {
- return _tmp;
-}
-
-address XLoadBarrierStubC2::slow_path() const {
- DecoratorSet decorators = DECORATORS_NONE;
- if (_barrier_data & XLoadBarrierStrong) {
- decorators |= ON_STRONG_OOP_REF;
- }
- if (_barrier_data & XLoadBarrierWeak) {
- decorators |= ON_WEAK_OOP_REF;
- }
- if (_barrier_data & XLoadBarrierPhantom) {
- decorators |= ON_PHANTOM_OOP_REF;
- }
- if (_barrier_data & XLoadBarrierNoKeepalive) {
- decorators |= AS_NO_KEEPALIVE;
- }
- return XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators);
-}
-
-RegMask& XLoadBarrierStubC2::live() const {
- RegMask* mask = barrier_set_state()->live(_node);
- assert(mask != nullptr, "must be mach-node with barrier");
- return *mask;
-}
-
-Label* XLoadBarrierStubC2::entry() {
- // The _entry will never be bound when in_scratch_emit_size() is true.
- // However, we still need to return a label that is not bound now, but
- // will eventually be bound. Any label will do, as it will only act as
- // a placeholder, so we return the _continuation label.
- return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry;
-}
-
-Label* XLoadBarrierStubC2::continuation() {
- return &_continuation;
-}
-
-void* XBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
- return new (comp_arena) XBarrierSetC2State(comp_arena);
-}
-
-void XBarrierSetC2::late_barrier_analysis() const {
- analyze_dominating_barriers();
- compute_liveness_at_stubs();
-}
-
-void XBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
- MacroAssembler masm(&cb);
- GrowableArray<XLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
-
- for (int i = 0; i < stubs->length(); i++) {
- // Make sure there is enough space in the code buffer
- if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
- ciEnv::current()->record_failure("CodeCache is full");
- return;
- }
-
- XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
- }
-
- masm.flush();
-}
-
-int XBarrierSetC2::estimate_stub_size() const {
- Compile* const C = Compile::current();
- BufferBlob* const blob = C->output()->scratch_buffer_blob();
- GrowableArray<XLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
- int size = 0;
-
- for (int i = 0; i < stubs->length(); i++) {
- CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin());
- MacroAssembler masm(&cb);
- XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
- size += cb.insts_size();
- }
-
- return size;
-}
-
-static void set_barrier_data(C2Access& access) {
- if (XBarrierSet::barrier_needed(access.decorators(), access.type())) {
- uint8_t barrier_data = 0;
-
- if (access.decorators() & ON_PHANTOM_OOP_REF) {
- barrier_data |= XLoadBarrierPhantom;
- } else if (access.decorators() & ON_WEAK_OOP_REF) {
- barrier_data |= XLoadBarrierWeak;
- } else {
- barrier_data |= XLoadBarrierStrong;
- }
-
- if (access.decorators() & AS_NO_KEEPALIVE) {
- barrier_data |= XLoadBarrierNoKeepalive;
- }
-
- access.set_barrier_data(barrier_data);
- }
-}
-
-Node* XBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
- set_barrier_data(access);
- return BarrierSetC2::load_at_resolved(access, val_type);
-}
-
-Node* XBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
- Node* new_val, const Type* val_type) const {
- set_barrier_data(access);
- return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
-}
-
-Node* XBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
- Node* new_val, const Type* value_type) const {
- set_barrier_data(access);
- return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
-}
-
-Node* XBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
- set_barrier_data(access);
- return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
-}
-
-bool XBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type,
- bool is_clone, bool is_clone_instance,
- ArrayCopyPhase phase) const {
- if (phase == ArrayCopyPhase::Parsing) {
- return false;
- }
- if (phase == ArrayCopyPhase::Optimization) {
- return is_clone_instance;
- }
- // else ArrayCopyPhase::Expansion
- return type == T_OBJECT || type == T_ARRAY;
-}
-
-// This TypeFunc assumes a 64bit system
-static const TypeFunc* clone_type() {
- // Create input type (domain)
- const Type** domain_fields = TypeTuple::fields(4);
- domain_fields[TypeFunc::Parms + 0] = TypeInstPtr::NOTNULL; // src
- domain_fields[TypeFunc::Parms + 1] = TypeInstPtr::NOTNULL; // dst
- domain_fields[TypeFunc::Parms + 2] = TypeLong::LONG; // size lower
- domain_fields[TypeFunc::Parms + 3] = Type::HALF; // size upper
- const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 4, domain_fields);
-
- // Create result type (range)
- const Type** range_fields = TypeTuple::fields(0);
- const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 0, range_fields);
-
- return TypeFunc::make(domain, range);
-}
-
-#define XTOP LP64_ONLY(COMMA phase->top())
-
-void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
- Node* const src = ac->in(ArrayCopyNode::Src);
- const TypeAryPtr* ary_ptr = src->get_ptr_type()->isa_aryptr();
-
- if (ac->is_clone_array() && ary_ptr != nullptr) {
- BasicType bt = ary_ptr->elem()->array_element_basic_type();
- if (is_reference_type(bt)) {
- // Clone object array
- bt = T_OBJECT;
- } else {
- // Clone primitive array
- bt = T_LONG;
- }
-
- Node* ctrl = ac->in(TypeFunc::Control);
- Node* mem = ac->in(TypeFunc::Memory);
- Node* src = ac->in(ArrayCopyNode::Src);
- Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
- Node* dest = ac->in(ArrayCopyNode::Dest);
- Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
- Node* length = ac->in(ArrayCopyNode::Length);
-
- if (bt == T_OBJECT) {
- // BarrierSetC2::clone sets the offsets via BarrierSetC2::arraycopy_payload_base_offset
- // which 8-byte aligns them to allow for word size copies. Make sure the offsets point
- // to the first element in the array when cloning object arrays. Otherwise, load
- // barriers are applied to parts of the header. Also adjust the length accordingly.
- assert(src_offset == dest_offset, "should be equal");
- jlong offset = src_offset->get_long();
- if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) {
- assert(!UseCompressedClassPointers, "should only happen without compressed class pointers");
- assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset");
- length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs
- src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT));
- dest_offset = src_offset;
- }
- }
- Node* payload_src = phase->basic_plus_adr(src, src_offset);
- Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
-
- const char* copyfunc_name = "arraycopy";
- address copyfunc_addr = phase->basictype2arraycopy(bt, nullptr, nullptr, true, copyfunc_name, true);
-
- const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
- const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
-
- Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
- phase->transform_later(call);
-
- phase->igvn().replace_node(ac, call);
- return;
- }
-
- // Clone instance
- Node* const ctrl = ac->in(TypeFunc::Control);
- Node* const mem = ac->in(TypeFunc::Memory);
- Node* const dst = ac->in(ArrayCopyNode::Dest);
- Node* const size = ac->in(ArrayCopyNode::Length);
-
- assert(size->bottom_type()->is_long(), "Should be long");
-
- // The native clone we are calling here expects the instance size in words
- // Add header/offset size to payload size to get instance size.
- Node* const base_offset = phase->longcon(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
- Node* const full_size = phase->transform_later(new AddLNode(size, base_offset));
-
- Node* const call = phase->make_leaf_call(ctrl,
- mem,
- clone_type(),
- XBarrierSetRuntime::clone_addr(),
- "XBarrierSetRuntime::clone",
- TypeRawPtr::BOTTOM,
- src,
- dst,
- full_size,
- phase->top());
- phase->transform_later(call);
- phase->igvn().replace_node(ac, call);
-}
-
-#undef XTOP
-
-// == Dominating barrier elision ==
-
-static bool block_has_safepoint(const Block* block, uint from, uint to) {
- for (uint i = from; i < to; i++) {
- if (block->get_node(i)->is_MachSafePoint()) {
- // Safepoint found
- return true;
- }
- }
-
- // Safepoint not found
- return false;
-}
-
-static bool block_has_safepoint(const Block* block) {
- return block_has_safepoint(block, 0, block->number_of_nodes());
-}
-
-static uint block_index(const Block* block, const Node* node) {
- for (uint j = 0; j < block->number_of_nodes(); ++j) {
- if (block->get_node(j) == node) {
- return j;
- }
- }
- ShouldNotReachHere();
- return 0;
-}
-
-void XBarrierSetC2::analyze_dominating_barriers() const {
- ResourceMark rm;
- Compile* const C = Compile::current();
- PhaseCFG* const cfg = C->cfg();
- Block_List worklist;
- Node_List mem_ops;
- Node_List barrier_loads;
-
- // Step 1 - Find accesses, and track them in lists
- for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
- const Block* const block = cfg->get_block(i);
- for (uint j = 0; j < block->number_of_nodes(); ++j) {
- const Node* const node = block->get_node(j);
- if (!node->is_Mach()) {
- continue;
- }
-
- MachNode* const mach = node->as_Mach();
- switch (mach->ideal_Opcode()) {
- case Op_LoadP:
- if ((mach->barrier_data() & XLoadBarrierStrong) != 0) {
- barrier_loads.push(mach);
- }
- if ((mach->barrier_data() & (XLoadBarrierStrong | XLoadBarrierNoKeepalive)) ==
- XLoadBarrierStrong) {
- mem_ops.push(mach);
- }
- break;
- case Op_CompareAndExchangeP:
- case Op_CompareAndSwapP:
- case Op_GetAndSetP:
- if ((mach->barrier_data() & XLoadBarrierStrong) != 0) {
- barrier_loads.push(mach);
- }
- case Op_StoreP:
- mem_ops.push(mach);
- break;
-
- default:
- break;
- }
- }
- }
-
- // Step 2 - Find dominating accesses for each load
- for (uint i = 0; i < barrier_loads.size(); i++) {
- MachNode* const load = barrier_loads.at(i)->as_Mach();
- const TypePtr* load_adr_type = nullptr;
- intptr_t load_offset = 0;
- const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type);
- Block* const load_block = cfg->get_block_for_node(load);
- const uint load_index = block_index(load_block, load);
-
- for (uint j = 0; j < mem_ops.size(); j++) {
- MachNode* mem = mem_ops.at(j)->as_Mach();
- const TypePtr* mem_adr_type = nullptr;
- intptr_t mem_offset = 0;
- const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type);
- Block* mem_block = cfg->get_block_for_node(mem);
- uint mem_index = block_index(mem_block, mem);
-
- if (load_obj == NodeSentinel || mem_obj == NodeSentinel ||
- load_obj == nullptr || mem_obj == nullptr ||
- load_offset < 0 || mem_offset < 0) {
- continue;
- }
-
- if (mem_obj != load_obj || mem_offset != load_offset) {
- // Not the same addresses, not a candidate
- continue;
- }
-
- if (load_block == mem_block) {
- // Earlier accesses in the same block
- if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) {
- load->set_barrier_data(XLoadBarrierElided);
- }
- } else if (mem_block->dominates(load_block)) {
- // Dominating block? Look around for safepoints
- ResourceMark rm;
- Block_List stack;
- VectorSet visited;
- stack.push(load_block);
- bool safepoint_found = block_has_safepoint(load_block);
- while (!safepoint_found && stack.size() > 0) {
- Block* block = stack.pop();
- if (visited.test_set(block->_pre_order)) {
- continue;
- }
- if (block_has_safepoint(block)) {
- safepoint_found = true;
- break;
- }
- if (block == mem_block) {
- continue;
- }
-
- // Push predecessor blocks
- for (uint p = 1; p < block->num_preds(); ++p) {
- Block* pred = cfg->get_block_for_node(block->pred(p));
- stack.push(pred);
- }
- }
-
- if (!safepoint_found) {
- load->set_barrier_data(XLoadBarrierElided);
- }
- }
- }
- }
-}
-
-// == Reduced spilling optimization ==
-
-void XBarrierSetC2::compute_liveness_at_stubs() const {
- ResourceMark rm;
- Compile* const C = Compile::current();
- Arena* const A = Thread::current()->resource_area();
- PhaseCFG* const cfg = C->cfg();
- PhaseRegAlloc* const regalloc = C->regalloc();
- RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
- XBarrierSetAssembler* const bs = XBarrierSet::assembler();
- Block_List worklist;
-
- for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
- new ((void*)(live + i)) RegMask();
- worklist.push(cfg->get_block(i));
- }
-
- while (worklist.size() > 0) {
- const Block* const block = worklist.pop();
- RegMask& old_live = live[block->_pre_order];
- RegMask new_live;
-
- // Initialize to union of successors
- for (uint i = 0; i < block->_num_succs; i++) {
- const uint succ_id = block->_succs[i]->_pre_order;
- new_live.OR(live[succ_id]);
- }
-
- // Walk block backwards, computing liveness
- for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
- const Node* const node = block->get_node(i);
-
- // Remove def bits
- const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
- const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
- if (first != OptoReg::Bad) {
- new_live.Remove(first);
- }
- if (second != OptoReg::Bad) {
- new_live.Remove(second);
- }
-
- // Add use bits
- for (uint j = 1; j < node->req(); ++j) {
- const Node* const use = node->in(j);
- const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
- const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
- if (first != OptoReg::Bad) {
- new_live.Insert(first);
- }
- if (second != OptoReg::Bad) {
- new_live.Insert(second);
- }
- }
-
- // If this node tracks liveness, update it
- RegMask* const regs = barrier_set_state()->live(node);
- if (regs != nullptr) {
- regs->OR(new_live);
- }
- }
-
- // Now at block top, see if we have any changes
- new_live.SUBTRACT(old_live);
- if (new_live.is_NotEmpty()) {
- // Liveness has refined, update and propagate to prior blocks
- old_live.OR(new_live);
- for (uint i = 1; i < block->num_preds(); ++i) {
- Block* const pred = cfg->get_block_for_node(block->pred(i));
- worklist.push(pred);
- }
- }
- }
-}
-
-#ifndef PRODUCT
-void XBarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const {
- if ((mach->barrier_data() & XLoadBarrierStrong) != 0) {
- st->print("strong ");
- }
- if ((mach->barrier_data() & XLoadBarrierWeak) != 0) {
- st->print("weak ");
- }
- if ((mach->barrier_data() & XLoadBarrierPhantom) != 0) {
- st->print("phantom ");
- }
- if ((mach->barrier_data() & XLoadBarrierNoKeepalive) != 0) {
- st->print("nokeepalive ");
- }
-}
-#endif // !PRODUCT
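
Editor's note: `compute_liveness_at_stubs` above is a classic backward dataflow pass: live-out of a block is the union of its successors' live-in, defs kill bits, uses add them, and predecessors are re-queued whenever a block's solution grows. A minimal worklist model of the same fixed-point computation (simplified to per-block gen/kill sets rather than the per-node walk):

```cpp
#include <bitset>
#include <cstdio>
#include <vector>

struct Block {
  std::vector<int> preds, succs;
  std::bitset<32> gen, kill;  // registers used / defined in the block
  std::bitset<32> live;       // live-in solution we iterate toward
};

void compute_liveness(std::vector<Block>& cfg) {
  std::vector<int> worklist;
  for (int i = 0; i < (int)cfg.size(); i++) worklist.push_back(i);
  while (!worklist.empty()) {
    Block& b = cfg[worklist.back()];
    worklist.pop_back();
    std::bitset<32> out;
    for (int s : b.succs) out |= cfg[s].live;      // union of successors
    std::bitset<32> in = (out & ~b.kill) | b.gen;  // defs kill, uses gen
    if ((in & ~b.live).any()) {                    // grew: propagate backward
      b.live |= in;
      for (int p : b.preds) worklist.push_back(p);
    }
  }
}

int main() {
  std::vector<Block> cfg(2);
  cfg[0].succs = {1};
  cfg[1].preds = {0};
  cfg[1].gen.set(3);  // block 1 uses r3, so r3 must be live through block 0
  compute_liveness(cfg);
  std::printf("r3 live into block 0: %d\n", (int)cfg[0].live.test(3));
}
```
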
diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp
deleted file mode 100644
index 91835338fd7..00000000000
--- a/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_C2_XBARRIERSETC2_HPP
-#define SHARE_GC_X_C2_XBARRIERSETC2_HPP
-
-#include "gc/shared/c2/barrierSetC2.hpp"
-#include "memory/allocation.hpp"
-#include "opto/node.hpp"
-#include "utilities/growableArray.hpp"
-
-const uint8_t XLoadBarrierElided = 0;
-const uint8_t XLoadBarrierStrong = 1;
-const uint8_t XLoadBarrierWeak = 2;
-const uint8_t XLoadBarrierPhantom = 4;
-const uint8_t XLoadBarrierNoKeepalive = 8;
-
-class XLoadBarrierStubC2 : public ArenaObj {
-private:
- const MachNode* _node;
- const Address _ref_addr;
- const Register _ref;
- const Register _tmp;
- const uint8_t _barrier_data;
- Label _entry;
- Label _continuation;
-
- XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data);
-
-public:
- static XLoadBarrierStubC2* create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data);
-
- Address ref_addr() const;
- Register ref() const;
- Register tmp() const;
- address slow_path() const;
- RegMask& live() const;
- Label* entry();
- Label* continuation();
-};
-
-class XBarrierSetC2 : public BarrierSetC2 {
-private:
- void compute_liveness_at_stubs() const;
- void analyze_dominating_barriers() const;
-
-protected:
- virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
- virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access,
- Node* expected_val,
- Node* new_val,
- const Type* val_type) const;
- virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access,
- Node* expected_val,
- Node* new_val,
- const Type* value_type) const;
- virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access,
- Node* new_val,
- const Type* val_type) const;
-
-public:
- virtual void* create_barrier_state(Arena* comp_arena) const;
- virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc,
- BasicType type,
- bool is_clone,
- bool is_clone_instance,
- ArrayCopyPhase phase) const;
- virtual void clone_at_expansion(PhaseMacroExpand* phase,
- ArrayCopyNode* ac) const;
-
- virtual void late_barrier_analysis() const;
- virtual int estimate_stub_size() const;
- virtual void emit_stubs(CodeBuffer& cb) const;
-
-#ifndef PRODUCT
- virtual void dump_barrier_data(const MachNode* mach, outputStream* st) const;
-#endif
-};
-
-#endif // SHARE_GC_X_C2_XBARRIERSETC2_HPP
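
Editor's note: the deleted header encodes the barrier kind as one byte of flags attached to a compiled node, later decoded to pick a slow-path stub (see `slow_path()` in the .cpp above). A small sketch of that encode/decode round trip, with stub names invented for illustration:

```cpp
#include <cstdint>
#include <cstdio>

const uint8_t LoadBarrierElided      = 0;
const uint8_t LoadBarrierStrong      = 1;
const uint8_t LoadBarrierWeak        = 2;
const uint8_t LoadBarrierPhantom     = 4;
const uint8_t LoadBarrierNoKeepalive = 8;

// Decode the flag byte into the matching slow-path entry, strongest-special
// reference kind first, mirroring the decorator mapping in slow_path().
const char* pick_slow_path(uint8_t barrier_data) {
  if (barrier_data & LoadBarrierPhantom) return "load_barrier_on_phantom_oop";
  if (barrier_data & LoadBarrierWeak)    return "load_barrier_on_weak_oop";
  if (barrier_data & LoadBarrierStrong)  return "load_barrier_on_oop";
  return "none (barrier elided)";
}

int main() {
  uint8_t weak_nokeep = LoadBarrierWeak | LoadBarrierNoKeepalive;
  std::printf("%s\n", pick_slow_path(weak_nokeep));
  std::printf("%s\n", pick_slow_path(LoadBarrierElided));
}
```
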
diff --git a/src/hotspot/share/gc/x/vmStructs_x.cpp b/src/hotspot/share/gc/x/vmStructs_x.cpp
deleted file mode 100644
index 4c7d63f41b4..00000000000
--- a/src/hotspot/share/gc/x/vmStructs_x.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/vmStructs_x.hpp"
-
-XGlobalsForVMStructs::XGlobalsForVMStructs() :
- _XGlobalPhase(&XGlobalPhase),
- _XGlobalSeqNum(&XGlobalSeqNum),
- _XAddressOffsetMask(&XAddressOffsetMask),
- _XAddressMetadataMask(&XAddressMetadataMask),
- _XAddressMetadataFinalizable(&XAddressMetadataFinalizable),
- _XAddressGoodMask(&XAddressGoodMask),
- _XAddressBadMask(&XAddressBadMask),
- _XAddressWeakBadMask(&XAddressWeakBadMask),
- _XObjectAlignmentSmallShift(&XObjectAlignmentSmallShift),
- _XObjectAlignmentSmall(&XObjectAlignmentSmall) {
-}
-
-XGlobalsForVMStructs XGlobalsForVMStructs::_instance;
-XGlobalsForVMStructs* XGlobalsForVMStructs::_instance_p = &XGlobalsForVMStructs::_instance;
diff --git a/src/hotspot/share/gc/x/vmStructs_x.hpp b/src/hotspot/share/gc/x/vmStructs_x.hpp
deleted file mode 100644
index b911c21be23..00000000000
--- a/src/hotspot/share/gc/x/vmStructs_x.hpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_VMSTRUCTS_X_HPP
-#define SHARE_GC_X_VMSTRUCTS_X_HPP
-
-#include "gc/x/xAttachedArray.hpp"
-#include "gc/x/xCollectedHeap.hpp"
-#include "gc/x/xForwarding.hpp"
-#include "gc/x/xGranuleMap.hpp"
-#include "gc/x/xHeap.hpp"
-#include "gc/x/xPageAllocator.hpp"
-#include "utilities/macros.hpp"
-
-// Expose some ZGC globals to the SA agent.
-class XGlobalsForVMStructs {
- static XGlobalsForVMStructs _instance;
-
-public:
- static XGlobalsForVMStructs* _instance_p;
-
- XGlobalsForVMStructs();
-
- uint32_t* _XGlobalPhase;
-
- uint32_t* _XGlobalSeqNum;
-
- uintptr_t* _XAddressOffsetMask;
- uintptr_t* _XAddressMetadataMask;
- uintptr_t* _XAddressMetadataFinalizable;
- uintptr_t* _XAddressGoodMask;
- uintptr_t* _XAddressBadMask;
- uintptr_t* _XAddressWeakBadMask;
-
- const int* _XObjectAlignmentSmallShift;
- const int* _XObjectAlignmentSmall;
-};
-
-typedef XGranuleMap<XPage*> XGranuleMapForPageTable;
-typedef XGranuleMap<XForwarding*> XGranuleMapForForwarding;
-typedef XAttachedArray<XForwarding, XForwardingEntry> XAttachedArrayForForwarding;
-
-#define VM_STRUCTS_X(nonstatic_field, volatile_nonstatic_field, static_field) \
- static_field(XGlobalsForVMStructs, _instance_p, XGlobalsForVMStructs*) \
- nonstatic_field(XGlobalsForVMStructs, _XGlobalPhase, uint32_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XGlobalSeqNum, uint32_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XAddressOffsetMask, uintptr_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataMask, uintptr_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataFinalizable, uintptr_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XAddressGoodMask, uintptr_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XAddressBadMask, uintptr_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XAddressWeakBadMask, uintptr_t*) \
- nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmallShift, const int*) \
- nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmall, const int*) \
- \
- nonstatic_field(XCollectedHeap, _heap, XHeap) \
- \
- nonstatic_field(XHeap, _page_allocator, XPageAllocator) \
- nonstatic_field(XHeap, _page_table, XPageTable) \
- nonstatic_field(XHeap, _forwarding_table, XForwardingTable) \
- nonstatic_field(XHeap, _relocate, XRelocate) \
- \
- nonstatic_field(XPage, _type, const uint8_t) \
- nonstatic_field(XPage, _seqnum, uint32_t) \
- nonstatic_field(XPage, _virtual, const XVirtualMemory) \
- volatile_nonstatic_field(XPage, _top, uintptr_t) \
- \
- nonstatic_field(XPageAllocator, _max_capacity, const size_t) \
- volatile_nonstatic_field(XPageAllocator, _capacity, size_t) \
- volatile_nonstatic_field(XPageAllocator, _used, size_t) \
- \
- nonstatic_field(XPageTable, _map, XGranuleMapForPageTable) \
- \
- nonstatic_field(XGranuleMapForPageTable, _map, XPage** const) \
- nonstatic_field(XGranuleMapForForwarding, _map, XForwarding** const) \
- \
- nonstatic_field(XForwardingTable, _map, XGranuleMapForForwarding) \
- \
- nonstatic_field(XVirtualMemory, _start, const uintptr_t) \
- nonstatic_field(XVirtualMemory, _end, const uintptr_t) \
- \
- nonstatic_field(XForwarding, _virtual, const XVirtualMemory) \
- nonstatic_field(XForwarding, _object_alignment_shift, const size_t) \
- volatile_nonstatic_field(XForwarding, _ref_count, int) \
- nonstatic_field(XForwarding, _entries, const XAttachedArrayForForwarding) \
- nonstatic_field(XForwardingEntry, _entry, uint64_t) \
- nonstatic_field(XAttachedArrayForForwarding, _length, const size_t)
-
-#define VM_INT_CONSTANTS_X(declare_constant, declare_constant_with_value) \
- declare_constant(XPhaseRelocate) \
- declare_constant(XPageTypeSmall) \
- declare_constant(XPageTypeMedium) \
- declare_constant(XPageTypeLarge) \
- declare_constant(XObjectAlignmentMediumShift) \
- declare_constant(XObjectAlignmentLargeShift)
-
-#define VM_LONG_CONSTANTS_X(declare_constant) \
- declare_constant(XGranuleSizeShift) \
- declare_constant(XPageSizeSmallShift) \
- declare_constant(XPageSizeMediumShift) \
- declare_constant(XAddressOffsetShift) \
- declare_constant(XAddressOffsetBits) \
- declare_constant(XAddressOffsetMask) \
- declare_constant(XAddressOffsetMax)
-
-#define VM_TYPES_X(declare_type, declare_toplevel_type, declare_integer_type) \
- declare_toplevel_type(XGlobalsForVMStructs) \
- declare_type(XCollectedHeap, CollectedHeap) \
- declare_toplevel_type(XHeap) \
- declare_toplevel_type(XRelocate) \
- declare_toplevel_type(XPage) \
- declare_toplevel_type(XPageAllocator) \
- declare_toplevel_type(XPageTable) \
- declare_toplevel_type(XAttachedArrayForForwarding) \
- declare_toplevel_type(XGranuleMapForPageTable) \
- declare_toplevel_type(XGranuleMapForForwarding) \
- declare_toplevel_type(XVirtualMemory) \
- declare_toplevel_type(XForwardingTable) \
- declare_toplevel_type(XForwarding) \
- declare_toplevel_type(XForwardingEntry) \
- declare_toplevel_type(XPhysicalMemoryManager)
-
-#endif // SHARE_GC_X_VMSTRUCTS_X_HPP
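
The header above captures the standard HotSpot trick for exposing VM globals to the Serviceability Agent: a singleton holds plain pointers to the real globals, so an out-of-process debugger only has to resolve one symbol to reach all of them. A minimal standalone sketch of that indirection, with illustrative names rather than the HotSpot API:

#include <cstdint>

// Stand-ins for the real VM globals (illustrative names only).
uint32_t  GlobalPhase    = 0;
uintptr_t AddressBadMask = 0;

// Singleton aggregating pointers to the globals. A debugger that can
// resolve the symbol _instance_p chases one pointer and then reads
// every global through the recorded addresses.
struct GlobalsForDebugger {
  uint32_t*  _phase;
  uintptr_t* _bad_mask;

  GlobalsForDebugger() : _phase(&GlobalPhase), _bad_mask(&AddressBadMask) {}

  static GlobalsForDebugger  _instance;
  static GlobalsForDebugger* _instance_p;
};

GlobalsForDebugger  GlobalsForDebugger::_instance;
GlobalsForDebugger* GlobalsForDebugger::_instance_p = &GlobalsForDebugger::_instance;
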
diff --git a/src/hotspot/share/gc/x/xAbort.cpp b/src/hotspot/share/gc/x/xAbort.cpp
deleted file mode 100644
index 11b8d840d22..00000000000
--- a/src/hotspot/share/gc/x/xAbort.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xAbort.hpp"
-#include "runtime/atomic.hpp"
-
-volatile bool XAbort::_should_abort = false;
-
-void XAbort::abort() {
- Atomic::release_store_fence(&_should_abort, true);
-}
diff --git a/src/hotspot/share/gc/x/xAbort.hpp b/src/hotspot/share/gc/x/xAbort.hpp
deleted file mode 100644
index 808a350584b..00000000000
--- a/src/hotspot/share/gc/x/xAbort.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XABORT_HPP
-#define SHARE_GC_X_XABORT_HPP
-
-#include "memory/allStatic.hpp"
-
-class XAbort : public AllStatic {
-private:
- static volatile bool _should_abort;
-
-public:
- static bool should_abort();
- static void abort();
-};
-
-#endif // SHARE_GC_X_XABORT_HPP
diff --git a/src/hotspot/share/gc/x/xAbort.inline.hpp b/src/hotspot/share/gc/x/xAbort.inline.hpp
deleted file mode 100644
index 8ef1219330a..00000000000
--- a/src/hotspot/share/gc/x/xAbort.inline.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XABORT_INLINE_HPP
-#define SHARE_GC_X_XABORT_INLINE_HPP
-
-#include "gc/x/xAbort.hpp"
-
-#include "runtime/atomic.hpp"
-
-inline bool XAbort::should_abort() {
- return Atomic::load_acquire(&_should_abort);
-}
-
-#endif // SHARE_GC_X_XABORT_INLINE_HPP
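
The XAbort pair removed above is a release-store/acquire-load flag handshake: the requesting thread publishes the abort with a fenced store, and GC workers poll the flag at convenient cancellation points. A self-contained sketch of the same idiom using std::atomic instead of HotSpot's Atomic class (the exact strength of release_store_fence is approximated here with seq_cst):

#include <atomic>

class Abort {
  static std::atomic<bool> _should_abort;

public:
  // Requesting side: publish the abort so all prior writes are visible
  // to any worker that subsequently observes the flag.
  static void abort() {
    _should_abort.store(true, std::memory_order_seq_cst);
  }

  // Worker side: cheap polling load at cancellation points.
  static bool should_abort() {
    return _should_abort.load(std::memory_order_acquire);
  }
};

std::atomic<bool> Abort::_should_abort{false};
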
diff --git a/src/hotspot/share/gc/x/xAddress.cpp b/src/hotspot/share/gc/x/xAddress.cpp
deleted file mode 100644
index 33dffc662f1..00000000000
--- a/src/hotspot/share/gc/x/xAddress.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xAddress.hpp"
-#include "gc/x/xGlobals.hpp"
-
-void XAddress::set_good_mask(uintptr_t mask) {
- XAddressGoodMask = mask;
- XAddressBadMask = XAddressGoodMask ^ XAddressMetadataMask;
- XAddressWeakBadMask = (XAddressGoodMask | XAddressMetadataRemapped | XAddressMetadataFinalizable) ^ XAddressMetadataMask;
-}
-
-void XAddress::initialize() {
- XAddressOffsetBits = XPlatformAddressOffsetBits();
- XAddressOffsetMask = (((uintptr_t)1 << XAddressOffsetBits) - 1) << XAddressOffsetShift;
- XAddressOffsetMax = (uintptr_t)1 << XAddressOffsetBits;
-
- XAddressMetadataShift = XPlatformAddressMetadataShift();
- XAddressMetadataMask = (((uintptr_t)1 << XAddressMetadataBits) - 1) << XAddressMetadataShift;
-
- XAddressMetadataMarked0 = (uintptr_t)1 << (XAddressMetadataShift + 0);
- XAddressMetadataMarked1 = (uintptr_t)1 << (XAddressMetadataShift + 1);
- XAddressMetadataRemapped = (uintptr_t)1 << (XAddressMetadataShift + 2);
- XAddressMetadataFinalizable = (uintptr_t)1 << (XAddressMetadataShift + 3);
-
- XAddressMetadataMarked = XAddressMetadataMarked0;
- set_good_mask(XAddressMetadataRemapped);
-}
-
-void XAddress::flip_to_marked() {
- XAddressMetadataMarked ^= (XAddressMetadataMarked0 | XAddressMetadataMarked1);
- set_good_mask(XAddressMetadataMarked);
-}
-
-void XAddress::flip_to_remapped() {
- set_good_mask(XAddressMetadataRemapped);
-}
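
The mask arithmetic in the deleted xAddress.cpp is the core of single-generation ZGC's colored pointers: exactly one metadata bit is the "good" color at any time, and the bad mask is derived by XOR against the full metadata mask, so flipping the good color invalidates every stale pointer without touching the heap. A compact sketch of that derivation; the shift value 42 is an assumption for illustration, not the real platform layout:

#include <cstdint>
#include <cstdio>

int main() {
  const int shift = 42;   // assumed metadata shift, platform-dependent in reality
  const uintptr_t marked0  = uintptr_t(1) << (shift + 0);
  const uintptr_t marked1  = uintptr_t(1) << (shift + 1);
  const uintptr_t remapped = uintptr_t(1) << (shift + 2);
  const uintptr_t metadata = uintptr_t(0xf) << shift;   // all 4 metadata bits

  // The good mask holds exactly one color; every other color is bad.
  uintptr_t good = remapped;
  uintptr_t bad  = good ^ metadata;
  printf("good=%016llx bad=%016llx\n", (unsigned long long)good, (unsigned long long)bad);

  // Flipping to "marked" at mark start instantly invalidates every pointer
  // still colored with the previous good bit; no heap walk is needed.
  good = marked0;
  bad  = good ^ metadata;
  printf("good=%016llx bad=%016llx\n", (unsigned long long)good, (unsigned long long)bad);

  // The next marking cycle alternates to the other marked bit.
  good = marked1;
  bad  = good ^ metadata;
  printf("good=%016llx bad=%016llx\n", (unsigned long long)good, (unsigned long long)bad);
  return 0;
}
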
diff --git a/src/hotspot/share/gc/x/xAddress.hpp b/src/hotspot/share/gc/x/xAddress.hpp
deleted file mode 100644
index ff9d548f1af..00000000000
--- a/src/hotspot/share/gc/x/xAddress.hpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XADDRESS_HPP
-#define SHARE_GC_X_XADDRESS_HPP
-
-#include "memory/allStatic.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class XAddress : public AllStatic {
- friend class XAddressTest;
-
-private:
- static void set_good_mask(uintptr_t mask);
-
-public:
- static void initialize();
-
- static void flip_to_marked();
- static void flip_to_remapped();
-
- static bool is_null(uintptr_t value);
- static bool is_bad(uintptr_t value);
- static bool is_good(uintptr_t value);
- static bool is_good_or_null(uintptr_t value);
- static bool is_weak_bad(uintptr_t value);
- static bool is_weak_good(uintptr_t value);
- static bool is_weak_good_or_null(uintptr_t value);
- static bool is_marked(uintptr_t value);
- static bool is_marked_or_null(uintptr_t value);
- static bool is_finalizable(uintptr_t value);
- static bool is_finalizable_good(uintptr_t value);
- static bool is_remapped(uintptr_t value);
- static bool is_in(uintptr_t value);
-
- static uintptr_t offset(uintptr_t value);
- static uintptr_t good(uintptr_t value);
- static uintptr_t good_or_null(uintptr_t value);
- static uintptr_t finalizable_good(uintptr_t value);
- static uintptr_t marked(uintptr_t value);
- static uintptr_t marked0(uintptr_t value);
- static uintptr_t marked1(uintptr_t value);
- static uintptr_t remapped(uintptr_t value);
- static uintptr_t remapped_or_null(uintptr_t value);
-};
-
-#endif // SHARE_GC_X_XADDRESS_HPP
diff --git a/src/hotspot/share/gc/x/xAddress.inline.hpp b/src/hotspot/share/gc/x/xAddress.inline.hpp
deleted file mode 100644
index 046ee10af00..00000000000
--- a/src/hotspot/share/gc/x/xAddress.inline.hpp
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XADDRESS_INLINE_HPP
-#define SHARE_GC_X_XADDRESS_INLINE_HPP
-
-#include "gc/x/xAddress.hpp"
-
-#include "gc/x/xGlobals.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/powerOfTwo.hpp"
-
-inline bool XAddress::is_null(uintptr_t value) {
- return value == 0;
-}
-
-inline bool XAddress::is_bad(uintptr_t value) {
- return value & XAddressBadMask;
-}
-
-inline bool XAddress::is_good(uintptr_t value) {
- return !is_bad(value) && !is_null(value);
-}
-
-inline bool XAddress::is_good_or_null(uintptr_t value) {
- // Checking if an address is "not bad" is an optimized version of
- // checking if it's "good or null", which eliminates an explicit
- // null check. However, the implicit null check only checks that
- // the mask bits are zero, not that the entire address is zero.
- // This means that an address without mask bits would pass through
- // the barrier as if it were null. This should be harmless, as such
- // addresses should never be passed through the barrier.
- const bool result = !is_bad(value);
- assert((is_good(value) || is_null(value)) == result, "Bad address");
- return result;
-}
-
-inline bool XAddress::is_weak_bad(uintptr_t value) {
- return value & XAddressWeakBadMask;
-}
-
-inline bool XAddress::is_weak_good(uintptr_t value) {
- return !is_weak_bad(value) && !is_null(value);
-}
-
-inline bool XAddress::is_weak_good_or_null(uintptr_t value) {
- return !is_weak_bad(value);
-}
-
-inline bool XAddress::is_marked(uintptr_t value) {
- return value & XAddressMetadataMarked;
-}
-
-inline bool XAddress::is_marked_or_null(uintptr_t value) {
- return is_marked(value) || is_null(value);
-}
-
-inline bool XAddress::is_finalizable(uintptr_t value) {
- return value & XAddressMetadataFinalizable;
-}
-
-inline bool XAddress::is_finalizable_good(uintptr_t value) {
- return is_finalizable(value) && is_good(value ^ XAddressMetadataFinalizable);
-}
-
-inline bool XAddress::is_remapped(uintptr_t value) {
- return value & XAddressMetadataRemapped;
-}
-
-inline bool XAddress::is_in(uintptr_t value) {
- // Check that exactly one non-offset bit is set
- if (!is_power_of_2(value & ~XAddressOffsetMask)) {
- return false;
- }
-
- // Check that one of the non-finalizable metadata is set
- return value & (XAddressMetadataMask & ~XAddressMetadataFinalizable);
-}
-
-inline uintptr_t XAddress::offset(uintptr_t value) {
- return value & XAddressOffsetMask;
-}
-
-inline uintptr_t XAddress::good(uintptr_t value) {
- return offset(value) | XAddressGoodMask;
-}
-
-inline uintptr_t XAddress::good_or_null(uintptr_t value) {
- return is_null(value) ? 0 : good(value);
-}
-
-inline uintptr_t XAddress::finalizable_good(uintptr_t value) {
- return offset(value) | XAddressMetadataFinalizable | XAddressGoodMask;
-}
-
-inline uintptr_t XAddress::marked(uintptr_t value) {
- return offset(value) | XAddressMetadataMarked;
-}
-
-inline uintptr_t XAddress::marked0(uintptr_t value) {
- return offset(value) | XAddressMetadataMarked0;
-}
-
-inline uintptr_t XAddress::marked1(uintptr_t value) {
- return offset(value) | XAddressMetadataMarked1;
-}
-
-inline uintptr_t XAddress::remapped(uintptr_t value) {
- return offset(value) | XAddressMetadataRemapped;
-}
-
-inline uintptr_t XAddress::remapped_or_null(uintptr_t value) {
- return is_null(value) ? 0 : remapped(value);
-}
-
-#endif // SHARE_GC_X_XADDRESS_INLINE_HPP
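
The is_good_or_null() shortcut above deserves a worked example: testing "no bad bits set" is a single AND, and it classifies the all-zero (null) address as passable, which is exactly what the barrier fast path wants. A small sketch under the same assumed bit layout as before:

#include <cassert>
#include <cstdint>

int main() {
  const int shift = 42;                                      // assumed layout
  const uintptr_t metadata = uintptr_t(0xf) << shift;
  const uintptr_t good_bit = uintptr_t(1) << (shift + 2);    // "remapped" is good
  const uintptr_t bad_mask = good_bit ^ metadata;

  const uintptr_t offset   = 0x12345678;                     // arbitrary heap offset
  const uintptr_t good_ptr = offset | good_bit;
  const uintptr_t bad_ptr  = offset | (uintptr_t(1) << shift); // stale color

  // A single AND covers both "good" and "null": neither has a bad bit set.
  assert((good_ptr & bad_mask) == 0);
  assert((uintptr_t(0) & bad_mask) == 0);
  assert((bad_ptr & bad_mask) != 0);   // stale pointer must take the slow path
  return 0;
}
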
diff --git a/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp b/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp
deleted file mode 100644
index 6d3c7a295df..00000000000
--- a/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/x/xAddressSpaceLimit.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-
-static size_t address_space_limit() {
- size_t limit = 0;
-
- if (os::has_allocatable_memory_limit(&limit)) {
- return limit;
- }
-
- // No limit
- return SIZE_MAX;
-}
-
-size_t XAddressSpaceLimit::mark_stack() {
- // Allow mark stacks to occupy 10% of the address space
- const size_t limit = address_space_limit() / 10;
- return align_up(limit, XMarkStackSpaceExpandSize);
-}
-
-size_t XAddressSpaceLimit::heap_view() {
- // Allow all heap views to occupy 50% of the address space
- const size_t limit = address_space_limit() / MaxVirtMemFraction / XHeapViews;
- return align_up(limit, XGranuleSize);
-}
diff --git a/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp b/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp
deleted file mode 100644
index 9a3fcc27a29..00000000000
--- a/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XADDRESSSPACELIMIT_HPP
-#define SHARE_GC_X_XADDRESSSPACELIMIT_HPP
-
-#include "memory/allStatic.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class XAddressSpaceLimit : public AllStatic {
-public:
- static size_t mark_stack();
- static size_t heap_view();
-};
-
-#endif // SHARE_GC_X_XADDRESSSPACELIMIT_HPP
diff --git a/src/hotspot/share/gc/x/xAllocationFlags.hpp b/src/hotspot/share/gc/x/xAllocationFlags.hpp
deleted file mode 100644
index 307d68c65ac..00000000000
--- a/src/hotspot/share/gc/x/xAllocationFlags.hpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XALLOCATIONFLAGS_HPP
-#define SHARE_GC_X_XALLOCATIONFLAGS_HPP
-
-#include "gc/x/xBitField.hpp"
-#include "memory/allocation.hpp"
-
-//
-// Allocation flags layout
-// -----------------------
-//
-// 7 2 1 0
-// +-----+-+-+-+
-// |00000|1|1|1|
-// +-----+-+-+-+
-// | | | |
-// | | | * 0-0 Non-Blocking Flag (1-bit)
-// | | |
-// | | * 1-1 Worker Relocation Flag (1-bit)
-// | |
-// | * 2-2 Low Address Flag (1-bit)
-// |
-// * 7-3 Unused (5-bits)
-//
-
-class XAllocationFlags {
-private:
- typedef XBitField<uint8_t, bool, 0, 1> field_non_blocking;
- typedef XBitField<uint8_t, bool, 1, 1> field_worker_relocation;
- typedef XBitField<uint8_t, bool, 2, 1> field_low_address;
-
- uint8_t _flags;
-
-public:
- XAllocationFlags() :
- _flags(0) {}
-
- void set_non_blocking() {
- _flags |= field_non_blocking::encode(true);
- }
-
- void set_worker_relocation() {
- _flags |= field_worker_relocation::encode(true);
- }
-
- void set_low_address() {
- _flags |= field_low_address::encode(true);
- }
-
- bool non_blocking() const {
- return field_non_blocking::decode(_flags);
- }
-
- bool worker_relocation() const {
- return field_worker_relocation::decode(_flags);
- }
-
- bool low_address() const {
- return field_low_address::decode(_flags);
- }
-};
-
-#endif // SHARE_GC_X_XALLOCATIONFLAGS_HPP
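
The XBitField typedefs above pack three independent booleans into one byte so the flags travel by value and test cheaply. A generic sketch of such an encode/decode helper; this is a hand-rolled stand-in, not HotSpot's XBitField signature:

#include <cstdint>

// Shift selects the bit position inside the backing uint8_t; one bit per
// flag, matching the layout diagram above.
template <int Shift>
struct BoolField {
  static uint8_t encode(bool value) {
    return static_cast<uint8_t>((value ? 1u : 0u) << Shift);
  }
  static bool decode(uint8_t flags) {
    return ((flags >> Shift) & 1u) != 0;
  }
};

class AllocationFlags {
  using NonBlocking      = BoolField<0>;
  using WorkerRelocation = BoolField<1>;
  using LowAddress       = BoolField<2>;

  uint8_t _flags = 0;

public:
  void set_non_blocking()   { _flags |= NonBlocking::encode(true); }
  bool non_blocking() const { return NonBlocking::decode(_flags); }
  void set_low_address()    { _flags |= LowAddress::encode(true); }
  bool low_address() const  { return LowAddress::decode(_flags); }
};
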
diff --git a/src/hotspot/share/gc/x/xArguments.cpp b/src/hotspot/share/gc/x/xArguments.cpp
deleted file mode 100644
index 13cb302d14a..00000000000
--- a/src/hotspot/share/gc/x/xArguments.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xAddressSpaceLimit.hpp"
-#include "gc/x/xArguments.hpp"
-#include "gc/x/xCollectedHeap.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "gc/x/xHeuristics.hpp"
-#include "gc/shared/gcArguments.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/java.hpp"
-
-void XArguments::initialize_alignments() {
- SpaceAlignment = XGranuleSize;
- HeapAlignment = SpaceAlignment;
-}
-
-void XArguments::initialize_heap_flags_and_sizes() {
- // Nothing extra to do
-}
-
-void XArguments::initialize() {
- warning("Non-generational ZGC is deprecated.");
-
- // Check mark stack size
- const size_t mark_stack_space_limit = XAddressSpaceLimit::mark_stack();
- if (ZMarkStackSpaceLimit > mark_stack_space_limit) {
- if (!FLAG_IS_DEFAULT(ZMarkStackSpaceLimit)) {
- vm_exit_during_initialization("ZMarkStackSpaceLimit too large for limited address space");
- }
- FLAG_SET_DEFAULT(ZMarkStackSpaceLimit, mark_stack_space_limit);
- }
-
- // Enable NUMA by default
- if (FLAG_IS_DEFAULT(UseNUMA)) {
- FLAG_SET_DEFAULT(UseNUMA, true);
- }
-
- if (FLAG_IS_DEFAULT(ZFragmentationLimit)) {
- FLAG_SET_DEFAULT(ZFragmentationLimit, 25.0);
- }
-
- // Select number of parallel threads
- if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
- FLAG_SET_DEFAULT(ParallelGCThreads, XHeuristics::nparallel_workers());
- }
-
- if (ParallelGCThreads == 0) {
- vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
- }
-
- // Select number of concurrent threads
- if (FLAG_IS_DEFAULT(ConcGCThreads)) {
- FLAG_SET_DEFAULT(ConcGCThreads, XHeuristics::nconcurrent_workers());
- }
-
- if (ConcGCThreads == 0) {
- vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
- }
-
- // Large page size must match granule size
- if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != XGranuleSize) {
- vm_exit_during_initialization(err_msg("Incompatible -XX:LargePageSizeInBytes, only "
- SIZE_FORMAT "M large pages are supported by ZGC",
- XGranuleSize / M));
- }
-
- // The heuristics used when UseDynamicNumberOfGCThreads is
- // enabled default to using a ZAllocationSpikeTolerance of 1.
- if (UseDynamicNumberOfGCThreads && FLAG_IS_DEFAULT(ZAllocationSpikeTolerance)) {
- FLAG_SET_DEFAULT(ZAllocationSpikeTolerance, 1);
- }
-
-#ifdef COMPILER2
- // Enable loop strip mining by default
- if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
- FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
- if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
- FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
- }
- }
-#endif
-
- // CompressedOops not supported
- FLAG_SET_DEFAULT(UseCompressedOops, false);
-
- // Verification before startup and after exit not (yet) supported
- FLAG_SET_DEFAULT(VerifyDuringStartup, false);
- FLAG_SET_DEFAULT(VerifyBeforeExit, false);
-
- if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
- FLAG_SET_DEFAULT(ZVerifyRoots, true);
- FLAG_SET_DEFAULT(ZVerifyObjects, true);
- }
-}
-
-size_t XArguments::heap_virtual_to_physical_ratio() {
- return XHeapViews * XVirtualToPhysicalRatio;
-}
-
-CollectedHeap* XArguments::create_heap() {
- return new XCollectedHeap();
-}
-
-bool XArguments::is_supported() {
- return is_os_supported();
-}
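
Most of the deleted initialize() follows one ergonomics idiom: apply a computed default only when the user did not set the flag explicitly, and fail fast when an explicit value cannot be honored. A distilled sketch of that pattern outside the FLAG_* macros (the struct and names are hypothetical):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct Flag {
  size_t value;
  bool set_by_user;   // true if the flag appeared on the command line
};

void apply_limit(Flag& f, size_t limit) {
  if (f.value > limit) {
    if (f.set_by_user) {
      // An explicit user value that cannot be honored: fail fast,
      // mirroring vm_exit_during_initialization above.
      fprintf(stderr, "flag too large for the limited address space\n");
      exit(1);
    }
    // An ergonomic default may be clamped silently, since the user
    // never asked for it (the FLAG_IS_DEFAULT/FLAG_SET_DEFAULT case).
    f.value = limit;
  }
}
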
diff --git a/src/hotspot/share/gc/x/xArguments.hpp b/src/hotspot/share/gc/x/xArguments.hpp
deleted file mode 100644
index 196dd994cad..00000000000
--- a/src/hotspot/share/gc/x/xArguments.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XARGUMENTS_HPP
-#define SHARE_GC_X_XARGUMENTS_HPP
-
-#include "gc/shared/gcArguments.hpp"
-
-class CollectedHeap;
-
-class XArguments : AllStatic {
-public:
- static void initialize_alignments();
- static void initialize_heap_flags_and_sizes();
- static void initialize();
- static size_t heap_virtual_to_physical_ratio();
- static CollectedHeap* create_heap();
-
- static bool is_supported();
-
- static bool is_os_supported();
-};
-
-#endif // SHARE_GC_X_XARGUMENTS_HPP
diff --git a/src/hotspot/share/gc/x/xArray.hpp b/src/hotspot/share/gc/x/xArray.hpp
deleted file mode 100644
index b0b4b5bd81e..00000000000
--- a/src/hotspot/share/gc/x/xArray.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XARRAY_HPP
-#define SHARE_GC_X_XARRAY_HPP
-
-#include "memory/allocation.hpp"
-#include "utilities/growableArray.hpp"
-
-template <typename T> using XArray = GrowableArrayCHeap<T, mtGC>;
-
-template <typename T, bool Parallel>
-class XArrayIteratorImpl : public StackObj {
-private:
- const T* _next;
- const T* const _end;
-
- bool next_serial(T* elem);
- bool next_parallel(T* elem);
-
-public:
- XArrayIteratorImpl(const T* array, size_t length);
- XArrayIteratorImpl(const XArray* array);
-
- bool next(T* elem);
-};
-
-template <typename T> using XArrayIterator = XArrayIteratorImpl<T, false>;
-template <typename T> using XArrayParallelIterator = XArrayIteratorImpl<T, true>;
-
-#endif // SHARE_GC_X_XARRAY_HPP
diff --git a/src/hotspot/share/gc/x/xArray.inline.hpp b/src/hotspot/share/gc/x/xArray.inline.hpp
deleted file mode 100644
index 721e3130095..00000000000
--- a/src/hotspot/share/gc/x/xArray.inline.hpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XARRAY_INLINE_HPP
-#define SHARE_GC_X_XARRAY_INLINE_HPP
-
-#include "gc/x/xArray.hpp"
-
-#include "runtime/atomic.hpp"
-
-template <typename T, bool Parallel>
-inline bool XArrayIteratorImpl<T, Parallel>::next_serial(T* elem) {
- if (_next == _end) {
- return false;
- }
-
- *elem = *_next;
- _next++;
-
- return true;
-}
-
-template <typename T, bool Parallel>
-inline bool XArrayIteratorImpl<T, Parallel>::next_parallel(T* elem) {
- const T* old_next = Atomic::load(&_next);
-
- for (;;) {
- if (old_next == _end) {
- return false;
- }
-
- const T* const new_next = old_next + 1;
- const T* const prev_next = Atomic::cmpxchg(&_next, old_next, new_next);
- if (prev_next == old_next) {
- *elem = *old_next;
- return true;
- }
-
- old_next = prev_next;
- }
-}
-
-template <typename T, bool Parallel>
-inline XArrayIteratorImpl<T, Parallel>::XArrayIteratorImpl(const T* array, size_t length) :
- _next(array),
- _end(array + length) {}
-
-template <typename T, bool Parallel>
-inline XArrayIteratorImpl<T, Parallel>::XArrayIteratorImpl(const XArray<T>* array) :
- XArrayIteratorImpl(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {}
-
-template <typename T, bool Parallel>
-inline bool XArrayIteratorImpl<T, Parallel>::next(T* elem) {
- if (Parallel) {
- return next_parallel(elem);
- } else {
- return next_serial(elem);
- }
-}
-
-#endif // SHARE_GC_X_XARRAY_INLINE_HPP
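
next_parallel() above is a lock-free claim loop: each thread tries to CAS the shared cursor forward and, on success, owns the element it advanced past; on failure it retries from the value the failed CAS observed. The same idiom as a standalone sketch with std::atomic:

#include <atomic>
#include <cstddef>

template <typename T>
class ParallelIterator {
  std::atomic<const T*> _next;
  const T* const _end;

public:
  ParallelIterator(const T* array, size_t length)
    : _next(array), _end(array + length) {}

  // Many threads may call next() concurrently; each element is handed
  // out exactly once.
  bool next(T* elem) {
    const T* old_next = _next.load(std::memory_order_relaxed);
    for (;;) {
      if (old_next == _end) {
        return false;                        // array exhausted
      }
      // Try to claim old_next by advancing the cursor past it. On failure,
      // compare_exchange refreshes old_next with the observed cursor.
      if (_next.compare_exchange_weak(old_next, old_next + 1)) {
        *elem = *old_next;
        return true;
      }
    }
  }
};
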
diff --git a/src/hotspot/share/gc/x/xAttachedArray.hpp b/src/hotspot/share/gc/x/xAttachedArray.hpp
deleted file mode 100644
index f039f602aab..00000000000
--- a/src/hotspot/share/gc/x/xAttachedArray.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XATTACHEDARRAY_HPP
-#define SHARE_GC_X_XATTACHEDARRAY_HPP
-
-#include "utilities/globalDefinitions.hpp"
-
-class VMStructs;
-
-template <typename ObjectT, typename ArrayT>
-class XAttachedArray {
- friend class ::VMStructs;
-
-private:
- const size_t _length;
-
- static size_t object_size();
- static size_t array_size(size_t length);
-
-public:
- template <typename Allocator>
- static void* alloc(Allocator* allocator, size_t length);
-
- static void* alloc(size_t length);
- static void free(ObjectT* obj);
-
- XAttachedArray(size_t length);
-
- size_t length() const;
- ArrayT* operator()(const ObjectT* obj) const;
-};
-
-#endif // SHARE_GC_X_XATTACHEDARRAY_HPP
diff --git a/src/hotspot/share/gc/x/xAttachedArray.inline.hpp b/src/hotspot/share/gc/x/xAttachedArray.inline.hpp
deleted file mode 100644
index ba10de99673..00000000000
--- a/src/hotspot/share/gc/x/xAttachedArray.inline.hpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP
-#define SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP
-
-#include "gc/x/xAttachedArray.hpp"
-
-#include "memory/allocation.hpp"
-#include "utilities/align.hpp"
-
-template <typename ObjectT, typename ArrayT>
-inline size_t XAttachedArray<ObjectT, ArrayT>::object_size() {
- return align_up(sizeof(ObjectT), sizeof(ArrayT));
-}
-
-template <typename ObjectT, typename ArrayT>
-inline size_t XAttachedArray<ObjectT, ArrayT>::array_size(size_t length) {
- return sizeof(ArrayT) * length;
-}
-
-template <typename ObjectT, typename ArrayT>
-template <typename Allocator>
-inline void* XAttachedArray<ObjectT, ArrayT>::alloc(Allocator* allocator, size_t length) {
- // Allocate memory for object and array
- const size_t size = object_size() + array_size(length);
- void* const addr = allocator->alloc(size);
-
- // Placement new array
- void* const array_addr = reinterpret_cast<char*>(addr) + object_size();
- ::new (array_addr) ArrayT[length];
-
- // Return pointer to object
- return addr;
-}
-
-template <typename ObjectT, typename ArrayT>
-inline void* XAttachedArray<ObjectT, ArrayT>::alloc(size_t length) {
- struct Allocator {
- void* alloc(size_t size) const {
- return AllocateHeap(size, mtGC);
- }
- } allocator;
- return alloc(&allocator, length);
-}
-
-template <typename ObjectT, typename ArrayT>
-inline void XAttachedArray<ObjectT, ArrayT>::free(ObjectT* obj) {
- FreeHeap(obj);
-}
-
-template <typename ObjectT, typename ArrayT>
-inline XAttachedArray<ObjectT, ArrayT>::XAttachedArray(size_t length) :
- _length(length) {}
-
-template <typename ObjectT, typename ArrayT>
-inline size_t XAttachedArray<ObjectT, ArrayT>::length() const {
- return _length;
-}
-
-template <typename ObjectT, typename ArrayT>
-inline ArrayT* XAttachedArray<ObjectT, ArrayT>::operator()(const ObjectT* obj) const {
- return reinterpret_cast<ArrayT*>(reinterpret_cast<uintptr_t>(obj) + object_size());
-}
-
-#endif // SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP
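
The attached-array trick above co-allocates an object and its trailing array in one block: the array starts at the object's address plus the aligned object size, so a single allocation and a single free cover both. A condensed sketch of the layout technique, simplified to malloc/free and a fixed element type instead of the templated allocator:

#include <cstddef>
#include <cstdlib>
#include <new>

struct Header { size_t length; };   // plays the role of ObjectT

// Round the header up to the array element alignment, as align_up does.
inline size_t header_size() {
  return (sizeof(Header) + alignof(double) - 1) & ~(alignof(double) - 1);
}

// One malloc covers the header and its trailing array of `length` doubles.
Header* alloc_with_array(size_t length) {
  void* const addr = std::malloc(header_size() + length * sizeof(double));
  double* const array =
      reinterpret_cast<double*>(static_cast<char*>(addr) + header_size());
  for (size_t i = 0; i < length; i++) {
    new (array + i) double(0.0);         // placement-new each element
  }
  return new (addr) Header{length};      // header at the front of the block
}

// Recover the attached array from the header, like operator() above.
inline double* array_of(Header* h) {
  return reinterpret_cast<double*>(reinterpret_cast<char*>(h) + header_size());
}

inline void free_with_array(Header* h) { std::free(h); }   // one free for both
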
diff --git a/src/hotspot/share/gc/x/xBarrier.cpp b/src/hotspot/share/gc/x/xBarrier.cpp
deleted file mode 100644
index 726950092b2..00000000000
--- a/src/hotspot/share/gc/x/xBarrier.cpp
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "classfile/javaClasses.hpp"
-#include "gc/x/xBarrier.inline.hpp"
-#include "gc/x/xHeap.inline.hpp"
-#include "gc/x/xOop.inline.hpp"
-#include "gc/x/xThread.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "utilities/debug.hpp"
-
-template <bool finalizable>
-bool XBarrier::should_mark_through(uintptr_t addr) {
- // Finalizable marked oops can still exist on the heap after marking
- // has completed, in which case we just want to convert this into a
- // good oop and not push it on the mark stack.
- if (!during_mark()) {
- assert(XAddress::is_marked(addr), "Should be marked");
- assert(XAddress::is_finalizable(addr), "Should be finalizable");
- return false;
- }
-
- // During marking, we mark through already marked oops to avoid having
- // some large part of the object graph hidden behind a pushed, but not
- // yet flushed, entry on a mutator mark stack. Always marking through
- // allows the GC workers to proceed through the object graph even if a
- // mutator touched an oop first, which in turn will reduce the risk of
- // having to flush mark stacks multiple times to terminate marking.
- //
- // However, when doing finalizable marking we don't always want to mark
- // through. First, marking through an already strongly marked oop would
- // be wasteful, since we will then proceed to do finalizable marking on
- // an object which is, or will be, marked strongly. Second, marking
- // through an already finalizable marked oop would also be wasteful,
- // since such oops can never end up on a mutator mark stack and can
- // therefore not hide some part of the object graph from GC workers.
- if (finalizable) {
- return !XAddress::is_marked(addr);
- }
-
- // Mark through
- return true;
-}
-
-template <bool gc_thread, bool follow, bool finalizable, bool publish>
-uintptr_t XBarrier::mark(uintptr_t addr) {
- uintptr_t good_addr;
-
- if (XAddress::is_marked(addr)) {
- // Already marked, but try to mark through anyway
- good_addr = XAddress::good(addr);
- } else if (XAddress::is_remapped(addr)) {
- // Already remapped, but also needs to be marked
- good_addr = XAddress::good(addr);
- } else {
- // Needs to be both remapped and marked
- good_addr = remap(addr);
- }
-
- // Mark
- if (should_mark_through<finalizable>(addr)) {
- XHeap::heap()->mark_object<gc_thread, follow, finalizable, publish>(good_addr);
- }
-
- if (finalizable) {
- // Make the oop finalizable marked/good, instead of normal marked/good.
- // This is needed because an object might first become finalizable
- // marked by the GC, and then loaded by a mutator thread. In this case,
- // the mutator thread must be able to tell that the object needs to be
- // strongly marked. The finalizable bit in the oop exists to make sure
- // that a load of a finalizable marked oop will fall into the barrier
- // slow path so that we can mark the object as strongly reachable.
- return XAddress::finalizable_good(good_addr);
- }
-
- return good_addr;
-}
-
-uintptr_t XBarrier::remap(uintptr_t addr) {
- assert(!XAddress::is_good(addr), "Should not be good");
- assert(!XAddress::is_weak_good(addr), "Should not be weak good");
- return XHeap::heap()->remap_object(addr);
-}
-
-uintptr_t XBarrier::relocate(uintptr_t addr) {
- assert(!XAddress::is_good(addr), "Should not be good");
- assert(!XAddress::is_weak_good(addr), "Should not be weak good");
- return XHeap::heap()->relocate_object(addr);
-}
-
-uintptr_t XBarrier::relocate_or_mark(uintptr_t addr) {
- return during_relocate() ? relocate(addr) : mark<GCThread, Follow, Strong, Publish>(addr);
-}
-
-uintptr_t XBarrier::relocate_or_mark_no_follow(uintptr_t addr) {
- return during_relocate() ? relocate(addr) : mark<GCThread, DontFollow, Strong, Publish>(addr);
-}
-
-uintptr_t XBarrier::relocate_or_remap(uintptr_t addr) {
- return during_relocate() ? relocate(addr) : remap(addr);
-}
-
-//
-// Load barrier
-//
-uintptr_t XBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
- return relocate_or_mark(addr);
-}
-
-uintptr_t XBarrier::load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) {
- return relocate_or_mark_no_follow(addr);
-}
-
-void XBarrier::load_barrier_on_oop_fields(oop o) {
- assert(XAddress::is_good(XOop::to_address(o)), "Should be good");
- XLoadBarrierOopClosure cl;
- o->oop_iterate(&cl);
-}
-
-//
-// Weak load barrier
-//
-uintptr_t XBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) {
- return XAddress::is_weak_good(addr) ? XAddress::good(addr) : relocate_or_remap(addr);
-}
-
-uintptr_t XBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) {
- const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
- if (XHeap::heap()->is_object_strongly_live(good_addr)) {
- return good_addr;
- }
-
- // Not strongly live
- return 0;
-}
-
-uintptr_t XBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
- const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
- if (XHeap::heap()->is_object_live(good_addr)) {
- return good_addr;
- }
-
- // Not live
- return 0;
-}
-
-//
-// Keep alive barrier
-//
-uintptr_t XBarrier::keep_alive_barrier_on_oop_slow_path(uintptr_t addr) {
- assert(during_mark(), "Invalid phase");
-
- // Mark
- return mark<AnyThread, Follow, Strong, Publish>(addr);
-}
-
-uintptr_t XBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) {
- assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
- const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
- assert(XHeap::heap()->is_object_strongly_live(good_addr), "Should be live");
- return good_addr;
-}
-
-uintptr_t XBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
- assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
- const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
- assert(XHeap::heap()->is_object_live(good_addr), "Should be live");
- return good_addr;
-}
-
-//
-// Mark barrier
-//
-uintptr_t XBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
- assert(during_mark(), "Invalid phase");
- assert(XThread::is_worker(), "Invalid thread");
-
- // Mark
- return mark<GCThread, Follow, Strong, Overflow>(addr);
-}
-
-uintptr_t XBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
- assert(during_mark(), "Invalid phase");
- assert(XThread::is_worker(), "Invalid thread");
-
- // Mark
- return mark<GCThread, Follow, Finalizable, Overflow>(addr);
-}
-
-//
-// Narrow oop variants, never used.
-//
-oop XBarrier::load_barrier_on_oop_field(volatile narrowOop* p) {
- ShouldNotReachHere();
- return nullptr;
-}
-
-oop XBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
- ShouldNotReachHere();
- return nullptr;
-}
-
-void XBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
- ShouldNotReachHere();
-}
-
-oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
- ShouldNotReachHere();
- return nullptr;
-}
-
-oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
- ShouldNotReachHere();
- return nullptr;
-}
-
-oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
- ShouldNotReachHere();
- return nullptr;
-}
-
-oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
- ShouldNotReachHere();
- return nullptr;
-}
-
-oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
- ShouldNotReachHere();
- return nullptr;
-}
-
-#ifdef ASSERT
-
-// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
-void XBarrier::verify_on_weak(volatile oop* referent_addr) {
- if (referent_addr != nullptr) {
- uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset();
- oop obj = cast_to_oop(base);
- assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
- assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset()), "Sanity");
- }
-}
-
-#endif
-
-void XLoadBarrierOopClosure::do_oop(oop* p) {
- XBarrier::load_barrier_on_oop_field(p);
-}
-
-void XLoadBarrierOopClosure::do_oop(narrowOop* p) {
- ShouldNotReachHere();
-}
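
Every slow path above plugs into one load-barrier shape: a fast-path test of the pointer's color bits, a slow path that marks, relocates, or remaps as the phase requires, and a CAS that self-heals the field so later loads stay on the fast path. A minimal sketch of that shape; it is illustrative, not the code sequence HotSpot actually emits:

#include <atomic>
#include <cstdint>

using SlowPath = uintptr_t (*)(uintptr_t);

// field: the oop slot being loaded; bad_mask: the current phase's bad colors.
inline uintptr_t load_barrier(std::atomic<uintptr_t>* field,
                              uintptr_t bad_mask,
                              SlowPath slow_path) {
  uintptr_t addr = field->load(std::memory_order_relaxed);
  if ((addr & bad_mask) == 0) {
    return addr;                            // good or null: the common case
  }
  const uintptr_t good = slow_path(addr);   // mark/relocate/remap as needed
  // Self-heal the slot so subsequent loads take the fast path. Best effort:
  // a racing thread may already have healed it, which is fine.
  field->compare_exchange_strong(addr, good);
  return good;
}
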
diff --git a/src/hotspot/share/gc/x/xBarrier.hpp b/src/hotspot/share/gc/x/xBarrier.hpp
deleted file mode 100644
index e2ef210d7d2..00000000000
--- a/src/hotspot/share/gc/x/xBarrier.hpp
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XBARRIER_HPP
-#define SHARE_GC_X_XBARRIER_HPP
-
-#include "memory/allStatic.hpp"
-#include "memory/iterator.hpp"
-#include "oops/oop.hpp"
-
-typedef bool (*XBarrierFastPath)(uintptr_t);
-typedef uintptr_t (*XBarrierSlowPath)(uintptr_t);
-
-class XBarrier : public AllStatic {
-private:
- static const bool GCThread = true;
- static const bool AnyThread = false;
-
- static const bool Follow = true;
- static const bool DontFollow = false;
-
- static const bool Strong = false;
- static const bool Finalizable = true;
-
- static const bool Publish = true;
- static const bool Overflow = false;
-
- template <XBarrierFastPath fast_path> static void self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr);
-
- template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path> static oop barrier(volatile oop* p, oop o);
- template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path> static oop weak_barrier(volatile oop* p, oop o);
- template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path> static void root_barrier(oop* p, oop o);
-
- static bool is_good_or_null_fast_path(uintptr_t addr);
- static bool is_weak_good_or_null_fast_path(uintptr_t addr);
- static bool is_marked_or_null_fast_path(uintptr_t addr);
-
- static bool during_mark();
- static bool during_relocate();
- template <bool finalizable> static bool should_mark_through(uintptr_t addr);
- template <bool gc_thread, bool follow, bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
- static uintptr_t remap(uintptr_t addr);
- static uintptr_t relocate(uintptr_t addr);
- static uintptr_t relocate_or_mark(uintptr_t addr);
- static uintptr_t relocate_or_mark_no_follow(uintptr_t addr);
- static uintptr_t relocate_or_remap(uintptr_t addr);
-
- static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr);
- static uintptr_t load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr);
-
- static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr);
- static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr);
- static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr);
-
- static uintptr_t keep_alive_barrier_on_oop_slow_path(uintptr_t addr);
- static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr);
- static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr);
-
- static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
- static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
-
- static void verify_on_weak(volatile oop* referent_addr) NOT_DEBUG_RETURN;
-
-public:
- // Load barrier
- static oop load_barrier_on_oop(oop o);
- static oop load_barrier_on_oop_field(volatile oop* p);
- static oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
- static void load_barrier_on_oop_array(volatile oop* p, size_t length);
- static void load_barrier_on_oop_fields(oop o);
- static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
- static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
- static void load_barrier_on_root_oop_field(oop* p);
- static void load_barrier_on_invisible_root_oop_field(oop* p);
-
- // Weak load barrier
- static oop weak_load_barrier_on_oop_field(volatile oop* p);
- static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
- static oop weak_load_barrier_on_weak_oop(oop o);
- static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
- static oop weak_load_barrier_on_phantom_oop(oop o);
- static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
-
- // Is alive barrier
- static bool is_alive_barrier_on_weak_oop(oop o);
- static bool is_alive_barrier_on_phantom_oop(oop o);
-
- // Keep alive barrier
- static void keep_alive_barrier_on_oop(oop o);
- static void keep_alive_barrier_on_weak_oop_field(volatile oop* p);
- static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p);
- static void keep_alive_barrier_on_phantom_root_oop_field(oop* p);
-
- // Mark barrier
- static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
- static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable);
-
- // Narrow oop variants, never used.
- static oop load_barrier_on_oop_field(volatile narrowOop* p);
- static oop load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
- static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length);
- static oop load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
- static oop load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
- static oop weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
- static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
- static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
-};
-
-class XLoadBarrierOopClosure : public BasicOopIterateClosure {
-public:
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-#endif // SHARE_GC_X_XBARRIER_HPP
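
The `XBarrier` class removed above composes every barrier flavor from two non-type template parameters: an `XBarrierFastPath` predicate that is tried first, and an `XBarrierSlowPath` function that repairs the address when the predicate fails. A minimal self-contained sketch of that composition pattern (hypothetical names, not the deleted code itself):

```cpp
#include <cstdint>

// Stand-ins for XBarrierFastPath / XBarrierSlowPath (hypothetical example).
typedef bool (*FastPath)(uintptr_t);
typedef uintptr_t (*SlowPath)(uintptr_t);

static bool is_tagged_or_null(uintptr_t addr) { return addr == 0 || (addr & 1) != 0; }
static uintptr_t add_tag(uintptr_t addr)      { return addr | 1; }

// Mirrors the shape of XBarrier::barrier<fast_path, slow_path>: cheap check
// first, function-pointer slow path only when the check fails.
template <FastPath fast_path, SlowPath slow_path>
uintptr_t apply_barrier(uintptr_t addr) {
  if (fast_path(addr)) {
    return addr;          // Fast path: value already acceptable
  }
  return slow_path(addr); // Slow path: compute the acceptable value
}

int main() {
  return apply_barrier<is_tagged_or_null, add_tag>(2) == 3 ? 0 : 1;
}
```
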
diff --git a/src/hotspot/share/gc/x/xBarrier.inline.hpp b/src/hotspot/share/gc/x/xBarrier.inline.hpp
deleted file mode 100644
index 2319bda4d74..00000000000
--- a/src/hotspot/share/gc/x/xBarrier.inline.hpp
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XBARRIER_INLINE_HPP
-#define SHARE_GC_X_XBARRIER_INLINE_HPP
-
-#include "gc/x/xBarrier.hpp"
-
-#include "code/codeCache.hpp"
-#include "gc/x/xAddress.inline.hpp"
-#include "gc/x/xOop.inline.hpp"
-#include "gc/x/xResurrection.inline.hpp"
-#include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/continuation.hpp"
-
-// A self heal must always "upgrade" the address metadata bits in
-// accordance with the metadata bits state machine, which has the
-// valid state transitions as described below (where N is the GC
-// cycle).
-//
-// Note the subtleness of overlapping GC cycles. Specifically that
-// oops are colored Remapped(N) starting at relocation N and ending
-// at marking N + 1.
-//
-// +--- Mark Start
-// | +--- Mark End
-// | | +--- Relocate Start
-// | | | +--- Relocate End
-// | | | |
-// Marked |---N---|--N+1--|--N+2--|----
-// Finalizable |---N---|--N+1--|--N+2--|----
-// Remapped ----|---N---|--N+1--|--N+2--|
-//
-// VALID STATE TRANSITIONS
-//
-// Marked(N) -> Remapped(N)
-// -> Marked(N + 1)
-// -> Finalizable(N + 1)
-//
-// Finalizable(N) -> Marked(N)
-// -> Remapped(N)
-// -> Marked(N + 1)
-// -> Finalizable(N + 1)
-//
-// Remapped(N) -> Marked(N + 1)
-// -> Finalizable(N + 1)
-//
-// PHASE VIEW
-//
-// XPhaseMark
-// Load & Mark
-// Marked(N) <- Marked(N - 1)
-// <- Finalizable(N - 1)
-// <- Remapped(N - 1)
-// <- Finalizable(N)
-//
-// Mark(Finalizable)
-// Finalizable(N) <- Marked(N - 1)
-// <- Finalizable(N - 1)
-// <- Remapped(N - 1)
-//
-// Load(AS_NO_KEEPALIVE)
-// Remapped(N - 1) <- Marked(N - 1)
-// <- Finalizable(N - 1)
-//
-// XPhaseMarkCompleted (Resurrection blocked)
-// Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive
-// Marked(N) <- Marked(N - 1)
-// <- Finalizable(N - 1)
-// <- Remapped(N - 1)
-// <- Finalizable(N)
-//
-// Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE)
-// Remapped(N - 1) <- Marked(N - 1)
-// <- Finalizable(N - 1)
-//
-// XPhaseMarkCompleted (Resurrection unblocked)
-// Load
-// Marked(N) <- Finalizable(N)
-//
-// XPhaseRelocate
-// Load & Load(AS_NO_KEEPALIVE)
-// Remapped(N) <- Marked(N)
-// <- Finalizable(N)
-
-template <XBarrierFastPath fast_path>
-inline void XBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
- if (heal_addr == 0) {
- // Never heal with null since it interacts badly with reference processing.
- // A mutator clearing an oop would be similar to calling Reference.clear(),
- // which would make the reference non-discoverable or silently dropped
- // by the reference processor.
- return;
- }
-
- assert(!fast_path(addr), "Invalid self heal");
- assert(fast_path(heal_addr), "Invalid self heal");
-
- for (;;) {
- // Heal
- const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr, memory_order_relaxed);
- if (prev_addr == addr) {
- // Success
- return;
- }
-
- if (fast_path(prev_addr)) {
- // Must not self heal
- return;
- }
-
- // The oop location was healed by another barrier, but still needs upgrading.
- // Re-apply healing to make sure the oop is not left with weaker (remapped or
- // finalizable) metadata bits than what this barrier tried to apply.
- assert(XAddress::offset(prev_addr) == XAddress::offset(heal_addr), "Invalid offset");
- addr = prev_addr;
- }
-}
-
-template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path>
-inline oop XBarrier::barrier(volatile oop* p, oop o) {
- const uintptr_t addr = XOop::to_address(o);
-
- // Fast path
- if (fast_path(addr)) {
- return XOop::from_address(addr);
- }
-
- // Slow path
- const uintptr_t good_addr = slow_path(addr);
-
- if (p != nullptr) {
- self_heal<fast_path>(p, addr, good_addr);
- }
-
- return XOop::from_address(good_addr);
-}
-
-template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path>
-inline oop XBarrier::weak_barrier(volatile oop* p, oop o) {
- const uintptr_t addr = XOop::to_address(o);
-
- // Fast path
- if (fast_path(addr)) {
- // Return the good address instead of the weak good address
- // to ensure that the currently active heap view is used.
- return XOop::from_address(XAddress::good_or_null(addr));
- }
-
- // Slow path
- const uintptr_t good_addr = slow_path(addr);
-
- if (p != nullptr) {
- // The slow path returns a good/marked address or null, but we never mark
- // oops in a weak load barrier so we always heal with the remapped address.
- self_heal<fast_path>(p, addr, XAddress::remapped_or_null(good_addr));
- }
-
- return XOop::from_address(good_addr);
-}
-
-template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path>
-inline void XBarrier::root_barrier(oop* p, oop o) {
- const uintptr_t addr = XOop::to_address(o);
-
- // Fast path
- if (fast_path(addr)) {
- return;
- }
-
- // Slow path
- const uintptr_t good_addr = slow_path(addr);
-
- // Non-atomic healing helps speed up root scanning. This is safe to do
- // since we are always healing roots in a safepoint, or under a lock,
- // which ensures we are never racing with mutators modifying roots while
- // we are healing them. It's also safe in case multiple GC threads try
- // to heal the same root if it is aligned, since they would always heal
- // the root in the same way and it does not matter in which order it
- // happens. For misaligned oops, there needs to be mutual exclusion.
- *p = XOop::from_address(good_addr);
-}
-
-inline bool XBarrier::is_good_or_null_fast_path(uintptr_t addr) {
- return XAddress::is_good_or_null(addr);
-}
-
-inline bool XBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
- return XAddress::is_weak_good_or_null(addr);
-}
-
-inline bool XBarrier::is_marked_or_null_fast_path(uintptr_t addr) {
- return XAddress::is_marked_or_null(addr);
-}
-
-inline bool XBarrier::during_mark() {
- return XGlobalPhase == XPhaseMark;
-}
-
-inline bool XBarrier::during_relocate() {
- return XGlobalPhase == XPhaseRelocate;
-}
-
-//
-// Load barrier
-//
-inline oop XBarrier::load_barrier_on_oop(oop o) {
- return load_barrier_on_oop_field_preloaded((oop*)nullptr, o);
-}
-
-inline oop XBarrier::load_barrier_on_oop_field(volatile oop* p) {
- const oop o = Atomic::load(p);
- return load_barrier_on_oop_field_preloaded(p, o);
-}
-
-inline oop XBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
- return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
-}
-
-inline void XBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
- for (volatile const oop* const end = p + length; p < end; p++) {
- load_barrier_on_oop_field(p);
- }
-}
-
-inline oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
- verify_on_weak(p);
-
- if (XResurrection::is_blocked()) {
- return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
- }
-
- return load_barrier_on_oop_field_preloaded(p, o);
-}
-
-inline oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
- if (XResurrection::is_blocked()) {
- return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
- }
-
- return load_barrier_on_oop_field_preloaded(p, o);
-}
-
-inline void XBarrier::load_barrier_on_root_oop_field(oop* p) {
- const oop o = *p;
- root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
-}
-
-inline void XBarrier::load_barrier_on_invisible_root_oop_field(oop* p) {
- const oop o = *p;
- root_barrier<is_good_or_null_fast_path, load_barrier_on_invisible_root_oop_slow_path>(p, o);
-}
-
-//
-// Weak load barrier
-//
-inline oop XBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
- assert(!XResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
- const oop o = Atomic::load(p);
- return weak_load_barrier_on_oop_field_preloaded(p, o);
-}
-
-inline oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
- return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
-}
-
-inline oop XBarrier::weak_load_barrier_on_weak_oop(oop o) {
- return weak_load_barrier_on_weak_oop_field_preloaded((oop*)nullptr, o);
-}
-
-inline oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
- verify_on_weak(p);
-
- if (XResurrection::is_blocked()) {
- return barrier<is_marked_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
- }
-
- return weak_load_barrier_on_oop_field_preloaded(p, o);
-}
-
-inline oop XBarrier::weak_load_barrier_on_phantom_oop(oop o) {
- return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)nullptr, o);
-}
-
-inline oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
- if (XResurrection::is_blocked()) {
- return barrier<is_marked_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
- }
-
- return weak_load_barrier_on_oop_field_preloaded(p, o);
-}
-
-//
-// Is alive barrier
-//
-inline bool XBarrier::is_alive_barrier_on_weak_oop(oop o) {
- // Check if oop is logically non-null. This operation
- // is only valid when resurrection is blocked.
- assert(XResurrection::is_blocked(), "Invalid phase");
- return weak_load_barrier_on_weak_oop(o) != nullptr;
-}
-
-inline bool XBarrier::is_alive_barrier_on_phantom_oop(oop o) {
- // Check if oop is logically non-null. This operation
- // is only valid when resurrection is blocked.
- assert(XResurrection::is_blocked(), "Invalid phase");
- return weak_load_barrier_on_phantom_oop(o) != nullptr;
-}
-
-//
-// Keep alive barrier
-//
-inline void XBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
- assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
- const oop o = Atomic::load(p);
- barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
-}
-
-inline void XBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
- assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
- const oop o = Atomic::load(p);
- barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
-}
-
-inline void XBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
- // The keep alive operation is only valid when resurrection is blocked.
- //
- // Except with Loom, where we intentionally arm nmethods after
- // unlinking, to get a sense of which nmethods are alive. This will trigger
- // the keep alive barriers, but the oops are healed and the slow-paths
- // will not trigger. We have stronger checks in the slow-paths.
- assert(XResurrection::is_blocked() || (CodeCache::contains((void*)p)),
- "This operation is only valid when resurrection is blocked");
- const oop o = *p;
- root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
-}
-
-inline void XBarrier::keep_alive_barrier_on_oop(oop o) {
- const uintptr_t addr = XOop::to_address(o);
- assert(XAddress::is_good(addr), "Invalid address");
-
- if (during_mark()) {
- keep_alive_barrier_on_oop_slow_path(addr);
- }
-}
-
-//
-// Mark barrier
-//
-inline void XBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
- const oop o = Atomic::load(p);
-
- if (finalizable) {
- barrier<is_marked_or_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
- } else {
- const uintptr_t addr = XOop::to_address(o);
- if (XAddress::is_good(addr)) {
- // Mark through good oop
- mark_barrier_on_oop_slow_path(addr);
- } else {
- // Mark through bad oop
- barrier<is_good_or_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
- }
- }
-}
-
-inline void XBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
- for (volatile const oop* const end = p + length; p < end; p++) {
- mark_barrier_on_oop_field(p, finalizable);
- }
-}
-
-#endif // SHARE_GC_X_XBARRIER_INLINE_HPP
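
The `self_heal` loop deleted above is a lost-update-tolerant CAS retry: keep trying to install the healed value until this thread wins, or until a competing barrier has installed a value that already satisfies this barrier's fast path (in which case healing again could downgrade it). A rough standalone sketch of the idiom (simplified types and predicate, not the removed code):

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the fast-path predicate a barrier heals toward.
static bool is_acceptable(uintptr_t v) { return (v & 1) != 0; }

// Retry the CAS until we install 'healed' or observe that another thread
// already installed an acceptable value.
void self_heal(std::atomic<uintptr_t>& slot, uintptr_t expected, uintptr_t healed) {
  while (!slot.compare_exchange_strong(expected, healed,
                                       std::memory_order_relaxed)) {
    // CAS failed: 'expected' now holds the value the other thread wrote.
    if (is_acceptable(expected)) {
      return; // Healed at least as strongly by someone else; must not overwrite
    }
    // Still a bad value; retry against the newly observed value.
  }
}
```
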
diff --git a/src/hotspot/share/gc/x/xBarrierSet.cpp b/src/hotspot/share/gc/x/xBarrierSet.cpp
deleted file mode 100644
index cee53e8c3fa..00000000000
--- a/src/hotspot/share/gc/x/xBarrierSet.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xBarrierSet.hpp"
-#include "gc/x/xBarrierSetAssembler.hpp"
-#include "gc/x/xBarrierSetNMethod.hpp"
-#include "gc/x/xBarrierSetStackChunk.hpp"
-#include "gc/x/xGlobals.hpp"
-#include "gc/x/xHeap.inline.hpp"
-#include "gc/x/xStackWatermark.hpp"
-#include "gc/x/xThreadLocalData.hpp"
-#include "runtime/javaThread.hpp"
-#include "utilities/macros.hpp"
-#ifdef COMPILER1
-#include "gc/x/c1/xBarrierSetC1.hpp"
-#endif
-#ifdef COMPILER2
-#include "gc/x/c2/xBarrierSetC2.hpp"
-#endif
-
-class XBarrierSetC1;
-class XBarrierSetC2;
-
-XBarrierSet::XBarrierSet() :
- BarrierSet(make_barrier_set_assembler<XBarrierSetAssembler>(),
- make_barrier_set_c1<XBarrierSetC1>(),
- make_barrier_set_c2<XBarrierSetC2>(),
- new XBarrierSetNMethod(),
- new XBarrierSetStackChunk(),
- BarrierSet::FakeRtti(BarrierSet::XBarrierSet)) {}
-
-XBarrierSetAssembler* XBarrierSet::assembler() {
- BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
- return reinterpret_cast<XBarrierSetAssembler*>(bsa);
-}
-
-bool XBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) {
- assert((decorators & AS_RAW) == 0, "Unexpected decorator");
- //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator");
-
- if (is_reference_type(type)) {
- assert((decorators & (IN_HEAP | IN_NATIVE)) != 0, "Where is reference?");
- // Barrier needed even when IN_NATIVE, to allow concurrent scanning.
- return true;
- }
-
- // Barrier not needed
- return false;
-}
-
-void XBarrierSet::on_thread_create(Thread* thread) {
- // Create thread local data
- XThreadLocalData::create(thread);
-}
-
-void XBarrierSet::on_thread_destroy(Thread* thread) {
- // Destroy thread local data
- XThreadLocalData::destroy(thread);
-}
-
-void XBarrierSet::on_thread_attach(Thread* thread) {
- // Set thread local address bad mask
- XThreadLocalData::set_address_bad_mask(thread, XAddressBadMask);
- if (thread->is_Java_thread()) {
- JavaThread* const jt = JavaThread::cast(thread);
- StackWatermark* const watermark = new XStackWatermark(jt);
- StackWatermarkSet::add_watermark(jt, watermark);
- }
-}
-
-void XBarrierSet::on_thread_detach(Thread* thread) {
- // Flush and free any remaining mark stacks
- XHeap::heap()->mark_flush_and_free(thread);
-}
-
-void XBarrierSet::print_on(outputStream* st) const {
- st->print_cr("XBarrierSet");
-}
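
`on_thread_attach` above seeds each attaching thread with the global `XAddressBadMask`, which is what lets the barrier fast path test a pointer against purely thread-local state, with no synchronization against the GC. A compact illustration of that pattern (illustrative names, not the removed API):

```cpp
#include <cstdint>

// Illustrative stand-ins for XAddressBadMask / XThreadLocalData.
static uintptr_t global_bad_mask = uintptr_t(1) << 44;

struct ThreadLocalData {
  uintptr_t address_bad_mask;
};

// On attach, copy the current global mask into the thread.
void on_thread_attach(ThreadLocalData& tld) {
  tld.address_bad_mask = global_bad_mask;
}

// The load-barrier fast path then reduces to a single mask test.
bool needs_slow_path(const ThreadLocalData& tld, uintptr_t addr) {
  return (addr & tld.address_bad_mask) != 0;
}

int main() {
  ThreadLocalData tld;
  on_thread_attach(tld);
  return needs_slow_path(tld, global_bad_mask) ? 0 : 1;
}
```
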
diff --git a/src/hotspot/share/gc/x/xBarrierSet.hpp b/src/hotspot/share/gc/x/xBarrierSet.hpp
deleted file mode 100644
index 3f1eb760033..00000000000
--- a/src/hotspot/share/gc/x/xBarrierSet.hpp
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XBARRIERSET_HPP
-#define SHARE_GC_X_XBARRIERSET_HPP
-
-#include "gc/shared/barrierSet.hpp"
-
-class XBarrierSetAssembler;
-
-class XBarrierSet : public BarrierSet {
-public:
- XBarrierSet();
-
- static XBarrierSetAssembler* assembler();
- static bool barrier_needed(DecoratorSet decorators, BasicType type);
-
- virtual void on_thread_create(Thread* thread);
- virtual void on_thread_destroy(Thread* thread);
- virtual void on_thread_attach(Thread* thread);
- virtual void on_thread_detach(Thread* thread);
-
- virtual void print_on(outputStream* st) const;
-
- template <DecoratorSet decorators, typename BarrierSetT = XBarrierSet>
- class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
- private:
- typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
-
- template <DecoratorSet expected>
- static void verify_decorators_present();
-
- template <DecoratorSet expected>
- static void verify_decorators_absent();
-
- static oop* field_addr(oop base, ptrdiff_t offset);
-
- template <typename T>
- static oop load_barrier_on_oop_field_preloaded(T* addr, oop o);
-
- template <typename T>
- static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o);
-
- public:
- //
- // In heap
- //
- template <typename T>
- static oop oop_load_in_heap(T* addr);
- static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
-
- template <typename T>
- static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
- static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value);
-
- template <typename T>
- static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
- static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);
-
- template <typename T>
- static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
- arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
- size_t length);
-
- static void clone_in_heap(oop src, oop dst, size_t size);
-
- //
- // Not in heap
- //
- template <typename T>
- static oop oop_load_not_in_heap(T* addr);
-
- template <typename T>
- static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value);
-
- template <typename T>
- static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
- };
-};
-
-template<> struct BarrierSet::GetName<XBarrierSet> {
- static const BarrierSet::Name value = BarrierSet::XBarrierSet;
-};
-
-template<> struct BarrierSet::GetType<BarrierSet::XBarrierSet> {
- typedef ::XBarrierSet type;
-};
-
-#endif // SHARE_GC_X_XBARRIERSET_HPP
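
The two specializations at the end of the removed header implement BarrierSet's bidirectional name/type mapping: `GetName` maps the concrete class to its enum tag, and `GetType` maps the tag back to the class, so decorator-driven access code can resolve the barrier set at compile time. A minimal free-standing version of the same trait trick (hypothetical names):

```cpp
// Two-way enum <-> type mapping via template specialization (illustrative).
enum class BarrierName { MySet };

struct MyBarrierSet {};

template <typename T> struct GetName;     // primary template left undefined
template <> struct GetName<MyBarrierSet> {
  static const BarrierName value = BarrierName::MySet;
};

template <BarrierName n> struct GetType;  // primary template left undefined
template <> struct GetType<BarrierName::MySet> {
  typedef MyBarrierSet type;
};

// Compile-time check that the mapping round-trips.
static_assert(GetName<GetType<BarrierName::MySet>::type>::value == BarrierName::MySet,
              "round trip");

int main() { return 0; }
```
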
diff --git a/src/hotspot/share/gc/x/xBarrierSet.inline.hpp b/src/hotspot/share/gc/x/xBarrierSet.inline.hpp
deleted file mode 100644
index 3d92c38647d..00000000000
--- a/src/hotspot/share/gc/x/xBarrierSet.inline.hpp
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_X_XBARRIERSET_INLINE_HPP
-#define SHARE_GC_X_XBARRIERSET_INLINE_HPP
-
-#include "gc/x/xBarrierSet.hpp"
-
-#include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "gc/x/xBarrier.inline.hpp"
-#include "utilities/debug.hpp"
-
-template <DecoratorSet decorators, typename BarrierSetT>
-template <DecoratorSet expected>
-inline void XBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_present() {
- if ((decorators & expected) == 0) {
- fatal("Using unsupported access decorators");
- }
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-template <DecoratorSet expected>
-inline void XBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_absent() {
- if ((decorators & expected) != 0) {
- fatal("Using unsupported access decorators");
- }
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-inline oop* XBarrierSet::AccessBarrier<decorators, BarrierSetT>::field_addr(oop base, ptrdiff_t offset) {
- assert(base != nullptr, "Invalid base");
- return reinterpret_cast<oop*>(reinterpret_cast<intptr_t>((void*)base) + offset);
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_oop_field_preloaded(T* addr, oop o) {
- verify_decorators_absent