8217856: ZGC: Break out C2 matching rules into separate AD file

Reviewed-by: neliasso, kvn
Per Lidén 2019-01-29 10:23:38 +01:00
parent 9f3059e20c
commit 667bba8e95
3 changed files with 163 additions and 172 deletions
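Summary of the change: the ZGC load-barrier matching rules previously embedded in x86_64.ad behind #if INCLUDE_ZGC move into a dedicated z_x86_64.ad file, and the ADLC source list in GensrcAdlc.gmk now pulls that file in only when the zgc JVM feature is enabled.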

make/hotspot/gensrc/GensrcAdlc.gmk

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -142,6 +142,12 @@ ifeq ($(call check-jvm-feature, compiler2), true)
       )))
   endif
 
+  ifeq ($(call check-jvm-feature, zgc), true)
+    AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
+        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
+      )))
+  endif
+
   SINGLE_AD_SRCFILE := $(ADLC_SUPPORT_DIR)/all-ad-src.ad
 
   INSERT_FILENAME_AWK_SCRIPT := \
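
Note on the hunk above: the new zgc block follows the same uniq/wildcard pattern as the other conditionally included AD sources. When the zgc feature is enabled, it appends the per-CPU ZGC rules file to AD_SRC_FILES, which is later concatenated into the single all-ad-src.ad fed to the ADLC. On x86_64 (HOTSPOT_TARGET_CPU_ARCH=x86, HOTSPOT_TARGET_CPU=x86_64) the wildcard resolves to the new file below, src/hotspot/cpu/x86/gc/z/z_x86_64.ad; with the feature disabled, nothing matches and no ZGC rules reach the match compiler.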

src/hotspot/cpu/x86/gc/z/z_x86_64.ad (new file)

@@ -0,0 +1,155 @@
//
// Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
source %{

#include "gc/z/zBarrierSetAssembler.hpp"

static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
  assert(dst != r12, "Invalid register");
  assert(dst != r15, "Invalid register");
  assert(dst != rsp, "Invalid register");

  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
  __ lea(dst, src);
  __ call(RuntimeAddress(stub));
}

%}
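
The helper above is the whole slow path shared by the four rules below: lea materializes the address of the oop field into dst, and the call goes to a stub chosen both by barrier strength (strong vs. weak) and by the destination register, so the stub can leave the healed oop directly in dst. The asserts exclude r12, r15 and rsp, registers HotSpot reserves on x86_64 (r12 heap base, r15 current thread, rsp stack pointer), for which no stubs exist. Below is a minimal standalone C++ sketch of that per-register stub-table idea; it is not JDK code, and StubTable/stub_for are hypothetical names standing in for ZBarrierSetAssembler's real stub storage.

#include <array>
#include <cstdio>

using StubAddress = const void*;

// Hypothetical model: one pre-generated slow-path stub per destination
// register, in a strong and a weak flavor, mirroring
// load_barrier_slow_stub(dst) / load_barrier_weak_slow_stub(dst) above.
struct StubTable {
  std::array<StubAddress, 16> strong{};  // indexed by GP register encoding
  std::array<StubAddress, 16> weak{};

  StubAddress stub_for(int reg_encoding, bool is_weak) const {
    return is_weak ? weak[reg_encoding] : strong[reg_encoding];
  }
};

int main() {
  StubTable table;
  table.strong[0] = reinterpret_cast<StubAddress>(0x1000);  // stand-in for rax's strong stub
  table.weak[0]   = reinterpret_cast<StubAddress>(0x2000);  // stand-in for rax's weak stub
  std::printf("strong: %p, weak: %p\n",
              table.stub_for(0, false), table.stub_for(0, true));
  return 0;
}

Because each stub already knows its destination register, the instruct rules never need an extra move to place the healed oop, which is why the helper takes dst rather than returning a value.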
// For XMM and YMM enabled processors
instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                      rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
                                      rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                      rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                      rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
  match(Set dst (LoadBarrierSlowReg src));
  predicate(UseAVX <= 2);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
                                rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
  match(Set dst (LoadBarrierSlowReg src));
  predicate(UseAVX == 3);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For XMM and YMM enabled processors
instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                          rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
                                          rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                          rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                          rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
  match(Set dst (LoadBarrierWeakSlowReg src));
  predicate(UseAVX <= 2);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                    rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
                                    rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                    rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                    rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                    rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                    rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                    rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                    rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
  match(Set dst (LoadBarrierWeakSlowReg src));
  predicate(UseAVX == 3);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}

  ins_pipe(pipe_slow);
%}
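
A word on the predicate split above: the slow-path call can clobber any vector register, so each rule must list every register the CPU exposes as KILLed. With UseAVX <= 2 that is xmm0-xmm15; AVX-512 (UseAVX == 3) adds xmm16-xmm31, hence the separate Zmm variants. The sketch below, with function names of my own choosing, just restates which rule C2's matcher would pick at each AVX level:

#include <cassert>
#include <string>

// Restates the .ad predicates: pick the variant whose KILL set covers
// every vector register the CPU actually has.
static std::string select_rule(int use_avx, bool weak) {
  const bool zmm = (use_avx == 3);  // AVX-512 exposes xmm16-xmm31
  if (weak) {
    return zmm ? "zLoadBarrierWeakSlowRegZmm" : "zLoadBarrierWeakSlowRegXmmAndYmm";
  }
  return zmm ? "zLoadBarrierSlowRegZmm" : "zLoadBarrierSlowRegXmmAndYmm";
}

int main() {
  assert(select_rule(2, false) == "zLoadBarrierSlowRegXmmAndYmm");
  assert(select_rule(3, false) == "zLoadBarrierSlowRegZmm");
  assert(select_rule(2, true)  == "zLoadBarrierWeakSlowRegXmmAndYmm");
  assert(select_rule(3, true)  == "zLoadBarrierWeakSlowRegZmm");
  return 0;
}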

src/hotspot/cpu/x86/x86_64.ad

@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -526,12 +526,6 @@ reg_class int_rdi_reg(RDI);
 %}
 
-source_hpp %{
-#if INCLUDE_ZGC
-#include "gc/z/zBarrierSetAssembler.hpp"
-#endif
-%}
-
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
@@ -12711,170 +12705,6 @@ instruct RethrowException()
   ins_pipe(pipe_jmp);
 %}
 
-//
-// Execute ZGC load barrier (strong) slow path
-//
-
-// For XMM and YMM enabled processors
-instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
-                                     rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
-                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
-  match(Set dst (LoadBarrierSlowReg mem));
-  predicate(UseAVX <= 2);
-
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15);
-
-  format %{"LoadBarrierSlowRegXmmAndYmm $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d, $mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
-// For ZMM enabled processors
-instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
-                               rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
-                               rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                               rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
-                               rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
-                               rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
-                               rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
-                               rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
-  match(Set dst (LoadBarrierSlowReg mem));
-  predicate(UseAVX == 3);
-
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15,
-         KILL x16, KILL x17, KILL x18, KILL x19,
-         KILL x20, KILL x21, KILL x22, KILL x23,
-         KILL x24, KILL x25, KILL x26, KILL x27,
-         KILL x28, KILL x29, KILL x30, KILL x31);
-
-  format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d, $mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
-//
-// Execute ZGC load barrier (weak) slow path
-//
-
-// For XMM and YMM enabled processors
-instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
-                                         rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
-                                         rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                         rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                                         rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
-  match(Set dst (LoadBarrierWeakSlowReg mem));
-  predicate(UseAVX <= 2);
-
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15);
-
-  format %{"LoadBarrierWeakSlowRegXmmAndYmm $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
-// For ZMM enabled processors
-instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
-                                   rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
-                                   rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
-                                   rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
-                                   rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
-                                   rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
-                                   rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
-                                   rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
-  match(Set dst (LoadBarrierWeakSlowReg mem));
-  predicate(UseAVX == 3);
-
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3,
-         KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11,
-         KILL x12, KILL x13, KILL x14, KILL x15,
-         KILL x16, KILL x17, KILL x18, KILL x19,
-         KILL x20, KILL x21, KILL x22, KILL x23,
-         KILL x24, KILL x25, KILL x26, KILL x27,
-         KILL x28, KILL x29, KILL x30, KILL x31);
-
-  format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
-  ins_encode %{
-#if INCLUDE_ZGC
-    Register d = $dst$$Register;
-    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-
-    assert(d != r12, "Can't be R12!");
-    assert(d != r15, "Can't be R15!");
-    assert(d != rsp, "Can't be RSP!");
-
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
-#else
-    ShouldNotReachHere();
-#endif
-  %}
-  ins_pipe(pipe_slow);
-%}
-
 // ============================================================================
 
 // This name is KNOWN by the ADLC and cannot be changed.
 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
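
The rules deleted above had to wrap their encodings in #if INCLUDE_ZGC with a ShouldNotReachHere() fallback, because x86_64.ad is compiled into every build. Their replacements in z_x86_64.ad need no such guard: the GensrcAdlc.gmk hunk only feeds that file to the ADLC when the zgc feature is on, so non-ZGC builds never see the rules at all. A toy C++ sketch of the difference, using an assumed INCLUDE_ZGC flag (compile with -DINCLUDE_ZGC=1 to take the enabled path):

#include <cstdio>
#include <cstdlib>

#ifndef INCLUDE_ZGC
#define INCLUDE_ZGC 0  // assumed stand-in for the real build flag
#endif

// Old style: the emitter is always compiled and guards itself; a match in
// a non-ZGC build dies at runtime, mirroring ShouldNotReachHere().
static void emit_barrier_guarded() {
#if INCLUDE_ZGC
  std::puts("emit ZGC load barrier slow path");
#else
  std::fputs("should not reach here\n", stderr);
  std::abort();
#endif
}

int main() {
  // New style needs no guard at all: the whole source file (here, the .ad
  // file) is simply excluded from the build when the feature is off.
#if INCLUDE_ZGC
  emit_barrier_guarded();
#else
  std::puts("zgc feature off: the guarded emitter is unreachable by design");
#endif
  return 0;
}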