[X86] Introduce a new td file to hold some of the non-instruction patterns from SSE and AVX512
This patch moves some similar non-instruction patterns from X86InstrSSE.td and X86InstrAVX512.td into a common file. This is intended as a starting point; there are many other optimization patterns that exist in both files and could be moved here.

Differential Revision: https://reviews.llvm.org/D37455

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@312649 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent e429f678d6
commit 4d0cce8013
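For illustration, here is a minimal sketch of what the new shared file (X86InstrVecCompiler.td, included from X86InstrInfo.td in the diff below) might look like: the redundant-bitcast patterns pulled out of the SSE and AVX-512 files into one place. The exact layout, the selection of patterns shown, and the HasAVX512 predicate guard are assumptions made for this sketch, not necessarily how the committed file is organized.

// X86InstrVecCompiler.td (sketch) - non-instruction patterns shared by SSE and AVX-512.

// Bitcasts between 128-bit vector types are free: the original register is
// returned and no instruction is emitted.
def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;

// 512-bit bitcasts; this sketch assumes they are guarded by HasAVX512, which
// may differ from the predicates used in the actual file.
let Predicates = [HasAVX512] in {
  def : Pat<(v8f64  (bitconvert (v8i64  VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
}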
@@ -428,39 +428,6 @@ multiclass AVX512_maskable_logic<bits<8> O, Format F, X86VectorVTInfo _,
                                 _.ImmAllZerosV))],
                  "$src0 = $dst", itin, IsCommutable>;

// Bitcasts between 512-bit vector types. Return the original type since
// no instruction is needed for the conversion.
def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;

// Alias instruction that maps zero vector to pxor / xorp* for AVX-512.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
@@ -804,20 +771,6 @@ defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v32i16_info, v16i16x_info,
defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_xmm))>;

// A 128-bit extract from bits [255:128] of a 512-bit vector should use a
// smaller extract to enable EVEX->VEX.
@@ -877,52 +830,6 @@ def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 16))),
                           (iPTR 1)))>;
}

// A 256-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
          (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
          (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
          (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
          (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
def : Pat<(v16i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
          (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm))>;
def : Pat<(v32i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
          (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm))>;

let AddedComplexity = 25 in { // to give priority over vinsertf128rm
// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v8i64 (insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0))),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
def : Pat<(v8f64 (insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0))),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
def : Pat<(v16i32 (insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0))),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
def : Pat<(v16f32 (insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0))),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
def : Pat<(v32i16 (insert_subvector undef, (v8i16 VR128X:$src), (iPTR 0))),
          (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
def : Pat<(v64i8 (insert_subvector undef, (v16i8 VR128X:$src), (iPTR 0))),
          (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;

// A 256-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v8i64 (insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0))),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(v8f64 (insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0))),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(v16i32 (insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0))),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(v16f32 (insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0))),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(v32i16 (insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0))),
          (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(v64i8 (insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0))),
          (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
}

// Additional patterns for handling a bitcast between the vselect and the
// extract_subvector.
@@ -2767,6 +2767,7 @@ include "X86InstrSystem.td"

// Compiler Pseudo Instructions and Pat Patterns
include "X86InstrCompiler.td"
include "X86InstrVecCompiler.td"

//===----------------------------------------------------------------------===//
// Assembler Mnemonic Aliases
@@ -319,124 +319,6 @@ multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
             Sched<[WriteVecLogicLd, ReadAfterLd]>;
}

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;

// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
let AddedComplexity = 25 in { // to give priority over vinsertf128rm
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;

// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion
def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(f128 (bitconvert (i128 FR128:$src))), (f128 FR128:$src)>;
def : Pat<(i128 (bitconvert (f128 FR128:$src))), (i128 FR128:$src)>;

// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion
def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;

// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.