riscv: Add vector bitwise/compare funcs.

Unknown W. Brackets 2023-01-21 19:02:02 -08:00
parent 6fa50eaa82
commit b3be901557
2 changed files with 274 additions and 0 deletions


@@ -2358,6 +2358,219 @@ void RiscVEmitter::VMSBC_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1) {
Write32(EncodeIVX(vd, rs1, vs2, VUseMask::NONE, Funct6::VMSBC));
}
void RiscVEmitter::VAND_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VAND));
}
void RiscVEmitter::VAND_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VAND));
}
void RiscVEmitter::VAND_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VAND));
}
void RiscVEmitter::VOR_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VOR));
}
void RiscVEmitter::VOR_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VOR));
}
void RiscVEmitter::VOR_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VOR));
}
void RiscVEmitter::VXOR_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VXOR));
}
void RiscVEmitter::VXOR_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VXOR));
}
void RiscVEmitter::VXOR_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VXOR));
}
void RiscVEmitter::VSLL_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VSLL));
}
void RiscVEmitter::VSLL_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VSLL));
}
void RiscVEmitter::VSLL_VI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm) {
_assert_msg_((uimm5 & 0x1F) == uimm5, "%s shift must be <= 0x1F", __func__);
Write32(EncodeIVI(vd, SignReduce32(uimm5, 5), vs2, vm, Funct6::VSLL));
}
void RiscVEmitter::VSRL_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VSRL));
}
void RiscVEmitter::VSRL_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VSRL));
}
void RiscVEmitter::VSRL_VI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm) {
_assert_msg_((uimm5 & 0x1F) == uimm5, "%s shift must be <= 0x1F", __func__);
Write32(EncodeIVI(vd, SignReduce32(uimm5, 5), vs2, vm, Funct6::VSRL));
}
void RiscVEmitter::VSRA_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VSRA));
}
void RiscVEmitter::VSRA_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VSRA));
}
void RiscVEmitter::VSRA_VI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm) {
_assert_msg_((uimm5 & 0x1F) == uimm5, "%s shift must be <= 0x1F", __func__);
Write32(EncodeIVI(vd, SignReduce32(uimm5, 5), vs2, vm, Funct6::VSRA));
}
void RiscVEmitter::VNSRL_WV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VNSRL));
}
void RiscVEmitter::VNSRL_WX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VNSRL));
}
void RiscVEmitter::VNSRL_WI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm) {
_assert_msg_((uimm5 & 0x1F) == uimm5, "%s shift must be <= 0x1F", __func__);
Write32(EncodeIVI(vd, SignReduce32(uimm5, 5), vs2, vm, Funct6::VNSRL));
}
void RiscVEmitter::VNSRA_WV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VNSRA));
}
void RiscVEmitter::VNSRA_WX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VNSRA));
}
void RiscVEmitter::VNSRA_WI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm) {
_assert_msg_((uimm5 & 0x1F) == uimm5, "%s shift must be <= 0x1F", __func__);
Write32(EncodeIVI(vd, SignReduce32(uimm5, 5), vs2, vm, Funct6::VNSRA));
}
void RiscVEmitter::VMSEQ_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMSEQ));
}
void RiscVEmitter::VMSNE_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMSNE));
}
void RiscVEmitter::VMSLTU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMSLTU));
}
void RiscVEmitter::VMSLT_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMSLT));
}
void RiscVEmitter::VMSLEU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMSLEU));
}
void RiscVEmitter::VMSLE_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMSLE));
}
void RiscVEmitter::VMSEQ_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSEQ));
}
void RiscVEmitter::VMSNE_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSNE));
}
void RiscVEmitter::VMSLTU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSLTU));
}
void RiscVEmitter::VMSLT_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSLT));
}
void RiscVEmitter::VMSLEU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSLEU));
}
void RiscVEmitter::VMSLE_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSLE));
}
void RiscVEmitter::VMSGTU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSGTU));
}
void RiscVEmitter::VMSGT_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMSGT));
}
void RiscVEmitter::VMSEQ_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VMSEQ));
}
void RiscVEmitter::VMSNE_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VMSNE));
}
void RiscVEmitter::VMSLEU_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VMSLEU));
}
void RiscVEmitter::VMSLE_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VMSLE));
}
void RiscVEmitter::VMSGTU_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VMSGTU));
}
void RiscVEmitter::VMSGT_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm) {
Write32(EncodeIVI(vd, simm5, vs2, vm, Funct6::VMSGT));
}
void RiscVEmitter::VMINU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMINU));
}
void RiscVEmitter::VMINU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMINU));
}
void RiscVEmitter::VMIN_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMIN));
}
void RiscVEmitter::VMIN_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMIN));
}
void RiscVEmitter::VMAXU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMAXU));
}
void RiscVEmitter::VMAXU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMAXU));
}
void RiscVEmitter::VMAX_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm) {
Write32(EncodeIVV(vd, vs1, vs2, vm, Funct6::VMAX));
}
void RiscVEmitter::VMAX_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm) {
Write32(EncodeIVX(vd, rs1, vs2, vm, Funct6::VMAX));
}
bool RiscVEmitter::AutoCompress() const {
return SupportsCompressed() && autoCompress_;
}
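As a rough usage sketch (not part of this diff): the new min/max and bitwise emitters could be combined to clamp a vector and mask off its low bits. The register names (V2, X10, X11) and the helper name below are illustrative assumptions, and vtype/vl are assumed to have been configured beforehand with VSETVLI.

// Hypothetical sketch, not from this commit: clamp v2 to [x10, x11],
// then clear its low 4 bits. Assumes V2/X10/X11 are valid RiscVReg values
// and that vsetvli has already set SEW/LMUL/vl.
void EmitClampAndAlign(RiscVEmitter &emit) {
	emit.VMAX_VX(V2, V2, X10);   // v2 = max(v2, lo)
	emit.VMIN_VX(V2, V2, X11);   // v2 = min(v2, hi)
	emit.VAND_VI(V2, V2, -16);   // v2 &= ~0xF (-16 fits the signed 5-bit immediate)
}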


@@ -518,6 +518,67 @@ public:
void VMSBC_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1);
void VMSBC_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1);
void VAND_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VAND_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VAND_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VOR_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VOR_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VOR_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VXOR_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VXOR_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VXOR_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VNOT_V(RiscVReg vd, RiscVReg vs2, VUseMask vm = VUseMask::NONE) {
VXOR_VI(vd, vs2, -1, vm);
}
void VSLL_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VSLL_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VSLL_VI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm = VUseMask::NONE);
void VSRL_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VSRL_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VSRL_VI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm = VUseMask::NONE);
void VSRA_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VSRA_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VSRA_VI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm = VUseMask::NONE);
void VNSRL_WV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VNSRL_WX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VNSRL_WI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm = VUseMask::NONE);
void VNSRA_WV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VNSRA_WX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VNSRA_WI(RiscVReg vd, RiscVReg vs2, u8 uimm5, VUseMask vm = VUseMask::NONE);
// Using a mask ANDs the conditions together, assuming vtype uses MU (not MA).
// Note: VX has every comparison op; VV and VI only have a subset (and there is no GE/GEU encoding at all.)
void VMSEQ_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMSNE_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMSLTU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMSLT_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMSLEU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMSLE_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMSEQ_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSNE_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSLTU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSLT_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSLEU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSLE_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSGTU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSGT_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMSEQ_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VMSNE_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VMSLEU_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VMSLE_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VMSGTU_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VMSGT_VI(RiscVReg vd, RiscVReg vs2, s8 simm5, VUseMask vm = VUseMask::NONE);
void VMINU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMINU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMIN_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMIN_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMAXU_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMAXU_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
void VMAX_VV(RiscVReg vd, RiscVReg vs2, RiscVReg vs1, VUseMask vm = VUseMask::NONE);
void VMAX_VX(RiscVReg vd, RiscVReg vs2, RiscVReg rs1, VUseMask vm = VUseMask::NONE);
// Compressed instructions.
void C_ADDI4SPN(RiscVReg rd, u32 nzuimm10);
void C_FLD(RiscVReg rd, RiscVReg addr, u8 uimm8);
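Since there is no GE/GEU encoding, a greater-or-equal vector compare has to be synthesized; the usual trick (what assemblers do for the vmsge.vv pseudo-instruction) is to swap the sources of VMSLE. A minimal sketch follows, assuming V0/V2/V3 register names and a VUseMask::V0_T masked-mode value; neither is confirmed by this diff.

// Hypothetical sketch: v0 = (v2 >= v3) && (v2 != 0), elementwise.
void EmitGeAndNonZero(RiscVEmitter &emit) {
	// a >= b is the same as b <= a, so swap the sources: VMSLE_VV(vd, vs2, vs1)
	// encodes vmsle.vv vd, vs2, vs1, i.e. vd[i] = (vs2[i] <= vs1[i]).
	emit.VMSLE_VV(V0, V3, V2);
	// A second compare masked by v0 ANDs the conditions together (per the comment
	// above, with vtype's mask-undisturbed policy): elements that already failed stay 0.
	emit.VMSNE_VI(V0, V2, 0, VUseMask::V0_T);
}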