Masked Load/Store - changed the order of parameters in the intrinsics.

No functional changes.
The documentation is coming.

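Until the documentation lands, here is a minimal sketch of the new operand order, shown with a hypothetical <8 x i32> instantiation of the overloaded intrinsics: loads now take (pointer, alignment, mask, pass-through) and stores take (value, pointer, alignment, mask).

  ; Illustrative only; function and value names are made up.
  declare <8 x i32> @llvm.masked.load.v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)
  declare void @llvm.masked.store.v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)

  define <8 x i32> @masked_copy(<8 x i32>* %src, <8 x i32>* %dst, <8 x i1> %mask) {
    %v = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %src, i32 4, <8 x i1> %mask, <8 x i32> undef)
    call void @llvm.masked.store.v8i32(<8 x i32> %v, <8 x i32>* %dst, i32 4, <8 x i1> %mask)
    ret <8 x i32> %v
  }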


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224829 91177308-0d34-0410-b5e6-96231b3b80d8
Elena Demikhovsky 2014-12-25 07:49:20 +00:00
parent e277a13a71
commit b31322328a
11 changed files with 101 additions and 74 deletions

include/llvm/IR/Intrinsics.h

@@ -77,7 +77,7 @@ namespace Intrinsic {
       Void, VarArg, MMX, Metadata, Half, Float, Double,
       Integer, Vector, Pointer, Struct,
       Argument, ExtendArgument, TruncArgument, HalfVecArgument,
-      SameVecWidthArgument
+      SameVecWidthArgument, PtrToArgument
     } Kind;

     union {
@@ -98,13 +98,13 @@ namespace Intrinsic {
     unsigned getArgumentNumber() const {
       assert(Kind == Argument || Kind == ExtendArgument ||
              Kind == TruncArgument || Kind == HalfVecArgument ||
-             Kind == SameVecWidthArgument);
+             Kind == SameVecWidthArgument || Kind == PtrToArgument);
       return Argument_Info >> 2;
     }
     ArgKind getArgumentKind() const {
       assert(Kind == Argument || Kind == ExtendArgument ||
              Kind == TruncArgument || Kind == HalfVecArgument ||
-             Kind == SameVecWidthArgument);
+             Kind == SameVecWidthArgument || Kind == PtrToArgument);
       return (ArgKind)(Argument_Info & 3);
     }

include/llvm/IR/Intrinsics.td

@@ -116,6 +116,7 @@ class LLVMVectorSameWidth<int num, LLVMType elty>
   : LLVMMatchType<num> {
   ValueType ElTy = elty.VT;
 }
+class LLVMPointerTo<int num> : LLVMMatchType<num>;

 // Match the type of another intrinsic parameter that is expected to be a
 // vector type, but change the element count to be half as many
@@ -567,14 +568,14 @@ def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
 //===-------------------------- Masked Intrinsics -------------------------===//
 //
-def int_masked_store : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty,
+def int_masked_store : Intrinsic<[], [llvm_anyvector_ty, LLVMPointerTo<0>,
                                       llvm_i32_ty,
                                       LLVMVectorSameWidth<0, llvm_i1_ty>],
                                  [IntrReadWriteArgMem]>;

 def int_masked_load  : Intrinsic<[llvm_anyvector_ty],
-                                 [llvm_ptr_ty, LLVMMatchType<0>, llvm_i32_ty,
-                                  LLVMVectorSameWidth<0, llvm_i1_ty>],
+                                 [LLVMPointerTo<0>, llvm_i32_ty,
+                                  LLVMVectorSameWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
                                  [IntrReadArgMem]>;

 //===----------------------------------------------------------------------===//
 // Target-specific intrinsics

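The LLVMPointerTo<0> marker added above resolves to a pointer to the overloaded vector type, so the pointer operand is now fully typed instead of being a raw i8*. As an illustrative instantiation (following the usual overload mangling), the definitions above yield for <4 x float>:

  ; sketch of the <4 x float> instantiation
  declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
  declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)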
lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

@@ -3644,9 +3644,10 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
   SDLoc sdl = getCurSDLoc();

-  Value *PtrOperand = I.getArgOperand(0);
+  // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
+  Value *PtrOperand = I.getArgOperand(1);
   SDValue Ptr = getValue(PtrOperand);
-  SDValue Src0 = getValue(I.getArgOperand(1));
+  SDValue Src0 = getValue(I.getArgOperand(0));
   SDValue Mask = getValue(I.getArgOperand(3));
   EVT VT = Src0.getValueType();
   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
@@ -3669,14 +3670,15 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
   SDLoc sdl = getCurSDLoc();

+  // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
   Value *PtrOperand = I.getArgOperand(0);
   SDValue Ptr = getValue(PtrOperand);
-  SDValue Src0 = getValue(I.getArgOperand(1));
-  SDValue Mask = getValue(I.getArgOperand(3));
+  SDValue Src0 = getValue(I.getArgOperand(3));
+  SDValue Mask = getValue(I.getArgOperand(2));
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   EVT VT = TLI.getValueType(I.getType());
-  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
+  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
   if (!Alignment)
     Alignment = DAG.getEVTAlignment(VT);

lib/IR/Function.cpp

@@ -537,7 +537,8 @@ enum IIT_Info {
   IIT_V1 = 27,
   IIT_VARARG = 28,
   IIT_HALF_VEC_ARG = 29,
-  IIT_SAME_VEC_WIDTH_ARG = 30
+  IIT_SAME_VEC_WIDTH_ARG = 30,
+  IIT_PTR_TO_ARG = 31
 };
@@ -651,6 +652,12 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
                                              ArgInfo));
     return;
   }
+  case IIT_PTR_TO_ARG: {
+    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+    OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToArgument,
+                                             ArgInfo));
+    return;
+  }
   case IIT_EMPTYSTRUCT:
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
     return;
@@ -758,13 +765,18 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
   case IITDescriptor::HalfVecArgument:
     return VectorType::getHalfElementsVectorType(cast<VectorType>(
                                                  Tys[D.getArgumentNumber()]));
-  case IITDescriptor::SameVecWidthArgument:
+  case IITDescriptor::SameVecWidthArgument: {
     Type *EltTy = DecodeFixedType(Infos, Tys, Context);
     Type *Ty = Tys[D.getArgumentNumber()];
     if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
       return VectorType::get(EltTy, VTy->getNumElements());
     }
     llvm_unreachable("unhandled");
+  }
+  case IITDescriptor::PtrToArgument: {
+    Type *Ty = Tys[D.getArgumentNumber()];
+    return PointerType::getUnqual(Ty);
+  }
   }
   llvm_unreachable("unhandled");
 }

lib/IR/IRBuilder.cpp

@@ -187,7 +187,7 @@ CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
 /// Ops - an array of operands.
 CallInst *IRBuilderBase::CreateMaskedLoad(ArrayRef<Value *> Ops) {
   // The only overloaded type - the type of the pass-through value in this case
-  Type *DataTy = Ops[1]->getType();
+  Type *DataTy = Ops[3]->getType();
   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, DataTy);
 }
@@ -195,7 +195,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(ArrayRef<Value *> Ops) {
 /// Ops - an array of operands.
 CallInst *IRBuilderBase::CreateMaskedStore(ArrayRef<Value *> Ops) {
   // DataTy - the type of the data to be stored - the only overloaded type
-  Type *DataTy = Ops[1]->getType();
+  Type *DataTy = Ops[0]->getType();
   return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, DataTy);
 }

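CreateMaskedLoad and CreateMaskedStore still take a flat operand array, so callers must now populate it in the new order: {Ptr, Alignment, Mask, PassThru} for loads and {Data, Ptr, Alignment, Mask} for stores. A sketch of the call the load helper ends up emitting (value names are illustrative):

  ; illustrative values; the helper just forwards Ops to the intrinsic call
  %v = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthru)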
lib/IR/Verifier.cpp

@@ -2441,6 +2441,13 @@ bool Verifier::VerifyIntrinsicType(Type *Ty,
     return VerifyIntrinsicType(ThisArgType->getVectorElementType(),
                                Infos, ArgTys);
   }
+  case IITDescriptor::PtrToArgument: {
+    if (D.getArgumentNumber() >= ArgTys.size())
+      return true;
+    Type *ReferenceType = ArgTys[D.getArgumentNumber()];
+    PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
+    return (!ThisArgType || ThisArgType->getElementType() != ReferenceType);
+  }
   }
   llvm_unreachable("unhandled");
 }

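With the PtrToArgument descriptor, the verifier now enforces that the pointer operand points at the overloaded data type. A hypothetical declaration it would reject, since the pointee does not match the returned vector type:

  ; hypothetical: pointee <4 x i32> does not match the overloaded <4 x float>
  declare <4 x float> @llvm.masked.load.v4f32(<4 x i32>*, i32, <4 x i1>, <4 x float>)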
lib/Target/X86/X86TargetTransformInfo.cpp

@@ -1159,11 +1159,11 @@ unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
   return X86TTI::getIntImmCost(Imm, Ty);
 }

-bool X86TTI::isLegalMaskedLoad(Type *DataType, int Consecutive) const {
-  int ScalarWidth = DataType->getScalarSizeInBits();
+bool X86TTI::isLegalMaskedLoad(Type *DataTy, int Consecutive) const {
+  int DataWidth = DataTy->getPrimitiveSizeInBits();

   // Todo: AVX512 allows gather/scatter, works with strided and random as well
-  if ((ScalarWidth < 32) || (Consecutive == 0))
+  if ((DataWidth < 32) || (Consecutive == 0))
     return false;
   if (ST->hasAVX512() || ST->hasAVX2())
     return true;

lib/Transforms/Vectorize/LoopVectorize.cpp

@@ -1880,15 +1880,10 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
     Instruction *NewSI;
     if (Legal->isMaskRequired(SI)) {
-      Type *I8PtrTy =
-        Builder.getInt8PtrTy(PartPtr->getType()->getPointerAddressSpace());
-      Value *I8Ptr = Builder.CreateBitCast(PartPtr, I8PtrTy);
       VectorParts Cond = createBlockInMask(SI->getParent());
       SmallVector<Value *, 8> Ops;
-      Ops.push_back(I8Ptr);
       Ops.push_back(StoredVal[Part]);
+      Ops.push_back(VecPtr);
       Ops.push_back(Builder.getInt32(Alignment));
       Ops.push_back(Cond[Part]);
       NewSI = Builder.CreateMaskedStore(Ops);
@@ -1915,23 +1910,18 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
     }
     Instruction* NewLI;
+    Value *VecPtr = Builder.CreateBitCast(PartPtr,
+                                          DataTy->getPointerTo(AddressSpace));
     if (Legal->isMaskRequired(LI)) {
-      Type *I8PtrTy =
-        Builder.getInt8PtrTy(PartPtr->getType()->getPointerAddressSpace());
-      Value *I8Ptr = Builder.CreateBitCast(PartPtr, I8PtrTy);
       VectorParts SrcMask = createBlockInMask(LI->getParent());
       SmallVector<Value *, 8> Ops;
-      Ops.push_back(I8Ptr);
-      Ops.push_back(UndefValue::get(DataTy));
+      Ops.push_back(VecPtr);
       Ops.push_back(Builder.getInt32(Alignment));
       Ops.push_back(SrcMask[Part]);
+      Ops.push_back(UndefValue::get(DataTy));
       NewLI = Builder.CreateMaskedLoad(Ops);
     }
     else {
-      Value *VecPtr = Builder.CreateBitCast(PartPtr,
-                                            DataTy->getPointerTo(AddressSpace));
       NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
     }
     propagateMetadata(NewLI, LI);

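Because the pointer operand is now typed, the vectorizer can reuse the single VecPtr bitcast on both the masked and unmasked paths rather than building a separate i8* cast for the intrinsic. The masked path then emits a call of this shape (a sketch; value names are illustrative):

  ; sketch; %vecptr and %mask stand in for VecPtr and SrcMask[Part]
  %wide.load = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %vecptr, i32 4, <8 x i1> %mask, <8 x float> undef)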
test/CodeGen/X86/masked_memop.ll

@@ -9,9 +9,9 @@
 ; AVX2: vpmaskmovd (%rdi)
 ; AVX2-NOT: blend
-define <16 x i32> @test1(<16 x i32> %trigger, i8* %addr) {
+define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x i32> @llvm.masked.load.v16i32(i8* %addr, <16 x i32>undef, i32 4, <16 x i1>%mask)
+  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>undef)
   ret <16 x i32> %res
 }
@@ -22,18 +22,18 @@ define <16 x i32> @test1(<16 x i32> %trigger, i8* %addr) {
 ; AVX2: vpmaskmovd {{.*}}(%rdi)
 ; AVX2: vpmaskmovd {{.*}}(%rdi)
 ; AVX2-NOT: blend
-define <16 x i32> @test2(<16 x i32> %trigger, i8* %addr) {
+define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x i32> @llvm.masked.load.v16i32(i8* %addr, <16 x i32>zeroinitializer, i32 4, <16 x i1>%mask)
+  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>zeroinitializer)
   ret <16 x i32> %res
 }

 ; AVX512-LABEL: test3
 ; AVX512: vmovdqu32 %zmm1, (%rdi) {%k1}
-define void @test3(<16 x i32> %trigger, i8* %addr, <16 x i32> %val) {
+define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.store.v16i32(i8* %addr, <16 x i32>%val, i32 4, <16 x i1>%mask)
+  call void @llvm.masked.store.v16i32(<16 x i32>%val, <16 x i32>* %addr, i32 4, <16 x i1>%mask)
   ret void
 }
@@ -44,9 +44,9 @@ define void @test3(<16 x i32> %trigger, i8* %addr, <16 x i32> %val) {
 ; AVX2: vmaskmovps {{.*}}(%rdi)
 ; AVX2: vmaskmovps {{.*}}(%rdi)
 ; AVX2: blend
-define <16 x float> @test4(<16 x i32> %trigger, i8* %addr, <16 x float> %dst) {
+define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x float> @llvm.masked.load.v16f32(i8* %addr, <16 x float>%dst, i32 4, <16 x i1>%mask)
+  %res = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %addr, i32 4, <16 x i1>%mask, <16 x float> %dst)
   ret <16 x float> %res
 }
@@ -58,86 +58,96 @@ define <16 x float> @test4(<16 x i32> %trigger, i8* %addr, <16 x float> %dst) {
 ; AVX2: vblendvpd
 ; AVX2: vmaskmovpd
 ; AVX2: vblendvpd
-define <8 x double> @test5(<8 x i32> %trigger, i8* %addr, <8 x double> %dst) {
+define <8 x double> @test5(<8 x i32> %trigger, <8 x double>* %addr, <8 x double> %dst) {
   %mask = icmp eq <8 x i32> %trigger, zeroinitializer
-  %res = call <8 x double> @llvm.masked.load.v8f64(i8* %addr, <8 x double>%dst, i32 4, <8 x i1>%mask)
+  %res = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %addr, i32 4, <8 x i1>%mask, <8 x double>%dst)
   ret <8 x double> %res
 }

 ; AVX2-LABEL: test6
 ; AVX2: vmaskmovpd
 ; AVX2: vblendvpd
-define <2 x double> @test6(<2 x i64> %trigger, i8* %addr, <2 x double> %dst) {
+define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double> %dst) {
   %mask = icmp eq <2 x i64> %trigger, zeroinitializer
-  %res = call <2 x double> @llvm.masked.load.v2f64(i8* %addr, <2 x double>%dst, i32 4, <2 x i1>%mask)
+  %res = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %addr, i32 4, <2 x i1>%mask, <2 x double>%dst)
   ret <2 x double> %res
 }

 ; AVX2-LABEL: test7
 ; AVX2: vmaskmovps {{.*}}(%rdi)
 ; AVX2: blend
-define <4 x float> @test7(<4 x i32> %trigger, i8* %addr, <4 x float> %dst) {
+define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  %res = call <4 x float> @llvm.masked.load.v4f32(i8* %addr, <4 x float>%dst, i32 4, <4 x i1>%mask)
+  %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1>%mask, <4 x float>%dst)
   ret <4 x float> %res
 }

 ; AVX2-LABEL: test8
 ; AVX2: vpmaskmovd {{.*}}(%rdi)
 ; AVX2: blend
-define <4 x i32> @test8(<4 x i32> %trigger, i8* %addr, <4 x i32> %dst) {
+define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  %res = call <4 x i32> @llvm.masked.load.v4i32(i8* %addr, <4 x i32>%dst, i32 4, <4 x i1>%mask)
+  %res = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %addr, i32 4, <4 x i1>%mask, <4 x i32>%dst)
   ret <4 x i32> %res
 }

 ; AVX2-LABEL: test9
 ; AVX2: vpmaskmovd %xmm
-define void @test9(<4 x i32> %trigger, i8* %addr, <4 x i32> %val) {
+define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.store.v4i32(i8* %addr, <4 x i32>%val, i32 4, <4 x i1>%mask)
+  call void @llvm.masked.store.v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1>%mask)
   ret void
 }

 ; AVX2-LABEL: test10
 ; AVX2: vmaskmovpd (%rdi), %ymm
 ; AVX2: blend
-define <4 x double> @test10(<4 x i32> %trigger, i8* %addr, <4 x double> %dst) {
+define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  %res = call <4 x double> @llvm.masked.load.v4f64(i8* %addr, <4 x double>%dst, i32 4, <4 x i1>%mask)
+  %res = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %addr, i32 4, <4 x i1>%mask, <4 x double>%dst)
   ret <4 x double> %res
 }

 ; AVX2-LABEL: test11
 ; AVX2: vmaskmovps
 ; AVX2: vblendvps
-define <8 x float> @test11(<8 x i32> %trigger, i8* %addr, <8 x float> %dst) {
+define <8 x float> @test11(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) {
   %mask = icmp eq <8 x i32> %trigger, zeroinitializer
-  %res = call <8 x float> @llvm.masked.load.v8f32(i8* %addr, <8 x float>%dst, i32 4, <8 x i1>%mask)
+  %res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 4, <8 x i1>%mask, <8 x float>%dst)
   ret <8 x float> %res
 }

 ; AVX2-LABEL: test12
 ; AVX2: vpmaskmovd %ymm
-define void @test12(<8 x i32> %trigger, i8* %addr, <8 x i32> %val) {
+define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
   %mask = icmp eq <8 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.store.v8i32(i8* %addr, <8 x i32>%val, i32 4, <8 x i1>%mask)
+  call void @llvm.masked.store.v8i32(<8 x i32>%val, <8 x i32>* %addr, i32 4, <8 x i1>%mask)
   ret void
 }

-declare <16 x i32> @llvm.masked.load.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
-declare <4 x i32> @llvm.masked.load.v4i32(i8*, <4 x i32>, i32, <4 x i1>)
-declare void @llvm.masked.store.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
-declare void @llvm.masked.store.v8i32(i8*, <8 x i32>, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i32(i8*, <4 x i32>, i32, <4 x i1>)
-declare <16 x float> @llvm.masked.load.v16f32(i8*, <16 x float>, i32, <16 x i1>)
-declare <8 x float> @llvm.masked.load.v8f32(i8*, <8 x float>, i32, <8 x i1>)
-declare <4 x float> @llvm.masked.load.v4f32(i8*, <4 x float>, i32, <4 x i1>)
-declare void @llvm.masked.store.v16f32(i8*, <16 x float>, i32, <16 x i1>)
-declare <8 x double> @llvm.masked.load.v8f64(i8*, <8 x double>, i32, <8 x i1>)
-declare <4 x double> @llvm.masked.load.v4f64(i8*, <4 x double>, i32, <4 x i1>)
-declare <2 x double> @llvm.masked.load.v2f64(i8*, <2 x double>, i32, <2 x i1>)
-declare void @llvm.masked.store.v8f64(i8*, <8 x double>, i32, <8 x i1>)
-declare void @llvm.masked.store.v2f64(i8*, <2 x double>, i32, <2 x i1>)
-declare void @llvm.masked.store.v2i64(i8*, <2 x i64>, i32, <2 x i1>)
+; AVX512-LABEL: test13
+; AVX512: vmovups %zmm1, (%rdi) {%k1}
+define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
+  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+  call void @llvm.masked.store.v16f32(<16 x float>%val, <16 x float>* %addr, i32 4, <16 x i1>%mask)
+  ret void
+}
+
+declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v16f32p(<16 x float>*, <16 x float>**, i32, <16 x i1>)
+declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
+declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
+declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
+declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
+declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
+declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
+declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)

utils/TableGen/CodeGenTarget.cpp

@@ -531,7 +531,8 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
       // overloaded, all the types can be specified directly.
       assert(((!TyEl->isSubClassOf("LLVMExtendedType") &&
                !TyEl->isSubClassOf("LLVMTruncatedType") &&
-               !TyEl->isSubClassOf("LLVMVectorSameWidth")) ||
+               !TyEl->isSubClassOf("LLVMVectorSameWidth") &&
+               !TyEl->isSubClassOf("LLVMPointerTo")) ||
               VT == MVT::iAny || VT == MVT::vAny) &&
              "Expected iAny or vAny type");
     } else

utils/TableGen/IntrinsicEmitter.cpp

@@ -258,7 +258,8 @@ enum IIT_Info {
   IIT_V1 = 27,
   IIT_VARARG = 28,
   IIT_HALF_VEC_ARG = 29,
-  IIT_SAME_VEC_WIDTH_ARG = 30
+  IIT_SAME_VEC_WIDTH_ARG = 30,
+  IIT_PTR_TO_ARG = 31
 };
@@ -313,6 +314,9 @@ static void EncodeFixedType(Record *R, std::vector<unsigned char> &ArgCodes,
       EncodeFixedValueType(VT, Sig);
       return;
     }
+    else if (R->isSubClassOf("LLVMPointerTo")) {
+      Sig.push_back(IIT_PTR_TO_ARG);
+    }
     else
       Sig.push_back(IIT_ARG);
     return Sig.push_back((Number << 2) | ArgCodes[Number]);