Always compute all the bits in ComputeMaskedBits.

This allows us to keep passing reduced masks to SimplifyDemandedBits, but know
about all the bits if SimplifyDemandedBits fails. This allows instcombine to
simplify cases like the one in the included testcase.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@154011 91177308-0d34-0410-b5e6-96231b3b80d8

parent 00b73a5e44
commit 26c8dcc692
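For orientation, here is a minimal sketch (not part of the commit) of how a caller migrates across this API change. The helper name and include paths are illustrative assumptions; the pattern itself — compute every bit, then apply whatever mask the caller cares about afterwards — is what the hunks below implement.

// Illustrative only: a hypothetical caller of the ValueTracking API.
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Old interface (before this commit): the caller passed a demanded-bits mask
// and only the bits selected by it were guaranteed to be computed:
//   ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, TD);
//
// New interface (after this commit): all bits are always computed, and a
// caller that only needs some of them masks the result itself.
static bool demandedBitsKnownZero(Value *V, const APInt &DemandedMask,
                                  const TargetData *TD) {
  APInt KnownZero(DemandedMask.getBitWidth(), 0);
  APInt KnownOne(DemandedMask.getBitWidth(), 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD);
  return (KnownZero & DemandedMask) == DemandedMask;  // same check MaskedValueIsZero keeps
}

Note the contract visible in the hunks below: the bit width is now taken from KnownZero.getBitWidth(), so callers must size KnownZero/KnownOne to the value's width before the call.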
@@ -36,11 +36,9 @@ namespace llvm {
 /// where V is a vector, the mask, known zero, and known one values are the
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
-void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
-                       APInt &KnownOne, const TargetData *TD = 0,
-                       unsigned Depth = 0);
-void computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
-                           APInt &KnownZero);
+void ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+                       const TargetData *TD = 0, unsigned Depth = 0);
+void computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero);

 /// ComputeSignBit - Determine whether the sign bit is known to be zero or
 /// one.  Convenience wrapper around ComputeMaskedBits.
@@ -980,8 +980,8 @@ public:
 /// bitsets. This code only analyzes bits in Mask, in order to short-circuit
 /// processing. Targets can implement the computeMaskedBitsForTargetNode
 /// method in the TargetLowering class to allow target nodes to be understood.
-void ComputeMaskedBits(SDValue Op, const APInt &Mask, APInt &KnownZero,
-                       APInt &KnownOne, unsigned Depth = 0) const;
+void ComputeMaskedBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
+                       unsigned Depth = 0) const;

 /// ComputeNumSignBits - Return the number of times the sign bit of the
 /// register is replicated into the other bits. We know that at least 1 bit
@@ -873,7 +873,6 @@ public:
 /// Mask are known to be either zero or one and return them in the
 /// KnownZero/KnownOne bitsets.
 virtual void computeMaskedBitsForTargetNode(const SDValue Op,
-                                            const APInt &Mask,
                                             APInt &KnownZero,
                                             APInt &KnownOne,
                                             const SelectionDAG &DAG,
@@ -416,9 +416,8 @@ void Lint::visitMemoryReference(Instruction &I,

 if (Align != 0) {
 unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
-APInt Mask = APInt::getAllOnesValue(BitWidth),
-KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-ComputeMaskedBits(Ptr, Mask, KnownZero, KnownOne, TD);
+APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ComputeMaskedBits(Ptr, KnownZero, KnownOne, TD);
 Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
 "Undefined behavior: Memory reference address is misaligned", &I);
 }
@@ -476,9 +475,8 @@ static bool isZero(Value *V, TargetData *TD) {
 if (isa<UndefValue>(V)) return true;

 unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
-APInt Mask = APInt::getAllOnesValue(BitWidth),
-KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
+APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ComputeMaskedBits(V, KnownZero, KnownOne, TD);
 return KnownZero.isAllOnesValue();
 }

@@ -3261,9 +3261,8 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
 // For a SCEVUnknown, ask ValueTracking.
 unsigned BitWidth = getTypeSizeInBits(U->getType());
-APInt Mask = APInt::getAllOnesValue(BitWidth);
 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
-ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
+ComputeMaskedBits(U->getValue(), Zeros, Ones);
 return Zeros.countTrailingOnes();
 }

@@ -3401,9 +3400,8 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {

 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
 // For a SCEVUnknown, ask ValueTracking.
-APInt Mask = APInt::getAllOnesValue(BitWidth);
 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
-ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
+ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
 if (Ones == ~Zeros + 1)
 return setUnsignedRange(U, ConservativeResult);
 return setUnsignedRange(U,
@@ -3660,9 +3658,8 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
 // knew about to reconstruct a low-bits mask value.
 unsigned LZ = A.countLeadingZeros();
 unsigned BitWidth = A.getBitWidth();
-APInt AllOnes = APInt::getAllOnesValue(BitWidth);
 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
+ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);

 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);

@@ -44,7 +44,6 @@ static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
 }

 static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
-                                    const APInt &Mask,
                                     APInt &KnownZero, APInt &KnownOne,
                                     APInt &KnownZero2, APInt &KnownOne2,
                                     const TargetData *TD, unsigned Depth) {
@@ -54,11 +53,11 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
 // than C (i.e. no wrap-around can happen). For example, 20-X is
 // positive if we can prove that X is >= 0 and < 16.
 if (!CLHS->getValue().isNegative()) {
-unsigned BitWidth = Mask.getBitWidth();
+unsigned BitWidth = KnownZero.getBitWidth();
 unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
 // NLZ can't be BitWidth with no sign bit
 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
-llvm::ComputeMaskedBits(Op1, MaskV, KnownZero2, KnownOne2, TD, Depth+1);
+llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);

 // If all of the MaskV bits are known to be zero, then we know the
 // output top bits are zero, because we now know that the output is
@@ -66,27 +65,25 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
 if ((KnownZero2 & MaskV) == MaskV) {
 unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
 // Top bits known zero.
-KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
+KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
 }
 }
 }
 }

-unsigned BitWidth = Mask.getBitWidth();
+unsigned BitWidth = KnownZero.getBitWidth();

 // If one of the operands has trailing zeros, then the bits that the
 // other operand has in those bit positions will be preserved in the
 // result. For an add, this works with either operand. For a subtract,
 // this only works if the known zeros are in the right operand.
 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
-APInt Mask2 = APInt::getLowBitsSet(BitWidth,
-                                   BitWidth - Mask.countLeadingZeros());
-llvm::ComputeMaskedBits(Op0, Mask2, LHSKnownZero, LHSKnownOne, TD, Depth+1);
+llvm::ComputeMaskedBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
 assert((LHSKnownZero & LHSKnownOne) == 0 &&
 "Bits known to be one AND zero?");
 unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

-llvm::ComputeMaskedBits(Op1, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
+llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
 unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

@@ -111,7 +108,7 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
 }

 // Are we still trying to solve for the sign bit?
-if (Mask.isNegative() && !KnownZero.isNegative() && !KnownOne.isNegative()) {
+if (!KnownZero.isNegative() && !KnownOne.isNegative()) {
 if (NSW) {
 if (Add) {
 // Adding two positive numbers can't wrap into negative
@@ -133,21 +130,19 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
 }

 static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
-                                 const APInt &Mask,
                                  APInt &KnownZero, APInt &KnownOne,
                                  APInt &KnownZero2, APInt &KnownOne2,
                                  const TargetData *TD, unsigned Depth) {
-unsigned BitWidth = Mask.getBitWidth();
-APInt Mask2 = APInt::getAllOnesValue(BitWidth);
-ComputeMaskedBits(Op1, Mask2, KnownZero, KnownOne, TD, Depth+1);
-ComputeMaskedBits(Op0, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
+unsigned BitWidth = KnownZero.getBitWidth();
+ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
+ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

 bool isKnownNegative = false;
 bool isKnownNonNegative = false;
 // If the multiplication is known not to overflow, compute the sign bit.
-if (Mask.isNegative() && NSW) {
+if (NSW) {
 if (Op0 == Op1) {
 // The product of a number with itself is non-negative.
 isKnownNonNegative = true;
@@ -184,7 +179,6 @@ static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
 LeadZ = std::min(LeadZ, BitWidth);
 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
             APInt::getHighBitsSet(BitWidth, LeadZ);
-KnownZero &= Mask;

 // Only make use of no-wrap flags if we failed to compute the sign bit
 // directly. This matters if the multiplication always overflows, in
@@ -197,9 +191,8 @@ static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
 KnownOne.setBit(BitWidth - 1);
 }

-void llvm::computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
-                                 APInt &KnownZero) {
-unsigned BitWidth = Mask.getBitWidth();
+void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
+unsigned BitWidth = KnownZero.getBitWidth();
 unsigned NumRanges = Ranges.getNumOperands() / 2;
 assert(NumRanges >= 1);

@@ -215,12 +208,11 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
 MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
 }

-KnownZero = Mask & APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
+KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
 }
-/// ComputeMaskedBits - Determine which of the bits specified in Mask are
-/// known to be either zero or one and return them in the KnownZero/KnownOne
-/// bit sets. This code only analyzes bits in Mask, in order to short-circuit
-/// processing.
+/// ComputeMaskedBits - Determine which of the bits are known to be either zero
+/// or one and return them in the KnownZero/KnownOne bit sets.
+///
 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
 /// we cannot optimize based on the assumption that it is zero without changing
 /// it to be an explicit zero. If we don't change it to zero, other code could
@@ -230,15 +222,15 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
 ///
 /// This function is defined on values with integer type, values with pointer
 /// type (but only if TD is non-null), and vectors of integers. In the case
-/// where V is a vector, the mask, known zero, and known one values are the
+/// where V is a vector, known zero, and known one values are the
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
-void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
-                             APInt &KnownZero, APInt &KnownOne,
+void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                              const TargetData *TD, unsigned Depth) {
 assert(V && "No Value?");
 assert(Depth <= MaxDepth && "Limit Search Depth");
-unsigned BitWidth = Mask.getBitWidth();
+unsigned BitWidth = KnownZero.getBitWidth();

 assert((V->getType()->isIntOrIntVectorTy() ||
         V->getType()->getScalarType()->isPointerTy()) &&
        "Not integer or pointer type!");
@@ -252,15 +244,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,

 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
 // We know all of the bits for a constant!
-KnownOne = CI->getValue() & Mask;
-KnownZero = ~KnownOne & Mask;
+KnownOne = CI->getValue();
+KnownZero = ~KnownOne;
 return;
 }
 // Null and aggregate-zero are all-zeros.
 if (isa<ConstantPointerNull>(V) ||
     isa<ConstantAggregateZero>(V)) {
 KnownOne.clearAllBits();
-KnownZero = Mask;
+KnownZero = APInt::getAllOnesValue(BitWidth);
 return;
 }
 // Handle a constant vector by taking the intersection of the known bits of
@@ -297,8 +289,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 }
 }
 if (Align > 0)
-KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
+KnownZero = APInt::getLowBitsSet(BitWidth,
                                  CountTrailingZeros_32(Align));
 else
 KnownZero.clearAllBits();
 KnownOne.clearAllBits();
@@ -310,8 +302,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 if (GA->mayBeOverridden()) {
 KnownZero.clearAllBits(); KnownOne.clearAllBits();
 } else {
-ComputeMaskedBits(GA->getAliasee(), Mask, KnownZero, KnownOne,
-                  TD, Depth+1);
+ComputeMaskedBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
 }
 return;
 }
@@ -320,15 +311,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 // Get alignment information off byval arguments if specified in the IR.
 if (A->hasByValAttr())
 if (unsigned Align = A->getParamAlignment())
-KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
+KnownZero = APInt::getLowBitsSet(BitWidth,
                                  CountTrailingZeros_32(Align));
 return;
 }

 // Start out not knowing anything.
 KnownZero.clearAllBits(); KnownOne.clearAllBits();

-if (Depth == MaxDepth || Mask == 0)
+if (Depth == MaxDepth)
 return; // Limit search depth.

 Operator *I = dyn_cast<Operator>(V);
@@ -339,14 +330,12 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 default: break;
 case Instruction::Load:
 if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
-computeMaskedBitsLoad(*MD, Mask, KnownZero);
+computeMaskedBitsLoad(*MD, KnownZero);
 return;
 case Instruction::And: {
 // If either the LHS or the RHS are Zero, the result is zero.
-ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
-APInt Mask2(Mask & ~KnownZero);
-ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -357,10 +346,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 return;
 }
 case Instruction::Or: {
-ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
-APInt Mask2(Mask & ~KnownOne);
-ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -371,9 +358,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 return;
 }
 case Instruction::Xor: {
-ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
-ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -387,34 +373,30 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 case Instruction::Mul: {
 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
 ComputeMaskedBitsMul(I->getOperand(0), I->getOperand(1), NSW,
-                     Mask, KnownZero, KnownOne, KnownZero2, KnownOne2,
-                     TD, Depth);
+                     KnownZero, KnownOne, KnownZero2, KnownOne2, TD, Depth);
 break;
 }
 case Instruction::UDiv: {
 // For the purposes of computing leading zeros we can conservatively
 // treat a udiv as a logical right shift by the power of 2 known to
 // be less than the denominator.
-APInt AllOnes = APInt::getAllOnesValue(BitWidth);
-ComputeMaskedBits(I->getOperand(0),
-                  AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
 unsigned LeadZ = KnownZero2.countLeadingOnes();

 KnownOne2.clearAllBits();
 KnownZero2.clearAllBits();
-ComputeMaskedBits(I->getOperand(1),
-                  AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
 if (RHSUnknownLeadingOnes != BitWidth)
 LeadZ = std::min(BitWidth,
                  LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

-KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
+KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
 return;
 }
 case Instruction::Select:
-ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD, Depth+1);
-ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
+ComputeMaskedBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
                   Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -447,11 +429,9 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 else
 SrcBitWidth = SrcTy->getScalarSizeInBits();

-APInt MaskIn = Mask.zextOrTrunc(SrcBitWidth);
 KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
 KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
-ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
 KnownZero = KnownZero.zextOrTrunc(BitWidth);
 KnownOne = KnownOne.zextOrTrunc(BitWidth);
 // Any top bits are known to be zero.
@@ -465,8 +445,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 // TODO: For now, not handling conversions like:
 // (bitcast i64 %x to <2 x i32>)
 !I->getType()->isVectorTy()) {
-ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
 return;
 }
 break;
@@ -475,11 +454,9 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 // Compute the bits in the result that are not present in the input.
 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

-APInt MaskIn = Mask.trunc(SrcBitWidth);
 KnownZero = KnownZero.trunc(SrcBitWidth);
 KnownOne = KnownOne.trunc(SrcBitWidth);
-ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 KnownZero = KnownZero.zext(BitWidth);
 KnownOne = KnownOne.zext(BitWidth);
@@ -496,9 +473,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
-APInt Mask2(Mask.lshr(ShiftAmt));
-ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 KnownZero <<= ShiftAmt;
 KnownOne <<= ShiftAmt;
@@ -513,9 +488,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

 // Unsigned shift right.
-APInt Mask2(Mask.shl(ShiftAmt));
-ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero,KnownOne, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
@@ -531,9 +504,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

 // Signed shift right.
-APInt Mask2(Mask.shl(ShiftAmt));
-ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
@@ -549,15 +520,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 case Instruction::Sub: {
 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
 ComputeMaskedBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
-                        Mask, KnownZero, KnownOne, KnownZero2, KnownOne2,
-                        TD, Depth);
+                        KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
+                        Depth);
 break;
 }
 case Instruction::Add: {
 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
 ComputeMaskedBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
-                        Mask, KnownZero, KnownOne, KnownZero2, KnownOne2,
-                        TD, Depth);
+                        KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
+                        Depth);
 break;
 }
 case Instruction::SRem:
@@ -565,9 +536,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 APInt RA = Rem->getValue().abs();
 if (RA.isPowerOf2()) {
 APInt LowBits = RA - 1;
-APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
-ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
-                  Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);

 // The low bits of the first operand are unchanged by the srem.
 KnownZero = KnownZero2 & LowBits;
@@ -583,19 +552,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
 KnownOne |= ~LowBits;

-KnownZero &= Mask;
-KnownOne &= Mask;
-
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 }
 }

 // The sign bit is the LHS's sign bit, except when the result of the
 // remainder is zero.
-if (Mask.isNegative() && KnownZero.isNonNegative()) {
-APInt Mask2 = APInt::getSignBit(BitWidth);
+if (KnownZero.isNonNegative()) {
 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
-ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
+ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
                   Depth+1);
 // If it's known zero, our sign bit is also zero.
 if (LHSKnownZero.isNegative())
@@ -608,27 +573,24 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 APInt RA = Rem->getValue();
 if (RA.isPowerOf2()) {
 APInt LowBits = (RA - 1);
-APInt Mask2 = LowBits & Mask;
-KnownZero |= ~LowBits & Mask;
-ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
+ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD,
                   Depth+1);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+KnownZero |= ~LowBits;
+KnownOne &= LowBits;
 break;
 }
 }

 // Since the result is less than or equal to either operand, any leading
 // zero bits in either operand must also exist in the result.
-APInt AllOnes = APInt::getAllOnesValue(BitWidth);
-ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
-                  TD, Depth+1);
-ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
-                  TD, Depth+1);
+ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);

 unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                             KnownZero2.countLeadingOnes());
 KnownOne.clearAllBits();
-KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
+KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
 break;
 }

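To make the urem rewrite just above concrete, here is a small worked example (illustrative, not part of the commit): for a remainder by a power of two, every bit above the low log2 bits is known zero, which is exactly what the unmasked `KnownZero |= ~LowBits` now records unconditionally.

// Illustrative only: known bits of (x urem 8) on an 8-bit value.
// LowBits = 8 - 1 = 0b00000111, so ~LowBits = 0b11111000;
// KnownZero gains bits 3..7 and KnownOne is restricted to bits 0..2.
static_assert((201u % 8u) == (201u & 7u), "x urem 2^k keeps only the low k bits");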
@@ -639,17 +601,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 Align = TD->getABITypeAlignment(AI->getType()->getElementType());

 if (Align > 0)
-KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
-                                 CountTrailingZeros_32(Align));
+KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
 break;
 }
 case Instruction::GetElementPtr: {
 // Analyze all of the subscripts of this getelementptr instruction
 // to determine if we can prove known low zero bits.
-APInt LocalMask = APInt::getAllOnesValue(BitWidth);
 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
-ComputeMaskedBits(I->getOperand(0), LocalMask,
-                  LocalKnownZero, LocalKnownOne, TD, Depth+1);
+ComputeMaskedBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
+                  Depth+1);
 unsigned TrailZ = LocalKnownZero.countTrailingOnes();

 gep_type_iterator GTI = gep_type_begin(I);
@@ -669,17 +629,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 if (!IndexedTy->isSized()) return;
 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
 uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
-LocalMask = APInt::getAllOnesValue(GEPOpiBits);
 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
-ComputeMaskedBits(Index, LocalMask,
-                  LocalKnownZero, LocalKnownOne, TD, Depth+1);
+ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
 TrailZ = std::min(TrailZ,
                   unsigned(CountTrailingZeros_64(TypeSize) +
                            LocalKnownZero.countTrailingOnes()));
 }
 }

-KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
+KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
 break;
 }
 case Instruction::PHI: {
@@ -714,17 +672,13 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 break;
 // Ok, we have a PHI of the form L op= R. Check for low
 // zero bits.
-APInt Mask2 = APInt::getAllOnesValue(BitWidth);
-ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
-Mask2 = APInt::getLowBitsSet(BitWidth,
-                             KnownZero2.countTrailingOnes());
+ComputeMaskedBits(R, KnownZero2, KnownOne2, TD, Depth+1);

 // We need to take the minimum number of known bits
 APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
-ComputeMaskedBits(L, Mask2, KnownZero3, KnownOne3, TD, Depth+1);
+ComputeMaskedBits(L, KnownZero3, KnownOne3, TD, Depth+1);

-KnownZero = Mask &
-            APInt::getLowBitsSet(BitWidth,
+KnownZero = APInt::getLowBitsSet(BitWidth,
                                  std::min(KnownZero2.countTrailingOnes(),
                                           KnownZero3.countTrailingOnes()));
 break;
@@ -743,8 +697,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 if (P->hasConstantValue() == P)
 break;

-KnownZero = Mask;
-KnownOne = Mask;
+KnownZero = APInt::getAllOnesValue(BitWidth);
+KnownOne = APInt::getAllOnesValue(BitWidth);
 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
 // Skip direct self references.
 if (P->getIncomingValue(i) == P) continue;
@@ -753,8 +707,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 KnownOne2 = APInt(BitWidth, 0);
 // Recurse, but cap the recursion to one level, because we don't
 // want to waste time spinning around in loops.
-ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
-                  KnownZero2, KnownOne2, TD, MaxDepth-1);
+ComputeMaskedBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
+                  MaxDepth-1);
 KnownZero &= KnownZero2;
 KnownOne &= KnownOne2;
 // If all bits have been ruled out, there's no need to check
@@ -775,17 +729,17 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 // If this call is undefined for 0, the result will be less than 2^n.
 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
 LowBits -= 1;
-KnownZero = Mask & APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
 break;
 }
 case Intrinsic::ctpop: {
 unsigned LowBits = Log2_32(BitWidth)+1;
-KnownZero = Mask & APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
 break;
 }
 case Intrinsic::x86_sse42_crc32_64_8:
 case Intrinsic::x86_sse42_crc32_64_64:
-KnownZero = Mask & APInt::getHighBitsSet(64, 32);
+KnownZero = APInt::getHighBitsSet(64, 32);
 break;
 }
 }
@@ -800,21 +754,19 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
 case Intrinsic::uadd_with_overflow:
 case Intrinsic::sadd_with_overflow:
 ComputeMaskedBitsAddSub(true, II->getArgOperand(0),
-                        II->getArgOperand(1), false, Mask,
-                        KnownZero, KnownOne, KnownZero2, KnownOne2,
-                        TD, Depth);
+                        II->getArgOperand(1), false, KnownZero,
+                        KnownOne, KnownZero2, KnownOne2, TD, Depth);
 break;
 case Intrinsic::usub_with_overflow:
 case Intrinsic::ssub_with_overflow:
 ComputeMaskedBitsAddSub(false, II->getArgOperand(0),
-                        II->getArgOperand(1), false, Mask,
-                        KnownZero, KnownOne, KnownZero2, KnownOne2,
-                        TD, Depth);
+                        II->getArgOperand(1), false, KnownZero,
+                        KnownOne, KnownZero2, KnownOne2, TD, Depth);
 break;
 case Intrinsic::umul_with_overflow:
 case Intrinsic::smul_with_overflow:
 ComputeMaskedBitsMul(II->getArgOperand(0), II->getArgOperand(1),
-                     false, Mask, KnownZero, KnownOne,
+                     false, KnownZero, KnownOne,
                      KnownZero2, KnownOne2, TD, Depth);
 break;
 }
@@ -835,8 +787,7 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
 }
 APInt ZeroBits(BitWidth, 0);
 APInt OneBits(BitWidth, 0);
-ComputeMaskedBits(V, APInt::getSignBit(BitWidth), ZeroBits, OneBits, TD,
-                  Depth);
+ComputeMaskedBits(V, ZeroBits, OneBits, TD, Depth);
 KnownOne = OneBits[BitWidth - 1];
 KnownZero = ZeroBits[BitWidth - 1];
 }
@@ -944,7 +895,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {

 APInt KnownZero(BitWidth, 0);
 APInt KnownOne(BitWidth, 0);
-ComputeMaskedBits(X, APInt(BitWidth, 1), KnownZero, KnownOne, TD, Depth);
+ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
 if (KnownOne[0])
 return true;
 }
@@ -986,12 +937,12 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
 APInt Mask = APInt::getSignedMaxValue(BitWidth);
 // The sign bit of X is set. If some other bit is set then X is not equal
 // to INT_MIN.
-ComputeMaskedBits(X, Mask, KnownZero, KnownOne, TD, Depth);
+ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
 if ((KnownOne & Mask) != 0)
 return true;
 // The sign bit of Y is set. If some other bit is set then Y is not equal
 // to INT_MIN.
-ComputeMaskedBits(Y, Mask, KnownZero, KnownOne, TD, Depth);
+ComputeMaskedBits(Y, KnownZero, KnownOne, TD, Depth);
 if ((KnownOne & Mask) != 0)
 return true;
 }
@@ -1021,8 +972,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
 if (!BitWidth) return false;
 APInt KnownZero(BitWidth, 0);
 APInt KnownOne(BitWidth, 0);
-ComputeMaskedBits(V, APInt::getAllOnesValue(BitWidth), KnownZero, KnownOne,
-                  TD, Depth);
+ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
 return KnownOne != 0;
 }

@@ -1038,7 +988,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                              const TargetData *TD, unsigned Depth) {
 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
-ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
+ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
 return (KnownZero & Mask) == Mask;
 }
@@ -1129,13 +1079,11 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
 if (CRHS->isAllOnesValue()) {
 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
-APInt Mask = APInt::getAllOnesValue(TyBits);
-ComputeMaskedBits(U->getOperand(0), Mask, KnownZero, KnownOne, TD,
-                  Depth+1);
+ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);

 // If the input is known to be 0 or 1, the output is 0/-1, which is all
 // sign bits set.
-if ((KnownZero | APInt(TyBits, 1)) == Mask)
+if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
 return TyBits;

 // If we are subtracting one from a positive number, there is no carry
@@ -1156,12 +1104,10 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
 if (CLHS->isNullValue()) {
 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
-APInt Mask = APInt::getAllOnesValue(TyBits);
-ComputeMaskedBits(U->getOperand(1), Mask, KnownZero, KnownOne,
-                  TD, Depth+1);
+ComputeMaskedBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
 // If the input is known to be 0 or 1, the output is 0/-1, which is all
 // sign bits set.
-if ((KnownZero | APInt(TyBits, 1)) == Mask)
+if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
 return TyBits;

 // If the input is known to be positive (the sign bit is known clear),
@@ -1203,8 +1149,8 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
 // Finally, if we can prove that the top bits of the result are 0's or 1's,
 // use this information.
 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
-APInt Mask = APInt::getAllOnesValue(TyBits);
-ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
+APInt Mask;
+ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);

 if (KnownZero.isNegative()) { // sign bit is 0
 Mask = KnownZero;
@@ -1896,8 +1842,7 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
 return false;
 APInt KnownZero(BitWidth, 0);
 APInt KnownOne(BitWidth, 0);
-ComputeMaskedBits(Op, APInt::getAllOnesValue(BitWidth),
-                  KnownZero, KnownOne, TD);
+ComputeMaskedBits(Op, KnownZero, KnownOne, TD);
 return !!KnownZero;
 }
 case Instruction::Load: {
@@ -1452,16 +1452,14 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
 if (VT.isInteger() && !VT.isVector()) {
 APInt LHSZero, LHSOne;
 APInt RHSZero, RHSOne;
-APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
-DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
+DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);

 if (LHSZero.getBoolValue()) {
-DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
+DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);

 // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
 // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
-if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
-    (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
+if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
 return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1);
 }
 }
@@ -1547,16 +1545,14 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
 // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
 APInt LHSZero, LHSOne;
 APInt RHSZero, RHSOne;
-APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
-DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
+DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);

 if (LHSZero.getBoolValue()) {
-DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
+DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);

 // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
 // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
-if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
-    (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
+if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
 return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
                  DAG.getNode(ISD::CARRY_FALSE,
                              N->getDebugLoc(), MVT::Glue));
@@ -3835,8 +3831,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
  if (N1C && N0.getOpcode() == ISD::CTLZ &&
      N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
    APInt KnownZero, KnownOne;
-   APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
-   DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);
+   DAG.ComputeMaskedBits(N0.getOperand(0), KnownZero, KnownOne);

    // If any of the input bits are KnownOne, then the input couldn't be all
    // zeros, thus the result of the srl will always be zero.
@@ -3844,7 +3839,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {

    // If all of the bits input the to ctlz node are known to be zero, then
    // the result of the ctlz is "32" and the result of the shift is one.
-   APInt UnknownBits = ~KnownZero & Mask;
+   APInt UnknownBits = ~KnownZero;
    if (UnknownBits == 0) return DAG.getConstant(1, VT);

    // Otherwise, check to see if there is exactly one bit input to the ctlz.
@@ -4439,8 +4434,8 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
                              std::min(Op.getValueSizeInBits(),
                                       VT.getSizeInBits()));
    APInt KnownZero, KnownOne;
-   DAG.ComputeMaskedBits(Op, TruncatedBits, KnownZero, KnownOne);
-   if (TruncatedBits == KnownZero) {
+   DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
+   if (TruncatedBits == (KnownZero & TruncatedBits)) {
      if (VT.bitsGT(Op.getValueType()))
        return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, Op);
      if (VT.bitsLT(Op.getValueType()))
@@ -1362,7 +1362,7 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {

  APInt HighBitMask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
  APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(N->getOperand(1), HighBitMask, KnownZero, KnownOne);
+ DAG.ComputeMaskedBits(N->getOperand(1), KnownZero, KnownOne);

  // If we don't know anything about the high bits, exit.
  if (((KnownZero|KnownOne) & HighBitMask) == 0)
@@ -1627,7 +1627,7 @@ bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                      unsigned Depth) const {
   APInt KnownZero, KnownOne;
-  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
+  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
   return (KnownZero & Mask) == Mask;
 }
@@ -1636,15 +1636,12 @@ bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
 /// known to be either zero or one and return them in the KnownZero/KnownOne
 /// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
 /// processing.
-void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
-                                     APInt &KnownZero, APInt &KnownOne,
-                                     unsigned Depth) const {
-  unsigned BitWidth = Mask.getBitWidth();
-  assert(BitWidth == Op.getValueType().getScalarType().getSizeInBits() &&
-         "Mask size mismatches value type size!");
+void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
+                                     APInt &KnownOne, unsigned Depth) const {
+  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();

   KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
-  if (Depth == 6 || Mask == 0)
+  if (Depth == 6)
     return;  // Limit search depth.

   APInt KnownZero2, KnownOne2;
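The hunk above is the heart of the change: ComputeMaskedBits no longer takes a demanded-bits Mask and instead always fills in every bit, deriving the width from the operand's value type. A self-contained sketch of how a caller's pattern shifts, with a toy computeKnownBits standing in for the real SelectionDAG method (assumed names, not LLVM API):

#include <cstdint>
#include <iostream>

// Toy stand-in for the analysis: plain integers instead of SDValue/APInt.
// After this patch the analysis always reports the full known-zero and
// known-one sets; a caller that only cares about some bits masks the result
// itself instead of passing a demanded-bits mask in.
static void computeKnownBits(std::uint32_t Value, std::uint32_t &KnownZero,
                             std::uint32_t &KnownOne) {
  // For a constant every bit is known, mirroring the ISD::Constant case.
  KnownOne = Value;
  KnownZero = ~Value;
}

int main() {
  std::uint32_t KnownZero = 0, KnownOne = 0;
  const std::uint32_t DemandedBits = 0x000000FFu;  // caller cares about the low byte only

  computeKnownBits(0x12345678u, KnownZero, KnownOne);
  std::uint32_t KnownOneInDemanded = KnownOne & DemandedBits;  // narrow locally

  std::cout << std::hex << KnownOneInDemanded << "\n";  // prints 78
  return 0;
}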
@@ -1652,14 +1649,13 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
   switch (Op.getOpcode()) {
   case ISD::Constant:
     // We know all of the bits for a constant!
-    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & Mask;
-    KnownZero = ~KnownOne & Mask;
+    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
+    KnownZero = ~KnownOne;
     return;
   case ISD::AND:
     // If either the LHS or the RHS are Zero, the result is zero.
-    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
-    ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownZero,
-                      KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -1669,9 +1665,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     KnownZero |= KnownZero2;
     return;
   case ISD::OR:
-    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
-    ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownOne,
-                      KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -1681,8 +1676,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     KnownOne |= KnownOne2;
     return;
   case ISD::XOR: {
-    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
-    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -1694,9 +1689,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     return;
   }
   case ISD::MUL: {
-    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
-    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero, KnownOne, Depth+1);
-    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -1715,33 +1709,29 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     LeadZ = std::min(LeadZ, BitWidth);
     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                 APInt::getHighBitsSet(BitWidth, LeadZ);
-    KnownZero &= Mask;
     return;
   }
   case ISD::UDIV: {
     // For the purposes of computing leading zeros we can conservatively
     // treat a udiv as a logical right shift by the power of 2 known to
     // be less than the denominator.
-    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
-    ComputeMaskedBits(Op.getOperand(0),
-                      AllOnes, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
     unsigned LeadZ = KnownZero2.countLeadingOnes();

     KnownOne2.clearAllBits();
     KnownZero2.clearAllBits();
-    ComputeMaskedBits(Op.getOperand(1),
-                      AllOnes, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
     if (RHSUnknownLeadingOnes != BitWidth)
       LeadZ = std::min(BitWidth,
                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

-    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
+    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
     return;
   }
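The UDIV case only propagates leading zeros, on the reasoning quoted in its comment: dividing by something known to be at least 2^k can only move the numerator's leading zeros further up. A simplified standalone restatement of that bound (not the exact formula used above, which works from the count of the divisor's possibly-one leading bits):

#include <algorithm>
#include <cassert>

// Conservative leading-zero count for an unsigned divide: if the numerator
// has NumLeadZ leading zero bits and the divisor is known to be at least
// 2^DivisorPow2, dividing can only shrink the value, so the quotient has at
// least NumLeadZ + DivisorPow2 leading zeros (capped at the bit width).
static unsigned leadingZerosOfUDiv(unsigned BitWidth, unsigned NumLeadZ,
                                   unsigned DivisorPow2) {
  return std::min(BitWidth, NumLeadZ + DivisorPow2);
}

int main() {
  // A 32-bit numerator below 2^24 divided by something at least 16 (2^4)
  // gives a quotient below 2^20, i.e. at least 12 leading zero bits.
  assert(leadingZerosOfUDiv(32, 8, 4) == 12);
  return 0;
}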
   case ISD::SELECT:
-    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
-    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -1750,8 +1740,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     KnownZero &= KnownZero2;
     return;
   case ISD::SELECT_CC:
-    ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
-    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -1783,8 +1773,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
       if (ShAmt >= BitWidth)
         return;

-      ComputeMaskedBits(Op.getOperand(0), Mask.lshr(ShAmt),
-                        KnownZero, KnownOne, Depth+1);
+      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
       KnownZero <<= ShAmt;
       KnownOne  <<= ShAmt;
@@ -1801,13 +1790,12 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
       if (ShAmt >= BitWidth)
         return;

-      ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt),
-                        KnownZero, KnownOne, Depth+1);
+      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
       KnownZero = KnownZero.lshr(ShAmt);
       KnownOne  = KnownOne.lshr(ShAmt);

-      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
+      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
       KnownZero |= HighBits;  // High bits known zero.
     }
     return;
@@ -1819,15 +1807,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
       if (ShAmt >= BitWidth)
         return;

-      APInt InDemandedMask = (Mask << ShAmt);
       // If any of the demanded bits are produced by the sign extension, we also
       // demand the input sign bit.
-      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
-      if (HighBits.getBoolValue())
-        InDemandedMask |= APInt::getSignBit(BitWidth);
+      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);

-      ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
-                        Depth+1);
+      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
       KnownZero = KnownZero.lshr(ShAmt);
       KnownOne  = KnownOne.lshr(ShAmt);
@@ -1849,10 +1833,10 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,

     // Sign extension.  Compute the demanded bits in the result that are not
     // present in the input.
-    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits) & Mask;
+    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

     APInt InSignBit = APInt::getSignBit(EBits);
-    APInt InputDemandedBits = Mask & APInt::getLowBitsSet(BitWidth, EBits);
+    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

     // If the sign extended bits are demanded, we know that the sign
     // bit is demanded.
@@ -1860,8 +1844,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     if (NewBits.getBoolValue())
       InputDemandedBits |= InSignBit;

-    ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
-                      KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+    KnownOne &= InputDemandedBits;
+    KnownZero &= InputDemandedBits;
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

     // If the sign bit of the input is known set or clear, then we know the
@@ -1893,20 +1878,19 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     if (ISD::isZEXTLoad(Op.getNode())) {
       EVT VT = LD->getMemoryVT();
       unsigned MemBits = VT.getScalarType().getSizeInBits();
-      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
+      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
     } else if (const MDNode *Ranges = LD->getRanges()) {
-      computeMaskedBitsLoad(*Ranges, Mask, KnownZero);
+      computeMaskedBitsLoad(*Ranges, KnownZero);
     }
     return;
   }
   case ISD::ZERO_EXTEND: {
     EVT InVT = Op.getOperand(0).getValueType();
     unsigned InBits = InVT.getScalarType().getSizeInBits();
-    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
-    APInt InMask    = Mask.trunc(InBits);
+    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
     KnownZero = KnownZero.trunc(InBits);
     KnownOne = KnownOne.trunc(InBits);
-    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
     KnownZero = KnownZero.zext(BitWidth);
     KnownOne = KnownOne.zext(BitWidth);
     KnownZero |= NewBits;
@@ -1916,17 +1900,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     EVT InVT = Op.getOperand(0).getValueType();
     unsigned InBits = InVT.getScalarType().getSizeInBits();
     APInt InSignBit = APInt::getSignBit(InBits);
-    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
-    APInt InMask = Mask.trunc(InBits);
-
-    // If any of the sign extended bits are demanded, we know that the sign
-    // bit is demanded. Temporarily set this bit in the mask for our callee.
-    if (NewBits.getBoolValue())
-      InMask |= InSignBit;
+    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

     KnownZero = KnownZero.trunc(InBits);
     KnownOne = KnownOne.trunc(InBits);
-    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

     // Note if the sign bit is known to be zero or one.
     bool SignBitKnownZero = KnownZero.isNegative();
@@ -1934,13 +1912,6 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     assert(!(SignBitKnownZero && SignBitKnownOne) &&
            "Sign bit can't be known to be both zero and one!");

-    // If the sign bit wasn't actually demanded by our caller, we don't
-    // want it set in the KnownZero and KnownOne result values. Reset the
-    // mask and reapply it to the result values.
-    InMask = Mask.trunc(InBits);
-    KnownZero &= InMask;
-    KnownOne  &= InMask;
-
     KnownZero = KnownZero.zext(BitWidth);
     KnownOne = KnownOne.zext(BitWidth);

@@ -1954,10 +1925,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
   case ISD::ANY_EXTEND: {
     EVT InVT = Op.getOperand(0).getValueType();
     unsigned InBits = InVT.getScalarType().getSizeInBits();
-    APInt InMask = Mask.trunc(InBits);
     KnownZero = KnownZero.trunc(InBits);
     KnownOne = KnownOne.trunc(InBits);
-    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
     KnownZero = KnownZero.zext(BitWidth);
     KnownOne = KnownOne.zext(BitWidth);
     return;
@@ -1965,10 +1935,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
   case ISD::TRUNCATE: {
     EVT InVT = Op.getOperand(0).getValueType();
     unsigned InBits = InVT.getScalarType().getSizeInBits();
-    APInt InMask = Mask.zext(InBits);
     KnownZero = KnownZero.zext(InBits);
     KnownOne = KnownOne.zext(InBits);
-    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     KnownZero = KnownZero.trunc(BitWidth);
     KnownOne = KnownOne.trunc(BitWidth);
@@ -1977,9 +1946,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
   case ISD::AssertZext: {
     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
-    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
-                      KnownOne, Depth+1);
-    KnownZero |= (~InMask) & Mask;
+    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+    KnownZero |= (~InMask);
     return;
   }
   case ISD::FGETSIGN:
@@ -1996,8 +1964,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
       unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
       // NLZ can't be BitWidth with no sign bit
       APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
-      ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero2, KnownOne2,
-                        Depth+1);
+      ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

       // If all of the MaskV bits are known to be zero, then we know the
       // output top bits are zero, because we now know that the output is
@@ -2005,7 +1972,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
       if ((KnownZero2 & MaskV) == MaskV) {
         unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
         // Top bits known zero.
-        KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
+        KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
       }
     }
   }
@@ -2016,13 +1983,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     // Output known-0 bits are known if clear or set in both the low clear bits
     // common to both LHS & RHS.  For example, 8+(X<<3) is known to have the
     // low 3 bits clear.
-    APInt Mask2 = APInt::getLowBitsSet(BitWidth,
-                                       BitWidth - Mask.countLeadingZeros());
-    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
     unsigned KnownZeroOut = KnownZero2.countTrailingOnes();

-    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero2, KnownOne2, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
     KnownZeroOut = std::min(KnownZeroOut,
                             KnownZero2.countTrailingOnes());
@@ -2046,7 +2011,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
       if (RA.isPowerOf2()) {
         APInt LowBits = RA - 1;
         APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
-        ComputeMaskedBits(Op.getOperand(0), Mask2,KnownZero2,KnownOne2,Depth+1);
+        ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);

         // The low bits of the first operand are unchanged by the srem.
         KnownZero = KnownZero2 & LowBits;
@@ -2061,10 +2026,6 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
         // the upper bits are all one.
         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
           KnownOne |= ~LowBits;

-        KnownZero &= Mask;
-        KnownOne &= Mask;

         assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
       }
     }
@@ -2074,9 +2035,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
       const APInt &RA = Rem->getAPIntValue();
       if (RA.isPowerOf2()) {
         APInt LowBits = (RA - 1);
-        APInt Mask2 = LowBits & Mask;
-        KnownZero |= ~LowBits & Mask;
-        ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero, KnownOne,Depth+1);
+        KnownZero |= ~LowBits;
+        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1);
         assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
         break;
       }
@@ -2084,16 +2044,13 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,

     // Since the result is less than or equal to either operand, any leading
     // zero bits in either operand must also exist in the result.
-    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
-    ComputeMaskedBits(Op.getOperand(0), AllOnes, KnownZero, KnownOne,
-                      Depth+1);
-    ComputeMaskedBits(Op.getOperand(1), AllOnes, KnownZero2, KnownOne2,
-                      Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

     uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                 KnownZero2.countLeadingOnes());
     KnownOne.clearAllBits();
-    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
+    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
     return;
   }
   case ISD::FrameIndex:
@@ -2113,8 +2070,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
   case ISD::INTRINSIC_W_CHAIN:
   case ISD::INTRINSIC_VOID:
     // Allow the target to implement this method for its nodes.
-    TLI.computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne, *this,
-                                       Depth);
+    TLI.computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
     return;
   }
 }
@@ -2238,12 +2194,11 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
       if (CRHS->isAllOnesValue()) {
         APInt KnownZero, KnownOne;
-        APInt Mask = APInt::getAllOnesValue(VTBits);
-        ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
+        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

         // If the input is known to be 0 or 1, the output is 0/-1, which is all
         // sign bits set.
-        if ((KnownZero | APInt(VTBits, 1)) == Mask)
+        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
           return VTBits;

         // If we are subtracting one from a positive number, there is no carry
@@ -2264,11 +2219,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
       if (CLHS->isNullValue()) {
         APInt KnownZero, KnownOne;
-        APInt Mask = APInt::getAllOnesValue(VTBits);
-        ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
+        ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
         // If the input is known to be 0 or 1, the output is 0/-1, which is all
         // sign bits set.
-        if ((KnownZero | APInt(VTBits, 1)) == Mask)
+        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
           return VTBits;

         // If the input is known to be positive (the sign bit is known clear),
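The isAllOnesValue() test above replaces the old comparison against the all-ones Mask: when every bit except possibly bit 0 is known zero, the value is 0 or 1, so negating it or subtracting one from it yields 0 or -1 and every result bit is a copy of the sign bit. The same check on a plain 32-bit known-zero word, for illustration only:

#include <cassert>
#include <cstdint>

// True when the known-zero set proves the value can only be 0 or 1:
// setting bit 0 on top of the known-zero bits must cover the whole width.
static bool isKnownZeroOrOne(uint32_t KnownZero) {
  return (KnownZero | 1u) == 0xFFFFFFFFu;
}

int main() {
  assert(isKnownZeroOrOne(0xFFFFFFFEu));   // only bit 0 is unknown
  assert(!isKnownZeroOrOne(0xFFFFFFFCu));  // bit 1 could still be set
  return 0;
}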
@@ -2317,9 +2271,9 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
   // Finally, if we can prove that the top bits of the result are 0's or 1's,
   // use this information.
   APInt KnownZero, KnownOne;
-  APInt Mask = APInt::getAllOnesValue(VTBits);
-  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
+  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);

+  APInt Mask;
   if (KnownZero.isNegative()) {        // sign bit is 0
     Mask = KnownZero;
   } else if (KnownOne.isNegative()) {  // sign bit is 1;
@@ -6040,10 +5994,9 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
     int64_t GVOffset = 0;
     if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
       unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
-      APInt AllOnes = APInt::getAllOnesValue(PtrWidth);
       APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
-      llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), AllOnes,
-                              KnownZero, KnownOne, TLI.getTargetData());
+      llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
+                              TLI.getTargetData());
       unsigned AlignBits = KnownZero.countTrailingOnes();
       unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
       if (Align)
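InferPtrAlignment converts the number of trailing bits proven zero in the pointer into a power-of-two alignment, clamping the shift so it stays in range. A small standalone sketch of that final step (illustrative only, plain integers):

#include <algorithm>
#include <cassert>

// A pointer whose low AlignBits bits are known to be zero is aligned to
// 1 << AlignBits; the shift is clamped to keep a 32-bit result well defined.
static unsigned alignmentFromKnownZeroLowBits(unsigned AlignBits) {
  return AlignBits ? 1u << std::min(31u, AlignBits) : 0;
}

int main() {
  assert(alignmentFromKnownZeroLowBits(0) == 0);   // nothing known
  assert(alignmentFromKnownZeroLowBits(3) == 8);   // low 3 bits zero -> 8-byte aligned
  assert(alignmentFromKnownZeroLowBits(4) == 16);
  return 0;
}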
@@ -508,7 +508,6 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {

   Worklist.push_back(CurDAG->getRoot().getNode());

-  APInt Mask;
   APInt KnownZero;
   APInt KnownOne;

@@ -539,8 +538,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
       continue;

     unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
-    Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits());
-    CurDAG->ComputeMaskedBits(Src, Mask, KnownZero, KnownOne);
+    CurDAG->ComputeMaskedBits(Src, KnownZero, KnownOne);
     FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, KnownZero, KnownOne);
   } while (!Worklist.empty());
 }
@@ -1444,7 +1442,7 @@ bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
   APInt NeededMask = DesiredMask & ~ActualMask;

   APInt KnownZero, KnownOne;
-  CurDAG->ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
+  CurDAG->ComputeMaskedBits(LHS, KnownZero, KnownOne);

   // If all the missing bits in the or are already known to be set, match!
   if ((NeededMask & KnownOne) == NeededMask)
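CheckOrMask accepts the pattern when every bit still missing from the immediate is already proven to be one in the other operand, so OR-ing them in cannot change the value. The same predicate on plain integers (names here are illustrative):

#include <cassert>
#include <cstdint>

// Desired: bits the pattern needs set. Actual: bits the immediate provides.
// KnownOne: bits proven set in the other operand. The OR pattern matches if
// that operand is guaranteed to supply every bit the immediate is missing.
static bool orMaskMatches(uint32_t Desired, uint32_t Actual,
                          uint32_t KnownOne) {
  uint32_t Needed = Desired & ~Actual;
  return (Needed & KnownOne) == Needed;
}

int main() {
  assert(orMaskMatches(0xFFu, 0x0Fu, 0xF0u));   // missing 0xF0 is known one
  assert(!orMaskMatches(0xFFu, 0x0Fu, 0x30u));  // bits 0xC0 are not proven set
  return 0;
}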
@@ -1244,7 +1244,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
   if (Depth != 0) {
     // If not at the root, Just compute the KnownZero/KnownOne bits to
     // simplify things downstream.
-    TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
+    TLO.DAG.ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
     return false;
   }
   // If this is the root being simplified, allow it to have multiple uses,
@@ -1263,8 +1263,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
   switch (Op.getOpcode()) {
   case ISD::Constant:
     // We know all of the bits for a constant!
-    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
-    KnownZero = ~KnownOne & NewMask;
+    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
+    KnownZero = ~KnownOne;
     return false;   // Don't fall through, will infinitely loop.
   case ISD::AND:
     // If the RHS is a constant, check to see if the LHS would be zero without
@@ -1274,8 +1274,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       APInt LHSZero, LHSOne;
       // Do not increment Depth here; that can cause an infinite loop.
-      TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
-                                LHSZero, LHSOne, Depth);
+      TLO.DAG.ComputeMaskedBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
       // If the LHS already has zeros where RHSC does, this and is dead.
       if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
         return TLO.CombineTo(Op, Op.getOperand(0));
@@ -1725,11 +1724,11 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,

     // If the sign bit is known one, the top bits match.
     if (KnownOne.intersects(InSignBit)) {
       KnownOne |= NewBits;
-      KnownZero &= ~NewBits;
+      assert((KnownZero & NewBits) == 0);
     } else {   // Otherwise, top bits aren't known.
-      KnownOne &= ~NewBits;
-      KnownZero &= ~NewBits;
+      assert((KnownOne & NewBits) == 0);
+      assert((KnownZero & NewBits) == 0);
     }
     break;
   }
@@ -1863,7 +1862,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     // FALL THROUGH
   default:
     // Just use ComputeMaskedBits to compute output bits.
-    TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
+    TLO.DAG.ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
     break;
   }

@@ -1879,7 +1878,6 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 /// in Mask are known to be either zero or one and return them in the
 /// KnownZero/KnownOne bitsets.
 void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
-                                                    const APInt &Mask,
                                                     APInt &KnownZero,
                                                     APInt &KnownOne,
                                                     const SelectionDAG &DAG,
@@ -1890,7 +1888,7 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
          "Should use MaskedValueIsZero if you don't know whether Op"
          " is a target node!");
-  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
+  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
 }

 /// ComputeNumSignBitsForTargetNode - This method can be implemented by
@@ -1934,9 +1932,8 @@ static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
   // Fall back to ComputeMaskedBits to catch other known cases.
   EVT OpVT = Val.getValueType();
   unsigned BitWidth = OpVT.getScalarType().getSizeInBits();
-  APInt Mask = APInt::getAllOnesValue(BitWidth);
   APInt KnownZero, KnownOne;
-  DAG.ComputeMaskedBits(Val, Mask, KnownZero, KnownOne);
+  DAG.ComputeMaskedBits(Val, KnownZero, KnownOne);
   return (KnownZero.countPopulation() == BitWidth - 1) &&
          (KnownOne.countPopulation() == 1);
 }
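With the Mask parameter gone, a target override of computeMaskedBitsForTargetNode recovers the bit width from its output operands (or from the node's value type), just as the default implementation above now does with KnownOne.getBitWidth(). A self-contained sketch of that shape, using a tiny stand-in for APInt and a hypothetical boolean-producing node; none of this is the real LLVM API:

#include <cassert>
#include <cstdint>

// Minimal stand-in for APInt so the sketch compiles on its own; only the
// width and a 64-bit payload matter here (assumes 1 <= Width <= 64).
struct FakeAPInt {
  unsigned Width;
  std::uint64_t Bits = 0;
  explicit FakeAPInt(unsigned W) : Width(W) {}
  unsigned getBitWidth() const { return Width; }
};

// Shape of a target hook after this patch: the width comes from an output
// operand, and every bit is reported. The payload models a node whose
// result is always 0 or 1, so every bit above bit 0 is known zero.
static void computeMaskedBitsForBooleanNode(FakeAPInt &KnownZero,
                                            FakeAPInt &KnownOne) {
  unsigned BitWidth = KnownZero.getBitWidth();
  std::uint64_t AllBits = ~0ULL >> (64 - BitWidth);
  KnownZero.Bits = AllBits & ~1ULL;  // high BitWidth-1 bits known zero
  KnownOne.Bits = 0;                 // nothing known to be one
}

int main() {
  FakeAPInt KnownZero(32), KnownOne(32);
  computeMaskedBitsForBooleanNode(KnownZero, KnownOne);
  assert(KnownZero.Bits == 0xFFFFFFFEu);
  assert(KnownOne.Bits == 0);
  return 0;
}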
@@ -8288,8 +8288,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {

   if (Res.getNode()) {
     APInt KnownZero, KnownOne;
-    APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
-    DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne);
+    DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne);
     // Capture demanded bits information that would be otherwise lost.
     if (KnownZero == 0xfffffffe)
       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
@@ -8805,22 +8804,20 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
 }

 void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
-                                                       const APInt &Mask,
                                                        APInt &KnownZero,
                                                        APInt &KnownOne,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
-  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
+  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
   switch (Op.getOpcode()) {
   default: break;
   case ARMISD::CMOV: {
     // Bits are known zero/one if known on the LHS and RHS.
-    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
+    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
     if (KnownZero == 0 && KnownOne == 0) return;

     APInt KnownZeroRHS, KnownOneRHS;
-    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
-                          KnownZeroRHS, KnownOneRHS, Depth+1);
+    DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
     KnownZero &= KnownZeroRHS;
     KnownOne &= KnownOneRHS;
     return;
@@ -315,7 +315,6 @@ namespace llvm {
                                     SelectionDAG &DAG) const;

     virtual void computeMaskedBitsForTargetNode(const SDValue Op,
-                                                const APInt &Mask,
                                                 APInt &KnownZero,
                                                 APInt &KnownOne,
                                                 const SelectionDAG &DAG,
@@ -3158,7 +3158,6 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
 //! Compute used/known bits for a SPU operand
 void
 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
-                                                  const APInt &Mask,
                                                   APInt &KnownZero,
                                                   APInt &KnownOne,
                                                   const SelectionDAG &DAG,
@@ -121,7 +121,6 @@ namespace llvm {
     virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

     virtual void computeMaskedBitsForTargetNode(const SDValue Op,
-                                                const APInt &Mask,
                                                 APInt &KnownZero,
                                                 APInt &KnownOne,
                                                 const SelectionDAG &DAG,
@@ -377,8 +377,8 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
   DebugLoc dl = N->getDebugLoc();

   APInt LKZ, LKO, RKZ, RKO;
-  CurDAG->ComputeMaskedBits(Op0, APInt::getAllOnesValue(32), LKZ, LKO);
-  CurDAG->ComputeMaskedBits(Op1, APInt::getAllOnesValue(32), RKZ, RKO);
+  CurDAG->ComputeMaskedBits(Op0, LKZ, LKO);
+  CurDAG->ComputeMaskedBits(Op1, RKZ, RKO);

   unsigned TargetMask = LKZ.getZExtValue();
   unsigned InsertMask = RKZ.getZExtValue();
@@ -860,14 +860,10 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
     APInt LHSKnownZero, LHSKnownOne;
     APInt RHSKnownZero, RHSKnownOne;
     DAG.ComputeMaskedBits(N.getOperand(0),
-                          APInt::getAllOnesValue(N.getOperand(0)
-                                                 .getValueSizeInBits()),
                           LHSKnownZero, LHSKnownOne);

     if (LHSKnownZero.getBoolValue()) {
       DAG.ComputeMaskedBits(N.getOperand(1),
-                            APInt::getAllOnesValue(N.getOperand(1)
-                                                   .getValueSizeInBits()),
                             RHSKnownZero, RHSKnownOne);
       // If all of the bits are known zero on the LHS or RHS, the add won't
       // carry.
@@ -922,10 +918,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
       // (for better address arithmetic) if the LHS and RHS of the OR are
       // provably disjoint.
       APInt LHSKnownZero, LHSKnownOne;
-      DAG.ComputeMaskedBits(N.getOperand(0),
-                            APInt::getAllOnesValue(N.getOperand(0)
-                                                   .getValueSizeInBits()),
-                            LHSKnownZero, LHSKnownOne);
+      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

       if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
         // If all of the bits are known zero on the LHS or RHS, the add won't
@@ -1038,10 +1031,7 @@ bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
       // (for better address arithmetic) if the LHS and RHS of the OR are
       // provably disjoint.
       APInt LHSKnownZero, LHSKnownOne;
-      DAG.ComputeMaskedBits(N.getOperand(0),
-                            APInt::getAllOnesValue(N.getOperand(0)
-                                                   .getValueSizeInBits()),
-                            LHSKnownZero, LHSKnownOne);
+      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
       if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
         // If all of the bits are known zero on the LHS or RHS, the add won't
         // carry.
@@ -5517,12 +5507,11 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
 //===----------------------------------------------------------------------===//

 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
-                                                       const APInt &Mask,
                                                        APInt &KnownZero,
                                                        APInt &KnownOne,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
-  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
+  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
   switch (Op.getOpcode()) {
   default: break;
   case PPCISD::LBRX: {
@@ -296,7 +296,6 @@ namespace llvm {
     virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

     virtual void computeMaskedBitsForTargetNode(const SDValue Op,
-                                                const APInt &Mask,
                                                 APInt &KnownZero,
                                                 APInt &KnownOne,
                                                 const SelectionDAG &DAG,
@@ -832,22 +832,19 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
 /// be zero. Op is expected to be a target specific node. Used by DAG
 /// combiner.
 void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
-                                                         const APInt &Mask,
                                                          APInt &KnownZero,
                                                          APInt &KnownOne,
                                                          const SelectionDAG &DAG,
                                                          unsigned Depth) const {
   APInt KnownZero2, KnownOne2;
-  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);   // Don't know anything.
+  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);

   switch (Op.getOpcode()) {
   default: break;
   case SPISD::SELECT_ICC:
   case SPISD::SELECT_FCC:
-    DAG.ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne,
-                          Depth+1);
-    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2,
-                          Depth+1);
+    DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

@@ -50,7 +50,6 @@ namespace llvm {
     /// in Mask are known to be either zero or one and return them in the
     /// KnownZero/KnownOne bitsets.
     virtual void computeMaskedBitsForTargetNode(const SDValue Op,
-                                                const APInt &Mask,
                                                 APInt &KnownZero,
                                                 APInt &KnownOne,
                                                 const SelectionDAG &DAG,
@ -896,7 +896,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
|
|||||||
APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
|
APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
|
||||||
MaskLZ);
|
MaskLZ);
|
||||||
APInt KnownZero, KnownOne;
|
APInt KnownZero, KnownOne;
|
||||||
DAG.ComputeMaskedBits(X, MaskedHighBits, KnownZero, KnownOne);
|
DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
|
||||||
if (MaskedHighBits != KnownZero) return true;
|
if (MaskedHighBits != KnownZero) return true;
|
||||||
|
|
||||||
// We've identified a pattern that can be transformed into a single shift
|
// We've identified a pattern that can be transformed into a single shift
|
||||||
|
@ -8099,8 +8099,8 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
|
|||||||
unsigned BitWidth = Op0.getValueSizeInBits();
|
unsigned BitWidth = Op0.getValueSizeInBits();
|
||||||
unsigned AndBitWidth = And.getValueSizeInBits();
|
unsigned AndBitWidth = And.getValueSizeInBits();
|
||||||
if (BitWidth > AndBitWidth) {
|
if (BitWidth > AndBitWidth) {
|
||||||
APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones;
|
APInt Zeros, Ones;
|
||||||
DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones);
|
DAG.ComputeMaskedBits(Op0, Zeros, Ones);
|
||||||
if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
|
if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
|
||||||
return SDValue();
|
return SDValue();
|
||||||
}
|
}
|
||||||
@ -12620,11 +12620,11 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
unsigned BitWidth = KnownZero.getBitWidth();
unsigned Opc = Op.getOpcode();
assert((Opc >= ISD::BUILTIN_OP_END ||
Opc == ISD::INTRINSIC_WO_CHAIN ||
@ -12633,7 +12633,7 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");

KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
switch (Opc) {
default: break;
case X86ISD::ADD:
@ -12652,8 +12652,7 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
break;
// Fallthrough
case X86ISD::SETCC:
KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
Mask.getBitWidth() - 1);
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@ -12678,8 +12677,7 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
}
KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
Mask.getBitWidth() - NumLoBits);
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
break;
}
}
@ -504,7 +504,6 @@ namespace llvm {
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@ -1167,12 +1167,10 @@ def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt KnownZero0, KnownOne0;
CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
CurDAG->ComputeMaskedBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
APInt KnownZero1, KnownOne1;
CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
CurDAG->ComputeMaskedBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
return (~KnownZero0 & ~KnownZero1) == 0;
}]>;

@ -1363,8 +1363,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
if (KnownZero == Mask) {
if ((KnownZero & Mask) == Mask) {
SDValue Carry = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
SDValue Ops [] = { Carry, Result };
@ -1386,8 +1386,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
if (KnownZero == Mask) {
if ((KnownZero & Mask) == Mask) {
SDValue Borrow = N2;
SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
DAG.getConstant(0, VT), N2);
@ -1402,8 +1402,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
if (KnownZero == Mask) {
if ((KnownZero & Mask) == Mask) {
SDValue Borrow = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
SDValue Ops [] = { Borrow, Result };
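An illustrative aside, not part of the commit: the three XCore combines above show the caller-side half of this change. ComputeMaskedBits no longer narrows its answer to a caller-supplied mask, so it may prove bits zero outside the bits a caller cares about; the caller therefore intersects the full result with its own mask after the call, and the old equality test "KnownZero == Mask" becomes the subset test "(KnownZero & Mask) == Mask". A minimal standalone C++ sketch of why the subset test is the right replacement (plain 8-bit integers stand in for APInt, and the values are invented for illustration):

#include <cassert>
#include <cstdint>

int main() {
  // Bits the combine cares about: everything except bit 0, mirroring
  // APInt::getHighBitsSet(VT.getSizeInBits(), VT.getSizeInBits() - 1).
  const uint8_t Mask = 0xFE;

  // With the unmasked API the analysis may also prove bit 0 zero, so the
  // reported known-zero set can be a strict superset of Mask.
  const uint8_t KnownZero = 0xFF;

  // The old equality test would now fail spuriously on the fuller result;
  // the updated test only inspects the bits selected by Mask.
  assert(!(KnownZero == Mask));
  assert((KnownZero & Mask) == Mask);
  return 0;
}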
@ -1521,21 +1521,19 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
}

void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
case XCoreISD::LADD:
case XCoreISD::LSUB:
if (Op.getResNo() == 0) {
// Top bits of carry / borrow are clear.
KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
Mask.getBitWidth() - 1);
KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
KnownZero.getBitWidth() - 1);
KnownZero &= Mask;
}
break;
}
@ -160,7 +160,6 @@ namespace llvm {
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@ -291,9 +291,9 @@ public:
return 0; // Don't do anything with FI
}

void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
APInt &KnownOne, unsigned Depth = 0) const {
void ComputeMaskedBits(Value *V, APInt &KnownZero,
APInt &KnownOne, unsigned Depth = 0) const {
return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
}

bool MaskedValueIsZero(Value *V, const APInt &Mask,
@ -141,10 +141,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// a sub and fuse this add with it.
if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
IntegerType *IT = cast<IntegerType>(I.getType());
APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
ComputeMaskedBits(XorLHS, Mask, LHSKnownZero, LHSKnownOne);
ComputeMaskedBits(XorLHS, LHSKnownZero, LHSKnownOne);
if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
XorLHS);
@ -202,14 +201,13 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {

// A+B --> A|B iff A and B have no bits set in common.
if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
if (LHSKnownZero != 0) {
APInt RHSKnownOne(IT->getBitWidth(), 0);
APInt RHSKnownZero(IT->getBitWidth(), 0);
ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

// No bits in common -> bitwise or.
if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
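An illustrative aside, not part of the commit: the A+B --> A|B transform above rests on a small arithmetic fact. If the combined known-zero sets of the two operands cover every bit, then no bit is set in both operands, no column can generate a carry, and addition produces the same value as bitwise or. A standalone C++ sketch of that identity with plain integers (the values are invented for illustration):

#include <cassert>
#include <cstdint>

int main() {
  // Operands with no set bits in common, which is what the check
  // (LHSKnownZero | RHSKnownZero).isAllOnesValue() establishes above.
  const uint8_t A = 0x0B; // 0000 1011
  const uint8_t B = 0xA0; // 1010 0000
  assert((A & B) == 0);

  // Without common bits there are no carries, so add and or coincide.
  assert(static_cast<uint8_t>(A + B) == static_cast<uint8_t>(A | B));
  return 0;
}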
@ -361,8 +361,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
unsigned TrailingZeros = KnownOne.countTrailingZeros();
APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
if ((Mask & KnownZero) == Mask)
@ -380,8 +379,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
unsigned LeadingZeros = KnownOne.countLeadingZeros();
APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
if ((Mask & KnownZero) == Mask)
@ -394,17 +392,16 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt Mask = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

if (LHSKnownNegative || LHSKnownPositive) {
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
if (LHSKnownNegative && RHSKnownNegative) {
@ -488,14 +485,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::umul_with_overflow: {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();
APInt Mask = APInt::getAllOnesValue(BitWidth);

APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

// Get the largest possible values for each operand.
APInt LHSMax = ~LHSKnownZero;
@ -541,8 +541,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
// If Op1C some other power of two, convert:
uint32_t BitWidth = Op1C->getType()->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
APInt TypeMask(APInt::getAllOnesValue(BitWidth));
ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
ComputeMaskedBits(ICI->getOperand(0), KnownZero, KnownOne);

APInt KnownZeroMask(~KnownZero);
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
@ -590,9 +589,8 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,

APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
APInt TypeMask(APInt::getAllOnesValue(BitWidth));
ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);
ComputeMaskedBits(LHS, KnownZeroLHS, KnownOneLHS);
ComputeMaskedBits(RHS, KnownZeroRHS, KnownOneRHS);

if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
APInt KnownBits = KnownZeroLHS | KnownOneLHS;
@ -911,8 +909,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
unsigned BitWidth = Op1C->getType()->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
APInt TypeMask(APInt::getAllOnesValue(BitWidth));
ComputeMaskedBits(Op0, TypeMask, KnownZero, KnownOne);
ComputeMaskedBits(Op0, KnownZero, KnownOne);

APInt KnownZeroMask(~KnownZero);
if (KnownZeroMask.isPowerOf2()) {
@ -1028,9 +1028,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// of the high bits truncated out of x are known.
unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
ComputeMaskedBits(LHSI->getOperand(0), KnownZero, KnownOne);

// If all the high bits are known, we can do this xform.
if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
@ -142,7 +142,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,

Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
return 0; // Only analyze instructions.
}

@ -156,10 +156,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// this instruction has a simpler value in that context.
if (I->getOpcode() == Instruction::And) {
// If either the LHS or the RHS are Zero, the result is zero.
ComputeMaskedBits(I->getOperand(1), DemandedMask,
RHSKnownZero, RHSKnownOne, Depth+1);
ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
LHSKnownZero, LHSKnownOne, Depth+1);
ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);

// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and' in this
@ -180,10 +178,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// only bits from X or Y are demanded.

// If either the LHS or the RHS are One, the result is One.
ComputeMaskedBits(I->getOperand(1), DemandedMask,
RHSKnownZero, RHSKnownOne, Depth+1);
ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
LHSKnownZero, LHSKnownOne, Depth+1);
ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);

// If all of the demanded bits are known zero on one side, return the
// other. These bits cannot contribute to the result of the 'or' in this
@ -206,7 +202,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}

// Compute the KnownZero/KnownOne bits to simplify things downstream.
ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
return 0;
}

@ -219,7 +215,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,

switch (I->getOpcode()) {
default:
ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
break;
case Instruction::And:
// If either the LHS or the RHS are Zero, the result is zero.
@ -570,7 +566,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,

// Otherwise just hand the sub off to ComputeMaskedBits to fill in
// the known zeros and ones.
ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
ComputeMaskedBits(V, KnownZero, KnownOne, Depth);

// Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
// zero.
@ -729,10 +725,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// The sign bit is the LHS's sign bit, except when the result of the
// remainder is zero.
if (DemandedMask.isNegative() && KnownZero.isNonNegative()) {
APInt Mask2 = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne,
Depth+1);
ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If it's known zero, our sign bit is also zero.
if (LHSKnownZero.isNegative())
KnownZero |= LHSKnownZero;
@ -795,7 +789,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return 0;
}
}
ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
break;
}

@ -762,9 +762,8 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
assert(V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!");
unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
ComputeMaskedBits(V, KnownZero, KnownOne, TD);
unsigned TrailZ = KnownZero.countTrailingOnes();

// Avoid trouble with rediculously large TrailZ values, such as
@ -2562,7 +2562,7 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI) {
Value *Cond = SI->getCondition();
unsigned Bits = cast<IntegerType>(Cond->getType())->getBitWidth();
APInt KnownZero(Bits, 0), KnownOne(Bits, 0);
ComputeMaskedBits(Cond, APInt::getAllOnesValue(Bits), KnownZero, KnownOne);
ComputeMaskedBits(Cond, KnownZero, KnownOne);

// Gather dead cases.
SmallVector<ConstantInt*, 8> DeadCases;
test/Transforms/InstCombine/pr12251.ll (new normal file, 15 lines added)
@ -0,0 +1,15 @@
; RUN: opt < %s -instcombine -S | FileCheck %s

define zeroext i1 @_Z3fooPb(i8* nocapture %x) {
entry:
%a = load i8* %x, align 1, !range !0
%b = and i8 %a, 1
%tobool = icmp ne i8 %b, 0
ret i1 %tobool
}

; CHECK: %a = load i8* %x, align 1, !range !0
; CHECK-NEXT: %tobool = icmp ne i8 %a, 0
; CHECK-NEXT: ret i1 %tobool

!0 = metadata !{i8 0, i8 2}
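An illustrative aside, not part of the commit: the !range metadata is what drives this test. !0 restricts the loaded byte to [0, 2), so every bit above bit 0 is already known zero, the "and i8 %a, 1" can never clear anything, and the compare can use %a directly, which is exactly what the CHECK lines require. A standalone C++ sketch of that reasoning with plain integers (illustrative only, not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  // A value known, via the range annotation, to lie in [0, 2).
  for (uint8_t a = 0; a < 2; ++a) {
    // All bits above bit 0 are zero, so masking with 1 is a no-op...
    assert((a & 1) == a);
    // ...and testing the masked value against zero is the same as testing
    // the original value, which is the simplification the test checks for.
    assert(((a & 1) != 0) == (a != 0));
  }
  return 0;
}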