Reverting r55190, r55191, and r55192. They broke the build with this error message:

{standard input}:17:bad register name `%sil'
make[4]: *** [libgcc/./_addvsi3.o] Error 1
make[4]: *** Waiting for unfinished jobs....
{standard input}:23:bad register name `%dil'
{standard input}:28:bad register name `%dil'
make[4]: *** [libgcc/./_addvdi3.o] Error 1
{standard input}:18:bad register name `%sil'
make[4]: *** [libgcc/./_subvsi3.o] Error 1

llvm-svn: 55200
Author: Bill Wendling
Date:   2008-08-22 20:51:05 +00:00
Parent: 557a7db2eb
Commit: 60e176391d

5 changed files with 26 additions and 34 deletions
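For context on the error above (this note and the snippet are an illustration, not part of the original commit): %sil and %dil are the REX-encoded low-byte aliases of %esi and %edi and exist only in 64-bit mode, so any lowering that places an 8-bit value in the low byte of ESI or EDI produces assembly that a 32-bit assembler rejects. The reverted patterns presumably got there via the x86_subreg_8bit INSERT_SUBREG forms that appear in the X86InstrInfo.td hunk below. A minimal hand-written C++ translation unit that provokes the same class of diagnostic:

    // Illustration only, not from the commit: a hard-coded use of %sil.
    // Assembling this with "g++ -m64 -c" succeeds, while "g++ -m32 -c"
    // fails in the assembler with: bad register name `%sil'
    // which is the same complaint the libgcc build above ran into.
    int main() {
      asm volatile("movzbl %%sil, %%eax" : : : "eax");
      return 0;
    }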

include/llvm/CodeGen/FastISel.h

@@ -17,7 +17,6 @@
#include "llvm/BasicBlock.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include <map>
namespace llvm {
@@ -53,7 +52,6 @@ public:
BasicBlock::iterator
SelectInstructions(BasicBlock::iterator Begin, BasicBlock::iterator End,
DenseMap<const Value*, unsigned> &ValueMap,
std::map<const BasicBlock*, MachineBasicBlock *> &MBBMap,
MachineBasicBlock *MBB);
virtual ~FastISel();

lib/CodeGen/SelectionDAG/FastISel.cpp

@@ -145,8 +145,6 @@ BasicBlock::iterator
FastISel::SelectInstructions(BasicBlock::iterator Begin,
BasicBlock::iterator End,
DenseMap<const Value*, unsigned> &ValueMap,
std::map<const BasicBlock*,
MachineBasicBlock *> &MBBMap,
MachineBasicBlock *mbb) {
MBB = mbb;
BasicBlock::iterator I = Begin;
@@ -197,24 +195,19 @@ FastISel::SelectInstructions(BasicBlock::iterator Begin,
case Instruction::Br: {
BranchInst *BI = cast<BranchInst>(I);
// For now, check for and handle just the most trivial case: an
// unconditional fall-through branch.
if (BI->isUnconditional()) {
MachineFunction::iterator NextMBB =
MachineFunction::iterator NextMBB =
next(MachineFunction::iterator(MBB));
BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
if (NextMBB != MF.end() && MSucc == NextMBB) {
// The unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
if (NextMBB != MF.end() &&
NextMBB->getBasicBlock() == BI->getSuccessor(0)) {
MBB->addSuccessor(NextMBB);
break;
}
MBB->addSuccessor(MSucc);
break;
}
// Conditional branches are not handed yet.
// Halt "fast" selection and bail.
// Something more complicated. Halt "fast" selection and bail.
return I;
}
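Since the hunk above interleaves both versions, a note on what it does (not part of the original commit): the code that remains after this revert handles only an unconditional branch whose IR successor is the next machine block, records the successor edge, and otherwise returns the iterator so the ordinary selector takes over; the variant using MBBMap and TII.InsertBranch goes away along with the MBBMap parameter removed in the header hunks above. A stand-alone sketch of the fall-through test, using made-up stand-in types rather than the real MachineFunction and MachineBasicBlock classes:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins: each machine block remembers which IR block
    // it was created from.
    struct IRBlock { int id; };
    struct MachineBlock { const IRBlock *origin; };

    // The retained test: an unconditional branch needs no instructions when
    // its IR successor is exactly the block that will be emitted next anyway.
    bool isFallThrough(const std::vector<MachineBlock> &fn, std::size_t cur,
                       const IRBlock *successor) {
      return cur + 1 < fn.size() && fn[cur + 1].origin == successor;
    }

    int main() {
      IRBlock a = {0}, b = {1};
      MachineBlock m0 = {&a}, m1 = {&b};
      std::vector<MachineBlock> fn;
      fn.push_back(m0);
      fn.push_back(m1);
      std::cout << isFallThrough(fn, 0, &b) << "\n";  // 1: emit nothing
      std::cout << isFallThrough(fn, 1, &a) << "\n";  // 0: bail out instead
    }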

lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp

@@ -5113,7 +5113,7 @@ void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
cast<BranchInst>(LLVMBB->getTerminator())->isUnconditional()) {
if (FastISel *F = TLI.createFastISel(FuncInfo.MF)) {
Begin = F->SelectInstructions(Begin, LLVMBB->end(),
FuncInfo.ValueMap, FuncInfo.MBBMap, BB);
FuncInfo.ValueMap, BB);
// Clean up the FastISel object. TODO: Reorganize what data is
// stored in the FastISel class itself and what is merely passed
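The call above hands FastISel a range of IR instructions and resumes ordinary DAG-based selection wherever it gives up; as the FastISel.cpp hunk shows, SelectInstructions returns the iterator of the first instruction it declined to handle. A tiny sketch of that contract with made-up types (not the real SelectionDAGISel or FastISel interfaces):

    #include <iostream>
    #include <string>
    #include <vector>

    typedef std::vector<std::string>::const_iterator InstrIter;

    // Hypothetical fast path: select the easy instructions, stop at the
    // first one it does not understand, and report where it stopped.
    InstrIter selectFast(InstrIter begin, InstrIter end) {
      for (InstrIter it = begin; it != end; ++it)
        if (*it != "add" && *it != "load")  // pretend only these are easy
          return it;                        // caller resumes from here
      return end;
    }

    int main() {
      std::vector<std::string> block;
      block.push_back("add");
      block.push_back("load");
      block.push_back("call");
      InstrIter first = block.begin();
      InstrIter resume = selectFast(first, block.end());
      // The general selector would continue from 'resume' (here, "call").
      std::cout << "fast path handled " << (resume - first)
                << " instruction(s)\n";
    }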

lib/Target/X86/X86Instr64bit.td

@@ -1254,9 +1254,15 @@ def : Pat<(i64 (zext GR32:$src)),
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
// extload
def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i1 addr:$src),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV8rm addr:$src),
x86_subreg_8bit)>;
def : Pat<(extloadi64i8 addr:$src),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV8rm addr:$src),
x86_subreg_8bit)>;
def : Pat<(extloadi64i16 addr:$src),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV16rm addr:$src),
x86_subreg_16bit)>;
def : Pat<(extloadi64i32 addr:$src),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
x86_subreg_32bit)>;

lib/Target/X86/X86InstrInfo.td

@@ -2784,24 +2784,19 @@ def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
// extload bool -> extload byte
def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
(INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
x86_subreg_8bit)>;
def : Pat<(extloadi16i8 addr:$src),
(INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
x86_subreg_8bit)>;
// For extloads with 32-bit results, chose instructions that
// define the whole 32 bits of the result, to avoid partial-register
// updates.
def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>,
Requires<[In32BitMode]>;
def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>,
Requires<[In32BitMode]>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
// anyext
def : Pat<(i16 (anyext GR8:$src)),
(INSERT_SUBREG (i16 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>;
def : Pat<(i32 (anyext GR8:$src)),
(INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>;
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>,
Requires<[In32BitMode]>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>,
Requires<[In32BitMode]>;
def : Pat<(i32 (anyext GR16:$src)),
(INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;