Remove the IA-64 backend.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76920 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Dan Gohman, 2009-07-24 00:30:09 +00:00
Commit: d2cb3d2c32 (parent 5ff58b5c3a)
53 changed files with 13 additions and 4544 deletions


@ -52,7 +52,6 @@ set(LLVM_ALL_TARGETS
CBackend
CellSPU
CppBackend
IA64
Mips
MSIL
MSP430


@ -255,8 +255,6 @@
CF33BE160AF62B4200E93805 /* SmallString.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SmallString.h; sourceTree = "<group>"; };
CF341DAD0AB07A8B0099B064 /* AlphaTargetAsmInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AlphaTargetAsmInfo.h; sourceTree = "<group>"; };
CF341DAE0AB07A8B0099B064 /* AlphaTargetAsmInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AlphaTargetAsmInfo.cpp; sourceTree = "<group>"; };
CF341DE80AB07F890099B064 /* IA64TargetAsmInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IA64TargetAsmInfo.h; sourceTree = "<group>"; };
CF341DE90AB07F890099B064 /* IA64TargetAsmInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IA64TargetAsmInfo.cpp; sourceTree = "<group>"; };
CF341E010AB080220099B064 /* PPCTargetAsmInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PPCTargetAsmInfo.h; sourceTree = "<group>"; };
CF341E020AB080220099B064 /* PPCTargetAsmInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PPCTargetAsmInfo.cpp; sourceTree = "<group>"; };
CF341E220AB0814B0099B064 /* SparcTargetAsmInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SparcTargetAsmInfo.h; sourceTree = "<group>"; };
@ -315,7 +313,6 @@
CF73C0AE098A51AD00627152 /* Alarm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Alarm.h; sourceTree = "<group>"; };
CF73C0AF098A51DD00627152 /* RSProfiling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RSProfiling.h; sourceTree = "<group>"; };
CF73C0B0098A523C00627152 /* ConstantFolding.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConstantFolding.cpp; sourceTree = "<group>"; };
CF73C0B6098A53EF00627152 /* IA64Bundling.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IA64Bundling.cpp; sourceTree = "<group>"; };
CF73C0B7098A546000627152 /* RSProfiling.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RSProfiling.cpp; sourceTree = "<group>"; };
CF73C0B8098A546000627152 /* RSProfiling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RSProfiling.h; sourceTree = "<group>"; };
CF73C0B9098A546000627152 /* Reg2Mem.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Reg2Mem.cpp; sourceTree = "<group>"; };
@ -415,13 +412,6 @@
CFA702C10A6FA85F0006009A /* AlphaGenRegisterInfo.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = AlphaGenRegisterInfo.inc; sourceTree = "<group>"; };
CFA702C20A6FA85F0006009A /* AlphaGenRegisterNames.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = AlphaGenRegisterNames.inc; sourceTree = "<group>"; };
CFA702C30A6FA85F0006009A /* AlphaGenSubtarget.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = AlphaGenSubtarget.inc; sourceTree = "<group>"; };
CFA702C40A6FA8910006009A /* IA64GenAsmWriter.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = IA64GenAsmWriter.inc; sourceTree = "<group>"; };
CFA702C50A6FA8910006009A /* IA64GenDAGISel.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = IA64GenDAGISel.inc; sourceTree = "<group>"; };
CFA702C60A6FA8910006009A /* IA64GenInstrInfo.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = IA64GenInstrInfo.inc; sourceTree = "<group>"; };
CFA702C70A6FA8910006009A /* IA64GenInstrNames.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = IA64GenInstrNames.inc; sourceTree = "<group>"; };
CFA702C80A6FA8910006009A /* IA64GenRegisterInfo.h.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = IA64GenRegisterInfo.h.inc; sourceTree = "<group>"; };
CFA702C90A6FA8910006009A /* IA64GenRegisterInfo.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = IA64GenRegisterInfo.inc; sourceTree = "<group>"; };
CFA702CA0A6FA8910006009A /* IA64GenRegisterNames.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = IA64GenRegisterNames.inc; sourceTree = "<group>"; };
CFA702CB0A6FA8AD0006009A /* PPCGenAsmWriter.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = PPCGenAsmWriter.inc; sourceTree = "<group>"; };
CFA702CC0A6FA8AD0006009A /* PPCGenCodeEmitter.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; path = PPCGenCodeEmitter.inc; sourceTree = "<group>"; };
CFA702CD0A6FA8AD0006009A /* PPCGenDAGISel.inc */ = {isa = PBXFileReference; explicitFileType = sourcecode.pascal; fileEncoding = 4; path = PPCGenDAGISel.inc; sourceTree = "<group>"; };
@ -450,9 +440,6 @@
CFC244BB0959F24C009F8C47 /* X86ISelDAGToDAG.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = X86ISelDAGToDAG.cpp; sourceTree = "<group>"; };
CFC244BC0959F24C009F8C47 /* X86ISelLowering.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = X86ISelLowering.cpp; sourceTree = "<group>"; };
CFC244BD0959F24C009F8C47 /* X86ISelLowering.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = X86ISelLowering.h; sourceTree = "<group>"; };
CFC244BF0959F2E3009F8C47 /* IA64ISelDAGToDAG.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IA64ISelDAGToDAG.cpp; sourceTree = "<group>"; };
CFC244C00959F2E3009F8C47 /* IA64ISelLowering.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IA64ISelLowering.cpp; sourceTree = "<group>"; };
CFC244C10959F2E3009F8C47 /* IA64ISelLowering.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IA64ISelLowering.h; sourceTree = "<group>"; };
CFD7E4F30A798FC3000C7379 /* LinkAllCodegenComponents.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LinkAllCodegenComponents.h; sourceTree = "<group>"; };
CFD99AA80AFE827B0068D19C /* LICENSE.TXT */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = LICENSE.TXT; path = ../LICENSE.TXT; sourceTree = SOURCE_ROOT; };
CFD99AAD0AFE827B0068D19C /* README.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = README.txt; path = ../README.txt; sourceTree = SOURCE_ROOT; };
@ -682,20 +669,6 @@
DE66EEAF08ABEE5E00323D32 /* AlphaTargetMachine.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = AlphaTargetMachine.cpp; sourceTree = "<group>"; };
DE66EEB008ABEE5E00323D32 /* AlphaTargetMachine.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = AlphaTargetMachine.h; sourceTree = "<group>"; };
DE66EECA08ABEE5E00323D32 /* CTargetMachine.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = CTargetMachine.h; sourceTree = "<group>"; };
DE66EEF808ABEE5E00323D32 /* IA64.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = IA64.h; sourceTree = "<group>"; };
DE66EEF908ABEE5E00323D32 /* IA64.td */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = IA64.td; sourceTree = "<group>"; };
DE66EEFA08ABEE5E00323D32 /* IA64AsmPrinter.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = IA64AsmPrinter.cpp; sourceTree = "<group>"; };
DE66EF0108ABEE5E00323D32 /* IA64InstrBuilder.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = IA64InstrBuilder.h; sourceTree = "<group>"; };
DE66EF0208ABEE5E00323D32 /* IA64InstrFormats.td */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = IA64InstrFormats.td; sourceTree = "<group>"; };
DE66EF0308ABEE5E00323D32 /* IA64InstrInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = IA64InstrInfo.cpp; sourceTree = "<group>"; };
DE66EF0408ABEE5E00323D32 /* IA64InstrInfo.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = IA64InstrInfo.h; sourceTree = "<group>"; };
DE66EF0508ABEE5E00323D32 /* IA64InstrInfo.td */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = IA64InstrInfo.td; sourceTree = "<group>"; };
DE66EF0708ABEE5E00323D32 /* IA64MachineFunctionInfo.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = IA64MachineFunctionInfo.h; sourceTree = "<group>"; };
DE66EF0808ABEE5E00323D32 /* IA64RegisterInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = IA64RegisterInfo.cpp; sourceTree = "<group>"; };
DE66EF0908ABEE5E00323D32 /* IA64RegisterInfo.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = IA64RegisterInfo.h; sourceTree = "<group>"; };
DE66EF0A08ABEE5E00323D32 /* IA64RegisterInfo.td */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = IA64RegisterInfo.td; sourceTree = "<group>"; };
DE66EF0B08ABEE5E00323D32 /* IA64TargetMachine.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = IA64TargetMachine.cpp; sourceTree = "<group>"; };
DE66EF0C08ABEE5E00323D32 /* IA64TargetMachine.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = IA64TargetMachine.h; sourceTree = "<group>"; };
DE66EF0E08ABEE5E00323D32 /* README */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = README; sourceTree = "<group>"; };
DE66EF1008ABEE5E00323D32 /* TargetRegisterInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = TargetRegisterInfo.cpp; sourceTree = "<group>"; };
DE66F08A08ABEE6000323D32 /* Target.td */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = Target.td; sourceTree = "<group>"; };
@ -1830,7 +1803,6 @@
DE66EE9708ABEE5D00323D32 /* Alpha */,
CF8F1BCF0B64FC8A00BB4199 /* ARM */,
DE66EEC908ABEE5E00323D32 /* CBackend */,
DE66EEE508ABEE5E00323D32 /* IA64 */,
9F7794120C73CB6100551F9C /* Mips */,
DE66EF1108ABEE5E00323D32 /* PowerPC */,
DE66EF7008ABEE5F00323D32 /* Sparc */,
@ -1906,41 +1878,6 @@
path = CBackend;
sourceTree = "<group>";
};
DE66EEE508ABEE5E00323D32 /* IA64 */ = {
isa = PBXGroup;
children = (
CFA702C40A6FA8910006009A /* IA64GenAsmWriter.inc */,
CFA702C50A6FA8910006009A /* IA64GenDAGISel.inc */,
CFA702C60A6FA8910006009A /* IA64GenInstrInfo.inc */,
CFA702C70A6FA8910006009A /* IA64GenInstrNames.inc */,
CFA702C80A6FA8910006009A /* IA64GenRegisterInfo.h.inc */,
CFA702C90A6FA8910006009A /* IA64GenRegisterInfo.inc */,
CFA702CA0A6FA8910006009A /* IA64GenRegisterNames.inc */,
DE66EEF808ABEE5E00323D32 /* IA64.h */,
DE66EEF908ABEE5E00323D32 /* IA64.td */,
DE66EEFA08ABEE5E00323D32 /* IA64AsmPrinter.cpp */,
CF73C0B6098A53EF00627152 /* IA64Bundling.cpp */,
DE66EF0108ABEE5E00323D32 /* IA64InstrBuilder.h */,
DE66EF0208ABEE5E00323D32 /* IA64InstrFormats.td */,
DE66EF0308ABEE5E00323D32 /* IA64InstrInfo.cpp */,
DE66EF0408ABEE5E00323D32 /* IA64InstrInfo.h */,
DE66EF0508ABEE5E00323D32 /* IA64InstrInfo.td */,
CFC244BF0959F2E3009F8C47 /* IA64ISelDAGToDAG.cpp */,
CFC244C00959F2E3009F8C47 /* IA64ISelLowering.cpp */,
CFC244C10959F2E3009F8C47 /* IA64ISelLowering.h */,
DE66EF0708ABEE5E00323D32 /* IA64MachineFunctionInfo.h */,
DE66EF0808ABEE5E00323D32 /* IA64RegisterInfo.cpp */,
DE66EF0908ABEE5E00323D32 /* IA64RegisterInfo.h */,
DE66EF0A08ABEE5E00323D32 /* IA64RegisterInfo.td */,
CF341DE90AB07F890099B064 /* IA64TargetAsmInfo.cpp */,
CF341DE80AB07F890099B064 /* IA64TargetAsmInfo.h */,
DE66EF0B08ABEE5E00323D32 /* IA64TargetMachine.cpp */,
DE66EF0C08ABEE5E00323D32 /* IA64TargetMachine.h */,
DE66EF0E08ABEE5E00323D32 /* README */,
);
path = IA64;
sourceTree = "<group>";
};
DE66EF1108ABEE5E00323D32 /* PowerPC */ = {
isa = PBXGroup;
children = (


@ -221,7 +221,6 @@ AC_CACHE_CHECK([target architecture],[llvm_cv_target_arch],
sparc*-*) llvm_cv_target_arch="Sparc" ;;
powerpc*-*) llvm_cv_target_arch="PowerPC" ;;
alpha*-*) llvm_cv_target_arch="Alpha" ;;
ia64-*) llvm_cv_target_arch="IA64" ;;
arm*-*) llvm_cv_target_arch="ARM" ;;
mips-*) llvm_cv_target_arch="Mips" ;;
pic16-*) llvm_cv_target_arch="PIC16" ;;
@ -347,7 +346,6 @@ else
PowerPC) AC_SUBST(TARGET_HAS_JIT,1) ;;
x86_64) AC_SUBST(TARGET_HAS_JIT,1) ;;
Alpha) AC_SUBST(TARGET_HAS_JIT,1) ;;
IA64) AC_SUBST(TARGET_HAS_JIT,0) ;;
ARM) AC_SUBST(TARGET_HAS_JIT,0) ;;
Mips) AC_SUBST(TARGET_HAS_JIT,0) ;;
PIC16) AC_SUBST(TARGET_HAS_JIT,0) ;;
@ -403,7 +401,7 @@ AC_ARG_ENABLE([targets],AS_HELP_STRING([--enable-targets],
[Build specific host targets: all,host-only,{target-name} (default=all)]),,
enableval=all)
case "$enableval" in
all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha IA64 ARM Mips CellSPU PIC16 XCore MSP430 SystemZ CBackend MSIL CppBackend" ;;
all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha ARM Mips CellSPU PIC16 XCore MSP430 SystemZ CBackend MSIL CppBackend" ;;
host-only)
case "$llvm_cv_target_arch" in
x86) TARGETS_TO_BUILD="X86" ;;
@ -411,7 +409,6 @@ case "$enableval" in
Sparc) TARGETS_TO_BUILD="Sparc" ;;
PowerPC) TARGETS_TO_BUILD="PowerPC" ;;
Alpha) TARGETS_TO_BUILD="Alpha" ;;
IA64) TARGETS_TO_BUILD="IA64" ;;
ARM) TARGETS_TO_BUILD="ARM" ;;
Mips) TARGETS_TO_BUILD="Mips" ;;
CellSPU|SPU) TARGETS_TO_BUILD="CellSPU" ;;
@ -429,7 +426,6 @@ case "$enableval" in
sparc) TARGETS_TO_BUILD="Sparc $TARGETS_TO_BUILD" ;;
powerpc) TARGETS_TO_BUILD="PowerPC $TARGETS_TO_BUILD" ;;
alpha) TARGETS_TO_BUILD="Alpha $TARGETS_TO_BUILD" ;;
ia64) TARGETS_TO_BUILD="IA64 $TARGETS_TO_BUILD" ;;
arm) TARGETS_TO_BUILD="ARM $TARGETS_TO_BUILD" ;;
mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;;
spu) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;;


@ -101,8 +101,6 @@ elseif (LLVM_NATIVE_ARCH MATCHES "powerpc")
set(LLVM_NATIVE_ARCH PowerPC)
elseif (LLVM_NATIVE_ARCH MATCHES "alpha")
set(LLVM_NATIVE_ARCH Alpha)
elseif (LLVM_NATIVE_ARCH MATCHES "ia64")
set(LLVM_NATIVE_ARCH IA64)
elseif (LLVM_NATIVE_ARCH MATCHES "arm")
set(LLVM_NATIVE_ARCH ARM)
elseif (LLVM_NATIVE_ARCH MATCHES "mips")


@ -148,9 +148,6 @@ set(MSVC_LIB_DEPS_LLVMCppBackend LLVMCore LLVMCppBackendInfo LLVMSupport LLVMSys
set(MSVC_LIB_DEPS_LLVMCppBackendInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMDebugger LLVMAnalysis LLVMBitReader LLVMCore LLVMSupport LLVMSystem)
set(MSVC_LIB_DEPS_LLVMExecutionEngine LLVMCore LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMIA64AsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMIA64Info LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMIA64CodeGen LLVMCodeGen LLVMCore LLVMIA64Info LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMIA64Info LLVMSupport)
set(MSVC_LIB_DEPS_LLVMInstrumentation LLVMCore LLVMScalarOpts LLVMSupport LLVMSystem LLVMTransformUtils)
set(MSVC_LIB_DEPS_LLVMInterpreter LLVMCodeGen LLVMCore LLVMExecutionEngine LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMJIT LLVMCodeGen LLVMCore LLVMExecutionEngine LLVMSupport LLVMSystem LLVMTarget)

configure (vendored)

@ -2400,7 +2400,6 @@ else
sparc*-*) llvm_cv_target_arch="Sparc" ;;
powerpc*-*) llvm_cv_target_arch="PowerPC" ;;
alpha*-*) llvm_cv_target_arch="Alpha" ;;
ia64-*) llvm_cv_target_arch="IA64" ;;
arm*-*) llvm_cv_target_arch="ARM" ;;
mips-*) llvm_cv_target_arch="Mips" ;;
pic16-*) llvm_cv_target_arch="PIC16" ;;
@ -4842,8 +4841,6 @@ else
x86_64) TARGET_HAS_JIT=1
;;
Alpha) TARGET_HAS_JIT=1
;;
IA64) TARGET_HAS_JIT=0
;;
ARM) TARGET_HAS_JIT=0
;;
@ -4938,7 +4935,7 @@ else
fi
case "$enableval" in
all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha IA64 ARM Mips CellSPU PIC16 XCore MSP430 SystemZ CBackend MSIL CppBackend" ;;
all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha ARM Mips CellSPU PIC16 XCore MSP430 SystemZ CBackend MSIL CppBackend" ;;
host-only)
case "$llvm_cv_target_arch" in
x86) TARGETS_TO_BUILD="X86" ;;
@ -4946,7 +4943,6 @@ case "$enableval" in
Sparc) TARGETS_TO_BUILD="Sparc" ;;
PowerPC) TARGETS_TO_BUILD="PowerPC" ;;
Alpha) TARGETS_TO_BUILD="Alpha" ;;
IA64) TARGETS_TO_BUILD="IA64" ;;
ARM) TARGETS_TO_BUILD="ARM" ;;
Mips) TARGETS_TO_BUILD="Mips" ;;
CellSPU|SPU) TARGETS_TO_BUILD="CellSPU" ;;
@ -4966,7 +4962,6 @@ echo "$as_me: error: Can not set target to build" >&2;}
sparc) TARGETS_TO_BUILD="Sparc $TARGETS_TO_BUILD" ;;
powerpc) TARGETS_TO_BUILD="PowerPC $TARGETS_TO_BUILD" ;;
alpha) TARGETS_TO_BUILD="Alpha $TARGETS_TO_BUILD" ;;
ia64) TARGETS_TO_BUILD="IA64 $TARGETS_TO_BUILD" ;;
arm) TARGETS_TO_BUILD="ARM $TARGETS_TO_BUILD" ;;
mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;;
spu) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;;


@ -1380,9 +1380,9 @@ bool RegMapping_Fer::compatible_class(MachineFunction &amp;mf,
for <tt>RegisterClass</tt>, the last parameter of which is a list of
registers. Just commenting some out is one simple way to avoid them being
used. A more polite way is to explicitly exclude some registers from
the <i>allocation order</i>. See the definition of the <tt>GR</tt> register
class in <tt>lib/Target/IA64/IA64RegisterInfo.td</tt> for an example of this
(e.g., <tt>numReservedRegs</tt> registers are hidden.)</p>
the <i>allocation order</i>. See the definition of the <tt>GR8</tt> register
class in <tt>lib/Target/X86/X86RegisterInfo.td</tt> for an example of this.
</p>
<p>Virtual registers are also denoted by integer numbers. Contrary to physical
registers, different virtual registers never share the same number. The


@ -491,7 +491,6 @@ and 64-bit modes.</li>
support is available for native builds with Visual C++).</li>
<li>Sun UltraSPARC workstations running Solaris 10.</li>
<li>Alpha-based machines running Debian GNU/Linux.</li>
<li>Itanium-based (IA64) machines running Linux and HP-UX.</li>
</ul>
<p>The core LLVM infrastructure uses GNU autoconf to adapt itself
@ -540,7 +539,7 @@ components, please contact us on the <a
href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">LLVMdev list</a>.</p>
<ul>
<li>The MSIL, IA64, Alpha, SPU, MIPS, and PIC16 backends are experimental.</li>
<li>The MSIL, Alpha, SPU, MIPS, and PIC16 backends are experimental.</li>
<li>The <tt>llc</tt> "<tt>-filetype=asm</tt>" (the default) is the only
supported value for this option.</li>
</ul>
@ -652,21 +651,6 @@ appropriate nops inserted to ensure restartability.</li>
</ul>
</div>
<!-- ======================================================================= -->
<div class="doc_subsection">
<a name="ia64-be">Known problems with the IA64 back-end</a>
</div>
<div class="doc_text">
<ul>
<li>The Itanium backend is highly experimental and has a number of known
issues. We are looking for a maintainer for the Itanium backend. If you
are interested, please contact the LLVMdev mailing list.</li>
</ul>
</div>
<!-- ======================================================================= -->
<div class="doc_subsection">
<a name="c-be">Known problems with the C back-end</a>


@ -128,8 +128,6 @@
<td>Code generation for ARM architecture</td></tr>
<tr><td>LLVMCBackend</td><td><tt>.o</tt></td>
<td>'C' language code generator.</td></tr>
<tr><td>LLVMIA64</td><td><tt>.o</tt></td>
<td>Code generation for IA64 architecture</td></tr>
<tr><td>LLVMPowerPC</td><td><tt>.o</tt></td>
<td>Code generation for PowerPC architecture</td></tr>
<tr><td>LLVMSparc</td><td><tt>.o</tt></td>
@ -356,14 +354,6 @@
<li>libLLVMSystem.a</li>
<li>libLLVMTarget.a</li>
</ul></dd>
<dt><b>LLVMIA64.o</b></dt><dd><ul>
<li>libLLVMCodeGen.a</li>
<li>libLLVMCore.a</li>
<li>libLLVMSelectionDAG.a</li>
<li>libLLVMSupport.a</li>
<li>libLLVMSystem.a</li>
<li>libLLVMTarget.a</li>
</ul></dd>
<dt><b>LLVMInterpreter.o</b></dt><dd><ul>
<li>LLVMExecutionEngine.o</li>
<li>libLLVMCodeGen.a</li>


@ -265,10 +265,6 @@ namespace llvm {
/// FunctionAddrPrefix/Suffix - If these are nonempty, these strings
/// will enclose any GlobalVariable that points to a function.
/// For example, this is used by the IA64 backend to materialize
/// function descriptors, by decorating the ".data8" object with the
/// @verbatim @fptr( ) @endverbatim
/// link-relocation operator.
///
const char *FunctionAddrPrefix; // Defaults to ""
const char *FunctionAddrSuffix; // Defaults to ""
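For reference, the comment removed above describes the one notable user of these two hooks: the IA-64 printer wrapped function addresses stored in data objects with the @fptr( ) link-relocation operator. A minimal standalone sketch of that wrapping idea, using a toy struct rather than the real TargetAsmInfo API (names and the Itanium field values are illustrative, following the @fptr( ) decoration the comment mentions):

#include <iostream>
#include <string>

// Toy stand-in for the two fields declared above; not the real LLVM class.
struct ToyAsmInfo {
  const char *FunctionAddrPrefix;
  const char *FunctionAddrSuffix;
};

// Emit a data directive holding the address of function FnName, wrapping the
// symbol with the target's prefix/suffix (both empty on most targets).
static void emitFunctionAddr(std::ostream &OS, const ToyAsmInfo &TAI,
                             const std::string &FnName) {
  OS << "\t.data8\t" << TAI.FunctionAddrPrefix << FnName
     << TAI.FunctionAddrSuffix << '\n';
}

int main() {
  ToyAsmInfo Generic = {"", ""};        // most targets:  .data8 foo
  ToyAsmInfo Itanium = {"@fptr(", ")"}; // IA-64 style:   .data8 @fptr(foo)
  emitFunctionAddr(std::cout, Generic, "foo");
  emitFunctionAddr(std::cout, Itanium, "foo");
  return 0;
}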


@ -1,9 +0,0 @@
include_directories(
${CMAKE_CURRENT_BINARY_DIR}/..
${CMAKE_CURRENT_SOURCE_DIR}/..
)
add_llvm_library(LLVMIA64AsmPrinter
IA64AsmPrinter.cpp
)
add_dependencies(LLVMIA64AsmPrinter IA64CodeGenTable_gen)


@ -1,377 +0,0 @@
//===-- IA64AsmPrinter.cpp - Print out IA64 LLVM as assembly --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
// of machine-dependent LLVM code to assembly accepted by the GNU binutils 'gas'
// assembler. The Intel 'ias' and HP-UX 'as' assemblers *may* choke on this
// output, but if so that's a bug I'd like to hear about: please file a bug
// report in bugzilla. FYI, the not too bad 'ias' assembler is bundled with
// the Intel C/C++ compiler for Itanium Linux.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "asm-printer"
#include "IA64.h"
#include "IA64TargetMachine.h"
#include "llvm/Module.h"
#include "llvm/MDNode.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/Mangler.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(EmittedInsts, "Number of machine instrs printed");
namespace {
class IA64AsmPrinter : public AsmPrinter {
std::set<std::string> ExternalFunctionNames, ExternalObjectNames;
public:
explicit IA64AsmPrinter(formatted_raw_ostream &O, TargetMachine &TM,
const TargetAsmInfo *T, bool V)
: AsmPrinter(O, TM, T, V) {}
virtual const char *getPassName() const {
return "IA64 Assembly Printer";
}
/// printInstruction - This method is automatically generated by tablegen
/// from the instruction set description. This method returns true if the
/// machine instruction was sufficiently described to print it, otherwise it
/// returns false.
bool printInstruction(const MachineInstr *MI);
// This method is used by the tablegen'erated instruction printer.
void printOperand(const MachineInstr *MI, unsigned OpNo){
const MachineOperand &MO = MI->getOperand(OpNo);
if (MO.getType() == MachineOperand::MO_Register) {
assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
"Not physref??");
//XXX Bug Workaround: See note in Printer::doInitialization about %.
O << TM.getRegisterInfo()->get(MO.getReg()).AsmName;
} else {
printOp(MO);
}
}
void printS8ImmOperand(const MachineInstr *MI, unsigned OpNo) {
int val=(unsigned int)MI->getOperand(OpNo).getImm();
if(val>=128) val=val-256; // if negative, flip sign
O << val;
}
void printS14ImmOperand(const MachineInstr *MI, unsigned OpNo) {
int val=(unsigned int)MI->getOperand(OpNo).getImm();
if(val>=8192) val=val-16384; // if negative, flip sign
O << val;
}
void printS22ImmOperand(const MachineInstr *MI, unsigned OpNo) {
int val=(unsigned int)MI->getOperand(OpNo).getImm();
if(val>=2097152) val=val-4194304; // if negative, flip sign
O << val;
}
void printU64ImmOperand(const MachineInstr *MI, unsigned OpNo) {
O << (uint64_t)MI->getOperand(OpNo).getImm();
}
void printS64ImmOperand(const MachineInstr *MI, unsigned OpNo) {
// XXX : nasty hack to avoid GPREL22 "relocation truncated to fit" linker
// errors - instead of add rX = @gprel(CPI<whatever>), r1;; we now
// emit movl rX = @gprel(CPI<whatever);;
// add rX = rX, r1;
// this gives us 64 bits instead of 22 (for the add long imm) to play
// with, which shuts up the linker. The problem is that the constant
// pool entries aren't immediates at this stage, so we check here.
// If it's an immediate, print it the old fashioned way. If it's
// not, we print it as a constant pool index.
if (MI->getOperand(OpNo).isImm()) {
O << (int64_t)MI->getOperand(OpNo).getImm();
} else { // this is a constant pool reference: FIXME: assert this
printOp(MI->getOperand(OpNo));
}
}
void printGlobalOperand(const MachineInstr *MI, unsigned OpNo) {
printOp(MI->getOperand(OpNo), false); // this is NOT a br.call instruction
}
void printCallOperand(const MachineInstr *MI, unsigned OpNo) {
printOp(MI->getOperand(OpNo), true); // this is a br.call instruction
}
void printMachineInstruction(const MachineInstr *MI);
void printOp(const MachineOperand &MO, bool isBRCALLinsn= false);
void PrintGlobalVariable(const GlobalVariable *GVar);
bool runOnMachineFunction(MachineFunction &F);
bool doInitialization(Module &M);
bool doFinalization(Module &M);
};
} // end of anonymous namespace
// Include the auto-generated portion of the assembly writer.
#include "IA64GenAsmWriter.inc"
/// runOnMachineFunction - This uses the printMachineInstruction()
/// method to print assembly for each instruction.
///
bool IA64AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
this->MF = &MF;
SetupMachineFunction(MF);
O << "\n\n";
// Print out constants referenced by the function
EmitConstantPool(MF.getConstantPool());
const Function *F = MF.getFunction();
SwitchToSection(TAI->SectionForGlobal(F));
// Print out labels for the function.
EmitAlignment(MF.getAlignment());
O << "\t.global\t" << CurrentFnName << '\n';
printVisibility(CurrentFnName, F->getVisibility());
O << "\t.type\t" << CurrentFnName << ", @function\n";
O << CurrentFnName << ":\n";
// Print out code for the function.
for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
I != E; ++I) {
// Print a label for the basic block if there are any predecessors.
if (!I->pred_empty()) {
printBasicBlockLabel(I, true, true);
O << '\n';
}
for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end();
II != E; ++II) {
// Print the assembly for the instruction.
printMachineInstruction(II);
}
}
// We didn't modify anything.
return false;
}
void IA64AsmPrinter::printOp(const MachineOperand &MO,
bool isBRCALLinsn /* = false */) {
const TargetRegisterInfo &RI = *TM.getRegisterInfo();
switch (MO.getType()) {
case MachineOperand::MO_Register:
O << RI.get(MO.getReg()).AsmName;
return;
case MachineOperand::MO_Immediate:
O << MO.getImm();
return;
case MachineOperand::MO_MachineBasicBlock:
printBasicBlockLabel(MO.getMBB());
return;
case MachineOperand::MO_ConstantPoolIndex: {
O << "@gprel(" << TAI->getPrivateGlobalPrefix()
<< "CPI" << getFunctionNumber() << "_" << MO.getIndex() << ")";
return;
}
case MachineOperand::MO_GlobalAddress: {
// functions need @ltoff(@fptr(fn_name)) form
GlobalValue *GV = MO.getGlobal();
Function *F = dyn_cast<Function>(GV);
bool Needfptr=false; // if we're computing an address @ltoff(X), do
// we need to decorate it so it becomes
// @ltoff(@fptr(X)) ?
if (F && !isBRCALLinsn /*&& F->isDeclaration()*/)
Needfptr=true;
// if this is the target of a call instruction, we should define
// the function somewhere (GNU gas has no problem without this, but
// Intel ias rightly complains of an 'undefined symbol')
if (F /*&& isBRCALLinsn*/ && F->isDeclaration())
ExternalFunctionNames.insert(Mang->getMangledName(MO.getGlobal()));
else
if (GV->isDeclaration()) // e.g. stuff like 'stdin'
ExternalObjectNames.insert(Mang->getMangledName(MO.getGlobal()));
if (!isBRCALLinsn)
O << "@ltoff(";
if (Needfptr)
O << "@fptr(";
O << Mang->getMangledName(MO.getGlobal());
if (Needfptr && !isBRCALLinsn)
O << "#))"; // close both fptr( and ltoff(
else {
if (Needfptr)
O << "#)"; // close only fptr(
if (!isBRCALLinsn)
O << "#)"; // close only ltoff(
}
int Offset = MO.getOffset();
if (Offset > 0)
O << " + " << Offset;
else if (Offset < 0)
O << " - " << -Offset;
return;
}
case MachineOperand::MO_ExternalSymbol:
O << MO.getSymbolName();
ExternalFunctionNames.insert(MO.getSymbolName());
return;
default:
O << "<AsmPrinter: unknown operand type: " << MO.getType() << " >"; return;
}
}
/// printMachineInstruction -- Print out a single IA64 LLVM instruction
/// MI to the current output stream.
///
void IA64AsmPrinter::printMachineInstruction(const MachineInstr *MI) {
++EmittedInsts;
// Call the autogenerated instruction printer routines.
printInstruction(MI);
}
bool IA64AsmPrinter::doInitialization(Module &M) {
bool Result = AsmPrinter::doInitialization(M);
O << "\n.ident \"LLVM-ia64\"\n\n"
<< "\t.psr lsb\n" // should be "msb" on HP-UX, for starters
<< "\t.radix C\n"
<< "\t.psr abi64\n"; // we only support 64 bits for now
return Result;
}
void IA64AsmPrinter::PrintGlobalVariable(const GlobalVariable *GVar) {
const TargetData *TD = TM.getTargetData();
if (!GVar->hasInitializer())
return; // External global require no code
// Check to see if this is a special global used by LLVM, if so, emit it.
if (EmitSpecialLLVMGlobal(GVar))
return;
O << "\n\n";
std::string name = Mang->getMangledName(GVar);
Constant *C = GVar->getInitializer();
if (isa<MDNode>(C) || isa<MDString>(C))
return;
unsigned Size = TD->getTypeAllocSize(C->getType());
unsigned Align = TD->getPreferredAlignmentLog(GVar);
printVisibility(name, GVar->getVisibility());
SwitchToSection(TAI->SectionForGlobal(GVar));
if (C->isNullValue() && !GVar->hasSection()) {
if (!GVar->isThreadLocal() &&
(GVar->hasLocalLinkage() || GVar->isWeakForLinker())) {
if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
if (GVar->hasLocalLinkage()) {
O << "\t.lcomm " << name << "#," << Size
<< ',' << (1 << Align);
O << '\n';
} else {
O << "\t.common " << name << "#," << Size
<< ',' << (1 << Align);
O << '\n';
}
return;
}
}
switch (GVar->getLinkage()) {
case GlobalValue::LinkOnceAnyLinkage:
case GlobalValue::LinkOnceODRLinkage:
case GlobalValue::CommonLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
// Nonnull linkonce -> weak
O << "\t.weak " << name << '\n';
break;
case GlobalValue::AppendingLinkage:
// FIXME: appending linkage variables should go into a section of
// their name or something. For now, just emit them as external.
case GlobalValue::ExternalLinkage:
// If external or appending, declare as a global symbol
O << TAI->getGlobalDirective() << name << '\n';
// FALL THROUGH
case GlobalValue::InternalLinkage:
case GlobalValue::PrivateLinkage:
case GlobalValue::LinkerPrivateLinkage:
break;
case GlobalValue::GhostLinkage:
llvm_unreachable("GhostLinkage cannot appear in IA64AsmPrinter!");
case GlobalValue::DLLImportLinkage:
llvm_unreachable("DLLImport linkage is not supported by this target!");
case GlobalValue::DLLExportLinkage:
llvm_unreachable("DLLExport linkage is not supported by this target!");
default:
llvm_unreachable("Unknown linkage type!");
}
EmitAlignment(Align, GVar);
if (TAI->hasDotTypeDotSizeDirective()) {
O << "\t.type " << name << ",@object\n";
O << "\t.size " << name << ',' << Size << '\n';
}
O << name << ":\n";
EmitGlobalConstant(C);
}
bool IA64AsmPrinter::doFinalization(Module &M) {
// we print out ".global X \n .type X, @function" for each external function
O << "\n\n// br.call targets referenced (and not defined) above: \n";
for (std::set<std::string>::iterator i = ExternalFunctionNames.begin(),
e = ExternalFunctionNames.end(); i!=e; ++i) {
O << "\t.global " << *i << "\n\t.type " << *i << ", @function\n";
}
O << "\n\n";
// we print out ".global X \n .type X, @object" for each external object
O << "\n\n// (external) symbols referenced (and not defined) above: \n";
for (std::set<std::string>::iterator i = ExternalObjectNames.begin(),
e = ExternalObjectNames.end(); i!=e; ++i) {
O << "\t.global " << *i << "\n\t.type " << *i << ", @object\n";
}
O << "\n\n";
return AsmPrinter::doFinalization(M);
}
/// createIA64CodePrinterPass - Returns a pass that prints the IA64
/// assembly code for a MachineFunction to the given output stream, using
/// the given target machine description.
///
FunctionPass *llvm::createIA64CodePrinterPass(formatted_raw_ostream &o,
TargetMachine &tm,
bool verbose) {
return new IA64AsmPrinter(o, tm, tm.getTargetAsmInfo(), verbose);
}
// Force static initialization.
extern "C" void LLVMInitializeIA64AsmPrinter() {
TargetRegistry::RegisterAsmPrinter(TheIA64Target, createIA64CodePrinterPass);
}
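The printS8ImmOperand, printS14ImmOperand, and printS22ImmOperand helpers above sign-extend a zero-extended immediate by hand, subtracting 2^N once the value reaches 2^(N-1). A standalone sketch of the same N-bit sign extension done with a shift pair instead of per-width constants (illustrative only, not part of the removed code):

#include <cstdint>
#include <iostream>

// Sign-extend the low Bits bits of Val. Equivalent to the manual
// "if (val >= 1 << (Bits - 1)) val -= 1 << Bits" adjustments above.
static int64_t signExtend(uint64_t Val, unsigned Bits) {
  unsigned Shift = 64 - Bits;
  return static_cast<int64_t>(Val << Shift) >> Shift;
}

int main() {
  std::cout << signExtend(200, 8) << '\n';      // -56, as in printS8ImmOperand
  std::cout << signExtend(8192, 14) << '\n';    // -8192
  std::cout << signExtend(4194303, 22) << '\n'; // -1
  return 0;
}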


@ -1,17 +0,0 @@
##===- lib/Target/IA64/AsmPrinter/Makefile -----------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
LEVEL = ../../../..
LIBRARYNAME = LLVMIA64AsmPrinter
# Hack: we need to include 'main' IA64 target directory to grab
# private headers
CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common


@ -1,22 +0,0 @@
set(LLVM_TARGET_DEFINITIONS IA64.td)
tablegen(IA64GenRegisterInfo.h.inc -gen-register-desc-header)
tablegen(IA64GenRegisterNames.inc -gen-register-enums)
tablegen(IA64GenRegisterInfo.inc -gen-register-desc)
tablegen(IA64GenInstrNames.inc -gen-instr-enums)
tablegen(IA64GenInstrInfo.inc -gen-instr-desc)
tablegen(IA64GenAsmWriter.inc -gen-asm-writer)
tablegen(IA64GenDAGISel.inc -gen-dag-isel)
add_llvm_target(IA64CodeGen
IA64Bundling.cpp
IA64InstrInfo.cpp
IA64ISelDAGToDAG.cpp
IA64ISelLowering.cpp
IA64RegisterInfo.cpp
IA64Subtarget.cpp
IA64TargetAsmInfo.cpp
IA64TargetMachine.cpp
)
target_link_libraries (LLVMIA64CodeGen LLVMSelectionDAG)


@ -1,59 +0,0 @@
//===-- IA64.h - Top-level interface for IA64 representation ------*- C++ -*-===//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the entry points for global functions defined in the IA64
// target library, as used by the LLVM JIT.
//
//===----------------------------------------------------------------------===//
#ifndef TARGET_IA64_H
#define TARGET_IA64_H
#include "llvm/Target/TargetMachine.h"
namespace llvm {
class IA64TargetMachine;
class FunctionPass;
class formatted_raw_ostream;
/// createIA64DAGToDAGInstructionSelector - This pass converts an LLVM
/// function into IA64 machine code in a sane, DAG->DAG transform.
///
FunctionPass *createIA64DAGToDAGInstructionSelector(IA64TargetMachine &TM);
/// createIA64BundlingPass - This pass adds stop bits and bundles
/// instructions.
///
FunctionPass *createIA64BundlingPass(IA64TargetMachine &TM);
/// createIA64CodePrinterPass - Returns a pass that prints the IA64
/// assembly code for a MachineFunction to the given output stream,
/// using the given target machine description. This should work
/// regardless of whether the function is in SSA form.
///
FunctionPass *createIA64CodePrinterPass(formatted_raw_ostream &o,
TargetMachine &tm,
bool verbose);
extern Target TheIA64Target;
} // End llvm namespace
// Defines symbolic names for IA64 registers. This defines a mapping from
// register name to register number.
//
#include "IA64GenRegisterNames.inc"
// Defines symbolic names for the IA64 instructions.
//
#include "IA64GenInstrNames.inc"
#endif


@ -1,39 +0,0 @@
//===-- IA64.td - Target definition file for Intel IA64 -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a target description file for the Intel IA64 architecture,
// also known variously as ia64, IA-64, IPF, "the Itanium architecture" etc.
//
//===----------------------------------------------------------------------===//
// Get the target-independent interfaces which we are implementing...
//
include "llvm/Target/Target.td"
//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//
include "IA64RegisterInfo.td"
//===----------------------------------------------------------------------===//
// Instruction Descriptions
//===----------------------------------------------------------------------===//
include "IA64InstrInfo.td"
def IA64InstrInfo : InstrInfo { }
def IA64 : Target {
// Our instruction set
let InstructionSet = IA64InstrInfo;
}


@ -1,118 +0,0 @@
//===-- IA64Bundling.cpp - IA-64 instruction bundling pass. ------------ --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Add stops where required to prevent read-after-write and write-after-write
// dependencies, for both registers and memory addresses. There are exceptions:
//
// - Compare instructions (cmp*, tbit, tnat, fcmp, frcpa) are OK with
// WAW dependencies so long as they all target p0, or are of parallel
// type (.and*/.or*)
//
// FIXME: bundling, for now, is left to the assembler.
// FIXME: this might be an appropriate place to translate between different
// instructions that do the same thing, if this helps bundling.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "ia64-codegen"
#include "IA64.h"
#include "IA64InstrInfo.h"
#include "IA64TargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include <set>
using namespace llvm;
STATISTIC(StopBitsAdded, "Number of stop bits added");
namespace {
struct IA64BundlingPass : public MachineFunctionPass {
static char ID;
/// Target machine description which we query for reg. names, data
/// layout, etc.
///
IA64TargetMachine &TM;
IA64BundlingPass(IA64TargetMachine &tm)
: MachineFunctionPass(&ID), TM(tm) { }
virtual const char *getPassName() const {
return "IA64 (Itanium) Bundling Pass";
}
bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
bool runOnMachineFunction(MachineFunction &F) {
bool Changed = false;
for (MachineFunction::iterator FI = F.begin(), FE = F.end();
FI != FE; ++FI)
Changed |= runOnMachineBasicBlock(*FI);
return Changed;
}
// XXX: ugly global, but pending writes can cross basic blocks. Note that
// taken branches end instruction groups. So we only need to worry about
// 'fallthrough' code
std::set<unsigned> PendingRegWrites;
};
char IA64BundlingPass::ID = 0;
} // end of anonymous namespace
/// createIA64BundlingPass - Returns a pass that adds STOP (;;) instructions
/// and arranges the result into bundles.
///
FunctionPass *llvm::createIA64BundlingPass(IA64TargetMachine &tm) {
return new IA64BundlingPass(tm);
}
/// runOnMachineBasicBlock - add stops and bundle this MBB.
///
bool IA64BundlingPass::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
MachineInstr *CurrentInsn = I++;
std::set<unsigned> CurrentReads, CurrentWrites, OrigWrites;
for(unsigned i=0; i < CurrentInsn->getNumOperands(); i++) {
MachineOperand &MO=CurrentInsn->getOperand(i);
if (MO.isReg()) {
if(MO.isUse()) { // TODO: exclude p0
CurrentReads.insert(MO.getReg());
}
if(MO.isDef()) { // TODO: exclude p0
CurrentWrites.insert(MO.getReg());
OrigWrites.insert(MO.getReg()); // FIXME: use a nondestructive
// set_intersect instead?
}
}
}
// CurrentReads/CurrentWrites contain info for the current instruction.
// Does it read or write any registers that are pending a write?
// (i.e. not separated by a stop)
set_intersect(CurrentReads, PendingRegWrites);
set_intersect(CurrentWrites, PendingRegWrites);
if(! (CurrentReads.empty() && CurrentWrites.empty()) ) {
// there is a conflict, insert a stop and reset PendingRegWrites
CurrentInsn = BuildMI(MBB, CurrentInsn, CurrentInsn->getDebugLoc(),
TM.getInstrInfo()->get(IA64::STOP), 0);
PendingRegWrites=OrigWrites; // carry over current writes to next insn
Changed=true; StopBitsAdded++; // update stats
} else { // otherwise, track additional pending writes
set_union(PendingRegWrites, OrigWrites);
}
} // onto the next insn in the MBB
return Changed;
}
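The pass above boils down to simple bookkeeping: remember every register written since the last stop, and emit a stop as soon as the current instruction reads or writes one of those registers. A minimal sketch of that logic over a made-up instruction record, detached from the MachineFunction machinery (the types and fields here are invented for illustration):

#include <cstdio>
#include <set>
#include <vector>

// Invented, simplified instruction record: just register reads and writes.
struct ToyInsn {
  std::set<unsigned> Reads, Writes;
};

// Return the positions at which a stop (";;") must be inserted before the
// instruction, mirroring the PendingRegWrites logic of the pass above.
static std::vector<unsigned> placeStops(const std::vector<ToyInsn> &Block) {
  std::vector<unsigned> Stops;
  std::set<unsigned> Pending; // registers written since the last stop
  for (unsigned i = 0; i < Block.size(); ++i) {
    const ToyInsn &I = Block[i];
    bool Conflict = false;
    for (unsigned R : I.Reads)
      Conflict |= Pending.count(R) != 0;  // read-after-write
    for (unsigned R : I.Writes)
      Conflict |= Pending.count(R) != 0;  // write-after-write
    if (Conflict) {
      Stops.push_back(i);                 // stop goes right before instruction i
      Pending.clear();
    }
    Pending.insert(I.Writes.begin(), I.Writes.end());
  }
  return Stops;
}

int main() {
  // r1 = ...; then r2 = f(r1): needs a stop between the two instructions.
  std::vector<ToyInsn> Block = {{{}, {1}}, {{1}, {2}}};
  for (unsigned Pos : placeStops(Block))
    std::printf("insert ;; before instruction %u\n", Pos);
  return 0;
}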


@ -1,577 +0,0 @@
//===---- IA64ISelDAGToDAG.cpp - IA64 pattern matching inst selector ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for IA64,
// converting a legalized dag to an IA64 dag.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "ia64-codegen"
#include "IA64.h"
#include "IA64TargetMachine.h"
#include "IA64ISelLowering.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Constants.h"
#include "llvm/GlobalValue.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
//===--------------------------------------------------------------------===//
/// IA64DAGToDAGISel - IA64 specific code to select IA64 machine
/// instructions for SelectionDAG operations.
///
class IA64DAGToDAGISel : public SelectionDAGISel {
unsigned GlobalBaseReg;
public:
explicit IA64DAGToDAGISel(IA64TargetMachine &TM)
: SelectionDAGISel(TM) {}
virtual bool runOnFunction(Function &Fn) {
// Make sure we re-emit a set of the global base reg if necessary
GlobalBaseReg = 0;
return SelectionDAGISel::runOnFunction(Fn);
}
/// getI64Imm - Return a target constant with the specified value, of type
/// i64.
inline SDValue getI64Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i64);
}
/// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
/// base register. Return the virtual register that holds this value.
// SDValue getGlobalBaseReg(); TODO: hmm
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
SDNode *Select(SDValue N);
SDNode *SelectIntImmediateExpr(SDValue LHS, SDValue RHS,
unsigned OCHi, unsigned OCLo,
bool IsArithmetic = false,
bool Negate = false);
SDNode *SelectBitfieldInsert(SDNode *N);
/// SelectCC - Select a comparison of the specified values with the
/// specified condition code, returning the CR# of the expression.
SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC);
/// SelectAddr - Given the specified address, return the two operands for a
/// load/store instruction, and return true if it should be an indexed [r+r]
/// operation.
bool SelectAddr(SDValue Addr, SDValue &Op1, SDValue &Op2);
/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
virtual void InstructionSelect();
virtual const char *getPassName() const {
return "IA64 (Itanium) DAG->DAG Instruction Selector";
}
// Include the pieces autogenerated from the target description.
#include "IA64GenDAGISel.inc"
private:
SDNode *SelectDIV(SDValue Op);
};
}
/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
void IA64DAGToDAGISel::InstructionSelect() {
DEBUG(BB->dump());
// Select target instructions for the DAG.
SelectRoot(*CurDAG);
CurDAG->RemoveDeadNodes();
}
SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) {
SDNode *N = Op.getNode();
SDValue Chain = N->getOperand(0);
SDValue Tmp1 = N->getOperand(0);
SDValue Tmp2 = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
bool isFP=false;
if(Tmp1.getValueType().isFloatingPoint())
isFP=true;
bool isModulus=false; // is it a division or a modulus?
bool isSigned=false;
switch(N->getOpcode()) {
case ISD::FDIV:
case ISD::SDIV: isModulus=false; isSigned=true; break;
case ISD::UDIV: isModulus=false; isSigned=false; break;
case ISD::FREM:
case ISD::SREM: isModulus=true; isSigned=true; break;
case ISD::UREM: isModulus=true; isSigned=false; break;
}
// TODO: check for integer divides by powers of 2 (or other simple patterns?)
SDValue TmpPR, TmpPR2;
SDValue TmpF1, TmpF2, TmpF3, TmpF4, TmpF5, TmpF6, TmpF7, TmpF8;
SDValue TmpF9, TmpF10,TmpF11,TmpF12,TmpF13,TmpF14,TmpF15;
SDNode *Result;
// we'll need copies of F0 and F1
SDValue F0 = CurDAG->getRegister(IA64::F0, MVT::f64);
SDValue F1 = CurDAG->getRegister(IA64::F1, MVT::f64);
// OK, emit some code:
if(!isFP) {
// first, load the inputs into FP regs.
TmpF1 =
SDValue(CurDAG->getTargetNode(IA64::SETFSIG, dl, MVT::f64, Tmp1), 0);
Chain = TmpF1.getValue(1);
TmpF2 =
SDValue(CurDAG->getTargetNode(IA64::SETFSIG, dl, MVT::f64, Tmp2), 0);
Chain = TmpF2.getValue(1);
// next, convert the inputs to FP
if(isSigned) {
TmpF3 =
SDValue(CurDAG->getTargetNode(IA64::FCVTXF, dl, MVT::f64, TmpF1), 0);
Chain = TmpF3.getValue(1);
TmpF4 =
SDValue(CurDAG->getTargetNode(IA64::FCVTXF, dl, MVT::f64, TmpF2), 0);
Chain = TmpF4.getValue(1);
} else { // is unsigned
TmpF3 =
SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, dl, MVT::f64, TmpF1),
0);
Chain = TmpF3.getValue(1);
TmpF4 =
SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, dl, MVT::f64, TmpF2),
0);
Chain = TmpF4.getValue(1);
}
} else { // this is an FP divide/remainder, so we 'leak' some temp
// regs and assign TmpF3=Tmp1, TmpF4=Tmp2
TmpF3=Tmp1;
TmpF4=Tmp2;
}
// we start by computing an approximate reciprocal (good to 9 bits?)
// note, this instruction writes _both_ TmpF5 (answer) and TmpPR (predicate)
if(isFP)
TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS0, dl, MVT::f64,
MVT::i1, TmpF3, TmpF4), 0);
else
TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS1, dl, MVT::f64,
MVT::i1, TmpF3, TmpF4), 0);
TmpPR = TmpF5.getValue(1);
Chain = TmpF5.getValue(2);
SDValue minusB;
if(isModulus) { // for remainders, it'll be handy to have
// copies of -input_b
minusB = SDValue(CurDAG->getTargetNode(IA64::SUB, dl, MVT::i64,
CurDAG->getRegister(IA64::r0, MVT::i64), Tmp2), 0);
Chain = minusB.getValue(1);
}
SDValue TmpE0, TmpY1, TmpE1, TmpY2;
SDValue OpsE0[] = { TmpF4, TmpF5, F1, TmpPR };
TmpE0 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, dl, MVT::f64,
OpsE0, 4), 0);
Chain = TmpE0.getValue(1);
SDValue OpsY1[] = { TmpF5, TmpE0, TmpF5, TmpPR };
TmpY1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64,
OpsY1, 4), 0);
Chain = TmpY1.getValue(1);
SDValue OpsE1[] = { TmpE0, TmpE0, F0, TmpPR };
TmpE1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64,
OpsE1, 4), 0);
Chain = TmpE1.getValue(1);
SDValue OpsY2[] = { TmpY1, TmpE1, TmpY1, TmpPR };
TmpY2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64,
OpsY2, 4), 0);
Chain = TmpY2.getValue(1);
if(isFP) { // if this is an FP divide, we finish up here and exit early
if(isModulus)
llvm_unreachable("Sorry, try another FORTRAN compiler.");
SDValue TmpE2, TmpY3, TmpQ0, TmpR0;
SDValue OpsE2[] = { TmpE1, TmpE1, F0, TmpPR };
TmpE2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64,
OpsE2, 4), 0);
Chain = TmpE2.getValue(1);
SDValue OpsY3[] = { TmpY2, TmpE2, TmpY2, TmpPR };
TmpY3 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64,
OpsY3, 4), 0);
Chain = TmpY3.getValue(1);
SDValue OpsQ0[] = { Tmp1, TmpY3, F0, TmpPR };
TmpQ0 =
SDValue(CurDAG->getTargetNode(IA64::CFMADS1, dl, // double prec!
MVT::f64, OpsQ0, 4), 0);
Chain = TmpQ0.getValue(1);
SDValue OpsR0[] = { Tmp2, TmpQ0, Tmp1, TmpPR };
TmpR0 =
SDValue(CurDAG->getTargetNode(IA64::CFNMADS1, dl, // double prec!
MVT::f64, OpsR0, 4), 0);
Chain = TmpR0.getValue(1);
// we want Result to have the same target register as the frcpa, so
// we two-address hack it. See the comment "for this to work..." on
// page 48 of Intel application note #245415
SDValue Ops[] = { TmpF5, TmpY3, TmpR0, TmpQ0, TmpPR };
Result = CurDAG->getTargetNode(IA64::TCFMADS0, dl, // d.p. s0 rndg!
MVT::f64, Ops, 5);
Chain = SDValue(Result, 1);
return Result; // XXX: early exit!
} else { // this is *not* an FP divide, so there's a bit left to do:
SDValue TmpQ2, TmpR2, TmpQ3, TmpQ;
SDValue OpsQ2[] = { TmpF3, TmpY2, F0, TmpPR };
TmpQ2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64,
OpsQ2, 4), 0);
Chain = TmpQ2.getValue(1);
SDValue OpsR2[] = { TmpF4, TmpQ2, TmpF3, TmpPR };
TmpR2 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, dl, MVT::f64,
OpsR2, 4), 0);
Chain = TmpR2.getValue(1);
// we want TmpQ3 to have the same target register as the frcpa? maybe we
// should two-address hack it. See the comment "for this to work..." on page
// 48 of Intel application note #245415
SDValue OpsQ3[] = { TmpF5, TmpR2, TmpY2, TmpQ2, TmpPR };
TmpQ3 = SDValue(CurDAG->getTargetNode(IA64::TCFMAS1, dl, MVT::f64,
OpsQ3, 5), 0);
Chain = TmpQ3.getValue(1);
// STORY: without these two-address instructions (TCFMAS1 and TCFMADS0)
// the FPSWA won't be able to help out in the case of large/tiny
// arguments. Other fun bugs may also appear, e.g. 0/x = x, not 0.
if(isSigned)
TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXTRUNCS1, dl,
MVT::f64, TmpQ3), 0);
else
TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXUTRUNCS1, dl,
MVT::f64, TmpQ3), 0);
Chain = TmpQ.getValue(1);
if(isModulus) {
SDValue FPminusB =
SDValue(CurDAG->getTargetNode(IA64::SETFSIG, dl, MVT::f64, minusB),
0);
Chain = FPminusB.getValue(1);
SDValue Remainder =
SDValue(CurDAG->getTargetNode(IA64::XMAL, dl, MVT::f64,
TmpQ, FPminusB, TmpF1), 0);
Chain = Remainder.getValue(1);
Result = CurDAG->getTargetNode(IA64::GETFSIG, dl, MVT::i64, Remainder);
Chain = SDValue(Result, 1);
} else { // just an integer divide
Result = CurDAG->getTargetNode(IA64::GETFSIG, dl, MVT::i64, TmpQ);
Chain = SDValue(Result, 1);
}
return Result;
} // wasn't an FP divide
}
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
SDNode *IA64DAGToDAGISel::Select(SDValue Op) {
SDNode *N = Op.getNode();
if (N->isMachineOpcode())
return NULL; // Already selected.
DebugLoc dl = Op.getDebugLoc();
switch (N->getOpcode()) {
default: break;
case IA64ISD::BRCALL: { // XXX: this is also a hack!
SDValue Chain = N->getOperand(0);
SDValue InFlag; // Null incoming flag value.
if(N->getNumOperands()==3) { // we have an incoming chain, callee and flag
InFlag = N->getOperand(2);
}
unsigned CallOpcode;
SDValue CallOperand;
// if we can call directly, do so
if (GlobalAddressSDNode *GASD =
dyn_cast<GlobalAddressSDNode>(N->getOperand(1))) {
CallOpcode = IA64::BRCALL_IPREL_GA;
CallOperand = CurDAG->getTargetGlobalAddress(GASD->getGlobal(), MVT::i64);
} else if (isa<ExternalSymbolSDNode>(N->getOperand(1))) {
// FIXME: we currently NEED this case for correctness, to avoid
// "non-pic code with imm reloc.n against dynamic symbol" errors
CallOpcode = IA64::BRCALL_IPREL_ES;
CallOperand = N->getOperand(1);
} else {
// otherwise we need to load the function descriptor,
// load the branch target (function)'s entry point and GP,
// branch (call) then restore the GP
SDValue FnDescriptor = N->getOperand(1);
// load the branch target's entry point [mem] and
// GP value [mem+8]
SDValue targetEntryPoint=
SDValue(CurDAG->getTargetNode(IA64::LD8, dl, MVT::i64, MVT::Other,
FnDescriptor, CurDAG->getEntryNode()), 0);
Chain = targetEntryPoint.getValue(1);
SDValue targetGPAddr=
SDValue(CurDAG->getTargetNode(IA64::ADDS, dl, MVT::i64,
FnDescriptor,
CurDAG->getConstant(8, MVT::i64)), 0);
Chain = targetGPAddr.getValue(1);
SDValue targetGP =
SDValue(CurDAG->getTargetNode(IA64::LD8, dl, MVT::i64,MVT::Other,
targetGPAddr, CurDAG->getEntryNode()), 0);
Chain = targetGP.getValue(1);
Chain = CurDAG->getCopyToReg(Chain, dl, IA64::r1, targetGP, InFlag);
InFlag = Chain.getValue(1);
Chain = CurDAG->getCopyToReg(Chain, dl, IA64::B6,
targetEntryPoint, InFlag); // FLAG these?
InFlag = Chain.getValue(1);
CallOperand = CurDAG->getRegister(IA64::B6, MVT::i64);
CallOpcode = IA64::BRCALL_INDIRECT;
}
// Finally, once everything is setup, emit the call itself
if (InFlag.getNode())
Chain = SDValue(CurDAG->getTargetNode(CallOpcode, dl, MVT::Other,
MVT::Flag, CallOperand, InFlag), 0);
else // there might be no arguments
Chain = SDValue(CurDAG->getTargetNode(CallOpcode, dl, MVT::Other,
MVT::Flag, CallOperand, Chain), 0);
InFlag = Chain.getValue(1);
std::vector<SDValue> CallResults;
CallResults.push_back(Chain);
CallResults.push_back(InFlag);
for (unsigned i = 0, e = CallResults.size(); i != e; ++i)
ReplaceUses(Op.getValue(i), CallResults[i]);
return NULL;
}
case IA64ISD::GETFD: {
SDValue Input = N->getOperand(0);
return CurDAG->getTargetNode(IA64::GETFD, dl, MVT::i64, Input);
}
case ISD::FDIV:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM:
return SelectDIV(Op);
case ISD::TargetConstantFP: {
SDValue Chain = CurDAG->getEntryNode(); // this is a constant, so..
SDValue V;
ConstantFPSDNode* N2 = cast<ConstantFPSDNode>(N);
if (N2->getValueAPF().isPosZero()) {
V = CurDAG->getCopyFromReg(Chain, dl, IA64::F0, MVT::f64);
} else if (N2->isExactlyValue(N2->getValueType(0) == MVT::f32 ?
APFloat(+1.0f) : APFloat(+1.0))) {
V = CurDAG->getCopyFromReg(Chain, dl, IA64::F1, MVT::f64);
} else
llvm_unreachable("Unexpected FP constant!");
ReplaceUses(SDValue(N, 0), V);
return 0;
}
case ISD::FrameIndex: { // TODO: reduce creepyness
int FI = cast<FrameIndexSDNode>(N)->getIndex();
if (N->hasOneUse())
return CurDAG->SelectNodeTo(N, IA64::MOV, MVT::i64,
CurDAG->getTargetFrameIndex(FI, MVT::i64));
else
return CurDAG->getTargetNode(IA64::MOV, dl, MVT::i64,
CurDAG->getTargetFrameIndex(FI, MVT::i64));
}
case ISD::ConstantPool: { // TODO: nuke the constant pool
// (ia64 doesn't need one)
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
Constant *C = CP->getConstVal();
SDValue CPI = CurDAG->getTargetConstantPool(C, MVT::i64,
CP->getAlignment());
return CurDAG->getTargetNode(IA64::ADDL_GA, dl, MVT::i64, // ?
CurDAG->getRegister(IA64::r1, MVT::i64), CPI);
}
case ISD::GlobalAddress: {
GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
SDValue GA = CurDAG->getTargetGlobalAddress(GV, MVT::i64);
SDValue Tmp =
SDValue(CurDAG->getTargetNode(IA64::ADDL_GA, dl, MVT::i64,
CurDAG->getRegister(IA64::r1,
MVT::i64), GA), 0);
return CurDAG->getTargetNode(IA64::LD8, dl, MVT::i64, MVT::Other, Tmp,
CurDAG->getEntryNode());
}
/* XXX
case ISD::ExternalSymbol: {
SDValue EA = CurDAG->getTargetExternalSymbol(
cast<ExternalSymbolSDNode>(N)->getSymbol(),
MVT::i64);
SDValue Tmp = CurDAG->getTargetNode(IA64::ADDL_EA, dl, MVT::i64,
CurDAG->getRegister(IA64::r1,
MVT::i64),
EA);
return CurDAG->getTargetNode(IA64::LD8, dl, MVT::i64, Tmp);
}
*/
case ISD::LOAD: { // FIXME: load -1, not 1, for bools?
LoadSDNode *LD = cast<LoadSDNode>(N);
SDValue Chain = LD->getChain();
SDValue Address = LD->getBasePtr();
MVT TypeBeingLoaded = LD->getMemoryVT();
unsigned Opc;
switch (TypeBeingLoaded.getSimpleVT()) {
default:
#ifndef NDEBUG
N->dump(CurDAG);
#endif
llvm_unreachable("Cannot load this type!");
case MVT::i1: { // this is a bool
Opc = IA64::LD1; // first we load a byte, then compare for != 0
if(N->getValueType(0) == MVT::i1) { // XXX: early exit!
return CurDAG->SelectNodeTo(N, IA64::CMPNE, MVT::i1, MVT::Other,
SDValue(CurDAG->getTargetNode(Opc, dl,
MVT::i64,
Address), 0),
CurDAG->getRegister(IA64::r0, MVT::i64),
Chain);
}
/* otherwise, we want to load a bool into something bigger: LD1
will do that for us, so we just fall through */
}
case MVT::i8: Opc = IA64::LD1; break;
case MVT::i16: Opc = IA64::LD2; break;
case MVT::i32: Opc = IA64::LD4; break;
case MVT::i64: Opc = IA64::LD8; break;
case MVT::f32: Opc = IA64::LDF4; break;
case MVT::f64: Opc = IA64::LDF8; break;
}
// TODO: comment this
return CurDAG->SelectNodeTo(N, Opc, N->getValueType(0), MVT::Other,
Address, Chain);
}
case ISD::STORE: {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Address = ST->getBasePtr();
SDValue Chain = ST->getChain();
unsigned Opc;
if (ISD::isNON_TRUNCStore(N)) {
switch (N->getOperand(1).getValueType().getSimpleVT()) {
default: llvm_unreachable("unknown type in store");
case MVT::i1: { // this is a bool
Opc = IA64::ST1; // we store either 0 or 1 as a byte
// first load zero!
SDValue Initial = CurDAG->getCopyFromReg(Chain, dl, IA64::r0, MVT::i64);
Chain = Initial.getValue(1);
// then load 1 into the same reg iff the predicate to store is 1
SDValue Tmp = ST->getValue();
Tmp =
SDValue(CurDAG->getTargetNode(IA64::TPCADDS, dl, MVT::i64, Initial,
CurDAG->getTargetConstant(1,
MVT::i64),
Tmp), 0);
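// Net effect, roughly (register name illustrative):
//   mov rT = r0 ;; (p_val) adds rT = 1, rT ;; st1 [addr] = rT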
return CurDAG->SelectNodeTo(N, Opc, MVT::Other, Address, Tmp, Chain);
}
case MVT::i64: Opc = IA64::ST8; break;
case MVT::f64: Opc = IA64::STF8; break;
}
} else { // Truncating store
switch(ST->getMemoryVT().getSimpleVT()) {
default: llvm_unreachable("unknown type in truncstore");
case MVT::i8: Opc = IA64::ST1; break;
case MVT::i16: Opc = IA64::ST2; break;
case MVT::i32: Opc = IA64::ST4; break;
case MVT::f32: Opc = IA64::STF4; break;
}
}
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
return CurDAG->SelectNodeTo(N, Opc, MVT::Other, N2, N1, Chain);
}
case ISD::BRCOND: {
SDValue Chain = N->getOperand(0);
SDValue CC = N->getOperand(1);
MachineBasicBlock *Dest =
cast<BasicBlockSDNode>(N->getOperand(2))->getBasicBlock();
//FIXME - we do NOT need long branches all the time
return CurDAG->SelectNodeTo(N, IA64::BRLCOND_NOTCALL, MVT::Other, CC,
CurDAG->getBasicBlock(Dest), Chain);
}
case ISD::CALLSEQ_START:
case ISD::CALLSEQ_END: {
int64_t Amt = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
unsigned Opc = N->getOpcode() == ISD::CALLSEQ_START ?
IA64::ADJUSTCALLSTACKDOWN : IA64::ADJUSTCALLSTACKUP;
SDValue N0 = N->getOperand(0);
return CurDAG->SelectNodeTo(N, Opc, MVT::Other, getI64Imm(Amt), N0);
}
case ISD::BR:
// FIXME: we don't need long branches all the time!
SDValue N0 = N->getOperand(0);
return CurDAG->SelectNodeTo(N, IA64::BRL_NOTCALL, MVT::Other,
N->getOperand(1), N0);
}
return SelectCode(Op);
}
/// createIA64DAGToDAGInstructionSelector - This pass converts a legalized DAG
/// into an IA64-specific DAG, ready for instruction scheduling.
///
FunctionPass
*llvm::createIA64DAGToDAGInstructionSelector(IA64TargetMachine &TM) {
return new IA64DAGToDAGISel(TM);
}
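// For reference, a sketch (not part of this file) of how the target machine
// of this era would typically have registered the pass, assuming the usual
// LLVMTargetMachine::addInstSelector override:
//
//   bool IA64TargetMachine::addInstSelector(PassManagerBase &PM,
//                                           CodeGenOpt::Level OptLevel) {
//     PM.add(createIA64DAGToDAGInstructionSelector(*this));
//     return false;
//   }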

View File

@ -1,633 +0,0 @@
//===-- IA64ISelLowering.cpp - IA64 DAG Lowering Implementation -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IA64ISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "IA64ISelLowering.h"
#include "IA64MachineFunctionInfo.h"
#include "IA64TargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
using namespace llvm;
IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
// register class for general registers
addRegisterClass(MVT::i64, IA64::GRRegisterClass);
// register class for FP registers
addRegisterClass(MVT::f64, IA64::FPRegisterClass);
// register class for predicate registers
addRegisterClass(MVT::i1, IA64::PRRegisterClass);
setLoadExtAction(ISD::EXTLOAD , MVT::i1 , Promote);
setLoadExtAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
setLoadExtAction(ISD::SEXTLOAD , MVT::i1 , Promote);
setLoadExtAction(ISD::SEXTLOAD , MVT::i8 , Expand);
setLoadExtAction(ISD::SEXTLOAD , MVT::i16 , Expand);
setLoadExtAction(ISD::SEXTLOAD , MVT::i32 , Expand);
setOperationAction(ISD::BRIND , MVT::Other, Expand);
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BR_CC , MVT::Other, Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
// ia64 uses SELECT not SELECT_CC
setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
// We need to handle ISD::RET for void functions ourselves,
// so we get a chance to restore ar.pfs before adding a
// br.ret insn
setOperationAction(ISD::RET, MVT::Other, Custom);
setShiftAmountType(MVT::i64);
setOperationAction(ISD::FREM , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::UREM , MVT::f32 , Expand);
setOperationAction(ISD::UREM , MVT::f64 , Expand);
setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
// We don't support sin/cos/sqrt/pow
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
setOperationAction(ISD::FPOW , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
setOperationAction(ISD::FPOW , MVT::f32, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
// FIXME: IA64 supports fcopysign natively!
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
// We don't have line number support yet.
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// IA64 has ctlz in the form of the 'fnorm' instruction. The Legalizer
// expansion for ctlz/cttz in terms of ctpop is much larger, but lower
// latency.
// FIXME: Custom lower CTLZ when compiling for size?
setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
setOperationAction(ISD::ROTL , MVT::i64 , Expand);
setOperationAction(ISD::ROTR , MVT::i64 , Expand);
// FIXME: IA64 has this, but it is not implemented. Should be mux @rev.
setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VAARG , MVT::Other, Custom);
setOperationAction(ISD::VASTART , MVT::Other, Custom);
// FIXME: These should be legal
setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);
// Use the default implementation.
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
// Thread Local Storage
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
setStackPointerRegisterToSaveRestore(IA64::r12);
setJumpBufSize(704); // on ia64-linux, jmp_bufs are 704 bytes..
setJumpBufAlignment(16); // ...and must be 16-byte aligned
computeRegisterProperties();
addLegalFPImmediate(APFloat(+0.0));
addLegalFPImmediate(APFloat(-0.0));
addLegalFPImmediate(APFloat(+1.0));
addLegalFPImmediate(APFloat(-1.0));
}
const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default: return 0;
case IA64ISD::GETFD: return "IA64ISD::GETFD";
case IA64ISD::BRCALL: return "IA64ISD::BRCALL";
case IA64ISD::RET_FLAG: return "IA64ISD::RET_FLAG";
}
}
MVT IA64TargetLowering::getSetCCResultType(MVT VT) const {
return MVT::i1;
}
/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned IA64TargetLowering::getFunctionAlignment(const Function *) const {
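// The result is a log2 value: 5 means 2^5 = 32-byte alignment, i.e. a pair
// of 16-byte IA-64 bundles.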
return 5;
}
void IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &ArgValues,
DebugLoc dl) {
//
// add beautiful description of IA64 stack frame format
// here (from intel 24535803.pdf most likely)
//
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
GP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
SP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
RP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
MachineBasicBlock& BB = MF.front();
unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
IA64::r36, IA64::r37, IA64::r38, IA64::r39};
unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
IA64::F12,IA64::F13,IA64::F14, IA64::F15};
unsigned argVreg[8];
unsigned argPreg[8];
unsigned argOpc[8];
unsigned used_FPArgs = 0; // how many FP args have been used so far?
unsigned ArgOffset = 0;
int count = 0;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
{
SDValue newroot, argt;
if(count < 8) { // need to fix this logic? maybe.
switch (getValueType(I->getType()).getSimpleVT()) {
default:
llvm_unreachable("ERROR in LowerArgs: can't lower this type of arg.");
case MVT::f32:
// fixme? (well, will need to for weird FP structy stuff,
// see intel ABI docs)
case MVT::f64:
//XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
MF.getRegInfo().addLiveIn(args_FP[used_FPArgs]);
// mark this reg as liveIn
// floating point args go into f8..f15 as-needed, the increment
argVreg[count] = // is below..:
MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::f64));
// FP args go into f8..f15 as needed: (hence the ++)
argPreg[count] = args_FP[used_FPArgs++];
argOpc[count] = IA64::FMOV;
argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), dl,
argVreg[count], MVT::f64);
if (I->getType() == Type::FloatTy)
argt = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, argt,
DAG.getIntPtrConstant(0));
break;
case MVT::i1: // NOTE: as far as C abi stuff goes,
// bools are just boring old ints
case MVT::i8:
case MVT::i16:
case MVT::i32:
case MVT::i64:
//XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
MF.getRegInfo().addLiveIn(args_int[count]);
// mark this register as liveIn
argVreg[count] =
MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
argPreg[count] = args_int[count];
argOpc[count] = IA64::MOV;
argt = newroot =
DAG.getCopyFromReg(DAG.getRoot(), dl, argVreg[count], MVT::i64);
if ( getValueType(I->getType()) != MVT::i64)
argt = DAG.getNode(ISD::TRUNCATE, dl, getValueType(I->getType()),
newroot);
break;
}
} else { // more than 8 args go into the frame
// Create the frame index object for this incoming parameter...
ArgOffset = 16 + 8 * (count - 8);
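// e.g. the 9th argument (count == 8) gets a fixed object at offset 16,
// the 10th at offset 24, and so on.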
int FI = MFI->CreateFixedObject(8, ArgOffset);
// Create the SelectionDAG nodes corresponding to a load
//from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i64);
argt = newroot = DAG.getLoad(getValueType(I->getType()), dl,
DAG.getEntryNode(), FIN, NULL, 0);
}
++count;
DAG.setRoot(newroot.getValue(1));
ArgValues.push_back(argt);
}
// Create a vreg to hold the output of (what will become)
// the "alloc" instruction
VirtGPR = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
BuildMI(&BB, dl, TII->get(IA64::PSEUDO_ALLOC), VirtGPR);
// we create a PSEUDO_ALLOC (pseudo)instruction for now
/*
BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
// hmm:
BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
// ..hmm.
BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
// hmm:
BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
// ..hmm.
*/
unsigned tempOffset=0;
// if this is a varargs function, we simply lower llvm.va_start by
// pointing to the first entry
if(F.isVarArg()) {
tempOffset=0;
VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
}
// here we actually do the moving of args, and store them to the stack
// too if this is a varargs function:
for (int i = 0; i < count && i < 8; ++i) {
BuildMI(&BB, dl, TII->get(argOpc[i]), argVreg[i]).addReg(argPreg[i]);
if(F.isVarArg()) {
// if this is a varargs function, we copy the input registers to the stack
int FI = MFI->CreateFixedObject(8, tempOffset);
tempOffset+=8; //XXX: is it safe to use r22 like this?
BuildMI(&BB, dl, TII->get(IA64::MOV), IA64::r22).addFrameIndex(FI);
// FIXME: we should use st8.spill here, one day
BuildMI(&BB, dl, TII->get(IA64::ST8), IA64::r22).addReg(argPreg[i]);
}
}
// Finally, inform the code generator which regs we return values in.
// (see the ISD::RET: case in the instruction selector)
switch (getValueType(F.getReturnType()).getSimpleVT()) {
default: llvm_unreachable("i have no idea where to return this type!");
case MVT::isVoid: break;
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
case MVT::i64:
MF.getRegInfo().addLiveOut(IA64::r8);
break;
case MVT::f32:
case MVT::f64:
MF.getRegInfo().addLiveOut(IA64::F8);
break;
}
}
std::pair<SDValue, SDValue>
IA64TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg, unsigned NumFixedArgs,
unsigned CallingConv,
bool isTailCall, SDValue Callee,
ArgListTy &Args, SelectionDAG &DAG,
DebugLoc dl) {
MachineFunction &MF = DAG.getMachineFunction();
unsigned NumBytes = 16;
unsigned outRegsUsed = 0;
if (Args.size() > 8) {
NumBytes += (Args.size() - 8) * 8;
outRegsUsed = 8;
} else {
outRegsUsed = Args.size();
}
// FIXME? this WILL fail if we ever try to pass around an arg that
// consumes more than a single output slot (a 'real' double, an int128,
// some sort of aggregate, etc.), as we'll underestimate how many 'outX'
// registers we use. Hopefully, the assembler will notice.
MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
// keep stack frame 16-byte aligned
// assert(NumBytes==((NumBytes+15) & ~15) &&
// "stack frame not 16-byte aligned!");
NumBytes = (NumBytes+15) & ~15;
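// e.g. a NumBytes of 24 rounds up to 32; values already a multiple of 16
// are left unchanged.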
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
SDValue StackPtr;
std::vector<SDValue> Stores;
std::vector<SDValue> Converts;
std::vector<SDValue> RegValuesToPass;
unsigned ArgOffset = 16;
for (unsigned i = 0, e = Args.size(); i != e; ++i)
{
SDValue Val = Args[i].Node;
MVT ObjectVT = Val.getValueType();
SDValue ValToStore(0, 0), ValToConvert(0, 0);
unsigned ObjSize=8;
switch (ObjectVT.getSimpleVT()) {
default: llvm_unreachable("unexpected argument type!");
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32: {
//promote to 64-bits, sign/zero extending based on type
//of the argument
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (Args[i].isSExt)
ExtendKind = ISD::SIGN_EXTEND;
else if (Args[i].isZExt)
ExtendKind = ISD::ZERO_EXTEND;
Val = DAG.getNode(ExtendKind, dl, MVT::i64, Val);
// XXX: fall through
}
case MVT::i64:
//ObjSize = 8;
if(RegValuesToPass.size() >= 8) {
ValToStore = Val;
} else {
RegValuesToPass.push_back(Val);
}
break;
case MVT::f32:
//promote to 64-bits
Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
// XXX: fall through
case MVT::f64:
if(RegValuesToPass.size() >= 8) {
ValToStore = Val;
} else {
RegValuesToPass.push_back(Val);
if (1 /* TODO: if (calling external or variadic function) */) {
ValToConvert = Val; // additionally pass this FP value as an int
}
}
break;
}
if(ValToStore.getNode()) {
if(!StackPtr.getNode()) {
StackPtr = DAG.getRegister(IA64::r12, MVT::i64);
}
SDValue PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, PtrOff);
Stores.push_back(DAG.getStore(Chain, dl, ValToStore, PtrOff, NULL, 0));
ArgOffset += ObjSize;
}
if(ValToConvert.getNode()) {
Converts.push_back(DAG.getNode(IA64ISD::GETFD, dl,
MVT::i64, ValToConvert));
}
}
// Emit all stores, make sure they occur before any copies into physregs.
if (!Stores.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl,
MVT::Other, &Stores[0],Stores.size());
static const unsigned IntArgRegs[] = {
IA64::out0, IA64::out1, IA64::out2, IA64::out3,
IA64::out4, IA64::out5, IA64::out6, IA64::out7
};
static const unsigned FPArgRegs[] = {
IA64::F8, IA64::F9, IA64::F10, IA64::F11,
IA64::F12, IA64::F13, IA64::F14, IA64::F15
};
SDValue InFlag;
// save the current GP, SP and RP : FIXME: do we need to do all 3 always?
SDValue GPBeforeCall = DAG.getCopyFromReg(Chain, dl, IA64::r1,
MVT::i64, InFlag);
Chain = GPBeforeCall.getValue(1);
InFlag = Chain.getValue(2);
SDValue SPBeforeCall = DAG.getCopyFromReg(Chain, dl, IA64::r12,
MVT::i64, InFlag);
Chain = SPBeforeCall.getValue(1);
InFlag = Chain.getValue(2);
SDValue RPBeforeCall = DAG.getCopyFromReg(Chain, dl, IA64::rp,
MVT::i64, InFlag);
Chain = RPBeforeCall.getValue(1);
InFlag = Chain.getValue(2);
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing integer args into regs out[0-7]
// mapped 1:1 and the FP args into regs F8-F15 "lazily"
// TODO: for performance, we should only copy FP args into int regs when we
// know this is required (i.e. for variadic or external (unknown) functions)
// first to the FP->(integer representation) conversions, these are
// flagged for now, but shouldn't have to be (TODO)
unsigned seenConverts = 0;
for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
if(RegValuesToPass[i].getValueType().isFloatingPoint()) {
Chain = DAG.getCopyToReg(Chain, dl, IntArgRegs[i],
Converts[seenConverts++], InFlag);
InFlag = Chain.getValue(1);
}
}
// next copy args into the usual places, these are flagged
unsigned usedFPArgs = 0;
for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl,
RegValuesToPass[i].getValueType().isInteger() ?
IntArgRegs[i] : FPArgRegs[usedFPArgs++], RegValuesToPass[i], InFlag);
InFlag = Chain.getValue(1);
}
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
/*
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i64);
}
*/
std::vector<MVT> NodeTys;
std::vector<SDValue> CallOperands;
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
CallOperands.push_back(Chain);
CallOperands.push_back(Callee);
// emit the call itself
if (InFlag.getNode())
CallOperands.push_back(InFlag);
else
llvm_unreachable("this should never happen!");
// to make way for a hack:
Chain = DAG.getNode(IA64ISD::BRCALL, dl, NodeTys,
&CallOperands[0], CallOperands.size());
InFlag = Chain.getValue(1);
// restore the GP, SP and RP after the call
Chain = DAG.getCopyToReg(Chain, dl, IA64::r1, GPBeforeCall, InFlag);
InFlag = Chain.getValue(1);
Chain = DAG.getCopyToReg(Chain, dl, IA64::r12, SPBeforeCall, InFlag);
InFlag = Chain.getValue(1);
Chain = DAG.getCopyToReg(Chain, dl, IA64::rp, RPBeforeCall, InFlag);
InFlag = Chain.getValue(1);
std::vector<MVT> RetVals;
RetVals.push_back(MVT::Other);
RetVals.push_back(MVT::Flag);
MVT RetTyVT = getValueType(RetTy);
SDValue RetVal;
if (RetTyVT != MVT::isVoid) {
switch (RetTyVT.getSimpleVT()) {
default: llvm_unreachable("Unknown value type to return!");
case MVT::i1: { // bools are just like other integers (returned in r8)
// we *could* fall through to the truncate below, but this saves a
// few redundant predicate ops
SDValue boolInR8 = DAG.getCopyFromReg(Chain, dl, IA64::r8,
MVT::i64,InFlag);
InFlag = boolInR8.getValue(2);
Chain = boolInR8.getValue(1);
SDValue zeroReg = DAG.getCopyFromReg(Chain, dl, IA64::r0,
MVT::i64, InFlag);
InFlag = zeroReg.getValue(2);
Chain = zeroReg.getValue(1);
RetVal = DAG.getSetCC(dl, MVT::i1, boolInR8, zeroReg, ISD::SETNE);
break;
}
case MVT::i8:
case MVT::i16:
case MVT::i32:
RetVal = DAG.getCopyFromReg(Chain, dl, IA64::r8, MVT::i64, InFlag);
Chain = RetVal.getValue(1);
// keep track of whether it is sign or zero extended (todo: bools?)
/* XXX
RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext :ISD::AssertZext,
dl, MVT::i64, RetVal, DAG.getValueType(RetTyVT));
*/
RetVal = DAG.getNode(ISD::TRUNCATE, dl, RetTyVT, RetVal);
break;
case MVT::i64:
RetVal = DAG.getCopyFromReg(Chain, dl, IA64::r8, MVT::i64, InFlag);
Chain = RetVal.getValue(1);
InFlag = RetVal.getValue(2); // XXX dead
break;
case MVT::f32:
RetVal = DAG.getCopyFromReg(Chain, dl, IA64::F8, MVT::f64, InFlag);
Chain = RetVal.getValue(1);
RetVal = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, RetVal,
DAG.getIntPtrConstant(0));
break;
case MVT::f64:
RetVal = DAG.getCopyFromReg(Chain, dl, IA64::F8, MVT::f64, InFlag);
Chain = RetVal.getValue(1);
InFlag = RetVal.getValue(2); // XXX dead
break;
}
}
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(0, true), SDValue());
return std::make_pair(RetVal, Chain);
}
SDValue IA64TargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
case ISD::GlobalTLSAddress:
llvm_unreachable("TLS not implemented for IA64.");
case ISD::RET: {
SDValue AR_PFSVal, Copy;
switch(Op.getNumOperands()) {
default:
llvm_unreachable("Do not know how to return this many arguments!");
case 1:
AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), dl, VirtGPR, MVT::i64);
AR_PFSVal = DAG.getCopyToReg(AR_PFSVal.getValue(1), dl, IA64::AR_PFS,
AR_PFSVal);
return DAG.getNode(IA64ISD::RET_FLAG, dl, MVT::Other, AR_PFSVal);
case 3: {
// Copy the result into the output register & restore ar.pfs
MVT ArgVT = Op.getOperand(1).getValueType();
unsigned ArgReg = ArgVT.isInteger() ? IA64::r8 : IA64::F8;
AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), dl, VirtGPR, MVT::i64);
Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), dl, ArgReg,
Op.getOperand(1), SDValue());
AR_PFSVal = DAG.getCopyToReg(Copy.getValue(0), dl,
IA64::AR_PFS, AR_PFSVal, Copy.getValue(1));
return DAG.getNode(IA64ISD::RET_FLAG, dl, MVT::Other,
AR_PFSVal, AR_PFSVal.getValue(1));
}
}
return SDValue();
}
case ISD::VAARG: {
MVT VT = getPointerTy();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDValue VAList = DAG.getLoad(VT, dl, Op.getOperand(0), Op.getOperand(1),
SV, 0);
// Increment the pointer, VAList, to the next vaarg
SDValue VAIncr = DAG.getNode(ISD::ADD, dl, VT, VAList,
DAG.getConstant(VT.getSizeInBits()/8,
VT));
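// VT is the pointer type here, so the va_list pointer advances by 8 bytes.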
// Store the incremented VAList to the legalized pointer
VAIncr = DAG.getStore(VAList.getValue(1), dl, VAIncr,
Op.getOperand(1), SV, 0);
// Load the actual argument out of the pointer VAList
return DAG.getLoad(Op.getValueType(), dl, VAIncr, VAList, NULL, 0);
}
case ISD::VASTART: {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
}
// Frame & Return address. Currently unimplemented
case ISD::RETURNADDR: break;
case ISD::FRAMEADDR: break;
}
return SDValue();
}

View File

@ -1,78 +0,0 @@
//===-- IA64ISelLowering.h - IA64 DAG Lowering Interface --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that IA64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_IA64_IA64ISELLOWERING_H
#define LLVM_TARGET_IA64_IA64ISELLOWERING_H
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "IA64.h"
namespace llvm {
namespace IA64ISD {
enum NodeType {
// Start the numbering where the built-in ops and target ops leave off.
FIRST_NUMBER = ISD::BUILTIN_OP_END,
/// GETFD - the getf.d instruction takes a floating point operand and
/// returns its 64-bit memory representation as an i64
GETFD,
// TODO: explain this hack
BRCALL,
// RET_FLAG - Return with a flag operand
RET_FLAG
};
}
class IA64TargetLowering : public TargetLowering {
int VarArgsFrameIndex; // FrameIndex for start of varargs area.
//int ReturnAddrIndex; // FrameIndex for return slot.
unsigned GP, SP, RP; // FIXME - clean this mess up
public:
explicit IA64TargetLowering(TargetMachine &TM);
unsigned VirtGPR; // this is public so it can be accessed in the selector
// for ISD::RET. add an accessor instead? FIXME
const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType: return ISD::SETCC's result type.
virtual MVT getSetCCResultType(MVT VT) const;
/// LowerArguments - This hook must be implemented to indicate how we should
/// lower the arguments for the specified function, into the specified DAG.
virtual void LowerArguments(Function &F, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &ArgValues,
DebugLoc dl);
/// LowerCallTo - This hook lowers an abstract call to a function into an
/// actual call.
virtual std::pair<SDValue, SDValue>
LowerCallTo(SDValue Chain, const Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg, bool isInreg,
unsigned NumFixedArgs, unsigned CC, bool isTailCall,
SDValue Callee, ArgListTy &Args, SelectionDAG &DAG,
DebugLoc dl);
/// LowerOperation - for custom lowering specific ops
/// (currently, only "ret void")
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
};
}
#endif // LLVM_TARGET_IA64_IA64ISELLOWERING_H

View File

@ -1,40 +0,0 @@
//===-- IA64PCInstrBuilder.h - Aids for building IA64 insts -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file exposes functions that may be used with BuildMI from the
// MachineInstrBuilder.h file to simplify generating frame and constant pool
// references.
//
//===----------------------------------------------------------------------===//
#ifndef IA64_INSTRBUILDER_H
#define IA64_INSTRBUILDER_H
#include "llvm/CodeGen/MachineInstrBuilder.h"
namespace llvm {
/// addFrameReference - This function is used to add a reference to the base of
/// an abstract object on the stack frame of the current function. This
/// reference has base register as the FrameIndex offset until it is resolved.
/// This allows a constant offset to be specified as well...
///
inline const MachineInstrBuilder&
addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0,
bool mem = true) {
if (mem)
return MIB.addImm(Offset).addFrameIndex(FI);
else
return MIB.addFrameIndex(FI).addImm(Offset);
}
} // End llvm namespace
#endif

View File

@ -1,80 +0,0 @@
//===- IA64InstrFormats.td - IA64 Instruction Formats --*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// - Warning: the stuff in here isn't really being used, so is mostly
// junk. It'll get fixed as the JIT gets built.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Instruction format superclass
//===----------------------------------------------------------------------===//
class InstIA64<bits<4> op, dag OOL, dag IOL, string asmstr> : Instruction {
// IA64 instruction baseline
field bits<41> Inst;
let Namespace = "IA64";
let OutOperandList = OOL;
let InOperandList = IOL;
let AsmString = asmstr;
let Inst{40-37} = op;
}
//"Each Itanium instruction is categorized into one of six types."
//We should have:
// A, I, M, F, B, L+X
class AForm<bits<4> opcode, bits<6> qpReg, dag OOL, dag IOL, string asmstr> :
InstIA64<opcode, OOL, IOL, asmstr> {
let Inst{5-0} = qpReg;
}
class AForm_DAG<bits<4> opcode, bits<6> qpReg, dag OOL, dag IOL, string asmstr,
list<dag> pattern> :
InstIA64<opcode, OOL, IOL, asmstr> {
let Pattern = pattern;
let Inst{5-0} = qpReg;
}
let isBranch = 1, isTerminator = 1 in
class BForm<bits<4> opcode, bits<6> x6, bits<3> btype, dag OOL, dag IOL, string asmstr> :
InstIA64<opcode, OOL, IOL, asmstr> {
let Inst{32-27} = x6;
let Inst{8-6} = btype;
}
class MForm<bits<4> opcode, bits<6> x6, dag OOL, dag IOL, string asmstr> :
InstIA64<opcode, OOL, IOL, asmstr> {
bits<7> Ra;
bits<7> Rb;
bits<16> disp;
let Inst{35-30} = x6;
// let Inst{20-16} = Rb;
let Inst{15-0} = disp;
}
class RawForm<bits<4> opcode, bits<26> rest, dag OOL, dag IOL, string asmstr> :
InstIA64<opcode, OOL, IOL, asmstr> {
let Inst{25-0} = rest;
}
// Pseudo instructions.
class PseudoInstIA64<dag OOL, dag IOL, string nm> : InstIA64<0, OOL, IOL, nm> {
}
class PseudoInstIA64_DAG<dag OOL, dag IOL, string nm, list<dag> pattern>
: InstIA64<0, OOL, IOL, nm> {
let Pattern = pattern;
}

View File

@ -1,193 +0,0 @@
//===- IA64InstrInfo.cpp - IA64 Instruction Information -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the IA64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "IA64InstrInfo.h"
#include "IA64.h"
#include "IA64InstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "IA64GenInstrInfo.inc"
using namespace llvm;
IA64InstrInfo::IA64InstrInfo()
: TargetInstrInfoImpl(IA64Insts, sizeof(IA64Insts)/sizeof(IA64Insts[0])),
RI(*this) {
}
bool IA64InstrInfo::isMoveInstr(const MachineInstr& MI,
unsigned& sourceReg,
unsigned& destReg,
unsigned& SrcSR, unsigned& DstSR) const {
SrcSR = DstSR = 0; // No sub-registers.
unsigned oc = MI.getOpcode();
if (oc == IA64::MOV || oc == IA64::FMOV) {
// TODO: this doesn't detect predicate moves
assert(MI.getNumOperands() >= 2 &&
/* MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() && */
"invalid register-register move instruction");
if (MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg()) {
// if both operands of the MOV/FMOV are registers, then
// yes, this is a move instruction
sourceReg = MI.getOperand(1).getReg();
destReg = MI.getOperand(0).getReg();
return true;
}
}
return false; // we don't consider e.g. %regN = MOV <FrameIndex #x> a
// move instruction
}
unsigned
IA64InstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond)const {
// FIXME this should probably have a DebugLoc argument
DebugLoc dl = DebugLoc::getUnknownLoc();
// Can only insert uncond branches so far.
assert(Cond.empty() && !FBB && TBB && "Can only handle uncond branches!");
BuildMI(&MBB, dl, get(IA64::BRL_NOTCALL)).addMBB(TBB);
return 1;
}
bool IA64InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
if (DestRC != SrcRC) {
// Not yet supported!
return false;
}
DebugLoc DL = DebugLoc::getUnknownLoc();
if (MI != MBB.end()) DL = MI->getDebugLoc();
if(DestRC == IA64::PRRegisterClass ) // if a bool, we use pseudocode
// (SrcReg) DestReg = cmp.eq.unc(r0, r0)
BuildMI(MBB, MI, DL, get(IA64::PCMPEQUNC), DestReg)
.addReg(IA64::r0).addReg(IA64::r0).addReg(SrcReg);
else // otherwise, MOV works (for both gen. regs and FP regs)
BuildMI(MBB, MI, DL, get(IA64::MOV), DestReg).addReg(SrcReg);
return true;
}
void IA64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill,
int FrameIdx,
const TargetRegisterClass *RC) const{
DebugLoc DL = DebugLoc::getUnknownLoc();
if (MI != MBB.end()) DL = MI->getDebugLoc();
if (RC == IA64::FPRegisterClass) {
BuildMI(MBB, MI, DL, get(IA64::STF_SPILL)).addFrameIndex(FrameIdx)
.addReg(SrcReg, getKillRegState(isKill));
} else if (RC == IA64::GRRegisterClass) {
BuildMI(MBB, MI, DL, get(IA64::ST8)).addFrameIndex(FrameIdx)
.addReg(SrcReg, getKillRegState(isKill));
} else if (RC == IA64::PRRegisterClass) {
/* we use IA64::r2 as a temporary register for doing this hackery. */
// first we load 0:
BuildMI(MBB, MI, DL, get(IA64::MOV), IA64::r2).addReg(IA64::r0);
// then conditionally add 1:
BuildMI(MBB, MI, DL, get(IA64::CADDIMM22), IA64::r2).addReg(IA64::r2)
.addImm(1).addReg(SrcReg, getKillRegState(isKill));
// and then store it to the stack
BuildMI(MBB, MI, DL, get(IA64::ST8))
.addFrameIndex(FrameIdx)
.addReg(IA64::r2);
} else
llvm_unreachable("sorry, I don't know how to store this sort of reg"
"in the stack");
}
void IA64InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
unsigned Opc = 0;
if (RC == IA64::FPRegisterClass) {
Opc = IA64::STF8;
} else if (RC == IA64::GRRegisterClass) {
Opc = IA64::ST8;
} else if (RC == IA64::PRRegisterClass) {
Opc = IA64::ST1;
} else {
llvm_unreachable("sorry, I don't know how to store this sort of reg");
}
DebugLoc DL = DebugLoc::getUnknownLoc();
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
MIB.addReg(SrcReg, getKillRegState(isKill));
NewMIs.push_back(MIB);
return;
}
void IA64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC)const{
DebugLoc DL = DebugLoc::getUnknownLoc();
if (MI != MBB.end()) DL = MI->getDebugLoc();
if (RC == IA64::FPRegisterClass) {
BuildMI(MBB, MI, DL, get(IA64::LDF_FILL), DestReg).addFrameIndex(FrameIdx);
} else if (RC == IA64::GRRegisterClass) {
BuildMI(MBB, MI, DL, get(IA64::LD8), DestReg).addFrameIndex(FrameIdx);
} else if (RC == IA64::PRRegisterClass) {
// first we load a byte from the stack into r2, our 'predicate hackery'
// scratch reg
BuildMI(MBB, MI, DL, get(IA64::LD8), IA64::r2).addFrameIndex(FrameIdx);
// then we compare it to zero. If it _is_ zero, compare-not-equal to
// r0 gives us 0, which is what we want, so that's nice.
BuildMI(MBB, MI, DL, get(IA64::CMPNE), DestReg)
.addReg(IA64::r2)
.addReg(IA64::r0);
} else {
llvm_unreachable("sorry, I don't know how to load this sort of reg"
"from the stack");
}
}
void IA64InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
unsigned Opc = 0;
if (RC == IA64::FPRegisterClass) {
Opc = IA64::LDF8;
} else if (RC == IA64::GRRegisterClass) {
Opc = IA64::LD8;
} else if (RC == IA64::PRRegisterClass) {
Opc = IA64::LD1;
} else {
llvm_unreachable("sorry, I don't know how to load this sort of reg");
}
DebugLoc DL = DebugLoc::getUnknownLoc();
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
NewMIs.push_back(MIB);
return;
}

View File

@ -1,70 +0,0 @@
//===- IA64InstrInfo.h - IA64 Instruction Information ----------*- C++ -*- ===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the IA64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef IA64INSTRUCTIONINFO_H
#define IA64INSTRUCTIONINFO_H
#include "llvm/Target/TargetInstrInfo.h"
#include "IA64RegisterInfo.h"
namespace llvm {
class IA64InstrInfo : public TargetInstrInfoImpl {
const IA64RegisterInfo RI;
public:
IA64InstrInfo();
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
virtual const IA64RegisterInfo &getRegisterInfo() const { return RI; }
/// Return true if the instruction is a register to register move and return
/// the source and dest operands and their sub-register indices by reference.
virtual bool isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC) const;
virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC) const;
virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
};
} // End llvm namespace
#endif

View File

@ -1,751 +0,0 @@
//===- IA64InstrInfo.td - Describe the IA64 Instruction Set -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the IA64 instruction set, defining the instructions, and
// properties of the instructions which are needed for code generation, machine
// code emission, and analysis.
//
//===----------------------------------------------------------------------===//
include "IA64InstrFormats.td"
//===----------------------------------------------------------------------===//
// IA-64 specific DAG Nodes.
//
def IA64getfd : SDNode<"IA64ISD::GETFD", SDTFPToIntOp, []>;
def retflag : SDNode<"IA64ISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInFlag]>;
//===---------
// Instruction types
class isA { bit A=1; } // I or M unit
class isM { bit M=1; } // M unit
class isI { bit I=1; } // I unit
class isB { bit B=1; } // B unit
class isF { bit F=1; } // F unit
class isLX { bit LX=1; } // I/B
//===---------
def u2imm : Operand<i8>;
def u6imm : Operand<i8>;
def s8imm : Operand<i8> {
let PrintMethod = "printS8ImmOperand";
}
def s14imm : Operand<i64> {
let PrintMethod = "printS14ImmOperand";
}
def s22imm : Operand<i64> {
let PrintMethod = "printS22ImmOperand";
}
def u64imm : Operand<i64> {
let PrintMethod = "printU64ImmOperand";
}
def s64imm : Operand<i64> {
let PrintMethod = "printS64ImmOperand";
}
let PrintMethod = "printGlobalOperand" in
def globaladdress : Operand<i64>;
// the asmprinter needs to know about calls
let PrintMethod = "printCallOperand" in
def calltarget : Operand<i64>;
/* new daggy action!!! */
def is32ones : PatLeaf<(i64 imm), [{
// is32ones predicate - True if the immediate is 0x00000000FFFFFFFF
// Used to create ZXT4s appropriately
uint64_t v = (uint64_t)N->getZExtValue();
return (v == 0x00000000FFFFFFFFLL);
}]>;
// isMIXable predicates - True if the immediate is
// 0xFF00FF00FF00FF00, 0x00FF00FF00FF00FF
// etc, through 0x00000000FFFFFFFF
// Used to test for the suitability of mix*
def isMIX1Lable: PatLeaf<(i64 imm), [{
return((uint64_t)N->getZExtValue()==0xFF00FF00FF00FF00LL);
}]>;
def isMIX1Rable: PatLeaf<(i64 imm), [{
return((uint64_t)N->getZExtValue()==0x00FF00FF00FF00FFLL);
}]>;
def isMIX2Lable: PatLeaf<(i64 imm), [{
return((uint64_t)N->getZExtValue()==0xFFFF0000FFFF0000LL);
}]>;
def isMIX2Rable: PatLeaf<(i64 imm), [{
return((uint64_t)N->getZExtValue()==0x0000FFFF0000FFFFLL);
}]>;
def isMIX4Lable: PatLeaf<(i64 imm), [{
return((uint64_t)N->getZExtValue()==0xFFFFFFFF00000000LL);
}]>;
def isMIX4Rable: PatLeaf<(i64 imm), [{
return((uint64_t)N->getZExtValue()==0x00000000FFFFFFFFLL);
}]>;
def isSHLADDimm: PatLeaf<(i64 imm), [{
// isSHLADDimm predicate - True if the immediate is exactly 1, 2, 3 or 4
// - 0 is *not* okay.
// Used to create shladd instructions appropriately
int64_t v = (int64_t)N->getZExtValue();
return (v >= 1 && v <= 4);
}]>;
def immSExt14 : PatLeaf<(i64 imm), [{
// immSExt14 predicate - True if the immediate fits in a 14-bit sign extended
// field. Used by instructions like 'adds'.
int64_t v = (int64_t)N->getZExtValue();
return (v <= 8191 && v >= -8192);
}]>;
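// (a 14-bit signed immediate spans [-2^13, 2^13 - 1] = [-8192, 8191],
// which is exactly the range checked above)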
// imm64 predicate - True if the immediate fits in a 64-bit
// field - i.e., true. used to keep movl happy
def imm64 : PatLeaf<(i64 imm)>;
def ADD : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"add $dst = $src1, $src2",
[(set GR:$dst, (add GR:$src1, GR:$src2))]>, isA;
def ADD1 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"add $dst = $src1, $src2, 1",
[(set GR:$dst, (add (add GR:$src1, GR:$src2), 1))]>, isA;
def ADDS : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, s14imm:$imm),
"adds $dst = $imm, $src1",
[(set GR:$dst, (add GR:$src1, immSExt14:$imm))]>, isA;
def MOVL : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins s64imm:$imm),
"movl $dst = $imm",
[(set GR:$dst, imm64:$imm)]>, isLX;
def ADDL_GA : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, globaladdress:$imm),
"addl $dst = $imm, $src1",
[]>, isA;
// hmm
def ADDL_EA : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, calltarget:$imm),
"addl $dst = $imm, $src1",
[]>, isA;
def SUB : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"sub $dst = $src1, $src2",
[(set GR:$dst, (sub GR:$src1, GR:$src2))]>, isA;
def SUB1 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"sub $dst = $src1, $src2, 1",
[(set GR:$dst, (add (sub GR:$src1, GR:$src2), -1))]>, isA;
let isTwoAddress = 1 in {
def TPCADDIMM22 : AForm<0x03, 0x0b,
(outs GR:$dst), (ins GR:$src1, s22imm:$imm, PR:$qp),
"($qp) add $dst = $imm, $dst">, isA;
def TPCADDS : AForm_DAG<0x03, 0x0b,
(outs GR:$dst), (ins GR:$src1, s14imm:$imm, PR:$qp),
"($qp) adds $dst = $imm, $dst",
[]>, isA;
def TPCMPIMM8NE : AForm<0x03, 0x0b,
(outs PR:$dst), (ins PR:$src1, s22imm:$imm, GR:$src2, PR:$qp),
"($qp) cmp.ne $dst , p0 = $imm, $src2">, isA;
}
// zero extend a bool (predicate reg) into an integer reg
def ZXTb : Pat<(zext PR:$src),
(TPCADDIMM22 (ADDS r0, 0), 1, PR:$src)>;
def AXTb : Pat<(anyext PR:$src),
(TPCADDIMM22 (ADDS r0, 0), 1, PR:$src)>;
// normal sign/zero-extends
def SXT1 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src), "sxt1 $dst = $src",
[(set GR:$dst, (sext_inreg GR:$src, i8))]>, isI;
def ZXT1 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src), "zxt1 $dst = $src",
[(set GR:$dst, (and GR:$src, 255))]>, isI;
def SXT2 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src), "sxt2 $dst = $src",
[(set GR:$dst, (sext_inreg GR:$src, i16))]>, isI;
def ZXT2 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src), "zxt2 $dst = $src",
[(set GR:$dst, (and GR:$src, 65535))]>, isI;
def SXT4 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src), "sxt4 $dst = $src",
[(set GR:$dst, (sext_inreg GR:$src, i32))]>, isI;
def ZXT4 : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src), "zxt4 $dst = $src",
[(set GR:$dst, (and GR:$src, is32ones))]>, isI;
// fixme: shrs vs shru?
def MIX1L : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"mix1.l $dst = $src1, $src2",
[(set GR:$dst, (or (and GR:$src1, isMIX1Lable),
(and (srl GR:$src2, (i64 8)), isMIX1Lable)))]>, isI;
def MIX2L : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"mix2.l $dst = $src1, $src2",
[(set GR:$dst, (or (and GR:$src1, isMIX2Lable),
(and (srl GR:$src2, (i64 16)), isMIX2Lable)))]>, isI;
def MIX4L : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"mix4.l $dst = $src1, $src2",
[(set GR:$dst, (or (and GR:$src1, isMIX4Lable),
(and (srl GR:$src2, (i64 32)), isMIX4Lable)))]>, isI;
def MIX1R : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"mix1.r $dst = $src1, $src2",
[(set GR:$dst, (or (and (shl GR:$src1, (i64 8)), isMIX1Rable),
(and GR:$src2, isMIX1Rable)))]>, isI;
def MIX2R : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"mix2.r $dst = $src1, $src2",
[(set GR:$dst, (or (and (shl GR:$src1, (i64 16)), isMIX2Rable),
(and GR:$src2, isMIX2Rable)))]>, isI;
def MIX4R : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"mix4.r $dst = $src1, $src2",
[(set GR:$dst, (or (and (shl GR:$src1, (i64 32)), isMIX4Rable),
(and GR:$src2, isMIX4Rable)))]>, isI;
def GETFSIGD : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins FP:$src),
"getf.sig $dst = $src",
[]>, isM;
def SETFSIGD : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins GR:$src),
"setf.sig $dst = $src",
[]>, isM;
def XMALD : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3),
"xma.l $dst = $src1, $src2, $src3",
[]>, isF;
def XMAHD : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3),
"xma.h $dst = $src1, $src2, $src3",
[]>, isF;
def XMAHUD : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3),
"xma.hu $dst = $src1, $src2, $src3",
[]>, isF;
// pseudocode for integer multiplication
def : Pat<(mul GR:$src1, GR:$src2),
(GETFSIGD (XMALD (SETFSIGD GR:$src1), (SETFSIGD GR:$src2), F0))>;
def : Pat<(mulhs GR:$src1, GR:$src2),
(GETFSIGD (XMAHD (SETFSIGD GR:$src1), (SETFSIGD GR:$src2), F0))>;
def : Pat<(mulhu GR:$src1, GR:$src2),
(GETFSIGD (XMAHUD (SETFSIGD GR:$src1), (SETFSIGD GR:$src2), F0))>;
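// So a plain 64-bit multiply of ra and rb expands, roughly, to:
//   setf.sig fT1 = ra
//   setf.sig fT2 = rb
//   xma.l    fT3 = fT1, fT2, f0
//   getf.sig rd  = fT3
// (register names illustrative only)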
// TODO: addp4 (addp4 dst = src, r0 is a 32-bit add)
// has imm form, too
// def ADDS : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, s14imm:$imm),
// "adds $dst = $imm, $src1">;
def AND : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"and $dst = $src1, $src2",
[(set GR:$dst, (and GR:$src1, GR:$src2))]>, isA;
def ANDCM : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"andcm $dst = $src1, $src2",
[(set GR:$dst, (and GR:$src1, (not GR:$src2)))]>, isA;
// TODO: and/andcm/or/xor/add/sub/shift immediate forms
def OR : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"or $dst = $src1, $src2",
[(set GR:$dst, (or GR:$src1, GR:$src2))]>, isA;
def pOR : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2, PR:$qp),
"($qp) or $dst = $src1, $src2">, isA;
// the following are all a bit unfortunate: we throw away the complement
// of the compare!
def CMPEQ : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.eq $dst, p0 = $src1, $src2",
[(set PR:$dst, (seteq GR:$src1, GR:$src2))]>, isA;
def CMPGT : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.gt $dst, p0 = $src1, $src2",
[(set PR:$dst, (setgt GR:$src1, GR:$src2))]>, isA;
def CMPGE : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.ge $dst, p0 = $src1, $src2",
[(set PR:$dst, (setge GR:$src1, GR:$src2))]>, isA;
def CMPLT : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.lt $dst, p0 = $src1, $src2",
[(set PR:$dst, (setlt GR:$src1, GR:$src2))]>, isA;
def CMPLE : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.le $dst, p0 = $src1, $src2",
[(set PR:$dst, (setle GR:$src1, GR:$src2))]>, isA;
def CMPNE : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.ne $dst, p0 = $src1, $src2",
[(set PR:$dst, (setne GR:$src1, GR:$src2))]>, isA;
def CMPLTU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.ltu $dst, p0 = $src1, $src2",
[(set PR:$dst, (setult GR:$src1, GR:$src2))]>, isA;
def CMPGTU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.gtu $dst, p0 = $src1, $src2",
[(set PR:$dst, (setugt GR:$src1, GR:$src2))]>, isA;
def CMPLEU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.leu $dst, p0 = $src1, $src2",
[(set PR:$dst, (setule GR:$src1, GR:$src2))]>, isA;
def CMPGEU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2),
"cmp.geu $dst, p0 = $src1, $src2",
[(set PR:$dst, (setuge GR:$src1, GR:$src2))]>, isA;
// and we do the whole thing again for FP compares!
def FCMPEQ : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.eq $dst, p0 = $src1, $src2",
[(set PR:$dst, (seteq FP:$src1, FP:$src2))]>, isF;
def FCMPGT : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.gt $dst, p0 = $src1, $src2",
[(set PR:$dst, (setgt FP:$src1, FP:$src2))]>, isF;
def FCMPGE : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.ge $dst, p0 = $src1, $src2",
[(set PR:$dst, (setge FP:$src1, FP:$src2))]>, isF;
def FCMPLT : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.lt $dst, p0 = $src1, $src2",
[(set PR:$dst, (setlt FP:$src1, FP:$src2))]>, isF;
def FCMPLE : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.le $dst, p0 = $src1, $src2",
[(set PR:$dst, (setle FP:$src1, FP:$src2))]>, isF;
def FCMPNE : AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.neq $dst, p0 = $src1, $src2",
[(set PR:$dst, (setne FP:$src1, FP:$src2))]>, isF;
def FCMPLTU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.lt $dst, p0 = $src1, $src2",
[(set PR:$dst, (setult FP:$src1, FP:$src2))]>, isF;
def FCMPGTU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.gt $dst, p0 = $src1, $src2",
[(set PR:$dst, (setugt FP:$src1, FP:$src2))]>, isF;
def FCMPLEU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.le $dst, p0 = $src1, $src2",
[(set PR:$dst, (setule FP:$src1, FP:$src2))]>, isF;
def FCMPGEU: AForm_DAG<0x03, 0x0b, (outs PR:$dst), (ins FP:$src1, FP:$src2),
"fcmp.ge $dst, p0 = $src1, $src2",
[(set PR:$dst, (setuge FP:$src1, FP:$src2))]>, isF;
def PCMPEQUNCR0R0 : AForm<0x03, 0x0b, (outs PR:$dst), (ins PR:$qp),
"($qp) cmp.eq.unc $dst, p0 = r0, r0">, isA;
def : Pat<(trunc GR:$src), // truncate i64 to i1
(CMPNE GR:$src, r0)>; // $src!=0? If so, PR:$dst=true
let isTwoAddress=1 in {
def TPCMPEQR0R0 : AForm<0x03, 0x0b, (outs PR:$dst), (ins PR:$bogus, PR:$qp),
"($qp) cmp.eq $dst, p0 = r0, r0">, isA;
def TPCMPNER0R0 : AForm<0x03, 0x0b, (outs PR:$dst), (ins PR:$bogus, PR:$qp),
"($qp) cmp.ne $dst, p0 = r0, r0">, isA;
}
/* our pseudocode for OR on predicates is:
pC = pA OR pB
-------------
(pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
;;
(pB) cmp.eq pC,p0 = r0,r0 // if (pB) pC = 1 */
def bOR : Pat<(or PR:$src1, PR:$src2),
(TPCMPEQR0R0 (PCMPEQUNCR0R0 PR:$src1), PR:$src2)>;
/* our pseudocode for AND on predicates is:
*
(pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
cmp.eq pTemp,p0 = r0,r0 // pTemp = NOT pB
;;
(pB) cmp.ne pTemp,p0 = r0,r0
;;
(pTemp)cmp.ne pC,p0 = r0,r0 // if (NOT pB) pC = 0 */
def bAND : Pat<(and PR:$src1, PR:$src2),
( TPCMPNER0R0 (PCMPEQUNCR0R0 PR:$src1),
(TPCMPNER0R0 (CMPEQ r0, r0), PR:$src2) )>;
/* one possible routine for XOR on predicates is:
// Compute px = py ^ pz
// using sum of products: px = (py & !pz) | (pz & !py)
// Uses 5 instructions in 3 cycles.
// cycle 1
(pz) cmp.eq.unc px = r0, r0 // px = pz
(py) cmp.eq.unc pt = r0, r0 // pt = py
;;
// cycle 2
(pt) cmp.ne.and px = r0, r0 // px = px & !pt (px = pz & !pt)
(pz) cmp.ne.and pt = r0, r0 // pt = pt & !pz
;;
} { .mmi
// cycle 3
(pt) cmp.eq.or px = r0, r0 // px = px | pt
*** Another, which we use here, requires one scratch GR. it is:
mov rt = 0 // initialize rt off critical path
;;
// cycle 1
(pz) cmp.eq.unc px = r0, r0 // px = pz
(pz) mov rt = 1 // rt = pz
;;
// cycle 2
(py) cmp.ne px = 1, rt // if (py) px = !pz
.. these routines kindly provided by Jim Hull
*/
def bXOR : Pat<(xor PR:$src1, PR:$src2),
(TPCMPIMM8NE (PCMPEQUNCR0R0 PR:$src2), 1,
(TPCADDS (ADDS r0, 0), 1, PR:$src2),
PR:$src1)>;
def XOR : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"xor $dst = $src1, $src2",
[(set GR:$dst, (xor GR:$src1, GR:$src2))]>, isA;
def SHLADD: AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1,s64imm:$imm,GR:$src2),
"shladd $dst = $src1, $imm, $src2",
[(set GR:$dst, (add GR:$src2, (shl GR:$src1, isSHLADDimm:$imm)))]>, isA;
def SHL : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"shl $dst = $src1, $src2",
[(set GR:$dst, (shl GR:$src1, GR:$src2))]>, isI;
def SHRU : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"shr.u $dst = $src1, $src2",
[(set GR:$dst, (srl GR:$src1, GR:$src2))]>, isI;
def SHRS : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, GR:$src2),
"shr $dst = $src1, $src2",
[(set GR:$dst, (sra GR:$src1, GR:$src2))]>, isI;
def MOV : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src), "mov $dst = $src">, isA;
def FMOV : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"mov $dst = $src">, isF; // XXX: there _is_ no fmov
def PMOV : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src, PR:$qp),
"($qp) mov $dst = $src">, isA;
def SPILL_ALL_PREDICATES_TO_GR : AForm<0x03, 0x0b, (outs GR:$dst), (ins),
"mov $dst = pr">, isI;
def FILL_ALL_PREDICATES_FROM_GR : AForm<0x03, 0x0b, (outs), (ins GR:$src),
"mov pr = $src">, isI;
let isTwoAddress = 1 in {
def CMOV : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src2, GR:$src, PR:$qp),
"($qp) mov $dst = $src">, isA;
}
def PFMOV : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src, PR:$qp),
"($qp) mov $dst = $src">, isF;
let isTwoAddress = 1 in {
def CFMOV : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src2, FP:$src, PR:$qp),
"($qp) mov $dst = $src">, isF;
}
def SELECTINT : Pat<(select PR:$which, GR:$src1, GR:$src2),
(CMOV (MOV GR:$src2), GR:$src1, PR:$which)>; // note order!
def SELECTFP : Pat<(select PR:$which, FP:$src1, FP:$src2),
(CFMOV (FMOV FP:$src2), FP:$src1, PR:$which)>; // note order!
// TODO: can do this faster, w/o using any integer regs (see pattern isel)
def SELECTBOOL : Pat<(select PR:$which, PR:$src1, PR:$src2), // note order!
(CMPNE (CMOV
(MOV (TPCADDIMM22 (ADDS r0, 0), 1, PR:$src2)),
(TPCADDIMM22 (ADDS r0, 0), 1, PR:$src1), PR:$which), r0)>;
// load constants of various sizes // FIXME: prettyprint negative constants
def : Pat<(i64 immSExt14:$imm), (ADDS r0, immSExt14:$imm)>;
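// e.g. an i64 constant 42 is materialized as "adds rDst = 42, r0"
// (destination register name illustrative).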
def : Pat<(i1 -1), (CMPEQ r0, r0)>; // TODO: this should just be a ref to p0
def : Pat<(i1 0), (CMPNE r0, r0)>; // TODO: any instruction actually *using*
// this predicate should be killed!
// TODO: support postincrement (reg, imm9) loads+stores - this needs more
// tablegen support
def IUSE : PseudoInstIA64<(outs), (ins variable_ops), "// IUSE">;
def ADJUSTCALLSTACKUP : PseudoInstIA64<(outs), (ins variable_ops),
"// ADJUSTCALLSTACKUP">;
def ADJUSTCALLSTACKDOWN : PseudoInstIA64<(outs), (ins variable_ops),
"// ADJUSTCALLSTACKDOWN">;
def PSEUDO_ALLOC : PseudoInstIA64<(outs), (ins GR:$foo), "// PSEUDO_ALLOC">;
def ALLOC : AForm<0x03, 0x0b,
(outs GR:$dst), (ins i8imm:$inputs, i8imm:$locals, i8imm:$outputs, i8imm:$rotating),
"alloc $dst = ar.pfs,$inputs,$locals,$outputs,$rotating">, isM;
let isTwoAddress = 1 in {
def TCMPNE : AForm<0x03, 0x0b,
(outs PR:$dst), (ins PR:$src2, GR:$src3, GR:$src4),
"cmp.ne $dst, p0 = $src3, $src4">, isA;
def TPCMPEQOR : AForm<0x03, 0x0b,
(outs PR:$dst), (ins PR:$src2, GR:$src3, GR:$src4, PR:$qp),
"($qp) cmp.eq.or $dst, p0 = $src3, $src4">, isA;
def TPCMPNE : AForm<0x03, 0x0b,
(outs PR:$dst), (ins PR:$src2, GR:$src3, GR:$src4, PR:$qp),
"($qp) cmp.ne $dst, p0 = $src3, $src4">, isA;
def TPCMPEQ : AForm<0x03, 0x0b,
(outs PR:$dst), (ins PR:$src2, GR:$src3, GR:$src4, PR:$qp),
"($qp) cmp.eq $dst, p0 = $src3, $src4">, isA;
}
def MOVSIMM14 : AForm<0x03, 0x0b, (outs GR:$dst), (ins s14imm:$imm),
"mov $dst = $imm">, isA;
def MOVSIMM22 : AForm<0x03, 0x0b, (outs GR:$dst), (ins s22imm:$imm),
"mov $dst = $imm">, isA;
def MOVLIMM64 : AForm<0x03, 0x0b, (outs GR:$dst), (ins s64imm:$imm),
"movl $dst = $imm">, isLX;
def SHLI : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, u6imm:$imm),
"shl $dst = $src1, $imm">, isI;
def SHRUI : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, u6imm:$imm),
"shr.u $dst = $src1, $imm">, isI;
def SHRSI : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, u6imm:$imm),
"shr $dst = $src1, $imm">, isI;
def EXTRU : AForm<0x03, 0x0b,
(outs GR:$dst), (ins GR:$src1, u6imm:$imm1, u6imm:$imm2),
"extr.u $dst = $src1, $imm1, $imm2">, isI;
def DEPZ : AForm<0x03, 0x0b,
(outs GR:$dst), (ins GR:$src1, u6imm:$imm1, u6imm:$imm2),
"dep.z $dst = $src1, $imm1, $imm2">, isI;
def PCMPEQOR : AForm<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2, PR:$qp),
"($qp) cmp.eq.or $dst, p0 = $src1, $src2">, isA;
def PCMPEQUNC : AForm<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2, PR:$qp),
"($qp) cmp.eq.unc $dst, p0 = $src1, $src2">, isA;
def PCMPNE : AForm<0x03, 0x0b, (outs PR:$dst), (ins GR:$src1, GR:$src2, PR:$qp),
"($qp) cmp.ne $dst, p0 = $src1, $src2">, isA;
// two destinations!
def BCMPEQ : AForm<0x03, 0x0b, (outs PR:$dst1, PR:$dst2), (ins GR:$src1, GR:$src2),
"cmp.eq $dst1, dst2 = $src1, $src2">, isA;
def ADDIMM14 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, s14imm:$imm),
"adds $dst = $imm, $src1">, isA;
def ADDIMM22 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, s22imm:$imm),
"add $dst = $imm, $src1">, isA;
def CADDIMM22 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$src1, s22imm:$imm, PR:$qp),
"($qp) add $dst = $imm, $src1">, isA;
def SUBIMM8 : AForm<0x03, 0x0b, (outs GR:$dst), (ins s8imm:$imm, GR:$src2),
"sub $dst = $imm, $src2">, isA;
let mayStore = 1 in {
def ST1 : AForm<0x03, 0x0b, (outs), (ins GR:$dstPtr, GR:$value),
"st1 [$dstPtr] = $value">, isM;
def ST2 : AForm<0x03, 0x0b, (outs), (ins GR:$dstPtr, GR:$value),
"st2 [$dstPtr] = $value">, isM;
def ST4 : AForm<0x03, 0x0b, (outs), (ins GR:$dstPtr, GR:$value),
"st4 [$dstPtr] = $value">, isM;
def ST8 : AForm<0x03, 0x0b, (outs), (ins GR:$dstPtr, GR:$value),
"st8 [$dstPtr] = $value">, isM;
def STF4 : AForm<0x03, 0x0b, (outs), (ins GR:$dstPtr, FP:$value),
"stfs [$dstPtr] = $value">, isM;
def STF8 : AForm<0x03, 0x0b, (outs), (ins GR:$dstPtr, FP:$value),
"stfd [$dstPtr] = $value">, isM;
def STF_SPILL : AForm<0x03, 0x0b, (outs), (ins GR:$dstPtr, FP:$value),
"stf.spill [$dstPtr] = $value">, isM;
}
let canFoldAsLoad = 1 in {
def LD1 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr),
"ld1 $dst = [$srcPtr]">, isM;
def LD2 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr),
"ld2 $dst = [$srcPtr]">, isM;
def LD4 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr),
"ld4 $dst = [$srcPtr]">, isM;
def LD8 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr),
"ld8 $dst = [$srcPtr]">, isM;
def LDF4 : AForm<0x03, 0x0b, (outs FP:$dst), (ins GR:$srcPtr),
"ldfs $dst = [$srcPtr]">, isM;
def LDF8 : AForm<0x03, 0x0b, (outs FP:$dst), (ins GR:$srcPtr),
"ldfd $dst = [$srcPtr]">, isM;
def LDF_FILL : AForm<0x03, 0x0b, (outs FP:$dst), (ins GR:$srcPtr),
"ldf.fill $dst = [$srcPtr]">, isM;
}
def POPCNT : AForm_DAG<0x03, 0x0b, (outs GR:$dst), (ins GR:$src),
"popcnt $dst = $src",
[(set GR:$dst, (ctpop GR:$src))]>, isI;
// some FP stuff: // TODO: single-precision stuff?
def FADD : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2),
"fadd $dst = $src1, $src2",
[(set FP:$dst, (fadd FP:$src1, FP:$src2))]>, isF;
def FADDS: AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2),
"fadd.s $dst = $src1, $src2">, isF;
def FSUB : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2),
"fsub $dst = $src1, $src2",
[(set FP:$dst, (fsub FP:$src1, FP:$src2))]>, isF;
def FMPY : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2),
"fmpy $dst = $src1, $src2",
[(set FP:$dst, (fmul FP:$src1, FP:$src2))]>, isF;
def FMA : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3),
"fma $dst = $src1, $src2, $src3",
[(set FP:$dst, (fadd (fmul FP:$src1, FP:$src2), FP:$src3))]>, isF;
def FMS : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3),
"fms $dst = $src1, $src2, $src3",
[(set FP:$dst, (fsub (fmul FP:$src1, FP:$src2), FP:$src3))]>, isF;
def FNMA : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3),
       "fnma $dst = $src1, $src2, $src3",
       [(set FP:$dst, (fsub FP:$src3, (fmul FP:$src1, FP:$src2)))]>, isF;
def FABS : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fabs $dst = $src",
[(set FP:$dst, (fabs FP:$src))]>, isF;
def FNEG : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fneg $dst = $src",
[(set FP:$dst, (fneg FP:$src))]>, isF;
def FNEGABS : AForm_DAG<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fnegabs $dst = $src",
[(set FP:$dst, (fneg (fabs FP:$src)))]>, isF;
let isTwoAddress=1 in {
def TCFMAS1 : AForm<0x03, 0x0b,
(outs FP:$dst), (ins FP:$bogussrc, FP:$src1, FP:$src2, FP:$src3, PR:$qp),
"($qp) fma.s1 $dst = $src1, $src2, $src3">, isF;
def TCFMADS0 : AForm<0x03, 0x0b,
(outs FP:$dst), (ins FP:$bogussrc, FP:$src1, FP:$src2, FP:$src3, PR:$qp),
"($qp) fma.d.s0 $dst = $src1, $src2, $src3">, isF;
}
def CFMAS1 : AForm<0x03, 0x0b,
(outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3, PR:$qp),
"($qp) fma.s1 $dst = $src1, $src2, $src3">, isF;
def CFNMAS1 : AForm<0x03, 0x0b,
(outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3, PR:$qp),
"($qp) fnma.s1 $dst = $src1, $src2, $src3">, isF;
def CFMADS1 : AForm<0x03, 0x0b,
(outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3, PR:$qp),
"($qp) fma.d.s1 $dst = $src1, $src2, $src3">, isF;
def CFMADS0 : AForm<0x03, 0x0b,
(outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3, PR:$qp),
"($qp) fma.d.s0 $dst = $src1, $src2, $src3">, isF;
def CFNMADS1 : AForm<0x03, 0x0b,
(outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3, PR:$qp),
"($qp) fnma.d.s1 $dst = $src1, $src2, $src3">, isF;
def FRCPAS0 : AForm<0x03, 0x0b, (outs FP:$dstFR, PR:$dstPR), (ins FP:$src1, FP:$src2),
"frcpa.s0 $dstFR, $dstPR = $src1, $src2">, isF;
def FRCPAS1 : AForm<0x03, 0x0b, (outs FP:$dstFR, PR:$dstPR), (ins FP:$src1, FP:$src2),
"frcpa.s1 $dstFR, $dstPR = $src1, $src2">, isF;
def XMAL : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src1, FP:$src2, FP:$src3),
"xma.l $dst = $src1, $src2, $src3">, isF;
def FCVTXF : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.xf $dst = $src">, isF;
def FCVTXUF : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.xuf $dst = $src">, isF;
def FCVTXUFS1 : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.xuf.s1 $dst = $src">, isF;
def FCVTFX : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.fx $dst = $src">, isF;
def FCVTFXU : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.fxu $dst = $src">, isF;
def FCVTFXTRUNC : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.fx.trunc $dst = $src">, isF;
def FCVTFXUTRUNC : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.fxu.trunc $dst = $src">, isF;
def FCVTFXTRUNCS1 : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.fx.trunc.s1 $dst = $src">, isF;
def FCVTFXUTRUNCS1 : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fcvt.fxu.trunc.s1 $dst = $src">, isF;
def FNORMD : AForm<0x03, 0x0b, (outs FP:$dst), (ins FP:$src),
"fnorm.d $dst = $src">, isF;
def GETFD : AForm<0x03, 0x0b, (outs GR:$dst), (ins FP:$src),
"getf.d $dst = $src">, isM;
def SETFD : AForm<0x03, 0x0b, (outs FP:$dst), (ins GR:$src),
"setf.d $dst = $src">, isM;
def GETFSIG : AForm<0x03, 0x0b, (outs GR:$dst), (ins FP:$src),
"getf.sig $dst = $src">, isM;
def SETFSIG : AForm<0x03, 0x0b, (outs FP:$dst), (ins GR:$src),
"setf.sig $dst = $src">, isM;
// these four FP<->int conversion patterns need checking/cleaning
def SINT_TO_FP : Pat<(sint_to_fp GR:$src),
(FNORMD (FCVTXF (SETFSIG GR:$src)))>;
def UINT_TO_FP : Pat<(uint_to_fp GR:$src),
(FNORMD (FCVTXUF (SETFSIG GR:$src)))>;
def FP_TO_SINT : Pat<(i64 (fp_to_sint FP:$src)),
(GETFSIG (FCVTFXTRUNC FP:$src))>;
def FP_TO_UINT : Pat<(i64 (fp_to_uint FP:$src)),
(GETFSIG (FCVTFXUTRUNC FP:$src))>;
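// Roughly: setf.sig copies the 64-bit integer into an FP register's significand,
// fcvt.xf / fcvt.xuf turn that into a floating-point value (fnorm.d then rounds
// it to double), and fcvt.fx*.trunc + getf.sig do the reverse for FP-to-integer.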
def fpimm0 : PatLeaf<(fpimm), [{
return N->isExactlyValue(+0.0);
}]>;
def fpimm1 : PatLeaf<(fpimm), [{
return N->isExactlyValue(+1.0);
}]>;
def fpimmn0 : PatLeaf<(fpimm), [{
return N->isExactlyValue(-0.0);
}]>;
def fpimmn1 : PatLeaf<(fpimm), [{
return N->isExactlyValue(-1.0);
}]>;
def : Pat<(f64 fpimm0), (FMOV F0)>;
def : Pat<(f64 fpimm1), (FMOV F1)>;
def : Pat<(f64 fpimmn0), (FNEG F0)>;
def : Pat<(f64 fpimmn1), (FNEG F1)>;
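// F0 and F1 are architecturally hard-wired to +0.0 and +1.0, so these four
// constants never need a constant-pool load.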
let isTerminator = 1, isBranch = 1 in {
def BRL_NOTCALL : RawForm<0x03, 0xb0, (outs), (ins i64imm:$dst),
"(p0) brl.cond.sptk $dst">, isB;
def BRLCOND_NOTCALL : RawForm<0x03, 0xb0, (outs), (ins PR:$qp, i64imm:$dst),
"($qp) brl.cond.sptk $dst">, isB;
def BRCOND_NOTCALL : RawForm<0x03, 0xb0, (outs), (ins PR:$qp, GR:$dst),
"($qp) br.cond.sptk $dst">, isB;
}
let isCall = 1, /* isTerminator = 1, isBranch = 1, */
Uses = [out0,out1,out2,out3,out4,out5,out6,out7],
// all calls clobber non-callee-saved registers, and for now, they are these:
Defs = [r2,r3,r8,r9,r10,r11,r14,r15,r16,r17,r18,r19,r20,r21,r22,r23,r24,
r25,r26,r27,r28,r29,r30,r31,
p6,p7,p8,p9,p10,p11,p12,p13,p14,p15,
F6,F7,F8,F9,F10,F11,F12,F13,F14,F15,
F32,F33,F34,F35,F36,F37,F38,F39,F40,F41,F42,F43,F44,F45,F46,F47,F48,F49,
F50,F51,F52,F53,F54,F55,F56,
F57,F58,F59,F60,F61,F62,F63,F64,F65,F66,F67,F68,F69,F70,F71,F72,F73,F74,
F75,F76,F77,F78,F79,F80,F81,
F82,F83,F84,F85,F86,F87,F88,F89,F90,F91,F92,F93,F94,F95,F96,F97,F98,F99,
F100,F101,F102,F103,F104,F105,
F106,F107,F108,F109,F110,F111,F112,F113,F114,F115,F116,F117,F118,F119,
F120,F121,F122,F123,F124,F125,F126,F127,
out0,out1,out2,out3,out4,out5,out6,out7] in {
// old pattern call
def BRCALL: RawForm<0x03, 0xb0, (outs), (ins calltarget:$dst),
"br.call.sptk rp = $dst">, isB; // FIXME: teach llvm about branch regs?
// new daggy stuff!
// calls a globaladdress
def BRCALL_IPREL_GA : RawForm<0x03, 0xb0, (outs), (ins calltarget:$dst),
"br.call.sptk rp = $dst">, isB; // FIXME: teach llvm about branch regs?
// calls an externalsymbol
def BRCALL_IPREL_ES : RawForm<0x03, 0xb0, (outs), (ins calltarget:$dst),
"br.call.sptk rp = $dst">, isB; // FIXME: teach llvm about branch regs?
// calls through a function descriptor
def BRCALL_INDIRECT : RawForm<0x03, 0xb0, (outs), (ins GR:$branchreg),
"br.call.sptk rp = $branchreg">, isB; // FIXME: teach llvm about branch regs?
def BRLCOND_CALL : RawForm<0x03, 0xb0, (outs), (ins PR:$qp, i64imm:$dst),
"($qp) brl.cond.call.sptk $dst">, isB;
def BRCOND_CALL : RawForm<0x03, 0xb0, (outs), (ins PR:$qp, GR:$dst),
"($qp) br.cond.call.sptk $dst">, isB;
}
// Return branch:
let isTerminator = 1, isReturn = 1 in
def RET : AForm_DAG<0x03, 0x0b, (outs), (ins),
"br.ret.sptk.many rp",
[(retflag)]>, isB; // return
def : Pat<(ret), (RET)>;
// the evil stop bit of despair
def STOP : PseudoInstIA64<(outs), (ins variable_ops), ";;">;
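// ";;" is an architectural stop: it ends the current instruction group, so
// instructions after it may depend on results produced before it.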


@ -1,34 +0,0 @@
//===-- IA64MachineFunctionInfo.h - IA64 per-function info -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
//===----------------------------------------------------------------------===//
//
// This file declares IA64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//
#ifndef IA64MACHINEFUNCTIONINFO_H
#define IA64MACHINEFUNCTIONINFO_H
#include "llvm/CodeGen/MachineFunction.h"
//#include "IA64JITInfo.h"
namespace llvm {
class IA64FunctionInfo : public MachineFunctionInfo {
public:
unsigned outRegsUsed; // how many 'out' registers are used
// by this machinefunction? (used to compute the appropriate
// entry in the 'alloc' instruction at the top of the
// machinefunction)
  explicit IA64FunctionInfo(MachineFunction& MF) { outRegsUsed=0; }
};
} // End llvm namespace
#endif


@ -1,320 +0,0 @@
//===- IA64RegisterInfo.cpp - IA64 Register Information ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the IA64 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on IA64.
//
//===----------------------------------------------------------------------===//
#include "IA64.h"
#include "IA64RegisterInfo.h"
#include "IA64InstrBuilder.h"
#include "IA64MachineFunctionInfo.h"
#include "llvm/Constants.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
IA64RegisterInfo::IA64RegisterInfo(const TargetInstrInfo &tii)
: IA64GenRegisterInfo(IA64::ADJUSTCALLSTACKDOWN, IA64::ADJUSTCALLSTACKUP),
TII(tii) {}
const unsigned* IA64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
const {
static const unsigned CalleeSavedRegs[] = {
IA64::r5, 0
};
return CalleeSavedRegs;
}
const TargetRegisterClass* const*
IA64RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
&IA64::GRRegClass, 0
};
return CalleeSavedRegClasses;
}
BitVector IA64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
Reserved.set(IA64::r0);
Reserved.set(IA64::r1);
Reserved.set(IA64::r2);
Reserved.set(IA64::r5);
Reserved.set(IA64::r12);
Reserved.set(IA64::r13);
Reserved.set(IA64::r22);
Reserved.set(IA64::rp);
return Reserved;
}
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas or
// if frame pointer elimination is disabled.
//
bool IA64RegisterInfo::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
return NoFramePointerElim || MFI->hasVarSizedObjects();
}
void IA64RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
if (hasFP(MF)) {
    // If we have a frame pointer, turn the adjcallstackdown instruction into a
    // 'sub SP, <amt>' and the adjcallstackup instruction into an
    // 'add SP, <amt>'
MachineInstr *Old = I;
unsigned Amount = Old->getOperand(0).getImm();
DebugLoc dl = Old->getDebugLoc();
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
Amount = (Amount+Align-1)/Align*Align;
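      // e.g. with Align = 16, an Amount of 20 is rounded up to 32.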
// Replace the pseudo instruction with a new instruction...
if (Old->getOpcode() == IA64::ADJUSTCALLSTACKDOWN) {
BuildMI(MBB, I, dl, TII.get(IA64::ADDIMM22), IA64::r12)
.addReg(IA64::r12).addImm(-Amount);
} else {
assert(Old->getOpcode() == IA64::ADJUSTCALLSTACKUP);
BuildMI(MBB, I, dl, TII.get(IA64::ADDIMM22), IA64::r12)
.addReg(IA64::r12).addImm(Amount);
}
}
}
MBB.erase(I);
}
void IA64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS)const{
assert(SPAdj == 0 && "Unexpected");
unsigned i = 0;
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
DebugLoc dl = MI.getDebugLoc();
bool FP = hasFP(MF);
while (!MI.getOperand(i).isFI()) {
++i;
assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
}
int FrameIndex = MI.getOperand(i).getIndex();
// choose a base register: ( hasFP? framepointer : stack pointer )
unsigned BaseRegister = FP ? IA64::r5 : IA64::r12;
// Add the base register
MI.getOperand(i).ChangeToRegister(BaseRegister, false);
// Now add the frame object offset to the offset from r1.
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
// If we're not using a Frame Pointer that has been set to the value of the
// SP before having the stack size subtracted from it, then add the stack size
// to Offset to get the correct offset.
Offset += MF.getFrameInfo()->getStackSize();
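  // For instance, an object at FrameInfo offset -16 in a 128-byte frame ends
  // up at offset 112 from the base register.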
// XXX: we use 'r22' as another hack+slash temporary register here :(
if (Offset <= 8191 && Offset >= -8192) { // smallish offset
// Fix up the old:
MI.getOperand(i).ChangeToRegister(IA64::r22, false);
//insert the new
BuildMI(MBB, II, dl, TII.get(IA64::ADDIMM22), IA64::r22)
.addReg(BaseRegister).addImm(Offset);
} else { // it's big
//fix up the old:
MI.getOperand(i).ChangeToRegister(IA64::r22, false);
BuildMI(MBB, II, dl, TII.get(IA64::MOVLIMM64), IA64::r22).addImm(Offset);
BuildMI(MBB, II, dl, TII.get(IA64::ADD), IA64::r22).addReg(BaseRegister)
.addReg(IA64::r22);
}
}
void IA64RegisterInfo::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
bool FP = hasFP(MF);
DebugLoc dl = (MBBI != MBB.end() ?
MBBI->getDebugLoc() : DebugLoc::getUnknownLoc());
// first, we handle the 'alloc' instruction, that should be right up the
// top of any function
static const unsigned RegsInOrder[96] = { // there are 96 GPRs the
// RSE worries about
IA64::r32, IA64::r33, IA64::r34, IA64::r35,
IA64::r36, IA64::r37, IA64::r38, IA64::r39, IA64::r40, IA64::r41,
IA64::r42, IA64::r43, IA64::r44, IA64::r45, IA64::r46, IA64::r47,
IA64::r48, IA64::r49, IA64::r50, IA64::r51, IA64::r52, IA64::r53,
IA64::r54, IA64::r55, IA64::r56, IA64::r57, IA64::r58, IA64::r59,
IA64::r60, IA64::r61, IA64::r62, IA64::r63, IA64::r64, IA64::r65,
IA64::r66, IA64::r67, IA64::r68, IA64::r69, IA64::r70, IA64::r71,
IA64::r72, IA64::r73, IA64::r74, IA64::r75, IA64::r76, IA64::r77,
IA64::r78, IA64::r79, IA64::r80, IA64::r81, IA64::r82, IA64::r83,
IA64::r84, IA64::r85, IA64::r86, IA64::r87, IA64::r88, IA64::r89,
IA64::r90, IA64::r91, IA64::r92, IA64::r93, IA64::r94, IA64::r95,
IA64::r96, IA64::r97, IA64::r98, IA64::r99, IA64::r100, IA64::r101,
IA64::r102, IA64::r103, IA64::r104, IA64::r105, IA64::r106, IA64::r107,
IA64::r108, IA64::r109, IA64::r110, IA64::r111, IA64::r112, IA64::r113,
IA64::r114, IA64::r115, IA64::r116, IA64::r117, IA64::r118, IA64::r119,
IA64::r120, IA64::r121, IA64::r122, IA64::r123, IA64::r124, IA64::r125,
IA64::r126, IA64::r127 };
unsigned numStackedGPRsUsed=0;
for (int i=0; i != 96; i++) {
if (MF.getRegInfo().isPhysRegUsed(RegsInOrder[i]))
      numStackedGPRsUsed=i+1; // i+1, not ++: track the highest stacked GPR used (consider fn(fp, fp, int))
}
unsigned numOutRegsUsed=MF.getInfo<IA64FunctionInfo>()->outRegsUsed;
// XXX FIXME : this code should be a bit more reliable (in case there _isn't_
// a pseudo_alloc in the MBB)
unsigned dstRegOfPseudoAlloc;
for(MBBI = MBB.begin(); /*MBBI->getOpcode() != IA64::PSEUDO_ALLOC*/; ++MBBI) {
assert(MBBI != MBB.end());
if(MBBI->getOpcode() == IA64::PSEUDO_ALLOC) {
dstRegOfPseudoAlloc=MBBI->getOperand(0).getReg();
break;
}
}
if (MBBI != MBB.end()) dl = MBBI->getDebugLoc();
BuildMI(MBB, MBBI, dl, TII.get(IA64::ALLOC)).
addReg(dstRegOfPseudoAlloc).addImm(0).
addImm(numStackedGPRsUsed).addImm(numOutRegsUsed).addImm(0);
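  // i.e. this emits "alloc rX = ar.pfs,0,<#stacked GPRs>,<#out regs>,0",
  // sizing this function's register stack frame.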
// Get the number of bytes to allocate from the FrameInfo
unsigned NumBytes = MFI->getStackSize();
if(FP)
NumBytes += 8; // reserve space for the old FP
// Do we need to allocate space on the stack?
if (NumBytes == 0)
return;
// Add 16 bytes at the bottom of the stack (scratch area)
// and round the size to a multiple of the alignment.
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
unsigned Size = 16 + (FP ? 8 : 0);
NumBytes = (NumBytes+Size+Align-1)/Align*Align;
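  // e.g. a 40-byte frame with an FP becomes (40 + 24 + 15) & ~15 = 64 bytes.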
// Update frame info to pretend that this is part of the stack...
MFI->setStackSize(NumBytes);
// adjust stack pointer: r12 -= numbytes
if (NumBytes <= 8191) {
BuildMI(MBB, MBBI, dl, TII.get(IA64::ADDIMM22),IA64::r12).addReg(IA64::r12).
addImm(-NumBytes);
} else { // we use r22 as a scratch register here
// first load the decrement into r22
BuildMI(MBB, MBBI, dl, TII.get(IA64::MOVLIMM64), IA64::r22).
addImm(-NumBytes);
// FIXME: MOVLSI32 expects a _u_32imm
// then add (subtract) it to r12 (stack ptr)
BuildMI(MBB, MBBI, dl, TII.get(IA64::ADD), IA64::r12)
.addReg(IA64::r12).addReg(IA64::r22);
}
// now if we need to, save the old FP and set the new
if (FP) {
BuildMI(MBB, MBBI,dl,TII.get(IA64::ST8)).addReg(IA64::r12).addReg(IA64::r5);
// this must be the last instr in the prolog ? (XXX: why??)
BuildMI(MBB, MBBI, dl, TII.get(IA64::MOV), IA64::r5).addReg(IA64::r12);
}
}
void IA64RegisterInfo::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
MachineBasicBlock::iterator MBBI = prior(MBB.end());
assert(MBBI->getOpcode() == IA64::RET &&
"Can only insert epilog into returning blocks");
DebugLoc dl = MBBI->getDebugLoc();
bool FP = hasFP(MF);
// Get the number of bytes allocated from the FrameInfo...
unsigned NumBytes = MFI->getStackSize();
//now if we need to, restore the old FP
if (FP) {
//copy the FP into the SP (discards allocas)
BuildMI(MBB, MBBI, dl, TII.get(IA64::MOV), IA64::r12).addReg(IA64::r5);
//restore the FP
BuildMI(MBB, MBBI, dl, TII.get(IA64::LD8), IA64::r5).addReg(IA64::r5);
}
if (NumBytes != 0) {
if (NumBytes <= 8191) {
BuildMI(MBB, MBBI, dl, TII.get(IA64::ADDIMM22),IA64::r12).
addReg(IA64::r12).addImm(NumBytes);
} else {
BuildMI(MBB, MBBI, dl, TII.get(IA64::MOVLIMM64), IA64::r22).
addImm(NumBytes);
BuildMI(MBB, MBBI, dl, TII.get(IA64::ADD), IA64::r12).addReg(IA64::r12).
addReg(IA64::r22);
}
}
}
unsigned IA64RegisterInfo::getRARegister() const {
llvm_unreachable("What is the return address register");
return 0;
}
unsigned IA64RegisterInfo::getFrameRegister(MachineFunction &MF) const {
return hasFP(MF) ? IA64::r5 : IA64::r12;
}
unsigned IA64RegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
return 0;
}
unsigned IA64RegisterInfo::getEHHandlerRegister() const {
llvm_unreachable("What is the exception handler register");
return 0;
}
int IA64RegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
llvm_unreachable("What is the dwarf register number");
return -1;
}
#include "IA64GenRegisterInfo.inc"


@ -1,63 +0,0 @@
//===- IA64RegisterInfo.h - IA64 Register Information Impl ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the IA64 implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef IA64REGISTERINFO_H
#define IA64REGISTERINFO_H
#include "llvm/Target/TargetRegisterInfo.h"
#include "IA64GenRegisterInfo.h.inc"
namespace llvm {
class TargetInstrInfo;
struct IA64RegisterInfo : public IA64GenRegisterInfo {
const TargetInstrInfo &TII;
IA64RegisterInfo(const TargetInstrInfo &tii);
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const TargetRegisterClass* const* getCalleeSavedRegClasses(
const MachineFunction *MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
bool hasFP(const MachineFunction &MF) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, RegScavenger *RS = NULL) const;
void emitPrologue(MachineFunction &MF) const;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(MachineFunction &MF) const;
// Exception handling queries.
unsigned getEHExceptionRegister() const;
unsigned getEHHandlerRegister() const;
int getDwarfRegNum(unsigned RegNum, bool isEH) const;
};
} // End llvm namespace
#endif


@ -1,509 +0,0 @@
//===- IA64RegisterInfo.td - Describe the IA64 Register File ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the IA64 register file, defining the registers
// themselves, aliases between the registers, and the register classes built
// out of the registers.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Register definitions...
//
class IA64Register<string n> : Register<n> {
let Namespace = "IA64";
}
// GR - One of 128 64-bit general registers
class GR<bits<7> num, string n> : IA64Register<n> {
field bits<7> Num = num;
}
// FP - One of 128 82-bit floating-point registers
class FP<bits<7> num, string n> : IA64Register<n> {
field bits<7> Num = num;
}
// PR - One of 64 1-bit predicate registers
class PR<bits<6> num, string n> : IA64Register<n> {
field bits<6> Num = num;
}
/* general registers */
def r0 : GR< 0, "r0">, DwarfRegNum<[0]>;
def r1 : GR< 1, "r1">, DwarfRegNum<[1]>;
def r2 : GR< 2, "r2">, DwarfRegNum<[2]>;
def r3 : GR< 3, "r3">, DwarfRegNum<[3]>;
def r4 : GR< 4, "r4">, DwarfRegNum<[4]>;
def r5 : GR< 5, "r5">, DwarfRegNum<[5]>;
def r6 : GR< 6, "r6">, DwarfRegNum<[6]>;
def r7 : GR< 7, "r7">, DwarfRegNum<[7]>;
def r8 : GR< 8, "r8">, DwarfRegNum<[8]>;
def r9 : GR< 9, "r9">, DwarfRegNum<[9]>;
def r10 : GR< 10, "r10">, DwarfRegNum<[10]>;
def r11 : GR< 11, "r11">, DwarfRegNum<[11]>;
def r12 : GR< 12, "r12">, DwarfRegNum<[12]>;
def r13 : GR< 13, "r13">, DwarfRegNum<[13]>;
def r14 : GR< 14, "r14">, DwarfRegNum<[14]>;
def r15 : GR< 15, "r15">, DwarfRegNum<[15]>;
def r16 : GR< 16, "r16">, DwarfRegNum<[16]>;
def r17 : GR< 17, "r17">, DwarfRegNum<[17]>;
def r18 : GR< 18, "r18">, DwarfRegNum<[18]>;
def r19 : GR< 19, "r19">, DwarfRegNum<[19]>;
def r20 : GR< 20, "r20">, DwarfRegNum<[20]>;
def r21 : GR< 21, "r21">, DwarfRegNum<[21]>;
def r22 : GR< 22, "r22">, DwarfRegNum<[22]>;
def r23 : GR< 23, "r23">, DwarfRegNum<[23]>;
def r24 : GR< 24, "r24">, DwarfRegNum<[24]>;
def r25 : GR< 25, "r25">, DwarfRegNum<[25]>;
def r26 : GR< 26, "r26">, DwarfRegNum<[26]>;
def r27 : GR< 27, "r27">, DwarfRegNum<[27]>;
def r28 : GR< 28, "r28">, DwarfRegNum<[28]>;
def r29 : GR< 29, "r29">, DwarfRegNum<[29]>;
def r30 : GR< 30, "r30">, DwarfRegNum<[30]>;
def r31 : GR< 31, "r31">, DwarfRegNum<[31]>;
def r32 : GR< 32, "r32">, DwarfRegNum<[32]>;
def r33 : GR< 33, "r33">, DwarfRegNum<[33]>;
def r34 : GR< 34, "r34">, DwarfRegNum<[34]>;
def r35 : GR< 35, "r35">, DwarfRegNum<[35]>;
def r36 : GR< 36, "r36">, DwarfRegNum<[36]>;
def r37 : GR< 37, "r37">, DwarfRegNum<[37]>;
def r38 : GR< 38, "r38">, DwarfRegNum<[38]>;
def r39 : GR< 39, "r39">, DwarfRegNum<[39]>;
def r40 : GR< 40, "r40">, DwarfRegNum<[40]>;
def r41 : GR< 41, "r41">, DwarfRegNum<[41]>;
def r42 : GR< 42, "r42">, DwarfRegNum<[42]>;
def r43 : GR< 43, "r43">, DwarfRegNum<[43]>;
def r44 : GR< 44, "r44">, DwarfRegNum<[44]>;
def r45 : GR< 45, "r45">, DwarfRegNum<[45]>;
def r46 : GR< 46, "r46">, DwarfRegNum<[46]>;
def r47 : GR< 47, "r47">, DwarfRegNum<[47]>;
def r48 : GR< 48, "r48">, DwarfRegNum<[48]>;
def r49 : GR< 49, "r49">, DwarfRegNum<[49]>;
def r50 : GR< 50, "r50">, DwarfRegNum<[50]>;
def r51 : GR< 51, "r51">, DwarfRegNum<[51]>;
def r52 : GR< 52, "r52">, DwarfRegNum<[52]>;
def r53 : GR< 53, "r53">, DwarfRegNum<[53]>;
def r54 : GR< 54, "r54">, DwarfRegNum<[54]>;
def r55 : GR< 55, "r55">, DwarfRegNum<[55]>;
def r56 : GR< 56, "r56">, DwarfRegNum<[56]>;
def r57 : GR< 57, "r57">, DwarfRegNum<[57]>;
def r58 : GR< 58, "r58">, DwarfRegNum<[58]>;
def r59 : GR< 59, "r59">, DwarfRegNum<[59]>;
def r60 : GR< 60, "r60">, DwarfRegNum<[60]>;
def r61 : GR< 61, "r61">, DwarfRegNum<[61]>;
def r62 : GR< 62, "r62">, DwarfRegNum<[62]>;
def r63 : GR< 63, "r63">, DwarfRegNum<[63]>;
def r64 : GR< 64, "r64">, DwarfRegNum<[64]>;
def r65 : GR< 65, "r65">, DwarfRegNum<[65]>;
def r66 : GR< 66, "r66">, DwarfRegNum<[66]>;
def r67 : GR< 67, "r67">, DwarfRegNum<[67]>;
def r68 : GR< 68, "r68">, DwarfRegNum<[68]>;
def r69 : GR< 69, "r69">, DwarfRegNum<[69]>;
def r70 : GR< 70, "r70">, DwarfRegNum<[70]>;
def r71 : GR< 71, "r71">, DwarfRegNum<[71]>;
def r72 : GR< 72, "r72">, DwarfRegNum<[72]>;
def r73 : GR< 73, "r73">, DwarfRegNum<[73]>;
def r74 : GR< 74, "r74">, DwarfRegNum<[74]>;
def r75 : GR< 75, "r75">, DwarfRegNum<[75]>;
def r76 : GR< 76, "r76">, DwarfRegNum<[76]>;
def r77 : GR< 77, "r77">, DwarfRegNum<[77]>;
def r78 : GR< 78, "r78">, DwarfRegNum<[78]>;
def r79 : GR< 79, "r79">, DwarfRegNum<[79]>;
def r80 : GR< 80, "r80">, DwarfRegNum<[80]>;
def r81 : GR< 81, "r81">, DwarfRegNum<[81]>;
def r82 : GR< 82, "r82">, DwarfRegNum<[82]>;
def r83 : GR< 83, "r83">, DwarfRegNum<[83]>;
def r84 : GR< 84, "r84">, DwarfRegNum<[84]>;
def r85 : GR< 85, "r85">, DwarfRegNum<[85]>;
def r86 : GR< 86, "r86">, DwarfRegNum<[86]>;
def r87 : GR< 87, "r87">, DwarfRegNum<[87]>;
def r88 : GR< 88, "r88">, DwarfRegNum<[88]>;
def r89 : GR< 89, "r89">, DwarfRegNum<[89]>;
def r90 : GR< 90, "r90">, DwarfRegNum<[90]>;
def r91 : GR< 91, "r91">, DwarfRegNum<[91]>;
def r92 : GR< 92, "r92">, DwarfRegNum<[92]>;
def r93 : GR< 93, "r93">, DwarfRegNum<[93]>;
def r94 : GR< 94, "r94">, DwarfRegNum<[94]>;
def r95 : GR< 95, "r95">, DwarfRegNum<[95]>;
def r96 : GR< 96, "r96">, DwarfRegNum<[96]>;
def r97 : GR< 97, "r97">, DwarfRegNum<[97]>;
def r98 : GR< 98, "r98">, DwarfRegNum<[98]>;
def r99 : GR< 99, "r99">, DwarfRegNum<[99]>;
def r100 : GR< 100, "r100">, DwarfRegNum<[100]>;
def r101 : GR< 101, "r101">, DwarfRegNum<[101]>;
def r102 : GR< 102, "r102">, DwarfRegNum<[102]>;
def r103 : GR< 103, "r103">, DwarfRegNum<[103]>;
def r104 : GR< 104, "r104">, DwarfRegNum<[104]>;
def r105 : GR< 105, "r105">, DwarfRegNum<[105]>;
def r106 : GR< 106, "r106">, DwarfRegNum<[106]>;
def r107 : GR< 107, "r107">, DwarfRegNum<[107]>;
def r108 : GR< 108, "r108">, DwarfRegNum<[108]>;
def r109 : GR< 109, "r109">, DwarfRegNum<[109]>;
def r110 : GR< 110, "r110">, DwarfRegNum<[110]>;
def r111 : GR< 111, "r111">, DwarfRegNum<[111]>;
def r112 : GR< 112, "r112">, DwarfRegNum<[112]>;
def r113 : GR< 113, "r113">, DwarfRegNum<[113]>;
def r114 : GR< 114, "r114">, DwarfRegNum<[114]>;
def r115 : GR< 115, "r115">, DwarfRegNum<[115]>;
def r116 : GR< 116, "r116">, DwarfRegNum<[116]>;
def r117 : GR< 117, "r117">, DwarfRegNum<[117]>;
def r118 : GR< 118, "r118">, DwarfRegNum<[118]>;
def r119 : GR< 119, "r119">, DwarfRegNum<[119]>;
def r120 : GR< 120, "r120">, DwarfRegNum<[120]>;
def r121 : GR< 121, "r121">, DwarfRegNum<[121]>;
def r122 : GR< 122, "r122">, DwarfRegNum<[122]>;
def r123 : GR< 123, "r123">, DwarfRegNum<[123]>;
def r124 : GR< 124, "r124">, DwarfRegNum<[124]>;
def r125 : GR< 125, "r125">, DwarfRegNum<[125]>;
def r126 : GR< 126, "r126">, DwarfRegNum<[126]>;
def r127 : GR< 127, "r127">, DwarfRegNum<[127]>;
/* floating-point registers */
def F0 : FP< 0, "f0">, DwarfRegNum<[128]>;
def F1 : FP< 1, "f1">, DwarfRegNum<[129]>;
def F2 : FP< 2, "f2">, DwarfRegNum<[130]>;
def F3 : FP< 3, "f3">, DwarfRegNum<[131]>;
def F4 : FP< 4, "f4">, DwarfRegNum<[132]>;
def F5 : FP< 5, "f5">, DwarfRegNum<[133]>;
def F6 : FP< 6, "f6">, DwarfRegNum<[134]>;
def F7 : FP< 7, "f7">, DwarfRegNum<[135]>;
def F8 : FP< 8, "f8">, DwarfRegNum<[136]>;
def F9 : FP< 9, "f9">, DwarfRegNum<[137]>;
def F10 : FP< 10, "f10">, DwarfRegNum<[138]>;
def F11 : FP< 11, "f11">, DwarfRegNum<[139]>;
def F12 : FP< 12, "f12">, DwarfRegNum<[140]>;
def F13 : FP< 13, "f13">, DwarfRegNum<[141]>;
def F14 : FP< 14, "f14">, DwarfRegNum<[142]>;
def F15 : FP< 15, "f15">, DwarfRegNum<[143]>;
def F16 : FP< 16, "f16">, DwarfRegNum<[144]>;
def F17 : FP< 17, "f17">, DwarfRegNum<[145]>;
def F18 : FP< 18, "f18">, DwarfRegNum<[146]>;
def F19 : FP< 19, "f19">, DwarfRegNum<[147]>;
def F20 : FP< 20, "f20">, DwarfRegNum<[148]>;
def F21 : FP< 21, "f21">, DwarfRegNum<[149]>;
def F22 : FP< 22, "f22">, DwarfRegNum<[150]>;
def F23 : FP< 23, "f23">, DwarfRegNum<[151]>;
def F24 : FP< 24, "f24">, DwarfRegNum<[152]>;
def F25 : FP< 25, "f25">, DwarfRegNum<[153]>;
def F26 : FP< 26, "f26">, DwarfRegNum<[154]>;
def F27 : FP< 27, "f27">, DwarfRegNum<[155]>;
def F28 : FP< 28, "f28">, DwarfRegNum<[156]>;
def F29 : FP< 29, "f29">, DwarfRegNum<[157]>;
def F30 : FP< 30, "f30">, DwarfRegNum<[158]>;
def F31 : FP< 31, "f31">, DwarfRegNum<[159]>;
def F32 : FP< 32, "f32">, DwarfRegNum<[160]>;
def F33 : FP< 33, "f33">, DwarfRegNum<[161]>;
def F34 : FP< 34, "f34">, DwarfRegNum<[162]>;
def F35 : FP< 35, "f35">, DwarfRegNum<[163]>;
def F36 : FP< 36, "f36">, DwarfRegNum<[164]>;
def F37 : FP< 37, "f37">, DwarfRegNum<[165]>;
def F38 : FP< 38, "f38">, DwarfRegNum<[166]>;
def F39 : FP< 39, "f39">, DwarfRegNum<[167]>;
def F40 : FP< 40, "f40">, DwarfRegNum<[168]>;
def F41 : FP< 41, "f41">, DwarfRegNum<[169]>;
def F42 : FP< 42, "f42">, DwarfRegNum<[170]>;
def F43 : FP< 43, "f43">, DwarfRegNum<[171]>;
def F44 : FP< 44, "f44">, DwarfRegNum<[172]>;
def F45 : FP< 45, "f45">, DwarfRegNum<[173]>;
def F46 : FP< 46, "f46">, DwarfRegNum<[174]>;
def F47 : FP< 47, "f47">, DwarfRegNum<[175]>;
def F48 : FP< 48, "f48">, DwarfRegNum<[176]>;
def F49 : FP< 49, "f49">, DwarfRegNum<[177]>;
def F50 : FP< 50, "f50">, DwarfRegNum<[178]>;
def F51 : FP< 51, "f51">, DwarfRegNum<[179]>;
def F52 : FP< 52, "f52">, DwarfRegNum<[180]>;
def F53 : FP< 53, "f53">, DwarfRegNum<[181]>;
def F54 : FP< 54, "f54">, DwarfRegNum<[182]>;
def F55 : FP< 55, "f55">, DwarfRegNum<[183]>;
def F56 : FP< 56, "f56">, DwarfRegNum<[184]>;
def F57 : FP< 57, "f57">, DwarfRegNum<[185]>;
def F58 : FP< 58, "f58">, DwarfRegNum<[186]>;
def F59 : FP< 59, "f59">, DwarfRegNum<[187]>;
def F60 : FP< 60, "f60">, DwarfRegNum<[188]>;
def F61 : FP< 61, "f61">, DwarfRegNum<[189]>;
def F62 : FP< 62, "f62">, DwarfRegNum<[190]>;
def F63 : FP< 63, "f63">, DwarfRegNum<[191]>;
def F64 : FP< 64, "f64">, DwarfRegNum<[192]>;
def F65 : FP< 65, "f65">, DwarfRegNum<[193]>;
def F66 : FP< 66, "f66">, DwarfRegNum<[194]>;
def F67 : FP< 67, "f67">, DwarfRegNum<[195]>;
def F68 : FP< 68, "f68">, DwarfRegNum<[196]>;
def F69 : FP< 69, "f69">, DwarfRegNum<[197]>;
def F70 : FP< 70, "f70">, DwarfRegNum<[198]>;
def F71 : FP< 71, "f71">, DwarfRegNum<[199]>;
def F72 : FP< 72, "f72">, DwarfRegNum<[200]>;
def F73 : FP< 73, "f73">, DwarfRegNum<[201]>;
def F74 : FP< 74, "f74">, DwarfRegNum<[202]>;
def F75 : FP< 75, "f75">, DwarfRegNum<[203]>;
def F76 : FP< 76, "f76">, DwarfRegNum<[204]>;
def F77 : FP< 77, "f77">, DwarfRegNum<[205]>;
def F78 : FP< 78, "f78">, DwarfRegNum<[206]>;
def F79 : FP< 79, "f79">, DwarfRegNum<[207]>;
def F80 : FP< 80, "f80">, DwarfRegNum<[208]>;
def F81 : FP< 81, "f81">, DwarfRegNum<[209]>;
def F82 : FP< 82, "f82">, DwarfRegNum<[210]>;
def F83 : FP< 83, "f83">, DwarfRegNum<[211]>;
def F84 : FP< 84, "f84">, DwarfRegNum<[212]>;
def F85 : FP< 85, "f85">, DwarfRegNum<[213]>;
def F86 : FP< 86, "f86">, DwarfRegNum<[214]>;
def F87 : FP< 87, "f87">, DwarfRegNum<[215]>;
def F88 : FP< 88, "f88">, DwarfRegNum<[216]>;
def F89 : FP< 89, "f89">, DwarfRegNum<[217]>;
def F90 : FP< 90, "f90">, DwarfRegNum<[218]>;
def F91 : FP< 91, "f91">, DwarfRegNum<[219]>;
def F92 : FP< 92, "f92">, DwarfRegNum<[220]>;
def F93 : FP< 93, "f93">, DwarfRegNum<[221]>;
def F94 : FP< 94, "f94">, DwarfRegNum<[222]>;
def F95 : FP< 95, "f95">, DwarfRegNum<[223]>;
def F96 : FP< 96, "f96">, DwarfRegNum<[224]>;
def F97 : FP< 97, "f97">, DwarfRegNum<[225]>;
def F98 : FP< 98, "f98">, DwarfRegNum<[226]>;
def F99 : FP< 99, "f99">, DwarfRegNum<[227]>;
def F100 : FP< 100, "f100">, DwarfRegNum<[228]>;
def F101 : FP< 101, "f101">, DwarfRegNum<[229]>;
def F102 : FP< 102, "f102">, DwarfRegNum<[230]>;
def F103 : FP< 103, "f103">, DwarfRegNum<[231]>;
def F104 : FP< 104, "f104">, DwarfRegNum<[232]>;
def F105 : FP< 105, "f105">, DwarfRegNum<[233]>;
def F106 : FP< 106, "f106">, DwarfRegNum<[234]>;
def F107 : FP< 107, "f107">, DwarfRegNum<[235]>;
def F108 : FP< 108, "f108">, DwarfRegNum<[236]>;
def F109 : FP< 109, "f109">, DwarfRegNum<[237]>;
def F110 : FP< 110, "f110">, DwarfRegNum<[238]>;
def F111 : FP< 111, "f111">, DwarfRegNum<[239]>;
def F112 : FP< 112, "f112">, DwarfRegNum<[240]>;
def F113 : FP< 113, "f113">, DwarfRegNum<[241]>;
def F114 : FP< 114, "f114">, DwarfRegNum<[242]>;
def F115 : FP< 115, "f115">, DwarfRegNum<[243]>;
def F116 : FP< 116, "f116">, DwarfRegNum<[244]>;
def F117 : FP< 117, "f117">, DwarfRegNum<[245]>;
def F118 : FP< 118, "f118">, DwarfRegNum<[246]>;
def F119 : FP< 119, "f119">, DwarfRegNum<[247]>;
def F120 : FP< 120, "f120">, DwarfRegNum<[248]>;
def F121 : FP< 121, "f121">, DwarfRegNum<[249]>;
def F122 : FP< 122, "f122">, DwarfRegNum<[250]>;
def F123 : FP< 123, "f123">, DwarfRegNum<[251]>;
def F124 : FP< 124, "f124">, DwarfRegNum<[252]>;
def F125 : FP< 125, "f125">, DwarfRegNum<[253]>;
def F126 : FP< 126, "f126">, DwarfRegNum<[254]>;
def F127 : FP< 127, "f127">, DwarfRegNum<[255]>;
/* predicate registers */
def p0 : PR< 0, "p0">, DwarfRegNum<[256]>;
def p1 : PR< 1, "p1">, DwarfRegNum<[257]>;
def p2 : PR< 2, "p2">, DwarfRegNum<[258]>;
def p3 : PR< 3, "p3">, DwarfRegNum<[259]>;
def p4 : PR< 4, "p4">, DwarfRegNum<[260]>;
def p5 : PR< 5, "p5">, DwarfRegNum<[261]>;
def p6 : PR< 6, "p6">, DwarfRegNum<[262]>;
def p7 : PR< 7, "p7">, DwarfRegNum<[263]>;
def p8 : PR< 8, "p8">, DwarfRegNum<[264]>;
def p9 : PR< 9, "p9">, DwarfRegNum<[265]>;
def p10 : PR< 10, "p10">, DwarfRegNum<[266]>;
def p11 : PR< 11, "p11">, DwarfRegNum<[267]>;
def p12 : PR< 12, "p12">, DwarfRegNum<[268]>;
def p13 : PR< 13, "p13">, DwarfRegNum<[269]>;
def p14 : PR< 14, "p14">, DwarfRegNum<[270]>;
def p15 : PR< 15, "p15">, DwarfRegNum<[271]>;
def p16 : PR< 16, "p16">, DwarfRegNum<[272]>;
def p17 : PR< 17, "p17">, DwarfRegNum<[273]>;
def p18 : PR< 18, "p18">, DwarfRegNum<[274]>;
def p19 : PR< 19, "p19">, DwarfRegNum<[275]>;
def p20 : PR< 20, "p20">, DwarfRegNum<[276]>;
def p21 : PR< 21, "p21">, DwarfRegNum<[277]>;
def p22 : PR< 22, "p22">, DwarfRegNum<[278]>;
def p23 : PR< 23, "p23">, DwarfRegNum<[279]>;
def p24 : PR< 24, "p24">, DwarfRegNum<[280]>;
def p25 : PR< 25, "p25">, DwarfRegNum<[281]>;
def p26 : PR< 26, "p26">, DwarfRegNum<[282]>;
def p27 : PR< 27, "p27">, DwarfRegNum<[283]>;
def p28 : PR< 28, "p28">, DwarfRegNum<[284]>;
def p29 : PR< 29, "p29">, DwarfRegNum<[285]>;
def p30 : PR< 30, "p30">, DwarfRegNum<[286]>;
def p31 : PR< 31, "p31">, DwarfRegNum<[287]>;
def p32 : PR< 32, "p32">, DwarfRegNum<[288]>;
def p33 : PR< 33, "p33">, DwarfRegNum<[289]>;
def p34 : PR< 34, "p34">, DwarfRegNum<[290]>;
def p35 : PR< 35, "p35">, DwarfRegNum<[291]>;
def p36 : PR< 36, "p36">, DwarfRegNum<[292]>;
def p37 : PR< 37, "p37">, DwarfRegNum<[293]>;
def p38 : PR< 38, "p38">, DwarfRegNum<[294]>;
def p39 : PR< 39, "p39">, DwarfRegNum<[295]>;
def p40 : PR< 40, "p40">, DwarfRegNum<[296]>;
def p41 : PR< 41, "p41">, DwarfRegNum<[297]>;
def p42 : PR< 42, "p42">, DwarfRegNum<[298]>;
def p43 : PR< 43, "p43">, DwarfRegNum<[299]>;
def p44 : PR< 44, "p44">, DwarfRegNum<[300]>;
def p45 : PR< 45, "p45">, DwarfRegNum<[301]>;
def p46 : PR< 46, "p46">, DwarfRegNum<[302]>;
def p47 : PR< 47, "p47">, DwarfRegNum<[303]>;
def p48 : PR< 48, "p48">, DwarfRegNum<[304]>;
def p49 : PR< 49, "p49">, DwarfRegNum<[305]>;
def p50 : PR< 50, "p50">, DwarfRegNum<[306]>;
def p51 : PR< 51, "p51">, DwarfRegNum<[307]>;
def p52 : PR< 52, "p52">, DwarfRegNum<[308]>;
def p53 : PR< 53, "p53">, DwarfRegNum<[309]>;
def p54 : PR< 54, "p54">, DwarfRegNum<[310]>;
def p55 : PR< 55, "p55">, DwarfRegNum<[311]>;
def p56 : PR< 56, "p56">, DwarfRegNum<[312]>;
def p57 : PR< 57, "p57">, DwarfRegNum<[313]>;
def p58 : PR< 58, "p58">, DwarfRegNum<[314]>;
def p59 : PR< 59, "p59">, DwarfRegNum<[315]>;
def p60 : PR< 60, "p60">, DwarfRegNum<[316]>;
def p61 : PR< 61, "p61">, DwarfRegNum<[317]>;
def p62 : PR< 62, "p62">, DwarfRegNum<[318]>;
def p63 : PR< 63, "p63">, DwarfRegNum<[319]>;
// XXX : this is temporary, we'll eventually have the output registers
// in the general purpose register class too?
def out0 : GR<0, "out0">, DwarfRegNum<[120]>;
def out1 : GR<1, "out1">, DwarfRegNum<[121]>;
def out2 : GR<2, "out2">, DwarfRegNum<[122]>;
def out3 : GR<3, "out3">, DwarfRegNum<[123]>;
def out4 : GR<4, "out4">, DwarfRegNum<[124]>;
def out5 : GR<5, "out5">, DwarfRegNum<[125]>;
def out6 : GR<6, "out6">, DwarfRegNum<[126]>;
def out7 : GR<7, "out7">, DwarfRegNum<[127]>;
// application (special) registers:
// "previous function state" application register
def AR_PFS : GR<0, "ar.pfs">, DwarfRegNum<[331]>;
// "return pointer" (this is really branch register b0)
def rp : GR<0, "rp">, DwarfRegNum<[-1]>;
// branch reg 6
def B6 : GR<0, "b6">, DwarfRegNum<[326]>;
//===----------------------------------------------------------------------===//
// Register Class Definitions... now that we have all of the pieces, define the
// top-level register classes. The order specified in the register list is
// implicitly defined to be the register allocation order.
//
// these are the scratch (+stacked) general registers
// FIXME/XXX we also reserve a frame pointer (r5)
// FIXME/XXX we also reserve r2 for spilling/filling predicates
// in IA64RegisterInfo.cpp
// FIXME/XXX we also reserve r22 for calculating addresses
// in IA64RegisterInfo.cpp
def GR : RegisterClass<"IA64", [i64], 64,
[
//FIXME!: for both readability and performance, we don't want the out
// registers to be the first ones allocated
out7, out6, out5, out4, out3, out2, out1, out0,
r3, r8, r9, r10, r11, r14, r15,
r16, r17, r18, r19, r20, r21, r23,
r24, r25, r26, r27, r28, r29, r30, r31,
r32, r33, r34, r35, r36, r37, r38, r39,
r40, r41, r42, r43, r44, r45, r46, r47,
r48, r49, r50, r51, r52, r53, r54, r55,
r56, r57, r58, r59, r60, r61, r62, r63,
r64, r65, r66, r67, r68, r69, r70, r71,
r72, r73, r74, r75, r76, r77, r78, r79,
r80, r81, r82, r83, r84, r85, r86, r87,
r88, r89, r90, r91, r92, r93, r94, r95,
r96, r97, r98, r99, r100, r101, r102, r103,
r104, r105, r106, r107, r108, r109, r110, r111,
r112, r113, r114, r115, r116, r117, r118, r119,
// last 17 are special (look down)
r120, r121, r122, r123, r124, r125, r126, r127,
r0, r1, r2, r5, r12, r13, r22, rp, AR_PFS]>
{
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
iterator allocation_order_end(const MachineFunction &MF) const;
}];
let MethodBodies = [{
GRClass::iterator
GRClass::allocation_order_begin(const MachineFunction &MF) const {
// hide the 8 out? registers appropriately:
return begin()+(8-(MF.getInfo<IA64FunctionInfo>()->outRegsUsed));
}
GRClass::iterator
GRClass::allocation_order_end(const MachineFunction &MF) const {
// the 9 special registers r0,r1,r2,r5,r12,r13 etc
int numReservedRegs=9;
// we also can't allocate registers for use as locals if they're already
// required as 'out' registers
numReservedRegs+=MF.getInfo<IA64FunctionInfo>()->outRegsUsed;
return end()-numReservedRegs; // hide registers appropriately
}
}];
}
// these are the scratch (+stacked) FP registers
def FP : RegisterClass<"IA64", [f64], 64,
[F6, F7,
F8, F9, F10, F11, F12, F13, F14, F15,
F32, F33, F34, F35, F36, F37, F38, F39,
F40, F41, F42, F43, F44, F45, F46, F47,
F48, F49, F50, F51, F52, F53, F54, F55,
F56, F57, F58, F59, F60, F61, F62, F63,
F64, F65, F66, F67, F68, F69, F70, F71,
F72, F73, F74, F75, F76, F77, F78, F79,
F80, F81, F82, F83, F84, F85, F86, F87,
F88, F89, F90, F91, F92, F93, F94, F95,
F96, F97, F98, F99, F100, F101, F102, F103,
F104, F105, F106, F107, F108, F109, F110, F111,
F112, F113, F114, F115, F116, F117, F118, F119,
F120, F121, F122, F123, F124, F125, F126, F127,
F0, F1]> // these last two are hidden
{
  // the 128s here are to make stf.spill/ldf.fill happy:
  // when storing full (82-bit) FP regs to stack slots,
  // we need 16-byte alignment
let Size=128;
let Alignment=128;
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
iterator allocation_order_end(const MachineFunction &MF) const;
}];
let MethodBodies = [{
FPClass::iterator
FPClass::allocation_order_begin(const MachineFunction &MF) const {
return begin(); // we don't hide any FP regs from the start
}
FPClass::iterator
FPClass::allocation_order_end(const MachineFunction &MF) const {
return end()-2; // we hide regs F0, F1 from the end
}
}];
}
// these are the predicate registers, p0 (1/TRUE) is not here
def PR : RegisterClass<"IA64", [i1], 64,
// for now, let's be wimps and only have the scratch predicate regs
[p6, p7, p8, p9, p10, p11, p12, p13, p14, p15]> {
let Size = 64;
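  // Predicates are spilled and filled through a full 64-bit GR ("mov rX = pr"),
  // so each one gets a 64-bit spill slot.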
}
/*
[p1, p2, p3, p4, p5, p6, p7,
p8, p9, p10, p11, p12, p13, p14, p15,
p16, p17, p18, p19, p20, p21, p22, p23,
p24, p25, p26, p27, p28, p29, p30, p31,
p32, p33, p34, p35, p36, p37, p38, p39,
p40, p41, p42, p43, p44, p45, p46, p47,
p48, p49, p50, p51, p52, p53, p54, p55,
p56, p57, p58, p59, p60, p61, p62, p63]>;
*/


@ -1,18 +0,0 @@
//===-- IA64Subtarget.cpp - IA64 Subtarget Information ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IA64 specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "subtarget"
#include "IA64Subtarget.h"
using namespace llvm;
IA64Subtarget::IA64Subtarget() {}


@ -1,28 +0,0 @@
//====---- IA64Subtarget.h - Define Subtarget for the IA64 -----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the IA64 specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//
#ifndef IA64SUBTARGET_H
#define IA64SUBTARGET_H
#include "llvm/Target/TargetSubtarget.h"
namespace llvm {
class IA64Subtarget : public TargetSubtarget {
public:
IA64Subtarget();
};
} // End llvm namespace
#endif


@ -1,39 +0,0 @@
//===-- IA64TargetAsmInfo.cpp - IA64 asm properties -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of the IA64TargetAsmInfo properties.
//
//===----------------------------------------------------------------------===//
#include "IA64TargetAsmInfo.h"
#include "llvm/Constants.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
IA64TargetAsmInfo::IA64TargetAsmInfo(const TargetMachine &TM):
ELFTargetAsmInfo(TM) {
CommentString = "//";
Data8bitsDirective = "\tdata1\t"; // FIXME: check that we are
Data16bitsDirective = "\tdata2.ua\t"; // disabling auto-alignment
Data32bitsDirective = "\tdata4.ua\t"; // properly
Data64bitsDirective = "\tdata8.ua\t";
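  // With these, e.g., a 32-bit global initializer of 42 comes out as
  // "data4.ua 42" instead of the usual ".long 42".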
ZeroDirective = "\t.skip\t";
AsciiDirective = "\tstring\t";
GlobalVarAddrPrefix="";
GlobalVarAddrSuffix="";
FunctionAddrPrefix="@fptr(";
FunctionAddrSuffix=")";
// FIXME: would be nice to have rodata (no 'w') when appropriate?
ConstantPoolSection = "\n\t.section .data, \"aw\", \"progbits\"\n";
}
// FIXME: Support small data/bss/rodata sections someday.


@ -1,32 +0,0 @@
//=====-- IA64TargetAsmInfo.h - IA64 asm properties -----------*- C++ -*--====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the IA64TargetAsmInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef IA64TARGETASMINFO_H
#define IA64TARGETASMINFO_H
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/ELFTargetAsmInfo.h"
namespace llvm {
// Forward declaration.
class TargetMachine;
struct IA64TargetAsmInfo : public ELFTargetAsmInfo {
explicit IA64TargetAsmInfo(const TargetMachine &TM);
};
} // namespace llvm
#endif


@ -1,59 +0,0 @@
//===-- IA64TargetMachine.cpp - Define TargetMachine for IA64 -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IA64 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//
#include "IA64TargetAsmInfo.h"
#include "IA64TargetMachine.h"
#include "IA64.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Target/TargetMachineRegistry.h"
using namespace llvm;
// Register the target
static RegisterTarget<IA64TargetMachine> X(TheIA64Target, "ia64",
"IA-64 (Itanium) [experimental]");
// Force static initialization.
extern "C" void LLVMInitializeIA64Target() { }
const TargetAsmInfo *IA64TargetMachine::createTargetAsmInfo() const {
return new IA64TargetAsmInfo(*this);
}
/// IA64TargetMachine ctor - Create an LP64 architecture model
///
IA64TargetMachine::IA64TargetMachine(const Target &T, const Module &M,
const std::string &FS)
: LLVMTargetMachine(T),
DataLayout("e-f80:128:128"),
FrameInfo(TargetFrameInfo::StackGrowsDown, 16, 0),
TLInfo(*this) { // FIXME? check this stuff
}
//===----------------------------------------------------------------------===//
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//
bool IA64TargetMachine::addInstSelector(PassManagerBase &PM,
CodeGenOpt::Level OptLevel) {
PM.add(createIA64DAGToDAGInstructionSelector(*this));
return false;
}
bool IA64TargetMachine::addPreEmitPass(PassManagerBase &PM,
CodeGenOpt::Level OptLevel) {
// Make sure everything is bundled happily
PM.add(createIA64BundlingPass(*this));
return true;
}


@ -1,59 +0,0 @@
//===-- IA64TargetMachine.h - Define TargetMachine for IA64 ---*- C++ -*---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the IA64 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_IA64TARGETMACHINE_H
#define LLVM_TARGET_IA64TARGETMACHINE_H
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "IA64InstrInfo.h"
#include "IA64ISelLowering.h"
#include "IA64Subtarget.h"
namespace llvm {
class IA64TargetMachine : public LLVMTargetMachine {
IA64Subtarget Subtarget;
const TargetData DataLayout; // Calculates type size & alignment
IA64InstrInfo InstrInfo;
TargetFrameInfo FrameInfo;
//IA64JITInfo JITInfo;
IA64TargetLowering TLInfo;
protected:
virtual const TargetAsmInfo *createTargetAsmInfo() const;
public:
IA64TargetMachine(const Target &T, const Module &M, const std::string &FS);
virtual const IA64InstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
virtual const IA64Subtarget *getSubtargetImpl() const { return &Subtarget; }
virtual IA64TargetLowering *getTargetLowering() const {
return const_cast<IA64TargetLowering*>(&TLInfo);
}
virtual const IA64RegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
virtual const TargetData *getTargetData() const { return &DataLayout; }
// Pass Pipeline Configuration
virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
};
} // End llvm namespace
#endif


@ -1,20 +0,0 @@
##===- lib/Target/IA64/Makefile -----------------------------*- Makefile -*-===##
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
LEVEL = ../../..
LIBRARYNAME = LLVMIA64CodeGen
TARGET = IA64
# Make sure that tblgen is run, first thing.
BUILT_SOURCES = IA64GenRegisterInfo.h.inc IA64GenRegisterNames.inc \
IA64GenRegisterInfo.inc IA64GenInstrNames.inc \
IA64GenInstrInfo.inc IA64GenAsmWriter.inc \
IA64GenDAGISel.inc
DIRS = AsmPrinter TargetInfo
include $(LEVEL)/Makefile.common


@ -1,48 +0,0 @@
TODO:
- Un-bitrot ISel
- Hook up If-Conversion a la ARM target
- Hook up all branch analysis functions
- Instruction scheduling
- Bundling
- Dynamic Optimization
- Testing and bugfixing
- stop passing FP args in both FP *and* integer regs when not required
- allocate low (nonstacked) registers more aggressively
- clean up and thoroughly test the isel patterns.
- fix stacked register allocation order: (for readability) we don't want
the out? registers being the first ones used
- fix up floating point
(nb http://gcc.gnu.org/wiki?pagename=ia64%20floating%20point )
- bundling!
(we will avoid the mess that is:
http://gcc.gnu.org/ml/gcc/2003-12/msg00832.html )
- instruction scheduling (hmmmm! ;)
- counted loop support
- make integer + FP mul/div more clever (we have fixed pseudocode atm)
- track and use comparison complements
INFO:
- we are strictly LP64 here, no support for ILP32 on HP-UX. Linux users
don't need to worry about this.
- I have instruction scheduling/bundling pseudocode that really works
(has been tested, albeit at the perl-script level).
So, before you go write your own, send me an email!
KNOWN DEFECTS AT THE CURRENT TIME:
- C++ vtables contain naked function pointers, not function descriptors,
which is bad. see http://llvm.cs.uiuc.edu/bugs/show_bug.cgi?id=406
- varargs are broken
- alloca doesn't work (indeed, stack frame layout is bogus)
- no support for big-endian environments
- (not really the backend, but...) the CFE has some issues on IA64.
these will probably be fixed soon.
ACKNOWLEDGEMENTS:
- Chris Lattner (x100)
- Other LLVM developers ("hey, that looks familiar")
CONTACT:
- You can email me at duraid@octopus.com.au. If you find a small bug,
just email me. If you find a big bug, please file a bug report
in bugzilla! http://llvm.cs.uiuc.edu is your one stop shop for all
things LLVM.


@ -1,7 +0,0 @@
include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
add_llvm_library(LLVMIA64Info
IA64TargetInfo.cpp
)
add_dependencies(LLVMIA64Info IA64CodeGenTable_gen)


@ -1,58 +0,0 @@
//===-- IA64TargetInfo.cpp - IA64 Target Implementation -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "IA64.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetRegistry.h"
using namespace llvm;
Target llvm::TheIA64Target;
static unsigned IA64_JITMatchQuality() {
return 0;
}
static unsigned IA64_TripleMatchQuality(const std::string &TT) {
  // we match triples that look like [iI][aA].*64
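  // e.g. "ia64", "IA-64" and "ia64-unknown-linux-gnu" all score 20 here.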
if (TT.size() >= 4) {
if ((TT[0]=='i' || TT[0]=='I') &&
(TT[1]=='a' || TT[1]=='A')) {
for(unsigned int i=2; i<(TT.size()-1); i++)
if(TT[i]=='6' && TT[i+1]=='4')
return 20; // strong match
}
}
return 0;
}
static unsigned IA64_ModuleMatchQuality(const Module &M) {
// Check for a triple match.
if (unsigned Q = IA64_TripleMatchQuality(M.getTargetTriple()))
return Q;
// Otherwise if the target triple is non-empty, we don't match.
if (!M.getTargetTriple().empty()) return 0;
// FIXME: This is bad, the target matching algorithm shouldn't depend on the
// host.
#if defined(__ia64__) || defined(__IA64__)
return 5;
#else
return 0;
#endif
}
extern "C" void LLVMInitializeIA64TargetInfo() {
TargetRegistry::RegisterTarget(TheIA64Target, "ia64",
"IA-64 (Itanium) [experimental]",
&IA64_TripleMatchQuality,
&IA64_ModuleMatchQuality,
&IA64_JITMatchQuality);
}

View File

@ -1,15 +0,0 @@
##===- lib/Target/IA64/TargetInfo/Makefile -----------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
LEVEL = ../../../..
LIBRARYNAME = LLVMIA64Info
# Hack: we need to include 'main' target directory to grab private headers
CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common

View File

@ -149,7 +149,7 @@ http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html
Implement the Newton-Raphson method for improving estimate instructions to the
correct accuracy, and implementing divide as multiply by reciprocal when it has
more than one use. Itanium will want this too.
more than one use. Itanium would want this too.
===-------------------------------------------------------------------------===
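For concreteness, a minimal LLVM IR sketch of the divide-as-multiply-by-reciprocal
idea above (illustrative only, not code from the tree; the function name is made
up). One Newton-Raphson step refines a reciprocal estimate %r0 of 1/%d via
r1 = r0 * (2 - d * r0), after which a / d becomes a * r1:

  define double @div_by_recip(double %a, double %d, double %r0) {
  entry:
    %e  = fmul double %d, %r0       ; d * r0
    %c  = fsub double 2.0, %e       ; 2 - d * r0
    %r1 = fmul double %r0, %c       ; refined reciprocal: r0 * (2 - d * r0)
    %q  = fmul double %a, %r1       ; a * (1/d), approximately a / d
    ret double %q
  }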

View File

@ -1,5 +1,4 @@
; RUN: llvm-as < %s | llc
; XFAIL: ia64
declare i1 @llvm.isunordered.f64(double, double)

View File

@ -1,10 +0,0 @@
; RUN: llvm-as < %s | llc -march=ia64
@_ZN9__gnu_cxx16__stl_prime_listE = external global [28 x i32] ; <[28 x i32]*> [#uses=3]
define fastcc i32* @_ZSt11lower_boundIPKmmET_S2_S2_RKT0_(i32 %__val.val) {
entry:
%retval = select i1 icmp slt (i32 ashr (i32 sub (i32 ptrtoint (i32* getelementptr ([28 x i32]* @_ZN9__gnu_cxx16__stl_prime_listE, i32 0, i32 28) to i32), i32 ptrtoint ([28 x i32]* @_ZN9__gnu_cxx16__stl_prime_listE to i32)), i32 2), i32 0), i32* null, i32* getelementptr ([28 x i32]* @_ZN9__gnu_cxx16__stl_prime_listE, i32 0, i32 0) ; <i32*> [#uses=1]
ret i32* %retval
}

View File

@ -1,9 +0,0 @@
; this should turn into shladd
; RUN: llvm-as < %s | llc -march=ia64 | grep shladd
define i64 @bogglesmoggle(i64 %X, i64 %Y) {
%A = shl i64 %X, 3 ; <i64> [#uses=1]
%B = add i64 %A, %Y ; <i64> [#uses=1]
ret i64 %B
}

View File

@ -1,5 +0,0 @@
load_lib llvm.exp
if { [llvm_supports_target IA64] } {
RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
}

View File

@ -1,21 +0,0 @@
; Test to make sure that the 'private' is used correctly.
;
; RUN: llvm-as < %s | llc -march=ia64 > %t
; RUN: grep .foo: %t
; RUN: grep br.call.sptk.*\.foo %t
; RUN: grep .baz: %t
; RUN: grep ltoff.*\.baz %t
declare void @foo()
define private void @foo() {
ret void
}
@baz = private global i32 4;
define i32 @bar() {
call void @foo()
%1 = load i32* @baz, align 4
ret i32 %1
}

View File

@ -1,5 +0,0 @@
; RUN: llvm-as < %s | llc -march=ia64
define double @test() {
ret double 0.0
}

View File

@ -11,7 +11,7 @@
// RUN: grep {#7 0x.* in main.*(argc=\[12\],.*argv=.*)}
// Only works on ppc, x86 and x86_64. Should generalize?
// XFAIL: alpha|ia64|arm
// XFAIL: alpha|arm
#include <stdlib.h>

View File

@ -7,7 +7,7 @@
// RUN: echo {break main\nrun\np NoCompileUnit::pubname} > %t2
// RUN: gdb -q -batch -n -x %t2 NoCompileUnit.exe | \
// RUN: tee NoCompileUnit.out | not grep {"low == high"}
// XFAIL: alpha|ia64|arm
// XFAIL: alpha|arm
// XFAIL: *
// See PR2454

View File

@ -7,7 +7,7 @@
// RUN: %llvmdsymutil %t.exe
// RUN: echo {break main\nrun\np Pubnames::pubname} > %t.in
// RUN: gdb -q -batch -n -x %t.in %t.exe | tee %t.out | grep {\$1 = 10}
// XFAIL: alpha|ia64|arm
// XFAIL: alpha|arm
struct Pubnames {
static int pubname;
};

View File

@ -1,5 +1,5 @@
// RUN: %llvmgcc %s -S -emit-llvm -o - | grep llvm.atomic
// XFAIL: sparc-sun-solaris2|arm|ia64
// XFAIL: sparc-sun-solaris2|arm
// Feature currently implemented only for x86, alpha, powerpc.
int* foo(int** a, int* b, int* c) {

View File

@ -9,7 +9,7 @@
// Currently this is implemented only for Alpha, X86, PowerPC.
// Add your target here if it doesn't work.
// This version of the test does not include long long.
// XFAIL: sparc|arm|ia64
// XFAIL: sparc|arm
signed char sc;
unsigned char uc;

View File

@ -9,7 +9,7 @@
// Currently this is implemented only for Alpha, X86, PowerPC.
// Add your target here if it doesn't work.
// PPC32 does not translate the long long variants, so fails this test.
// XFAIL: sparc|arm|ia64|powerpc
// XFAIL: sparc|arm|powerpc
signed char sc;
unsigned char uc;