diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt
index e63dd7d30a4..9ab17dc3ee9 100644
--- a/lib/Target/X86/README.txt
+++ b/lib/Target/X86/README.txt
@@ -1004,6 +1004,33 @@ _foo:
         movl %edi, %eax
         ret
 
+Another example is:
+
+;; X's live range extends beyond the shift, so the register allocator
+;; cannot coalesce it with Y. Because of this, a copy needs to be
+;; emitted before the shift to save the register value before it is
+;; clobbered. However, this copy is not needed if the register
+;; allocator turns the shift into an LEA. This also occurs for ADD.
+
+; Check that the shift gets turned into an LEA.
+; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: not grep {mov E.X, E.X}
+
+%G = external global int
+
+int %test1(int %X, int %Y) {
+        %Z = add int %X, %Y
+        volatile store int %Y, int* %G
+        volatile store int %Z, int* %G
+        ret int %X
+}
+
+int %test2(int %X) {
+        %Z = add int %X, 1  ;; inc
+        volatile store int %Z, int* %G
+        ret int %X
+}
+
 //===---------------------------------------------------------------------===//
 
 We use push/pop of stack space around calls in situations where we don't have to.
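
Why the RUN line above greps for "mov E.X, E.X" is easiest to see from the kind
of code the backend could emit for test1. The following is only a sketch, not
actual llc output: for illustration it assumes %X has been assigned to EAX
(where it must end up for the return) and %Y to ECX, and it ignores the x86-32
stack-based argument passing. With a two-address ADD, the addition would
clobber EAX while %X is still live, so a register-to-register copy has to be
inserted first; a three-address LEA computes the sum into a fresh register and
leaves EAX alone.

Hypothetical two-address codegen (Intel syntax):

        mov EDX, EAX            ; copy of X -- the "mov E.X, E.X" the test rejects
        add EDX, ECX            ; Z = X + Y; without the copy this would clobber X
        mov DWORD PTR [G], ECX  ; volatile store of Y
        mov DWORD PTR [G], EDX  ; volatile store of Z
        ret                     ; X returned in EAX

Hypothetical codegen with the ADD turned into an LEA:

        lea EDX, [EAX + ECX]    ; Z = X + Y, EAX left untouched
        mov DWORD PTR [G], ECX  ; volatile store of Y
        mov DWORD PTR [G], EDX  ; volatile store of Z
        ret                     ; X still live in EAX, no extra copy needed

The same reasoning applies to test2: an INC would overwrite X, while
LEA EDX, [EAX + 1] preserves it.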