[Hexagon] Preclude non-memory test from being optimized away. NFC.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@307153 91177308-0d34-0410-b5e6-96231b3b80d8
Nirav Dave 2017-07-05 13:08:03 +00:00
parent 1f536a1112
commit bef33d7af3
11 changed files with 38 additions and 38 deletions
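Every hunk applies the same fix: the loads and stores that each test exists to exercise are marked volatile, so later optimizations cannot fold the constant values through the stack slots and delete the very memory operations the test checks. As a minimal sketch of the idea (a hypothetical reduced function, not one of the files touched by this commit), the volatile qualifier is what forces the accesses to survive:

; Hypothetical reduced example (not part of this commit): without the
; volatile qualifier, these stores and loads of compile-time constants
; could be folded away entirely, leaving no memory operations to check.
define double @keep_memops() {
entry:
  %a = alloca double, align 8
  %b = alloca double, align 8
  store volatile double 1.540000e+01, double* %a, align 8
  store volatile double 9.100000e+00, double* %b, align 8
  %0 = load volatile double, double* %a, align 8
  %1 = load volatile double, double* %b, align 8
  %add = fadd double %0, %1
  ret double %add
}

Because LLVM may not remove volatile accesses, the generated Hexagon assembly still contains the loads and stores the tests look for, while the surrounding arithmetic is left untouched (hence NFC for the tested behavior).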

@@ -12,10 +12,10 @@ entry:
 %b = alloca double, align 8
 %c = alloca double, align 8
 store i32 0, i32* %retval
-store double 1.540000e+01, double* %a, align 8
-store double 9.100000e+00, double* %b, align 8
-%0 = load double, double* %a, align 8
-%1 = load double, double* %b, align 8
+store volatile double 1.540000e+01, double* %a, align 8
+store volatile double 9.100000e+00, double* %b, align 8
+%0 = load volatile double, double* %a, align 8
+%1 = load volatile double, double* %b, align 8
 %add = fadd double %0, %1
 store double %add, double* %c, align 8
 %2 = load double, double* %c, align 8

@@ -17,8 +17,8 @@ entry:
 %0 = load double, double* %a, align 8
 %1 = load double, double* %b, align 8
 %add = fadd double %0, %1
-store double %add, double* %c, align 8
-%2 = load double, double* %c, align 8
+store volatile double %add, double* %c, align 8
+%2 = load volatile double, double* %c, align 8
 %conv = fptosi double %2 to i64
 store i64 %conv, i64* %i, align 8
 %3 = load i64, i64* %i, align 8

@@ -17,8 +17,8 @@ entry:
 %0 = load float, float* %a, align 4
 %1 = load float, float* %b, align 4
 %add = fadd float %0, %1
-store float %add, float* %c, align 4
-%2 = load float, float* %c, align 4
+store volatile float %add, float* %c, align 4
+%2 = load volatile float, float* %c, align 4
 %conv = fptosi float %2 to i32
 store i32 %conv, i32* %i, align 4
 %3 = load i32, i32* %i, align 4

@@ -17,8 +17,8 @@ entry:
 %0 = load float, float* %a, align 4
 %1 = load float, float* %b, align 4
 %add = fadd float %0, %1
-store float %add, float* %c, align 4
-%2 = load float, float* %c, align 4
+store volatile float %add, float* %c, align 4
+%2 = load volatile float, float* %c, align 4
 %conv = fptosi float %2 to i64
 store i64 %conv, i64* %i, align 8
 %3 = load i64, i64* %i, align 8

@@ -9,10 +9,10 @@ entry:
 %a = alloca double, align 8
 %b = alloca double, align 8
 %c = alloca double, align 8
-store double 1.540000e+01, double* %a, align 8
-store double 9.100000e+00, double* %b, align 8
-%0 = load double, double* %a, align 8
-%1 = load double, double* %b, align 8
+store volatile double 1.540000e+01, double* %a, align 8
+store volatile double 9.100000e+00, double* %b, align 8
+%0 = load volatile double, double* %a, align 8
+%1 = load volatile double, double* %b, align 8
 %add = fadd double %0, %1
 store double %add, double* %c, align 8
 ret i32 0

@@ -8,10 +8,10 @@ entry:
 %a = alloca double, align 8
 %b = alloca double, align 8
 %c = alloca double, align 8
-store double 1.540000e+01, double* %a, align 8
-store double 9.100000e+00, double* %b, align 8
-%0 = load double, double* %b, align 8
-%1 = load double, double* %a, align 8
+store volatile double 1.540000e+01, double* %a, align 8
+store volatile double 9.100000e+00, double* %b, align 8
+%0 = load volatile double, double* %b, align 8
+%1 = load volatile double, double* %a, align 8
 %mul = fmul double %0, %1
 store double %mul, double* %c, align 8
 ret i32 0

@@ -12,10 +12,10 @@ entry:
 %b = alloca double, align 8
 %c = alloca double, align 8
 store i32 0, i32* %retval
-store double 1.540000e+01, double* %a, align 8
-store double 9.100000e+00, double* %b, align 8
-%0 = load double, double* %a, align 8
-%1 = load double, double* %b, align 8
+store volatile double 1.540000e+01, double* %a, align 8
+store volatile double 9.100000e+00, double* %b, align 8
+%0 = load volatile double, double* %a, align 8
+%1 = load volatile double, double* %b, align 8
 %add = fadd double %0, %1
 store double %add, double* %c, align 8
 %2 = load double, double* %c, align 8

@@ -8,10 +8,10 @@ entry:
 %a = alloca double, align 8
 %b = alloca double, align 8
 %c = alloca double, align 8
-store double 1.540000e+01, double* %a, align 8
-store double 9.100000e+00, double* %b, align 8
-%0 = load double, double* %b, align 8
-%1 = load double, double* %a, align 8
+store volatile double 1.540000e+01, double* %a, align 8
+store volatile double 9.100000e+00, double* %b, align 8
+%0 = load volatile double, double* %b, align 8
+%1 = load volatile double, double* %a, align 8
 %sub = fsub double %0, %1
 store double %sub, double* %c, align 8
 ret i32 0

@@ -8,10 +8,10 @@ entry:
 %a = alloca float, align 4
 %b = alloca float, align 4
 %c = alloca float, align 4
-store float 0x402ECCCCC0000000, float* %a, align 4
-store float 0x4022333340000000, float* %b, align 4
-%0 = load float, float* %a, align 4
-%1 = load float, float* %b, align 4
+store volatile float 0x402ECCCCC0000000, float* %a, align 4
+store volatile float 0x4022333340000000, float* %b, align 4
+%0 = load volatile float, float* %a, align 4
+%1 = load volatile float, float* %b, align 4
 %add = fadd float %0, %1
 store float %add, float* %c, align 4
 ret i32 0

@@ -9,10 +9,10 @@ entry:
 %a = alloca float, align 4
 %b = alloca float, align 4
 %c = alloca float, align 4
-store float 0x402ECCCCC0000000, float* %a, align 4
-store float 0x4022333340000000, float* %b, align 4
-%0 = load float, float* %b, align 4
-%1 = load float, float* %a, align 4
+store volatile float 0x402ECCCCC0000000, float* %a, align 4
+store volatile float 0x4022333340000000, float* %b, align 4
+%0 = load volatile float, float* %b, align 4
+%1 = load volatile float, float* %a, align 4
 %mul = fmul float %0, %1
 store float %mul, float* %c, align 4
 ret i32 0

@@ -8,10 +8,10 @@ entry:
 %a = alloca float, align 4
 %b = alloca float, align 4
 %c = alloca float, align 4
-store float 0x402ECCCCC0000000, float* %a, align 4
-store float 0x4022333340000000, float* %b, align 4
-%0 = load float, float* %b, align 4
-%1 = load float, float* %a, align 4
+store volatile float 0x402ECCCCC0000000, float* %a, align 4
+store volatile float 0x4022333340000000, float* %b, align 4
+%0 = load volatile float, float* %b, align 4
+%1 = load volatile float, float* %a, align 4
 %sub = fsub float %0, %1
 store float %sub, float* %c, align 4
 ret i32 0