[ConstantFold] Get rid of special cases for sizeof etc.
Target-dependent constant folding will fold these down to simple constants (or at least, expressions that don't involve a GEP). We don't need heroics to try to optimize the form of the expression before that happens.

Fixes https://bugs.llvm.org/show_bug.cgi?id=51232

Differential Revision: https://reviews.llvm.org/D107116
Parent: 697ea09d47
Commit: 2a2847823f
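The clang tests in this diff exercise the classic null-pointer offsetof idiom. A minimal C sketch of that pattern follows; the IR constants quoted in the comments are taken from the CHECK lines in this diff, and the exact output naturally depends on the target triple and clang version.

#include <stdint.h>

struct S { int x, y; };

/* Naive offsetof: take the address of a member of a (struct S *)0.
 * Before this change the emitted constant was folded to
 *   ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
 * and afterwards it keeps its natural struct-typed form
 *   ptrtoint (i32* getelementptr inbounds (%struct.S, %struct.S* null, i32 0, i32 1) to i64)
 * Target-dependent constant folding later reduces either form to plain 4. */
uintptr_t get_offset_of_y_naively(void) {
  return (uintptr_t)(&(((struct S *)0)->y));
}

/* __builtin_offsetof is already folded to a plain integer (ret i64 4). */
uintptr_t get_offset_of_y_via_builtin(void) {
  return __builtin_offsetof(struct S, y);
}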
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -x c -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s
 // RUN: %clang_cc1 -x c -fsanitize=pointer-overflow -fno-sanitize-recover=pointer-overflow -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s
 
@@ -14,19 +15,19 @@ struct S {
   int x, y;
 };
 
-// CHECK-LABEL: define{{.*}} i64 @{{.*}}get_offset_of_y_naively{{.*}}(
+// CHECK-LABEL: @get_offset_of_y_naively(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i64 ptrtoint (i32* getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* null, i32 0, i32 1) to i64)
+//
 uintptr_t get_offset_of_y_naively() {
-  // CHECK: [[ENTRY:.*]]:
-  // CHECK-NEXT: ret i64 ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
-  // CHECK-NEXT: }
   return ((uintptr_t)(&(((struct S *)0)->y)));
 }
 
-// CHECK-LABEL: define{{.*}} i64 @{{.*}}get_offset_of_y_via_builtin{{.*}}(
+// CHECK-LABEL: @get_offset_of_y_via_builtin(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i64 4
+//
 uintptr_t get_offset_of_y_via_builtin() {
-  // CHECK: [[ENTRY:.*]]:
-  // CHECK-NEXT: ret i64 4
-  // CHECK-NEXT: }
   return __builtin_offsetof(struct S, y);
 }
 
@@ -232,7 +232,7 @@ char *nullptr_allones_BAD() {
 // CHECK-SANITIZE-C-NEXT: br i1 false, label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
 // CHECK-SANITIZE-CPP-NEXT: br i1 icmp eq (i64 mul (i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64), i64 -1), i64 0), label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
 // CHECK-SANITIZE: [[HANDLER_POINTER_OVERFLOW]]:
-// CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(i8* bitcast ({ {{{.*}}} }* @[[LINE_800]] to i8*), i64 0, i64 mul (i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64), i64 -1))
+// CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(i8* bitcast ({ {{{.*}}} }* @[[LINE_800]] to i8*), i64 0, i64 ptrtoint (i8* getelementptr inbounds (i8, i8* null, i64 -1) to i64))
 // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_pointer_overflow(i8* bitcast ({ {{{.*}}} }* @[[LINE_800]] to i8*), i64 0, i64 mul (i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64), i64 -1))
 // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.ubsantrap(i8 19){{.*}}, !nosanitize
 // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -x c++ -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s
 // RUN: %clang_cc1 -x c++ -fsanitize=pointer-overflow -fno-sanitize-recover=pointer-overflow -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s
 
@@ -7,18 +8,18 @@ struct S {
   int x, y;
 };
 
-// CHECK-LABEL: define{{.*}} i64 @{{.*}}get_offset_of_y_naively{{.*}}(
+// CHECK-LABEL: @_Z23get_offset_of_y_naivelyv(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i64 ptrtoint (i32* getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* null, i32 0, i32 1) to i64)
+//
 uintptr_t get_offset_of_y_naively() {
-  // CHECK: [[ENTRY:.*]]:
-  // CHECK-NEXT: ret i64 ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
-  // CHECK-NEXT: }
   return ((uintptr_t)(&(((S *)nullptr)->y)));
 }
 
-// CHECK-LABEL: define{{.*}} i64 @{{.*}}get_offset_of_y_via_builtin{{.*}}(
+// CHECK-LABEL: @_Z27get_offset_of_y_via_builtinv(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i64 4
+//
 uintptr_t get_offset_of_y_via_builtin() {
-  // CHECK: [[ENTRY:.*]]:
-  // CHECK-NEXT: ret i64 4
-  // CHECK-NEXT: }
   return __builtin_offsetof(S, y);
 }
@@ -1113,9 +1113,9 @@ int main() {
 // CHECK1-NEXT: [[TMP24:%.*]] = ptrtoint %struct.S* [[ARRAYIDX14]] to i64
 // CHECK1-NEXT: [[TMP25:%.*]] = ptrtoint %struct.S* [[ARRAYIDX9]] to i64
 // CHECK1-NEXT: [[TMP26:%.*]] = sub i64 [[TMP24]], [[TMP25]]
-// CHECK1-NEXT: [[TMP27:%.*]] = sdiv exact i64 [[TMP26]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP27:%.*]] = sdiv exact i64 [[TMP26]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP28:%.*]] = add nuw i64 [[TMP27]], 1
-// CHECK1-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[VLA15:%.*]] = alloca [[STRUCT_S]], i64 [[TMP28]], align 16
 // CHECK1-NEXT: store i64 [[TMP28]], i64* [[__VLA_EXPR1]], align 8
 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA15]], i64 [[TMP28]]
@@ -1132,7 +1132,7 @@ int main() {
 // CHECK1-NEXT: [[TMP32:%.*]] = ptrtoint %struct.S* [[TMP31]] to i64
 // CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX9]] to i64
 // CHECK1-NEXT: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK1-NEXT: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA15]], i64 [[TMP35]]
 // CHECK1-NEXT: [[TMP37:%.*]] = bitcast %struct.S* [[TMP36]] to [10 x [4 x %struct.S]]*
 // CHECK1-NEXT: [[TMP38:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
@@ -1855,9 +1855,9 @@ int main() {
 // CHECK1-NEXT: [[TMP5:%.*]] = ptrtoint %struct.S* [[ARRAYIDX3]] to i64
 // CHECK1-NEXT: [[TMP6:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK1-NEXT: [[TMP7:%.*]] = sub i64 [[TMP5]], [[TMP6]]
-// CHECK1-NEXT: [[TMP8:%.*]] = sdiv exact i64 [[TMP7]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP8:%.*]] = sdiv exact i64 [[TMP7]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP9:%.*]] = add nuw i64 [[TMP8]], 1
-// CHECK1-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP11:%.*]] = call i8* @llvm.stacksave()
 // CHECK1-NEXT: store i8* [[TMP11]], i8** [[SAVED_STACK]], align 8
 // CHECK1-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S]], i64 [[TMP9]], align 16
@@ -1877,7 +1877,7 @@ int main() {
 // CHECK1-NEXT: [[TMP15:%.*]] = ptrtoint %struct.S* [[TMP14]] to i64
 // CHECK1-NEXT: [[TMP16:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK1-NEXT: [[TMP17:%.*]] = sub i64 [[TMP15]], [[TMP16]]
-// CHECK1-NEXT: [[TMP18:%.*]] = sdiv exact i64 [[TMP17]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP18:%.*]] = sdiv exact i64 [[TMP17]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA]], i64 [[TMP18]]
 // CHECK1-NEXT: store %struct.S** [[_TMP5]], %struct.S*** [[_TMP4]], align 8
 // CHECK1-NEXT: store %struct.S* [[TMP19]], %struct.S** [[_TMP5]], align 8
@@ -2084,7 +2084,7 @@ int main() {
 // CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[TMP7]] to i64
 // CHECK1-NEXT: [[TMP9:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK1-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]]
-// CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast [1 x [6 x %struct.S]]* [[VAR24]] to %struct.S*
 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP12]], i64 [[TMP11]]
 // CHECK1-NEXT: store %struct.S** [[_TMP6]], %struct.S*** [[_TMP5]], align 8
@@ -2285,7 +2285,7 @@ int main() {
 // CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[TMP7]] to i64
 // CHECK1-NEXT: [[TMP9:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK1-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]]
-// CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast [1 x [6 x %struct.S]]* [[VAR24]] to %struct.S*
 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP12]], i64 [[TMP11]]
 // CHECK1-NEXT: store %struct.S** [[_TMP6]], %struct.S*** [[_TMP5]], align 8
@@ -2476,7 +2476,7 @@ int main() {
 // CHECK1-NEXT: [[TMP7:%.*]] = ptrtoint %struct.S* [[TMP6]] to i64
 // CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK1-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
-// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VAR24]], i64 [[TMP10]]
 // CHECK1-NEXT: store %struct.S** [[_TMP6]], %struct.S*** [[_TMP5]], align 8
 // CHECK1-NEXT: store %struct.S* [[TMP11]], %struct.S** [[_TMP6]], align 8
@@ -2616,7 +2616,7 @@ int main() {
 // CHECK1-NEXT: [[TMP3:%.*]] = ptrtoint %struct.S* [[TMP2]] to i64
 // CHECK1-NEXT: [[TMP4:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK1-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
-// CHECK1-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast [5 x %struct.S]* [[VVAR22]] to %struct.S*
 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP7]], i64 [[TMP6]]
 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.S* [[TMP8]] to [5 x %struct.S]*
@@ -2816,7 +2816,7 @@ int main() {
 // CHECK1-NEXT: [[TMP7:%.*]] = ptrtoint %struct.S* [[TMP6]] to i64
 // CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK1-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
-// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast [2 x %struct.S]* [[VAR34]] to %struct.S*
 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP11]], i64 [[TMP10]]
 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[TMP12]] to [4 x %struct.S]*
@@ -3017,7 +3017,7 @@ int main() {
 // CHECK1-NEXT: [[TMP7:%.*]] = ptrtoint %struct.S* [[TMP6]] to i64
 // CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK1-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
-// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast [2 x %struct.S]* [[VAR34]] to %struct.S*
 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP11]], i64 [[TMP10]]
 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[TMP12]] to [4 x %struct.S]*
@@ -3206,12 +3206,12 @@ int main() {
 // CHECK1-NEXT: [[TMP4:%.*]] = ptrtoint %struct.S* [[ARRAYIDX3]] to i64
 // CHECK1-NEXT: [[TMP5:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK1-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
-// CHECK1-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S:%.*]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP8:%.*]] = add nuw i64 [[TMP7]], 1
-// CHECK1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP10:%.*]] = call i8* @llvm.stacksave()
 // CHECK1-NEXT: store i8* [[TMP10]], i8** [[SAVED_STACK]], align 8
-// CHECK1-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S:%.*]], i64 [[TMP8]], align 16
+// CHECK1-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S]], i64 [[TMP8]], align 16
 // CHECK1-NEXT: store i64 [[TMP8]], i64* [[__VLA_EXPR0]], align 8
 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA]], i64 [[TMP8]]
 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq %struct.S* [[VLA]], [[TMP11]]
@@ -3228,7 +3228,7 @@ int main() {
 // CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint %struct.S* [[TMP13]] to i64
 // CHECK1-NEXT: [[TMP15:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK1-NEXT: [[TMP16:%.*]] = sub i64 [[TMP14]], [[TMP15]]
-// CHECK1-NEXT: [[TMP17:%.*]] = sdiv exact i64 [[TMP16]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP17:%.*]] = sdiv exact i64 [[TMP16]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA]], i64 [[TMP17]]
 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[TMP18]] to [4 x %struct.S]*
 // CHECK1-NEXT: store [4 x %struct.S]* [[TMP19]], [4 x %struct.S]** [[_TMP4]], align 8
@@ -4246,7 +4246,7 @@ int main() {
 // CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S.0* [[TMP7]] to i64
 // CHECK1-NEXT: [[TMP9:%.*]] = ptrtoint %struct.S.0* [[ARRAYIDX]] to i64
 // CHECK1-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]]
-// CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
+// CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (%struct.S.0* getelementptr ([[STRUCT_S_0]], %struct.S.0* null, i32 1) to i64)
 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast [40 x %struct.S.0]* [[ARR4]] to %struct.S.0*
 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[TMP12]], i64 [[TMP11]]
 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S.0* [[TMP13]] to [42 x %struct.S.0]*
@@ -5048,9 +5048,9 @@ int main() {
 // CHECK2-NEXT: [[TMP24:%.*]] = ptrtoint %struct.S* [[ARRAYIDX14]] to i64
 // CHECK2-NEXT: [[TMP25:%.*]] = ptrtoint %struct.S* [[ARRAYIDX9]] to i64
 // CHECK2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP24]], [[TMP25]]
-// CHECK2-NEXT: [[TMP27:%.*]] = sdiv exact i64 [[TMP26]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP27:%.*]] = sdiv exact i64 [[TMP26]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP28:%.*]] = add nuw i64 [[TMP27]], 1
-// CHECK2-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[VLA15:%.*]] = alloca [[STRUCT_S]], i64 [[TMP28]], align 16
 // CHECK2-NEXT: store i64 [[TMP28]], i64* [[__VLA_EXPR1]], align 8
 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA15]], i64 [[TMP28]]
@@ -5067,7 +5067,7 @@ int main() {
 // CHECK2-NEXT: [[TMP32:%.*]] = ptrtoint %struct.S* [[TMP31]] to i64
 // CHECK2-NEXT: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX9]] to i64
 // CHECK2-NEXT: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK2-NEXT: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP36:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA15]], i64 [[TMP35]]
 // CHECK2-NEXT: [[TMP37:%.*]] = bitcast %struct.S* [[TMP36]] to [10 x [4 x %struct.S]]*
 // CHECK2-NEXT: [[TMP38:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
@@ -5790,9 +5790,9 @@ int main() {
 // CHECK2-NEXT: [[TMP5:%.*]] = ptrtoint %struct.S* [[ARRAYIDX3]] to i64
 // CHECK2-NEXT: [[TMP6:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK2-NEXT: [[TMP7:%.*]] = sub i64 [[TMP5]], [[TMP6]]
-// CHECK2-NEXT: [[TMP8:%.*]] = sdiv exact i64 [[TMP7]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP8:%.*]] = sdiv exact i64 [[TMP7]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP9:%.*]] = add nuw i64 [[TMP8]], 1
-// CHECK2-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP11:%.*]] = call i8* @llvm.stacksave()
 // CHECK2-NEXT: store i8* [[TMP11]], i8** [[SAVED_STACK]], align 8
 // CHECK2-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S]], i64 [[TMP9]], align 16
@@ -5812,7 +5812,7 @@ int main() {
 // CHECK2-NEXT: [[TMP15:%.*]] = ptrtoint %struct.S* [[TMP14]] to i64
 // CHECK2-NEXT: [[TMP16:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK2-NEXT: [[TMP17:%.*]] = sub i64 [[TMP15]], [[TMP16]]
-// CHECK2-NEXT: [[TMP18:%.*]] = sdiv exact i64 [[TMP17]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP18:%.*]] = sdiv exact i64 [[TMP17]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA]], i64 [[TMP18]]
 // CHECK2-NEXT: store %struct.S** [[_TMP5]], %struct.S*** [[_TMP4]], align 8
 // CHECK2-NEXT: store %struct.S* [[TMP19]], %struct.S** [[_TMP5]], align 8
@@ -6019,7 +6019,7 @@ int main() {
 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[TMP7]] to i64
 // CHECK2-NEXT: [[TMP9:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK2-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]]
-// CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast [1 x [6 x %struct.S]]* [[VAR24]] to %struct.S*
 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP12]], i64 [[TMP11]]
 // CHECK2-NEXT: store %struct.S** [[_TMP6]], %struct.S*** [[_TMP5]], align 8
@@ -6220,7 +6220,7 @@ int main() {
 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[TMP7]] to i64
 // CHECK2-NEXT: [[TMP9:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK2-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]]
-// CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast [1 x [6 x %struct.S]]* [[VAR24]] to %struct.S*
 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP12]], i64 [[TMP11]]
 // CHECK2-NEXT: store %struct.S** [[_TMP6]], %struct.S*** [[_TMP5]], align 8
@@ -6411,7 +6411,7 @@ int main() {
 // CHECK2-NEXT: [[TMP7:%.*]] = ptrtoint %struct.S* [[TMP6]] to i64
 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[ARRAYIDX1]] to i64
 // CHECK2-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
-// CHECK2-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VAR24]], i64 [[TMP10]]
 // CHECK2-NEXT: store %struct.S** [[_TMP6]], %struct.S*** [[_TMP5]], align 8
 // CHECK2-NEXT: store %struct.S* [[TMP11]], %struct.S** [[_TMP6]], align 8
@@ -6551,7 +6551,7 @@ int main() {
 // CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint %struct.S* [[TMP2]] to i64
 // CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK2-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
-// CHECK2-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast [5 x %struct.S]* [[VVAR22]] to %struct.S*
 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP7]], i64 [[TMP6]]
 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast %struct.S* [[TMP8]] to [5 x %struct.S]*
@@ -6751,7 +6751,7 @@ int main() {
 // CHECK2-NEXT: [[TMP7:%.*]] = ptrtoint %struct.S* [[TMP6]] to i64
 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK2-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
-// CHECK2-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast [2 x %struct.S]* [[VAR34]] to %struct.S*
 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP11]], i64 [[TMP10]]
 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[TMP12]] to [4 x %struct.S]*
@@ -6952,7 +6952,7 @@ int main() {
 // CHECK2-NEXT: [[TMP7:%.*]] = ptrtoint %struct.S* [[TMP6]] to i64
 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK2-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
-// CHECK2-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast [2 x %struct.S]* [[VAR34]] to %struct.S*
 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP11]], i64 [[TMP10]]
 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[TMP12]] to [4 x %struct.S]*
@@ -7141,12 +7141,12 @@ int main() {
 // CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint %struct.S* [[ARRAYIDX3]] to i64
 // CHECK2-NEXT: [[TMP5:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK2-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
-// CHECK2-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S:%.*]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP8:%.*]] = add nuw i64 [[TMP7]], 1
-// CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP10:%.*]] = call i8* @llvm.stacksave()
 // CHECK2-NEXT: store i8* [[TMP10]], i8** [[SAVED_STACK]], align 8
-// CHECK2-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S:%.*]], i64 [[TMP8]], align 16
+// CHECK2-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S]], i64 [[TMP8]], align 16
 // CHECK2-NEXT: store i64 [[TMP8]], i64* [[__VLA_EXPR0]], align 8
 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA]], i64 [[TMP8]]
 // CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq %struct.S* [[VLA]], [[TMP11]]
@@ -7163,7 +7163,7 @@ int main() {
 // CHECK2-NEXT: [[TMP14:%.*]] = ptrtoint %struct.S* [[TMP13]] to i64
 // CHECK2-NEXT: [[TMP15:%.*]] = ptrtoint %struct.S* [[ARRAYIDX]] to i64
 // CHECK2-NEXT: [[TMP16:%.*]] = sub i64 [[TMP14]], [[TMP15]]
-// CHECK2-NEXT: [[TMP17:%.*]] = sdiv exact i64 [[TMP16]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP17:%.*]] = sdiv exact i64 [[TMP16]], ptrtoint (%struct.S* getelementptr ([[STRUCT_S]], %struct.S* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[VLA]], i64 [[TMP17]]
 // CHECK2-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[TMP18]] to [4 x %struct.S]*
 // CHECK2-NEXT: store [4 x %struct.S]* [[TMP19]], [4 x %struct.S]** [[_TMP4]], align 8
@@ -8181,7 +8181,7 @@ int main() {
 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint %struct.S.0* [[TMP7]] to i64
 // CHECK2-NEXT: [[TMP9:%.*]] = ptrtoint %struct.S.0* [[ARRAYIDX]] to i64
 // CHECK2-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]]
-// CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
+// CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (%struct.S.0* getelementptr ([[STRUCT_S_0]], %struct.S.0* null, i32 1) to i64)
 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast [40 x %struct.S.0]* [[ARR4]] to %struct.S.0*
 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[TMP12]], i64 [[TMP11]]
 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast %struct.S.0* [[TMP13]] to [42 x %struct.S.0]*
@@ -8832,4 +8832,3 @@ int main() {
 // CHECK4-NEXT: store double [[ADD2]], double* [[TMP17]], align 8
 // CHECK4-NEXT: ret void
 //
-//
@@ -99,9 +99,9 @@ sum = 0.0;
 // CHECK-DAG: [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
-// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
 // CHECK-DAG: [[TMP38]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
 // CHECK-DAG: [[TMP39:%.*]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
@@ -95,9 +95,9 @@ sum = 0.0;
 // CHECK-DAG: [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
-// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
 // CHECK-DAG: [[TMP38]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
 // CHECK-DAG: [[TMP39:%.*]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
@@ -99,9 +99,9 @@ sum = 0.0;
 // CHECK-DAG: [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
-// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
 // CHECK-DAG: [[TMP38]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
 // CHECK-DAG: [[TMP39:%.*]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
@@ -99,9 +99,9 @@ sum = 0.0;
 // CHECK-DAG: [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
-// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
 // CHECK-DAG: [[TMP38]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
 // CHECK-DAG: [[TMP39:%.*]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
@@ -98,9 +98,9 @@ sum = 0.0;
 // CHECK-DAG: [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
-// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
 // CHECK-DAG: [[TMP38]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
 // CHECK-DAG: [[TMP39:%.*]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
@@ -95,9 +95,9 @@ sum = 0.0;
 // CHECK-DAG: [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG: [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG: [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
-// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (%struct.S* getelementptr (%struct.S, %struct.S* null, i32 1) to i64)
 // CHECK-DAG: store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
 // CHECK-DAG: [[TMP38]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
 // CHECK-DAG: [[TMP39:%.*]] = getelementptr inbounds %struct.kmp_taskred_input_t, %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
@@ -349,200 +349,6 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
   }
 }
 
-/// Wrapper around getFoldedSizeOfImpl() that adds caching.
-static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, bool Folded,
-                                 DenseMap<Type *, Constant *> &Cache);
-
-/// Return a ConstantExpr with type DestTy for sizeof on Ty, with any known
-/// factors factored out. If Folded is false, return null if no factoring was
-/// possible, to avoid endlessly bouncing an unfoldable expression back into the
-/// top-level folder.
-static Constant *getFoldedSizeOfImpl(Type *Ty, Type *DestTy, bool Folded,
-                                     DenseMap<Type *, Constant *> &Cache) {
-  // This is the actual implementation of getFoldedSizeOf(). To get the caching
-  // behavior, we need to call getFoldedSizeOf() when we recurse.
-
-  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-    Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
-    Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true, Cache);
-    return ConstantExpr::getNUWMul(E, N);
-  }
-
-  if (StructType *STy = dyn_cast<StructType>(Ty))
-    if (!STy->isPacked()) {
-      unsigned NumElems = STy->getNumElements();
-      // An empty struct has size zero.
-      if (NumElems == 0)
-        return ConstantExpr::getNullValue(DestTy);
-      // Check for a struct with all members having the same size.
-      Constant *MemberSize =
-          getFoldedSizeOf(STy->getElementType(0), DestTy, true, Cache);
-      bool AllSame = true;
-      for (unsigned i = 1; i != NumElems; ++i)
-        if (MemberSize !=
-            getFoldedSizeOf(STy->getElementType(i), DestTy, true, Cache)) {
-          AllSame = false;
-          break;
-        }
-      if (AllSame) {
-        Constant *N = ConstantInt::get(DestTy, NumElems);
-        return ConstantExpr::getNUWMul(MemberSize, N);
-      }
-    }
-
-  // Pointer size doesn't depend on the pointee type, so canonicalize them
-  // to an arbitrary pointee.
-  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
-    if (!PTy->getElementType()->isIntegerTy(1))
-      return getFoldedSizeOf(
-          PointerType::get(IntegerType::get(PTy->getContext(), 1),
-                           PTy->getAddressSpace()),
-          DestTy, true, Cache);
-
-  // If there's no interesting folding happening, bail so that we don't create
-  // a constant that looks like it needs folding but really doesn't.
-  if (!Folded)
-    return nullptr;
-
-  // Base case: Get a regular sizeof expression.
-  Constant *C = ConstantExpr::getSizeOf(Ty);
-  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
-                                                    DestTy, false),
-                            C, DestTy);
-  return C;
-}
-
-static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, bool Folded,
-                                 DenseMap<Type *, Constant *> &Cache) {
-  // Check for previously generated folded size constant.
-  auto It = Cache.find(Ty);
-  if (It != Cache.end())
-    return It->second;
-  return Cache[Ty] = getFoldedSizeOfImpl(Ty, DestTy, Folded, Cache);
-}
-
-static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, bool Folded) {
-  DenseMap<Type *, Constant *> Cache;
-  return getFoldedSizeOf(Ty, DestTy, Folded, Cache);
-}
-
-/// Return a ConstantExpr with type DestTy for alignof on Ty, with any known
-/// factors factored out. If Folded is false, return null if no factoring was
-/// possible, to avoid endlessly bouncing an unfoldable expression back into the
-/// top-level folder.
-static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy, bool Folded) {
-  // The alignment of an array is equal to the alignment of the
-  // array element. Note that this is not always true for vectors.
-  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-    Constant *C = ConstantExpr::getAlignOf(ATy->getElementType());
-    C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
-                                                      DestTy,
-                                                      false),
-                              C, DestTy);
-    return C;
-  }
-
-  if (StructType *STy = dyn_cast<StructType>(Ty)) {
-    // Packed structs always have an alignment of 1.
-    if (STy->isPacked())
-      return ConstantInt::get(DestTy, 1);
-
-    // Otherwise, struct alignment is the maximum alignment of any member.
-    // Without target data, we can't compare much, but we can check to see
-    // if all the members have the same alignment.
-    unsigned NumElems = STy->getNumElements();
-    // An empty struct has minimal alignment.
-    if (NumElems == 0)
-      return ConstantInt::get(DestTy, 1);
-    // Check for a struct with all members having the same alignment.
-    Constant *MemberAlign =
-        getFoldedAlignOf(STy->getElementType(0), DestTy, true);
-    bool AllSame = true;
-    for (unsigned i = 1; i != NumElems; ++i)
-      if (MemberAlign != getFoldedAlignOf(STy->getElementType(i), DestTy, true)) {
-        AllSame = false;
-        break;
-      }
-    if (AllSame)
-      return MemberAlign;
-  }
-
-  // Pointer alignment doesn't depend on the pointee type, so canonicalize them
-  // to an arbitrary pointee.
-  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
-    if (!PTy->getElementType()->isIntegerTy(1))
-      return
-        getFoldedAlignOf(PointerType::get(IntegerType::get(PTy->getContext(),
-                                                           1),
-                                          PTy->getAddressSpace()),
-                         DestTy, true);
-
-  // If there's no interesting folding happening, bail so that we don't create
-  // a constant that looks like it needs folding but really doesn't.
-  if (!Folded)
-    return nullptr;
-
-  // Base case: Get a regular alignof expression.
-  Constant *C = ConstantExpr::getAlignOf(Ty);
-  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
-                                                    DestTy, false),
-                            C, DestTy);
-  return C;
-}
-
-/// Return a ConstantExpr with type DestTy for offsetof on Ty and FieldNo, with
-/// any known factors factored out. If Folded is false, return null if no
-/// factoring was possible, to avoid endlessly bouncing an unfoldable expression
-/// back into the top-level folder.
-static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo, Type *DestTy,
-                                   bool Folded) {
-  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-    Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
-                                                                DestTy, false),
-                                        FieldNo, DestTy);
-    Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
-    return ConstantExpr::getNUWMul(E, N);
-  }
-
-  if (StructType *STy = dyn_cast<StructType>(Ty))
-    if (!STy->isPacked()) {
-      unsigned NumElems = STy->getNumElements();
-      // An empty struct has no members.
-      if (NumElems == 0)
-        return nullptr;
-      // Check for a struct with all members having the same size.
-      Constant *MemberSize =
-          getFoldedSizeOf(STy->getElementType(0), DestTy, true);
-      bool AllSame = true;
-      for (unsigned i = 1; i != NumElems; ++i)
-        if (MemberSize !=
-            getFoldedSizeOf(STy->getElementType(i), DestTy, true)) {
-          AllSame = false;
-          break;
-        }
-      if (AllSame) {
-        Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo,
-                                                                    false,
-                                                                    DestTy,
-                                                                    false),
-                                            FieldNo, DestTy);
-        return ConstantExpr::getNUWMul(MemberSize, N);
-      }
-    }
-
-  // If there's no interesting folding happening, bail so that we don't create
-  // a constant that looks like it needs folding but really doesn't.
-  if (!Folded)
-    return nullptr;
-
-  // Base case: Get a regular offsetof expression.
-  Constant *C = ConstantExpr::getOffsetOf(Ty, FieldNo);
-  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
-                                                    DestTy, false),
-                            C, DestTy);
-  return C;
-}
-
 Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
                                             Type *DestTy) {
   if (isa<PoisonValue>(V))
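The helpers deleted above tried to simplify sizeof/alignof/offsetof constants purely symbolically, with no target data. Once a DataLayout is available these become direct queries; the following sketch (illustration only, not code from the patch; the struct and layout string are assumptions) shows the target-dependent equivalents:

```cpp
// Hedged sketch: the DataLayout queries that make the removed symbolic
// factoring unnecessary once a concrete target layout is known.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  DataLayout DL("e-m:e-i64:64-f80:128-n8:16:32:64-S128"); // x86_64-style layout
  StructType *S = StructType::create(
      {Type::getInt8Ty(Ctx), Type::getDoubleTy(Ctx)}, "S");

  // sizeof(S): includes the padding after the i8, 16 bytes here.
  outs() << "size:   " << DL.getTypeAllocSize(S).getFixedSize() << "\n";
  // alignof(S): the largest member alignment, 8 bytes here.
  outs() << "align:  " << DL.getABITypeAlign(S).value() << "\n";
  // offsetof(S, 1): the double starts after padding, at offset 8.
  outs() << "offset: " << DL.getStructLayout(S)->getElementOffset(1) << "\n";
  return 0;
}
```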
@@ -666,53 +472,6 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
     // Is it a null pointer value?
     if (V->isNullValue())
       return ConstantInt::get(DestTy, 0);
-    // If this is a sizeof-like expression, pull out multiplications by
-    // known factors to expose them to subsequent folding. If it's an
-    // alignof-like expression, factor out known factors.
-    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
-      if (CE->getOpcode() == Instruction::GetElementPtr &&
-          CE->getOperand(0)->isNullValue()) {
-        // FIXME: Looks like getFoldedSizeOf(), getFoldedOffsetOf() and
-        // getFoldedAlignOf() don't handle the case when DestTy is a vector of
-        // pointers yet. We end up in asserts in CastInst::getCastOpcode (see
-        // test/Analysis/ConstantFolding/cast-vector.ll). I've only seen this
-        // happen in one "real" C-code test case, so it does not seem to be an
-        // important optimization to handle vectors here. For now, simply bail
-        // out.
-        if (DestTy->isVectorTy())
-          return nullptr;
-        GEPOperator *GEPO = cast<GEPOperator>(CE);
-        Type *Ty = GEPO->getSourceElementType();
-        if (CE->getNumOperands() == 2) {
-          // Handle a sizeof-like expression.
-          Constant *Idx = CE->getOperand(1);
-          bool isOne = isa<ConstantInt>(Idx) && cast<ConstantInt>(Idx)->isOne();
-          if (Constant *C = getFoldedSizeOf(Ty, DestTy, !isOne)) {
-            Idx = ConstantExpr::getCast(CastInst::getCastOpcode(Idx, true,
-                                                                DestTy, false),
-                                        Idx, DestTy);
-            return ConstantExpr::getMul(C, Idx);
-          }
-        } else if (CE->getNumOperands() == 3 &&
-                   CE->getOperand(1)->isNullValue()) {
-          // Handle an alignof-like expression.
-          if (StructType *STy = dyn_cast<StructType>(Ty))
-            if (!STy->isPacked()) {
-              ConstantInt *CI = cast<ConstantInt>(CE->getOperand(2));
-              if (CI->isOne() &&
-                  STy->getNumElements() == 2 &&
-                  STy->getElementType(0)->isIntegerTy(1)) {
-                return getFoldedAlignOf(STy->getElementType(1), DestTy, false);
-              }
-            }
-          // Handle an offsetof-like expression.
-          if (Ty->isStructTy() || Ty->isArrayTy()) {
-            if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2),
-                                                DestTy, false))
-              return C;
-          }
-        }
-      }
     // Other pointer types cannot be casted
     return nullptr;
   case Instruction::UIToFP:
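This second hunk drops the PtrToInt special case that fed those helpers. For illustration, here is a hedged sketch of how alignof- and offsetof-style constant expressions still end up as plain integers, going through the generic DataLayout-aware folder instead (the types, module name, and layout string are assumptions; the fold is what `ConstantFoldConstant` is expected to produce, not something this patch adds):

```cpp
// Hedged sketch: alignof/offsetof-style constant expressions fold to integers
// via the target-dependent folder rather than the deleted special case.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("fold-demo", Ctx);
  M.setDataLayout("e-m:e-i64:64-f80:128-n8:16:32:64-S128"); // x86_64-style layout
  StructType *Pair = StructType::create(
      {Type::getInt1Ty(Ctx), Type::getDoubleTy(Ctx)}, "struct.pair");

  // alignof(double) and offsetof(struct.pair, 1) as constant expressions;
  // both are spelled as ptrtoint-of-GEP-from-null, like the tests below.
  Constant *AlignOfDouble = ConstantExpr::getAlignOf(Type::getDoubleTy(Ctx));
  Constant *OffsetOfField1 = ConstantExpr::getOffsetOf(Pair, 1);

  // With a DataLayout both should collapse to `i64 8` -- no pre-shaping needed.
  outs() << *ConstantFoldConstant(AlignOfDouble, M.getDataLayout()) << "\n";
  outs() << *ConstantFoldConstant(OffsetOfField1, M.getDataLayout()) << "\n";
  return 0;
}
```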
@@ -49,15 +49,15 @@
 ; simplifications on sizeof, alignof, and offsetof expressions. The
 ; target-dependent folder should fold these down to constants.
 
-; PLAIN: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2310)
+; PLAIN: @a = constant i64 mul (i64 ptrtoint ({ [7 x double], [7 x double] }* getelementptr ({ [7 x double], [7 x double] }, { [7 x double], [7 x double] }* null, i64 11) to i64), i64 15)
-; PLAIN: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64)
+; PLAIN: @b = constant i64 ptrtoint ([13 x double]* getelementptr ({ i1, [13 x double] }, { i1, [13 x double] }* null, i64 0, i32 1) to i64)
-; PLAIN: @c = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2)
+; PLAIN: @c = constant i64 ptrtoint (double* getelementptr ({ double, double, double, double }, { double, double, double, double }* null, i64 0, i32 2) to i64)
-; PLAIN: @d = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 11)
+; PLAIN: @d = constant i64 ptrtoint (double* getelementptr ([13 x double], [13 x double]* null, i64 0, i32 11) to i64)
 ; PLAIN: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }, { double, float, double, double }* null, i64 0, i32 2) to i64)
-; PLAIN: @f = constant i64 1
+; PLAIN: @f = constant i64 ptrtoint (<{ i16, i128 }>* getelementptr ({ i1, <{ i16, i128 }> }, { i1, <{ i16, i128 }> }* null, i64 0, i32 1) to i64)
-; PLAIN: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64)
+; PLAIN: @g = constant i64 ptrtoint ({ double, double }* getelementptr ({ i1, { double, double } }, { i1, { double, double } }* null, i64 0, i32 1) to i64)
-; PLAIN: @h = constant i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)
+; PLAIN: @h = constant i64 ptrtoint (double** getelementptr (double*, double** null, i64 1) to i64)
-; PLAIN: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }, { i1, i1* }* null, i64 0, i32 1) to i64)
+; PLAIN: @i = constant i64 ptrtoint (double** getelementptr ({ i1, double* }, { i1, double* }* null, i64 0, i32 1) to i64)
 ; OPT: @a = local_unnamed_addr constant i64 18480
 ; OPT: @b = local_unnamed_addr constant i64 8
 ; OPT: @c = local_unnamed_addr constant i64 16
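As a quick cross-check of the OPT lines, the new PLAIN expressions reduce to simple byte arithmetic under the x86-64-style layout this test targets (an 8-byte double is assumed). A small standalone sketch, not part of the test:

```cpp
// Hedged sketch: spot-check a few of the folded OPT values with plain byte
// arithmetic, assuming 8-byte doubles as on the x86-64 target used here.
#include <cstdint>

int main() {
  constexpr uint64_t DoubleBytes = 8;
  // @a: gep null, i64 11 over { [7 x double], [7 x double] } (2 * 7 * 8 = 112
  // bytes per element), then multiplied by 15 in the outer expression.
  static_assert(11 * (2 * 7 * DoubleBytes) * 15 == 18480, "@a");
  // @c: offset of field 2 in { double, double, double, double }.
  static_assert(2 * DoubleBytes == 16, "@c");
  // @d: offset of element 11 in [13 x double], the value SCEV reports as 88
  // for @fd further down.
  static_assert(11 * DoubleBytes == 88, "@d");
  return 0;
}
```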
@@ -222,19 +222,19 @@ define i1* @hoo1() nounwind {
 }
 
 ; PLAIN: define i64 @fa() #0 {
-; PLAIN: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2310) to i64
+; PLAIN: %t = bitcast i64 mul (i64 ptrtoint ({ [7 x double], [7 x double] }* getelementptr ({ [7 x double], [7 x double] }, { [7 x double], [7 x double] }* null, i64 11) to i64), i64 15) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fb() #0 {
-; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
+; PLAIN: %t = bitcast i64 ptrtoint ([13 x double]* getelementptr ({ i1, [13 x double] }, { i1, [13 x double] }* null, i64 0, i32 1) to i64) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fc() #0 {
-; PLAIN: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2) to i64
+; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, double, double, double }, { double, double, double, double }* null, i64 0, i32 2) to i64) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fd() #0 {
-; PLAIN: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 11) to i64
+; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr ([13 x double], [13 x double]* null, i64 0, i32 11) to i64) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fe() #0 {
@@ -242,19 +242,19 @@ define i1* @hoo1() nounwind {
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @ff() #0 {
-; PLAIN: %t = bitcast i64 1 to i64
+; PLAIN: %t = bitcast i64 ptrtoint (<{ i16, i128 }>* getelementptr ({ i1, <{ i16, i128 }> }, { i1, <{ i16, i128 }> }* null, i64 0, i32 1) to i64) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fg() #0 {
-; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
+; PLAIN: %t = bitcast i64 ptrtoint ({ double, double }* getelementptr ({ i1, { double, double } }, { i1, { double, double } }* null, i64 0, i32 1) to i64) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fh() #0 {
-; PLAIN: %t = bitcast i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64) to i64
+; PLAIN: %t = bitcast i64 ptrtoint (double** getelementptr (double*, double** null, i32 1) to i64) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fi() #0 {
-; PLAIN: %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }, { i1, i1* }* null, i64 0, i32 1) to i64) to i64
+; PLAIN: %t = bitcast i64 ptrtoint (double** getelementptr ({ i1, double* }, { i1, double* }* null, i64 0, i32 1) to i64) to i64
 ; PLAIN: ret i64 %t
 ; PLAIN: }
 ; OPT: define i64 @fa() local_unnamed_addr #0 {
@@ -311,32 +311,32 @@ define i1* @hoo1() nounwind {
 ; TO: define i64 @fi() local_unnamed_addr #0 {
 ; TO: ret i64 8
 ; TO: }
-; SCEV: Classifying expressions for: @fa
+; SCEV-LABEL: Classifying expressions for: @fa
-; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2310) to i64
+; SCEV: %t = bitcast i64 mul (i64 ptrtoint ({ [7 x double], [7 x double] }* getelementptr ({ [7 x double], [7 x double] }, { [7 x double], [7 x double] }* null, i64 11) to i64), i64 15) to i64
 ; SCEV: --> 18480
-; SCEV: Classifying expressions for: @fb
+; SCEV-LABEL: Classifying expressions for: @fb
-; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint ([13 x double]* getelementptr ({ i1, [13 x double] }, { i1, [13 x double] }* null, i64 0, i32 1) to i64) to i64
 ; SCEV: --> 8
-; SCEV: Classifying expressions for: @fc
+; SCEV-LABEL: Classifying expressions for: @fc
-; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 2) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, double, double, double }, { double, double, double, double }* null, i64 0, i32 2) to i64) to i64
 ; SCEV: --> 16
-; SCEV: Classifying expressions for: @fd
+; SCEV-LABEL: Classifying expressions for: @fd
-; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64), i64 11) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ([13 x double], [13 x double]* null, i64 0, i32 11) to i64) to i64
 ; SCEV: --> 88
-; SCEV: Classifying expressions for: @fe
+; SCEV-LABEL: Classifying expressions for: @fe
 ; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }, { double, float, double, double }* null, i64 0, i32 2) to i64) to i64
 ; SCEV: --> 16
-; SCEV: Classifying expressions for: @ff
+; SCEV-LABEL: Classifying expressions for: @ff
-; SCEV: %t = bitcast i64 1 to i64
+; SCEV: %t = bitcast i64 ptrtoint (<{ i16, i128 }>* getelementptr ({ i1, <{ i16, i128 }> }, { i1, <{ i16, i128 }> }* null, i64 0, i32 1) to i64) to i64
 ; SCEV: --> 1
-; SCEV: Classifying expressions for: @fg
+; SCEV-LABEL: Classifying expressions for: @fg
-; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }, { i1, double }* null, i64 0, i32 1) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint ({ double, double }* getelementptr ({ i1, { double, double } }, { i1, { double, double } }* null, i64 0, i32 1) to i64) to i64
 ; SCEV: --> 8
-; SCEV: Classifying expressions for: @fh
+; SCEV-LABEL: Classifying expressions for: @fh
-; SCEV: %t = bitcast i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double** getelementptr (double*, double** null, i32 1) to i64) to i64
 ; SCEV: --> 8
-; SCEV: Classifying expressions for: @fi
+; SCEV-LABEL: Classifying expressions for: @fi
-; SCEV: %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }, { i1, i1* }* null, i64 0, i32 1) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double** getelementptr ({ i1, double* }, { i1, double* }* null, i64 0, i32 1) to i64) to i64
 ; SCEV: --> 8
 
 define i64 @fa() nounwind {
@@ -33,7 +33,7 @@ define i1 @foo(i8* %p) {
 ; WASM32: icmp eq i64 {{.*}}, ptrtoint (i8* getelementptr (i8, i8* null, i64 1) to i64)
 %x = call i1 @llvm.type.test(i8* %p, metadata !"typeid1")
 ; X64: icmp eq i64 {{.*}}, ptrtoint (void ()* @[[JT1]] to i64)
-; WASM32: icmp eq i64 {{.*}}, mul (i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64), i64 2)
+; WASM32: icmp eq i64 {{.*}}, ptrtoint (i8* getelementptr (i8, i8* null, i64 2) to i64)
 %y = call i1 @llvm.type.test(i8* %p, metadata !"typeid2")
 %z = add i1 %x, %y
 ret i1 %z
@@ -19,12 +19,12 @@
 ; to constant fold the size of %0
 define i64 @f_i64() {
 ; CHECK-LABEL: @f_i64
-; CHECK: ret i64 mul (i64 ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64), i64 1099511627776)
+; CHECK: ret i64 ptrtoint (%0* getelementptr (%0, %0* null, i32 1) to i64)
 ret i64 ptrtoint (%0* getelementptr (%0, %0* null, i32 1) to i64)
 }
 
 define i32 @f_i32() {
 ; CHECK-LABEL: @f_i32
-; CHECK: ret i32 mul (i32 ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i32), i32 -2147483648)
+; CHECK: ret i32 ptrtoint (%3* getelementptr (%3, %3* null, i32 1) to i32)
 ret i32 ptrtoint (%3* getelementptr (%3, %3* null, i32 1) to i32)
 }