From 3d84f4268dd9e1257e71938485fa23d17210ba44 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Fri, 17 Feb 2023 09:29:41 +0100 Subject: [PATCH] [Clang] Convert some tests to opaque pointers (NFC) --- clang/test/CodeGen/annotations-var.c | 23 +- clang/test/CodeGen/arm-abi-vector.c | 172 +++++----- clang/test/CodeGen/arm64-abi-vector.c | 127 +++----- clang/test/CodeGen/arm64-arguments.c | 305 ++++++++---------- clang/test/CodeGen/ms_abi.c | 147 ++++----- clang/test/CodeGen/ms_abi_aarch64.c | 51 ++- .../CodeGenCXX/homogeneous-aggregates.cpp | 59 ++-- .../CodeGenCXX/microsoft-abi-eh-cleanups.cpp | 106 +++--- ...coro-unhandled-exception-exp-namespace.cpp | 16 +- .../coro-unhandled-exception.cpp | 16 +- clang/test/CodeGenObjC/arc-unsafeclaim.m | 247 ++++++-------- .../convert-messages-to-runtime-calls.m | 42 +-- .../address-spaces-conversions.cl | 42 +-- 13 files changed, 592 insertions(+), 761 deletions(-) diff --git a/clang/test/CodeGen/annotations-var.c b/clang/test/CodeGen/annotations-var.c index 4c4ab22bffd6..20ef8febc1cf 100644 --- a/clang/test/CodeGen/annotations-var.c +++ b/clang/test/CodeGen/annotations-var.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin10 -emit-llvm -o %t1 %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -o %t1 %s // RUN: FileCheck --check-prefix=LOCAL %s < %t1 // RUN: FileCheck --check-prefix=UNDEF %s < %t1 // RUN: FileCheck --check-prefix=PARAM %s < %t1 @@ -19,24 +19,18 @@ int foo(int v __attribute__((annotate("param_ann_0"))) __attribute__((annotate(" return v + 1; // PARAM: define {{.*}}@foo // PARAM: [[V:%.*]] = alloca i32 -// PARAM: bitcast i32* [[V]] to i8* -// PARAM-NEXT: call void @llvm.var.annotation.p0i8.p0i8( -// PARAM-NEXT: bitcast i32* [[V]] to i8* -// PARAM-NEXT: call void @llvm.var.annotation.p0i8.p0i8( -// PARAM-NEXT: bitcast i32* [[V]] to i8* -// PARAM-NEXT: call void @llvm.var.annotation.p0i8.p0i8( -// PARAM-NEXT: bitcast i32* [[V]] to i8* -// PARAM-NEXT: call void @llvm.var.annotation.p0i8.p0i8( +// PARAM: call void @llvm.var.annotation.p0.p0( +// PARAM-NEXT: call void @llvm.var.annotation.p0.p0( +// PARAM-NEXT: call void @llvm.var.annotation.p0.p0( +// PARAM-NEXT: call void @llvm.var.annotation.p0.p0( } void local(void) { int localvar __attribute__((annotate("localvar_ann_0"))) __attribute__((annotate("localvar_ann_1"))) = 3; // LOCAL-LABEL: define{{.*}} void @local() // LOCAL: [[LOCALVAR:%.*]] = alloca i32, -// LOCAL-NEXT: [[T0:%.*]] = bitcast i32* [[LOCALVAR]] to i8* -// LOCAL-NEXT: call void @llvm.var.annotation.p0i8.p0i8(i8* [[T0]], i8* getelementptr inbounds ([15 x i8], [15 x i8]* @{{.*}}), i8* getelementptr inbounds ({{.*}}), i32 33, i8* null) -// LOCAL-NEXT: [[T0:%.*]] = bitcast i32* [[LOCALVAR]] to i8* -// LOCAL-NEXT: call void @llvm.var.annotation.p0i8.p0i8(i8* [[T0]], i8* getelementptr inbounds ([15 x i8], [15 x i8]* @{{.*}}), i8* getelementptr inbounds ({{.*}}), i32 33, i8* null) +// LOCAL-NEXT: call void @llvm.var.annotation.p0.p0(ptr [[LOCALVAR]], ptr @{{.*}}, ptr @{{.*}}, i32 29, ptr null) +// LOCAL-NEXT: call void @llvm.var.annotation.p0.p0(ptr [[LOCALVAR]], ptr @{{.*}}, ptr @{{.*}}, i32 29, ptr null) } void local_after_return(void) { @@ -52,6 +46,5 @@ void undef(void) { int undefvar __attribute__((annotate("undefvar_ann_0"))); // UNDEF-LABEL: define{{.*}} void @undef() // UNDEF: [[UNDEFVAR:%.*]] = alloca i32, -// UNDEF-NEXT: [[T0:%.*]] = bitcast i32* [[UNDEFVAR]] to i8* -// UNDEF-NEXT: call void @llvm.var.annotation.p0i8.p0i8(i8* [[T0]], i8* getelementptr inbounds ([15 x i8], [15 
x i8]* @{{.*}}), i8* getelementptr inbounds ({{.*}}), i32 52, i8* null) +// UNDEF-NEXT: call void @llvm.var.annotation.p0.p0(ptr [[UNDEFVAR]], ptr @{{.*}}, ptr @{{.*}}, i32 46, ptr null) } diff --git a/clang/test/CodeGen/arm-abi-vector.c b/clang/test/CodeGen/arm-abi-vector.c index 85a762136a70..43620f855003 100644 --- a/clang/test/CodeGen/arm-abi-vector.c +++ b/clang/test/CodeGen/arm-abi-vector.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple armv7-apple-darwin -target-abi aapcs -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -no-opaque-pointers -triple armv7-apple-darwin -target-abi apcs-gnu -emit-llvm -o - %s | FileCheck -check-prefix=APCS-GNU %s -// RUN: %clang_cc1 -no-opaque-pointers -triple arm-linux-androideabi -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s +// RUN: %clang_cc1 -triple armv7-apple-darwin -target-abi aapcs -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple armv7-apple-darwin -target-abi apcs-gnu -emit-llvm -o - %s | FileCheck -check-prefix=APCS-GNU %s +// RUN: %clang_cc1 -triple arm-linux-androideabi -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s #include @@ -17,26 +17,23 @@ double varargs_vec_2i(int fixed, ...) { // CHECK: varargs_vec_2i // CHECK: [[VAR:%.*]] = alloca <2 x i32>, align 8 // CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8 -// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <2 x i32>* -// CHECK: [[VEC:%.*]] = load <2 x i32>, <2 x i32>* [[AP_CAST]], align 8 -// CHECK: store <2 x i32> [[VEC]], <2 x i32>* [[VAR]], align 8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 8 +// CHECK: [[VEC:%.*]] = load <2 x i32>, ptr [[AP_ALIGN]], align 8 +// CHECK: store <2 x i32> [[VEC]], ptr [[VAR]], align 8 // APCS-GNU: varargs_vec_2i // APCS-GNU: [[VAR:%.*]] = alloca <2 x i32>, align 8 -// APCS-GNU: [[AP:%.*]] = load i8*, -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 8 -// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <2 x i32>* -// APCS-GNU: [[VEC:%.*]] = load <2 x i32>, <2 x i32>* [[AP_CAST]], align 4 -// APCS-GNU: store <2 x i32> [[VEC]], <2 x i32>* [[VAR]], align 8 +// APCS-GNU: [[AP:%.*]] = load ptr, +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 8 +// APCS-GNU: [[VEC:%.*]] = load <2 x i32>, ptr [[AP]], align 4 +// APCS-GNU: store <2 x i32> [[VEC]], ptr [[VAR]], align 8 // ANDROID: varargs_vec_2i // ANDROID: [[VAR:%.*]] = alloca <2 x i32>, align 8 // ANDROID: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8 -// ANDROID: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <2 x i32>* -// ANDROID: [[VEC:%.*]] = load <2 x i32>, <2 x i32>* [[AP_CAST]], align 8 -// ANDROID: store <2 x i32> [[VEC]], <2 x i32>* [[VAR]], align 8 +// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 8 +// ANDROID: [[VEC:%.*]] = load <2 x i32>, ptr [[AP_ALIGN]], align 8 +// ANDROID: store <2 x i32> [[VEC]], ptr [[VAR]], align 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -59,16 +56,13 @@ double test_2i(__int2 *in) { double varargs_vec_3c(int fixed, ...) 
{ // CHECK: varargs_vec_3c // CHECK: alloca <3 x i8>, align 4 -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4 -// CHECK: bitcast i8* [[AP]] to <3 x i8>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP:%.*]], i32 4 // APCS-GNU: varargs_vec_3c // APCS-GNU: alloca <3 x i8>, align 4 -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4 -// APCS-GNU: bitcast i8* [[AP]] to <3 x i8>* +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP:%.*]], i32 4 // ANDROID: varargs_vec_3c // ANDROID: alloca <3 x i8>, align 4 -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4 -// ANDROID: bitcast i8* [[AP]] to <3 x i8>* +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP:%.*]], i32 4 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -92,26 +86,23 @@ double varargs_vec_5c(int fixed, ...) { // CHECK: varargs_vec_5c // CHECK: [[VAR:%.*]] = alloca <5 x i8>, align 8 // CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8 -// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <5 x i8>* -// CHECK: [[VEC:%.*]] = load <5 x i8>, <5 x i8>* [[AP_CAST]], align 8 -// CHECK: store <5 x i8> [[VEC]], <5 x i8>* [[VAR]], align 8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 8 +// CHECK: [[VEC:%.*]] = load <5 x i8>, ptr [[AP_ALIGN]], align 8 +// CHECK: store <5 x i8> [[VEC]], ptr [[VAR]], align 8 // APCS-GNU: varargs_vec_5c // APCS-GNU: [[VAR:%.*]] = alloca <5 x i8>, align 8 -// APCS-GNU: [[AP:%.*]] = load i8*, -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 8 -// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <5 x i8>* -// APCS-GNU: [[VEC:%.*]] = load <5 x i8>, <5 x i8>* [[AP_CAST]], align 4 -// APCS-GNU: store <5 x i8> [[VEC]], <5 x i8>* [[VAR]], align 8 +// APCS-GNU: [[AP:%.*]] = load ptr, +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 8 +// APCS-GNU: [[VEC:%.*]] = load <5 x i8>, ptr [[AP]], align 4 +// APCS-GNU: store <5 x i8> [[VEC]], ptr [[VAR]], align 8 // ANDROID: varargs_vec_5c // ANDROID: [[VAR:%.*]] = alloca <5 x i8>, align 8 // ANDROID: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8 -// ANDROID: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <5 x i8>* -// ANDROID: [[VEC:%.*]] = load <5 x i8>, <5 x i8>* [[AP_CAST]], align 8 -// ANDROID: store <5 x i8> [[VEC]], <5 x i8>* [[VAR]], align 8 +// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 8 +// ANDROID: [[VEC:%.*]] = load <5 x i8>, ptr [[AP_ALIGN]], align 8 +// ANDROID: store <5 x i8> [[VEC]], ptr [[VAR]], align 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -135,26 +126,23 @@ double varargs_vec_9c(int fixed, ...) 
{ // CHECK: varargs_vec_9c // CHECK: [[VAR:%.*]] = alloca <9 x i8>, align 16 // CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16 -// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <9 x i8>* -// CHECK: [[T0:%.*]] = load <9 x i8>, <9 x i8>* [[AP_CAST]], align 8 -// CHECK: store <9 x i8> [[T0]], <9 x i8>* [[VAR]], align 16 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 16 +// CHECK: [[T0:%.*]] = load <9 x i8>, ptr [[AP_ALIGN]], align 8 +// CHECK: store <9 x i8> [[T0]], ptr [[VAR]], align 16 // APCS-GNU: varargs_vec_9c // APCS-GNU: [[VAR:%.*]] = alloca <9 x i8>, align 16 -// APCS-GNU: [[AP:%.*]] = load i8*, -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 16 -// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <9 x i8>* -// APCS-GNU: [[VEC:%.*]] = load <9 x i8>, <9 x i8>* [[AP_CAST]], align 4 -// APCS-GNU: store <9 x i8> [[VEC]], <9 x i8>* [[VAR]], align 16 +// APCS-GNU: [[AP:%.*]] = load ptr, +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 16 +// APCS-GNU: [[VEC:%.*]] = load <9 x i8>, ptr [[AP]], align 4 +// APCS-GNU: store <9 x i8> [[VEC]], ptr [[VAR]], align 16 // ANDROID: varargs_vec_9c // ANDROID: [[VAR:%.*]] = alloca <9 x i8>, align 16 // ANDROID: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16 -// ANDROID: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <9 x i8>* -// ANDROID: [[T0:%.*]] = load <9 x i8>, <9 x i8>* [[AP_CAST]], align 8 -// ANDROID: store <9 x i8> [[T0]], <9 x i8>* [[VAR]], align 16 +// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 16 +// ANDROID: [[T0:%.*]] = load <9 x i8>, ptr [[AP_ALIGN]], align 8 +// ANDROID: store <9 x i8> [[T0]], ptr [[VAR]], align 16 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -176,17 +164,14 @@ double test_9c(__char9 *in) { double varargs_vec_19c(int fixed, ...) { // CHECK: varargs_vec_19c -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4 -// CHECK: [[VAR:%.*]] = bitcast i8* [[AP]] to <19 x i8>** -// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]] +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP:%.*]], i32 4 +// CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP]] // APCS-GNU: varargs_vec_19c -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4 -// APCS-GNU: [[VAR:%.*]] = bitcast i8* [[AP]] to <19 x i8>** -// APCS-GNU: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]] +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP:%.*]], i32 4 +// APCS-GNU: [[VAR2:%.*]] = load ptr, ptr [[AP]] // ANDROID: varargs_vec_19c -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4 -// ANDROID: [[VAR:%.*]] = bitcast i8* [[AP]] to <19 x i8>** -// ANDROID: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]] +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP:%.*]], i32 4 +// ANDROID: [[VAR2:%.*]] = load ptr, ptr [[AP]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -198,11 +183,11 @@ double varargs_vec_19c(int fixed, ...) { double test_19c(__char19 *in) { // CHECK: test_19c -// CHECK: call arm_aapcscc double (i32, ...) 
@varargs_vec_19c(i32 noundef 19, <19 x i8>* noundef {{%.*}}) +// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_19c(i32 noundef 19, ptr noundef {{%.*}}) // APCS-GNU: test_19c -// APCS-GNU: call double (i32, ...) @varargs_vec_19c(i32 noundef 19, <19 x i8>* noundef {{%.*}}) +// APCS-GNU: call double (i32, ...) @varargs_vec_19c(i32 noundef 19, ptr noundef {{%.*}}) // ANDROID: test_19c -// ANDROID: call double (i32, ...) @varargs_vec_19c(i32 noundef 19, <19 x i8>* noundef {{%.*}}) +// ANDROID: call double (i32, ...) @varargs_vec_19c(i32 noundef 19, ptr noundef {{%.*}}) return varargs_vec_19c(19, *in); } @@ -210,21 +195,18 @@ double varargs_vec_3s(int fixed, ...) { // CHECK: varargs_vec_3s // CHECK: alloca <3 x i16>, align 8 // CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8 -// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i16>* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 8 // APCS-GNU: varargs_vec_3s // APCS-GNU: [[VAR:%.*]] = alloca <3 x i16>, align 8 -// APCS-GNU: [[AP:%.*]] = load i8*, -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 8 -// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <3 x i16>* -// APCS-GNU: [[VEC:%.*]] = load <3 x i16>, <3 x i16>* [[AP_CAST]], align 4 +// APCS-GNU: [[AP:%.*]] = load ptr, +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 8 +// APCS-GNU: [[VEC:%.*]] = load <3 x i16>, ptr [[AP]], align 4 // ANDROID: varargs_vec_3s // ANDROID: alloca <3 x i16>, align 8 // ANDROID: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8 -// ANDROID: bitcast i8* [[AP_ALIGN]] to <3 x i16>* +// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -248,25 +230,22 @@ double varargs_vec_5s(int fixed, ...) 
{ // CHECK: varargs_vec_5s // CHECK: [[VAR_ALIGN:%.*]] = alloca <5 x i16>, align 16 // CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16 -// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <5 x i16>* -// CHECK: [[VEC:%.*]] = load <5 x i16>, <5 x i16>* [[AP_CAST]], align 8 -// CHECK: store <5 x i16> [[VEC]], <5 x i16>* [[VAR_ALIGN]], align 16 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 16 +// CHECK: [[VEC:%.*]] = load <5 x i16>, ptr [[AP_ALIGN]], align 8 +// CHECK: store <5 x i16> [[VEC]], ptr [[VAR_ALIGN]], align 16 // APCS-GNU: varargs_vec_5s // APCS-GNU: [[VAR:%.*]] = alloca <5 x i16>, align 16 -// APCS-GNU: [[AP:%.*]] = load i8*, -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 16 -// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <5 x i16>* -// APCS-GNU: [[VEC:%.*]] = load <5 x i16>, <5 x i16>* [[AP_CAST]], align 4 +// APCS-GNU: [[AP:%.*]] = load ptr, +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 16 +// APCS-GNU: [[VEC:%.*]] = load <5 x i16>, ptr [[AP]], align 4 // ANDROID: varargs_vec_5s // ANDROID: [[VAR_ALIGN:%.*]] = alloca <5 x i16>, align 16 // ANDROID: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16 -// ANDROID: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <5 x i16>* -// ANDROID: [[VEC:%.*]] = load <5 x i16>, <5 x i16>* [[AP_CAST]], align 8 -// ANDROID: store <5 x i16> [[VEC]], <5 x i16>* [[VAR_ALIGN]], align 16 +// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 16 +// ANDROID: [[VEC:%.*]] = load <5 x i16>, ptr [[AP_ALIGN]], align 8 +// ANDROID: store <5 x i16> [[VEC]], ptr [[VAR_ALIGN]], align 16 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -296,19 +275,16 @@ typedef struct double varargs_struct(int fixed, ...) 
{ // CHECK: varargs_struct // CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16 -// CHECK: bitcast i8* [[AP_ALIGN]] to %struct.StructWithVec* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 16 // APCS-GNU: varargs_struct // APCS-GNU: [[VAR_ALIGN:%.*]] = alloca %struct.StructWithVec -// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i32 16 -// APCS-GNU: bitcast %struct.StructWithVec* [[VAR_ALIGN]] to i8* +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 16 // APCS-GNU: call void @llvm.memcpy // ANDROID: varargs_struct // ANDROID: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 -// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16 -// ANDROID: bitcast i8* [[AP_ALIGN]] to %struct.StructWithVec* +// ANDROID: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to ptr +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i32 16 va_list ap; double sum = fixed; va_start(ap, fixed); diff --git a/clang/test/CodeGen/arm64-abi-vector.c b/clang/test/CodeGen/arm64-abi-vector.c index 10f721ab1f2f..2063e46c414a 100644 --- a/clang/test/CodeGen/arm64-abi-vector.c +++ b/clang/test/CodeGen/arm64-abi-vector.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -no-opaque-pointers -triple aarch64-linux-android -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s +// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-linux-android -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s #include @@ -19,8 +19,7 @@ typedef __attribute__(( ext_vector_type(3) )) double __double3; double varargs_vec_2c(int fixed, ...) { // ANDROID: varargs_vec_2c // ANDROID: [[VAR:%.*]] = alloca <2 x i8>, align 2 -// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// ANDROID: bitcast i8* [[AP_CUR]] to <2 x i8>* +// ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -38,8 +37,7 @@ double test_2c(__char2 *in) { double varargs_vec_3c(int fixed, ...) { // CHECK: varargs_vec_3c // CHECK: alloca <3 x i8>, align 4 -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -58,8 +56,7 @@ double test_3c(__char3 *in) { double varargs_vec_4c(int fixed, ...) { // CHECK: varargs_vec_4c // CHECK: alloca <4 x i8>, align 4 -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: bitcast i8* [[AP_CUR]] to <4 x i8>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -78,8 +75,7 @@ double test_4c(__char4 *in) { double varargs_vec_5c(int fixed, ...) 
{ // CHECK: varargs_vec_5c // CHECK: alloca <5 x i8>, align 8 -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -99,9 +95,8 @@ double varargs_vec_9c(int fixed, ...) { // CHECK: varargs_vec_9c // CHECK: alloca <9 x i8>, align 16 // CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16 -// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -119,9 +114,8 @@ double test_9c(__char9 *in) { double varargs_vec_19c(int fixed, ...) { // CHECK: varargs_vec_19c -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <19 x i8>** -// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]] +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 +// CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -133,15 +127,14 @@ double varargs_vec_19c(int fixed, ...) { double test_19c(__char19 *in) { // CHECK: test_19c -// CHECK: call double (i32, ...) @varargs_vec_19c(i32 noundef 19, <19 x i8>* noundef {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_19c(i32 noundef 19, ptr noundef {{%.*}}) return varargs_vec_19c(19, *in); } double varargs_vec_3s(int fixed, ...) { // CHECK: varargs_vec_3s // CHECK: alloca <3 x i16>, align 8 -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -161,9 +154,8 @@ double varargs_vec_5s(int fixed, ...) { // CHECK: varargs_vec_5s // CHECK: alloca <5 x i16>, align 16 // CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16 -// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -183,9 +175,8 @@ double varargs_vec_3i(int fixed, ...) { // CHECK: varargs_vec_3i // CHECK: alloca <3 x i32>, align 16 // CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16 -// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16 va_list ap; double sum = fixed; va_start(ap, fixed); @@ -204,9 +195,8 @@ double test_3i(__int3 *in) { double varargs_vec_5i(int fixed, ...) 
{ // CHECK: varargs_vec_5i // CHECK: alloca <5 x i32>, align 16 -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <5 x i32>** -// CHECK: [[VAR2:%.*]] = load <5 x i32>*, <5 x i32>** [[VAR]] +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 +// CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -218,16 +208,15 @@ double varargs_vec_5i(int fixed, ...) { double test_5i(__int5 *in) { // CHECK: test_5i -// CHECK: call double (i32, ...) @varargs_vec_5i(i32 noundef 5, <5 x i32>* noundef {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_5i(i32 noundef 5, ptr noundef {{%.*}}) return varargs_vec_5i(5, *in); } double varargs_vec_3d(int fixed, ...) { // CHECK: varargs_vec_3d // CHECK: alloca <3 x double>, align 16 -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <3 x double>** -// CHECK: [[VAR2:%.*]] = load <3 x double>*, <3 x double>** [[VAR]] +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 +// CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -239,7 +228,7 @@ double varargs_vec_3d(int fixed, ...) { double test_3d(__double3 *in) { // CHECK: test_3d -// CHECK: call double (i32, ...) @varargs_vec_3d(i32 noundef 3, <3 x double>* noundef {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_3d(i32 noundef 3, ptr noundef {{%.*}}) return varargs_vec_3d(3, *in); } @@ -249,49 +238,40 @@ double varargs_vec(int fixed, ...) { double sum = fixed; va_start(ap, fixed); __char3 c3 = va_arg(ap, __char3); -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 sum = sum + c3.x + c3.y; __char5 c5 = va_arg(ap, __char5); -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 sum = sum + c5.x + c5.y; __char9 c9 = va_arg(ap, __char9); // CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16 -// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16 sum = sum + c9.x + c9.y; __char19 c19 = va_arg(ap, __char19); -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <19 x i8>** -// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]] +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 +// CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]] sum = sum + c19.x + c19.y; __short3 s3 = va_arg(ap, __short3); -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>* +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 sum = sum + s3.x + s3.y; __short5 s5 = va_arg(ap, __short5); // CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr 
inbounds i8, i8* [[AP_ALIGN]], i64 16 -// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16 sum = sum + s5.x + s5.y; __int3 i3 = va_arg(ap, __int3); // CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16 -// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8* -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16 -// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>* +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to ptr +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16 sum = sum + i3.x + i3.y; __int5 i5 = va_arg(ap, __int5); -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <5 x i32>** -// CHECK: [[VAR2:%.*]] = load <5 x i32>*, <5 x i32>** [[VAR]] +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 +// CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]] sum = sum + i5.x + i5.y; __double3 d3 = va_arg(ap, __double3); -// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8 -// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <3 x double>** -// CHECK: [[VAR2:%.*]] = load <3 x double>*, <3 x double>** [[VAR]] +// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8 +// CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]] sum = sum + d3.x + d3.y; va_end(ap); return sum; @@ -301,15 +281,14 @@ double test(__char3 *c3, __char5 *c5, __char9 *c9, __char19 *c19, __short3 *s3, __short5 *s5, __int3 *i3, __int5 *i5, __double3 *d3) { double ret = varargs_vec(3, *c3, *c5, *c9, *c19, *s3, *s5, *i3, *i5, *d3); -// CHECK: call double (i32, ...) @varargs_vec(i32 noundef 3, i32 {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <19 x i8>* noundef {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <5 x i32>* noundef {{%.*}}, <3 x double>* noundef {{%.*}}) +// CHECK: call double (i32, ...) 
@varargs_vec(i32 noundef 3, i32 {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, ptr noundef {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, ptr noundef {{%.*}}, ptr noundef {{%.*}}) return ret; } __attribute__((noinline)) double args_vec_3c(int fixed, __char3 c3) { // CHECK: args_vec_3c // CHECK: [[C3:%.*]] = alloca <3 x i8>, align 4 -// CHECK: [[TMP:%.*]] = bitcast <3 x i8>* [[C3]] to i32* -// CHECK: store i32 {{%.*}}, i32* [[TMP]] +// CHECK: store i32 {{%.*}}, ptr [[C3]] double sum = fixed; sum = sum + c3.x + c3.y; return sum; @@ -324,8 +303,7 @@ double fixed_3c(__char3 *in) { __attribute__((noinline)) double args_vec_5c(int fixed, __char5 c5) { // CHECK: args_vec_5c // CHECK: [[C5:%.*]] = alloca <5 x i8>, align 8 -// CHECK: [[TMP:%.*]] = bitcast <5 x i8>* [[C5]] to <2 x i32>* -// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 8 +// CHECK: store <2 x i32> {{%.*}}, ptr [[C5]], align 8 double sum = fixed; sum = sum + c5.x + c5.y; return sum; @@ -340,8 +318,7 @@ double fixed_5c(__char5 *in) { __attribute__((noinline)) double args_vec_9c(int fixed, __char9 c9) { // CHECK: args_vec_9c // CHECK: [[C9:%.*]] = alloca <9 x i8>, align 16 -// CHECK: [[TMP:%.*]] = bitcast <9 x i8>* [[C9]] to <4 x i32>* -// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16 +// CHECK: store <4 x i32> {{%.*}}, ptr [[C9]], align 16 double sum = fixed; sum = sum + c9.x + c9.y; return sum; @@ -355,7 +332,7 @@ double fixed_9c(__char9 *in) { __attribute__((noinline)) double args_vec_19c(int fixed, __char19 c19) { // CHECK: args_vec_19c -// CHECK: [[C19:%.*]] = load <19 x i8>, <19 x i8>* {{.*}}, align 16 +// CHECK: [[C19:%.*]] = load <19 x i8>, ptr {{.*}}, align 16 double sum = fixed; sum = sum + c19.x + c19.y; return sum; @@ -363,15 +340,14 @@ __attribute__((noinline)) double args_vec_19c(int fixed, __char19 c19) { double fixed_19c(__char19 *in) { // CHECK: fixed_19c -// CHECK: call double @args_vec_19c(i32 noundef 19, <19 x i8>* noundef {{%.*}}) +// CHECK: call double @args_vec_19c(i32 noundef 19, ptr noundef {{%.*}}) return args_vec_19c(19, *in); } __attribute__((noinline)) double args_vec_3s(int fixed, __short3 c3) { // CHECK: args_vec_3s // CHECK: [[C3:%.*]] = alloca <3 x i16>, align 8 -// CHECK: [[TMP:%.*]] = bitcast <3 x i16>* [[C3]] to <2 x i32>* -// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 8 +// CHECK: store <2 x i32> {{%.*}}, ptr [[C3]], align 8 double sum = fixed; sum = sum + c3.x + c3.y; return sum; @@ -386,8 +362,7 @@ double fixed_3s(__short3 *in) { __attribute__((noinline)) double args_vec_5s(int fixed, __short5 c5) { // CHECK: args_vec_5s // CHECK: [[C5:%.*]] = alloca <5 x i16>, align 16 -// CHECK: [[TMP:%.*]] = bitcast <5 x i16>* [[C5]] to <4 x i32>* -// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16 +// CHECK: store <4 x i32> {{%.*}}, ptr [[C5]], align 16 double sum = fixed; sum = sum + c5.x + c5.y; return sum; @@ -402,8 +377,7 @@ double fixed_5s(__short5 *in) { __attribute__((noinline)) double args_vec_3i(int fixed, __int3 c3) { // CHECK: args_vec_3i // CHECK: [[C3:%.*]] = alloca <3 x i32>, align 16 -// CHECK: [[TMP:%.*]] = bitcast <3 x i32>* [[C3]] to <4 x i32>* -// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16 +// CHECK: store <4 x i32> {{%.*}}, ptr [[C3]], align 16 double sum = fixed; sum = sum + c3.x + c3.y; return sum; @@ -417,7 +391,7 @@ double fixed_3i(__int3 *in) { __attribute__((noinline)) double args_vec_5i(int fixed, __int5 c5) { // CHECK: args_vec_5i -// CHECK: [[C5:%.*]] = load <5 x i32>, <5 x i32>* {{%.*}}, 
align 16 +// CHECK: [[C5:%.*]] = load <5 x i32>, ptr {{%.*}}, align 16 double sum = fixed; sum = sum + c5.x + c5.y; return sum; @@ -425,14 +399,13 @@ __attribute__((noinline)) double args_vec_5i(int fixed, __int5 c5) { double fixed_5i(__int5 *in) { // CHECK: fixed_5i -// CHECK: call double @args_vec_5i(i32 noundef 5, <5 x i32>* noundef {{%.*}}) +// CHECK: call double @args_vec_5i(i32 noundef 5, ptr noundef {{%.*}}) return args_vec_5i(5, *in); } __attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) { // CHECK: args_vec_3d - // CHECK: [[CAST:%.*]] = bitcast <3 x double>* {{%.*}} to <4 x double>* - // CHECK: [[LOAD:%.*]] = load <4 x double>, <4 x double>* [[CAST]] + // CHECK: [[LOAD:%.*]] = load <4 x double>, ptr {{%.*}} // CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> poison, <3 x i32> double sum = fixed; sum = sum + c3.x + c3.y; @@ -441,6 +414,6 @@ __attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) { double fixed_3d(__double3 *in) { // CHECK: fixed_3d -// CHECK: call double @args_vec_3d(i32 noundef 3, <3 x double>* noundef {{%.*}}) +// CHECK: call double @args_vec_3d(i32 noundef 3, ptr noundef {{%.*}}) return args_vec_3d(3, *in); } diff --git a/clang/test/CodeGen/arm64-arguments.c b/clang/test/CodeGen/arm64-arguments.c index 6211eaa9c16a..13728e43dff6 100644 --- a/clang/test/CodeGen/arm64-arguments.c +++ b/clang/test/CodeGen/arm64-arguments.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple arm64-apple-ios7 -target-feature +neon -target-abi darwinpcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-LE -// RUN: %clang_cc1 -no-opaque-pointers -triple aarch64_be-none-linux-gnu -target-feature +neon -target-abi darwinpcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-BE +// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -target-abi darwinpcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-LE +// RUN: %clang_cc1 -triple aarch64_be-none-linux-gnu -target-feature +neon -target-abi darwinpcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-BE // REQUIRES: aarch64-registered-target || arm-registered-target @@ -163,13 +163,13 @@ void f32(struct s32 s) { } // A composite type larger than 16 bytes should be passed indirectly. struct s33 { char buf[32*32]; }; void f33(struct s33 s) { } -// CHECK: define{{.*}} void @f33(%struct.s33* noundef %s) +// CHECK: define{{.*}} void @f33(ptr noundef %s) struct s34 { char c; }; void f34(struct s34 s); void g34(struct s34 *s) { f34(*s); } -// CHECK: @g34(%struct.s34* noundef %s) -// CHECK: %[[a:.*]] = load i8, i8* %{{.*}} +// CHECK: @g34(ptr noundef %s) +// CHECK: %[[a:.*]] = load i8, ptr %{{.*}} // CHECK: zext i8 %[[a]] to i64 // CHECK: call void @f34(i64 %{{.*}}) @@ -200,8 +200,8 @@ _Bool t3(int i, ...) { // CHECK: t3 __builtin_va_list ap; __builtin_va_start(ap, i); - // CHECK: %0 = va_arg {{.*}}* %ap, i8 - // CHECK-NEXT: store i8 %0, i8* %varet, align 1 + // CHECK: %0 = va_arg ptr %ap, i8 + // CHECK-NEXT: store i8 %0, ptr %varet, align 1 _Bool b = __builtin_va_arg(ap, _Bool); __builtin_va_end(ap); return b; @@ -226,9 +226,9 @@ T_float32x2 f1_0(T_float32x2 a0) { return a0; } // CHECK: define{{.*}} <4 x float> @f1_1(<4 x float> noundef %{{.*}}) T_float32x4 f1_1(T_float32x4 a0) { return a0; } // Vector with length bigger than 16-byte is illegal and is passed indirectly. 
-// CHECK: define{{.*}} void @f1_2(<8 x float>* noalias sret(<8 x float>) align 16 %{{.*}}, <8 x float>* noundef %0) +// CHECK: define{{.*}} void @f1_2(ptr noalias sret(<8 x float>) align 16 %{{.*}}, ptr noundef %0) T_float32x8 f1_2(T_float32x8 a0) { return a0; } -// CHECK: define{{.*}} void @f1_3(<16 x float>* noalias sret(<16 x float>) align 16 %{{.*}}, <16 x float>* noundef %0) +// CHECK: define{{.*}} void @f1_3(ptr noalias sret(<16 x float>) align 16 %{{.*}}, ptr noundef %0) T_float32x16 f1_3(T_float32x16 a0) { return a0; } // Testing alignment with aggregates: HFA, aggregates with size <= 16 bytes and @@ -244,10 +244,8 @@ float32x4_t f35(int i, s35_with_align s1, s35_with_align s2) { // CHECK: define{{.*}} <4 x float> @f35(i32 noundef %i, [4 x float] %s1.coerce, [4 x float] %s2.coerce) // CHECK: %s1 = alloca %struct.s35, align 16 // CHECK: %s2 = alloca %struct.s35, align 16 -// CHECK: %[[a:.*]] = bitcast %struct.s35* %s1 to <4 x float>* -// CHECK: load <4 x float>, <4 x float>* %[[a]], align 16 -// CHECK: %[[b:.*]] = bitcast %struct.s35* %s2 to <4 x float>* -// CHECK: load <4 x float>, <4 x float>* %[[b]], align 16 +// CHECK: load <4 x float>, ptr %s1, align 16 +// CHECK: load <4 x float>, ptr %s2, align 16 float32x4_t v = vaddq_f32(*(float32x4_t *)&s1, *(float32x4_t *)&s2); return v; @@ -264,12 +262,10 @@ int32x4_t f36(int i, s36_with_align s1, s36_with_align s2) { // CHECK: define{{.*}} <4 x i32> @f36(i32 noundef %i, i128 %s1.coerce, i128 %s2.coerce) // CHECK: %s1 = alloca %struct.s36, align 16 // CHECK: %s2 = alloca %struct.s36, align 16 -// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 -// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16 -// CHECK: %[[a:.*]] = bitcast %struct.s36* %s1 to <4 x i32>* -// CHECK: load <4 x i32>, <4 x i32>* %[[a]], align 16 -// CHECK: %[[b:.*]] = bitcast %struct.s36* %s2 to <4 x i32>* -// CHECK: load <4 x i32>, <4 x i32>* %[[b]], align 16 +// CHECK: store i128 %s1.coerce, ptr %{{.*}}, align 16 +// CHECK: store i128 %s2.coerce, ptr %{{.*}}, align 16 +// CHECK: load <4 x i32>, ptr %s1, align 16 +// CHECK: load <4 x i32>, ptr %s2, align 16 int32x4_t v = vaddq_s32(*(int32x4_t *)&s1, *(int32x4_t *)&s2); return v; @@ -282,11 +278,9 @@ struct s37 typedef struct s37 s37_with_align; int32x4_t f37(int i, s37_with_align s1, s37_with_align s2) { -// CHECK: define{{.*}} <4 x i32> @f37(i32 noundef %i, %struct.s37* noundef %s1, %struct.s37* noundef %s2) -// CHECK: %[[a:.*]] = bitcast %struct.s37* %s1 to <4 x i32>* -// CHECK: load <4 x i32>, <4 x i32>* %[[a]], align 16 -// CHECK: %[[b:.*]] = bitcast %struct.s37* %s2 to <4 x i32>* -// CHECK: load <4 x i32>, <4 x i32>* %[[b]], align 16 +// CHECK: define{{.*}} <4 x i32> @f37(i32 noundef %i, ptr noundef %s1, ptr noundef %s2) +// CHECK: load <4 x i32>, ptr %s1, align 16 +// CHECK: load <4 x i32>, ptr %s2, align 16 int32x4_t v = vaddq_s32(*(int32x4_t *)&s1, *(int32x4_t *)&s2); return v; @@ -298,7 +292,7 @@ int32x4_t caller37() { // CHECK: %[[b:.*]] = alloca %struct.s37, align 16 // CHECK: call void @llvm.memcpy // CHECK: call void @llvm.memcpy -// CHECK: call <4 x i32> @f37(i32 noundef 3, %struct.s37* noundef %[[a]], %struct.s37* noundef %[[b]]) +// CHECK: call <4 x i32> @f37(i32 noundef 3, ptr noundef %[[a]], ptr noundef %[[b]]) return f37(3, g37, g37); } @@ -320,20 +314,20 @@ int f38(int i, s38_no_align s1, s38_no_align s2) { // CHECK: define{{.*}} i32 @f38(i32 noundef %i, i64 %s1.coerce, i64 %s2.coerce) // CHECK: %s1 = alloca %struct.s38, align 4 // CHECK: %s2 = alloca %struct.s38, align 4 -// CHECK: store 
i64 %s1.coerce, i64* %{{.*}}, align 4 -// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 4 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 1 +// CHECK: store i64 %s1.coerce, ptr %{{.*}}, align 4 +// CHECK: store i64 %s2.coerce, ptr %{{.*}}, align 4 +// CHECK: getelementptr inbounds %struct.s38, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s38, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s38, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s38, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + s1.s + s2.s; } s38_no_align g38; s38_no_align g38_2; int caller38() { // CHECK: define{{.*}} i32 @caller38() -// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 -// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 +// CHECK: %[[a:.*]] = load i64, ptr @g38, align 4 +// CHECK: %[[b:.*]] = load i64, ptr @g38_2, align 4 // CHECK: call i32 @f38(i32 noundef 3, i64 %[[a]], i64 %[[b]]) return f38(3, g38, g38_2); } @@ -344,18 +338,18 @@ int f38_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, // CHECK: define{{.*}} i32 @f38_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, i64 %s1.coerce, i64 %s2.coerce) // CHECK: %s1 = alloca %struct.s38, align 4 // CHECK: %s2 = alloca %struct.s38, align 4 -// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 4 -// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 4 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 1 +// CHECK: store i64 %s1.coerce, ptr %{{.*}}, align 4 +// CHECK: store i64 %s2.coerce, ptr %{{.*}}, align 4 +// CHECK: getelementptr inbounds %struct.s38, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s38, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s38, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s38, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s; } int caller38_stack() { // CHECK: define{{.*}} i32 @caller38_stack() -// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 -// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 +// CHECK: %[[a:.*]] = load i64, ptr @g38, align 4 +// CHECK: %[[b:.*]] = load i64, ptr @g38_2, align 4 // CHECK: call i32 @f38_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, i32 noundef 9, i64 %[[a]], i64 %[[b]]) return f38_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g38, g38_2); } @@ -373,20 +367,20 @@ int f39(int i, s39_with_align s1, s39_with_align s2) { // CHECK: define{{.*}} i32 @f39(i32 noundef %i, i128 %s1.coerce, i128 %s2.coerce) // CHECK: %s1 = alloca %struct.s39, align 16 // CHECK: %s2 = alloca %struct.s39, align 16 -// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 -// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16 -// CHECK: 
getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s2, i32 0, i32 1 +// CHECK: store i128 %s1.coerce, ptr %{{.*}}, align 16 +// CHECK: store i128 %s2.coerce, ptr %{{.*}}, align 16 +// CHECK: getelementptr inbounds %struct.s39, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s39, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s39, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s39, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + s1.s + s2.s; } s39_with_align g39; s39_with_align g39_2; int caller39() { // CHECK: define{{.*}} i32 @caller39() -// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 -// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 +// CHECK: %[[a:.*]] = load i128, ptr @g39, align 16 +// CHECK: %[[b:.*]] = load i128, ptr @g39_2, align 16 // CHECK: call i32 @f39(i32 noundef 3, i128 %[[a]], i128 %[[b]]) return f39(3, g39, g39_2); } @@ -397,18 +391,18 @@ int f39_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, // CHECK: define{{.*}} i32 @f39_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, i128 %s1.coerce, i128 %s2.coerce) // CHECK: %s1 = alloca %struct.s39, align 16 // CHECK: %s2 = alloca %struct.s39, align 16 -// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 -// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16 -// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s2, i32 0, i32 1 +// CHECK: store i128 %s1.coerce, ptr %{{.*}}, align 16 +// CHECK: store i128 %s2.coerce, ptr %{{.*}}, align 16 +// CHECK: getelementptr inbounds %struct.s39, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s39, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s39, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s39, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s; } int caller39_stack() { // CHECK: define{{.*}} i32 @caller39_stack() -// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 -// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 +// CHECK: %[[a:.*]] = load i128, ptr @g39, align 16 +// CHECK: %[[b:.*]] = load i128, ptr @g39_2, align 16 // CHECK: call i32 @f39_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, i32 noundef 9, i128 %[[a]], i128 %[[b]]) return f39_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g39, g39_2); } @@ -428,20 +422,20 @@ int f40(int i, s40_no_align s1, s40_no_align s2) { // CHECK: define{{.*}} i32 @f40(i32 noundef %i, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce) // CHECK: %s1 = alloca %struct.s40, align 4 // CHECK: %s2 = alloca %struct.s40, align 4 -// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 4 -// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 4 -// CHECK: getelementptr inbounds %struct.s40, 
%struct.s40* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 1 +// CHECK: store [2 x i64] %s1.coerce, ptr %{{.*}}, align 4 +// CHECK: store [2 x i64] %s2.coerce, ptr %{{.*}}, align 4 +// CHECK: getelementptr inbounds %struct.s40, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s40, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s40, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s40, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + s1.s + s2.s; } s40_no_align g40; s40_no_align g40_2; int caller40() { // CHECK: define{{.*}} i32 @caller40() -// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 -// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 +// CHECK: %[[a:.*]] = load [2 x i64], ptr @g40, align 4 +// CHECK: %[[b:.*]] = load [2 x i64], ptr @g40_2, align 4 // CHECK: call i32 @f40(i32 noundef 3, [2 x i64] %[[a]], [2 x i64] %[[b]]) return f40(3, g40, g40_2); } @@ -452,18 +446,18 @@ int f40_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, // CHECK: define{{.*}} i32 @f40_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce) // CHECK: %s1 = alloca %struct.s40, align 4 // CHECK: %s2 = alloca %struct.s40, align 4 -// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 4 -// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 4 -// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 1 +// CHECK: store [2 x i64] %s1.coerce, ptr %{{.*}}, align 4 +// CHECK: store [2 x i64] %s2.coerce, ptr %{{.*}}, align 4 +// CHECK: getelementptr inbounds %struct.s40, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s40, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s40, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s40, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s; } int caller40_stack() { // CHECK: define{{.*}} i32 @caller40_stack() -// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 -// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 +// CHECK: %[[a:.*]] = load [2 x i64], ptr @g40, align 4 +// CHECK: %[[b:.*]] = load [2 x i64], ptr @g40_2, align 4 // CHECK: call i32 @f40_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, i32 noundef 9, [2 x i64] %[[a]], [2 x i64] %[[b]]) return f40_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g40, g40_2); } @@ -483,20 +477,20 @@ int f41(int i, s41_with_align s1, s41_with_align s2) { // CHECK: define{{.*}} i32 @f41(i32 noundef %i, i128 %s1.coerce, i128 %s2.coerce) // CHECK: %s1 = alloca %struct.s41, align 16 // CHECK: %s2 = alloca %struct.s41, align 16 -// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 -// CHECK: store i128 %s2.coerce, 
i128* %{{.*}}, align 16 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s2, i32 0, i32 1 +// CHECK: store i128 %s1.coerce, ptr %{{.*}}, align 16 +// CHECK: store i128 %s2.coerce, ptr %{{.*}}, align 16 +// CHECK: getelementptr inbounds %struct.s41, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s41, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s41, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s41, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + s1.s + s2.s; } s41_with_align g41; s41_with_align g41_2; int caller41() { // CHECK: define{{.*}} i32 @caller41() -// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 -// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 +// CHECK: %[[a:.*]] = load i128, ptr @g41, align 16 +// CHECK: %[[b:.*]] = load i128, ptr @g41_2, align 16 // CHECK: call i32 @f41(i32 noundef 3, i128 %[[a]], i128 %[[b]]) return f41(3, g41, g41_2); } @@ -507,18 +501,18 @@ int f41_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, // CHECK: define{{.*}} i32 @f41_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, i128 %s1.coerce, i128 %s2.coerce) // CHECK: %s1 = alloca %struct.s41, align 16 // CHECK: %s2 = alloca %struct.s41, align 16 -// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 -// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s2, i32 0, i32 1 +// CHECK: store i128 %s1.coerce, ptr %{{.*}}, align 16 +// CHECK: store i128 %s2.coerce, ptr %{{.*}}, align 16 +// CHECK: getelementptr inbounds %struct.s41, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s41, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s41, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s41, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s; } int caller41_stack() { // CHECK: define{{.*}} i32 @caller41_stack() -// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 -// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 +// CHECK: %[[a:.*]] = load i128, ptr @g41, align 16 +// CHECK: %[[b:.*]] = load i128, ptr @g41_2, align 16 // CHECK: call i32 @f41_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, i32 noundef 9, i128 %[[a]], i128 %[[b]]) return f41_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g41, g41_2); } @@ -537,11 +531,11 @@ typedef struct s42 s42_no_align; // passing structs in registers __attribute__ ((noinline)) int f42(int i, s42_no_align s1, s42_no_align s2) { -// CHECK: define{{.*}} i32 @f42(i32 noundef %i, %struct.s42* noundef %s1, %struct.s42* noundef %s2) -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* 
%s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* %s2, i32 0, i32 1 +// CHECK: define{{.*}} i32 @f42(i32 noundef %i, ptr noundef %s1, ptr noundef %s2) +// CHECK: getelementptr inbounds %struct.s42, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s42, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s42, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s42, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + s1.s + s2.s; } s42_no_align g42; @@ -550,33 +544,29 @@ int caller42() { // CHECK: define{{.*}} i32 @caller42() // CHECK: %[[a:.*]] = alloca %struct.s42, align 4 // CHECK: %[[b:.*]] = alloca %struct.s42, align 4 -// CHECK: %[[c:.*]] = bitcast %struct.s42* %[[a]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: %[[d:.*]] = bitcast %struct.s42* %[[b]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: call i32 @f42(i32 noundef 3, %struct.s42* noundef %[[a]], %struct.s42* noundef %[[b]]) +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call i32 @f42(i32 noundef 3, ptr noundef %[[a]], ptr noundef %[[b]]) return f42(3, g42, g42_2); } // passing structs on stack __attribute__ ((noinline)) int f42_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, s42_no_align s1, s42_no_align s2) { -// CHECK: define{{.*}} i32 @f42_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, %struct.s42* noundef %s1, %struct.s42* noundef %s2) -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s42, %struct.s42* %s2, i32 0, i32 1 +// CHECK: define{{.*}} i32 @f42_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, ptr noundef %s1, ptr noundef %s2) +// CHECK: getelementptr inbounds %struct.s42, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s42, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s42, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s42, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s; } int caller42_stack() { // CHECK: define{{.*}} i32 @caller42_stack() // CHECK: %[[a:.*]] = alloca %struct.s42, align 4 // CHECK: %[[b:.*]] = alloca %struct.s42, align 4 -// CHECK: %[[c:.*]] = bitcast %struct.s42* %[[a]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: %[[d:.*]] = bitcast %struct.s42* %[[b]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: call i32 @f42_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, i32 noundef 9, %struct.s42* noundef %[[a]], %struct.s42* noundef %[[b]]) +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call i32 @f42_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, i32 noundef 9, ptr noundef %[[a]], ptr noundef %[[b]]) return f42_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g42, g42_2); } @@ 
-594,11 +584,11 @@ typedef struct s43 s43_with_align; // passing aligned structs in registers __attribute__ ((noinline)) int f43(int i, s43_with_align s1, s43_with_align s2) { -// CHECK: define{{.*}} i32 @f43(i32 noundef %i, %struct.s43* noundef %s1, %struct.s43* noundef %s2) -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s2, i32 0, i32 1 +// CHECK: define{{.*}} i32 @f43(i32 noundef %i, ptr noundef %s1, ptr noundef %s2) +// CHECK: getelementptr inbounds %struct.s43, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s43, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s43, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s43, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + s1.s + s2.s; } s43_with_align g43; @@ -607,33 +597,29 @@ int caller43() { // CHECK: define{{.*}} i32 @caller43() // CHECK: %[[a:.*]] = alloca %struct.s43, align 16 // CHECK: %[[b:.*]] = alloca %struct.s43, align 16 -// CHECK: %[[c:.*]] = bitcast %struct.s43* %[[a]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: %[[d:.*]] = bitcast %struct.s43* %[[b]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: call i32 @f43(i32 noundef 3, %struct.s43* noundef %[[a]], %struct.s43* noundef %[[b]]) +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call i32 @f43(i32 noundef 3, ptr noundef %[[a]], ptr noundef %[[b]]) return f43(3, g43, g43_2); } // passing aligned structs on stack __attribute__ ((noinline)) int f43_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, s43_with_align s1, s43_with_align s2) { -// CHECK: define{{.*}} i32 @f43_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, %struct.s43* noundef %s1, %struct.s43* noundef %s2) -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s1, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s2, i32 0, i32 0 -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s1, i32 0, i32 1 -// CHECK: getelementptr inbounds %struct.s43, %struct.s43* %s2, i32 0, i32 1 +// CHECK: define{{.*}} i32 @f43_stack(i32 noundef %i, i32 noundef %i2, i32 noundef %i3, i32 noundef %i4, i32 noundef %i5, i32 noundef %i6, i32 noundef %i7, i32 noundef %i8, i32 noundef %i9, ptr noundef %s1, ptr noundef %s2) +// CHECK: getelementptr inbounds %struct.s43, ptr %s1, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s43, ptr %s2, i32 0, i32 0 +// CHECK: getelementptr inbounds %struct.s43, ptr %s1, i32 0, i32 1 +// CHECK: getelementptr inbounds %struct.s43, ptr %s2, i32 0, i32 1 return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s; } int caller43_stack() { // CHECK: define{{.*}} i32 @caller43_stack() // CHECK: %[[a:.*]] = alloca %struct.s43, align 16 // CHECK: %[[b:.*]] = alloca %struct.s43, align 16 -// CHECK: %[[c:.*]] = bitcast %struct.s43* %[[a]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: %[[d:.*]] = bitcast %struct.s43* %[[b]] to i8* -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 -// CHECK: call i32 @f43_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, 
i32 noundef 9, %struct.s43* noundef %[[a]], %struct.s43* noundef %[[b]]) +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call void @llvm.memcpy.p0.p0.i64 +// CHECK: call i32 @f43_stack(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, i32 noundef 6, i32 noundef 7, i32 noundef 8, i32 noundef 9, ptr noundef %[[a]], ptr noundef %[[b]]) return f43_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g43, g43_2); } @@ -670,14 +656,13 @@ struct HFA { float test_hfa(int n, ...) { // CHECK-LE-LABEL: define{{.*}} float @test_hfa(i32 noundef %n, ...) -// CHECK-LE: [[THELIST:%.*]] = alloca i8* -// CHECK-LE: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] +// CHECK-LE: [[THELIST:%.*]] = alloca ptr +// CHECK-LE: [[CURLIST:%.*]] = load ptr, ptr [[THELIST]] // HFA is not indirect, so occupies its full 16 bytes on the stack. -// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[CURLIST]], i64 16 -// CHECK-LE: store i8* [[NEXTLIST]], i8** [[THELIST]] +// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, ptr [[CURLIST]], i64 16 +// CHECK-LE: store ptr [[NEXTLIST]], ptr [[THELIST]] -// CHECK-LE: bitcast i8* [[CURLIST]] to %struct.HFA* __builtin_va_list thelist; __builtin_va_start(thelist, n); struct HFA h = __builtin_va_arg(thelist, struct HFA); @@ -685,7 +670,7 @@ float test_hfa(int n, ...) { } float test_hfa_call(struct HFA *a) { -// CHECK-LABEL: define{{.*}} float @test_hfa_call(%struct.HFA* noundef %a) +// CHECK-LABEL: define{{.*}} float @test_hfa_call(ptr noundef %a) // CHECK: call float (i32, ...) @test_hfa(i32 noundef 1, [4 x float] {{.*}}) test_hfa(1, *a); } @@ -696,16 +681,15 @@ struct TooBigHFA { float test_toobig_hfa(int n, ...) { // CHECK-LE-LABEL: define{{.*}} float @test_toobig_hfa(i32 noundef %n, ...) -// CHECK-LE: [[THELIST:%.*]] = alloca i8* -// CHECK-LE: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] +// CHECK-LE: [[THELIST:%.*]] = alloca ptr +// CHECK-LE: [[CURLIST:%.*]] = load ptr, ptr [[THELIST]] // TooBigHFA is not actually an HFA, so gets passed indirectly. Only 8 bytes // of stack consumed. -// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[CURLIST]], i64 8 -// CHECK-LE: store i8* [[NEXTLIST]], i8** [[THELIST]] +// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, ptr [[CURLIST]], i64 8 +// CHECK-LE: store ptr [[NEXTLIST]], ptr [[THELIST]] -// CHECK-LE: [[HFAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to %struct.TooBigHFA** -// CHECK-LE: [[HFAPTR:%.*]] = load %struct.TooBigHFA*, %struct.TooBigHFA** [[HFAPTRPTR]] +// CHECK-LE: [[HFAPTR:%.*]] = load ptr, ptr [[CURLIST]] __builtin_va_list thelist; __builtin_va_start(thelist, n); struct TooBigHFA h = __builtin_va_arg(thelist, struct TooBigHFA); @@ -718,20 +702,19 @@ struct HVA { int32x4_t test_hva(int n, ...) { // CHECK-LE-LABEL: define{{.*}} <4 x i32> @test_hva(i32 noundef %n, ...) -// CHECK-LE: [[THELIST:%.*]] = alloca i8* -// CHECK-LE: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] +// CHECK-LE: [[THELIST:%.*]] = alloca ptr +// CHECK-LE: [[CURLIST:%.*]] = load ptr, ptr [[THELIST]] // HVA is not indirect, so occupies its full 16 bytes on the stack. but it // must be properly aligned. 
-// CHECK-LE: [[ALIGN0:%.*]] = ptrtoint i8* [[CURLIST]] to i64 +// CHECK-LE: [[ALIGN0:%.*]] = ptrtoint ptr [[CURLIST]] to i64 // CHECK-LE: [[ALIGN1:%.*]] = add i64 [[ALIGN0]], 15 // CHECK-LE: [[ALIGN2:%.*]] = and i64 [[ALIGN1]], -16 -// CHECK-LE: [[ALIGNED_LIST:%.*]] = inttoptr i64 [[ALIGN2]] to i8* +// CHECK-LE: [[ALIGNED_LIST:%.*]] = inttoptr i64 [[ALIGN2]] to ptr -// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[ALIGNED_LIST]], i64 32 -// CHECK-LE: store i8* [[NEXTLIST]], i8** [[THELIST]] +// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, ptr [[ALIGNED_LIST]], i64 32 +// CHECK-LE: store ptr [[NEXTLIST]], ptr [[THELIST]] -// CHECK-LE: bitcast i8* [[ALIGNED_LIST]] to %struct.HVA* __builtin_va_list thelist; __builtin_va_start(thelist, n); struct HVA h = __builtin_va_arg(thelist, struct HVA); @@ -744,16 +727,15 @@ struct TooBigHVA { int32x4_t test_toobig_hva(int n, ...) { // CHECK-LE-LABEL: define{{.*}} <4 x i32> @test_toobig_hva(i32 noundef %n, ...) -// CHECK-LE: [[THELIST:%.*]] = alloca i8* -// CHECK-LE: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] +// CHECK-LE: [[THELIST:%.*]] = alloca ptr +// CHECK-LE: [[CURLIST:%.*]] = load ptr, ptr [[THELIST]] // TooBigHVA is not actually an HVA, so gets passed indirectly. Only 8 bytes // of stack consumed. -// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[CURLIST]], i64 8 -// CHECK-LE: store i8* [[NEXTLIST]], i8** [[THELIST]] +// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, ptr [[CURLIST]], i64 8 +// CHECK-LE: store ptr [[NEXTLIST]], ptr [[THELIST]] -// CHECK-LE: [[HVAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to %struct.TooBigHVA** -// CHECK-LE: [[HVAPTR:%.*]] = load %struct.TooBigHVA*, %struct.TooBigHVA** [[HVAPTRPTR]] +// CHECK-LE: [[HVAPTR:%.*]] = load ptr, ptr [[CURLIST]] __builtin_va_list thelist; __builtin_va_start(thelist, n); struct TooBigHVA h = __builtin_va_arg(thelist, struct TooBigHVA); @@ -765,20 +747,19 @@ typedef struct { float32x3_t arr[4]; } HFAv3; float32x3_t test_hva_v3(int n, ...) { // CHECK-LE-LABEL: define{{.*}} <3 x float> @test_hva_v3(i32 noundef %n, ...) -// CHECK-LE: [[THELIST:%.*]] = alloca i8* -// CHECK-LE: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] +// CHECK-LE: [[THELIST:%.*]] = alloca ptr +// CHECK-LE: [[CURLIST:%.*]] = load ptr, ptr [[THELIST]] // HVA is not indirect, so occupies its full 16 bytes on the stack. but it // must be properly aligned. -// CHECK-LE: [[ALIGN0:%.*]] = ptrtoint i8* [[CURLIST]] to i64 +// CHECK-LE: [[ALIGN0:%.*]] = ptrtoint ptr [[CURLIST]] to i64 // CHECK-LE: [[ALIGN1:%.*]] = add i64 [[ALIGN0]], 15 // CHECK-LE: [[ALIGN2:%.*]] = and i64 [[ALIGN1]], -16 -// CHECK-LE: [[ALIGNED_LIST:%.*]] = inttoptr i64 [[ALIGN2]] to i8* +// CHECK-LE: [[ALIGNED_LIST:%.*]] = inttoptr i64 [[ALIGN2]] to ptr -// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[ALIGNED_LIST]], i64 64 -// CHECK-LE: store i8* [[NEXTLIST]], i8** [[THELIST]] +// CHECK-LE: [[NEXTLIST:%.*]] = getelementptr inbounds i8, ptr [[ALIGNED_LIST]], i64 64 +// CHECK-LE: store ptr [[NEXTLIST]], ptr [[THELIST]] -// CHECK-LE: bitcast i8* [[ALIGNED_LIST]] to %struct.HFAv3* __builtin_va_list l; __builtin_va_start(l, n); HFAv3 r = __builtin_va_arg(l, HFAv3); @@ -786,7 +767,7 @@ float32x3_t test_hva_v3(int n, ...) { } float32x3_t test_hva_v3_call(HFAv3 *a) { -// CHECK-LABEL: define{{.*}} <3 x float> @test_hva_v3_call(%struct.HFAv3* noundef %a) +// CHECK-LABEL: define{{.*}} <3 x float> @test_hva_v3_call(ptr noundef %a) // CHECK: call <3 x float> (i32, ...) 
@test_hva_v3(i32 noundef 1, [4 x <4 x float>] {{.*}}) return test_hva_v3(1, *a); } diff --git a/clang/test/CodeGen/ms_abi.c b/clang/test/CodeGen/ms_abi.c index 6ebe5b834107..adc5094267cb 100644 --- a/clang/test/CodeGen/ms_abi.c +++ b/clang/test/CodeGen/ms_abi.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-freebsd10.0 -emit-llvm < %s | FileCheck -check-prefix=FREEBSD %s -// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-pc-win32 -emit-llvm < %s | FileCheck -check-prefix=WIN64 %s +// RUN: %clang_cc1 -triple x86_64-unknown-freebsd10.0 -emit-llvm < %s | FileCheck -check-prefix=FREEBSD %s +// RUN: %clang_cc1 -triple x86_64-pc-win32 -emit-llvm < %s | FileCheck -check-prefix=WIN64 %s struct foo { int x; @@ -32,47 +32,41 @@ void __attribute__((ms_abi)) f4(int a, ...) { // WIN64-LABEL: define dso_local void @f4 __builtin_ms_va_list ap; __builtin_ms_va_start(ap, a); - // FREEBSD: %[[AP:.*]] = alloca i8* + // FREEBSD: %[[AP:.*]] = alloca ptr // FREEBSD: call void @llvm.va_start - // WIN64: %[[AP:.*]] = alloca i8* + // WIN64: %[[AP:.*]] = alloca ptr // WIN64: call void @llvm.va_start int b = __builtin_va_arg(ap, int); - // FREEBSD: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // FREEBSD-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // FREEBSD-NEXT: bitcast i8* %[[AP_CUR]] to i32* - // WIN64: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR]] to i32* + // FREEBSD: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // FREEBSD-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] + // WIN64: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] double _Complex c = __builtin_va_arg(ap, double _Complex); - // FREEBSD: %[[AP_CUR2:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR2]], i64 8 - // FREEBSD-NEXT: store i8* %[[AP_NEXT2]], i8** %[[AP]] - // FREEBSD-NEXT: %[[CUR2:.*]] = bitcast i8* %[[AP_CUR2]] to { double, double }** - // FREEBSD-NEXT: load { double, double }*, { double, double }** %[[CUR2]] - // WIN64: %[[AP_CUR2:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR2]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT2]], i8** %[[AP]] - // WIN64-NEXT: %[[CUR2:.*]] = bitcast i8* %[[AP_CUR2]] to { double, double }** - // WIN64-NEXT: load { double, double }*, { double, double }** %[[CUR2]] + // FREEBSD: %[[AP_CUR2:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR2]], i64 8 + // FREEBSD-NEXT: store ptr %[[AP_NEXT2]], ptr %[[AP]] + // FREEBSD-NEXT: load ptr, ptr %[[AP_CUR2]] + // WIN64: %[[AP_CUR2:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR2]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT2]], ptr %[[AP]] + // WIN64-NEXT: load ptr, ptr %[[AP_CUR2]] struct foo d = __builtin_va_arg(ap, struct foo); - // FREEBSD: %[[AP_CUR3:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR3]], i64 8 - // FREEBSD-NEXT: store i8* %[[AP_NEXT3]], i8** %[[AP]] - // FREEBSD-NEXT: 
%[[CUR3:.*]] = bitcast i8* %[[AP_CUR3]] to %[[STRUCT_FOO]]* - // FREEBSD-NEXT: load %[[STRUCT_FOO]]*, %[[STRUCT_FOO]]** %[[CUR3]] - // WIN64: %[[AP_CUR3:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR3]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT3]], i8** %[[AP]] - // WIN64-NEXT: %[[CUR3:.*]] = bitcast i8* %[[AP_CUR3]] to %[[STRUCT_FOO]]* - // WIN64-NEXT: load %[[STRUCT_FOO]]*, %[[STRUCT_FOO]]** %[[CUR3]] + // FREEBSD: %[[AP_CUR3:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR3]], i64 8 + // FREEBSD-NEXT: store ptr %[[AP_NEXT3]], ptr %[[AP]] + // FREEBSD-NEXT: load ptr, ptr %[[AP_CUR3]] + // WIN64: %[[AP_CUR3:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR3]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT3]], ptr %[[AP]] + // WIN64-NEXT: load ptr, ptr %[[AP_CUR3]] __builtin_ms_va_list ap2; __builtin_ms_va_copy(ap2, ap); - // FREEBSD: %[[AP_VAL:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: store i8* %[[AP_VAL]], i8** %[[AP2:.*]] - // WIN64: %[[AP_VAL:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: store i8* %[[AP_VAL]], i8** %[[AP2:.*]] + // FREEBSD: %[[AP_VAL:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: store ptr %[[AP_VAL]], ptr %[[AP2:.*]] + // WIN64: %[[AP_VAL:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: store ptr %[[AP_VAL]], ptr %[[AP2:.*]] __builtin_ms_va_end(ap); // FREEBSD: call void @llvm.va_end // WIN64: call void @llvm.va_end @@ -83,23 +77,20 @@ void f5(int a, ...) { // WIN64-LABEL: define dso_local void @f5 __builtin_va_list ap; __builtin_va_start(ap, a); - // WIN64: %[[AP:.*]] = alloca i8* + // WIN64: %[[AP:.*]] = alloca ptr // WIN64: call void @llvm.va_start int b = __builtin_va_arg(ap, int); - // WIN64: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR]] to i32* + // WIN64: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] double _Complex c = __builtin_va_arg(ap, double _Complex); - // WIN64: %[[AP_CUR2:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR2]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT2]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR2]] to { double, double }* + // WIN64: %[[AP_CUR2:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR2]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT2]], ptr %[[AP]] struct foo d = __builtin_va_arg(ap, struct foo); - // WIN64: %[[AP_CUR3:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR3]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT3]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR3]] to %[[STRUCT_FOO]]* + // WIN64: %[[AP_CUR3:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR3]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT3]], ptr %[[AP]] __builtin_va_list ap2; __builtin_va_copy(ap2, ap); // WIN64: call void @llvm.va_copy @@ -110,42 +101,36 @@ void f5(int a, ...) { // Verify that using a Win64 va_list from a System V function works. 
void __attribute__((sysv_abi)) f6(__builtin_ms_va_list ap) { // FREEBSD-LABEL: define{{.*}} void @f6 - // FREEBSD: store i8* %ap, i8** %[[AP:.*]] + // FREEBSD: store ptr %ap, ptr %[[AP:.*]] // WIN64-LABEL: define dso_local x86_64_sysvcc void @f6 - // WIN64: store i8* %ap, i8** %[[AP:.*]] + // WIN64: store ptr %ap, ptr %[[AP:.*]] int b = __builtin_va_arg(ap, int); - // FREEBSD: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // FREEBSD-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // FREEBSD-NEXT: bitcast i8* %[[AP_CUR]] to i32* - // WIN64: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR]] to i32* + // FREEBSD: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // FREEBSD-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] + // WIN64: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] double _Complex c = __builtin_va_arg(ap, double _Complex); - // FREEBSD: %[[AP_CUR2:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR2]], i64 8 - // FREEBSD-NEXT: store i8* %[[AP_NEXT2]], i8** %[[AP]] - // FREEBSD-NEXT: bitcast i8* %[[AP_CUR2]] to { double, double }** - // WIN64: %[[AP_CUR2:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR2]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT2]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR2]] to { double, double }** + // FREEBSD: %[[AP_CUR2:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR2]], i64 8 + // FREEBSD-NEXT: store ptr %[[AP_NEXT2]], ptr %[[AP]] + // WIN64: %[[AP_CUR2:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT2:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR2]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT2]], ptr %[[AP]] struct foo d = __builtin_va_arg(ap, struct foo); - // FREEBSD: %[[AP_CUR3:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR3]], i64 8 - // FREEBSD-NEXT: store i8* %[[AP_NEXT3]], i8** %[[AP]] - // FREEBSD-NEXT: bitcast i8* %[[AP_CUR3]] to %[[STRUCT_FOO]]** - // WIN64: %[[AP_CUR3:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR3]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT3]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR3]] to %[[STRUCT_FOO]]** + // FREEBSD: %[[AP_CUR3:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR3]], i64 8 + // FREEBSD-NEXT: store ptr %[[AP_NEXT3]], ptr %[[AP]] + // WIN64: %[[AP_CUR3:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR3]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT3]], ptr %[[AP]] __builtin_ms_va_list ap2; __builtin_ms_va_copy(ap2, ap); - // FREEBSD: %[[AP_VAL:.*]] = load i8*, i8** %[[AP]] - // FREEBSD-NEXT: store i8* %[[AP_VAL]], i8** %[[AP2:.*]] - // WIN64: %[[AP_VAL:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: store i8* %[[AP_VAL]], i8** %[[AP2:.*]] + // FREEBSD: %[[AP_VAL:.*]] = load ptr, ptr %[[AP]] + // FREEBSD-NEXT: store ptr 
%[[AP_VAL]], ptr %[[AP2:.*]] + // WIN64: %[[AP_VAL:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: store ptr %[[AP_VAL]], ptr %[[AP2:.*]] } // This test checks if structs are passed according to Win64 calling convention @@ -156,7 +141,7 @@ struct i128 { }; __attribute__((ms_abi)) struct i128 f7(struct i128 a) { - // WIN64: define dso_local void @f7(%struct.i128* noalias sret(%struct.i128) align 8 %agg.result, %struct.i128* noundef %a) - // FREEBSD: define{{.*}} win64cc void @f7(%struct.i128* noalias sret(%struct.i128) align 8 %agg.result, %struct.i128* noundef %a) + // WIN64: define dso_local void @f7(ptr noalias sret(%struct.i128) align 8 %agg.result, ptr noundef %a) + // FREEBSD: define{{.*}} win64cc void @f7(ptr noalias sret(%struct.i128) align 8 %agg.result, ptr noundef %a) return a; } diff --git a/clang/test/CodeGen/ms_abi_aarch64.c b/clang/test/CodeGen/ms_abi_aarch64.c index 96a1d8087f45..b8717e8738d0 100644 --- a/clang/test/CodeGen/ms_abi_aarch64.c +++ b/clang/test/CodeGen/ms_abi_aarch64.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple aarch64-linux-gnu -emit-llvm < %s | FileCheck -check-prefixes=LINUX,COMMON %s -// RUN: %clang_cc1 -no-opaque-pointers -triple aarch64-pc-win32 -emit-llvm < %s | FileCheck -check-prefixes=WIN64,COMMON %s +// RUN: %clang_cc1 -triple aarch64-linux-gnu -emit-llvm < %s | FileCheck -check-prefixes=LINUX,COMMON %s +// RUN: %clang_cc1 -triple aarch64-pc-win32 -emit-llvm < %s | FileCheck -check-prefixes=WIN64,COMMON %s struct small_odd { char a, b, c; @@ -31,17 +31,16 @@ void __attribute__((ms_abi)) f4(int a, ...) { // WIN64-LABEL: define dso_local void @f4 __builtin_ms_va_list ap; __builtin_ms_va_start(ap, a); - // COMMON: %[[AP:.*]] = alloca i8* + // COMMON: %[[AP:.*]] = alloca ptr // COMMON: call void @llvm.va_start int b = __builtin_va_arg(ap, int); - // COMMON: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // COMMON-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // COMMON-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // COMMON-NEXT: bitcast i8* %[[AP_CUR]] to i32* + // COMMON: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // COMMON-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // COMMON-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] __builtin_ms_va_list ap2; __builtin_ms_va_copy(ap2, ap); - // COMMON: %[[AP_VAL:.*]] = load i8*, i8** %[[AP]] - // COMMON-NEXT: store i8* %[[AP_VAL]], i8** %[[AP2:.*]] + // COMMON: %[[AP_VAL:.*]] = load ptr, ptr %[[AP]] + // COMMON-NEXT: store ptr %[[AP_VAL]], ptr %[[AP2:.*]] __builtin_ms_va_end(ap); // COMMON: call void @llvm.va_end } @@ -51,18 +50,16 @@ void __attribute__((ms_abi)) f4_2(int a, ...) 
{ // WIN64-LABEL: define dso_local void @f4_2 __builtin_ms_va_list ap; __builtin_ms_va_start(ap, a); - // COMMON: %[[AP:.*]] = alloca i8* + // COMMON: %[[AP:.*]] = alloca ptr // COMMON: call void @llvm.va_start struct small_odd s1 = __builtin_va_arg(ap, struct small_odd); - // COMMON: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // COMMON-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // COMMON-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // COMMON-NEXT: bitcast i8* %[[AP_CUR]] to %struct.small_odd* + // COMMON: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // COMMON-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // COMMON-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] struct larger s2 = __builtin_va_arg(ap, struct larger); - // COMMON: %[[AP_CUR2:.*]] = load i8*, i8** %[[AP]] - // COMMON-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR2]], i64 8 - // COMMON-NEXT: store i8* %[[AP_NEXT3]], i8** %[[AP]] - // COMMON-NEXT: bitcast i8* %[[AP_CUR2]] to %struct.larger** + // COMMON: %[[AP_CUR2:.*]] = load ptr, ptr %[[AP]] + // COMMON-NEXT: %[[AP_NEXT3:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR2]], i64 8 + // COMMON-NEXT: store ptr %[[AP_NEXT3]], ptr %[[AP]] __builtin_ms_va_end(ap); } @@ -71,13 +68,12 @@ void f5(int a, ...) { // WIN64-LABEL: define dso_local void @f5 __builtin_va_list ap; __builtin_va_start(ap, a); - // WIN64: %[[AP:.*]] = alloca i8* + // WIN64: %[[AP:.*]] = alloca ptr // WIN64: call void @llvm.va_start int b = __builtin_va_arg(ap, int); - // WIN64: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 8 - // WIN64-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // WIN64-NEXT: bitcast i8* %[[AP_CUR]] to i32* + // WIN64: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // WIN64-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 8 + // WIN64-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] __builtin_va_list ap2; __builtin_va_copy(ap2, ap); // WIN64: call void @llvm.va_copy @@ -111,12 +107,11 @@ __attribute__((ms_abi)) void get_msabi_hfa_vararg(int a, ...) 
{ // COMMON-LABEL: define{{.*}} void @get_msabi_hfa_vararg __builtin_ms_va_list ap; __builtin_ms_va_start(ap, a); - // COMMON: %[[AP:.*]] = alloca i8* + // COMMON: %[[AP:.*]] = alloca ptr // COMMON: call void @llvm.va_start struct HFA b = __builtin_va_arg(ap, struct HFA); - // COMMON: %[[AP_CUR:.*]] = load i8*, i8** %[[AP]] - // COMMON-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, i8* %[[AP_CUR]], i64 16 - // COMMON-NEXT: store i8* %[[AP_NEXT]], i8** %[[AP]] - // COMMON-NEXT: bitcast i8* %[[AP_CUR]] to %struct.HFA* + // COMMON: %[[AP_CUR:.*]] = load ptr, ptr %[[AP]] + // COMMON-NEXT: %[[AP_NEXT:.*]] = getelementptr inbounds i8, ptr %[[AP_CUR]], i64 16 + // COMMON-NEXT: store ptr %[[AP_NEXT]], ptr %[[AP]] __builtin_ms_va_end(ap); } diff --git a/clang/test/CodeGenCXX/homogeneous-aggregates.cpp b/clang/test/CodeGenCXX/homogeneous-aggregates.cpp index b4cca7c9ff00..972b0031a9e3 100644 --- a/clang/test/CodeGenCXX/homogeneous-aggregates.cpp +++ b/clang/test/CodeGenCXX/homogeneous-aggregates.cpp @@ -1,8 +1,8 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple powerpc64le-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=PPC -// RUN: %clang_cc1 -no-opaque-pointers -mfloat-abi hard -triple armv7-unknown-linux-gnueabi -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM32 -// RUN: %clang_cc1 -no-opaque-pointers -mfloat-abi hard -triple aarch64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM64 -// RUN: %clang_cc1 -no-opaque-pointers -mfloat-abi hard -triple x86_64-unknown-windows-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=X64 -// RUN: %clang_cc1 -no-opaque-pointers -mfloat-abi hard -triple aarch64-unknown-windows-msvc -emit-llvm -o - %s | FileCheck %s --check-prefix=WOA64 +// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=PPC +// RUN: %clang_cc1 -mfloat-abi hard -triple armv7-unknown-linux-gnueabi -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM32 +// RUN: %clang_cc1 -mfloat-abi hard -triple aarch64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM64 +// RUN: %clang_cc1 -mfloat-abi hard -triple x86_64-unknown-windows-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=X64 +// RUN: %clang_cc1 -mfloat-abi hard -triple aarch64-unknown-windows-msvc -emit-llvm -o - %s | FileCheck %s --check-prefix=WOA64 #if defined(__x86_64__) #define CC __attribute__((vectorcall)) @@ -39,10 +39,10 @@ struct I2 : Base2 {}; struct I3 : Base2 {}; struct D5 : I1, I2, I3 {}; // homogeneous aggregate -// PPC: define{{.*}} void @_Z7func_D12D1(%struct.D1* noalias sret(%struct.D1) align 8 %agg.result, [3 x i64] %x.coerce) -// ARM32: define{{.*}} arm_aapcs_vfpcc void @_Z7func_D12D1(%struct.D1* noalias sret(%struct.D1) align 8 %agg.result, [3 x i64] %x.coerce) -// ARM64: define{{.*}} void @_Z7func_D12D1(%struct.D1* noalias sret(%struct.D1) align 8 %agg.result, %struct.D1* noundef %x) -// X64: define dso_local x86_vectorcallcc void @"\01_Z7func_D12D1@@24"(%struct.D1* noalias sret(%struct.D1) align 8 %agg.result, %struct.D1* noundef %x) +// PPC: define{{.*}} void @_Z7func_D12D1(ptr noalias sret(%struct.D1) align 8 %agg.result, [3 x i64] %x.coerce) +// ARM32: define{{.*}} arm_aapcs_vfpcc void @_Z7func_D12D1(ptr noalias sret(%struct.D1) align 8 %agg.result, [3 x i64] %x.coerce) +// ARM64: define{{.*}} void @_Z7func_D12D1(ptr noalias sret(%struct.D1) align 8 %agg.result, ptr noundef %x) +// X64: define dso_local x86_vectorcallcc void @"\01_Z7func_D12D1@@24"(ptr noalias sret(%struct.D1) align 8 
%agg.result, ptr noundef %x) D1 CC func_D1(D1 x) { return x; } // PPC: define{{.*}} [3 x double] @_Z7func_D22D2([3 x double] %x.coerce) @@ -51,9 +51,9 @@ D1 CC func_D1(D1 x) { return x; } // X64: define dso_local x86_vectorcallcc %struct.D2 @"\01_Z7func_D22D2@@24"(%struct.D2 inreg %x.coerce) D2 CC func_D2(D2 x) { return x; } -// PPC: define{{.*}} void @_Z7func_D32D3(%struct.D3* noalias sret(%struct.D3) align 8 %agg.result, [4 x i64] %x.coerce) -// ARM32: define{{.*}} arm_aapcs_vfpcc void @_Z7func_D32D3(%struct.D3* noalias sret(%struct.D3) align 8 %agg.result, [4 x i64] %x.coerce) -// ARM64: define{{.*}} void @_Z7func_D32D3(%struct.D3* noalias sret(%struct.D3) align 8 %agg.result, %struct.D3* noundef %x) +// PPC: define{{.*}} void @_Z7func_D32D3(ptr noalias sret(%struct.D3) align 8 %agg.result, [4 x i64] %x.coerce) +// ARM32: define{{.*}} arm_aapcs_vfpcc void @_Z7func_D32D3(ptr noalias sret(%struct.D3) align 8 %agg.result, [4 x i64] %x.coerce) +// ARM64: define{{.*}} void @_Z7func_D32D3(ptr noalias sret(%struct.D3) align 8 %agg.result, ptr noundef %x) D3 CC func_D3(D3 x) { return x; } // PPC: define{{.*}} [4 x double] @_Z7func_D42D4([4 x double] %x.coerce) @@ -69,8 +69,7 @@ D5 CC func_D5(D5 x) { return x; } // do some extra checking. // // ARM64-LABEL: define{{.*}} %struct.D5 @_Z7func_D52D5([3 x double] %x.coerce) -// ARM64: bitcast %struct.D5* %{{.*}} to [3 x double]* -// ARM64: store [3 x double] %x.coerce, [3 x double]* +// ARM64: store [3 x double] %x.coerce, ptr void call_D5(D5 *p) { func_D5(*p); @@ -78,8 +77,8 @@ void call_D5(D5 *p) { // Check the call site. // -// ARM64-LABEL: define{{.*}} void @_Z7call_D5P2D5(%struct.D5* noundef %p) -// ARM64: load [3 x double], [3 x double]* +// ARM64-LABEL: define{{.*}} void @_Z7call_D5P2D5(ptr noundef %p) +// ARM64: load [3 x double], ptr // ARM64: call %struct.D5 @_Z7func_D52D5([3 x double] %{{.*}}) struct Empty { }; @@ -132,45 +131,45 @@ struct HasEmptyBase : public Empty { double b[2]; }; struct HasPodBase : public Pod {}; -// WOA64-LABEL: define dso_local %"struct.pr47611::Pod" @"?copy@pr47611@@YA?AUPod@1@PEAU21@@Z"(%"struct.pr47611::Pod"* noundef %x) +// WOA64-LABEL: define dso_local %"struct.pr47611::Pod" @"?copy@pr47611@@YA?AUPod@1@PEAU21@@Z"(ptr noundef %x) Pod copy(Pod *x) { return *x; } // MSVC: ldp d0,d1,[x0], Clang: ldp d0,d1,[x0] -// WOA64-LABEL: define dso_local void @"?copy@pr47611@@YA?AUNotCXX14Aggregate@1@PEAU21@@Z"(%"struct.pr47611::NotCXX14Aggregate"* inreg noalias sret(%"struct.pr47611::NotCXX14Aggregate") align 8 %agg.result, %"struct.pr47611::NotCXX14Aggregate"* noundef %x) +// WOA64-LABEL: define dso_local void @"?copy@pr47611@@YA?AUNotCXX14Aggregate@1@PEAU21@@Z"(ptr inreg noalias sret(%"struct.pr47611::NotCXX14Aggregate") align 8 %agg.result, ptr noundef %x) NotCXX14Aggregate copy(NotCXX14Aggregate *x) { return *x; } // MSVC: stp x8,x9,[x0], Clang: str q0,[x0] -// WOA64-LABEL: define dso_local [2 x i64] @"?copy@pr47611@@YA?AUNotPod@1@PEAU21@@Z"(%"struct.pr47611::NotPod"* noundef %x) +// WOA64-LABEL: define dso_local [2 x i64] @"?copy@pr47611@@YA?AUNotPod@1@PEAU21@@Z"(ptr noundef %x) NotPod copy(NotPod *x) { return *x; } -// WOA64-LABEL: define dso_local void @"?copy@pr47611@@YA?AUHasEmptyBase@1@PEAU21@@Z"(%"struct.pr47611::HasEmptyBase"* inreg noalias sret(%"struct.pr47611::HasEmptyBase") align 8 %agg.result, %"struct.pr47611::HasEmptyBase"* noundef %x) +// WOA64-LABEL: define dso_local void @"?copy@pr47611@@YA?AUHasEmptyBase@1@PEAU21@@Z"(ptr inreg noalias sret(%"struct.pr47611::HasEmptyBase") align 8 %agg.result, ptr 
noundef %x) HasEmptyBase copy(HasEmptyBase *x) { return *x; } -// WOA64-LABEL: define dso_local void @"?copy@pr47611@@YA?AUHasPodBase@1@PEAU21@@Z"(%"struct.pr47611::HasPodBase"* inreg noalias sret(%"struct.pr47611::HasPodBase") align 8 %agg.result, %"struct.pr47611::HasPodBase"* noundef %x) +// WOA64-LABEL: define dso_local void @"?copy@pr47611@@YA?AUHasPodBase@1@PEAU21@@Z"(ptr inreg noalias sret(%"struct.pr47611::HasPodBase") align 8 %agg.result, ptr noundef %x) HasPodBase copy(HasPodBase *x) { return *x; } void call_copy_pod(Pod *pod) { *pod = copy(pod); // WOA64-LABEL: define dso_local void @"?call_copy_pod@pr47611@@YAXPEAUPod@1@@Z" - // WOA64: %{{.*}} = call %"struct.pr47611::Pod" @"?copy@pr47611@@YA?AUPod@1@PEAU21@@Z"(%"struct.pr47611::Pod"* noundef %{{.*}}) + // WOA64: %{{.*}} = call %"struct.pr47611::Pod" @"?copy@pr47611@@YA?AUPod@1@PEAU21@@Z"(ptr noundef %{{.*}}) } void call_copy_notcxx14aggregate(NotCXX14Aggregate *notcxx14aggregate) { *notcxx14aggregate = copy(notcxx14aggregate); // WOA64-LABEL: define dso_local void @"?call_copy_notcxx14aggregate@pr47611@@YAXPEAUNotCXX14Aggregate@1@@Z" - // WOA64: call void @"?copy@pr47611@@YA?AUNotCXX14Aggregate@1@PEAU21@@Z"(%"struct.pr47611::NotCXX14Aggregate"* inreg sret(%"struct.pr47611::NotCXX14Aggregate") align 8 %{{.*}}, %"struct.pr47611::NotCXX14Aggregate"* noundef %{{.*}}) + // WOA64: call void @"?copy@pr47611@@YA?AUNotCXX14Aggregate@1@PEAU21@@Z"(ptr inreg sret(%"struct.pr47611::NotCXX14Aggregate") align 8 %{{.*}}, ptr noundef %{{.*}}) } void call_copy_notpod(NotPod *notPod) { *notPod = copy(notPod); // WOA64-LABEL: define dso_local void @"?call_copy_notpod@pr47611@@YAXPEAUNotPod@1@@Z" - // WOA64: %{{.*}} = call [2 x i64] @"?copy@pr47611@@YA?AUNotPod@1@PEAU21@@Z"(%"struct.pr47611::NotPod"* noundef %{{.*}}) + // WOA64: %{{.*}} = call [2 x i64] @"?copy@pr47611@@YA?AUNotPod@1@PEAU21@@Z"(ptr noundef %{{.*}}) } void call_copy_hasemptybase(HasEmptyBase *hasEmptyBase) { *hasEmptyBase = copy(hasEmptyBase); // WOA64-LABEL: define dso_local void @"?call_copy_hasemptybase@pr47611@@YAXPEAUHasEmptyBase@1@@Z" - // WOA64: call void @"?copy@pr47611@@YA?AUHasEmptyBase@1@PEAU21@@Z"(%"struct.pr47611::HasEmptyBase"* inreg sret(%"struct.pr47611::HasEmptyBase") align 8 %{{.*}}, %"struct.pr47611::HasEmptyBase"* noundef %{{.*}}) + // WOA64: call void @"?copy@pr47611@@YA?AUHasEmptyBase@1@PEAU21@@Z"(ptr inreg sret(%"struct.pr47611::HasEmptyBase") align 8 %{{.*}}, ptr noundef %{{.*}}) } void call_copy_haspodbase(HasPodBase *hasPodBase) { *hasPodBase = copy(hasPodBase); // WOA64-LABEL: define dso_local void @"?call_copy_haspodbase@pr47611@@YAXPEAUHasPodBase@1@@Z" - // WOA64: call void @"?copy@pr47611@@YA?AUHasPodBase@1@PEAU21@@Z"(%"struct.pr47611::HasPodBase"* inreg sret(%"struct.pr47611::HasPodBase") align 8 %{{.*}}, %"struct.pr47611::HasPodBase"* noundef %{{.*}}) + // WOA64: call void @"?copy@pr47611@@YA?AUHasPodBase@1@PEAU21@@Z"(ptr inreg sret(%"struct.pr47611::HasPodBase") align 8 %{{.*}}, ptr noundef %{{.*}}) } } // namespace pr47611 @@ -202,7 +201,7 @@ struct NonHFA { virtual void f1(); }; double foo(NonHFA v) { return v.x + v.y; } -// WOA64: define dso_local noundef double @"?foo@polymorphic@@YANUNonHFA@1@@Z"(%"struct.polymorphic::NonHFA"* noundef %{{.*}}) +// WOA64: define dso_local noundef double @"?foo@polymorphic@@YANUNonHFA@1@@Z"(ptr noundef %{{.*}}) } namespace trivial_copy_assignment { struct HFA { @@ -222,7 +221,7 @@ struct NonHFA { NonHFA &operator=(const NonHFA&); }; double foo(NonHFA v) { return v.x + v.y; } -// WOA64: define dso_local noundef 
double @"?foo@non_trivial_copy_assignment@@YANUNonHFA@1@@Z"(%"struct.non_trivial_copy_assignment::NonHFA"* noundef %{{.*}}) +// WOA64: define dso_local noundef double @"?foo@non_trivial_copy_assignment@@YANUNonHFA@1@@Z"(ptr noundef %{{.*}}) } namespace user_provided_ctor { struct HFA { @@ -252,7 +251,7 @@ struct NonHFA { ~NonHFA(); }; double foo(NonHFA v) { return v.x + v.y; } -// WOA64: define dso_local noundef double @"?foo@non_trivial_dtor@@YANUNonHFA@1@@Z"(%"struct.non_trivial_dtor::NonHFA"* noundef %{{.*}}) +// WOA64: define dso_local noundef double @"?foo@non_trivial_dtor@@YANUNonHFA@1@@Z"(ptr noundef %{{.*}}) } namespace non_empty_base { struct non_empty_base { double d; }; @@ -273,7 +272,7 @@ struct NonHFA { empty e; }; double foo(NonHFA v) { return v.x + v.y; } -// WOA64: define dso_local noundef double @"?foo@empty_field@@YANUNonHFA@1@@Z"(%"struct.empty_field::NonHFA"* noundef %{{.*}}) +// WOA64: define dso_local noundef double @"?foo@empty_field@@YANUNonHFA@1@@Z"(ptr noundef %{{.*}}) } namespace non_empty_field { struct non_empty { double d; }; diff --git a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp index 67daed2e9620..6e145c6d3697 100644 --- a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp +++ b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -no-opaque-pointers -std=c++11 -emit-llvm %s -o - -triple=i386-pc-win32 -mconstructor-aliases -fexceptions -fcxx-exceptions -fno-rtti | FileCheck -check-prefix WIN32 -check-prefix WIN32-O0 %s -// RUN: %clang_cc1 -no-opaque-pointers -std=c++11 -emit-llvm -O3 -disable-llvm-passes %s -o - -triple=i386-pc-win32 -mconstructor-aliases -fexceptions -fcxx-exceptions -fno-rtti | FileCheck -check-prefix WIN32 -check-prefix WIN32-O3 -check-prefix WIN32-LIFETIME %s +// RUN: %clang_cc1 -std=c++11 -emit-llvm %s -o - -triple=i386-pc-win32 -mconstructor-aliases -fexceptions -fcxx-exceptions -fno-rtti | FileCheck -check-prefix WIN32 -check-prefix WIN32-O0 %s +// RUN: %clang_cc1 -std=c++11 -emit-llvm -O3 -disable-llvm-passes %s -o - -triple=i386-pc-win32 -mconstructor-aliases -fexceptions -fcxx-exceptions -fno-rtti | FileCheck -check-prefix WIN32 -check-prefix WIN32-O3 -check-prefix WIN32-LIFETIME %s struct A { A(); @@ -16,11 +16,11 @@ void HasEHCleanup() { // With exceptions, we need to clean up at least one of these temporaries. // WIN32-LABEL: define dso_local void @"?HasEHCleanup@@YAXXZ"() {{.*}} { -// WIN32: %[[base:.*]] = call i8* @llvm.stacksave() +// WIN32: %[[base:.*]] = call ptr @llvm.stacksave() // If this call throws, we have to restore the stack. -// WIN32: call void @"?getA@@YA?AUA@@XZ"(%struct.A* sret(%struct.A) align 4 %{{.*}}) +// WIN32: call void @"?getA@@YA?AUA@@XZ"(ptr sret(%struct.A) align 4 %{{.*}}) // If this call throws, we have to cleanup the first temporary. -// WIN32: invoke void @"?getA@@YA?AUA@@XZ"(%struct.A* sret(%struct.A) align 4 %{{.*}}) +// WIN32: invoke void @"?getA@@YA?AUA@@XZ"(ptr sret(%struct.A) align 4 %{{.*}}) // If this call throws, we have to cleanup the stacksave. // WIN32: call noundef i32 @"?TakesTwo@@YAHUA@@0@Z" // WIN32: call void @llvm.stackrestore @@ -41,9 +41,9 @@ void HasEHCleanupNoexcept() noexcept { // With exceptions, we need to clean up at least one of these temporaries. 
// WIN32-LABEL: define dso_local void @"?HasEHCleanupNoexcept@@YAXXZ"() {{.*}} { -// WIN32: %[[base:.*]] = call i8* @llvm.stacksave() -// WIN32: invoke void @"?getA@@YA?AUA@@XZ"(%struct.A* sret(%struct.A) align 4 %{{.*}}) -// WIN32: invoke void @"?getA@@YA?AUA@@XZ"(%struct.A* sret(%struct.A) align 4 %{{.*}}) +// WIN32: %[[base:.*]] = call ptr @llvm.stacksave() +// WIN32: invoke void @"?getA@@YA?AUA@@XZ"(ptr sret(%struct.A) align 4 %{{.*}}) +// WIN32: invoke void @"?getA@@YA?AUA@@XZ"(ptr sret(%struct.A) align 4 %{{.*}}) // WIN32: invoke noundef i32 @"?TakesTwo@@YAHUA@@0@Z" // WIN32: call void @llvm.stackrestore // WIN32: ret void @@ -61,31 +61,31 @@ int HasDeactivatedCleanups() { // WIN32-LABEL: define dso_local noundef i32 @"?HasDeactivatedCleanups@@YAHXZ"() {{.*}} { // WIN32: %[[isactive:.*]] = alloca i1 -// WIN32: call i8* @llvm.stacksave() +// WIN32: call ptr @llvm.stacksave() // WIN32: %[[argmem:.*]] = alloca inalloca [[argmem_ty:<{ %struct.A, %struct.A }>]] -// WIN32: %[[arg1:.*]] = getelementptr inbounds [[argmem_ty]], [[argmem_ty]]* %[[argmem]], i32 0, i32 1 -// WIN32: call x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32: %[[arg1:.*]] = getelementptr inbounds [[argmem_ty]], ptr %[[argmem]], i32 0, i32 1 +// WIN32: call x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32: invoke void @"?TakeRef@@YAXABUA@@@Z" // -// WIN32: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ"(%struct.A* {{[^,]*}} %[[arg1]]) -// WIN32: store i1 true, i1* %[[isactive]] +// WIN32: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ"(ptr {{[^,]*}} %[[arg1]]) +// WIN32: store i1 true, ptr %[[isactive]] // -// WIN32: %[[arg0:.*]] = getelementptr inbounds [[argmem_ty]], [[argmem_ty]]* %[[argmem]], i32 0, i32 0 -// WIN32: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32: %[[arg0:.*]] = getelementptr inbounds [[argmem_ty]], ptr %[[argmem]], i32 0, i32 0 +// WIN32: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32: invoke void @"?TakeRef@@YAXABUA@@@Z" -// WIN32: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" -// WIN32: store i1 false, i1* %[[isactive]] +// WIN32: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" +// WIN32: store i1 false, ptr %[[isactive]] // -// WIN32: invoke noundef i32 @"?TakesTwo@@YAHUA@@0@Z"([[argmem_ty]]* inalloca([[argmem_ty]]) %[[argmem]]) +// WIN32: invoke noundef i32 @"?TakesTwo@@YAHUA@@0@Z"(ptr inalloca([[argmem_ty]]) %[[argmem]]) // Destroy the two const ref temporaries. // WIN32: call x86_thiscallcc void @"??1A@@QAE@XZ"({{.*}}) // WIN32: call x86_thiscallcc void @"??1A@@QAE@XZ"({{.*}}) // WIN32: ret i32 // // Conditionally destroy arg1. -// WIN32: %[[cond:.*]] = load i1, i1* %[[isactive]] +// WIN32: %[[cond:.*]] = load i1, ptr %[[isactive]] // WIN32: br i1 %[[cond]] -// WIN32: call x86_thiscallcc void @"??1A@@QAE@XZ"(%struct.A* {{[^,]*}} %[[arg1]]) +// WIN32: call x86_thiscallcc void @"??1A@@QAE@XZ"(ptr {{[^,]*}} %[[arg1]]) // WIN32: } // Test putting the cleanups inside a conditional. 
@@ -97,10 +97,10 @@ int HasConditionalCleanup(bool cond) { // WIN32-LABEL: define dso_local noundef i32 @"?HasConditionalCleanup@@YAH_N@Z"(i1 noundef zeroext %{{.*}}) {{.*}} { // WIN32: store i1 false // WIN32: br i1 -// WIN32: call i8* @llvm.stacksave() -// WIN32: call x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ"(%struct.A* {{[^,]*}} %{{.*}}) +// WIN32: call ptr @llvm.stacksave() +// WIN32: call x86_thiscallcc noundef ptr @"??0A@@QAE@XZ"(ptr {{[^,]*}} %{{.*}}) // WIN32: store i1 true -// WIN32: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ"(%struct.A* {{[^,]*}} %{{.*}}) +// WIN32: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ"(ptr {{[^,]*}} %{{.*}}) // WIN32: call noundef i32 @"?TakesTwo@@YAHUA@@0@Z" // // WIN32: call void @llvm.stackrestore @@ -127,17 +127,17 @@ int HasConditionalDeactivatedCleanups(bool cond) { // WIN32-O0: store i1 false // WIN32-O0: br i1 // True condition. -// WIN32-O0: call x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32-O0: call x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32-O0: store i1 true // WIN32-O0: invoke void @"?TakeRef@@YAXABUA@@@Z" -// WIN32-O0: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" -// WIN32-O0: store i1 true, i1* %[[arg1_cond]] -// WIN32-O0: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32-O0: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" +// WIN32-O0: store i1 true, ptr %[[arg1_cond]] +// WIN32-O0: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32-O0: store i1 true // WIN32-O0: invoke void @"?TakeRef@@YAXABUA@@@Z" -// WIN32-O0: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32-O0: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32-O0: store i1 true -// WIN32-O0: store i1 false, i1* %[[arg1_cond]] +// WIN32-O0: store i1 false, ptr %[[arg1_cond]] // WIN32-O0: invoke noundef i32 @"?TakesTwo@@YAHUA@@0@Z" // False condition. // WIN32-O0: invoke noundef i32 @"?CouldThrow@@YAHXZ"() @@ -147,7 +147,7 @@ int HasConditionalDeactivatedCleanups(bool cond) { // WIN32-O0: ret i32 // // Somewhere in the landing pad soup, we conditionally destroy arg1. -// WIN32-O0: %[[isactive:.*]] = load i1, i1* %[[arg1_cond]] +// WIN32-O0: %[[isactive:.*]] = load i1, ptr %[[arg1_cond]] // WIN32-O0: br i1 %[[isactive]] // WIN32-O0: call x86_thiscallcc void @"??1A@@QAE@XZ"({{.*}}) // WIN32-O0: } @@ -165,17 +165,17 @@ int HasConditionalDeactivatedCleanups(bool cond) { // WIN32-O3: store i1 false // WIN32-O3: br i1 // True condition. -// WIN32-O3: call x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32-O3: call x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32-O3: store i1 true // WIN32-O3: invoke void @"?TakeRef@@YAXABUA@@@Z" -// WIN32-O3: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" -// WIN32-O3: store i1 true, i1* %[[arg1_cond]] -// WIN32-O3: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32-O3: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" +// WIN32-O3: store i1 true, ptr %[[arg1_cond]] +// WIN32-O3: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32-O3: store i1 true // WIN32-O3: invoke void @"?TakeRef@@YAXABUA@@@Z" -// WIN32-O3: invoke x86_thiscallcc noundef %struct.A* @"??0A@@QAE@XZ" +// WIN32-O3: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" // WIN32-O3: store i1 true -// WIN32-O3: store i1 false, i1* %[[arg1_cond]] +// WIN32-O3: store i1 false, ptr %[[arg1_cond]] // WIN32-O3: invoke noundef i32 @"?TakesTwo@@YAHUA@@0@Z" // False condition. 
// WIN32-O3: invoke noundef i32 @"?CouldThrow@@YAHXZ"() @@ -185,7 +185,7 @@ int HasConditionalDeactivatedCleanups(bool cond) { // WIN32-O3: ret i32 // // Somewhere in the landing pad soup, we conditionally destroy arg1. -// WIN32-O3: %[[isactive:.*]] = load i1, i1* %[[arg1_cond]] +// WIN32-O3: %[[isactive:.*]] = load i1, ptr %[[arg1_cond]] // WIN32-O3: br i1 %[[isactive]] // WIN32-O3: call x86_thiscallcc void @"??1A@@QAE@XZ"({{.*}}) // WIN32-O3: } @@ -214,17 +214,14 @@ C::C() { foo(); } // // We shouldn't do any vbptr loads, just constant GEPs. // WIN32-NOT: load -// WIN32: getelementptr i8, i8* %{{.*}}, i32 4 +// WIN32: getelementptr i8, ptr %{{.*}}, i32 4 // WIN32-NOT: load -// WIN32: bitcast i8* %{{.*}} to %"struct.crash_on_partial_destroy::B"* // WIN32: call x86_thiscallcc void @"??1B@crash_on_partial_destroy@@UAE@XZ" // // WIN32-NOT: load -// WIN32: bitcast %"struct.crash_on_partial_destroy::C"* %{{.*}} to i8* // WIN32-NOT: load -// WIN32: getelementptr inbounds i8, i8* %{{.*}}, i32 4 +// WIN32: getelementptr inbounds i8, ptr %{{.*}}, i32 4 // WIN32-NOT: load -// WIN32: bitcast i8* %{{.*}} to %"struct.crash_on_partial_destroy::A"* // WIN32: call x86_thiscallcc void @"??1A@crash_on_partial_destroy@@UAE@XZ"({{.*}}) // WIN32: } } @@ -263,9 +260,9 @@ void f() { // WIN32-LABEL: define dso_local void @"?f@noexcept_false_dtor@@YAXXZ"() // WIN32: invoke noundef i32 @"?CouldThrow@@YAHXZ"() -// WIN32: call x86_thiscallcc void @"??1D@noexcept_false_dtor@@QAE@XZ"(%"struct.noexcept_false_dtor::D"* {{[^,]*}} %{{.*}}) +// WIN32: call x86_thiscallcc void @"??1D@noexcept_false_dtor@@QAE@XZ"(ptr {{[^,]*}} %{{.*}}) // WIN32: cleanuppad -// WIN32: call x86_thiscallcc void @"??1D@noexcept_false_dtor@@QAE@XZ"(%"struct.noexcept_false_dtor::D"* {{[^,]*}} %{{.*}}) +// WIN32: call x86_thiscallcc void @"??1D@noexcept_false_dtor@@QAE@XZ"(ptr {{[^,]*}} %{{.*}}) // WIN32: cleanupret namespace lifetime_marker { @@ -280,15 +277,13 @@ void f() { // WIN32-LIFETIME-LABEL: define dso_local void @"?f@lifetime_marker@@YAXXZ"() // WIN32-LIFETIME: %[[c:.*]] = alloca %"struct.lifetime_marker::C" -// WIN32-LIFETIME: %[[bc0:.*]] = bitcast %"struct.lifetime_marker::C"* %c to i8* -// WIN32-LIFETIME: call void @llvm.lifetime.start.p0i8(i64 1, i8* %[[bc0]]) +// WIN32-LIFETIME: call void @llvm.lifetime.start.p0(i64 1, ptr %c) // WIN32-LIFETIME: invoke void @"?g@lifetime_marker@@YAXXZ"() // WIN32-LIFETIME-NEXT: to label %[[cont:[^ ]*]] unwind label %[[lpad0:[^ ]*]] // // WIN32-LIFETIME: [[cont]] // WIN32-LIFETIME: call x86_thiscallcc void @"??1C@lifetime_marker@@QAE@XZ"({{.*}}) -// WIN32-LIFETIME: %[[bc1:.*]] = bitcast %"struct.lifetime_marker::C"* %[[c]] to i8* -// WIN32-LIFETIME: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[bc1]]) +// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(i64 1, ptr %[[c]]) // // WIN32-LIFETIME: [[lpad0]] // WIN32-LIFETIME-NEXT: cleanuppad @@ -297,8 +292,7 @@ void f() { // // WIN32-LIFETIME: [[lpad1]] // WIN32-LIFETIME-NEXT: cleanuppad -// WIN32-LIFETIME: %[[bc2:.*]] = bitcast %"struct.lifetime_marker::C"* %[[c]] to i8* -// WIN32-LIFETIME: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[bc2]]) +// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(i64 1, ptr %[[c]]) } struct class_2 { @@ -315,9 +309,9 @@ struct class_0 : class_1 { }; class_0::class_0() { - // WIN32: define dso_local x86_thiscallcc noundef %struct.class_0* @"??0class_0@@QAE@XZ"(%struct.class_0* {{[^,]*}} returned align 4 dereferenceable(4) %this, i32 noundef %is_most_derived) - // WIN32: store i32 %is_most_derived, i32* 
%[[IS_MOST_DERIVED_VAR:.*]], align 4 - // WIN32: %[[IS_MOST_DERIVED_VAL:.*]] = load i32, i32* %[[IS_MOST_DERIVED_VAR]] + // WIN32: define dso_local x86_thiscallcc noundef ptr @"??0class_0@@QAE@XZ"(ptr {{[^,]*}} returned align 4 dereferenceable(4) %this, i32 noundef %is_most_derived) + // WIN32: store i32 %is_most_derived, ptr %[[IS_MOST_DERIVED_VAR:.*]], align 4 + // WIN32: %[[IS_MOST_DERIVED_VAL:.*]] = load i32, ptr %[[IS_MOST_DERIVED_VAR]] // WIN32: %[[SHOULD_CALL_VBASE_CTORS:.*]] = icmp ne i32 %[[IS_MOST_DERIVED_VAL]], 0 // WIN32: br i1 %[[SHOULD_CALL_VBASE_CTORS]], label %[[INIT_VBASES:.*]], label %[[SKIP_VBASES:.*]] // WIN32: [[INIT_VBASES]] @@ -325,9 +319,7 @@ class_0::class_0() { // WIN32: [[SKIP_VBASES]] // ehcleanup: // WIN32: %[[CLEANUPPAD:.*]] = cleanuppad within none [] - // WIN32-NEXT: bitcast %{{.*}}* %{{.*}} to i8* - // WIN32-NEXT: getelementptr inbounds i8, i8* %{{.*}}, i{{.*}} {{.}} - // WIN32-NEXT: bitcast i8* %{{.*}} to %{{.*}}* + // WIN32-NEXT: getelementptr inbounds i8, ptr %{{.*}}, i{{.*}} {{.}} // WIN32-NEXT: %[[SHOULD_CALL_VBASE_DTOR:.*]] = icmp ne i32 %[[IS_MOST_DERIVED_VAL]], 0 // WIN32-NEXT: br i1 %[[SHOULD_CALL_VBASE_DTOR]], label %[[DTOR_VBASE:.*]], label %[[SKIP_VBASE:.*]] // WIN32: [[DTOR_VBASE]] @@ -341,7 +333,7 @@ namespace PR37146 { // non-trival C structs. // WIN32: define dso_local void @"?test@PR37146@@YAXXZ"() -// WIN32: call void @llvm.memset.p0i8.i32( +// WIN32: call void @llvm.memset.p0.i32( // WIN32: call i32 @"?getS@PR37146@@YA?AUS@1@XZ"( // WIN32: call void @"?func@PR37146@@YAXUS@1@0@Z"( // WIN32-NEXT: ret void diff --git a/clang/test/CodeGenCoroutines/coro-unhandled-exception-exp-namespace.cpp b/clang/test/CodeGenCoroutines/coro-unhandled-exception-exp-namespace.cpp index 0bce7b1720da..2c260b1cbb89 100644 --- a/clang/test/CodeGenCoroutines/coro-unhandled-exception-exp-namespace.cpp +++ b/clang/test/CodeGenCoroutines/coro-unhandled-exception-exp-namespace.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -no-opaque-pointers -std=c++14 -fcoroutines-ts -triple=x86_64-pc-windows-msvc18.0.0 -emit-llvm %s -o - -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck %s -// RUN: %clang_cc1 -no-opaque-pointers -std=c++14 -fcoroutines-ts -triple=x86_64-unknown-linux-gnu -emit-llvm -o - %s -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck --check-prefix=CHECK-LPAD %s +// RUN: %clang_cc1 -std=c++14 -fcoroutines-ts -triple=x86_64-pc-windows-msvc18.0.0 -emit-llvm %s -o - -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck %s +// RUN: %clang_cc1 -std=c++14 -fcoroutines-ts -triple=x86_64-unknown-linux-gnu -emit-llvm -o - %s -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck --check-prefix=CHECK-LPAD %s #include "Inputs/coroutine-exp-namespace.h" @@ -52,20 +52,19 @@ coro_t f() { // CHECK: [[TRYCONT]]: // CHECK-NEXT: br label %[[COROFIN:.+]] // CHECK: [[COROFIN]]: -// CHECK-NEXT: bitcast %"struct.std::experimental::coroutines_v1::suspend_never"* %{{.+}} to i8* -// CHECK-NEXT: call void @llvm.lifetime.start.p0i8( +// CHECK-NEXT: call void @llvm.lifetime.start.p0( // CHECK-NEXT: call void @"?final_suspend@promise_type@coro_t@@QEAA?AUsuspend_never@coroutines_v1@experimental@std@@XZ"( // CHECK-LPAD: @_Z1fv( // CHECK-LPAD: invoke void @_Z9may_throwv() // CHECK-LPAD: to label %[[CONT:.+]] unwind label %[[CLEANUP:.+]] // CHECK-LPAD: [[CLEANUP]]: -// CHECK-LPAD: call void @_ZN7CleanupD1Ev(%struct.Cleanup* {{[^,]*}} %x) #2 +// CHECK-LPAD: call void @_ZN7CleanupD1Ev(ptr {{[^,]*}} %x) #2 // CHECK-LPAD: br label %[[CATCH:.+]] // CHECK-LPAD: 
[[CATCH]]: -// CHECK-LPAD: call i8* @__cxa_begin_catch -// CHECK-LPAD: call void @_ZN6coro_t12promise_type19unhandled_exceptionEv(%"struct.coro_t::promise_type"* {{[^,]*}} %__promise) #2 +// CHECK-LPAD: call ptr @__cxa_begin_catch +// CHECK-LPAD: call void @_ZN6coro_t12promise_type19unhandled_exceptionEv(ptr {{[^,]*}} %__promise) #2 // CHECK-LPAD: invoke void @__cxa_end_catch() // CHECK-LPAD-NEXT: to label %[[CATCHRETDEST:.+]] unwind label // CHECK-LPAD: [[CATCHRETDEST]]: @@ -73,6 +72,5 @@ coro_t f() { // CHECK-LPAD: [[TRYCONT]]: // CHECK-LPAD: br label %[[COROFIN:.+]] // CHECK-LPAD: [[COROFIN]]: -// CHECK-LPAD-NEXT: bitcast %"struct.std::experimental::coroutines_v1::suspend_never"* %{{.+}} to i8* -// CHECK-LPAD-NEXT: call void @llvm.lifetime.start.p0i8( +// CHECK-LPAD-NEXT: call void @llvm.lifetime.start.p0( // CHECK-LPAD-NEXT: call void @_ZN6coro_t12promise_type13final_suspendEv( diff --git a/clang/test/CodeGenCoroutines/coro-unhandled-exception.cpp b/clang/test/CodeGenCoroutines/coro-unhandled-exception.cpp index 0d253d9045c2..97ef983b7523 100644 --- a/clang/test/CodeGenCoroutines/coro-unhandled-exception.cpp +++ b/clang/test/CodeGenCoroutines/coro-unhandled-exception.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -no-opaque-pointers -std=c++20 -triple=x86_64-pc-windows-msvc18.0.0 -emit-llvm %s -o - -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck %s -// RUN: %clang_cc1 -no-opaque-pointers -std=c++20 -triple=x86_64-unknown-linux-gnu -emit-llvm -o - %s -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck --check-prefix=CHECK-LPAD %s +// RUN: %clang_cc1 -std=c++20 -triple=x86_64-pc-windows-msvc18.0.0 -emit-llvm %s -o - -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck %s +// RUN: %clang_cc1 -std=c++20 -triple=x86_64-unknown-linux-gnu -emit-llvm -o - %s -fexceptions -fcxx-exceptions -disable-llvm-passes | FileCheck --check-prefix=CHECK-LPAD %s #include "Inputs/coroutine.h" @@ -48,20 +48,19 @@ coro_t f() { // CHECK: [[TRYCONT]]: // CHECK-NEXT: br label %[[COROFIN:.+]] // CHECK: [[COROFIN]]: -// CHECK-NEXT: bitcast %"struct.std::suspend_never"* %{{.+}} to i8* -// CHECK-NEXT: call void @llvm.lifetime.start.p0i8( +// CHECK-NEXT: call void @llvm.lifetime.start.p0( // CHECK-NEXT: call void @"?final_suspend@promise_type@coro_t@@QEAA?AUsuspend_never@std@@XZ"( // CHECK-LPAD: @_Z1fv( // CHECK-LPAD: invoke void @_Z9may_throwv() // CHECK-LPAD: to label %[[CONT:.+]] unwind label %[[CLEANUP:.+]] // CHECK-LPAD: [[CLEANUP]]: -// CHECK-LPAD: call void @_ZN7CleanupD1Ev(%struct.Cleanup* {{[^,]*}} %x) #2 +// CHECK-LPAD: call void @_ZN7CleanupD1Ev(ptr {{[^,]*}} %x) #2 // CHECK-LPAD: br label %[[CATCH:.+]] // CHECK-LPAD: [[CATCH]]: -// CHECK-LPAD: call i8* @__cxa_begin_catch -// CHECK-LPAD: call void @_ZN6coro_t12promise_type19unhandled_exceptionEv(%"struct.coro_t::promise_type"* {{[^,]*}} %__promise) #2 +// CHECK-LPAD: call ptr @__cxa_begin_catch +// CHECK-LPAD: call void @_ZN6coro_t12promise_type19unhandled_exceptionEv(ptr {{[^,]*}} %__promise) #2 // CHECK-LPAD: invoke void @__cxa_end_catch() // CHECK-LPAD-NEXT: to label %[[CATCHRETDEST:.+]] unwind label // CHECK-LPAD: [[CATCHRETDEST]]: @@ -69,6 +68,5 @@ coro_t f() { // CHECK-LPAD: [[TRYCONT]]: // CHECK-LPAD: br label %[[COROFIN:.+]] // CHECK-LPAD: [[COROFIN]]: -// CHECK-LPAD-NEXT: bitcast %"struct.std::suspend_never"* %{{.+}} to i8* -// CHECK-LPAD-NEXT: call void @llvm.lifetime.start.p0i8( +// CHECK-LPAD-NEXT: call void @llvm.lifetime.start.p0( // CHECK-LPAD-NEXT: call void @_ZN6coro_t12promise_type13final_suspendEv( diff --git 
a/clang/test/CodeGenObjC/arc-unsafeclaim.m b/clang/test/CodeGenObjC/arc-unsafeclaim.m index 190d2a1a7e2b..9c2b215df276 100644 --- a/clang/test/CodeGenObjC/arc-unsafeclaim.m +++ b/clang/test/CodeGenObjC/arc-unsafeclaim.m @@ -1,21 +1,21 @@ // Make sure it works on x86-64. -// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin11 -fobjc-runtime=macosx-10.11 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=NOTAIL-CALL +// RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fobjc-runtime=macosx-10.11 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=NOTAIL-CALL -// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin11 -fobjc-runtime=macosx-10.11 -fobjc-arc -emit-llvm -O2 -disable-llvm-passes -o - %s | FileCheck %s -check-prefix=ATTACHED-CALL +// RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fobjc-runtime=macosx-10.11 -fobjc-arc -emit-llvm -O2 -disable-llvm-passes -o - %s | FileCheck %s -check-prefix=ATTACHED-CALL // Make sure it works on x86-32. -// RUN: %clang_cc1 -no-opaque-pointers -triple i386-apple-darwin11 -fobjc-runtime=macosx-fragile-10.11 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL +// RUN: %clang_cc1 -triple i386-apple-darwin11 -fobjc-runtime=macosx-fragile-10.11 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL // Make sure it works on ARM64. -// RUN: %clang_cc1 -no-opaque-pointers -triple arm64-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL +// RUN: %clang_cc1 -triple arm64-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL // Make sure it works on ARM. -// RUN: %clang_cc1 -no-opaque-pointers -triple armv7-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL -// RUN: %clang_cc1 -no-opaque-pointers -triple armv7-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -O -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPTIMIZED -check-prefix=CALL +// RUN: %clang_cc1 -triple armv7-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL +// RUN: %clang_cc1 -triple armv7-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -O -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPTIMIZED -check-prefix=CALL // Make sure that it's implicitly disabled if the runtime version isn't high enough. 
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin10 -fobjc-runtime=macosx-10.10 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=DISABLED
-// RUN: %clang_cc1 -no-opaque-pointers -triple arm64-apple-ios8 -fobjc-runtime=ios-8 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=DISABLED -check-prefix=DISABLED-MARKED
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-runtime=macosx-10.10 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=DISABLED
+// RUN: %clang_cc1 -triple arm64-apple-ios8 -fobjc-runtime=ios-8 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=DISABLED -check-prefix=DISABLED-MARKED
 @class A;
@@ -26,54 +26,44 @@ void test_assign(void) {
   x = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_assign()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A:.*]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CALL-NEXT: [[T2:%.*]] = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-OPTIMIZED-NEXT: bitcast
+// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CALL-NEXT: [[T2:%.*]] = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // DISABLED-LABEL: define{{.*}} void @test_assign()
-// DISABLED: [[T0:%.*]] = call [[A:.*]]* @makeA()
+// DISABLED: [[T0:%.*]] = call ptr @makeA()
 // DISABLED-MARKED-NEXT: call void asm sideeffect
-// DISABLED-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// DISABLED-NEXT: [[T2:%.*]] = {{.*}}call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* [[T1]])
+// DISABLED-NEXT: [[T2:%.*]] = {{.*}}call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr [[T0]])
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_assign()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_assign_assign(void) {
   __unsafe_unretained id x, y;
   x = y = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_assign_assign()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[Y:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[Y:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CALL-NEXT: [[T2:%.*]] = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: store i8* [[T4]], i8** [[Y]]
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-OPTIMIZED-NEXT: bitcast
+// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CALL-NEXT: [[T2:%.*]] = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[Y]]
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
 // CHECK-OPTIMIZED-NEXT: lifetime.end
-// CHECK-OPTIMIZED-NEXT: bitcast
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_assign_assign()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_strong_assign_assign(void) {
   __strong id x;
@@ -81,30 +71,25 @@ void test_strong_assign_assign(void) {
   x = y = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_strong_assign_assign()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[Y:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[Y:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// CHECK-NEXT: [[T2:%.*]] = {{.*}}call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: store i8* [[T4]], i8** [[Y]]
-// CHECK-NEXT: [[OLD:%.*]] = load i8*, i8** [[X]]
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-NEXT: call void @llvm.objc.release(i8* [[OLD]]
-// CHECK-OPTIMIZED-NEXT: bitcast
+// CHECK-NEXT: [[T2:%.*]] = {{.*}}call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[Y]]
+// CHECK-NEXT: [[OLD:%.*]] = load ptr, ptr [[X]]
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
+// CHECK-NEXT: call void @llvm.objc.release(ptr [[OLD]]
 // CHECK-OPTIMIZED-NEXT: lifetime.end
-// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(i8** [[X]], i8* null)
-// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load i8*, i8** [[X]]
-// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(i8* [[T0]])
-// CHECK-OPTIMIZED-NEXT: bitcast
+// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(ptr [[X]], ptr null)
+// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
+// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(ptr [[T0]])
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_strong_assign_assign()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_assign_strong_assign(void) {
   __unsafe_unretained id x;
@@ -112,167 +97,139 @@ void test_assign_strong_assign(void) {
   x = y = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_assign_strong_assign()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[Y:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[Y:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// CHECK-NEXT: [[T2:%.*]] = {{.*}}call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: [[OLD:%.*]] = load i8*, i8** [[Y]]
-// CHECK-NEXT: store i8* [[T4]], i8** [[Y]]
-// CHECK-NEXT: call void @llvm.objc.release(i8* [[OLD]]
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(i8** [[Y]], i8* null)
-// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]]
-// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(i8* [[T0]])
-// CHECK-OPTIMIZED-NEXT: bitcast
+// CHECK-NEXT: [[T2:%.*]] = {{.*}}call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: [[OLD:%.*]] = load ptr, ptr [[Y]]
+// CHECK-NEXT: store ptr [[T2]], ptr [[Y]]
+// CHECK-NEXT: call void @llvm.objc.release(ptr [[OLD]]
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
+// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(ptr [[Y]], ptr null)
+// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
+// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(ptr [[T0]])
 // CHECK-OPTIMIZED-NEXT: lifetime.end
-// CHECK-OPTIMIZED-NEXT: bitcast
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_assign_strong_assign()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_init(void) {
   __unsafe_unretained id x = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_init()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CALL-NEXT: [[T2:%.*]] = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-OPTIMIZED-NEXT: bitcast
+// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CALL-NEXT: [[T2:%.*]] = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_init()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_init_assignment(void) {
   __unsafe_unretained id x;
   __unsafe_unretained id y = x = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_init_assignment()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[Y:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[Y:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CALL-NEXT: [[T2:%.*]] = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-NEXT: store i8* [[T4]], i8** [[Y]]
-// CHECK-OPTIMIZED-NEXT: bitcast
+// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CALL-NEXT: [[T2:%.*]] = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
+// CHECK-NEXT: store ptr [[T2]], ptr [[Y]]
 // CHECK-OPTIMIZED-NEXT: lifetime.end
-// CHECK-OPTIMIZED-NEXT: bitcast
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_init_assignment()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_strong_init_assignment(void) {
   __unsafe_unretained id x;
   __strong id y = x = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_strong_init_assignment()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[Y:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[Y:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// CHECK-NEXT: [[T2:%.*]] = {{.*}}call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-NEXT: store i8* [[T4]], i8** [[Y]]
-// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(i8** [[Y]], i8* null)
-// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]]
-// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(i8* [[T0]])
-// CHECK-OPTIMIZED-NEXT: bitcast
+// CHECK-NEXT: [[T2:%.*]] = {{.*}}call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
+// CHECK-NEXT: store ptr [[T2]], ptr [[Y]]
+// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(ptr [[Y]], ptr null)
+// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
+// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(ptr [[T0]])
 // CHECK-OPTIMIZED-NEXT: lifetime.end
-// CHECK-OPTIMIZED-NEXT: bitcast
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_strong_init_assignment()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_init_strong_assignment(void) {
   __strong id x;
   __unsafe_unretained id y = x = makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_init_strong_assignment()
-// CHECK: [[X:%.*]] = alloca i8*
-// CHECK: [[Y:%.*]] = alloca i8*
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[X:%.*]] = alloca ptr
+// CHECK: [[Y:%.*]] = alloca ptr
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// CHECK-NEXT: [[T2:%.*]] = {{.*}}call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]*
-// CHECK-NEXT: [[T4:%.*]] = bitcast [[A]]* [[T3]] to i8*
-// CHECK-NEXT: [[OLD:%.*]] = load i8*, i8** [[X]]
-// CHECK-NEXT: store i8* [[T4]], i8** [[X]]
-// CHECK-NEXT: call void @llvm.objc.release(i8* [[OLD]])
-// CHECK-NEXT: store i8* [[T4]], i8** [[Y]]
-// CHECK-OPTIMIZED-NEXT: bitcast
+// CHECK-NEXT: [[T2:%.*]] = {{.*}}call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: [[OLD:%.*]] = load ptr, ptr [[X]]
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]]
+// CHECK-NEXT: call void @llvm.objc.release(ptr [[OLD]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[Y]]
 // CHECK-OPTIMIZED-NEXT: lifetime.end
-// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(i8** [[X]], i8* null)
-// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load i8*, i8** [[X]]
-// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(i8* [[T0]])
-// CHECK-OPTIMIZED-NEXT: bitcast
+// CHECK-UNOPTIMIZED-NEXT: call void @llvm.objc.storeStrong(ptr [[X]], ptr null)
+// CHECK-OPTIMIZED-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
+// CHECK-OPTIMIZED-NEXT: call void @llvm.objc.release(ptr [[T0]])
 // CHECK-OPTIMIZED-NEXT: lifetime.end
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_init_strong_assignment()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_ignored(void) {
   makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_ignored()
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CALL-NEXT: [[T2:%.*]] = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: bitcast i8* [[T2]] to [[A]]*
+// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CALL-NEXT: [[T2:%.*]] = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_ignored()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 void test_cast_to_void(void) {
   (void) makeA();
 }
 // CHECK-LABEL: define{{.*}} void @test_cast_to_void()
-// CHECK: [[T0:%.*]] = call [[A]]* @makeA()
+// CHECK: [[T0:%.*]] = call ptr @makeA()
 // CHECK-MARKED-NEXT: call void asm sideeffect
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
-// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CALL-NEXT: [[T2:%.*]] = call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* [[T1]])
-// CHECK-NEXT: bitcast i8* [[T2]] to [[A]]*
+// NOTAIL-CALL-NEXT: [[T2:%.*]] = notail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
+// CALL-NEXT: [[T2:%.*]] = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr [[T0]])
 // CHECK-NEXT: ret void
 // ATTACHED-CALL-LABEL: define{{.*}} void @test_cast_to_void()
-// ATTACHED-CALL: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
-// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use([[A]]* [[T0]])
+// ATTACHED-CALL: [[T0:%.*]] = call ptr @makeA() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ],
+// ATTACHED-CALL: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
 // This is always at the end of the module.
diff --git a/clang/test/CodeGenObjC/convert-messages-to-runtime-calls.m b/clang/test/CodeGenObjC/convert-messages-to-runtime-calls.m
index 0221cc59b0a1..9a5ecbc03d8a 100644
--- a/clang/test/CodeGenObjC/convert-messages-to-runtime-calls.m
+++ b/clang/test/CodeGenObjC/convert-messages-to-runtime-calls.m
@@ -1,12 +1,12 @@
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=macosx-10.10.0 -emit-llvm -o - %s -fno-objc-convert-messages-to-runtime-calls -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=macosx-10.10.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=macosx-10.9.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=macosx-fragile-10.10.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=ios-8.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=ios-7.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
+// RUN: %clang_cc1 -fobjc-runtime=macosx-10.10.0 -emit-llvm -o - %s -fno-objc-convert-messages-to-runtime-calls -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
+// RUN: %clang_cc1 -fobjc-runtime=macosx-10.10.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
+// RUN: %clang_cc1 -fobjc-runtime=macosx-10.9.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
+// RUN: %clang_cc1 -fobjc-runtime=macosx-fragile-10.10.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
+// RUN: %clang_cc1 -fobjc-runtime=ios-8.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
+// RUN: %clang_cc1 -fobjc-runtime=ios-7.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=MSGS
 // Note: This line below is for tvos for which the driver passes through to use the ios9.0 runtime.
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=ios-9.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
-// RUN: %clang_cc1 -no-opaque-pointers -fobjc-runtime=watchos-2.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
+// RUN: %clang_cc1 -fobjc-runtime=ios-9.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
+// RUN: %clang_cc1 -fobjc-runtime=watchos-2.0 -emit-llvm -o - %s -fobjc-exceptions -fexceptions | FileCheck %s --check-prefix=CALLS
 #define nil (id)0
@@ -74,22 +74,16 @@ void test2(void* x) {
 - (A*) autorelease;
 @end
-// Make sure we get a bitcast on the return type as the
-// call will return i8* which we have to cast to A*
 // CHECK-LABEL: define {{.*}}void @test_alloc_class_ptr
 A* test_alloc_class_ptr(void) {
   // CALLS: {{call.*@objc_alloc}}
-  // CALLS-NEXT: bitcast i8*
   // CALLS-NEXT: ret
   return [B alloc];
 }
-// Make sure we get a bitcast on the return type as the
-// call will return i8* which we have to cast to A*
 // CHECK-LABEL: define {{.*}}void @test_alloc_class_ptr
 A* test_allocWithZone_class_ptr(void) {
   // CALLS: {{call.*@objc_allocWithZone}}
-  // CALLS-NEXT: bitcast i8*
   // CALLS-NEXT: ret
   return [B allocWithZone:nil];
 }
@@ -108,21 +102,19 @@ void test_alloc_instance(A *a) {
 }
 // Make sure we get a bitcast on the return type as the
-// call will return i8* which we have to cast to A*
+// call will return ptr which we have to cast to A*
 // CHECK-LABEL: define {{.*}}void @test_retain_class_ptr
 A* test_retain_class_ptr(B *b) {
   // CALLS: {{call.*@objc_retain}}
-  // CALLS-NEXT: bitcast i8*
   // CALLS-NEXT: ret
   return [b retain];
 }
 // Make sure we get a bitcast on the return type as the
-// call will return i8* which we have to cast to A*
+// call will return ptr which we have to cast to A*
 // CHECK-LABEL: define {{.*}}void @test_autorelease_class_ptr
 A* test_autorelease_class_ptr(B *b) {
   // CALLS: {{tail call.*@objc_autorelease}}
-  // CALLS-NEXT: bitcast i8*
   // CALLS-NEXT: ret
   return [b autorelease];
 }
@@ -158,7 +150,7 @@ float test_cannot_message_return_float(C *c) {
 @end
 @implementation TestSelf
-// CHECK-LABEL: define internal i8* @"\01+[TestSelf classMeth]"(
+// CHECK-LABEL: define internal ptr @"\01+[TestSelf classMeth]"(
 + (id)classMeth {
   // MSGS: {{call.*@objc_msgSend}}
   // MSGS: {{call.*@objc_msgSend}}
@@ -167,7 +159,7 @@ float test_cannot_message_return_float(C *c) {
   [self allocWithZone:nil];
   return [self alloc];
 }
-// CHECK-LABEL: define internal i8* @"\01-[TestSelf instanceMeth]"(
+// CHECK-LABEL: define internal ptr @"\01-[TestSelf instanceMeth]"(
 - (id)instanceMeth {
   // MSGS: {{call.*@objc_msgSend}}
   // MSGS: {{call.*@objc_msgSend}}
@@ -208,7 +200,7 @@ float test_cannot_message_return_float(C *c) {
 // CHECK-LABEL: define {{.*}}void @testException_release
 void testException_release(NSObject *a) {
   // MSGS: {{invoke.*@objc_msgSend}}
-  // CALLS: invoke{{.*}}void @objc_release(i8* %
+  // CALLS: invoke{{.*}}void @objc_release(ptr %
   @try {
     [a release];
   } @catch (Ety *e) {
@@ -219,7 +211,7 @@ void testException_release(NSObject *a) {
 void testException_autorelease(NSObject *a) {
   @try {
     // MSGS: {{invoke.*@objc_msgSend}}
-    // CALLS: invoke{{.*}}objc_autorelease(i8* %
+    // CALLS: invoke{{.*}}objc_autorelease(ptr %
     [a autorelease];
   } @catch (Ety *e) {
   }
@@ -229,7 +221,7 @@ void testException_autorelease(NSObject *a) {
 void testException_retain(NSObject *a) {
   @try {
     // MSGS: {{invoke.*@objc_msgSend}}
-    // CALLS: invoke{{.*}}@objc_retain(i8* %
+    // CALLS: invoke{{.*}}@objc_retain(ptr %
    [a retain];
   } @catch (Ety *e) {
   }
@@ -240,7 +232,7 @@ void testException_retain(NSObject *a) {
 void testException_alloc(void) {
   @try {
     // MSGS: {{invoke.*@objc_msgSend}}
-    // CALLS: invoke{{.*}}@objc_alloc(i8* %
+    // CALLS: invoke{{.*}}@objc_alloc(ptr %
    [A alloc];
   } @catch (Ety *e) {
   }
@@ -250,7 +242,7 @@ void testException_alloc(void) {
 void testException_allocWithZone(void) {
   @try {
     // MSGS: {{invoke.*@objc_msgSend}}
-    // CALLS: invoke{{.*}}@objc_allocWithZone(i8* %
+    // CALLS: invoke{{.*}}@objc_allocWithZone(ptr %
    [A allocWithZone:nil];
   } @catch (Ety *e) {
   }
diff --git a/clang/test/CodeGenOpenCL/address-spaces-conversions.cl b/clang/test/CodeGenOpenCL/address-spaces-conversions.cl
index 1ae8bcb55409..f3a22fe89eae 100644
--- a/clang/test/CodeGenOpenCL/address-spaces-conversions.cl
+++ b/clang/test/CodeGenOpenCL/address-spaces-conversions.cl
@@ -1,7 +1,7 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64-unknown-linux-gnu -O0 -ffake-address-space-map -cl-std=CL2.0 -emit-llvm -o - | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64-unknown-linux-gnu -O0 -ffake-address-space-map -cl-std=CL3.0 -cl-ext=+__opencl_c_generic_address_space -emit-llvm -o - | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64-unknown-linux-gnu -O0 -cl-std=CL2.0 -emit-llvm -o - | FileCheck --check-prefix=CHECK-NOFAKE %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64-unknown-linux-gnu -O0 -cl-std=CL3.0 -cl-ext=+__opencl_c_generic_address_space -emit-llvm -o - | FileCheck --check-prefix=CHECK-NOFAKE %s
+// RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -O0 -ffake-address-space-map -cl-std=CL2.0 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -O0 -ffake-address-space-map -cl-std=CL3.0 -cl-ext=+__opencl_c_generic_address_space -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -O0 -cl-std=CL2.0 -emit-llvm -o - | FileCheck --check-prefix=CHECK-NOFAKE %s
+// RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -O0 -cl-std=CL3.0 -cl-ext=+__opencl_c_generic_address_space -emit-llvm -o - | FileCheck --check-prefix=CHECK-NOFAKE %s
 // When -ffake-address-space-map is not used, all addr space mapped to 0 for x86_64.
 // test that we generate address space casts everywhere we need conversions of
@@ -13,35 +13,33 @@ void test(global int *arg_glob, generic int *arg_gen,
           __attribute__((opencl_global_host)) int *arg_host) {
   int var_priv;
   arg_gen = arg_glob; // implicit cast global -> generic
-  // CHECK: %{{[0-9]+}} = addrspacecast i32 addrspace(1)* %{{[0-9]+}} to i32 addrspace(4)*
+  // CHECK: %{{[0-9]+}} = addrspacecast ptr addrspace(1) %{{[0-9]+}} to ptr addrspace(4)
   // CHECK-NOFAKE-NOT: addrspacecast
   arg_gen = &var_priv; // implicit cast with obtaining adr, private -> generic
-  // CHECK: %{{[._a-z0-9]+}} = addrspacecast i32* %{{[._a-z0-9]+}} to i32 addrspace(4)*
+  // CHECK: %{{[._a-z0-9]+}} = addrspacecast ptr %{{[._a-z0-9]+}} to ptr addrspace(4)
   // CHECK-NOFAKE-NOT: addrspacecast
   arg_glob = (global int *)arg_gen; // explicit cast
-  // CHECK: %{{[0-9]+}} = addrspacecast i32 addrspace(4)* %{{[0-9]+}} to i32 addrspace(1)*
+  // CHECK: %{{[0-9]+}} = addrspacecast ptr addrspace(4) %{{[0-9]+}} to ptr addrspace(1)
   // CHECK-NOFAKE-NOT: addrspacecast
   global int *var_glob = (global int *)arg_glob; // explicit cast in the same address space
-  // CHECK-NOT: %{{[0-9]+}} = addrspacecast i32 addrspace(1)* %{{[0-9]+}} to i32 addrspace(1)*
+  // CHECK-NOT: %{{[0-9]+}} = addrspacecast ptr addrspace(1) %{{[0-9]+}} to ptr addrspace(1)
   // CHECK-NOFAKE-NOT: addrspacecast
   var_priv = arg_gen - arg_glob; // arithmetic operation
-  // CHECK: %{{.*}} = ptrtoint i32 addrspace(4)* %{{.*}} to i64
-  // CHECK: %{{.*}} = ptrtoint i32 addrspace(1)* %{{.*}} to i64
-  // CHECK-NOFAKE: %{{.*}} = ptrtoint i32* %{{.*}} to i64
-  // CHECK-NOFAKE: %{{.*}} = ptrtoint i32* %{{.*}} to i64
+  // CHECK: %{{.*}} = ptrtoint ptr addrspace(4) %{{.*}} to i64
+  // CHECK: %{{.*}} = ptrtoint ptr addrspace(1) %{{.*}} to i64
+  // CHECK-NOFAKE: %{{.*}} = ptrtoint ptr %{{.*}} to i64
+  // CHECK-NOFAKE: %{{.*}} = ptrtoint ptr %{{.*}} to i64
   var_priv = arg_gen > arg_glob; // comparison
-  // CHECK: %{{[0-9]+}} = addrspacecast i32 addrspace(1)* %{{[0-9]+}} to i32 addrspace(4)*
+  // CHECK: %{{[0-9]+}} = addrspacecast ptr addrspace(1) %{{[0-9]+}} to ptr addrspace(4)
   generic void *var_gen_v = arg_glob;
   // CHECK: addrspacecast
-  // CHECK-NOT: bitcast
-  // CHECK-NOFAKE: bitcast
   // CHECK-NOFAKE-NOT: addrspacecast
   arg_glob = arg_device; // implicit cast
@@ -81,14 +79,12 @@ void test_ternary(void) {
   var_gen = var_gen ? var_gen : var_gen2; // operands of the same addr spaces and the same type
   // CHECK: icmp
   // CHECK-NOT: addrspacecast
-  // CHECK-NOT: bitcast
   // CHECK: phi
-  // CHECK: store i32 addrspace(4)* %{{.+}}, i32 addrspace(4)** %{{.+}}
+  // CHECK: store ptr addrspace(4) %{{.+}}, ptr %{{.+}}
   var_gen = var_gen ? var_gen : var_glob; // operands of overlapping addr spaces and the same type
   // CHECK: icmp
-  // CHECK-NOT: bitcast
-  // CHECK: %{{.+}} = addrspacecast i32 addrspace(1)* %{{.+}} to i32 addrspace(4)*
+  // CHECK: %{{.+}} = addrspacecast ptr addrspace(1) %{{.+}} to ptr addrspace(4)
   // CHECK: phi
   // CHECK: store
@@ -96,22 +92,18 @@ void test_ternary(void) {
   global int_t *var_glob_typedef;
   var_gen = var_gen ? var_gen : var_glob_typedef; // operands of overlapping addr spaces and equivalent types
   // CHECK: icmp
-  // CHECK-NOT: bitcast
-  // CHECK: %{{.+}} = addrspacecast i32 addrspace(1)* %{{.+}} to i32 addrspace(4)*
+  // CHECK: %{{.+}} = addrspacecast ptr addrspace(1) %{{.+}} to ptr addrspace(4)
   // CHECK: phi
   // CHECK: store
   var_gen_v = var_gen ? var_gen : var_gen_f; // operands of the same addr space and different types
   // CHECK: icmp
-  // CHECK: %{{.+}} = bitcast i32 addrspace(4)* %{{.+}} to i8 addrspace(4)*
-  // CHECK: %{{.+}} = bitcast float addrspace(4)* %{{.+}} to i8 addrspace(4)*
   // CHECK: phi
   // CHECK: store
   var_gen_v = var_gen ? var_glob : var_gen_f; // operands of overlapping addr spaces and different types
   // CHECK: icmp
-  // CHECK: %{{.+}} = addrspacecast i32 addrspace(1)* %{{.+}} to i8 addrspace(4)*
-  // CHECK: %{{.+}} = bitcast float addrspace(4)* %{{.+}} to i8 addrspace(4)*
+  // CHECK: %{{.+}} = addrspacecast ptr addrspace(1) %{{.+}} to ptr addrspace(4)
   // CHECK: phi
   // CHECK: store
 }