Change memcpy/memset/memmove to have dest and source alignments.
This is a follow-on from a similar LLVM commit: r253511.

Note: this was reviewed (and more details are available) at http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html

These intrinsics currently have an explicit alignment argument, which is
required to be a constant integer.  It represents the alignment of both the
source and the dest, and so must be the minimum of the two.

This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments.  The alignment
argument itself is removed.
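
For illustration, here is a minimal sketch of how a memcpy call changes under
this scheme (the value names, size, and alignments below are invented for the
example, not taken from this commit):

  ; old form: a single explicit i32 alignment argument, the minimum of dest and source
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i32 4, i1 false)

  ; new form: the alignment argument is gone; each pointer operand carries its
  ; own align attribute, so dest and source alignments no longer have to match
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %dst, i8* align 4 %src, i64 16, i1 false)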

The only code change to clang is in CGBuilder.h, which now passes both the
dest and source alignments to IRBuilder instead of taking the minimum of the
two.

Reviewed by Hal Finkel.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@253512 91177308-0d34-0410-b5e6-96231b3b80d8
cooperp committed Nov 18, 2015
1 parent 0489066 commit 77871bc
Showing 64 changed files with 284 additions and 284 deletions.
12 changes: 6 additions & 6 deletions lib/CodeGen/CGBuilder.h
@@ -271,23 +271,23 @@ class CGBuilderTy : public CGBuilderBaseTy {
   using CGBuilderBaseTy::CreateMemCpy;
   llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
                                bool IsVolatile = false) {
-    auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
     return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
-                        Align.getQuantity(), IsVolatile);
+                        Dest.getAlignment().getQuantity(),
+                        Src.getAlignment().getQuantity(), IsVolatile);
   }
   llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
                                bool IsVolatile = false) {
-    auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
     return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
-                        Align.getQuantity(), IsVolatile);
+                        Dest.getAlignment().getQuantity(),
+                        Src.getAlignment().getQuantity(), IsVolatile);
   }

   using CGBuilderBaseTy::CreateMemMove;
   llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
                                 bool IsVolatile = false) {
-    auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
     return CreateMemMove(Dest.getPointer(), Src.getPointer(), Size,
-                         Align.getQuantity(), IsVolatile);
+                         Dest.getAlignment().getQuantity(),
+                         Src.getAlignment().getQuantity(), IsVolatile);
   }

   using CGBuilderBaseTy::CreateMemSet;
2 changes: 1 addition & 1 deletion test/CodeGen/2007-11-07-CopyAggregateAlign.c
@@ -2,6 +2,6 @@
struct A { char s, t, u, v; short a; };
// CHECK: %a = alloca %struct.A, align 2
// CHECK: %b = alloca %struct.A, align 2
// CHECK: call void @llvm.memcpy.p0i8.p0i8.{{.*}}, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{[0-9]*}}(i8* align 2 %{{[0-9]*}}, i8* align 2 %{{[0-9]*}}, {{.*}}, i1 false)

void q() { struct A a, b; a = b; }
2 changes: 1 addition & 1 deletion test/CodeGen/2007-11-07-ZeroAggregateAlign.c
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
struct A { short s; short t; int i; };
// CHECK: %a = alloca %struct.A, align 4
// CHECK: call void @llvm.memset.p0i8.{{.*}}i32 4, i1 false)
// CHECK: call void @llvm.memset.p0i8.i{{[0-9]*}}(i8* align 4 %{{[0-9]*}}, {{.*}}, i1 false)
void q() { struct A a = {0}; }
4 changes: 2 additions & 2 deletions test/CodeGen/arm-arguments.c
@@ -208,13 +208,13 @@ float32x4_t f35(int i, s35_with_align s1, s35_with_align s2) {
// APCS-GNU: %[[a:.*]] = alloca %struct.s35, align 16
// APCS-GNU: %[[b:.*]] = bitcast %struct.s35* %[[a]] to i8*
// APCS-GNU: %[[c:.*]] = bitcast %struct.s35* %0 to i8*
// APCS-GNU: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[b]], i8* %[[c]]
// APCS-GNU: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %[[b]], i8* align 4 %[[c]]
// APCS-GNU: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>*
// APCS-GNU: load <4 x float>, <4 x float>* %[[d]], align 16
// AAPCS-LABEL: define arm_aapcscc <4 x float> @f35(i32 %i, %struct.s35* byval align 8, %struct.s35* byval align 8)
// AAPCS: %[[a:.*]] = alloca %struct.s35, align 16
// AAPCS: %[[b:.*]] = bitcast %struct.s35* %[[a]] to i8*
// AAPCS: %[[c:.*]] = bitcast %struct.s35* %0 to i8*
// AAPCS: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[b]], i8* %[[c]]
// AAPCS: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %[[b]], i8* align 8 %[[c]]
// AAPCS: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>*
// AAPCS: load <4 x float>, <4 x float>* %[[d]], align 16
2 changes: 1 addition & 1 deletion test/CodeGen/arm64-be-bitfield.c
@@ -9,7 +9,7 @@ signed callee_b0f(struct bt3 bp11) {
// IR: callee_b0f(i64 [[ARG:%.*]])
// IR: store i64 [[ARG]], i64* [[PTR:%.*]], align 8
// IR: [[BITCAST:%.*]] = bitcast i64* [[PTR]] to i8*
// IR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* [[BITCAST]], i64 4
// IR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 {{.*}}, i8* align 8 [[BITCAST]], i64 4
// ARM: asr x0, x0, #54
return bp11.b2;
}
2 changes: 1 addition & 1 deletion test/CodeGen/atomic-arm64.c
@@ -65,7 +65,7 @@ void test3(pointer_pair_t pair) {
// CHECK: [[TEMP:%.*]] = alloca [[QUAD_T:%.*]], align 8
// CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i8*
// CHECK-NEXT: [[T1:%.*]] = bitcast [[QUAD_T]]* {{%.*}} to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 32, i32 8, i1 false)
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T0]], i8* align 8 [[T1]], i64 32, i1 false)
// CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i256*
// CHECK-NEXT: [[T1:%.*]] = bitcast i256* [[T0]] to i8*
// CHECK-NEXT: call void @__atomic_store(i64 32, i8* bitcast ([[QUAD_T]]* @a_pointer_quad to i8*), i8* [[T1]], i32 5)
6 changes: 3 additions & 3 deletions test/CodeGen/block-byref-aggr.c
@@ -24,7 +24,7 @@ void test0() {
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF]], [[BYREF]]* [[T0]], i32 0, i32 4
// CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
// CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T2]], i8* [[T3]], i64 4, i32 4, i1 false)
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
// Verify that there's nothing else significant in the function.
// CHECK-NEXT: [[T0:%.*]] = bitcast [[BYREF]]* [[A]] to i8*
// CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8)
@@ -50,14 +50,14 @@ void test1() {
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[B_BYREF]], [[B_BYREF]]* [[T0]], i32 0, i32 4
// CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
// CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T2]], i8* [[T3]], i64 4, i32 4, i1 false)
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
// Then for 'a':
// CHECK-NEXT: [[A_FORWARDING:%.*]] = getelementptr inbounds [[A_BYREF]], [[A_BYREF]]* [[A]], i32 0, i32 1
// CHECK-NEXT: [[T0:%.*]] = load [[A_BYREF]]*, [[A_BYREF]]** [[A_FORWARDING]]
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[A_BYREF]], [[A_BYREF]]* [[T0]], i32 0, i32 4
// CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
// CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T2]], i8* [[T3]], i64 4, i32 4, i1 false)
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
// Verify that there's nothing else significant in the function.
// CHECK-NEXT: [[T0:%.*]] = bitcast [[B_BYREF]]* [[B]] to i8*
// CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8)
12 changes: 6 additions & 6 deletions test/CodeGen/builtin-memfns.c
@@ -53,15 +53,15 @@ int test6(char *X) {
// PR12094
int test7(int *p) {
struct snd_pcm_hw_params_t* hwparams; // incomplete type.
// CHECK: call void @llvm.memset{{.*}}256, i32 4, i1 false)

// CHECK: call void @llvm.memset{{.*}}(i8* align 4 %{{[0-9]*}}, {{.*}}, i{{[0-9]*}} 256, i1 false)
__builtin_memset(p, 0, 256); // Should be alignment = 4

// CHECK: call void @llvm.memset{{.*}}256, i32 1, i1 false)
// CHECK: call void @llvm.memset{{.*}}(i8* align 1 %{{[0-9]*}}, {{.*}}, i{{[0-9]*}} 256, i1 false)
__builtin_memset((char*)p, 0, 256); // Should be alignment = 1

__builtin_memset(hwparams, 0, 256); // No crash alignment = 1
// CHECK: call void @llvm.memset{{.*}}256, i32 1, i1 false)
// CHECK: call void @llvm.memset{{.*}}(i8* align 1 %{{[0-9]*}}, {{.*}}, i{{[0-9]*}} 256, i1 false)
}

// <rdar://problem/11314941>
@@ -73,13 +73,13 @@ struct PS {
struct PS ps;
void test8(int *arg) {
// CHECK: @test8
// CHECK: call void @llvm.memcpy{{.*}} 16, i32 1, i1 false)
// CHECK: call void @llvm.memcpy{{.*}}(i8* align 4 {{.*}}, i8* align 1 {{.*}}, i{{[0-9]*}} 16, i1 false)
__builtin_memcpy(arg, ps.modes, sizeof(struct PS));
}

__attribute((aligned(16))) int x[4], y[4];
void test9() {
// CHECK: @test9
// CHECK: call void @llvm.memcpy{{.*}} 16, i32 16, i1 false)
// CHECK: call void @llvm.memcpy{{.*}}(i8* align 16 {{.*}}, i8* align 16 {{.*}}, i{{[0-9]*}} 16, i1 false)
__builtin_memcpy(x, y, sizeof(y));
}
30 changes: 15 additions & 15 deletions test/CodeGen/c11atomics-ios.c
@@ -132,7 +132,7 @@ void testStruct(_Atomic(S) *fp) {
// CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[TMP0]] to i8*
// CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 8, i32 2, i1 false)
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T1]], i8* align 2 [[T2]], i32 8, i1 false)
// CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[TMP0]] to i64*
// CHECK-NEXT: [[T4:%.*]] = load i64, i64* [[T3]], align 8
// CHECK-NEXT: [[T5:%.*]] = bitcast [[S]]* [[T0]] to i64*
@@ -154,7 +154,7 @@ void testPromotedStruct(_Atomic(PS) *fp) {

// CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T0]], i8 0, i64 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
@@ -165,7 +165,7 @@ void testPromotedStruct(_Atomic(PS) *fp) {
__c11_atomic_init(fp, (PS){1,2,3});

// CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[X]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T0]], i8 0, i32 8, i32 8, i1 false)
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T0]], i8 0, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[X]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
@@ -183,16 +183,16 @@ void testPromotedStruct(_Atomic(PS) *fp) {
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8*
// CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 6, i32 2, i1 false)
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 8 [[T2]], i32 6, i1 false)
PS f = *fp;

// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[TMP1]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T1]], i8 0, i32 8, i32 8, i1 false)
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T1]], i8 0, i32 8, i1 false)
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP1]], i32 0, i32 0
// CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T1]] to i8*
// CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T2]], i8* [[T3]], i32 6, i32 2, i1 false)
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T2]], i8* align 2 [[T3]], i32 6, i1 false)
// CHECK-NEXT: [[T4:%.*]] = bitcast [[APS]]* [[TMP1]] to i64*
// CHECK-NEXT: [[T5:%.*]] = load i64, i64* [[T4]], align 8
// CHECK-NEXT: [[T6:%.*]] = bitcast [[APS]]* [[T0]] to i64*
@@ -215,7 +215,7 @@ PS test_promoted_load(_Atomic(PS) *addr) {
// CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
// CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
// CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[AGG_RESULT8]], i8* [[ATOMIC_RES8]], i32 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)

return __c11_atomic_load(addr, 5);
}
@@ -232,11 +232,11 @@ void test_promoted_store(_Atomic(PS) *addr, PS *val) {
// CHECK: [[VAL:%.*]] = load %struct.PS*, %struct.PS** [[VAL_ARG]], align 4
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: [[VAL8:%.*]] = bitcast %struct.PS* [[VAL]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[NONATOMIC_TMP8]], i8* [[VAL8]], i32 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[VAL8]], i32 6, i1 false)
// CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
// CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_VAL8]], i8* [[NONATOMIC_TMP8]], i64 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
// CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
// CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
// CHECK: store atomic i64 [[VAL64]], i64* [[ADDR64]] seq_cst, align 8
@@ -257,11 +257,11 @@ PS test_promoted_exchange(_Atomic(PS) *addr, PS *val) {
// CHECK: [[VAL:%.*]] = load %struct.PS*, %struct.PS** [[VAL_ARG]], align 4
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: [[VAL8:%.*]] = bitcast %struct.PS* [[VAL]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[NONATOMIC_TMP8]], i8* [[VAL8]], i32 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[VAL8]], i32 6, i1 false)
// CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
// CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_VAL8]], i8* [[NONATOMIC_TMP8]], i64 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
// CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
// CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
// CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
@@ -270,7 +270,7 @@ PS test_promoted_exchange(_Atomic(PS) *addr, PS *val) {
// CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
// CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
// CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[AGG_RESULT8]], i8* [[ATOMIC_RES8]], i32 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)
return __c11_atomic_exchange(addr, *val, 5);
}

@@ -291,15 +291,15 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: [[NEW:%.*]] = load %struct.PS*, %struct.PS** [[NEW_ARG]], align 4
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: [[NEW8:%.*]] = bitcast %struct.PS* [[NEW]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[NONATOMIC_TMP8]], i8* [[NEW8]], i32 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[NONATOMIC_TMP8]], i8* align 2 [[NEW8]], i32 6, i1 false)
// CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
// CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i8*
// CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_DESIRED8]], i8* [[DESIRED8]], i64 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
// CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i64*
// CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8*
// CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[ATOMIC_NEW8]], i8* [[NONATOMIC_TMP8]], i64 6, i32 2, i1 false)
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
// CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64*
// CHECK: [[ATOMIC_DESIRED_VAL64:%.*]] = load i64, i64* [[ATOMIC_DESIRED64]], align 8
// CHECK: [[ATOMIC_NEW_VAL64:%.*]] = load i64, i64* [[ATOMIC_NEW64]], align 8
