Skip to content

Commit

Permalink
AArch64: Omit callframe setup/destroy when not necessary
Browse files Browse the repository at this point in the history
Do not create CALLSEQ_START/CALLSEQ_END when there is no callframe to
setup and the callframe size is 0.

- Fixes an invalid callframe nesting for byval arguments, which would
  look like this before this patch (as in `big-byval.ll`):
    ...
    ADJCALLSTACKDOWN 32768, 0, ...   # Setup for extfunc
    ...
    ADJCALLSTACKDOWN 0, 0, ...  # setup for memcpy
    ...
    BL &memcpy ...
    ADJCALLSTACKUP 0, 0, ...    # destroy for memcpy
    ...
    BL &extfunc
    ADJCALLSTACKUP 32768, 0, ...   # destroy for extfunc

- Saves us two instructions in the common case of zero-sized stackframes.
- Remove an unnecessary scheduling barrier (hence the small unittest
  changes).

Differential Revision: https://reviews.llvm.org/D42006

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@322917 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
MatzeB committed Jan 19, 2018
1 parent f2f8ac2 commit 93b966c
Show file tree
Hide file tree
Showing 8 changed files with 87 additions and 67 deletions.
18 changes: 13 additions & 5 deletions lib/Target/AArch64/AArch64ISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3343,9 +3343,15 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
}

// We can omit callseq_start/callseq_end if there is no callframe to setup.
// Do not omit for patchpoints as SelectionDAGBuilder::visitPatchpoint()
// currently expects it.
bool OmitCallSeq = NumBytes == 0 && !CLI.IsPatchPoint;
assert((!IsSibCall || OmitCallSeq) && "Should not get callseq for sibcalls");

// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
if (!IsSibCall)
if (!OmitCallSeq)
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
Expand Down Expand Up @@ -3511,7 +3517,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// the frame up *after* the call, however in the ABI-changing tail-call case
// we've carefully laid out the parameters so that when sp is reset they'll be
// in the correct location.
if (IsTailCall && !IsSibCall) {
if (IsTailCall && !OmitCallSeq) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
InFlag = Chain.getValue(1);
Expand Down Expand Up @@ -3569,9 +3575,11 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
uint64_t CalleePopBytes =
DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;

Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
DAG.getIntPtrConstant(CalleePopBytes, DL, true),
InFlag, DL);
if (!OmitCallSeq)
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
DAG.getIntPtrConstant(CalleePopBytes, DL, true),
InFlag, DL);

if (!Ins.empty())
InFlag = Chain.getValue(1);

Expand Down
4 changes: 2 additions & 2 deletions test/CodeGen/AArch64/arm64-hello.ll
Original file line number Diff line number Diff line change
Expand Up @@ -5,19 +5,19 @@
; CHECK: sub sp, sp, #32
; CHECK-NEXT: stp x29, x30, [sp, #16]
; CHECK-NEXT: add x29, sp, #16
; CHECK-NEXT: stur wzr, [x29, #-4]
; CHECK: adrp x0, l_.str@PAGE
; CHECK: add x0, x0, l_.str@PAGEOFF
; CHECK-NEXT: stur wzr, [x29, #-4]
; CHECK-NEXT: bl _puts
; CHECK-NEXT: ldp x29, x30, [sp, #16]
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret

; CHECK-LINUX-LABEL: main:
; CHECK-LINUX: str x30, [sp, #-16]!
; CHECK-LINUX-NEXT: str wzr, [sp, #12]
; CHECK-LINUX: adrp x0, .L.str
; CHECK-LINUX: add x0, x0, :lo12:.L.str
; CHECK-LINUX-NEXT: str wzr, [sp, #12]
; CHECK-LINUX-NEXT: bl puts
; CHECK-LINUX-NEXT: ldr x30, [sp], #16
; CHECK-LINUX-NEXT: ret
Expand Down
6 changes: 3 additions & 3 deletions test/CodeGen/AArch64/arm64-shrink-wrapping.ll
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,10 @@ target triple = "arm64-apple-ios"
; DISABLE: cmp w0, w1
; DISABLE-NEXT: b.ge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.
; CHECK: stur w0, {{\[}}[[SAVE_SP]], #-4]
; Set the alloca address in the second argument.
; CHECK-NEXT: sub x1, [[SAVE_SP]], #4
; CHECK: sub x1, [[SAVE_SP]], #4
; Store %a in the alloca.
; CHECK-NEXT: stur w0, {{\[}}[[SAVE_SP]], #-4]
; Set the first argument to zero.
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: bl _doSomething
Expand Down
13 changes: 13 additions & 0 deletions test/CodeGen/AArch64/big-byval.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
; RUN: llc -o - %s -verify-machineinstrs | FileCheck %s
target triple = "aarch64--"

; Make sure we don't fail machine verification because the memcpy callframe
; setup is nested inside the extfunc callframe setup.
; (Passing a large byval argument lowers to a memcpy call; before this fix,
; the memcpy's ADJCALLSTACKDOWN/UP pair was emitted between extfunc's
; ADJCALLSTACKDOWN and ADJCALLSTACKUP, producing invalid nesting.)
; CHECK-LABEL: func:
; CHECK: bl memcpy
; CHECK: bl extfunc
declare void @extfunc([4096 x i64]* byval %p)
define void @func([4096 x i64]* %z) {
  call void @extfunc([4096 x i64]* byval %z)
  ret void
}
2 changes: 1 addition & 1 deletion test/CodeGen/AArch64/func-calls.ll
Original file line number Diff line number Diff line change
Expand Up @@ -63,10 +63,10 @@ define void @simple_rets() {
store [2 x i64] %arr, [2 x i64]* @varsmallstruct
; CHECK: bl return_smallstruct
; CHECK: add x[[VARSMALLSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varsmallstruct
; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
; CHECK: stp x0, x1, [x[[VARSMALLSTRUCT]]]

call void @return_large_struct(%myStruct* sret @varstruct)
; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
; CHECK: bl return_large_struct

ret void
Expand Down
8 changes: 4 additions & 4 deletions test/CodeGen/AArch64/nontemporal.ll
Original file line number Diff line number Diff line change
Expand Up @@ -313,8 +313,8 @@ declare void @dummy(<4 x float>*)

define void @test_stnp_v4f32_offset_alloca(<4 x float> %v) #0 {
; CHECK-LABEL: test_stnp_v4f32_offset_alloca:
; CHECK: stnp d0, d{{.*}}, [sp]
; CHECK-NEXT: mov x0, sp
; CHECK: mov x0, sp
; CHECK-NEXT: stnp d0, d{{.*}}, [sp]
; CHECK-NEXT: bl _dummy
%tmp0 = alloca <4 x float>
store <4 x float> %v, <4 x float>* %tmp0, align 1, !nontemporal !0
Expand All @@ -324,8 +324,8 @@ define void @test_stnp_v4f32_offset_alloca(<4 x float> %v) #0 {

define void @test_stnp_v4f32_offset_alloca_2(<4 x float> %v) #0 {
; CHECK-LABEL: test_stnp_v4f32_offset_alloca_2:
; CHECK: stnp d0, d{{.*}}, [sp, #16]
; CHECK-NEXT: mov x0, sp
; CHECK: mov x0, sp
; CHECK-NEXT: stnp d0, d{{.*}}, [sp, #16]
; CHECK-NEXT: bl _dummy
%tmp0 = alloca <4 x float>, i32 2
%tmp1 = getelementptr <4 x float>, <4 x float>* %tmp0, i32 1
Expand Down
93 changes: 46 additions & 47 deletions test/CodeGen/AArch64/swifterror.ll
Original file line number Diff line number Diff line change
Expand Up @@ -223,8 +223,8 @@ bb_end:
; parameter.
define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) {
; CHECK-APPLE-LABEL: foo_sret:
; CHECK-APPLE: mov [[SRET:x[0-9]+]], x8
; CHECK-APPLE: orr w0, wzr, #0x10
; CHECK-APPLE: mov [[SRET:x[0-9]+]], x8
; CHECK-APPLE: malloc
; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1
; CHECK-APPLE: strb [[ID]], [x0, #8]
Expand Down Expand Up @@ -406,7 +406,7 @@ entry:
ret float %0
}

; CHECK-APPLE-LABEL: swifterror_clobber
; CHECK-APPLE-LABEL: swifterror_clobber:
; CHECK-APPLE: mov [[REG:x[0-9]+]], x21
; CHECK-APPLE: nop
; CHECK-APPLE: mov x21, [[REG]]
Expand All @@ -415,32 +415,31 @@ define swiftcc void @swifterror_clobber(%swift_error** nocapture swifterror %err
ret void
}

; CHECK-APPLE-LABEL: swifterror_reg_clobber
; CHECK-APPLE-LABEL: swifterror_reg_clobber:
; CHECK-APPLE: stp {{.*}}x21
; CHECK-APPLE: nop
; CHECK-APPLE: ldp {{.*}}x21
define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
call void asm sideeffect "nop", "~{x21}"()
ret void
}
; CHECK-APPLE-LABEL: params_in_reg
; CHECK-APPLE-LABEL: params_in_reg:
; Save callee saved registers and swifterror since it will be clobbered by the first call to params_in_reg2.
; CHECK-APPLE: stp x21, x28, [sp
; CHECK-APPLE: stp x27, x26, [sp
; CHECK-APPLE: stp x25, x24, [sp
; CHECK-APPLE: stp x23, x22, [sp
; CHECK-APPLE: stp x20, x19, [sp
; CHECK-APPLE: stp x29, x30, [sp
; CHECK-APPLE: str x20, [sp
; CHECK-APPLE: str x7, [sp
; Store argument registers.
; CHECK-APPLE: mov x23, x7
; CHECK-APPLE: mov x24, x6
; CHECK-APPLE: mov x25, x5
; CHECK-APPLE: mov x26, x4
; CHECK-APPLE: mov x27, x3
; CHECK-APPLE: mov x28, x2
; CHECK-APPLE: mov x19, x1
; CHECK-APPLE: mov x22, x0
; CHECK-APPLE: mov x23, x6
; CHECK-APPLE: mov x24, x5
; CHECK-APPLE: mov x25, x4
; CHECK-APPLE: mov x26, x3
; CHECK-APPLE: mov x27, x2
; CHECK-APPLE: mov x28, x1
; CHECK-APPLE: mov x19, x0
; Setup call.
; CHECK-APPLE: orr w0, wzr, #0x1
; CHECK-APPLE: orr w1, wzr, #0x2
Expand All @@ -450,20 +449,20 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
; CHECK-APPLE: orr w5, wzr, #0x6
; CHECK-APPLE: orr w6, wzr, #0x7
; CHECK-APPLE: orr w7, wzr, #0x8
; CHECK-APPLE: mov x22, x20
; CHECK-APPLE: mov x20, xzr
; CHECK-APPLE: mov x21, xzr
; CHECK-APPLE: bl _params_in_reg2
; Restore original arguments for next call.
; CHECK-APPLE: mov x0, x22
; CHECK-APPLE: mov x1, x19
; CHECK-APPLE: mov x2, x28
; CHECK-APPLE: mov x3, x27
; CHECK-APPLE: mov x4, x26
; CHECK-APPLE: mov x5, x25
; CHECK-APPLE: mov x6, x24
; CHECK-APPLE: mov x7, x23
; CHECK-APPLE: mov x0, x19
; CHECK-APPLE: mov x1, x28
; CHECK-APPLE: mov x2, x27
; CHECK-APPLE: mov x3, x26
; CHECK-APPLE: mov x4, x25
; CHECK-APPLE: mov x5, x24
; Restore original swiftself argument and swifterror %err.
; CHECK-APPLE: ldp x20, x21, [sp
; CHECK-APPLE: ldp x7, x21, [sp
; CHECK-APPLE: mov x20, x22
; CHECK-APPLE: bl _params_in_reg2
; Restore callee save registers but don't clobber swifterror x21.
; CHECK-APPLE-NOT: x21
Expand All @@ -489,24 +488,23 @@ define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* s
}
declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err)

; CHECK-APPLE-LABEL: params_and_return_in_reg
; CHECK-APPLE-LABEL: params_and_return_in_reg:
; Store callee saved registers.
; CHECK-APPLE: stp x20, x28, [sp, #24
; CHECK-APPLE: stp x7, x28, [sp, #24
; CHECK-APPLE: stp x27, x26, [sp
; CHECK-APPLE: stp x25, x24, [sp
; CHECK-APPLE: stp x23, x22, [sp
; CHECK-APPLE: stp x20, x19, [sp
; CHECK-APPLE: stp x29, x30, [sp
; Save original arguments.
; CHECK-APPLE: mov x23, x21
; CHECK-APPLE: str x7, [sp, #16]
; CHECK-APPLE: mov x24, x6
; CHECK-APPLE: mov x25, x5
; CHECK-APPLE: mov x26, x4
; CHECK-APPLE: mov x27, x3
; CHECK-APPLE: mov x28, x2
; CHECK-APPLE: mov x19, x1
; CHECK-APPLE: mov x22, x0
; CHECK-APPLE: str x6, [sp, #16]
; CHECK-APPLE: mov x24, x5
; CHECK-APPLE: mov x25, x4
; CHECK-APPLE: mov x26, x3
; CHECK-APPLE: mov x27, x2
; CHECK-APPLE: mov x28, x1
; CHECK-APPLE: mov x19, x0
; Setup call arguments.
; CHECK-APPLE: orr w0, wzr, #0x1
; CHECK-APPLE: orr w1, wzr, #0x2
Expand All @@ -516,33 +514,33 @@ declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8*
; CHECK-APPLE: orr w5, wzr, #0x6
; CHECK-APPLE: orr w6, wzr, #0x7
; CHECK-APPLE: orr w7, wzr, #0x8
; CHECK-APPLE: mov x22, x20
; CHECK-APPLE: mov x20, xzr
; CHECK-APPLE: mov x21, xzr
; CHECK-APPLE: bl _params_in_reg2
; Store swifterror %error_ptr_ref.
; CHECK-APPLE: str x21, [sp, #8]
; Setup call arguments from original arguments.
; CHECK-APPLE: mov x0, x22
; CHECK-APPLE: mov x1, x19
; CHECK-APPLE: mov x2, x28
; CHECK-APPLE: mov x3, x27
; CHECK-APPLE: mov x4, x26
; CHECK-APPLE: mov x5, x25
; CHECK-APPLE: mov x6, x24
; CHECK-APPLE: ldp x7, x20, [sp, #16]
; CHECK-APPLE: mov x0, x19
; CHECK-APPLE: mov x1, x28
; CHECK-APPLE: mov x2, x27
; CHECK-APPLE: mov x3, x26
; CHECK-APPLE: mov x4, x25
; CHECK-APPLE: mov x5, x24
; CHECK-APPLE: ldp x6, x7, [sp, #16]
; CHECK-APPLE: mov x20, x22
; CHECK-APPLE: mov x21, x23
; CHECK-APPLE: bl _params_and_return_in_reg2
; Save swifterror %err.
; CHECK-APPLE: str x0, [sp, #24]
; Store return values.
; CHECK-APPLE: mov x19, x0
; CHECK-APPLE: mov x22, x1
; CHECK-APPLE: mov x24, x2
; CHECK-APPLE: mov x25, x3
; CHECK-APPLE: mov x26, x4
; CHECK-APPLE: mov x27, x5
; CHECK-APPLE: mov x28, x6
; CHECK-APPLE: mov x23, x7
; Save swifterror %err.
; CHECK-APPLE: str x21, [sp, #24]
; Setup call.
; CHECK-APPLE: orr w0, wzr, #0x1
; CHECK-APPLE: orr w1, wzr, #0x2
Expand All @@ -552,26 +550,27 @@ declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8*
; CHECK-APPLE: orr w5, wzr, #0x6
; CHECK-APPLE: orr w6, wzr, #0x7
; CHECK-APPLE: orr w7, wzr, #0x8
; CHECK-APPLE: mov x19, x21
; CHECK-APPLE: mov x20, xzr
; ... setup call with swifterror %error_ptr_ref.
; CHECK-APPLE: ldr x21, [sp, #8]
; CHECK-APPLE: bl _params_in_reg2
; Restore return values for return from this function.
; CHECK-APPLE: mov x0, x19
; CHECK-APPLE: mov x1, x22
; CHECK-APPLE: mov x2, x24
; CHECK-APPLE: mov x3, x25
; CHECK-APPLE: mov x4, x26
; CHECK-APPLE: mov x5, x27
; CHECK-APPLE: mov x6, x28
; CHECK-APPLE: mov x7, x23
; CHECK-APPLE: mov x21, x19
; Restore swifterror %err and callee save registers.
; CHECK-APPLE: ldp x21, x28, [sp, #24
; CHECK-APPLE: ldp x29, x30, [sp
; CHECK-APPLE: ldp x20, x19, [sp
; CHECK-APPLE: ldp x23, x22, [sp
; CHECK-APPLE: ldp x25, x24, [sp
; CHECK-APPLE: ldp x27, x26, [sp
; CHECK-APPLE: ldp x0, x28, [sp, #24
; CHECK-APPLE: ret
define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err) {
%error_ptr_ref = alloca swifterror %swift_error*, align 8
Expand Down Expand Up @@ -601,14 +600,14 @@ entry:
declare swiftcc void @foo2(%swift_error** swifterror)

; Make sure we properly assign registers during fast-isel.
; CHECK-O0-LABEL: testAssign
; CHECK-O0-LABEL: testAssign:
; CHECK-O0: mov [[TMP:x.*]], xzr
; CHECK-O0: mov x21, [[TMP]]
; CHECK-O0: bl _foo2
; CHECK-O0: str x21, [s[[STK:.*]]]
; CHECK-O0: ldr x0, [s[[STK]]]

; CHECK-APPLE-LABEL: testAssign
; CHECK-APPLE-LABEL: testAssign:
; CHECK-APPLE: mov x21, xzr
; CHECK-APPLE: bl _foo2
; CHECK-APPLE: mov x0, x21
Expand Down
10 changes: 5 additions & 5 deletions test/CodeGen/AArch64/tailcall-explicit-sret.ll
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,9 @@ define void @test_tailcall_explicit_sret_alloca_unused() #0 {

; CHECK-LABEL: _test_tailcall_explicit_sret_alloca_dummyusers:
; CHECK: ldr [[PTRLOAD1:q[0-9]+]], [x0]
; CHECK: str [[PTRLOAD1]], [sp]
; CHECK: mov x8, sp
; CHECK-NEXT: bl _test_explicit_sret
; CHECK: str [[PTRLOAD1]], [sp]
; CHECK: bl _test_explicit_sret
; CHECK: ret
define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
%l = alloca i1024, align 8
Expand Down Expand Up @@ -75,10 +75,10 @@ define i1024 @test_tailcall_explicit_sret_alloca_returned() #0 {
}

; CHECK-LABEL: _test_indirect_tailcall_explicit_sret_nosret_arg:
; CHECK-DAG: mov x[[CALLERX8NUM:[0-9]+]], x8
; CHECK-DAG: mov [[FPTR:x[0-9]+]], x0
; CHECK: mov [[FPTR:x[0-9]+]], x0
; CHECK: mov x0, sp
; CHECK-NEXT: blr [[FPTR]]
; CHECK: mov x[[CALLERX8NUM:[0-9]+]], x8
; CHECK: blr [[FPTR]]
; CHECK: ldr [[CALLERSRET1:q[0-9]+]], [sp]
; CHECK: str [[CALLERSRET1:q[0-9]+]], [x[[CALLERX8NUM]]]
; CHECK: ret
Expand Down

0 comments on commit 93b966c

Please sign in to comment.