[kernel][assembler] Add .cfi_startproc, .cfi_endproc directives
... and switch from .eh_frame to .debug_frame.

Some functions don't really need .cfi directives, e.g. during early boot.
These functions are marked with FUNCTION_LABEL instead of FUNCTION,
since every use of FUNCTION now requires a corresponding END_FUNCTION.
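
For illustration, a minimal sketch of how the two macro families are
meant to be paired (the function names here are hypothetical):

    // Bracketed with .cfi_startproc/.cfi_endproc; needs END_FUNCTION.
    FUNCTION(do_work)
        ret
    END_FUNCTION(do_work)

    // Plain labeled entry point (e.g. early boot); no END_FUNCTION.
    FUNCTION_LABEL(early_entry)
        b       do_work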

MG-870 #comment baby steps

Change-Id: I3c7d182ca91b2f705100880282477bfe930c6633
xdje42 committed Jul 26, 2017
1 parent 7370203 commit 6a0ae3c
Showing 14 changed files with 51 additions and 15 deletions.
2 changes: 1 addition & 1 deletion kernel/arch/arm64/exceptions.S
@@ -145,7 +145,7 @@ add sp, sp, #(4*8) + (10*8)
b arm64_exc_shared_restore_long
.endm

FUNCTION(arm64_exception_base)
FUNCTION_LABEL(arm64_exception_base)

/* exceptions from current EL, using SP0 */
LOCAL_FUNCTION(arm64_sync_exc_current_el_SP0)
7 changes: 5 additions & 2 deletions kernel/arch/arm64/mexec.S
@@ -91,8 +91,9 @@ FUNCTION(mexec_asm)

// Branch to the next kernel.
br new_kernel_addr
END_FUNCTION(mexec_asm)

memmove_mexec:
LOCAL_FUNCTION(memmove_mexec)
// x6 contains the stride (1 word if we're copying forward
// -1 word if we're copying backwards)
mov x6, 1
@@ -142,6 +143,7 @@ memmove_mexec:
bne .copy_loop
.done:
ret
END_FUNCTION(memmove_mexec)

// Perform a bulk clean/invalidate across the whole cache
// Normally on ARM we can use the CIVAC, CVAC, CVAU and IVAC instructions to
@@ -155,7 +157,7 @@ memmove_mexec:
// cache.
// The original implementation can be found in the ARMv8-A TRM or at the
// following URL: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/BABJDBHI.html
mexec_arch_clean_invalidate_cache_all:
LOCAL_FUNCTION(mexec_arch_clean_invalidate_cache_all)
mrs x0, clidr_el1
and w3, w0, #0x07000000 // get 2 x level of coherence
lsr w3, w3, #23
@@ -199,6 +201,7 @@ mexec_arch_clean_invalidate_cache_all:
dsb sy

ret
END_FUNCTION(mexec_arch_clean_invalidate_cache_all)


/* This .ltorg emits any immediate constants here. We need to put this before
2 changes: 1 addition & 1 deletion kernel/arch/arm64/start.S
@@ -42,7 +42,7 @@ size .req x26
attr .req x27

.section .text.boot, "ax", @progbits
FUNCTION(_start)
FUNCTION_LABEL(_start)
#if FASTBOOT_HEADER
#include "fastboot-header.S"
#endif
5 changes: 5 additions & 0 deletions kernel/arch/x86/64/asm.S
@@ -31,6 +31,7 @@ FUNCTION(x86_64_context_switch)
popf

retq
END_FUNCTION(x86_64_context_switch)

#include <arch/x86/mp.h>

@@ -51,6 +52,7 @@ FUNCTION(arch_spin_lock)
cmp $0, %rax // make sure we actually got the lock
jne .Lspin // if we lost the race, resume waiting
ret
END_FUNCTION(arch_spin_lock)

/* int arch_spin_trylock(unsigned long *lock) */
FUNCTION(arch_spin_trylock)
@@ -60,12 +62,14 @@ FUNCTION(arch_spin_trylock)

xchg %rax, (%rdi)
ret // return 0 if we got the lock
END_FUNCTION(arch_spin_trylock)

/* void arch_spin_unlock(spin_lock_t *lock) */
FUNCTION(arch_spin_unlock)
mov $0, %rax
xchg %rax, (%rdi)
ret
END_FUNCTION(arch_spin_unlock)

/* rep stos version of page zero */
FUNCTION(arch_zero_page)
@@ -76,3 +80,4 @@ FUNCTION(arch_zero_page)
rep stosq

ret
END_FUNCTION(arch_zero_page)
2 changes: 2 additions & 0 deletions kernel/arch/x86/64/exceptions.S
@@ -42,6 +42,7 @@ FUNCTION(_isr_\@)
pushq $\@ /* interrupt number */
jmp interrupt_common
.endif
END_FUNCTION(_isr_\@)
.popsection
.pushsection .rodata
.quad _isr_\@
@@ -122,3 +123,4 @@ interrupt_common_iframe_set_up_for_debugger:
addq $16, %rsp

iretq
END_FUNCTION(interrupt_common)
2 changes: 2 additions & 0 deletions kernel/arch/x86/64/hypervisor.S
@@ -79,6 +79,7 @@ failure:
// Return MX_ERR_INTERNAL.
mov $MX_ERR_INTERNAL, %eax
ret
END_FUNCTION(vmx_enter)

/* This is effectively the second-half of vmx_enter. When we return from a
* VM exit, vmx_state argument is stored in RSP. We use this to restore the
@@ -138,3 +139,4 @@ FUNCTION(vmx_exit_entry)
// Return MX_OK, using the return address of vmx_enter pushed above.
mov $MX_OK, %eax
ret
END_FUNCTION(vmx_exit_entry)
2 changes: 2 additions & 0 deletions kernel/arch/x86/64/ops.S
@@ -32,6 +32,7 @@ FUNCTION(arch_idle)
hlt
1:
ret
END_FUNCTION(arch_idle)


/* status_t read_msr_safe(uint32_t msr_id, uint64_t *val); */
@@ -67,3 +68,4 @@ FUNCTION(read_msr_safe)

mov $MX_ERR_NOT_SUPPORTED, %rax
ret
END_FUNCTION(read_msr_safe)
4 changes: 2 additions & 2 deletions kernel/arch/x86/64/start.S
@@ -86,7 +86,7 @@

.section .text.boot, "ax", @progbits
.code32
FUNCTION(_multiboot_start)
FUNCTION_LABEL(_multiboot_start)
cmpl $MULTIBOOT_BOOTLOADER_MAGIC, %eax
jne .Lcommon_boot
movl %ebx, PHYS(_multiboot_info)
@@ -214,7 +214,7 @@ high_entry:

/* 64bit entry point from a secondary loader */
.align 8
FUNCTION(_entry64)
FUNCTION_LABEL(_entry64)
mov %esi, PHYS(_bootdata_base)
/* ensure the stack pointer is sane */

8 changes: 4 additions & 4 deletions kernel/arch/x86/64/start16.S
@@ -17,10 +17,10 @@
.align PAGE_SIZE

.section .text
FUNCTION(x86_bootstrap16_start)
FUNCTION_LABEL(x86_bootstrap16_start)

.code16
FUNCTION(x86_bootstrap16_entry)
FUNCTION_LABEL(x86_bootstrap16_entry)
# Enter no-fill cache mode (allegedly this is the initial state
# according to Intel 3A, but on at least one Broadwell the APs can
# come up with caching enabled)
@@ -84,7 +84,7 @@ FUNCTION(x86_bootstrap16_entry)

# Get the secondary cpu into 64-bit mode with interrupts disabled and no TSS
.code64
FUNCTION(_x86_secondary_cpu_long_mode_entry)
FUNCTION_LABEL(_x86_secondary_cpu_long_mode_entry)
# When we get here, %rsi should contain the absolute address of our data
# page.
mov $1, %rdi
@@ -144,7 +144,7 @@ FUNCTION(_x86_secondary_cpu_long_mode_entry)
jmp 0b

# Get the cpu into 64-bit mode with interrupts disabled and no TSS
FUNCTION(_x86_suspend_wakeup)
FUNCTION_LABEL(_x86_suspend_wakeup)
# Retrieve the new PML4 address before our data page becomes unreachable
mov BCD_PHYS_KERNEL_PML4_OFFSET(%esi), %ecx

2 changes: 2 additions & 0 deletions kernel/arch/x86/64/user_copy.S
@@ -90,6 +90,7 @@ FUNCTION(_x86_copy_from_user)

end_usercopy
ret
END_FUNCTION(_x86_copy_from_user)

# status_t _x86_copy_to_user(void *dst, const void *src, size_t len, bool smap, void **fault_return)
FUNCTION(_x86_copy_to_user)
@@ -130,3 +131,4 @@ FUNCTION(_x86_copy_to_user)

end_usercopy
ret
END_FUNCTION(_x86_copy_to_user)
1 change: 1 addition & 0 deletions kernel/arch/x86/64/uspace_entry.S
@@ -44,3 +44,4 @@ FUNCTION(x86_uspace_entry)
mov %ax, %gs

iretq
END_FUNCTION(x86_uspace_entry)
3 changes: 3 additions & 0 deletions kernel/arch/x86/mexec.S
@@ -8,7 +8,10 @@
#include <asm.h>

.section .text

FUNCTION(mexec_asm)
// Unimplemented for now, emit an undefined instruction.
UD2
END_FUNCTION(mexec_asm)

DATA(mexec_asm_end)
2 changes: 2 additions & 0 deletions kernel/dev/psci/psci.S
@@ -12,9 +12,11 @@
FUNCTION(psci_smc_call)
smc #0
ret
END_FUNCTION(psci_smc_call)

FUNCTION(psci_hvc_call)
hvc #0
ret
END_FUNCTION(psci_hvc_call)

.ltorg
24 changes: 19 additions & 5 deletions kernel/include/asm.h
@@ -8,14 +8,28 @@
#ifndef __ASM_H
#define __ASM_H

#define FUNCTION(x) .global x; .type x,STT_FUNC; x:
#define DATA(x) .global x; .type x,STT_OBJECT; x:
#ifndef ASSEMBLY
#error for assembly files only
#endif

// for functions that don't have an "end" or don't want .cfi_startproc
#define LOCAL_FUNCTION_LABEL(x) .type x,STT_FUNC; x:
#define FUNCTION_LABEL(x) .global x; LOCAL_FUNCTION_LABEL(x)

#define LOCAL_FUNCTION(x) LOCAL_FUNCTION_LABEL(x) .cfi_startproc
#define FUNCTION(x) .global x; LOCAL_FUNCTION(x)

// for local or global functions
#define END_FUNCTION(x) .cfi_endproc; .size x, . - x

#define LOCAL_FUNCTION(x) .type x,STT_FUNC; x:
#define LOCAL_DATA(x) .type x,STT_OBJECT; x:
#define DATA(x) .global x; LOCAL_DATA(x)

#define END_FUNCTION(x) .size x, . - x
// for local or global data
#define END_DATA(x) .size x, . - x

#endif
// We want .debug_frame not .eh_frame.
// WARNING: This is a subtle side-effect of including this file. Heads up!
.cfi_sections .debug_frame

#endif
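
Putting the asm.h changes together: a FUNCTION(x)/END_FUNCTION(x) pair now
expands to roughly the following (shown for a hypothetical symbol foo):

    .global foo; .type foo,STT_FUNC; foo: .cfi_startproc
        // ... function body ...
    .cfi_endproc; .size foo, . - foo

And because the header emits ".cfi_sections .debug_frame" at file scope, the
assembler records this unwind information in the non-allocated (strippable)
.debug_frame section rather than in .eh_frame, which is loaded at run time.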
