core: arm: enable CFG_BOOT_MEM unconditionally
Enable CFG_BOOT_MEM unconditionally and call the boot_mem_*() functions
as needed from entry_*.S and boot.c.

The pager will reuse all boot_mem memory internally when configured.
The non-pager configuration will unmap the memory and make it available
for TAs if needed.

__FLATMAP_PAGER_TRAILING_SPACE is removed from the link script since
collect_mem_ranges() in core/mm/core_mmu.c now maps the memory
following VCORE_INIT_RO automatically.

Signed-off-by: Jens Wiklander <[email protected]>
Acked-by: Etienne Carriere <[email protected]>
jenswi-linaro authored and jforissier committed Dec 12, 2024
1 parent 5727b6a commit d461c89
Showing 5 changed files with 151 additions and 73 deletions.
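As context for the diffs below, the boot_mem flow that this commit
wires up is roughly the following. A minimal sketch of the call
sequence, with prototypes inferred from the call sites in this diff
(the real declarations live in the boot_mem header and may differ):

    /*
     * Hypothetical condensed view, not literal OP-TEE code.
     *
     * entry_*.S: register the boot memory pool before the MMU is
     * enabled. Non-pager: [__vcore_free_start, boot_embdata_ptr)
     * with __vcore_free_end as the original end. Pager: the
     * page-aligned area after struct boot_embdata up to the end of
     * TEE RAM.
     */
    void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end);

    /*
     * entry_*.S, CFG_CORE_ASLR only: fix up recorded pointers once
     * the MMU is on and the ASLR map offset is known.
     */
    void boot_mem_relocate(size_t offs);

    /*
     * boot.c, init_primary(): unmap whatever was never allocated
     * and return the first unmapped VA so boot_cached_mem_end can
     * be trimmed to it.
     */
    vaddr_t boot_mem_release_unused(void);

    /*
     * boot.c: drop the temporary boot allocations; with the pager
     * this happens in init_pager_runtime(), otherwise in
     * boot_init_primary_final().
     */
    void boot_mem_release_tmp_alloc(void);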
106 changes: 61 additions & 45 deletions core/arch/arm/kernel/boot.c
@@ -498,7 +498,7 @@ static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
#endif
}

static void init_runtime(unsigned long pageable_part)
static void init_pager_runtime(unsigned long pageable_part)
{
size_t n;
size_t init_size = (size_t)(__init_end - __init_start);
@@ -523,12 +523,6 @@ static void init_runtime(unsigned long pageable_part)

tmp_hashes = __init_end + embdata->hashes_offset;

init_asan();

/* Add heap2 first as heap1 may be too small as initial bget pool */
malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

/*
* This needs to be initialized early to support address lookup
* in MEM_AREA_TEE_RAM
@@ -542,10 +536,10 @@ static void init_runtime(unsigned long pageable_part)
asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

/*
* Need physical memory pool initialized to be able to allocate
* secure physical memory below.
* The pager is about to be enabled below, eventual temporary boot
* memory allocation must be removed now.
*/
core_mmu_init_phys_mem();
boot_mem_release_tmp_alloc();

carve_out_asan_mem();

@@ -654,27 +648,9 @@

print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
#else /*!CFG_WITH_PAGER*/
static void init_pager_runtime(unsigned long pageable_part __unused)
{
init_asan();

/*
* By default whole OP-TEE uses malloc, so we need to initialize
* it early. But, when virtualization is enabled, malloc is used
* only by TEE runtime, so malloc should be initialized later, for
* every virtual partition separately. Core code uses nex_malloc
* instead.
*/
#ifdef CFG_NS_VIRTUALIZATION
nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
__nex_heap_start);
#else
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

IMSG_RAW("\n");
}
#endif

@@ -891,10 +867,9 @@ static void update_external_dt(void)

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
/* Pager initializes TA RAM early */
core_mmu_init_phys_mem();
#endif
if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
core_mmu_init_phys_mem();

/*
* With virtualization we call this function when creating the
* OP-TEE partition instead.
@@ -925,6 +900,8 @@ void init_tee_runtime(void)

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
vaddr_t va = 0;

thread_init_core_local_stacks();
/*
* Mask asynchronous exceptions before switch to the thread vector
@@ -940,14 +917,54 @@ static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
check_crypto_extensions();

init_asan();

/*
* Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
* set a current thread right now to avoid a chicken-and-egg problem
* (thread_init_boot_thread() sets the current thread but needs
* things set by init_runtime()).
* By default whole OP-TEE uses malloc, so we need to initialize
* it early. But, when virtualization is enabled, malloc is used
* only by TEE runtime, so malloc should be initialized later, for
* every virtual partition separately. Core code uses nex_malloc
* instead.
*/
thread_get_core_local()->curr_thread = 0;
init_runtime(pageable_part);
#ifdef CFG_WITH_PAGER
/* Add heap2 first as heap1 may be too small as initial bget pool */
malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
#endif
#ifdef CFG_NS_VIRTUALIZATION
nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
__nex_heap_start);
#else
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif
IMSG_RAW("\n");

if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
IMSG("Initializing virtualization support");
core_mmu_init_virtualization();
} else {
core_mmu_init_phys_mem();
}
va = boot_mem_release_unused();
if (!IS_ENABLED(CFG_WITH_PAGER)) {
/*
* We must update boot_cached_mem_end to reflect the memory
* just unmapped by boot_mem_release_unused().
*/
assert(va && va <= boot_cached_mem_end);
boot_cached_mem_end = va;
}

if (IS_ENABLED(CFG_WITH_PAGER)) {
/*
* Pager: init_runtime() calls thread_kernel_enable_vfp()
* so we must set a current thread right now to avoid a
* chicken-and-egg problem (thread_init_boot_thread() sets
* the current thread but needs things set by
* init_runtime()).
*/
thread_get_core_local()->curr_thread = 0;
init_pager_runtime(pageable_part);
}

if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
/*
@@ -1028,12 +1045,8 @@ void __weak boot_init_primary_late(unsigned long fdt __unused,

boot_primary_init_intc();
init_vfp_nsec();
if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
IMSG("Initializing virtualization support");
core_mmu_init_virtualization();
} else {
if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
init_tee_runtime();
}
}

/*
@@ -1042,6 +1055,9 @@
*/
void __weak boot_init_primary_final(void)
{
if (!IS_ENABLED(CFG_WITH_PAGER))
boot_mem_release_tmp_alloc();

if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
call_driver_initcalls();
call_finalcalls();
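Several hunks above trade #ifdef blocks for IS_ENABLED() tests, so
both configurations are always compiled and type-checked and the
untaken branch is dropped as dead code. A minimal standalone
illustration of the pattern (CFG_EXAMPLE and the simplified macro are
made up for this sketch; OP-TEE's real IS_ENABLED() is more involved):

    #include <stdio.h>

    #define CFG_EXAMPLE 1
    /* Simplified stand-in for OP-TEE's IS_ENABLED() macro. */
    #define IS_ENABLED(cfg) (cfg)

    int main(void)
    {
            /* Both branches compile; the optimizer removes the dead one. */
            if (IS_ENABLED(CFG_EXAMPLE))
                    printf("feature enabled\n");
            else
                    printf("feature disabled\n");
            return 0;
    }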
49 changes: 42 additions & 7 deletions core/arch/arm/kernel/entry_a32.S
@@ -366,22 +366,30 @@ copy_init:
/*
* The binary is built as:
* [Core, rodata and data] : In correct location
* [struct boot_embdata + data] : Should be moved to __end, first
* uint32_t tells the length of the struct + data
* [struct boot_embdata + data] : Should be moved to right before
* __vcore_free_end, the first uint32_t tells the length of the
* struct + data
*/
ldr r0, =__end /* dst */
ldr r1, =__data_end /* src */
ldr r2, [r1] /* struct boot_embdata::total_len */
/* dst */
ldr r0, =__vcore_free_end
sub r0, r0, r2
/* round down to beginning of page */
mov r3, #(SMALL_PAGE_SIZE - 1)
bic r0, r0, r3
ldr r3, =boot_embdata_ptr
str r0, [r3]
/* Copy backwards (as memmove) in case we're overlapping */
add r0, r0, r2
add r1, r1, r2
add r2, r0, r2
ldr r3, =boot_cached_mem_end
str r2, [r3]

copy_init:
ldmdb r1!, {r3, r9-r12}
stmdb r0!, {r3, r9-r12}
cmp r0, r2
stmdb r2!, {r3, r9-r12}
cmp r2, r0
bgt copy_init
#endif

@@ -458,6 +466,23 @@ shadow_stack_access_ok:
bl boot_save_args
add sp, sp, #(2 * 4)

#ifdef CFG_WITH_PAGER
ldr r0, =__init_end /* pointer to boot_embdata */
ldr r1, [r0] /* struct boot_embdata::total_len */
add r0, r0, r1
mov_imm r1, 0xfff
add r0, r0, r1 /* round up */
bic r0, r0, r1 /* to next page */
mov_imm r1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
mov r2, r1
#else
ldr r0, =__vcore_free_start
ldr r1, =boot_embdata_ptr
ldr r1, [r1]
ldr r2, =__vcore_free_end
#endif
bl boot_mem_init

#ifdef CFG_PL310
bl pl310_base
bl arm_cl2_config
@@ -512,6 +537,9 @@ shadow_stack_access_ok:
bl __get_core_pos
bl enable_mmu
#ifdef CFG_CORE_ASLR
ldr r0, =boot_mmu_config
ldr r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
bl boot_mem_relocate
/*
* Reinitialize console, since register_serial_console() has
* previously registered a PA and with ASLR the VA is different
@@ -639,6 +667,12 @@ LOCAL_DATA cached_mem_start , :
.word __text_start
END_DATA cached_mem_start

#ifndef CFG_WITH_PAGER
LOCAL_DATA boot_embdata_ptr , :
.skip 4
END_DATA boot_embdata_ptr
#endif

LOCAL_FUNC unhandled_cpu , :
wfi
b unhandled_cpu
@@ -651,7 +685,8 @@ LOCAL_FUNC relocate , :
#ifdef CFG_WITH_PAGER
ldr r12, =__init_end
#else
ldr r12, =__end
ldr r12, =boot_embdata_ptr
ldr r12, [r12]
#endif
ldr r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
ldr r3, [r12, #BOOT_EMBDATA_RELOC_LEN]
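The copy_init loop above copies the boot_embdata blob backwards,
memmove-style, because the destination just below __vcore_free_end may
overlap the source at __data_end. A C sketch of the same idea (the
helper name is hypothetical, not OP-TEE code):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Overlap-safe backward copy, as copy_init does 20 bytes at a
     * time with ldmdb/stmdb (16 bytes with ldp/stp on Armv8): when
     * dst > src and the ranges overlap, copying from the high end
     * downwards never reads a byte that has already been
     * overwritten.
     */
    static void copy_backwards(uint8_t *dst, const uint8_t *src,
                               size_t len)
    {
            uint8_t *d = dst + len;
            const uint8_t *s = src + len;

            while (d > dst)
                    *--d = *--s;
    }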
51 changes: 42 additions & 9 deletions core/arch/arm/kernel/entry_a64.S
@@ -212,23 +212,30 @@ copy_init:
/*
* The binary is built as:
* [Core, rodata and data] : In correct location
* [struct boot_embdata + data] : Should be moved to __end, first
* uint32_t tells the length of the struct + data
* [struct boot_embdata + data] : Should be moved to right before
* __vcore_free_end, the first uint32_t tells the length of the
* struct + data
*/
adr_l x0, __end /* dst */
adr_l x1, __data_end /* src */
ldr w2, [x1] /* struct boot_embdata::total_len */
/* dst */
adr_l x0, __vcore_free_end
sub x0, x0, x2
/* round down to beginning of page */
bic x0, x0, #(SMALL_PAGE_SIZE - 1)
adr_l x3, boot_embdata_ptr
str x0, [x3]

/* Copy backwards (as memmove) in case we're overlapping */
add x0, x0, x2
add x1, x1, x2
add x2, x0, x2
adr_l x3, boot_cached_mem_end
str x0, [x3]
adr_l x2, __end
str x2, [x3]

copy_init:
ldp x3, x4, [x1, #-16]!
stp x3, x4, [x0, #-16]!
cmp x0, x2
stp x3, x4, [x2, #-16]!
cmp x2, x0
b.gt copy_init
#endif

@@ -304,6 +311,22 @@ clear_nex_bss:
mov x4, xzr
bl boot_save_args

#ifdef CFG_WITH_PAGER
adr_l x0, __init_end /* pointer to boot_embdata */
ldr w1, [x0] /* struct boot_embdata::total_len */
add x0, x0, x1
add x0, x0, #0xfff /* round up */
bic x0, x0, #0xfff /* to next page */
mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
mov x2, x1
#else
adr_l x0, __vcore_free_start
adr_l x1, boot_embdata_ptr
ldr x1, [x1]
adr_l x2, __vcore_free_end
#endif
bl boot_mem_init

#ifdef CFG_MEMTAG
/*
* If FEAT_MTE2 is available, initializes the memtag callbacks.
@@ -349,6 +372,9 @@ clear_nex_bss:
bl __get_core_pos
bl enable_mmu
#ifdef CFG_CORE_ASLR
adr_l x0, boot_mmu_config
ldr x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
bl boot_mem_relocate
/*
* Reinitialize console, since register_serial_console() has
* previously registered a PA and with ASLR the VA is different
@@ -470,8 +496,14 @@ clear_nex_bss:
END_FUNC _start
DECLARE_KEEP_INIT _start

#ifndef CFG_WITH_PAGER
.section .identity_map.data
.balign 8
LOCAL_DATA boot_embdata_ptr , :
.skip 8
END_DATA boot_embdata_ptr
#endif

#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
LOCAL_FUNC relocate , :
/*
@@ -481,7 +513,8 @@ LOCAL_FUNC relocate , :
#ifdef CFG_WITH_PAGER
adr_l x6, __init_end
#else
adr_l x6, __end
adr_l x6, boot_embdata_ptr
ldr x6, [x6]
#endif
ldp w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

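Both entry paths align the boot_mem region to SMALL_PAGE_SIZE: the
pager variant rounds the end of struct boot_embdata up to the next
page ("add ... 0xfff" then "bic ... 0xfff"), while the non-pager copy
destination is rounded down to a page start before the copy. The same
arithmetic in C (helper names are hypothetical):

    #include <stdint.h>

    #define SMALL_PAGE_SIZE 0x1000 /* 4 KiB small page */

    /*
     * Round down to the start of the containing page, as done for
     * the copy destination below __vcore_free_end.
     */
    static inline uintptr_t page_align_down(uintptr_t va)
    {
            return va & ~(uintptr_t)(SMALL_PAGE_SIZE - 1);
    }

    /*
     * Round up to the next page boundary, as the pager path does to
     * find the first free page after struct boot_embdata.
     */
    static inline uintptr_t page_align_up(uintptr_t va)
    {
            return (va + SMALL_PAGE_SIZE - 1) &
                   ~(uintptr_t)(SMALL_PAGE_SIZE - 1);
    }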
14 changes: 2 additions & 12 deletions core/arch/arm/kernel/kern.ld.S
@@ -478,24 +478,14 @@ __vcore_nex_rw_end = __vcore_nex_rw_start + __vcore_nex_rw_size;
#endif

#ifdef CFG_WITH_PAGER
/*
* Core init mapping shall cover up to end of the physical RAM.
* This is required since the hash table is appended to the
* binary data after the firmware build sequence.
*/
#define __FLATMAP_PAGER_TRAILING_SPACE \
(TEE_RAM_START + TEE_RAM_PH_SIZE - \
(__flatmap_init_ro_start + __flatmap_init_ro_size))

/* Paged/init read-only memories */
__vcore_init_rx_start = __flatmap_init_rx_start;
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_init_rx_size = __flatmap_init_rx_size;
__vcore_init_ro_start = __flatmap_init_ro_start;
__vcore_init_ro_size = __flatmap_init_ro_size + __FLATMAP_PAGER_TRAILING_SPACE;
__vcore_init_ro_size = __flatmap_init_ro_size;
#else
__vcore_init_rx_size = __flatmap_init_rx_size + __flatmap_init_ro_size +
__FLATMAP_PAGER_TRAILING_SPACE;
__vcore_init_rx_size = __flatmap_init_rx_size + __flatmap_init_ro_size;
__vcore_init_ro_start = __vcore_init_rx_end;
__vcore_init_ro_size = 0;
#endif /* CFG_CORE_RODATA_NOEXEC */
