Skip to content

Commit

Permalink
xtensa: mmu: mpu: add xtensa_mem_kernel_has_access()
Browse files Browse the repository at this point in the history
This adds a new function xtensa_mem_kernel_has_access() to
determine if a memory region can be accessed by kernel threads.
This allows checking that a memory region is validly mapped
before accessing it, instead of relying on page faults to detect
invalid accesses.

Also fixed an issue with arch_buffer_validate() on MPU where
it may return okay even if the incoming memory region has no
corresponding entry in the MPU table.

Signed-off-by: Daniel Leung <[email protected]>
  • Loading branch information
dcpleung authored and nashif committed Jun 15, 2024
1 parent 61ec0d1 commit 79939e3
Show file tree
Hide file tree
Showing 4 changed files with 137 additions and 2 deletions.
97 changes: 97 additions & 0 deletions arch/xtensa/core/mpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -989,6 +989,14 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
offset += XCHAL_MPU_ALIGN) {
uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);

if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
/* There is no foreground or background entry associated
* with the region.
*/
ret = -EPERM;
goto out;
}

uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
>> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;

Expand Down Expand Up @@ -1037,6 +1045,95 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
return ret;
}

/**
 * Check if kernel threads can access a memory region.
 *
 * The region [addr, addr + size) is expanded to MPU alignment and each
 * aligned chunk is probed via the PPTLB. The region is accessible only
 * if every chunk has a valid MPU entry whose access rights grant kernel
 * read (and, when @a write is non-zero, kernel write) permission.
 *
 * @param addr Start address of the buffer.
 * @param size Size of the buffer.
 * @param write If non-zero, also require write permission.
 *
 * @return True if kernel threads can access the whole region.
 */
bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
{
	uintptr_t region_start;
	size_t region_size, head_offset;

	/* Expand the arbitrary addr/size pair to MPU-aligned boundaries. */
	region_start = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
	head_offset = (uintptr_t)addr - region_start;
	region_size = ROUND_UP(size + head_offset, XCHAL_MPU_ALIGN);

	for (size_t pos = 0; pos < region_size; pos += XCHAL_MPU_ALIGN) {
		uint32_t probed = xtensa_pptlb_probe(region_start + pos);
		bool allowed;

		if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
			/* Neither a foreground nor a background entry covers
			 * this chunk, so it is not mapped at all.
			 */
			return false;
		}

		uint8_t rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
				 >> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;

		if (write != 0) {
			/* Kernel must be able to write this chunk. */
			switch (rights) {
			case XTENSA_MPU_ACCESS_P_RW_U_NA:
			case XTENSA_MPU_ACCESS_P_RWX_U_NA:
			case XTENSA_MPU_ACCESS_P_WO_U_WO:
			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
			case XTENSA_MPU_ACCESS_P_RW_U_RO:
			case XTENSA_MPU_ACCESS_P_RWX_U_RX:
			case XTENSA_MPU_ACCESS_P_RW_U_RW:
			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
				allowed = true;
				break;
			default:
				allowed = false;
				break;
			}
		} else {
			/* Kernel only needs to read this chunk. */
			switch (rights) {
			case XTENSA_MPU_ACCESS_P_RO_U_NA:
			case XTENSA_MPU_ACCESS_P_RX_U_NA:
			case XTENSA_MPU_ACCESS_P_RW_U_NA:
			case XTENSA_MPU_ACCESS_P_RWX_U_NA:
			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
			case XTENSA_MPU_ACCESS_P_RW_U_RO:
			case XTENSA_MPU_ACCESS_P_RWX_U_RX:
			case XTENSA_MPU_ACCESS_P_RO_U_RO:
			case XTENSA_MPU_ACCESS_P_RX_U_RX:
			case XTENSA_MPU_ACCESS_P_RW_U_RW:
			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
				allowed = true;
				break;
			default:
				allowed = false;
				break;
			}
		}

		if (!allowed) {
			return false;
		}
	}

	return true;
}


void xtensa_user_stack_perms(struct k_thread *thread)
{
int ret;
Expand Down
13 changes: 11 additions & 2 deletions arch/xtensa/core/ptables.c
Original file line number Diff line number Diff line change
Expand Up @@ -1076,14 +1076,13 @@ static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool w
return true;
}

int arch_buffer_validate(const void *addr, size_t size, int write)
static int mem_buffer_validate(const void *addr, size_t size, int write, int ring)
{
int ret = 0;
uint8_t *virt;
size_t aligned_size;
const struct k_thread *thread = _current;
uint32_t *ptables = thread_page_tables_get(thread);
uint8_t ring = XTENSA_MMU_USER_RING;

/* addr/size arbitrary, fix this up into an aligned region */
k_mem_region_align((uintptr_t *)&virt, &aligned_size,
Expand All @@ -1100,6 +1099,16 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
return ret;
}

/* Check whether kernel threads can read (and optionally write) the
 * given region, by validating it against the kernel ring in the
 * current page tables.
 */
bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
{
	int rc = mem_buffer_validate(addr, size, write, XTENSA_MMU_KERNEL_RING);

	return rc == 0;
}

/* Validate the region against the user ring, as required by the
 * arch_buffer_validate() userspace API.
 */
int arch_buffer_validate(const void *addr, size_t size, int write)
{
	int rc = mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING);

	return rc;
}

void xtensa_swap_update_page_tables(struct k_thread *incoming)
{
uint32_t *ptables = incoming->arch.ptables;
Expand Down
19 changes: 19 additions & 0 deletions arch/xtensa/include/xtensa_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,25 @@ void xtensa_userspace_enter(k_thread_entry_t user_entry,
uintptr_t stack_end,
uintptr_t stack_start);

/**
* @brief Check if kernel threads have access to a memory region.
*
* Given a memory region, return whether the current memory management
* hardware configuration would allow kernel threads to read/write
* that region.
*
* This is mainly used to make sure kernel has access to avoid relying
* on page fault to detect invalid mappings.
*
* @param addr Start address of the buffer
* @param size Size of the buffer
* @param write If non-zero, additionally check if the area is writable.
* Otherwise, just check if the memory can be read.
*
* @return True if kernel threads can access the region with the
*         requested permissions, false otherwise.
*/
bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write);

/**
* @}
*/
Expand Down
10 changes: 10 additions & 0 deletions arch/xtensa/include/xtensa_mpu_priv.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,16 @@
#define XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_MASK \
(0x1FFU << XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_SHIFT)

/** Bit mask for foreground entry returned by probing. */
#define XTENSA_MPU_PROBE_IS_FG_ENTRY_MASK BIT(31)

/** Bit mask for background entry returned by probing. */
#define XTENSA_MPU_PROBE_IS_BG_ENTRY_MASK BIT(30)

/** Bit mask used to determine if entry is valid returned by probing. */
#define XTENSA_MPU_PROBE_VALID_ENTRY_MASK \
(XTENSA_MPU_PROBE_IS_FG_ENTRY_MASK | XTENSA_MPU_PROBE_IS_BG_ENTRY_MASK)

/**
* @}
*/
Expand Down

0 comments on commit 79939e3

Please sign in to comment.