Skip to content

Commit

Permalink
arm: mmu: Add function to try adding new section mappings
Browse files Browse the repository at this point in the history
At the moment the MMU is usually used with fixed mappings. In some use
cases it may be necessary to dynamically map additional memory at
runtime. This should be done without affecting the existing mappings
(e.g. existing read-only executable memory should not accidentally
get mapped read-write).

Add an arm_mmu_try_map_sections() function that implements this by
checking the existing mappings for mismatches, adding the missing
mappings and finally flushing the TLB and cache for the translation
table.
  • Loading branch information
stephan-gh committed Aug 19, 2022
1 parent 84b7aa1 commit cb75f3b
Show file tree
Hide file tree
Showing 2 changed files with 58 additions and 6 deletions.
2 changes: 2 additions & 0 deletions arch/arm/include/arch/arm/mmu.h
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,8 @@ void arm_mmu_map_entry(mmu_section_t *entry);
#endif

void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags);
bool arm_mmu_try_map_sections(addr_t paddr, addr_t vaddr, uint size, uint flags);
void arm_mmu_flush(void);
uint64_t virtual_to_physical_mapping(uint32_t vaddr);
uint32_t physical_to_virtual_mapping(uint64_t paddr);

Expand Down
62 changes: 56 additions & 6 deletions arch/arm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
#include <arch.h>
#include <arch/arm.h>
#include <arch/defines.h>
#include <arch/ops.h>
#include <arch/arm/mmu.h>
#include <platform.h>

Expand All @@ -44,19 +45,25 @@ static uint32_t *tt = (void *)MMU_TRANSLATION_TABLE_ADDR;
static uint32_t tt[4096] __ALIGNED(16384);
#endif

/*
 * Build a first-level "section" descriptor mapping the 1 MB region that
 * contains paddr.
 *
 * Descriptor layout (ARM short-descriptor format):
 *   bits [1:0] = 2  -> section entry
 *   bits [8:5] = 0  -> domain 0
 *   flags           -> caller-provided TEX, C/B and AP bits
 */
static inline uint32_t arm_mmu_section_desc(addr_t paddr, uint flags)
{
    uint32_t desc = paddr & ~(MB - 1);  /* section-aligned base address */

    desc |= (2 << 0);   /* section entry */
    desc |= (0 << 5);   /* domain 0 */
    desc |= flags;      /* TEX/CB/AP from the caller */

    return desc;
}

/*
 * Map a single 1 MB section: vaddr -> paddr with the given TEX/CB/AP
 * flags. Overwrites any existing entry for vaddr and invalidates the
 * TLB afterwards.
 */
void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags)
{
    /* Each first-level entry covers one megabyte of virtual space */
    uint index = vaddr / MB;

    /* Write the section descriptor for this virtual address */
    tt[index] = arm_mmu_section_desc(paddr, flags);

    arm_invalidate_tlb();
}
Expand Down Expand Up @@ -107,4 +114,47 @@ void arch_disable_mmu(void)
arm_invalidate_tlb();
}

/*
 * Try to map [paddr, paddr + size) at vaddr using 1 MB sections, without
 * disturbing any existing mappings.
 *
 * Returns false if size is 0, if paddr and vaddr have different offsets
 * within their section, if the range does not fit in the first-level
 * table, or if any section in the range is already mapped differently
 * (e.g. with other permissions). Returns true if the range is already
 * fully mapped as requested, or after adding the missing entries and
 * flushing the translation table.
 */
bool arm_mmu_try_map_sections(addr_t paddr, addr_t vaddr, uint size, uint flags)
{
    /*
     * Number of 1 MB sections needed, rounding up and accounting for the
     * offset of paddr within its first section. Computed as
     * size/MB + ceil((size%MB + offset) / MB) so the intermediate sum
     * cannot overflow (each term is < MB), unlike the naive
     * (size + offset + MB - 1) / MB which wraps for large sizes.
     */
    uint mb = size / MB + (size % MB + paddr % MB + MB - 1) / MB;
    uint i, index = vaddr / MB;
    bool fully_mapped = true;

    /* Offset within the mapped section must be equal */
    if (size == 0 || (paddr % MB) != (vaddr % MB))
        return false;

    /*
     * Reject ranges that would run past the end of the 4096-entry
     * first-level table (4 GiB of 1 MB sections); this also catches
     * vaddr + size wrapping around the address space.
     */
    if (index >= 4096 || mb > 4096 - index)
        return false;

    /* Check if any existing mappings conflict */
    for (i = 0; i < mb; ++i) {
        uint32_t desc = arm_mmu_section_desc(paddr + i * MB, flags);
        if (!tt[index + i]) {
            /* Unmapped entry: we will need to fill it in below */
            fully_mapped = false;
            continue;
        }
        if (tt[index + i] != desc) {
            /* Already mapped, but differently — refuse to change it */
            dprintf(CRITICAL, "MMU mapping mismatch @ %#08x: %#08x != %#08x\n",
                    (index + i) * MB, tt[index + i], desc);
            return false;
        }
    }
    if (fully_mapped)
        return true;    /* nothing to do, and nothing to flush */

    /* Add the new mappings */
    for (i = 0; i < mb; ++i)
        tt[index + i] = arm_mmu_section_desc(paddr + i * MB, flags);
    arm_mmu_flush();
    return true;
}

void arm_mmu_flush(void)
{
arch_clean_cache_range((vaddr_t)&tt, sizeof(tt));
dsb();
arm_invalidate_tlb();
dsb();
isb();
}

#endif // ARM_WITH_MMU

0 comments on commit cb75f3b

Please sign in to comment.