diff --git a/kernel/arch/arm64/include/arch/arm64/mmu.h b/kernel/arch/arm64/include/arch/arm64/mmu.h
index 4898b01f3c..0ddc125a1f 100644
--- a/kernel/arch/arm64/include/arch/arm64/mmu.h
+++ b/kernel/arch/arm64/include/arch/arm64/mmu.h
@@ -236,6 +236,8 @@
 #define MMU_S2_PTE_ATTR_ATTR_INDEX_MASK BM(2, 4, 0xf)
 /* Normal, Outer Write-Back Cacheable, Inner Write-Back Cacheable. */
 #define MMU_S2_PTE_ATTR_NORMAL_MEMORY BM(2, 4, 0xf)
+/* Normal, Outer Non-cacheable, Inner Non-cacheable. */
+#define MMU_S2_PTE_ATTR_NORMAL_UNCACHED BM(2, 4, 0x5)
 /* Device, Device-nGnRnE memory. */
 #define MMU_S2_PTE_ATTR_STRONGLY_ORDERED BM(2, 4, 0x0)
 /* Device, Device-nGnRE memory. */
diff --git a/kernel/arch/arm64/mmu.cpp b/kernel/arch/arm64/mmu.cpp
index b9986a2a36..aa5c2b9951 100644
--- a/kernel/arch/arm64/mmu.cpp
+++ b/kernel/arch/arm64/mmu.cpp
@@ -157,9 +157,7 @@ static pte_t mmu_flags_to_s1_pte_attr(uint flags) {
         attr |= MMU_PTE_ATTR_DEVICE;
         break;
     default:
-        // Invalid user-supplied flag.
-        DEBUG_ASSERT(false);
-        return ZX_ERR_INVALID_ARGS;
+        PANIC_UNIMPLEMENTED;
     }
 
     switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_WRITE)) {
@@ -180,7 +178,6 @@ static pte_t mmu_flags_to_s1_pte_attr(uint flags) {
     if (!(flags & ARCH_MMU_FLAG_PERM_EXECUTE)) {
         attr |= MMU_PTE_ATTR_UXN | MMU_PTE_ATTR_PXN;
     }
-
     if (flags & ARCH_MMU_FLAG_NS) {
         attr |= MMU_PTE_ATTR_NON_SECURE;
     }
@@ -200,10 +197,12 @@ static void s1_pte_attr_to_mmu_flags(pte_t pte, uint* mmu_flags) {
         *mmu_flags |= ARCH_MMU_FLAG_WRITE_COMBINING;
         break;
     case MMU_PTE_ATTR_NORMAL_MEMORY:
+        *mmu_flags |= ARCH_MMU_FLAG_CACHED;
         break;
     default:
         PANIC_UNIMPLEMENTED;
     }
+
     *mmu_flags |= ARCH_MMU_FLAG_PERM_READ;
     switch (pte & MMU_PTE_ATTR_AP_MASK) {
     case MMU_PTE_ATTR_AP_P_RW_U_NA:
@@ -218,6 +217,7 @@
         *mmu_flags |= ARCH_MMU_FLAG_PERM_USER;
         break;
     }
+
     if (!((pte & MMU_PTE_ATTR_UXN) && (pte & MMU_PTE_ATTR_PXN))) {
         *mmu_flags |= ARCH_MMU_FLAG_PERM_EXECUTE;
     }
@@ -227,16 +227,30 @@
 }
 
 static pte_t mmu_flags_to_s2_pte_attr(uint flags) {
-    DEBUG_ASSERT((flags & ARCH_MMU_FLAG_CACHE_MASK) == ARCH_MMU_FLAG_CACHED);
-    // Only the inner-shareable, normal memory type is supported.
-    pte_t attr = MMU_PTE_ATTR_AF | MMU_PTE_ATTR_SH_INNER_SHAREABLE | MMU_S2_PTE_ATTR_NORMAL_MEMORY;
+    pte_t attr = MMU_PTE_ATTR_AF;
+
+    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
+    case ARCH_MMU_FLAG_CACHED:
+        attr |= MMU_S2_PTE_ATTR_NORMAL_MEMORY | MMU_PTE_ATTR_SH_INNER_SHAREABLE;
+        break;
+    case ARCH_MMU_FLAG_WRITE_COMBINING:
+        attr |= MMU_S2_PTE_ATTR_NORMAL_UNCACHED | MMU_PTE_ATTR_SH_INNER_SHAREABLE;
+        break;
+    case ARCH_MMU_FLAG_UNCACHED:
+        attr |= MMU_S2_PTE_ATTR_STRONGLY_ORDERED;
+        break;
+    case ARCH_MMU_FLAG_UNCACHED_DEVICE:
+        attr |= MMU_S2_PTE_ATTR_DEVICE;
+        break;
+    default:
+        PANIC_UNIMPLEMENTED;
+    }
 
     if (flags & ARCH_MMU_FLAG_PERM_WRITE) {
         attr |= MMU_S2_PTE_ATTR_S2AP_RW;
     } else {
         attr |= MMU_S2_PTE_ATTR_S2AP_RO;
     }
-
     if (!(flags & ARCH_MMU_FLAG_PERM_EXECUTE)) {
         attr |= MMU_S2_PTE_ATTR_XN;
     }
@@ -245,8 +259,20 @@ static pte_t mmu_flags_to_s2_pte_attr(uint flags) {
 }
 
 static void s2_pte_attr_to_mmu_flags(pte_t pte, uint* mmu_flags) {
-    // Only the inner-shareable, normal memory type is supported.
-    if ((pte & MMU_S2_PTE_ATTR_ATTR_INDEX_MASK) != MMU_S2_PTE_ATTR_NORMAL_MEMORY) {
+    switch (pte & MMU_S2_PTE_ATTR_ATTR_INDEX_MASK) {
+    case MMU_S2_PTE_ATTR_STRONGLY_ORDERED:
+        *mmu_flags |= ARCH_MMU_FLAG_UNCACHED;
+        break;
+    case MMU_S2_PTE_ATTR_DEVICE:
+        *mmu_flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
+        break;
+    case MMU_S2_PTE_ATTR_NORMAL_UNCACHED:
+        *mmu_flags |= ARCH_MMU_FLAG_WRITE_COMBINING;
+        break;
+    case MMU_S2_PTE_ATTR_NORMAL_MEMORY:
+        *mmu_flags |= ARCH_MMU_FLAG_CACHED;
+        break;
+    default:
         PANIC_UNIMPLEMENTED;
     }
 
diff --git a/kernel/arch/x86/hypervisor/vcpu.cpp b/kernel/arch/x86/hypervisor/vcpu.cpp
index dfc09dbb0f..0df1ef6a2b 100644
--- a/kernel/arch/x86/hypervisor/vcpu.cpp
+++ b/kernel/arch/x86/hypervisor/vcpu.cpp
@@ -238,7 +238,7 @@ static uint64_t ept_pointer(paddr_t pml4_address) {
     return
         // Physical address of the PML4 page, page aligned.
         pml4_address |
-        // Use write back memory.
+        // Use write-back memory type for paging structures.
         VMX_MEMORY_TYPE_WRITE_BACK << 0 |
         // Page walk length of 4 (defined as N minus 1).
         3u << 3;
diff --git a/kernel/arch/x86/include/arch/x86/mmu.h b/kernel/arch/x86/include/arch/x86/mmu.h
index db335f7c43..4fed4eb3c4 100644
--- a/kernel/arch/x86/include/arch/x86/mmu.h
+++ b/kernel/arch/x86/include/arch/x86/mmu.h
@@ -13,23 +13,27 @@
 /* top level defines for the x86 mmu */
 /* NOTE: the top part can be included from assembly */
 
-#define X86_EPT_R (1u << 0) /* R    Read */
-#define X86_EPT_W (1u << 1) /* W    Write */
-#define X86_EPT_X (1u << 2) /* X    Execute */
-#define X86_EPT_A (1u << 8) /* A    Accessed */
-#define X86_EPT_D (1u << 9) /* D    Dirty */
+#define X86_EPT_R (1u << 0)              /* R    Read */
+#define X86_EPT_W (1u << 1)              /* W    Write */
+#define X86_EPT_X (1u << 2)              /* X    Execute */
+#define X86_EPT_A (1u << 8)              /* A    Accessed */
+#define X86_EPT_D (1u << 9)              /* D    Dirty */
 
 /* From Volume 3, Section 28.2.6: EPT and Memory Typing */
-#define X86_EPT_WB (6u << 3) /* WB   Write-back memory type */
+#define X86_EPT_MEMORY_TYPE_MASK (7u << 3)
+#define X86_EPT_UC (0u << 3)             /* UC   Uncached memory type */
+#define X86_EPT_WC (1u << 3)             /* WC   Write-combining memory type */
+#define X86_EPT_WT (4u << 3)             /* WT   Write-through memory type */
+#define X86_EPT_WP (5u << 3)             /* WP   Write-protected memory type */
+#define X86_EPT_WB (6u << 3)             /* WB   Write-back memory type */
 
 /* Page Attribute Table memory types, defined in Table 11-10 of Intel 3A */
-#define X86_PAT_UC 0x00 /* Uncached */
-#define X86_PAT_WC 0x01 /* Write-combining */
-#define X86_PAT_WT 0x04 /* Write-through */
-#define X86_PAT_WP 0x05 /* Write protected */
-#define X86_PAT_WB 0X06 /* Write-back */
-#define X86_PAT_UC_ 0x07 /* Weakly Uncached (can be overrided by a
-                          * WC MTRR setting) */
+#define X86_PAT_UC 0x00  /* Uncached */
+#define X86_PAT_WC 0x01  /* Write-combining */
+#define X86_PAT_WT 0x04  /* Write-through */
+#define X86_PAT_WP 0x05  /* Write-protected */
+#define X86_PAT_WB 0x06  /* Write-back */
+#define X86_PAT_UC_ 0x07 /* Weakly Uncached (can be overridden by a WC MTRR setting) */
 
 /* Our configuration for the PAT indexes. This must be kept in sync with the
  * selector definitions below it. For safety, it is important to ensure that
diff --git a/kernel/arch/x86/mmu.cpp b/kernel/arch/x86/mmu.cpp
index ba564401d0..ac2db2750c 100644
--- a/kernel/arch/x86/mmu.cpp
+++ b/kernel/arch/x86/mmu.cpp
@@ -239,18 +239,18 @@ X86PageTableBase::PtFlags X86PageTableMmu::terminal_flags(PageTableLevel level,
                                                           uint flags) {
     X86PageTableBase::PtFlags terminal_flags = 0;
 
-    if (flags & ARCH_MMU_FLAG_PERM_WRITE)
+    if (flags & ARCH_MMU_FLAG_PERM_WRITE) {
         terminal_flags |= X86_MMU_PG_RW;
-
-    if (flags & ARCH_MMU_FLAG_PERM_USER)
+    }
+    if (flags & ARCH_MMU_FLAG_PERM_USER) {
         terminal_flags |= X86_MMU_PG_U;
-
+    }
     if (use_global_mappings_) {
         terminal_flags |= X86_MMU_PG_G;
     }
-
-    if (!(flags & ARCH_MMU_FLAG_PERM_EXECUTE))
+    if (!(flags & ARCH_MMU_FLAG_PERM_EXECUTE)) {
         terminal_flags |= X86_MMU_PG_NX;
+    }
 
     if (level > 0) {
         switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
@@ -313,14 +313,15 @@ void X86PageTableMmu::TlbInvalidate(PendingTlbInvalidation* pending) {
 uint X86PageTableMmu::pt_flags_to_mmu_flags(PtFlags flags, PageTableLevel level) {
     uint mmu_flags = ARCH_MMU_FLAG_PERM_READ;
 
-    if (flags & X86_MMU_PG_RW)
+    if (flags & X86_MMU_PG_RW) {
         mmu_flags |= ARCH_MMU_FLAG_PERM_WRITE;
-
-    if (flags & X86_MMU_PG_U)
+    }
+    if (flags & X86_MMU_PG_U) {
         mmu_flags |= ARCH_MMU_FLAG_PERM_USER;
-
-    if (!(flags & X86_MMU_PG_NX))
+    }
+    if (!(flags & X86_MMU_PG_NX)) {
         mmu_flags |= ARCH_MMU_FLAG_PERM_EXECUTE;
+    }
 
     if (level > 0) {
         switch (flags & X86_MMU_LARGE_PAT_MASK) {
@@ -392,18 +393,32 @@ X86PageTableBase::PtFlags X86PageTableEpt::intermediate_flags() {
 X86PageTableBase::PtFlags X86PageTableEpt::terminal_flags(PageTableLevel level,
                                                           uint flags) {
-    DEBUG_ASSERT((flags & ARCH_MMU_FLAG_CACHE_MASK) == ARCH_MMU_FLAG_CACHED);
-    // Only the write-back memory type is supported.
-    X86PageTableBase::PtFlags terminal_flags = X86_EPT_WB;
+    X86PageTableBase::PtFlags terminal_flags = 0;
 
-    if (flags & ARCH_MMU_FLAG_PERM_READ)
+    if (flags & ARCH_MMU_FLAG_PERM_READ) {
         terminal_flags |= X86_EPT_R;
-
-    if (flags & ARCH_MMU_FLAG_PERM_WRITE)
+    }
+    if (flags & ARCH_MMU_FLAG_PERM_WRITE) {
         terminal_flags |= X86_EPT_W;
-
-    if (flags & ARCH_MMU_FLAG_PERM_EXECUTE)
+    }
+    if (flags & ARCH_MMU_FLAG_PERM_EXECUTE) {
         terminal_flags |= X86_EPT_X;
+    }
+
+    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
+    case ARCH_MMU_FLAG_CACHED:
+        terminal_flags |= X86_EPT_WB;
+        break;
+    case ARCH_MMU_FLAG_UNCACHED_DEVICE:
+    case ARCH_MMU_FLAG_UNCACHED:
+        terminal_flags |= X86_EPT_UC;
+        break;
+    case ARCH_MMU_FLAG_WRITE_COMBINING:
+        terminal_flags |= X86_EPT_WC;
+        break;
+    default:
+        PANIC_UNIMPLEMENTED;
+    }
 
     return terminal_flags;
 }
 
@@ -422,17 +437,31 @@ void X86PageTableEpt::TlbInvalidate(PendingTlbInvalidation* pending) {
 uint X86PageTableEpt::pt_flags_to_mmu_flags(PtFlags flags, PageTableLevel level) {
-    // Only the write-back memory type is supported.
-    uint mmu_flags = ARCH_MMU_FLAG_CACHED;
+    uint mmu_flags = 0;
 
-    if (flags & X86_EPT_R)
+    if (flags & X86_EPT_R) {
         mmu_flags |= ARCH_MMU_FLAG_PERM_READ;
-
-    if (flags & X86_EPT_W)
+    }
+    if (flags & X86_EPT_W) {
         mmu_flags |= ARCH_MMU_FLAG_PERM_WRITE;
-
-    if (flags & X86_EPT_X)
+    }
+    if (flags & X86_EPT_X) {
         mmu_flags |= ARCH_MMU_FLAG_PERM_EXECUTE;
+    }
+
+    switch (flags & X86_EPT_MEMORY_TYPE_MASK) {
+    case X86_EPT_WB:
+        mmu_flags |= ARCH_MMU_FLAG_CACHED;
+        break;
+    case X86_EPT_UC:
+        mmu_flags |= ARCH_MMU_FLAG_UNCACHED;
+        break;
+    case X86_EPT_WC:
+        mmu_flags |= ARCH_MMU_FLAG_WRITE_COMBINING;
+        break;
+    default:
+        PANIC_UNIMPLEMENTED;
+    }
 
     return mmu_flags;
 }
diff --git a/kernel/lib/hypervisor/hypervisor_unittest.cpp b/kernel/lib/hypervisor/hypervisor_unittest.cpp
index 10f4c431ea..a5106df2cb 100644
--- a/kernel/lib/hypervisor/hypervisor_unittest.cpp
+++ b/kernel/lib/hypervisor/hypervisor_unittest.cpp
@@ -35,20 +35,19 @@ static zx_status_t get_paddr(void* context, size_t offset, size_t index, paddr_t
     return ZX_OK;
 }
 
-static zx_status_t create_vmo(size_t vmo_size, fbl::RefPtr<VmObject>* vmo_out) {
-    fbl::RefPtr<VmObject> vmo;
-    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, vmo_size, &vmo);
-    if (status != ZX_OK)
-        return status;
+static zx_status_t create_vmo(size_t vmo_size, fbl::RefPtr<VmObject>* vmo) {
+    return VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, vmo_size, vmo);
+}
 
+static zx_status_t commit_vmo(VmObject* vmo) {
     uint64_t committed = 0;
-    status = vmo->CommitRange(0, vmo->size(), &committed);
-    if (status != ZX_OK)
+    zx_status_t status = vmo->CommitRange(0, vmo->size(), &committed);
+    if (status != ZX_OK) {
         return status;
-    if (committed != vmo->size())
+    }
+    if (committed != vmo->size()) {
         return ZX_ERR_BAD_STATE;
-
-    *vmo_out = vmo;
+    }
     return ZX_OK;
 }
 
@@ -75,10 +74,10 @@ static bool guest_physical_address_space_unmap_range() {
         return true;
     }
 
-    // Setup
+    // Setup.
     fbl::RefPtr<VmObject> vmo;
     zx_status_t status = create_vmo(PAGE_SIZE, &vmo);
-    EXPECT_EQ(ZX_OK, status, "Failed to setup vmo.\n");
+    EXPECT_EQ(ZX_OK, status, "Failed to setup VMO.\n");
     fbl::unique_ptr<GuestPhysicalAddressSpace> gpas;
     status = create_gpas(vmo, &gpas);
     EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace.\n");
@@ -88,10 +87,11 @@
     EXPECT_EQ(ZX_OK, status, "Failed to unmap page from GuestPhysicalAddressSpace.\n");
 
     // Verify GetPage for unmapped address fails.
-    paddr_t gpas_paddr;
+    zx_paddr_t gpas_paddr;
     status = gpas->GetPage(0, &gpas_paddr);
     EXPECT_EQ(ZX_ERR_NOT_FOUND, status,
               "GetPage returning unexpected value for unmapped address.\n");
+
     END_TEST;
 }
 
@@ -102,16 +102,20 @@ static bool guest_physical_address_space_get_page_not_present() {
         return true;
     }
 
-    // Setup
+    // Setup.
     fbl::RefPtr<VmObject> vmo;
     zx_status_t status = create_vmo(PAGE_SIZE, &vmo);
-    EXPECT_EQ(ZX_OK, status, "Failed to setup vmo.\n");
+    EXPECT_EQ(ZX_OK, status, "Failed to setup VMO.\n");
     fbl::unique_ptr<GuestPhysicalAddressSpace> gpas;
     status = create_gpas(vmo, &gpas);
     EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace.\n");
 
+    // Commit VMO.
+    status = commit_vmo(vmo.get());
+    EXPECT_EQ(ZX_OK, status, "Failed to commit VMO.\n");
+
     // Query unmapped address.
-    paddr_t gpas_paddr = 0;
+    zx_paddr_t gpas_paddr = 0;
     status = gpas->GetPage(UINTPTR_MAX, &gpas_paddr);
     EXPECT_EQ(ZX_ERR_NOT_FOUND, status,
               "GetPage returning unexpected value for unmapped address.\n");
@@ -126,22 +130,26 @@ static bool guest_physical_address_space_get_page() {
         return true;
     }
 
-    // Setup
+    // Setup.
     fbl::RefPtr<VmObject> vmo;
     zx_status_t status = create_vmo(PAGE_SIZE, &vmo);
-    EXPECT_EQ(ZX_OK, status, "Failed to setup vmo.\n");
+    EXPECT_EQ(ZX_OK, status, "Failed to setup VMO.\n");
     fbl::unique_ptr<GuestPhysicalAddressSpace> gpas;
     status = create_gpas(vmo, &gpas);
     EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace.\n");
 
+    // Commit VMO.
+    status = commit_vmo(vmo.get());
+    EXPECT_EQ(ZX_OK, status, "Failed to commit VMO.\n");
+
     // Read expected physical address from the VMO.
-    paddr_t vmo_paddr = 0;
+    zx_paddr_t vmo_paddr = 0;
     status = vmo->Lookup(0, PAGE_SIZE, 0, get_paddr, &vmo_paddr);
     EXPECT_EQ(ZX_OK, status, "Failed to lookup physical address of VMO.\n");
     EXPECT_NE(0u, vmo_paddr, "Failed to lookup physical address of VMO.\n");
 
     // Read physical address from GPAS & compare with address read from VMO.
-    paddr_t gpas_paddr = 0;
+    zx_paddr_t gpas_paddr = 0;
     status = gpas->GetPage(0, &gpas_paddr);
     EXPECT_EQ(ZX_OK, status, "Failed to read page from GuestPhysicalAddressSpace.\n");
     EXPECT_EQ(vmo_paddr, gpas_paddr,
@@ -177,14 +185,18 @@ static bool guest_physical_address_space_get_page_complex() {
     const uint ROOT_VMO_SIZE = PAGE_SIZE;
     const uint SECOND_VMO_SIZE = PAGE_SIZE;
 
-    // Setup
+    // Setup.
     fbl::RefPtr<VmObject> vmo;
     zx_status_t status = create_vmo(ROOT_VMO_SIZE, &vmo);
-    EXPECT_EQ(ZX_OK, status, "Failed to setup vmo.\n");
+    EXPECT_EQ(ZX_OK, status, "Failed to setup VMO.\n");
     fbl::unique_ptr<GuestPhysicalAddressSpace> gpas;
     status = create_gpas(vmo, &gpas);
     EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace.\n");
 
+    // Commit VMO.
+    status = commit_vmo(vmo.get());
+    EXPECT_EQ(ZX_OK, status, "Failed to commit VMO.\n");
+
     // Allocate second VMAR, offset one page into the root.
     fbl::RefPtr<VmAddressRegion> root_vmar = gpas->RootVmar();
     fbl::RefPtr<VmAddressRegion> shadow_vmar;
@@ -198,6 +210,10 @@
     status = create_vmo(SECOND_VMO_SIZE, &vmo2);
     EXPECT_EQ(ZX_OK, status, "Failed allocate second VMO.\n");
 
+    // Commit second VMO.
+    status = commit_vmo(vmo2.get());
+    EXPECT_EQ(ZX_OK, status, "Failed to commit second VMO.\n");
+
     // Map second VMO into second VMAR.
     fbl::RefPtr<VmMapping> mapping;
     uint mmu_flags =
@@ -205,16 +221,16 @@
     status = shadow_vmar->CreateVmMapping(
         /* mapping_offset */ 0, vmo2->size(), /* align_pow2 */ 0, VMAR_FLAG_SPECIFIC, vmo2,
         /* vmar_offset */ 0, mmu_flags, "vmo2", &mapping);
-    EXPECT_EQ(ZX_OK, status, "Failed to map vmo into shadow vmar.");
+    EXPECT_EQ(ZX_OK, status, "Failed to map vmo into shadow vmar.\n");
 
     // Read expected physical address from the VMO.
-    paddr_t vmo_paddr = 0;
+    zx_paddr_t vmo_paddr = 0;
     status = vmo2->Lookup(0, PAGE_SIZE, 0, get_paddr, &vmo_paddr);
     EXPECT_EQ(ZX_OK, status, "Failed to lookup physical address of VMO.\n");
     EXPECT_NE(0u, vmo_paddr, "Failed to lookup physical address of VMO.\n");
 
     // Read physical address from GPAS.
-    paddr_t gpas_paddr = 0;
+    zx_paddr_t gpas_paddr = 0;
     status = gpas->GetPage(ROOT_VMO_SIZE, &gpas_paddr);
     EXPECT_EQ(ZX_OK, status, "Failed to read page from GuestPhysicalAddressSpace.\n");
     EXPECT_EQ(vmo_paddr, gpas_paddr,
@@ -255,13 +271,79 @@ static bool guest_physical_address_space_map_interrupt_controller() {
     END_TEST;
 }
 
+static bool guest_physical_address_space_uncached() {
+    BEGIN_TEST;
+
+    if (!hypervisor_supported()) {
+        return true;
+    }
+
+    // Setup.
+    fbl::RefPtr<VmObject> vmo;
+    zx_status_t status = create_vmo(PAGE_SIZE, &vmo);
+    EXPECT_EQ(ZX_OK, status, "Failed to setup VMO.\n");
+    status = vmo->SetMappingCachePolicy(ZX_CACHE_POLICY_UNCACHED);
+    EXPECT_EQ(ZX_OK, status, "Failed to set cache policy.\n");
+
+    fbl::unique_ptr<GuestPhysicalAddressSpace> gpas;
+    status = create_gpas(vmo, &gpas);
+    EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace.\n");
+
+    END_TEST;
+}
+
+static bool guest_physical_address_space_uncached_device() {
+    BEGIN_TEST;
+
+    if (!hypervisor_supported()) {
+        return true;
+    }
+
+    // Setup.
+    fbl::RefPtr<VmObject> vmo;
+    zx_status_t status = create_vmo(PAGE_SIZE, &vmo);
+    EXPECT_EQ(ZX_OK, status, "Failed to setup VMO.\n");
+    status = vmo->SetMappingCachePolicy(ZX_CACHE_POLICY_UNCACHED_DEVICE);
+    EXPECT_EQ(ZX_OK, status, "Failed to set cache policy.\n");
+
+    fbl::unique_ptr<GuestPhysicalAddressSpace> gpas;
+    status = create_gpas(vmo, &gpas);
+    EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace.\n");
+
+    END_TEST;
+}
+
+static bool guest_physical_address_space_write_combining() {
+    BEGIN_TEST;
+
+    if (!hypervisor_supported()) {
+        return true;
+    }
+
+    // Setup.
+    fbl::RefPtr<VmObject> vmo;
+    zx_status_t status = create_vmo(PAGE_SIZE, &vmo);
+    EXPECT_EQ(ZX_OK, status, "Failed to setup VMO.\n");
+    status = vmo->SetMappingCachePolicy(ZX_CACHE_POLICY_WRITE_COMBINING);
+    EXPECT_EQ(ZX_OK, status, "Failed to set cache policy.\n");
+
+    fbl::unique_ptr<GuestPhysicalAddressSpace> gpas;
+    status = create_gpas(vmo, &gpas);
+    EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace.\n");
+
+    END_TEST;
+}
+
 // Use the function name as the test name
 #define HYPERVISOR_UNITTEST(fname) UNITTEST(#fname, fname)
 
-UNITTEST_START_TESTCASE(hypervisor_tests)
+UNITTEST_START_TESTCASE(hypervisor)
 HYPERVISOR_UNITTEST(guest_physical_address_space_unmap_range)
 HYPERVISOR_UNITTEST(guest_physical_address_space_get_page)
 HYPERVISOR_UNITTEST(guest_physical_address_space_get_page_complex)
 HYPERVISOR_UNITTEST(guest_physical_address_space_get_page_not_present)
 HYPERVISOR_UNITTEST(guest_physical_address_space_map_interrupt_controller)
-UNITTEST_END_TESTCASE(hypervisor_tests, "hypervisor_tests", "Hypervisor unit tests.");
+HYPERVISOR_UNITTEST(guest_physical_address_space_uncached)
+HYPERVISOR_UNITTEST(guest_physical_address_space_uncached_device)
+HYPERVISOR_UNITTEST(guest_physical_address_space_write_combining)
+UNITTEST_END_TESTCASE(hypervisor, "hypervisor", "Hypervisor unit tests.");