Support stand-alone memory mappings
Android Emulator may dynamically create and destroy temporary
memory mappings at guest runtime for certain rendering tasks via
hax_user_backed_ram_map() and hax_user_backed_ram_unmap()
($AOSP/external/qemu/target/i386/hax-mem.c), e.g.:

 hax_user_backed_ram_map() <1>
 1.1) ADD_RAMBLOCK <1>:     HVA 0x14e070000..0x16e070000
 1.2) SET_RAM2 (map) <1>:   GPA 0x7dffff000..0x7fffff000 =>
                            HVA 0x14e070000..0x16e070000
 hax_user_backed_ram_unmap() <1>
 1.3) SET_RAM2 (unmap) <1>: GPA 0x7dffff000..0x7fffff000
 hax_user_backed_ram_map() <2>
 2.1) ADD_RAMBLOCK <2>:     HVA 0x14de70000..0x16de70000

The second ADD_RAMBLOCK call fails, because its HVA range overlaps
with that of the first ADD_RAMBLOCK call.

The problem is that the "map" step creates a RAM block, but the
"unmap" step doesn't destroy it. Instead of adding a DEL_RAMBLOCK
ioctl, simply exempt the caller from calling ADD_RAMBLOCK in the
first place:

 - Introduce a new hax_memslot flag for "stand-alone" mappings,
   along with a new capability flag for this API change.
 - Remove the ADD_RAMBLOCK call from hax_user_backed_ram_map().
   Instead, call SET_RAM2 with the new flag. (This will be done on
   the Android Emulator side; see the sketch after this list.)
 - Internally, SET_RAM2 creates a stand-alone RAM block for each
   stand-alone mapping.
 - When the stand-alone mapping is unmapped, the reference count
   of the corresponding stand-alone RAM block will hit 0, which
   allows SET_RAM2 to destroy this temporary RAM block.

+ Replace HAX_RAM_INFO_xxx with HAX_MEMSLOT_xxx in code that does
  not interface directly with user space.
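
For illustration, a minimal user-space sketch of the new flow. The struct
layout and ioctl names follow docs/api.md; hax_sys_ioctl() and hax_vm_ioctl()
are hypothetical stand-ins for the platform-specific ioctl path, and the
helper names are illustrative, not part of this commit:

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "hax_interface.h"

/* Hypothetical wrappers around the platform-specific ioctl mechanism. */
extern int hax_sys_ioctl(void *hax_handle, unsigned long cmd, void *arg);
extern int hax_vm_ioctl(void *vm_handle, unsigned long cmd, void *arg);

/* 1. Use stand-alone mappings only if the driver advertises the capability. */
static bool standalone_supported(void *hax_handle)
{
    struct hax_capabilityinfo cap;

    memset(&cap, 0, sizeof(cap));
    if (hax_sys_ioctl(hax_handle, HAX_IOCTL_CAPABILITY, &cap) < 0)
        return false;
    return (cap.winfo & HAX_CAP_IMPLICIT_RAMBLOCK) != 0;
}

/* 2. Map: no HAX_VM_IOCTL_ADD_RAMBLOCK beforehand; SET_RAM2 implicitly
 *    creates a RAM block for the given HVA range. */
static int standalone_map(void *vm_handle, uint64_t gpa, uint64_t hva,
                          uint64_t size)
{
    struct hax_set_ram_info2 info;

    memset(&info, 0, sizeof(info));
    info.pa_start = gpa;
    info.size = size;
    info.va = hva;
    info.flags = HAX_RAM_INFO_STANDALONE;
    return hax_vm_ioctl(vm_handle, HAX_VM_IOCTL_SET_RAM2, &info);
}

/* 3. Unmap: destroying the mapping also destroys the implicit RAM block,
 *    so the HVA range can be reused by a later stand-alone mapping. */
static int standalone_unmap(void *vm_handle, uint64_t gpa, uint64_t size)
{
    struct hax_set_ram_info2 info;

    memset(&info, 0, sizeof(info));
    info.pa_start = gpa;
    info.size = size;
    info.va = 0;                        /* must be 0 when unmapping */
    info.flags = HAX_RAM_INFO_INVALID;  /* must not be combined with others */
    return hax_vm_ioctl(vm_handle, HAX_VM_IOCTL_SET_RAM2, &info);
}
```

On hosts that do not report HAX_CAP_IMPLICIT_RAMBLOCK, the caller keeps the
existing ADD_RAMBLOCK-based path.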

Signed-off-by: Yu Ning <[email protected]>
raphaelning committed Mar 25, 2019
1 parent 7cb307c commit 689ff8f
Showing 7 changed files with 78 additions and 26 deletions.
1 change: 1 addition & 0 deletions core/hax.c
@@ -340,6 +340,7 @@ int hax_get_capability(void *buf, int bufLeng, int *outLength)
cap->winfo |= HAX_CAP_64BIT_RAMBLOCK;
#ifdef CONFIG_HAX_EPT2
cap->winfo |= HAX_CAP_64BIT_SETRAM;
cap->winfo |= HAX_CAP_IMPLICIT_RAMBLOCK;
#endif
cap->winfo |= HAX_CAP_TUNNEL_PAGE;
cap->winfo |= HAX_CAP_RAM_PROTECTION;
11 changes: 8 additions & 3 deletions core/include/memory.h
@@ -55,6 +55,8 @@ typedef struct hax_ramblock {
uint8_t *chunks_bitmap;
// Reference count of this object
int ref_count;
// Whether this RAM block is associated with a stand-alone mapping
bool is_standalone;
// Turns this object into a list node
hax_list_node entry;
} hax_ramblock;
@@ -75,10 +77,13 @@ typedef struct hax_memslot {
} hax_memslot;

// Read-only mapping, == HAX_RAM_INFO_ROM in hax_interface.h
-#define HAX_MEMSLOT_READONLY 0x01
+#define HAX_MEMSLOT_READONLY (1 << 0)
+// Stand-alone mapping, == HAX_RAM_INFO_STANDALONE in hax_interface.h
+#define HAX_MEMSLOT_STANDALONE (1 << 6)

// Unmapped, == HAX_RAM_INFO_INVALID in hax_interface.h
-// Used only by memslot_set_mapping(), not by any hax_memslot
-#define HAX_MEMSLOT_INVALID 0x80
+// Not to be used by hax_memslot::flags
+#define HAX_MEMSLOT_INVALID (1 << 7)

typedef struct hax_gpa_prot {
// A bitmap where each bit represents the protection status of a guest page
4 changes: 2 additions & 2 deletions core/memory.c
@@ -235,15 +235,15 @@ static struct hax_vcpu_mem *get_pmem_range(struct vm_t *vm, uint64_t va)
static int handle_set_ram(struct vm_t *vm, uint64_t start_gpa, uint64_t size,
uint64_t start_uva, uint32_t flags)
{
-bool unmap = flags & HAX_RAM_INFO_INVALID;
+bool unmap = flags & HAX_MEMSLOT_INVALID;
hax_gpa_space *gpa_space;
uint64_t start_gfn, npages;
int ret;
hax_ept_tree *ept_tree;

// HAX_RAM_INFO_INVALID indicates that guest physical address range
// [start_gpa, start_gpa + size) should be unmapped
-if (unmap && (flags != HAX_RAM_INFO_INVALID || start_uva)) {
+if (unmap && (flags != HAX_MEMSLOT_INVALID || start_uva)) {
hax_error("%s: Invalid start_uva=0x%llx or flags=0x%x for unmapping\n",
__func__, start_uva, flags);
return -EINVAL;
35 changes: 25 additions & 10 deletions core/memslot.c
@@ -179,12 +179,28 @@ int memslot_set_mapping(hax_gpa_space *gpa_space, uint64_t start_gfn,

is_valid = memslot_is_valid(flags);
if (is_valid) {
-block = ramblock_find(&gpa_space->ramblock_list, uva, NULL);
+if (flags & HAX_MEMSLOT_STANDALONE) {
+// Create a "disposable" RAM block for this stand-alone mapping
+ret = ramblock_add(&gpa_space->ramblock_list, uva,
+npages << PG_ORDER_4K, NULL, &block);
+if (ret != 0 || block == NULL) {
+hax_error("%s: Failed to create standalone RAM block:"
+"start_gfn=0x%llx, npages=0x%llx, uva=0x%llx\n",
+__func__, start_gfn, npages, uva);
+return ret < 0 ? ret : -EINVAL;
+}

-if (block == NULL) {
-hax_error("%s: Failed to find uva=0x%llx in RAM block\n", __func__,
-uva);
-return -EINVAL;
+block->is_standalone = true;
+// block->ref_count is 0 after ramblock_add(), but we want it to be
+// 1, so as to be consistent with the ramblock_find() case below.
+ramblock_ref(block);
+} else {
+block = ramblock_find(&gpa_space->ramblock_list, uva, NULL);
+if (block == NULL) {
+hax_error("%s: Failed to find uva=0x%llx in RAM block\n",
+__func__, uva);
+return -EINVAL;
+}
}
}
}

@@ -285,11 +301,10 @@ int memslot_set_mapping(hax_gpa_space *gpa_space, uint64_t start_gfn,
mapping_broadcast(&gpa_space->listener_list, &mapping, dest, &snapshot);

out:
-// ramblock_find() was invoked previously in this function and returned a
-// pointer to an existing |hax_ramblock|, whose refcount was incremented by
-// ramblock_find(). Now that the pointer (local variable |block|) is about
-// to go out of scope, ramblock_deref() must be invoked here to keep the
-// refcount accurate.
+// Previously in this function, we called either ramblock_add() or
+// ramblock_find(), and incremented (implicitly in the latter case) the
+// refcount of the returned |block|. Now that |block| is about to go out of
+// scope, we must call ramblock_deref() to keep the refcount accurate.
if (block != NULL) {
ramblock_deref(block);
}
27 changes: 21 additions & 6 deletions core/ramblock.c
@@ -93,6 +93,7 @@ static hax_ramblock * ramblock_alloc(uint64_t base_uva, uint64_t size)
}
memset(chunks_bitmap, 0, chunks_bitmap_size);
block->chunks_bitmap = chunks_bitmap;
block->is_standalone = false;
block->ref_count = 0;

return block;
@@ -189,6 +190,17 @@ int ramblock_init_list(hax_list_head *list)
return 0;
}

static void ramblock_remove(hax_ramblock *block)
{
hax_assert(block != NULL);
ramblock_info("%s: Removing RAM block: base_uva=0x%llx, size=0x%llx,"
" is_standalone=%d, ref_count=%d\n", __func__,
block->base_uva, block->size, block->is_standalone,
block->ref_count);
hax_list_del(&block->entry);
ramblock_free(block);
}

void ramblock_free_list(hax_list_head *list)
{
hax_ramblock *ramblock, *tmp;
@@ -200,11 +212,7 @@ void ramblock_free_list(hax_list_head *list)

ramblock_info("ramblock_free_list\n");
hax_list_entry_for_each_safe(ramblock, tmp, list, hax_ramblock, entry) {
-hax_list_del(&ramblock->entry);
-ramblock_info("%s: Freeing RAM block: uva: 0x%llx, size: 0x%llx, "
-"ref_count: %d\n", __func__, ramblock->base_uva,
-ramblock->size, ramblock->ref_count);
-ramblock_free(ramblock);
+ramblock_remove(ramblock);
}
}

@@ -420,7 +428,14 @@ void ramblock_deref(hax_ramblock *block)
hax_debug("%s: Reset RAM block (%p): base_uva = 0x%llx, size = 0x%llx, "
"ref_count = %d\n", __func__, block, block->base_uva,
block->size, block->ref_count);
-ramblock_free_chunks(block, false);
+if (block->is_standalone) {
+// A stand-alone mapping is created along with a "disposable" RAM
+// block, which must be destroyed when the mapping is unmapped, so
+// its HVA range can be reused by other stand-alone mappings.
+ramblock_remove(block);
+} else {
+ramblock_free_chunks(block, false);
+}
return;
}

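Taken together, the memslot.c and ramblock.c changes above give a stand-alone
RAM block the following lifetime. This trace is a simplified illustration
only: it assumes the mapping lands in a single memslot, and the intermediate
reference counts are indicative rather than exact:

 SET_RAM2 (map, HAX_RAM_INFO_STANDALONE)
   memslot_set_mapping(..., HAX_MEMSLOT_STANDALONE):
     ramblock_add()           -> ref_count = 0, is_standalone = true
     ramblock_ref()           -> ref_count = 1
     memslot insertion        -> ref_count = 2
     ramblock_deref() at out: -> ref_count = 1
 SET_RAM2 (unmap, HAX_RAM_INFO_INVALID)
   memslot removal            -> ref_count = 0
     ramblock_deref(): is_standalone, so ramblock_remove() destroys the
                       RAM block and frees its HVA range for reuse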
16 changes: 13 additions & 3 deletions docs/api.md
@@ -96,6 +96,7 @@ itself as well as the host environment.
#define HAX_CAP_64BIT_SETRAM (1 << 4)
#define HAX_CAP_TUNNEL_PAGE (1 << 5)
#define HAX_CAP_DEBUG (1 << 7)
#define HAX_CAP_IMPLICIT_RAMBLOCK (1 << 8)
```
* (Output) `wstatus`: The first set of capability flags reported to the
caller. The following bits may be set, while others are reserved:
@@ -120,7 +121,9 @@ supported by the host CPU, or disabled in BIOS.
feature. This is always the case with API v2 and later.
* `HAX_CAP_UG`: If set, the host CPU supports the Unrestricted Guest (UG)
feature.
-* `HAX_CAP_64BIT_SETRAM`: If set, HAX\_VM\_IOCTL\_SET\_RAM2 is available.
+* `HAX_CAP_64BIT_SETRAM`: If set, `HAX_VM_IOCTL_SET_RAM2` is available.
+* `HAX_CAP_IMPLICIT_RAMBLOCK`: If set, `HAX_VM_IOCTL_SET_RAM2` supports the
+`HAX_RAM_INFO_STANDALONE` flag.
* (Output) `win_refcount`: (Windows only)
* (Output) `mem_quota`: If the global memory cap setting is enabled (q.v.
`HAX_IOCTL_SET_MEMLIMIT`), reports the current quota on memory allocation (the
@@ -319,8 +322,9 @@ Same as `HAX_VM_IOCTL_SET_RAM`, but takes a 64-bit size instead of 32-bit.
uint64_t reserved2;
} __attribute__ ((__packed__));
-#define HAX_RAM_INFO_ROM 0x01
-#define HAX_RAM_INFO_INVALID 0x80
+#define HAX_RAM_INFO_ROM (1 << 0)
+#define HAX_RAM_INFO_STANDALONE (1 << 6)
+#define HAX_RAM_INFO_INVALID (1 << 7)
```
* (Input) `pa_start`: The start address of the GPA range to map. Must be page-
aligned (i.e. a multiple of 4KB).
Expand All @@ -336,6 +340,12 @@ buffer.
while others are reserved:
* `HAX_RAM_INFO_ROM`: If set, the GPA range will be mapped as read-only
memory (ROM).
* `HAX_RAM_INFO_STANDALONE`: If set, the HVA range must not overlap with any
existing RAM block, and a new RAM block will be implicitly created for this
stand-alone mapping. In other words, when using this flag, the caller should not
call `HAX_VM_IOCTL_ADD_RAMBLOCK` in advance for the same HVA range. As soon as
the stand-alone mapping is destroyed (via `HAX_RAM_INFO_INVALID`), the
implicitly-created RAM block will also go away.
* `HAX_RAM_INFO_INVALID`: (Since API v4) If set, any existing mappings for
any guest physical pages in the GPA range will be removed, i.e. the GPA range
will be reserved for MMIO. This flag must not be combined with any other flags,
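To connect the capability bits with the mapping flags above, a hypothetical
caller-side fallback chain (decision logic only; `cap` is a
`hax_capabilityinfo` returned by `HAX_IOCTL_CAPABILITY`):

```c
/* Hypothetical fallback logic based on the reported capability bits. */
if ((cap.winfo & HAX_CAP_64BIT_SETRAM) &&
    (cap.winfo & HAX_CAP_IMPLICIT_RAMBLOCK)) {
    /* HAX_VM_IOCTL_SET_RAM2 accepts HAX_RAM_INFO_STANDALONE: temporary
     * mappings need no prior HAX_VM_IOCTL_ADD_RAMBLOCK registration. */
} else if (cap.winfo & HAX_CAP_64BIT_SETRAM) {
    /* HAX_VM_IOCTL_SET_RAM2 is available, but every HVA range must be
     * registered with HAX_VM_IOCTL_ADD_RAMBLOCK before it is mapped. */
} else {
    /* Only the legacy 32-bit HAX_VM_IOCTL_SET_RAM interface is available. */
}
```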
10 changes: 8 additions & 2 deletions include/hax_interface.h
@@ -193,6 +193,7 @@ struct hax_module_version {
#define HAX_CAP_TUNNEL_PAGE (1 << 5)
#define HAX_CAP_RAM_PROTECTION (1 << 6)
#define HAX_CAP_DEBUG (1 << 7)
#define HAX_CAP_IMPLICIT_RAMBLOCK (1 << 8)

struct hax_capabilityinfo {
/*
@@ -240,8 +241,13 @@ struct hax_ramblock_info {
uint64_t reserved;
} PACKED;

-#define HAX_RAM_INFO_ROM 0x01 // read-only
-#define HAX_RAM_INFO_INVALID 0x80 // unmapped, usually used for MMIO
+// Read-only mapping
+#define HAX_RAM_INFO_ROM (1 << 0)
+// Stand-alone mapping into a new HVA range
+#define HAX_RAM_INFO_STANDALONE (1 << 6)
+
+// Unmapped, usually used for MMIO
+#define HAX_RAM_INFO_INVALID (1 << 7)

struct hax_set_ram_info {
uint64_t pa_start;
