mm: selftests for exclusive device memory
Adds some selftests for exclusive device memory.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Alistair Popple <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Tested-by: Ralph Campbell <[email protected]>
Reviewed-by: Ralph Campbell <[email protected]>
Cc: Ben Skeggs <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: "Matthew Wilcox (Oracle)" <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Shakeel Butt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
apopple-nvidia authored and torvalds committed Jul 1, 2021
1 parent b756a3b commit b659bae
Showing 3 changed files with 285 additions and 0 deletions.
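A note on how the pieces below fit together: lib/test_hmm.c adds two ioctls to the mirror character device, test_hmm_uapi.h exposes their request numbers, and hmm-tests.c exercises them. The following minimal stand-alone sketch mirrors what the new TEST_F(hmm, exclusive) does: map an anonymous buffer, request exclusive device access, touch every page from the CPU, then verify the exclusive entries were revoked. It is not part of the commit; the /dev/hmm_dmirror0 node name and the local copy of test_hmm_uapi.h are assumptions.

/*
 * Hedged sketch, not part of the commit: exercise HMM_DMIRROR_EXCLUSIVE and
 * HMM_DMIRROR_CHECK_EXCLUSIVE by hand.  Assumes the test_hmm module is
 * loaded, a /dev/hmm_dmirror0 node exists, and test_hmm_uapi.h was copied
 * next to this file.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "test_hmm_uapi.h"

int main(void)
{
	unsigned long page_size = sysconf(_SC_PAGE_SIZE);
	unsigned long npages = 16, i;
	unsigned long size = npages * page_size;
	struct hmm_dmirror_cmd cmd = { 0 };
	char *buf, *mirror;
	int fd;

	fd = open("/dev/hmm_dmirror0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mirror = malloc(size);
	if (buf == MAP_FAILED || !mirror)
		return 1;
	memset(buf, 0x5a, size);

	/* Ask the mirror device for exclusive (atomic) access to the range. */
	cmd.addr = (uintptr_t)buf;
	cmd.ptr = (uintptr_t)mirror;
	cmd.npages = npages;
	if (ioctl(fd, HMM_DMIRROR_EXCLUSIVE, &cmd))
		perror("HMM_DMIRROR_EXCLUSIVE");

	/* CPU writes fault each page back, which must revoke exclusive access. */
	for (i = 0; i < npages; i++)
		buf[i * page_size] = 1;

	/* Succeeds (returns 0) only if no page is still mapped atomically. */
	if (ioctl(fd, HMM_DMIRROR_CHECK_EXCLUSIVE, &cmd))
		perror("HMM_DMIRROR_CHECK_EXCLUSIVE");

	close(fd);
	return 0;
}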
125 changes: 125 additions & 0 deletions lib/test_hmm.c
@@ -25,6 +25,7 @@
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
#include <linux/rmap.h>

#include "test_hmm_uapi.h"

@@ -46,6 +47,7 @@ struct dmirror_bounce {
unsigned long cpages;
};

#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL

/*
@@ -619,6 +621,54 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
}
}

static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
unsigned long end)
{
unsigned long pfn;

for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
void *entry;

entry = xa_load(&dmirror->pt, pfn);
if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
return -EPERM;
}

return 0;
}

static int dmirror_atomic_map(unsigned long start, unsigned long end,
struct page **pages, struct dmirror *dmirror)
{
unsigned long pfn, mapped = 0;
int i;

/* Map the migrated pages into the device's page tables. */
mutex_lock(&dmirror->mutex);

for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
void *entry;

if (!pages[i])
continue;

entry = pages[i];
entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
if (xa_is_err(entry)) {
mutex_unlock(&dmirror->mutex);
return xa_err(entry);
}

mapped++;
}

mutex_unlock(&dmirror->mutex);
return mapped;
}
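Both helpers above reuse the driver's page-table XArray (dmirror->pt): ordinary mirrored pages are stored tagged DPT_XA_TAG_WRITE, while pages grabbed for exclusive access are stored tagged DPT_XA_TAG_ATOMIC, so telling them apart is a pointer-tag check. A minimal kernel-side sketch of such a lookup; the helper name is invented for illustration and is not part of the commit:

/* Hypothetical helper, illustration only: is this pfn currently mapped
 * for exclusive/atomic device access in the mirror's page table? */
static bool dmirror_pfn_is_atomic(struct dmirror *dmirror, unsigned long pfn)
{
	void *entry = xa_load(&dmirror->pt, pfn);

	/*
	 * xa_pointer_tag() recovers the low bits stored by xa_tag_pointer();
	 * a NULL entry has tag 0, so an unmapped pfn is never reported as
	 * atomic.
	 */
	return xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC;
}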

static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
struct dmirror *dmirror)
{
@@ -661,6 +711,72 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
return 0;
}

static int dmirror_exclusive(struct dmirror *dmirror,
struct hmm_dmirror_cmd *cmd)
{
unsigned long start, end, addr;
unsigned long size = cmd->npages << PAGE_SHIFT;
struct mm_struct *mm = dmirror->notifier.mm;
struct page *pages[64];
struct dmirror_bounce bounce;
unsigned long next;
int ret;

start = cmd->addr;
end = start + size;
if (end < start)
return -EINVAL;

/* Since the mm is for the mirrored process, get a reference first. */
if (!mmget_not_zero(mm))
return -EINVAL;

mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
unsigned long mapped;
int i;

if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
next = end;
else
next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);

ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
mapped = dmirror_atomic_map(addr, next, pages, dmirror);
for (i = 0; i < ret; i++) {
if (pages[i]) {
unlock_page(pages[i]);
put_page(pages[i]);
}
}

if (addr + (mapped << PAGE_SHIFT) < next) {
mmap_read_unlock(mm);
mmput(mm);
return -EBUSY;
}
}
mmap_read_unlock(mm);
mmput(mm);

/* Return the migrated data for verification. */
ret = dmirror_bounce_init(&bounce, start, size);
if (ret)
return ret;
mutex_lock(&dmirror->mutex);
ret = dmirror_do_read(dmirror, start, end, &bounce);
mutex_unlock(&dmirror->mutex);
if (ret == 0) {
if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
bounce.size))
ret = -EFAULT;
}

cmd->cpages = bounce.cpages;
dmirror_bounce_fini(&bounce);
return ret;
}
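dmirror_exclusive() above walks the range in batches of ARRAY_SIZE(pages) (64) pages: make_device_exclusive_range() fills pages[], leaving NULL for any page it could not convert, every returned page is then unlocked and released, and if dmirror_atomic_map() installed fewer entries than the batch spans the whole ioctl fails with -EBUSY instead of reporting partial success. From user space that surfaces as errno == EBUSY, so a caller can retry. A hedged sketch of such a retry wrapper follows; the function is illustrative and assumes the same local test_hmm_uapi.h copy as the sketch near the top of this page:

#include <errno.h>
#include <sys/ioctl.h>

#include "test_hmm_uapi.h"

/* Illustrative only: retry HMM_DMIRROR_EXCLUSIVE a few times when the
 * kernel could not make every page in the range exclusive. */
static int dmirror_exclusive_retry(int fd, struct hmm_dmirror_cmd *cmd,
				   int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (ioctl(fd, HMM_DMIRROR_EXCLUSIVE, cmd) == 0)
			return 0;		/* every page is now exclusive */
		if (errno != EBUSY)
			return -errno;		/* a real error, give up */
		/* EBUSY: a page was busy or raced with a CPU fault; retry. */
	}
	return -EBUSY;
}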

static int dmirror_migrate(struct dmirror *dmirror,
struct hmm_dmirror_cmd *cmd)
{
@@ -948,6 +1064,15 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
ret = dmirror_migrate(dmirror, &cmd);
break;

case HMM_DMIRROR_EXCLUSIVE:
ret = dmirror_exclusive(dmirror, &cmd);
break;

case HMM_DMIRROR_CHECK_EXCLUSIVE:
ret = dmirror_check_atomic(dmirror, cmd.addr,
cmd.addr + (cmd.npages << PAGE_SHIFT));
break;

case HMM_DMIRROR_SNAPSHOT:
ret = dmirror_snapshot(dmirror, &cmd);
break;
2 changes: 2 additions & 0 deletions lib/test_hmm_uapi.h
@@ -33,6 +33,8 @@ struct hmm_dmirror_cmd {
#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x04, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)

/*
* Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
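Both new request numbers take the existing struct hmm_dmirror_cmd argument. For orientation only, the command structure defined earlier in this header (not shown in this diff) looks approximately like this; the copy in the tree's test_hmm_uapi.h is authoritative:

/* Approximate layout, reproduced for reference; not part of this diff. */
struct hmm_dmirror_cmd {
	__u64 addr;	/* start of the user address range to operate on */
	__u64 ptr;	/* user buffer the mirrored data is copied to/from */
	__u64 npages;	/* number of pages in the range */
	__u64 cpages;	/* pages actually processed, filled in by the kernel */
	__u64 faults;	/* device faults taken, filled in by the kernel */
};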
158 changes: 158 additions & 0 deletions tools/testing/selftests/vm/hmm-tests.c
@@ -1485,4 +1485,162 @@ TEST_F(hmm2, double_map)
hmm_buffer_free(buffer);
}

/*
* Basic check of exclusive faulting.
*/
TEST_F(hmm, exclusive)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;

npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;

buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);

buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);

buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);

/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;

/* Map memory exclusively for device access. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);

/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);

/* Fault pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i]++, i);

for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i+1);

/* Check atomic access revoked */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);

hmm_buffer_free(buffer);
}

TEST_F(hmm, exclusive_mprotect)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;

npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;

buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);

buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);

buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);

/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;

/* Map memory exclusively for device access. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);

/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);

ret = mprotect(buffer->ptr, size, PROT_READ);
ASSERT_EQ(ret, 0);

/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, -EPERM);

hmm_buffer_free(buffer);
}

/*
* Check copy-on-write works.
*/
TEST_F(hmm, exclusive_cow)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
unsigned long i;
int *ptr;
int ret;

npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;

buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);

buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);

buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);

/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;

/* Map memory exclusively for device access. */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);

fork();

/* Fault pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i]++, i);

for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i+1);

hmm_buffer_free(buffer);
}
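The exclusive_cow test above relies on ordinary copy-on-write semantics: after fork() the anonymous pages are write-protected and shared, so the parent's increments take write faults that copy the pages, and that fault path must also clear any device-exclusive entries. As background, here is a standalone user-space illustration of the copy-on-write behaviour the test depends on; it is independent of the HMM mirror device and not part of the commit:

#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	pid_t pid;

	assert(p != MAP_FAILED);
	p[0] = 1;

	pid = fork();
	if (pid == 0) {
		/* Child: sees the pre-fork value, then writes its own copy. */
		assert(p[0] == 1);
		p[0] = 2;
		_exit(0);
	}

	/* Parent: the child's write went to a COW copy, not this page. */
	waitpid(pid, NULL, 0);
	assert(p[0] == 1);
	printf("parent still sees %d\n", p[0]);
	return 0;
}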

TEST_HARNESS_MAIN
