Skip to content

Commit

Permalink
iommufd: Add some fault injection points
Browse files Browse the repository at this point in the history
This increases the coverage obtained by the fail_nth test, as well as the
coverage obtained via syzkaller.

Link: https://lore.kernel.org/r/[email protected]
Tested-by: Matthew Rosato <[email protected]> # s390
Signed-off-by: Jason Gunthorpe <[email protected]>
  • Loading branch information
jgunthorpe committed Dec 1, 2022
1 parent f4b20bb commit e26eed4
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 0 deletions.
3 changes: 3 additions & 0 deletions drivers/iommu/iommufd/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,9 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
{
struct iommufd_object *obj;

if (iommufd_should_fail())
return ERR_PTR(-ENOENT);

xa_lock(&ictx->objects);
obj = xa_load(&ictx->objects, id);
if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
Expand Down
26 changes: 26 additions & 0 deletions drivers/iommu/iommufd/pages.c
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,10 @@ static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len)

if (*size < backup_len)
return backup;

if (!backup && iommufd_should_fail())
return NULL;

*size = min_t(size_t, *size, TEMP_MEMORY_LIMIT);
res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (res)
Expand Down Expand Up @@ -544,13 +548,23 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
unsigned long last_index, struct page **pages)
{
struct page **end_pages = pages + (last_index - start_index) + 1;
struct page **half_pages = pages + (end_pages - pages) / 2;
XA_STATE(xas, xa, start_index);

do {
void *old;

xas_lock(&xas);
while (pages != end_pages) {
/* xarray does not participate in fault injection */
if (pages == half_pages && iommufd_should_fail()) {
xas_set_err(&xas, -EINVAL);
xas_unlock(&xas);
/* aka xas_destroy() */
xas_nomem(&xas, GFP_KERNEL);
goto err_clear;
}

old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages)));
if (xas_error(&xas))
break;
Expand All @@ -561,6 +575,7 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));

err_clear:
if (xas_error(&xas)) {
if (xas.xa_index != start_index)
clear_xarray(xa, start_index, xas.xa_index - 1);
Expand Down Expand Up @@ -728,6 +743,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user,
npages = min_t(unsigned long, last_index - start_index + 1,
user->upages_len / sizeof(*user->upages));


if (iommufd_should_fail())
return -EFAULT;

uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
if (!remote_mm)
rc = pin_user_pages_fast(uptr, npages, user->gup_flags,
Expand Down Expand Up @@ -872,6 +891,8 @@ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
npages = pages->last_npinned - pages->npinned;
inc = false;
} else {
if (iommufd_should_fail())
return -ENOMEM;
npages = pages->npinned - pages->last_npinned;
inc = true;
}
Expand Down Expand Up @@ -1721,6 +1742,11 @@ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index,
return iopt_pages_rw_slow(pages, index, index, offset, data,
length, flags);

if (iommufd_should_fail()) {
rc = -EINVAL;
goto out_mmput;
}

mmap_read_lock(pages->source_mm);
rc = pin_user_pages_remote(
pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE),
Expand Down

0 comments on commit e26eed4

Please sign in to comment.