Skip to content

Commit

Permalink
dax: Inline dax_insert_mapping() into the callsite
Browse files Browse the repository at this point in the history
dax_insert_mapping() has only one callsite and we will need to further
fine tune what it does for synchronous faults. Just inline it into the
callsite so that we don't have to pass awkward bools around.

Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Ross Zwisler <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
  • Loading branch information
jankara authored and djbw committed Nov 3, 2017
1 parent d2c43ef commit 1b5a1cb
Showing 1 changed file with 19 additions and 27 deletions.
46 changes: 19 additions & 27 deletions fs/dax.c
Original file line number Diff line number Diff line change
Expand Up @@ -858,32 +858,6 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
return rc;
}

/*
 * Resolve the PFN backing @pos, record the DAX entry for it in the
 * mapping's radix tree, and install the PTE at the faulting address.
 * Returns 0 on success or a negative errno.
 */
static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void *entry)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	void *new_entry;
	pfn_t pfn;
	int err;

	err = dax_iomap_pfn(iomap, pos, PAGE_SIZE, &pfn);
	if (err < 0)
		return err;

	new_entry = dax_insert_mapping_entry(mapping, vmf, entry,
					     dax_iomap_sector(iomap, pos), 0);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);

	trace_dax_insert_mapping(mapping->host, vmf, new_entry);

	/* Write faults need a writable PTE; reads get a read-only one. */
	if (vmf->flags & FAULT_FLAG_WRITE)
		return vm_insert_mixed_mkwrite(vma, vmf->address, pfn);
	return vm_insert_mixed(vma, vmf->address, pfn);
}

/*
* The user has performed a load from a hole in the file. Allocating a new
* page in the file would cause excessive storage usage for workloads with
Expand Down Expand Up @@ -1119,6 +1093,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
bool write = vmf->flags & FAULT_FLAG_WRITE;
int vmf_ret = 0;
void *entry;
pfn_t pfn;

trace_dax_pte_fault(inode, vmf, vmf_ret);
/*
Expand Down Expand Up @@ -1201,7 +1176,24 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
major = VM_FAULT_MAJOR;
}
error = dax_insert_mapping(vmf, &iomap, pos, entry);
error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
if (error < 0)
goto error_finish_iomap;

entry = dax_insert_mapping_entry(mapping, vmf, entry,
dax_iomap_sector(&iomap, pos),
0);
if (IS_ERR(entry)) {
error = PTR_ERR(entry);
goto error_finish_iomap;
}

trace_dax_insert_mapping(inode, vmf, entry);
if (write)
error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
else
error = vm_insert_mixed(vma, vaddr, pfn);

/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error == -EBUSY)
error = 0;
Expand Down

0 comments on commit 1b5a1cb

Please sign in to comment.