Skip to content

Commit

Permalink
dax: Allow tuning whether dax_insert_mapping_entry() dirties entry
Browse files Browse the repository at this point in the history
Currently we dirty the radix tree entry whenever dax_insert_mapping_entry()
gets called for a write fault. With synchronous page faults we would
like to insert a clean radix tree entry and dirty it only once we call
fdatasync() and update the page tables, saving some unnecessary cache
flushing. Add a 'dirty' argument to dax_insert_mapping_entry() for that.

Reviewed-by: Ross Zwisler <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
  • Loading branch information
jankara authored and djbw committed Nov 3, 2017
1 parent 9a0dd42 commit f5b7b74
Showing 1 changed file with 7 additions and 7 deletions.
14 changes: 7 additions & 7 deletions fs/dax.c
Original file line number Diff line number Diff line change
Expand Up @@ -526,13 +526,13 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
static void *dax_insert_mapping_entry(struct address_space *mapping,
struct vm_fault *vmf,
void *entry, sector_t sector,
unsigned long flags)
unsigned long flags, bool dirty)
{
struct radix_tree_root *page_tree = &mapping->page_tree;
void *new_entry;
pgoff_t index = vmf->pgoff;

if (vmf->flags & FAULT_FLAG_WRITE)
if (dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
Expand Down Expand Up @@ -569,7 +569,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
entry = new_entry;
}

if (vmf->flags & FAULT_FLAG_WRITE)
if (dirty)
radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

spin_unlock_irq(&mapping->tree_lock);
Expand Down Expand Up @@ -881,7 +881,7 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
}

entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
RADIX_DAX_ZERO_PAGE);
RADIX_DAX_ZERO_PAGE, false);
if (IS_ERR(entry2)) {
ret = VM_FAULT_SIGBUS;
goto out;
Expand Down Expand Up @@ -1182,7 +1182,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,

entry = dax_insert_mapping_entry(mapping, vmf, entry,
dax_iomap_sector(&iomap, pos),
0);
0, write);
if (IS_ERR(entry)) {
error = PTR_ERR(entry);
goto error_finish_iomap;
Expand Down Expand Up @@ -1258,7 +1258,7 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
goto fallback;

ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
if (IS_ERR(ret))
goto fallback;

Expand Down Expand Up @@ -1379,7 +1379,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,

entry = dax_insert_mapping_entry(mapping, vmf, entry,
dax_iomap_sector(&iomap, pos),
RADIX_DAX_PMD);
RADIX_DAX_PMD, write);
if (IS_ERR(entry))
goto finish_iomap;

Expand Down

0 comments on commit f5b7b74

Please sign in to comment.