Skip to content

Commit

Permalink
Fix race in create_empty_buffers() vs __set_page_dirty_buffers()
Browse files Browse the repository at this point in the history
Nick Piggin noticed this (very unlikely) race between setting a page
dirty and creating the buffers for it - we need to hold the mapping
private_lock until we've set the page dirty bit, in order to make sure
that create_empty_buffers() cannot build up a set of buffers without
the dirty bits set when the page is dirty.

I doubt anybody has ever hit this race (and it didn't solve the issue
Nick was looking at), but as Nick says: "Still, it does appear to solve
a real race, which we should close."

Acked-by: Nick Piggin <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
torvalds committed Mar 19, 2009
1 parent 68df375 commit a8e7d49
Showing 1 changed file with 11 additions and 12 deletions.
23 changes: 11 additions & 12 deletions fs/buffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -760,15 +760,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
* If warn is true, then emit a warning if the page is not uptodate and has
* not been truncated.
*/
static int __set_page_dirty(struct page *page,
static void __set_page_dirty(struct page *page,
struct address_space *mapping, int warn)
{
if (unlikely(!mapping))
return !TestSetPageDirty(page);

if (TestSetPageDirty(page))
return 0;

spin_lock_irq(&mapping->tree_lock);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
Expand All @@ -785,8 +779,6 @@ static int __set_page_dirty(struct page *page,
}
spin_unlock_irq(&mapping->tree_lock);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

return 1;
}

/*
Expand Down Expand Up @@ -816,6 +808,7 @@ static int __set_page_dirty(struct page *page,
*/
int __set_page_dirty_buffers(struct page *page)
{
int newly_dirty;
struct address_space *mapping = page_mapping(page);

if (unlikely(!mapping))
Expand All @@ -831,9 +824,12 @@ int __set_page_dirty_buffers(struct page *page)
bh = bh->b_this_page;
} while (bh != head);
}
newly_dirty = !TestSetPageDirty(page);
spin_unlock(&mapping->private_lock);

return __set_page_dirty(page, mapping, 1);
if (newly_dirty)
__set_page_dirty(page, mapping, 1);
return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

Expand Down Expand Up @@ -1262,8 +1258,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
return;
}

if (!test_set_buffer_dirty(bh))
__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
if (!test_set_buffer_dirty(bh)) {
struct page *page = bh->b_page;
if (!TestSetPageDirty(page))
__set_page_dirty(page, page_mapping(page), 0);
}
}

/*
Expand Down

0 comments on commit a8e7d49

Please sign in to comment.