mm: pagecache insertion fewer atomics
Setting and clearing the page locked bit when inserting a page into the
swapcache / pagecache, while the page has no other references, can use
non-atomic page flags operations, because no other CPU can be operating
on the page at that time.

This saves one atomic operation when inserting a page into pagecache.

Signed-off-by: Nick Piggin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
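
For reference, a minimal userspace sketch of the distinction the patch relies on (an illustration only, not the kernel's implementation): set_bit() is an atomic read-modify-write (a LOCK-prefixed instruction on x86), while __set_bit() is a plain, cheaper read-modify-write that is safe only while no other CPU can reach the flags word.

#include <stdatomic.h>

#define PG_locked 0

/* Roughly what set_bit(PG_locked, &flags) guarantees: an atomic RMW. */
static void set_flag_atomic(_Atomic unsigned long *flags)
{
	atomic_fetch_or(flags, 1UL << PG_locked);
}

/* Roughly what __set_bit(PG_locked, &flags) does: a plain RMW with no
 * LOCK prefix.  Safe only while no other CPU can see or modify *flags,
 * e.g. a page that has not yet been inserted into the pagecache. */
static void set_flag_nonatomic(unsigned long *flags)
{
	*flags |= 1UL << PG_locked;
}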
Nick Piggin authored and torvalds committed Oct 20, 2008
1 parent 9978ad5 commit f45840b
Showing 2 changed files with 9 additions and 9 deletions.
14 changes: 7 additions & 7 deletions include/linux/pagemap.h
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-	set_bit(PG_locked, &page->flags);
+	__set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-	clear_bit(PG_locked, &page->flags);
+	__clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	set_page_locked(page);
+	__set_page_locked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		clear_page_locked(page);
+		__clear_page_locked(page);
 	return error;
 }

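To make the constraint concrete, here is a hedged sketch of a typical caller (the helper name example_grab_new_page is invented for illustration, not taken from this commit): the page comes straight from the allocator, so nothing else can hold a reference until add_to_page_cache() succeeds, which is what makes the non-atomic __set_page_locked() above safe.

#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Hypothetical caller, for illustration only -- not part of this commit. */
static struct page *example_grab_new_page(struct address_space *mapping,
					  pgoff_t index, gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	if (!page)
		return NULL;

	/* The page is fresh from the allocator: no other CPU can hold a
	 * reference yet, which is why add_to_page_cache() may take the
	 * lock bit with the non-atomic __set_page_locked(). */
	if (add_to_page_cache(page, mapping, index, gfp)) {
		page_cache_release(page);	/* never became visible */
		return NULL;
	}

	return page;	/* locked and inserted into the radix tree */
}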
4 changes: 2 additions & 2 deletions mm/swap_state.c
@@ -303,7 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * re-using the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		set_page_locked(new_page);
+		__set_page_locked(new_page);
 		SetPageSwapBacked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
@@ -315,7 +315,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			return new_page;
 		}
 		ClearPageSwapBacked(new_page);
-		clear_page_locked(new_page);
+		__clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);

