xfs: convert buftarg LRU to generic code
Convert the buftarg LRU to use the new generic LRU list and take advantage
of the functionality it supplies to make the buffer cache shrinker node
aware.

Signed-off-by: Glauber Costa <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
Cc: "Theodore Ts'o" <[email protected]>
Cc: Adrian Hunter <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Artem Bityutskiy <[email protected]>
Cc: Arve Hjønnevåg <[email protected]>
Cc: Carlos Maiolino <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Chuck Lever <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Gleb Natapov <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: J. Bruce Fields <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: John Stultz <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Kent Overstreet <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Marcelo Tosatti <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Steven Whitehouse <[email protected]>
Cc: Thomas Hellstrom <[email protected]>
Cc: Trond Myklebust <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Al Viro <[email protected]>
Dave Chinner authored and Al Viro committed Sep 10, 2013
1 parent 9b17c62 commit e80dfa1
Showing 2 changed files with 82 additions and 93 deletions.
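
The conversion below leans on the generic list_lru API: each cached object embeds a list_head, list_lru_add()/list_lru_del() manage LRU membership per NUMA node under an internal lock and report whether the item was actually added or removed, and reclaim walks the list through an isolate callback that returns an enum lru_status. The following is a minimal sketch of that pattern, using only calls that appear in the diff; the demo_* names are hypothetical and not part of this commit.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

/* Hypothetical cached object; the API only needs the embedded list_head. */
struct demo_obj {
	struct list_head	lru;		/* linkage owned by the list_lru */
	atomic_t		refcount;
};

static struct list_lru demo_lru;		/* set up with list_lru_init() */

/* list_lru_add() returns true only if the item was not already on an LRU. */
static void demo_obj_lru_add(struct demo_obj *obj)
{
	if (list_lru_add(&demo_lru, &obj->lru))
		atomic_inc(&obj->refcount);	/* the LRU holds a reference */
}

/* Isolate callback, invoked with the per-node LRU lock held. */
static enum lru_status demo_isolate(struct list_head *item,
				    spinlock_t *lru_lock, void *arg)
{
	struct demo_obj		*obj = container_of(item, struct demo_obj, lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&obj->refcount) > 1)
		return LRU_ROTATE;		/* busy: another trip around the LRU */

	list_move(item, dispose);		/* caller frees the dispose list */
	return LRU_REMOVED;
}
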
170 changes: 80 additions & 90 deletions fs/xfs/xfs_buf.c
@@ -86,20 +86,14 @@ xfs_buf_vmap_len(
  * The LRU takes a new reference to the buffer so that it will only be freed
  * once the shrinker takes the buffer off the LRU.
  */
-STATIC void
+static void
 xfs_buf_lru_add(
 	struct xfs_buf	*bp)
 {
-	struct xfs_buftarg *btp = bp->b_target;
-
-	spin_lock(&btp->bt_lru_lock);
-	if (list_empty(&bp->b_lru)) {
-		atomic_inc(&bp->b_hold);
-		list_add_tail(&bp->b_lru, &btp->bt_lru);
-		btp->bt_lru_nr++;
+	if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
 		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
+		atomic_inc(&bp->b_hold);
 	}
-	spin_unlock(&btp->bt_lru_lock);
 }
 
 /*
@@ -108,24 +102,13 @@ xfs_buf_lru_add(
  * The unlocked check is safe here because it only occurs when there are not
  * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
  * to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
- * bt_lru_lock.
+ * xfs_buf_free().
  */
-STATIC void
+static void
 xfs_buf_lru_del(
 	struct xfs_buf	*bp)
 {
-	struct xfs_buftarg *btp = bp->b_target;
-
-	if (list_empty(&bp->b_lru))
-		return;
-
-	spin_lock(&btp->bt_lru_lock);
-	if (!list_empty(&bp->b_lru)) {
-		list_del_init(&bp->b_lru);
-		btp->bt_lru_nr--;
-	}
-	spin_unlock(&btp->bt_lru_lock);
+	list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
 }
 
 /*
@@ -152,18 +135,10 @@ xfs_buf_stale(
 	bp->b_flags &= ~_XBF_DELWRI_Q;
 
 	atomic_set(&(bp)->b_lru_ref, 0);
-	if (!list_empty(&bp->b_lru)) {
-		struct xfs_buftarg *btp = bp->b_target;
-
-		spin_lock(&btp->bt_lru_lock);
-		if (!list_empty(&bp->b_lru) &&
-		    !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
-			list_del_init(&bp->b_lru);
-			btp->bt_lru_nr--;
-			atomic_dec(&bp->b_hold);
-		}
-		spin_unlock(&btp->bt_lru_lock);
-	}
+	if (!(bp->b_lru_flags & _XBF_LRU_DISPOSE) &&
+	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
+		atomic_dec(&bp->b_hold);
+
 	ASSERT(atomic_read(&bp->b_hold) >= 1);
 }
 
@@ -1502,83 +1477,97 @@ xfs_buf_iomove(
  * returned. These buffers will have an elevated hold count, so wait on those
  * while freeing all the buffers only held by the LRU.
  */
-void
-xfs_wait_buftarg(
-	struct xfs_buftarg	*btp)
+static enum lru_status
+xfs_buftarg_wait_rele(
+	struct list_head	*item,
+	spinlock_t		*lru_lock,
+	void			*arg)
+
 {
-	struct xfs_buf		*bp;
+	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
 
-restart:
-	spin_lock(&btp->bt_lru_lock);
-	while (!list_empty(&btp->bt_lru)) {
-		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
-		if (atomic_read(&bp->b_hold) > 1) {
-			trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
-			list_move_tail(&bp->b_lru, &btp->bt_lru);
-			spin_unlock(&btp->bt_lru_lock);
-			delay(100);
-			goto restart;
-		}
+	if (atomic_read(&bp->b_hold) > 1) {
+		/* need to wait */
+		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+		spin_unlock(lru_lock);
+		delay(100);
+	} else {
 		/*
 		 * clear the LRU reference count so the buffer doesn't get
 		 * ignored in xfs_buf_rele().
 		 */
 		atomic_set(&bp->b_lru_ref, 0);
-		spin_unlock(&btp->bt_lru_lock);
+		spin_unlock(lru_lock);
 		xfs_buf_rele(bp);
-		spin_lock(&btp->bt_lru_lock);
 	}
-	spin_unlock(&btp->bt_lru_lock);
+
+	spin_lock(lru_lock);
+	return LRU_RETRY;
 }
 
-int
-xfs_buftarg_shrink(
+void
+xfs_wait_buftarg(
+	struct xfs_buftarg	*btp)
+{
+	while (list_lru_count(&btp->bt_lru))
+		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+			      NULL, LONG_MAX);
+}
+
+static enum lru_status
+xfs_buftarg_isolate(
+	struct list_head	*item,
+	spinlock_t		*lru_lock,
+	void			*arg)
+{
+	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
+	struct list_head	*dispose = arg;
+
+	/*
+	 * Decrement the b_lru_ref count unless the value is already
+	 * zero. If the value is already zero, we need to reclaim the
+	 * buffer, otherwise it gets another trip through the LRU.
+	 */
+	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0))
+		return LRU_ROTATE;
+
+	bp->b_lru_flags |= _XBF_LRU_DISPOSE;
+	list_move(item, dispose);
+	return LRU_REMOVED;
+}
+
+static long
+xfs_buftarg_shrink_scan(
 	struct shrinker		*shrink,
 	struct shrink_control	*sc)
 {
 	struct xfs_buftarg	*btp = container_of(shrink,
 					struct xfs_buftarg, bt_shrinker);
-	struct xfs_buf		*bp;
-	int nr_to_scan = sc->nr_to_scan;
 	LIST_HEAD(dispose);
+	long			freed;
+	unsigned long		nr_to_scan = sc->nr_to_scan;
 
-	if (!nr_to_scan)
-		return btp->bt_lru_nr;
-
-	spin_lock(&btp->bt_lru_lock);
-	while (!list_empty(&btp->bt_lru)) {
-		if (nr_to_scan-- <= 0)
-			break;
-
-		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
-
-		/*
-		 * Decrement the b_lru_ref count unless the value is already
-		 * zero. If the value is already zero, we need to reclaim the
-		 * buffer, otherwise it gets another trip through the LRU.
-		 */
-		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
-			list_move_tail(&bp->b_lru, &btp->bt_lru);
-			continue;
-		}
-
-		/*
-		 * remove the buffer from the LRU now to avoid needing another
-		 * lock round trip inside xfs_buf_rele().
-		 */
-		list_move(&bp->b_lru, &dispose);
-		btp->bt_lru_nr--;
-		bp->b_lru_flags |= _XBF_LRU_DISPOSE;
-	}
-	spin_unlock(&btp->bt_lru_lock);
+	freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
+					&dispose, &nr_to_scan);
 
 	while (!list_empty(&dispose)) {
+		struct xfs_buf *bp;
 		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
 		list_del_init(&bp->b_lru);
 		xfs_buf_rele(bp);
 	}
 
-	return btp->bt_lru_nr;
+	return freed;
+}
+
+static long
+xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
+	struct shrink_control	*sc)
+{
+	struct xfs_buftarg	*btp = container_of(shrink,
+					struct xfs_buftarg, bt_shrinker);
+	return list_lru_count_node(&btp->bt_lru, sc->nid);
+}
 
 void
@@ -1660,12 +1649,13 @@ xfs_alloc_buftarg(
 	if (!btp->bt_bdi)
 		goto error;
 
-	INIT_LIST_HEAD(&btp->bt_lru);
-	spin_lock_init(&btp->bt_lru_lock);
+	list_lru_init(&btp->bt_lru);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
-	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
+	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
+	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
 	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
 	register_shrinker(&btp->bt_shrinker);
 	return btp;
 
5 changes: 2 additions & 3 deletions fs/xfs/xfs_buf.h
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
 #include <linux/uio.h>
+#include <linux/list_lru.h>
 
 /*
  * Base types
@@ -92,9 +93,7 @@ typedef struct xfs_buftarg {
 
 	/* LRU control structures */
 	struct shrinker		bt_shrinker;
-	struct list_head	bt_lru;
-	spinlock_t		bt_lru_lock;
-	unsigned int		bt_lru_nr;
+	struct list_lru		bt_lru;
 } xfs_buftarg_t;
 
 struct xfs_buf;
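
The xfs_buf.h hunk above collapses the open-coded list/lock/counter trio into a single struct list_lru, and xfs_alloc_buftarg() now registers the shrinker with separate count_objects/scan_objects callbacks and the SHRINKER_NUMA_AWARE flag, so each callback acts only on the node identified by sc->nid. A sketch of that wiring, continuing the hypothetical demo_* cache from the earlier example (demo_obj_free is likewise made up, standing in for the cache's real destructor):

#include <linux/shrinker.h>

static long demo_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/* Lockless per-node count of objects on this node's LRU list. */
	return list_lru_count_node(&demo_lru, sc->nid);
}

static long demo_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long	nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);
	long		freed;

	/* Walk only the LRU list of the node being reclaimed. */
	freed = list_lru_walk_node(&demo_lru, sc->nid, demo_isolate,
				   &dispose, &nr_to_scan);

	/* Free whatever the isolate callback moved onto the dispose list. */
	while (!list_empty(&dispose)) {
		struct demo_obj *obj;

		obj = list_first_entry(&dispose, struct demo_obj, lru);
		list_del_init(&obj->lru);
		demo_obj_free(obj);	/* hypothetical: drops the LRU's reference */
	}
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_shrink_count,
	.scan_objects	= demo_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE,
};

/* At init time: list_lru_init(&demo_lru); register_shrinker(&demo_shrinker); */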
