Merge tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw

Pull gfs2 updates from Steven Whitehouse:
 "The main feature of interest this time is quota updates.  There are
  some clean ups and some patches to use the new generic lru list code.

  There is still plenty of scope for some further changes in due course -
  faster lookups of quota structures is very much on the todo list.
  Also, a start has been made towards the more tricky issue of using the
  generic lru code with glocks, but that will have to be completed in a
  subsequent merge window.

  The other, more minor feature, is that there have been a number of
  performance patches which relate to block allocation.  In particular
  they will improve performance when the disk is nearly full"
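
[Editor's note] The "generic lru list code" mentioned above is the list_lru API that entered the core kernel in this same release cycle. The snippet below is only a hedged sketch of that pattern (park idle objects on a struct list_lru, reclaim them from a shrinker's isolate callback), not GFS2 code; my_obj, my_lru and my_isolate are made-up names, and the callback signature assumed is the one current as of this merge window.

#include <linux/list_lru.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_obj {
	struct list_head lru;		/* linkage onto the list_lru */
	/* ... payload ... */
};

static struct list_lru my_lru;		/* typically one per filesystem */

static int my_lru_setup(void)
{
	return list_lru_init(&my_lru);	/* 0 on success */
}

static void my_obj_idle(struct my_obj *o)
{
	/* Object is no longer referenced; park it for later reclaim. */
	list_lru_add(&my_lru, &o->lru);
}

static void my_obj_reuse(struct my_obj *o)
{
	/* Object is wanted again; pull it back off the LRU. */
	list_lru_del(&my_lru, &o->lru);
}

/* Isolate callback driven by list_lru_walk() from a shrinker;
 * the walk already holds the per-node LRU lock passed in @lock. */
static enum lru_status my_isolate(struct list_head *item,
				  spinlock_t *lock, void *arg)
{
	struct my_obj *o = list_entry(item, struct my_obj, lru);
	struct list_head *dispose = arg;

	/* Move onto a private dispose list; free it after the walk,
	 * outside @lock. list_lru_count() gives the shrinker its count. */
	list_move(&o->lru, dispose);
	return LRU_REMOVED;
}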

* tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw:
  GFS2: Use generic list_lru for quota
  GFS2: Rename quota qd_lru_lock qd_lock
  GFS2: Use reflink for quota data cache
  GFS2: Use lockref for glocks
  GFS2: Protect quota sync generation
  GFS2: Inline qd_trylock into gfs2_quota_unlock
  GFS2: Make two similar quota code fragments into a function
  GFS2: Remove obsolete quota tunable
  GFS2: Move gfs2_icbit_munge into quota.c
  GFS2: Speed up starting point selection for block allocation
  GFS2: Add allocation parameters structure
  GFS2: Clean up reservation removal
  GFS2: fix dentry leaks
  GFS2: new function gfs2_rbm_incr
  GFS2: Introduce rbm field bii
  GFS2: Do not reset flags on active reservations
  GFS2: introduce bi_blocks for optimization
  GFS2: optimize rbm_from_block wrt bi_start
  GFS2: d_splice_alias() can't return error
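
[Editor's note] The largest change in the diff below ("GFS2: Use lockref for glocks", fs/gfs2/glock.c) replaces the glock's atomic_t reference count with the generic struct lockref, which packs a spinlock and a count into one unit so the common get/put paths can usually avoid taking the lock. The following is only a minimal sketch of that pattern under the same kernel APIs; my_node and its helpers are hypothetical names, not GFS2 functions.

#include <linux/lockref.h>
#include <linux/spinlock.h>

struct my_node {
	struct lockref ref;	/* spinlock + reference count together */
	/* ... payload ... */
};

static void my_node_init(struct my_node *n)
{
	spin_lock_init(&n->ref.lock);
	n->ref.count = 1;	/* initial reference, as glock.c now sets gl_lockref.count */
}

static void my_node_get(struct my_node *n)
{
	/* Lockless fast path where supported; RCU lookups of possibly
	 * dying objects would use lockref_get_not_dead() instead. */
	lockref_get(&n->ref);
}

static void my_node_put(struct my_node *n)
{
	/* Fast path: drop a reference without the spinlock unless this
	 * could be the last one. */
	if (lockref_put_or_lock(&n->ref))
		return;

	/* Last reference: lockref_put_or_lock() left us holding ref.lock. */
	lockref_mark_dead(&n->ref);	/* later lockref_get_not_dead() will fail */
	spin_unlock(&n->ref.lock);
	/* tear the object down here */
}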
torvalds committed Nov 10, 2013
2 parents 6c86ae2 + 2147dbf commit 8b5baa4
Showing 21 changed files with 455 additions and 339 deletions.
4 changes: 3 additions & 1 deletion fs/gfs2/aops.c
@@ -611,12 +611,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

if (alloc_required) {
struct gfs2_alloc_parms ap = { .aflags = 0, };
error = gfs2_quota_lock_check(ip);
if (error)
goto out_unlock;

requested = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip, requested, 0);
ap.target = requested;
error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_qunlock;
}
7 changes: 5 additions & 2 deletions fs/gfs2/bmap.c
@@ -1216,6 +1216,7 @@ static int do_grow(struct inode *inode, u64 size)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_alloc_parms ap = { .target = 1, };
struct buffer_head *dibh;
int error;
int unstuff = 0;
@@ -1226,7 +1227,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
return error;

error = gfs2_inplace_reserve(ip, 1, 0);
error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto do_grow_qunlock;
unstuff = 1;
@@ -1279,6 +1280,7 @@ static int do_grow(struct inode *inode, u64 size)

int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
int ret;
u64 oldsize;

@@ -1294,7 +1296,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)

inode_dio_wait(inode);

ret = gfs2_rs_alloc(GFS2_I(inode));
ret = gfs2_rs_alloc(ip);
if (ret)
goto out;

@@ -1304,6 +1306,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
goto out;
}

gfs2_rs_deltree(ip->i_res);
ret = do_shrink(inode, oldsize, newsize);
out:
put_write_access(inode);
10 changes: 7 additions & 3 deletions fs/gfs2/file.c
@@ -383,6 +383,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = file_inode(vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned long last_index;
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
@@ -430,7 +431,8 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
ap.target = data_blocks + ind_blocks;
ret = gfs2_inplace_reserve(ip, &ap);
if (ret)
goto out_quota_unlock;

@@ -620,7 +622,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
if (!(file->f_mode & FMODE_WRITE))
return 0;

gfs2_rs_delete(ip);
gfs2_rs_delete(ip, &inode->i_writecount);
return 0;
}

@@ -800,6 +802,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
int error;
@@ -850,7 +853,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
ap.target = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip, &ap);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
83 changes: 40 additions & 43 deletions fs/gfs2/glock.c
@@ -31,6 +31,7 @@
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>

#include "gfs2.h"
#include "incore.h"
@@ -129,10 +130,10 @@ void gfs2_glock_free(struct gfs2_glock *gl)
*
*/

void gfs2_glock_hold(struct gfs2_glock *gl)
static void gfs2_glock_hold(struct gfs2_glock *gl)
{
GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
atomic_inc(&gl->gl_ref);
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
lockref_get(&gl->gl_lockref);
}

/**
@@ -186,20 +187,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
spin_unlock(&lru_lock);
}

/**
* gfs2_glock_put_nolock() - Decrement reference count on glock
* @gl: The glock to put
*
* This function should only be used if the caller has its own reference
* to the glock, in addition to the one it is dropping.
*/

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
if (atomic_dec_and_test(&gl->gl_ref))
GLOCK_BUG_ON(gl, 1);
}

/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
@@ -211,17 +198,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct address_space *mapping = gfs2_glock2aspace(gl);

if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
__gfs2_glock_remove_from_lru(gl);
spin_unlock(&lru_lock);
spin_lock_bucket(gl->gl_hash);
hlist_bl_del_rcu(&gl->gl_list);
spin_unlock_bucket(gl->gl_hash);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
if (lockref_put_or_lock(&gl->gl_lockref))
return;

lockref_mark_dead(&gl->gl_lockref);

spin_lock(&lru_lock);
__gfs2_glock_remove_from_lru(gl);
spin_unlock(&lru_lock);
spin_unlock(&gl->gl_lockref.lock);
spin_lock_bucket(gl->gl_hash);
hlist_bl_del_rcu(&gl->gl_list);
spin_unlock_bucket(gl->gl_hash);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
@@ -244,7 +236,7 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
continue;
if (gl->gl_sbd != sdp)
continue;
if (atomic_inc_not_zero(&gl->gl_ref))
if (lockref_get_not_dead(&gl->gl_lockref))
return gl;
}

@@ -396,10 +388,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
held2 = (new_state != LM_ST_UNLOCKED);

if (held1 != held2) {
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
if (held2)
gfs2_glock_hold(gl);
gl->gl_lockref.count++;
else
gfs2_glock_put_nolock(gl);
gl->gl_lockref.count--;
}
if (held1 && held2 && list_empty(&gl->gl_holders))
clear_bit(GLF_QUEUED, &gl->gl_flags);
@@ -626,9 +619,9 @@ __acquires(&gl->gl_spin)
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
gfs2_glock_hold(gl);
gl->gl_lockref.count++;
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl);
gl->gl_lockref.count--;
return;

out_unlock:
@@ -754,7 +747,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_sbd = sdp;
gl->gl_flags = 0;
gl->gl_name = name;
atomic_set(&gl->gl_ref, 1);
gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1356,10 +1349,10 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
}
}

spin_unlock(&gl->gl_spin);
gl->gl_lockref.count++;
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
smp_wmb();
gfs2_glock_hold(gl);
spin_unlock(&gl->gl_spin);

if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
}
@@ -1404,15 +1397,19 @@ __acquires(&lru_lock)
while(!list_empty(list)) {
gl = list_entry(list->next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
if (!spin_trylock(&gl->gl_spin)) {
list_add(&gl->gl_lru, &lru_list);
atomic_inc(&lru_count);
continue;
}
clear_bit(GLF_LRU, &gl->gl_flags);
gfs2_glock_hold(gl);
spin_unlock(&lru_lock);
spin_lock(&gl->gl_spin);
gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl);
gl->gl_lockref.count--;
spin_unlock(&gl->gl_spin);
spin_lock(&lru_lock);
}
@@ -1493,7 +1490,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,

rcu_read_lock();
hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);
}
rcu_read_unlock();
@@ -1746,7 +1743,7 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
atomic_read(&gl->gl_revokes),
atomic_read(&gl->gl_ref), gl->gl_hold_time);
(int)gl->gl_lockref.count, gl->gl_hold_time);

list_for_each_entry(gh, &gl->gl_holders, gh_list) {
error = dump_holder(seq, gh);
@@ -1902,7 +1899,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
gi->nhash = 0;
}
/* Skip entries for other sb and dead entries */
} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
} while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gi->gl->gl_lockref));

return 0;
}
2 changes: 0 additions & 2 deletions fs/gfs2/glock.h
@@ -181,8 +181,6 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
unsigned flags, struct gfs2_holder *gh);
4 changes: 2 additions & 2 deletions fs/gfs2/glops.c
@@ -525,9 +525,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)

if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
gfs2_glock_hold(gl);
gl->gl_lockref.count++;
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gfs2_glock_put_nolock(gl);
gl->gl_lockref.count--;
}
}

(diffs for the remaining 15 changed files not shown)
