xfs: remove xfs_qm_sync
Now that we can't have any dirty dquots around that aren't in the AIL, we
can get rid of the explicit dquot syncing from xfssyncd and xfs_fs_sync_fs
and instead rely on AIL pushing to write out any quota updates.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Dave Chinner <[email protected]>
Signed-off-by: Ben Myers <[email protected]>
Christoph Hellwig authored and Ben Myers committed Dec 12, 2011
1 parent f2fba55 commit 34625c6
Showing 5 changed files with 3 additions and 119 deletions.
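The hunks below show the replacement model: with every dirty dquot guaranteed to sit in the AIL, quota writeback goes through the same log-force/AIL-push path as all other metadata, and no quota-specific sync pass remains. As a minimal sketch of that model (a hypothetical helper written for illustration only, not code from this commit; it merely combines the two calls the diff leaves in place in xfs_sync.c):

	/*
	 * Illustration only: flush all dirty metadata, dquots included.
	 * With dirty dquots always tracked in the AIL, no quota-specific
	 * step is needed.
	 */
	static void example_push_dirty_metadata(struct xfs_mount *mp)
	{
		/* commit everything to the on-disk log first ... */
		xfs_log_force(mp, XFS_LOG_SYNC);

		/* ... then have xfsaild write back all items in the AIL */
		xfs_ail_push_all(mp->m_ail);
	}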
94 changes: 0 additions & 94 deletions fs/xfs/xfs_qm.c
@@ -879,100 +879,6 @@ xfs_qm_dqdetach(
 	}
 }
 
-int
-xfs_qm_sync(
-	struct xfs_mount	*mp,
-	int			flags)
-{
-	struct xfs_quotainfo	*q = mp->m_quotainfo;
-	int			recl, restarts;
-	struct xfs_dquot	*dqp;
-	int			error;
-
-	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
-		return 0;
-
-	restarts = 0;
-
- again:
-	mutex_lock(&q->qi_dqlist_lock);
-	/*
-	 * dqpurge_all() also takes the mplist lock and iterates through all
-	 * dquots in quotaoff. However, if the QUOTA_ACTIVE bits are not
-	 * cleared when we have the mplist lock, we know that dquots will be
-	 * consistent as long as we have it locked.
-	 */
-	if (!XFS_IS_QUOTA_ON(mp)) {
-		mutex_unlock(&q->qi_dqlist_lock);
-		return 0;
-	}
-	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
-	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
-		/*
-		 * If this is vfs_sync calling, then skip the dquots that
-		 * don't 'seem' to be dirty, i.e. don't acquire dqlock.
-		 * This is very similar to what xfs_sync does with inodes.
-		 */
-		if (flags & SYNC_TRYLOCK) {
-			if (!XFS_DQ_IS_DIRTY(dqp))
-				continue;
-			if (!xfs_qm_dqlock_nowait(dqp))
-				continue;
-		} else {
-			xfs_dqlock(dqp);
-		}
-
-		/*
-		 * Now, find out for sure if this dquot is dirty or not.
-		 */
-		if (!XFS_DQ_IS_DIRTY(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
-
-		/* XXX a sentinel would be better */
-		recl = q->qi_dqreclaims;
-		if (!xfs_dqflock_nowait(dqp)) {
-			if (flags & SYNC_TRYLOCK) {
-				xfs_dqunlock(dqp);
-				continue;
-			}
-			/*
-			 * If we can't grab the flush lock and the caller
-			 * really wanted us to give this our best shot, see
-			 * if we can give a push to the buffer before we
-			 * wait on the flush lock. At this point, we know
-			 * that even though the dquot is being flushed, it
-			 * has (new) dirty data.
-			 */
-			xfs_qm_dqflock_pushbuf_wait(dqp);
-		}
-		/*
-		 * Let go of the mplist lock. We don't want to hold it
-		 * across a disk write.
-		 */
-		mutex_unlock(&q->qi_dqlist_lock);
-		error = xfs_qm_dqflush(dqp, flags);
-		xfs_dqunlock(dqp);
-		if (error && XFS_FORCED_SHUTDOWN(mp))
-			return 0;	/* Need to prevent umount failure */
-		else if (error)
-			return error;
-
-		mutex_lock(&q->qi_dqlist_lock);
-		if (recl != q->qi_dqreclaims) {
-			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
-				break;
-
-			mutex_unlock(&q->qi_dqlist_lock);
-			goto again;
-		}
-	}
-
-	mutex_unlock(&q->qi_dqlist_lock);
-	return 0;
-}
-
 /*
  * The hash chains and the mplist use the same xfs_dqhash structure as
  * their list head, but we can take the mplist qh_lock and one of the
6 changes: 0 additions & 6 deletions fs/xfs/xfs_qm.h
@@ -32,12 +32,6 @@ extern struct xfs_qm *xfs_Gqm;
 extern kmem_zone_t	*qm_dqzone;
 extern kmem_zone_t	*qm_dqtrxzone;
 
-/*
- * Used in xfs_qm_sync called by xfs_sync to count the max times that it can
- * iterate over the mountpt's dquot list in one call.
- */
-#define XFS_QM_SYNC_MAX_RESTARTS	7
-
 /*
  * Ditto, for xfs_qm_dqreclaim_one.
  */
5 changes: 0 additions & 5 deletions fs/xfs/xfs_quota.h
@@ -326,7 +326,6 @@ extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
 extern void xfs_qm_dqdetach(struct xfs_inode *);
 extern void xfs_qm_dqrele(struct xfs_dquot *);
 extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
-extern int xfs_qm_sync(struct xfs_mount *, int);
 extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
 extern void xfs_qm_mount_quotas(struct xfs_mount *);
 extern void xfs_qm_unmount(struct xfs_mount *);
@@ -366,10 +365,6 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
 #define xfs_qm_dqdetach(ip)
 #define xfs_qm_dqrele(d)
 #define xfs_qm_statvfs(ip, s)
-static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
-{
-	return 0;
-}
 #define xfs_qm_newmount(mp, a, b)	(0)
 #define xfs_qm_mount_quotas(mp)
 #define xfs_qm_unmount(mp)
11 changes: 2 additions & 9 deletions fs/xfs/xfs_super.c
@@ -1025,17 +1025,10 @@ xfs_fs_sync_fs(
 	int			error;
 
 	/*
-	 * Not much we can do for the first async pass.  Writing out the
-	 * superblock would be counter-productive as we are going to redirty
-	 * when writing out other data and metadata (and writing out a single
-	 * block is quite fast anyway).
-	 *
-	 * Try to asynchronously kick off quota syncing at least.
+	 * Doing anything during the async pass would be counterproductive.
 	 */
-	if (!wait) {
-		xfs_qm_sync(mp, SYNC_TRYLOCK);
+	if (!wait)
 		return 0;
-	}
 
 	error = xfs_quiesce_data(mp);
 	if (error)
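After this hunk, xfs_fs_sync_fs reduces to a bare wait check in front of the data quiesce; roughly as follows (a reconstruction from the context lines above plus the surrounding code of this era, not a verbatim copy of the tree at this commit; the trailing laptop_mode handling is elided):

	STATIC int
	xfs_fs_sync_fs(
		struct super_block	*sb,
		int			wait)
	{
		struct xfs_mount	*mp = XFS_M(sb);
		int			error;

		/*
		 * Doing anything during the async pass would be
		 * counterproductive.
		 */
		if (!wait)
			return 0;

		error = xfs_quiesce_data(mp);
		if (error)
			return -error;

		/* ... laptop_mode handling elided ... */
		return 0;
	}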
6 changes: 1 addition & 5 deletions fs/xfs/xfs_sync.c
@@ -359,10 +359,7 @@ xfs_quiesce_data(
 {
 	int error, error2 = 0;
 
-	xfs_qm_sync(mp, SYNC_TRYLOCK);
-	xfs_qm_sync(mp, SYNC_WAIT);
-
-	/* force out the newly dirtied log buffers */
+	/* force out the log */
 	xfs_log_force(mp, XFS_LOG_SYNC);
 
 	/* write superblock and hoover up shutdown errors */
@@ -470,7 +467,6 @@ xfs_sync_worker(
 			error = xfs_fs_log_dummy(mp);
 		else
 			xfs_log_force(mp, 0);
-		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 
 		/* start pushing all the metadata that is currently dirty */
 		xfs_ail_push_all(mp->m_ail);
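Taken together, the two xfs_sync.c hunks leave the periodic sync worker with nothing quota-specific to do: it covers or forces the log and then pushes the AIL, which now also writes back any dirty dquots. Condensed, the remaining sequence looks like this (reconstructed from the context lines above; the xfs_log_need_covered() condition is an assumption drawn from the surrounding code of this era, not shown in the diff):

	/* dgc: errors ignored here */
	if (xfs_log_need_covered(mp))
		error = xfs_fs_log_dummy(mp);
	else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);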
