gfs2: Rename rs_{free -> requested} and rd_{reserved -> requested}
We keep track of what we've so far been referring to as reservations in
rd_rstree: the nodes in that tree indicate where in a resource group we'd
like to allocate the next couple of blocks for a particular inode.  Local
processes take those as hints, but they may still "steal" blocks from those
extents, so when actually allocating a block, we must double check in the
bitmap whether that block is actually still free.  Likewise, other cluster
nodes may "steal" such blocks as well.

One of the following patches introduces resource group glock sharing, i.e.,
sharing of an exclusively locked resource group glock among local processes to
speed up allocations.  To make that work, we'll need to keep track of how many
blocks we've actually reserved for each inode, so we end up with two different
kinds of reservations.

Distinguish these two kinds by referring to blocks which are reserved but may
still be "stolen" as "requested".  This rename also makes it more obvious that
rs_requested and rd_requested are strongly related.

Signed-off-by: Andreas Gruenbacher <[email protected]>
Andreas Gruenbacher committed Feb 17, 2021
1 parent 0ec9b9e commit 07974d2
Showing 3 changed files with 33 additions and 33 deletions.
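
To make the "hints, not guarantees" behaviour described in the first paragraph of the commit message concrete, here is a tiny standalone model (plain C; nothing below is GFS2 code and all names are invented for the example): the reserved extent only suggests where to allocate, and the bitmap is re-checked because a local process or another cluster node may already have taken the hinted block.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_RGRP_BLOCKS 64

/* One bit per block; a set bit means the block is free. */
struct toy_rgrp {
	uint64_t free_bitmap;
};

/* A reservation is only a hint: where we'd like the next blocks to come from. */
struct toy_resv {
	uint64_t start;
	uint32_t requested;
};

static bool block_is_free(const struct toy_rgrp *rgd, uint64_t block)
{
	return rgd->free_bitmap & (1ULL << block);
}

/* Allocate one block near the reservation, re-checking the bitmap because
 * another process or cluster node may have "stolen" the hinted block. */
static int toy_alloc_block(struct toy_rgrp *rgd, const struct toy_resv *rs,
			   uint64_t *block)
{
	uint64_t b = rs->start;

	while (b < TOY_RGRP_BLOCKS && !block_is_free(rgd, b))
		b++;
	if (b >= TOY_RGRP_BLOCKS)
		return -1;		/* nothing left in this resource group */
	rgd->free_bitmap &= ~(1ULL << b);
	*block = b;
	return 0;
}

int main(void)
{
	struct toy_rgrp rgd = { .free_bitmap = ~0ULL };
	struct toy_resv rs = { .start = 3, .requested = 4 };
	uint64_t block;

	rgd.free_bitmap &= ~(1ULL << 3);	/* block 3 was "stolen" */
	if (toy_alloc_block(&rgd, &rs, &block) == 0)
		printf("allocated block %llu\n", (unsigned long long)block);
	return 0;
}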
6 changes: 3 additions & 3 deletions fs/gfs2/incore.h
@@ -106,7 +106,7 @@ struct gfs2_rgrpd {
 	u32 rd_data;			/* num of data blocks in rgrp */
 	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
 	u32 rd_free;
-	u32 rd_reserved;		/* number of blocks reserved */
+	u32 rd_requested;		/* number of blocks in rd_rstree */
 	u32 rd_free_clone;
 	u32 rd_dinodes;
 	u64 rd_igeneration;
@@ -290,8 +290,8 @@ struct gfs2_qadata { /* quota allocation data */
 struct gfs2_blkreserv {
 	struct rb_node rs_node;		/* node within rd_rstree */
 	struct gfs2_rgrpd *rs_rgd;
-	u64 rs_start;			/* start of reservation */
-	u32 rs_free;			/* how many blocks are still free */
+	u64 rs_start;
+	u32 rs_requested;
 };
 
 /*
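
The new comment on rd_requested above spells out the relationship the commit message mentions: rd_requested is meant to track the total of rs_requested across the reservations in rd_rstree, with rs_insert() adding to it and __rs_deltree() handing unclaimed blocks back (see the rgrp.c hunks below). A standalone sketch of that bookkeeping, with a fixed array standing in for the rbtree (illustrative only, not GFS2 code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define TOY_MAX_RESV 8

struct toy_resv {
	uint64_t start;
	uint32_t requested;
};

struct toy_rgrp {
	uint32_t rd_requested;			/* sum over all reservations */
	struct toy_resv *rstree[TOY_MAX_RESV];	/* stand-in for rd_rstree */
};

/* Mirrors rs_insert(): account the new reservation against the rgrp. */
static void toy_insert(struct toy_rgrp *rgd, size_t slot, struct toy_resv *rs)
{
	rgd->rstree[slot] = rs;
	rgd->rd_requested += rs->requested;
}

/* Mirrors __rs_deltree(): return the still-unclaimed blocks to the rgrp. */
static void toy_deltree(struct toy_rgrp *rgd, size_t slot)
{
	struct toy_resv *rs = rgd->rstree[slot];

	assert(rgd->rd_requested >= rs->requested);
	rgd->rd_requested -= rs->requested;
	rs->requested = 0;
	rgd->rstree[slot] = NULL;
}

int main(void)
{
	struct toy_rgrp rgd = { 0 };
	struct toy_resv a = { .start = 100, .requested = 16 };
	struct toy_resv b = { .start = 200, .requested = 8 };

	toy_insert(&rgd, 0, &a);
	toy_insert(&rgd, 1, &b);
	assert(rgd.rd_requested == 24);
	toy_deltree(&rgd, 0);
	assert(rgd.rd_requested == 8);
	return 0;
}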
42 changes: 21 additions & 21 deletions fs/gfs2/rgrp.c
@@ -203,7 +203,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
  */
 static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs)
 {
-	if (start >= rs->rs_start + rs->rs_free)
+	if (start >= rs->rs_start + rs->rs_requested)
 		return 1;
 	if (rs->rs_start >= start + len)
 		return -1;
@@ -625,7 +625,7 @@ static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
 		       fs_id_buf,
 		       (unsigned long long)ip->i_no_addr,
 		       (unsigned long long)rs->rs_start,
-		       rs->rs_free);
+		       rs->rs_requested);
 }
 
 /**
@@ -645,17 +645,17 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
 	rb_erase(&rs->rs_node, &rgd->rd_rstree);
 	RB_CLEAR_NODE(&rs->rs_node);
 
-	if (rs->rs_free) {
-		/* return reserved blocks to the rgrp */
-		BUG_ON(rs->rs_rgd->rd_reserved < rs->rs_free);
-		rs->rs_rgd->rd_reserved -= rs->rs_free;
+	if (rs->rs_requested) {
+		/* return requested blocks to the rgrp */
+		BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested);
+		rs->rs_rgd->rd_requested -= rs->rs_requested;
 
 		/* The rgrp extent failure point is likely not to increase;
 		   it will only do so if the freed blocks are somehow
 		   contiguous with a span of free blocks that follows. Still,
 		   it will force the number to be recalculated later. */
-		rgd->rd_extfail_pt += rs->rs_free;
-		rs->rs_free = 0;
+		rgd->rd_extfail_pt += rs->rs_requested;
+		rs->rs_requested = 0;
 	}
 }
 
@@ -672,7 +672,7 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 	if (rgd) {
 		spin_lock(&rgd->rd_rsspin);
 		__rs_deltree(rs);
-		BUG_ON(rs->rs_free);
+		BUG_ON(rs->rs_requested);
 		spin_unlock(&rgd->rd_rsspin);
 	}
 }
@@ -1504,7 +1504,7 @@ static void rs_insert(struct gfs2_inode *ip)
 			rb_entry(*newn, struct gfs2_blkreserv, rs_node);
 
 		parent = *newn;
-		rc = rs_cmp(rs->rs_start, rs->rs_free, cur);
+		rc = rs_cmp(rs->rs_start, rs->rs_requested, cur);
 		if (rc > 0)
 			newn = &((*newn)->rb_right);
 		else if (rc < 0)
@@ -1520,7 +1520,7 @@ static void rs_insert(struct gfs2_inode *ip)
 	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
 
 	/* Do our rgrp accounting for the reservation */
-	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
+	rgd->rd_requested += rs->rs_requested; /* blocks requested */
 	spin_unlock(&rgd->rd_rsspin);
 	trace_gfs2_rs(rs, TRACE_RS_INSERT);
 }
@@ -1541,9 +1541,9 @@ static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
 {
 	u32 tot_reserved, tot_free;
 
-	if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
+	if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested))
 		return 0;
-	tot_reserved = rgd->rd_reserved - rs->rs_free;
+	tot_reserved = rgd->rd_requested - rs->rs_requested;
 
 	if (rgd->rd_free_clone < tot_reserved)
 		tot_reserved = 0;
@@ -1578,7 +1578,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 		extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
 		extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
 	}
-	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
+	if ((rgd->rd_free_clone < rgd->rd_requested) || (free_blocks < extlen))
 		return;
 
 	/* Find bitmap block that contains bits for goal block */
@@ -1593,7 +1593,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true);
 	if (ret == 0) {
 		rs->rs_start = gfs2_rbm_to_block(&rbm);
-		rs->rs_free = extlen;
+		rs->rs_requested = extlen;
 		rs_insert(ip);
 	} else {
 		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
@@ -1637,7 +1637,7 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 
 	if (n) {
 		while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) {
-			block = rs->rs_start + rs->rs_free;
+			block = rs->rs_start + rs->rs_requested;
 			n = n->rb_right;
 			if (n == NULL)
 				break;
@@ -2263,7 +2263,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
 		       fs_id_buf,
 		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
 		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
-		       rgd->rd_reserved, rgd->rd_extfail_pt);
+		       rgd->rd_requested, rgd->rd_extfail_pt);
 	if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
 		struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
 
@@ -2318,12 +2318,12 @@ static void gfs2_adjust_reservation(struct gfs2_inode *ip,
 		unsigned int rlen;
 
 		rs->rs_start += len;
-		rlen = min(rs->rs_free, len);
-		rs->rs_free -= rlen;
-		rgd->rd_reserved -= rlen;
+		rlen = min(rs->rs_requested, len);
+		rs->rs_requested -= rlen;
+		rgd->rd_requested -= rlen;
 		trace_gfs2_rs(rs, TRACE_RS_CLAIM);
 		if (rs->rs_start < rgd->rd_data0 + rgd->rd_data &&
-		    rs->rs_free)
+		    rs->rs_requested)
 			goto out;
 		/* We used up our block reservation, so we should
 		   reserve more blocks next time. */
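
The gfs2_adjust_reservation() hunk above is where "requested" blocks turn into actually allocated ones: claiming an extent of len blocks advances rs_start by len and removes min(rs_requested, len) blocks from both rs_requested and rd_requested. A standalone arithmetic check of that bookkeeping (plain C, not GFS2 code; the starting numbers are made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t rs_start = 1000;	/* reservation starts at block 1000 */
	uint32_t rs_requested = 10;	/* 10 blocks requested by this inode */
	uint32_t rd_requested = 25;	/* rgrp-wide total across rd_rstree  */
	uint32_t len = 4;		/* extent actually being allocated   */
	uint32_t rlen = rs_requested < len ? rs_requested : len;

	rs_start += len;
	rs_requested -= rlen;
	rd_requested -= rlen;

	assert(rs_start == 1004);
	assert(rs_requested == 6);
	assert(rd_requested == 21);
	return 0;
}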
18 changes: 9 additions & 9 deletions fs/gfs2/trace_gfs2.h
@@ -560,7 +560,7 @@ TRACE_EVENT(gfs2_block_alloc,
 		__field(	u8,	block_state		)
 		__field(	u64,	rd_addr			)
 		__field(	u32,	rd_free_clone		)
-		__field(	u32,	rd_reserved		)
+		__field(	u32,	rd_requested		)
 	),
 
 	TP_fast_assign(
@@ -571,7 +571,7 @@ TRACE_EVENT(gfs2_block_alloc,
 		__entry->block_state	= block_state;
 		__entry->rd_addr	= rgd->rd_addr;
 		__entry->rd_free_clone	= rgd->rd_free_clone;
-		__entry->rd_reserved	= rgd->rd_reserved;
+		__entry->rd_requested	= rgd->rd_requested;
 	),
 
 	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu",
@@ -581,7 +581,7 @@ TRACE_EVENT(gfs2_block_alloc,
 		  (unsigned long)__entry->len,
 		  block_state_name(__entry->block_state),
 		  (unsigned long long)__entry->rd_addr,
-		  __entry->rd_free_clone, (unsigned long)__entry->rd_reserved)
+		  __entry->rd_free_clone, (unsigned long)__entry->rd_requested)
 );
 
 /* Keep track of multi-block reservations as they are allocated/freed */
@@ -595,22 +595,22 @@ TRACE_EVENT(gfs2_rs,
 		__field(	dev_t,	dev			)
 		__field(	u64,	rd_addr			)
 		__field(	u32,	rd_free_clone		)
-		__field(	u32,	rd_reserved		)
+		__field(	u32,	rd_requested		)
 		__field(	u64,	inum			)
 		__field(	u64,	start			)
-		__field(	u32,	free			)
+		__field(	u32,	requested		)
 		__field(	u8,	func			)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= rs->rs_rgd->rd_sbd->sd_vfs->s_dev;
 		__entry->rd_addr	= rs->rs_rgd->rd_addr;
 		__entry->rd_free_clone	= rs->rs_rgd->rd_free_clone;
-		__entry->rd_reserved	= rs->rs_rgd->rd_reserved;
+		__entry->rd_requested	= rs->rs_rgd->rd_requested;
 		__entry->inum		= container_of(rs, struct gfs2_inode,
 						       i_res)->i_no_addr;
 		__entry->start		= rs->rs_start;
-		__entry->free		= rs->rs_free;
+		__entry->requested	= rs->rs_requested;
 		__entry->func		= func;
 	),
 
@@ -620,8 +620,8 @@ TRACE_EVENT(gfs2_rs,
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->rd_addr,
 		  (unsigned long)__entry->rd_free_clone,
-		  (unsigned long)__entry->rd_reserved,
-		  rs_func_name(__entry->func), (unsigned long)__entry->free)
+		  (unsigned long)__entry->rd_requested,
+		  rs_func_name(__entry->func), (unsigned long)__entry->requested)
 );
 
 #endif /* _TRACE_GFS2_H */
