Commit ff6d6af2 authored by Bill O'Donnell, committed by Dave Chinner

xfs: per-filesystem stats counter implementation

This patch modifies the stats counting macros and their callers so
that the xfs stats counts are properly incremented, decremented, and
added to. Both the global and the per-fs counts are advanced, and
each set is cleared by writing a "1" to the corresponding clear file.

global counts: /sys/fs/xfs/stats/stats
per-fs counts: /sys/fs/xfs/sda*/stats/stats

global clear:  /sys/fs/xfs/stats/stats_clear
per-fs clear:  /sys/fs/xfs/sda*/stats/stats_clear
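
With this change every stats macro takes the mount as its first
argument and bumps both the global and the per-mount per-cpu counter.
A minimal sketch of the call-site change (mirroring the xfs_stats.h
hunk below; the attr-get site is just one example):

	/* before: global stats only */
	XFS_STATS_INC(xs_attr_get);

	/* after: the mount is passed so the per-fs copy is bumped too */
	XFS_STATS_INC(ip->i_mount, xs_attr_get);

	/* XFS_STATS_INC(mp, v) now expands to roughly: */
	do {
		per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v++;	/* global */
		per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v++;	/* per-fs */
	} while (0)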

[dchinner: cleaned up macro variables, removed CONFIG_PROC_FS around
 stats structures and macros.]
Signed-off-by: Bill O'Donnell <billodo@redhat.com>
Reviewed-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent 225e4635
@@ -651,8 +651,8 @@ xfs_alloc_ag_vextent(
 				-((long)(args->len)));
 	}
-	XFS_STATS_INC(xs_allocx);
-	XFS_STATS_ADD(xs_allocb, args->len);
+	XFS_STATS_INC(args->mp, xs_allocx);
+	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
 	return error;
 }
@@ -1808,8 +1808,8 @@ xfs_free_ag_extent(
 	if (!isfl)
 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
-	XFS_STATS_INC(xs_freex);
-	XFS_STATS_ADD(xs_freeb, len);
+	XFS_STATS_INC(mp, xs_freex);
+	XFS_STATS_ADD(mp, xs_freeb, len);
 	trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
@@ -125,7 +125,7 @@ xfs_attr_get(
 	uint			lock_mode;
 	int			error;
-	XFS_STATS_INC(xs_attr_get);
+	XFS_STATS_INC(ip->i_mount, xs_attr_get);
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
@@ -209,7 +209,7 @@ xfs_attr_set(
 	int			rsvd = (flags & ATTR_ROOT) != 0;
 	int			error, err2, committed, local;
-	XFS_STATS_INC(xs_attr_set);
+	XFS_STATS_INC(mp, xs_attr_set);
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
@@ -412,7 +412,7 @@ xfs_attr_remove(
 	xfs_fsblock_t		firstblock;
 	int			error;
-	XFS_STATS_INC(xs_attr_remove);
+	XFS_STATS_INC(mp, xs_attr_remove);
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
@@ -1435,7 +1435,7 @@ xfs_bmap_search_extents(
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
 	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
-	XFS_STATS_INC(xs_look_exlist);
+	XFS_STATS_INC(ip->i_mount, xs_look_exlist);
 	ifp = XFS_IFORK_PTR(ip, fork);
 	ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
@@ -1732,7 +1732,7 @@ xfs_bmap_add_extent_delay_real(
 	ASSERT(!bma->cur ||
 	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
-	XFS_STATS_INC(xs_add_exlist);
+	XFS_STATS_INC(mp, xs_add_exlist);
 #define	LEFT		r[0]
 #define	RIGHT		r[1]
@@ -2286,7 +2286,7 @@ xfs_bmap_add_extent_unwritten_real(
 	ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
 	ASSERT(!isnullstartblock(new->br_startblock));
-	XFS_STATS_INC(xs_add_exlist);
+	XFS_STATS_INC(mp, xs_add_exlist);
 #define	LEFT		r[0]
 #define	RIGHT		r[1]
@@ -2946,7 +2946,7 @@ xfs_bmap_add_extent_hole_real(
 	ASSERT(!bma->cur ||
 	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
-	XFS_STATS_INC(xs_add_exlist);
+	XFS_STATS_INC(mp, xs_add_exlist);
 	state = 0;
 	if (whichfork == XFS_ATTR_FORK)
@@ -4036,7 +4036,7 @@ xfs_bmapi_read(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
-	XFS_STATS_INC(xs_blk_mapr);
+	XFS_STATS_INC(mp, xs_blk_mapr);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -4221,7 +4221,7 @@ xfs_bmapi_delay(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
-	XFS_STATS_INC(xs_blk_mapw);
+	XFS_STATS_INC(mp, xs_blk_mapw);
 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
@@ -4525,7 +4525,7 @@ xfs_bmapi_write(
 	ifp = XFS_IFORK_PTR(ip, whichfork);
-	XFS_STATS_INC(xs_blk_mapw);
+	XFS_STATS_INC(mp, xs_blk_mapw);
 	if (*firstblock == NULLFSBLOCK) {
 		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
@@ -4718,12 +4718,12 @@ xfs_bmap_del_extent(
 	xfs_filblks_t		temp2;	/* for indirect length calculations */
 	int			state = 0;
-	XFS_STATS_INC(xs_del_exlist);
+	mp = ip->i_mount;
+	XFS_STATS_INC(mp, xs_del_exlist);
 	if (whichfork == XFS_ATTR_FORK)
 		state |= BMAP_ATTRFORK;
-	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
 		(uint)sizeof(xfs_bmbt_rec_t)));
@@ -5070,7 +5070,7 @@ xfs_bunmapi(
 		*done = 1;
 		return 0;
 	}
-	XFS_STATS_INC(xs_blk_unmap);
+	XFS_STATS_INC(mp, xs_blk_unmap);
 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
 	start = bno;
 	bno = start + len - 1;
@@ -84,31 +84,38 @@ union xfs_btree_rec {
 /*
  * Generic stats interface
  */
-#define __XFS_BTREE_STATS_INC(type, stat) \
-	XFS_STATS_INC(xs_ ## type ## _2_ ## stat)
+#define __XFS_BTREE_STATS_INC(mp, type, stat) \
+	XFS_STATS_INC(mp, xs_ ## type ## _2_ ## stat)
 #define XFS_BTREE_STATS_INC(cur, stat)	\
 do {	\
+	struct xfs_mount *__mp = cur->bc_mp; \
 	switch (cur->bc_btnum) {	\
-	case XFS_BTNUM_BNO: __XFS_BTREE_STATS_INC(abtb, stat); break; \
-	case XFS_BTNUM_CNT: __XFS_BTREE_STATS_INC(abtc, stat); break; \
-	case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(bmbt, stat); break; \
-	case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(ibt, stat); break; \
-	case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(fibt, stat); break; \
+	case XFS_BTNUM_BNO: __XFS_BTREE_STATS_INC(__mp, abtb, stat); break; \
+	case XFS_BTNUM_CNT: __XFS_BTREE_STATS_INC(__mp, abtc, stat); break; \
+	case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(__mp, bmbt, stat); break; \
+	case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(__mp, ibt, stat); break; \
+	case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(__mp, fibt, stat); break; \
 	case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
 	} \
 } while (0)
-#define __XFS_BTREE_STATS_ADD(type, stat, val) \
-	XFS_STATS_ADD(xs_ ## type ## _2_ ## stat, val)
+#define __XFS_BTREE_STATS_ADD(mp, type, stat, val) \
+	XFS_STATS_ADD(mp, xs_ ## type ## _2_ ## stat, val)
 #define XFS_BTREE_STATS_ADD(cur, stat, val)  \
 do {	\
+	struct xfs_mount *__mp = cur->bc_mp; \
 	switch (cur->bc_btnum) {	\
-	case XFS_BTNUM_BNO: __XFS_BTREE_STATS_ADD(abtb, stat, val); break; \
-	case XFS_BTNUM_CNT: __XFS_BTREE_STATS_ADD(abtc, stat, val); break; \
-	case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_ADD(bmbt, stat, val); break; \
-	case XFS_BTNUM_INO: __XFS_BTREE_STATS_ADD(ibt, stat, val); break; \
-	case XFS_BTNUM_FINO: __XFS_BTREE_STATS_ADD(fibt, stat, val); break; \
-	case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
+	case XFS_BTNUM_BNO: \
+		__XFS_BTREE_STATS_ADD(__mp, abtb, stat, val); break; \
+	case XFS_BTNUM_CNT: \
+		__XFS_BTREE_STATS_ADD(__mp, abtc, stat, val); break; \
+	case XFS_BTNUM_BMAP: \
+		__XFS_BTREE_STATS_ADD(__mp, bmbt, stat, val); break; \
+	case XFS_BTNUM_INO: \
+		__XFS_BTREE_STATS_ADD(__mp, ibt, stat, val); break; \
+	case XFS_BTNUM_FINO: \
+		__XFS_BTREE_STATS_ADD(__mp, fibt, stat, val); break; \
+	case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
 	} \
 } while (0)
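Note: the cursor-based wrappers keep their signatures, so existing
callers do not need to change; the mount is now picked up from
cur->bc_mp inside the macro. A hypothetical call site (the stat names
here are illustrative, not taken from this patch) looks the same
before and after:

	/* unchanged caller: the cursor already carries its mount */
	XFS_BTREE_STATS_INC(cur, lookup);	/* bumps the per-btree-type counter globally and for cur->bc_mp */
	XFS_BTREE_STATS_ADD(cur, moves, nrecs);	/* adds nrecs to the matching counters */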
@@ -271,7 +271,7 @@ xfs_dir_createname(
 		rval = xfs_dir_ino_validate(tp->t_mountp, inum);
 		if (rval)
 			return rval;
-		XFS_STATS_INC(xs_dir_create);
+		XFS_STATS_INC(dp->i_mount, xs_dir_create);
 	}
 	args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
@@ -365,7 +365,7 @@ xfs_dir_lookup(
 	int		lock_mode;
 	ASSERT(S_ISDIR(dp->i_d.di_mode));
-	XFS_STATS_INC(xs_dir_lookup);
+	XFS_STATS_INC(dp->i_mount, xs_dir_lookup);
 	/*
 	 * We need to use KM_NOFS here so that lockdep will not throw false
@@ -444,7 +444,7 @@ xfs_dir_removename(
 	int		v;		/* type-checking value */
 	ASSERT(S_ISDIR(dp->i_d.di_mode));
-	XFS_STATS_INC(xs_dir_remove);
+	XFS_STATS_INC(dp->i_mount, xs_dir_remove);
 	args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
 	if (!args)
@@ -511,7 +511,7 @@ xfs_attr_list_int(
 	xfs_inode_t		*dp = context->dp;
 	uint			lock_mode;
-	XFS_STATS_INC(xs_attr_list);
+	XFS_STATS_INC(dp->i_mount, xs_attr_list);
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
@@ -201,7 +201,7 @@ _xfs_buf_alloc(
 	atomic_set(&bp->b_pin_count, 0);
 	init_waitqueue_head(&bp->b_waiters);
-	XFS_STATS_INC(xb_create);
+	XFS_STATS_INC(target->bt_mount, xb_create);
 	trace_xfs_buf_init(bp, _RET_IP_);
 	return bp;
@@ -357,12 +357,12 @@ xfs_buf_allocate_memory(
 				"possible memory allocation deadlock in %s (mode:0x%x)",
 					__func__, gfp_mask);
-			XFS_STATS_INC(xb_page_retries);
+			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto retry;
 		}
-		XFS_STATS_INC(xb_page_found);
+		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);
 		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
 		size -= nbytes;
@@ -516,7 +516,7 @@ _xfs_buf_find(
 		new_bp->b_pag = pag;
 		spin_unlock(&pag->pag_buf_lock);
 	} else {
-		XFS_STATS_INC(xb_miss_locked);
+		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 		spin_unlock(&pag->pag_buf_lock);
 		xfs_perag_put(pag);
 	}
@@ -529,11 +529,11 @@ _xfs_buf_find(
 	if (!xfs_buf_trylock(bp)) {
 		if (flags & XBF_TRYLOCK) {
 			xfs_buf_rele(bp);
-			XFS_STATS_INC(xb_busy_locked);
+			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
 			return NULL;
 		}
 		xfs_buf_lock(bp);
-		XFS_STATS_INC(xb_get_locked_waited);
+		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
 	}
 	/*
@@ -549,7 +549,7 @@ _xfs_buf_find(
 	}
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
-	XFS_STATS_INC(xb_get_locked);
+	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
 	return bp;
 }
@@ -603,7 +603,7 @@ xfs_buf_get_map(
 		}
 	}
-	XFS_STATS_INC(xb_get);
+	XFS_STATS_INC(target->bt_mount, xb_get);
 	trace_xfs_buf_get(bp, flags, _RET_IP_);
 	return bp;
 }
@@ -643,7 +643,7 @@ xfs_buf_read_map(
 	trace_xfs_buf_read(bp, flags, _RET_IP_);
 	if (!XFS_BUF_ISDONE(bp)) {
-		XFS_STATS_INC(xb_get_read);
+		XFS_STATS_INC(target->bt_mount, xb_get_read);
 		bp->b_ops = ops;
 		_xfs_buf_read(bp, flags);
 	} else if (flags & XBF_ASYNC) {
@@ -666,7 +666,7 @@ xfs_readdir(
 		return -EIO;
 	ASSERT(S_ISDIR(dp->i_d.di_mode));
-	XFS_STATS_INC(xs_dir_getdents);
+	XFS_STATS_INC(dp->i_mount, xs_dir_getdents);
 	args.dp = dp;
 	args.geo = dp->i_mount->m_dir_geo;
@@ -75,9 +75,9 @@ xfs_qm_dqdestroy(
 	ASSERT(list_empty(&dqp->q_lru));
 	mutex_destroy(&dqp->q_qlock);
-	kmem_zone_free(xfs_qm_dqzone, dqp);
-	XFS_STATS_DEC(xs_qm_dquot);
+	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
+	kmem_zone_free(xfs_qm_dqzone, dqp);
 }
 /*
@@ -605,7 +605,7 @@ xfs_qm_dqread(
 		break;
 	}
-	XFS_STATS_INC(xs_qm_dquot);
+	XFS_STATS_INC(mp, xs_qm_dquot);
 	trace_xfs_dqread(dqp);
@@ -747,12 +747,12 @@ xfs_qm_dqget(
 			mutex_unlock(&qi->qi_tree_lock);
 			trace_xfs_dqget_hit(dqp);
-			XFS_STATS_INC(xs_qm_dqcachehits);
+			XFS_STATS_INC(mp, xs_qm_dqcachehits);
 			*O_dqpp = dqp;
 			return 0;
 		}
 		mutex_unlock(&qi->qi_tree_lock);
-		XFS_STATS_INC(xs_qm_dqcachemisses);
+		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
 		/*
 		 * Dquot cache miss. We don't want to keep the inode lock across
@@ -806,7 +806,7 @@ xfs_qm_dqget(
 			mutex_unlock(&qi->qi_tree_lock);
 			trace_xfs_dqget_dup(dqp);
 			xfs_qm_dqdestroy(dqp);
-			XFS_STATS_INC(xs_qm_dquot_dups);
+			XFS_STATS_INC(mp, xs_qm_dquot_dups);
 			goto restart;
 		}
@@ -846,7 +846,7 @@ xfs_qm_dqput(
 		trace_xfs_dqput_free(dqp);
 		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
-			XFS_STATS_INC(xs_qm_dquot_unused);
+			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
 	}
 	xfs_dqunlock(dqp);
 }
@@ -287,7 +287,7 @@ xfs_file_read_iter(
 	xfs_fsize_t		n;
 	loff_t			pos = iocb->ki_pos;
-	XFS_STATS_INC(xs_read_calls);
+	XFS_STATS_INC(mp, xs_read_calls);
 	if (unlikely(iocb->ki_flags & IOCB_DIRECT))
 		ioflags |= XFS_IO_ISDIRECT;
@@ -365,7 +365,7 @@ xfs_file_read_iter(
 	ret = generic_file_read_iter(iocb, to);
 	if (ret > 0)
-		XFS_STATS_ADD(xs_read_bytes, ret);
+		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
@@ -383,7 +383,7 @@ xfs_file_splice_read(
 	int			ioflags = 0;
 	ssize_t			ret;
-	XFS_STATS_INC(xs_read_calls);
+	XFS_STATS_INC(ip->i_mount, xs_read_calls);
 	if (infilp->f_mode & FMODE_NOCMTIME)
 		ioflags |= XFS_IO_INVIS;
@@ -401,7 +401,7 @@ xfs_file_splice_read(
 	else
 		ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
 	if (ret > 0)
-		XFS_STATS_ADD(xs_read_bytes, ret);
+		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
@@ -867,7 +867,7 @@ xfs_file_write_iter(
 	ssize_t			ret;
 	size_t			ocount = iov_iter_count(from);
-	XFS_STATS_INC(xs_write_calls);
+	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 	if (ocount == 0)
 		return 0;
@@ -883,7 +883,7 @@ xfs_file_write_iter(
 	if (ret > 0) {
 		ssize_t err;
-		XFS_STATS_ADD(xs_write_bytes, ret);
+		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 		/* Handle various SYNC-type writes */
 		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
@@ -63,7 +63,7 @@ xfs_inode_alloc(
 		return NULL;
 	}
-	XFS_STATS_INC(vn_active);
+	XFS_STATS_INC(mp, vn_active);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(!xfs_isiflocked(ip));
@@ -129,7 +129,7 @@ xfs_inode_free(
 	/* asserts to verify all state is correct here */
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
 	ASSERT(!xfs_isiflocked(ip));
-	XFS_STATS_DEC(vn_active);
+	XFS_STATS_DEC(ip->i_mount, vn_active);
 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
@@ -159,7 +159,7 @@ xfs_iget_cache_hit(
 	spin_lock(&ip->i_flags_lock);
 	if (ip->i_ino != ino) {
 		trace_xfs_iget_skip(ip);
-		XFS_STATS_INC(xs_ig_frecycle);
+		XFS_STATS_INC(mp, xs_ig_frecycle);
 		error = -EAGAIN;
 		goto out_error;
 	}
@@ -177,7 +177,7 @@ xfs_iget_cache_hit(
 	 */
 	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
 		trace_xfs_iget_skip(ip);
-		XFS_STATS_INC(xs_ig_frecycle);
+		XFS_STATS_INC(mp, xs_ig_frecycle);
 		error = -EAGAIN;
 		goto out_error;
 	}
@@ -259,7 +259,7 @@ xfs_iget_cache_hit(
 	xfs_ilock(ip, lock_flags);
 	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
-	XFS_STATS_INC(xs_ig_found);
+	XFS_STATS_INC(mp, xs_ig_found);
 	return 0;
@@ -342,7 +342,7 @@ xfs_iget_cache_miss(
 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
 	if (unlikely(error)) {
 		WARN_ON(error != -EEXIST);
-		XFS_STATS_INC(xs_ig_dup);
+		XFS_STATS_INC(mp, xs_ig_dup);
 		error = -EAGAIN;
 		goto out_preload_end;
 	}
@@ -412,7 +412,7 @@ xfs_iget(
 	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 		return -EINVAL;
-	XFS_STATS_INC(xs_ig_attempts);
+	XFS_STATS_INC(mp, xs_ig_attempts);
 	/* get the perag structure and ensure that it's inode capable */
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
@@ -429,7 +429,7 @@ xfs_iget(
 		goto out_error_or_again;
 	} else {
 		rcu_read_unlock();
-		XFS_STATS_INC(xs_ig_missed);
+		XFS_STATS_INC(mp, xs_ig_missed);
 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
 							flags, lock_flags);
@@ -965,7 +965,7 @@ xfs_reclaim_inode(
 	xfs_ifunlock(ip);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	XFS_STATS_INC(xs_ig_reclaims);
+	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
 	/*
 	 * Remove the inode from the per-AG radix tree.
 	 *
@@ -3271,8 +3271,8 @@ xfs_iflush_cluster(
 	}
 	if (clcount) {
-		XFS_STATS_INC(xs_icluster_flushcnt);
-		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
+		XFS_STATS_INC(mp, xs_icluster_flushcnt);
+		XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
 	}
 out_free:
@@ -3345,7 +3345,7 @@ xfs_iflush(
 	struct xfs_dinode	*dip;
 	int			error;
-	XFS_STATS_INC(xs_iflush_count);
+	XFS_STATS_INC(mp, xs_iflush_count);
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 	ASSERT(xfs_isiflocked(ip));
@@ -1028,7 +1028,7 @@ xfs_ioctl_setattr_xflags(
 	xfs_diflags_to_linux(ip);
 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	XFS_STATS_INC(xs_ig_attrchg);
+	XFS_STATS_INC(mp, xs_ig_attrchg);
 	return 0;
 }
@@ -670,7 +670,7 @@ xfs_iomap_write_allocate(
 	count_fsb = imap->br_blockcount;
 	map_start_fsb = imap->br_startoff;
-	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
+	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
 	while (count_fsb != 0) {
 		/*
@@ -777,7 +777,7 @@ xfs_iomap_write_allocate(
 		if ((offset_fsb >= imap->br_startoff) &&
 		    (offset_fsb < (imap->br_startoff +
 				   imap->br_blockcount))) {
-			XFS_STATS_INC(xs_xstrat_quick);
+			XFS_STATS_INC(mp, xs_xstrat_quick);
 			return 0;
 		}
@@ -695,7 +695,7 @@ xfs_setattr_nonsize(
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	XFS_STATS_INC(xs_ig_attrchg);
+	XFS_STATS_INC(mp, xs_ig_attrchg);
 	if (mp->m_flags & XFS_MOUNT_WSYNC)
 		xfs_trans_set_sync(tp);
@@ -922,7 +922,7 @@ xfs_setattr_size(
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	XFS_STATS_INC(xs_ig_attrchg);
+	XFS_STATS_INC(mp, xs_ig_attrchg);
 	if (mp->m_flags & XFS_MOUNT_WSYNC)
 		xfs_trans_set_sync(tp);
@@ -268,7 +268,7 @@ xlog_grant_head_wait(
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_unlock(&head->lock);
-		XFS_STATS_INC(xs_sleep_logspace);
+		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
 		trace_xfs_log_grant_sleep(log, tic);
 		schedule();
@@ -379,7 +379,7 @@ xfs_log_regrant(
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return -EIO;
-	XFS_STATS_INC(xs_try_logspace);
+	XFS_STATS_INC(mp, xs_try_logspace);
 	/*
 	 * This is a new transaction on the ticket, so we need to change the
@@ -448,7 +448,7 @@ xfs_log_reserve(
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return -EIO;
-	XFS_STATS_INC(xs_try_logspace);
+	XFS_STATS_INC(mp, xs_try_logspace);
 	ASSERT(*ticp == NULL);
 	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
@@ -1768,7 +1768,7 @@ xlog_sync(
 	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
 	int		size;
-	XFS_STATS_INC(xs_log_writes);
+	XFS_STATS_INC(log->l_mp, xs_log_writes);
 	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
 	/* Add for LR header */
@@ -1805,7 +1805,7 @@ xlog_sync(
 	bp = iclog->ic_bp;
 	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
-	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
+	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
 	/* Do we need to split this write into 2 parts? */
 	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
@@ -2913,7 +2913,7 @@ xlog_state_get_iclog_space(
 	iclog = log->l_iclog;
 	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
-		XFS_STATS_INC(xs_log_noiclogs);
+		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
 		/* Wait for log writes to have flushed */
 		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
@@ -3212,7 +3212,7 @@ _xfs_log_force(
 	struct xlog_in_core	*iclog;
 	xfs_lsn_t		lsn;
-	XFS_STATS_INC(xs_log_force);
+	XFS_STATS_INC(mp, xs_log_force);
 	xlog_cil_force(log);
@@ -3297,7 +3297,7 @@ _xfs_log_force(
 				spin_unlock(&log->l_icloglock);
 				return -EIO;
 			}
-			XFS_STATS_INC(xs_log_force_sleep);
+			XFS_STATS_INC(mp, xs_log_force_sleep);
 			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 			/*
 			 * No need to grab the log lock here since we're
@@ -3362,7 +3362,7 @@ _xfs_log_force_lsn(
 	ASSERT(lsn != 0);
-	XFS_STATS_INC(xs_log_force);
+	XFS_STATS_INC(mp, xs_log_force);
 	lsn = xlog_cil_force_lsn(log, lsn);
 	if (lsn == NULLCOMMITLSN)
@@ -3411,7 +3411,7 @@ _xfs_log_force_lsn(
 		     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
 			ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
-			XFS_STATS_INC(xs_log_force_sleep);
+			XFS_STATS_INC(mp, xs_log_force_sleep);
 			xlog_wait(&iclog->ic_prev->ic_write_wait,
 				  &log->l_icloglock);
@@ -3441,7 +3441,7 @@ _xfs_log_force_lsn(
 				spin_unlock(&log->l_icloglock);
 				return -EIO;
 			}
-			XFS_STATS_INC(xs_log_force_sleep);
+			XFS_STATS_INC(mp, xs_log_force_sleep);
 			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 			/*
 			 * No need to grab the log lock here since we're
@@ -184,7 +184,7 @@ xfs_qm_dqpurge(
 	 */
 	ASSERT(!list_empty(&dqp->q_lru));
 	list_lru_del(&qi->qi_lru, &dqp->q_lru);
-	XFS_STATS_DEC(xs_qm_dquot_unused);
+	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
 	xfs_qm_dqdestroy(dqp);
 	return 0;
@@ -448,11 +448,11 @@ xfs_qm_dquot_isolate(
 	 */
 	if (dqp->q_nrefs) {
 		xfs_dqunlock(dqp);
-		XFS_STATS_INC(xs_qm_dqwants);
+		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 		trace_xfs_dqreclaim_want(dqp);
 		list_lru_isolate(lru, &dqp->q_lru);
-		XFS_STATS_DEC(xs_qm_dquot_unused);
+		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 		return LRU_REMOVED;
 	}
@@ -496,19 +496,19 @@ xfs_qm_dquot_isolate(
 	ASSERT(dqp->q_nrefs == 0);
 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
-	XFS_STATS_DEC(xs_qm_dquot_unused);
+	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 	trace_xfs_dqreclaim_done(dqp);
-	XFS_STATS_INC(xs_qm_dqreclaims);
+	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 	return LRU_REMOVED;
 out_miss_busy:
 	trace_xfs_dqreclaim_busy(dqp);
-	XFS_STATS_INC(xs_qm_dqreclaim_misses);
+	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 	return LRU_SKIP;
 out_unlock_dirty:
 	trace_xfs_dqreclaim_busy(dqp);
-	XFS_STATS_INC(xs_qm_dqreclaim_misses);
+	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 	xfs_dqunlock(dqp);
 	spin_lock(lru_lock);
 	return LRU_RETRY;
@@ -18,7 +18,6 @@
 #ifndef __XFS_STATS_H__
 #define __XFS_STATS_H__
-#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
 #include <linux/percpu.h>
@@ -218,14 +217,25 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf);
 void xfs_stats_clearall(struct xfsstats __percpu *stats);
 extern struct xstats xfsstats;
-#define XFS_STATS_INC(v)	\
-	(per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v++)
+#define XFS_STATS_INC(mp, v)					\
+do {								\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v++;	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v++;	\
+} while (0)
-#define XFS_STATS_DEC(v)	\
-	(per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v--)
+#define XFS_STATS_DEC(mp, v)					\
+do {								\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v--;	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v--;	\
+} while (0)
-#define XFS_STATS_ADD(v, inc)	\
-	(per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v += (inc))
+#define XFS_STATS_ADD(mp, v, inc)					\
+do {									\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v += (inc);	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v += (inc);	\
+} while (0)
+#if defined(CONFIG_PROC_FS)
 extern int xfs_init_procfs(void);
 extern void xfs_cleanup_procfs(void);
@@ -233,10 +243,6 @@ extern void xfs_cleanup_procfs(void);
 #else	/* !CONFIG_PROC_FS */
-# define XFS_STATS_INC(count)
-# define XFS_STATS_DEC(count)
-# define XFS_STATS_ADD(count, inc)
 static inline int xfs_init_procfs(void)
 {
 	return 0;
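Note that the macros above now reference mp->m_stats unconditionally,
and the empty !CONFIG_PROC_FS stubs are removed: the counters exist
whether or not procfs is built in, and only the procfs registration
stays conditional. This relies on struct xfs_mount carrying a
per-filesystem stats object (introduced elsewhere in this series); a
minimal sketch of that assumed shape:

	/* assumed shape, not part of this hunk */
	struct xstats {
		struct xfsstats __percpu	*xs_stats;
	};

	struct xfs_mount {
		/* ... */
		struct xstats	m_stats;	/* per-fs counters, mirroring the global xfsstats */
	};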
@@ -922,7 +922,7 @@ xfs_fs_destroy_inode(
 	trace_xfs_destroy_inode(ip);
-	XFS_STATS_INC(vn_reclaim);
+	XFS_STATS_INC(ip->i_mount, vn_reclaim);
 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
@@ -983,8 +983,8 @@ xfs_fs_evict_inode(
 	truncate_inode_pages_final(&inode->i_data);
 	clear_inode(inode);
-	XFS_STATS_INC(vn_rele);
-	XFS_STATS_INC(vn_remove);
+	XFS_STATS_INC(ip->i_mount, vn_rele);
+	XFS_STATS_INC(ip->i_mount, vn_remove);
 	xfs_inactive(ip);
 }
@@ -930,9 +930,9 @@ __xfs_trans_commit(
 	 */
 	if (sync) {
 		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
-		XFS_STATS_INC(xs_trans_sync);
+		XFS_STATS_INC(mp, xs_trans_sync);
 	} else {
-		XFS_STATS_INC(xs_trans_async);
+		XFS_STATS_INC(mp, xs_trans_async);
 	}
 	return error;
@@ -955,7 +955,7 @@ __xfs_trans_commit(
 	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
 	xfs_trans_free(tp);
-	XFS_STATS_INC(xs_trans_empty);
+	XFS_STATS_INC(mp, xs_trans_empty);
 	return error;
 }
@@ -349,7 +349,7 @@ xfsaild_push(
 	    xfs_ail_min_lsn(ailp))) {
 		ailp->xa_log_flush = 0;
-		XFS_STATS_INC(xs_push_ail_flush);
+		XFS_STATS_INC(mp, xs_push_ail_flush);
 		xfs_log_force(mp, XFS_LOG_SYNC);
 	}
@@ -371,7 +371,7 @@ xfsaild_push(
 		goto out_done;
 	}
-	XFS_STATS_INC(xs_push_ail);
+	XFS_STATS_INC(mp, xs_push_ail);
 	lsn = lip->li_lsn;
 	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
@@ -385,7 +385,7 @@ xfsaild_push(
 		lock_result = lip->li_ops->iop_push(lip, &ailp->xa_buf_list);
 		switch (lock_result) {
 		case XFS_ITEM_SUCCESS:
-			XFS_STATS_INC(xs_push_ail_success);
+			XFS_STATS_INC(mp, xs_push_ail_success);
 			trace_xfs_ail_push(lip);
 			ailp->xa_last_pushed_lsn = lsn;
@@ -403,7 +403,7 @@ xfsaild_push(
 			 * re-try the flushing relatively soon if most of the
 			 * AIL is beeing flushed.
 			 */
-			XFS_STATS_INC(xs_push_ail_flushing);
+			XFS_STATS_INC(mp, xs_push_ail_flushing);
 			trace_xfs_ail_flushing(lip);
 			flushing++;
@@ -411,14 +411,14 @@ xfsaild_push(
 			break;
 		case XFS_ITEM_PINNED:
-			XFS_STATS_INC(xs_push_ail_pinned);
+			XFS_STATS_INC(mp, xs_push_ail_pinned);
 			trace_xfs_ail_pinned(lip);
 			stuck++;
 			ailp->xa_log_flush++;
 			break;
 		case XFS_ITEM_LOCKED:
-			XFS_STATS_INC(xs_push_ail_locked);
+			XFS_STATS_INC(mp, xs_push_ail_locked);
 			trace_xfs_ail_locked(lip);
 			stuck++;