Commit 0d8fee32 authored by Al Viro, committed by Nathan Scott

[XFS] Kill direct access to ->count in valusema(); all we ever use it for

is check if semaphore is actually locked, which can be trivially done in a
portable way. Code gets more readable, while we are at it...

SGI-PV: 953915
SGI-Modid: xfs-linux-melb:xfs-kern:26274a
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Nathan Scott <nathans@sgi.com>
parent a805bad5
...@@ -34,20 +34,21 @@ typedef struct semaphore sema_t; ...@@ -34,20 +34,21 @@ typedef struct semaphore sema_t;
#define initnsema(sp, val, name) sema_init(sp, val) #define initnsema(sp, val, name) sema_init(sp, val)
#define psema(sp, b) down(sp) #define psema(sp, b) down(sp)
#define vsema(sp) up(sp) #define vsema(sp) up(sp)
#define valusema(sp) (atomic_read(&(sp)->count)) #define freesema(sema) do { } while (0)
#define freesema(sema)
/*
 * Report whether the semaphore is currently held, without blocking.
 * If the trylock succeeds the semaphore was free, so release it again
 * and report unlocked; if the trylock fails, someone holds it.
 */
static inline int issemalocked(sema_t *sp)
{
	if (down_trylock(sp))
		return 1;
	up(sp);
	return 0;
}
/* /*
* Map cpsema (try to get the sema) to down_trylock. We need to switch * Map cpsema (try to get the sema) to down_trylock. We need to switch
* the return values since cpsema returns 1 (acquired) 0 (failed) and * the return values since cpsema returns 1 (acquired) 0 (failed) and
* down_trylock returns the reverse 0 (acquired) 1 (failed). * down_trylock returns the reverse 0 (acquired) 1 (failed).
*/ */
static inline int cpsema(sema_t *sp)
#define cpsema(sp) (down_trylock(sp) ? 0 : 1) {
return down_trylock(sp) ? 0 : 1;
/* }
* Didn't do cvsema(sp). Not sure how to map this to up/down/...
* It does a vsema if the values is < 0 other wise nothing.
*/
#endif /* __XFS_SUPPORT_SEMA_H__ */ #endif /* __XFS_SUPPORT_SEMA_H__ */
...@@ -119,7 +119,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) ...@@ -119,7 +119,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
*/ */
#define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\ #define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\
(dqp)->dq_flags |= XFS_DQ_FLOCKED; } (dqp)->dq_flags |= XFS_DQ_FLOCKED; }
#define xfs_dqfunlock(dqp) { ASSERT(valusema(&((dqp)->q_flock)) <= 0); \ #define xfs_dqfunlock(dqp) { ASSERT(issemalocked(&((dqp)->q_flock))); \
vsema(&((dqp)->q_flock)); \ vsema(&((dqp)->q_flock)); \
(dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
...@@ -128,7 +128,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) ...@@ -128,7 +128,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
#define XFS_DQ_PINUNLOCK(dqp, s) mutex_spinunlock( \ #define XFS_DQ_PINUNLOCK(dqp, s) mutex_spinunlock( \
&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s) &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s)
#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (valusema(&((dqp)->q_flock)) <= 0) #define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock)))
#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) #define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
......
...@@ -248,7 +248,7 @@ xfs_qm_dquot_logitem_pushbuf( ...@@ -248,7 +248,7 @@ xfs_qm_dquot_logitem_pushbuf(
* inode flush completed and the inode was taken off the AIL. * inode flush completed and the inode was taken off the AIL.
* So, just get out. * So, just get out.
*/ */
if ((valusema(&(dqp->q_flock)) > 0) || if (!issemalocked(&(dqp->q_flock)) ||
((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
qip->qli_pushbuf_flag = 0; qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp); xfs_dqunlock(dqp);
...@@ -261,7 +261,7 @@ xfs_qm_dquot_logitem_pushbuf( ...@@ -261,7 +261,7 @@ xfs_qm_dquot_logitem_pushbuf(
if (bp != NULL) { if (bp != NULL) {
if (XFS_BUF_ISDELAYWRITE(bp)) { if (XFS_BUF_ISDELAYWRITE(bp)) {
dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
(valusema(&(dqp->q_flock)) <= 0)); issemalocked(&(dqp->q_flock)));
qip->qli_pushbuf_flag = 0; qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp); xfs_dqunlock(dqp);
......
...@@ -1031,6 +1031,6 @@ xfs_iflock_nowait(xfs_inode_t *ip) ...@@ -1031,6 +1031,6 @@ xfs_iflock_nowait(xfs_inode_t *ip)
void void
xfs_ifunlock(xfs_inode_t *ip) xfs_ifunlock(xfs_inode_t *ip)
{ {
ASSERT(valusema(&(ip->i_flock)) <= 0); ASSERT(issemalocked(&(ip->i_flock)));
vsema(&(ip->i_flock)); vsema(&(ip->i_flock));
} }
...@@ -3015,7 +3015,7 @@ xfs_iflush( ...@@ -3015,7 +3015,7 @@ xfs_iflush(
XFS_STATS_INC(xs_iflush_count); XFS_STATS_INC(xs_iflush_count);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
ASSERT(valusema(&ip->i_flock) <= 0); ASSERT(issemalocked(&(ip->i_flock)));
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > ip->i_df.if_ext_max); ip->i_d.di_nextents > ip->i_df.if_ext_max);
...@@ -3273,7 +3273,7 @@ xfs_iflush_int( ...@@ -3273,7 +3273,7 @@ xfs_iflush_int(
SPLDECL(s); SPLDECL(s);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
ASSERT(valusema(&ip->i_flock) <= 0); ASSERT(issemalocked(&(ip->i_flock)));
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > ip->i_df.if_ext_max); ip->i_d.di_nextents > ip->i_df.if_ext_max);
......
...@@ -794,7 +794,7 @@ xfs_inode_item_pushbuf( ...@@ -794,7 +794,7 @@ xfs_inode_item_pushbuf(
* inode flush completed and the inode was taken off the AIL. * inode flush completed and the inode was taken off the AIL.
* So, just get out. * So, just get out.
*/ */
if ((valusema(&(ip->i_flock)) > 0) || if (!issemalocked(&(ip->i_flock)) ||
((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
iip->ili_pushbuf_flag = 0; iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
...@@ -816,7 +816,7 @@ xfs_inode_item_pushbuf( ...@@ -816,7 +816,7 @@ xfs_inode_item_pushbuf(
* If not, we can flush it async. * If not, we can flush it async.
*/ */
dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
(valusema(&(ip->i_flock)) <= 0)); issemalocked(&(ip->i_flock)));
iip->ili_pushbuf_flag = 0; iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
xfs_buftrace("INODE ITEM PUSH", bp); xfs_buftrace("INODE ITEM PUSH", bp);
...@@ -864,7 +864,7 @@ xfs_inode_item_push( ...@@ -864,7 +864,7 @@ xfs_inode_item_push(
ip = iip->ili_inode; ip = iip->ili_inode;
ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
ASSERT(valusema(&(ip->i_flock)) <= 0); ASSERT(issemalocked(&(ip->i_flock)));
/* /*
* Since we were able to lock the inode's flush lock and * Since we were able to lock the inode's flush lock and
* we found it on the AIL, the inode must be dirty. This * we found it on the AIL, the inode must be dirty. This
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment