Commit 4504a3c1 authored by Dmitry Eremin, committed by Greg Kroah-Hartman

staging/lustre: clean up SET_BUT_UNUSED/UNUSED macros

This is the SET_BUT_UNUSED/UNUSED macro cleanup part
of the original Lustre tree commit.
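
For reference, a minimal standalone sketch of what the removed macros did and what
the cleanup replaces them with (illustrative only; demo_before()/demo_after() are
hypothetical, not code from this patch):

/* The two macros being removed, as defined in the tree. Both exist only
 * to quiet compiler diagnostics: UNUSED(x) "uses" a value by casting it
 * to void, and SET_BUT_UNUSED(a) references `a` inside sizeof(), which
 * is never evaluated at run time, so the condition is a compile-time 0
 * and the whole statement reduces to do { } while (0).
 */
#define SET_BUT_UNUSED(a) do { } while (sizeof(a) - sizeof(a))
#define UNUSED(x) ((void)(x))

/* Hypothetical before/after illustration. */
static int demo_before(void *arg)
{
	int rc = 0;

	UNUSED(arg);		/* intended to silence "unused parameter" */
	SET_BUT_UNUSED(rc);	/* intended to silence "set but not used" */
	rc = 1;			/* assigned, never read */
	return 0;
}

static int demo_after(void *arg)
{
	/* The cleanup deletes the dead variable and the macro calls
	 * outright, as this patch does in timer.c, rpc.c and dcache.c
	 * (arg stays unused here, as in stt_timer_main()).
	 */
	return 0;
}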

Lustre-change: http://review.whamcloud.com/6139
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3204
Signed-off-by: Dmitry Eremin <dmitry.eremin@intel.com>
Signed-off-by: Ned Bass <bass6@llnl.gov>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Peng Tao <bergwolf@gmail.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7fc1f831
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -181,8 +181,6 @@ static inline void *__container_of(void *ptr, unsigned long shift)
 #define container_of0(ptr, type, member) \
 	((type *)__container_of((void *)(ptr), offsetof(type, member)))
 
-#define SET_BUT_UNUSED(a) do { } while(sizeof(a) - sizeof(a))
-
 #define _LIBCFS_H
 
 #endif /* _LIBCFS_H */
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -124,7 +124,6 @@ srpc_bulk_t *
 srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 {
 	srpc_bulk_t *bk;
-	struct page **pages;
 	int i;
 
 	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
@@ -140,7 +139,6 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 	bk->bk_sink = sink;
 	bk->bk_len = bulk_len;
 	bk->bk_niov = bulk_npg;
-	UNUSED(pages);
 
 	for (i = 0; i < bulk_npg; i++) {
 		struct page *pg;
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -572,9 +572,6 @@ swi_state2str (int state)
 #undef STATE2STR
 }
 
-#define UNUSED(x)	( (void)(x) )
-
 #define selftest_wait_events()	cfs_pause(cfs_time_seconds(1) / 10)
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -171,19 +171,14 @@ stt_check_timers(cfs_time_t *last)
 int
 stt_timer_main(void *arg)
 {
-	int rc = 0;
-
-	UNUSED(arg);
-	SET_BUT_UNUSED(rc);
-
 	cfs_block_allsigs();
 
 	while (!stt_data.stt_shuttingdown) {
 		stt_check_timers(&stt_data.stt_prev_slot);
 
-		rc = wait_event_timeout(stt_data.stt_waitq,
-					stt_data.stt_shuttingdown,
-					cfs_time_seconds(STTIMER_SLOTTIME));
+		wait_event_timeout(stt_data.stt_waitq,
+				   stt_data.stt_shuttingdown,
+				   cfs_time_seconds(STTIMER_SLOTTIME));
 	}
 
 	spin_lock(&stt_data.stt_lock);
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -404,7 +404,6 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
 	struct inode *inode = de->d_inode;
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct obd_client_handle **och_p;
-	__u64 *och_usecount;
 	__u64 ibits;
 
 	/*
@@ -418,37 +417,32 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
 		 */
-		if (it->it_flags & FMODE_WRITE) {
+		if (it->it_flags & FMODE_WRITE)
 			och_p = &lli->lli_mds_write_och;
-			och_usecount = &lli->lli_open_fd_write_count;
-		} else if (it->it_flags & FMODE_EXEC) {
+		else if (it->it_flags & FMODE_EXEC)
 			och_p = &lli->lli_mds_exec_och;
-			och_usecount = &lli->lli_open_fd_exec_count;
-		} else {
+		else
 			och_p = &lli->lli_mds_read_och;
-			och_usecount = &lli->lli_open_fd_read_count;
-		}
 
 		/* Check for the proper lock. */
 		ibits = MDS_INODELOCK_LOOKUP;
 		if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
 			goto do_lock;
 		mutex_lock(&lli->lli_och_mutex);
 		if (*och_p) { /* Everything is open already, do nothing */
-			/*(*och_usecount)++;  Do not let them steal our open
-			  handle from under us */
-			SET_BUT_UNUSED(och_usecount);
-			/* XXX The code above was my original idea, but in case
-			   we have the handle, but we cannot use it due to later
-			   checks (e.g. O_CREAT|O_EXCL flags set), nobody
-			   would decrement counter increased here. So we just
-			   hope the lock won't be invalidated in between. But
-			   if it would be, we'll reopen the open request to
-			   MDS later during file open path */
+			/* Originally it was idea to do not let them steal our
+			 * open handle from under us by (*och_usecount)++ here.
+			 * But in case we have the handle, but we cannot use it
+			 * due to later checks (e.g. O_CREAT|O_EXCL flags set),
+			 * nobody would decrement counter increased here. So we
+			 * just hope the lock won't be invalidated in between.
+			 * But if it would be, we'll reopen the open request to
+			 * MDS later during file open path.
+			 */
 			mutex_unlock(&lli->lli_och_mutex);
 			return 1;
-		} else {
-			mutex_unlock(&lli->lli_och_mutex);
 		}
+		mutex_unlock(&lli->lli_och_mutex);
 	}
 
 	if (it->it_op == IT_GETATTR) {