Commit 194a6b5b authored by Waiman Long's avatar Waiman Long Committed by Ingo Molnar

sched/wake_q: Rename WAKE_Q to DEFINE_WAKE_Q

Currently the wake_q data structure is defined by the WAKE_Q() macro.
This macro, however, looks like a function doing something as "wake" is
a verb. Even checkpatch.pl was confused as it reported warnings like

  WARNING: Missing a blank line after declarations
  #548: FILE: kernel/futex.c:3665:
  +	int ret;
  +	WAKE_Q(wake_q);

This patch renames the WAKE_Q() macro to DEFINE_WAKE_Q() which clarifies
what the macro is doing and eliminates the checkpatch.pl warnings.
Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1479401198-1765-1-git-send-email-longman@redhat.com
[ Resolved conflict and added missing rename. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6d0d2878
...@@ -989,7 +989,7 @@ enum cpu_idle_type { ...@@ -989,7 +989,7 @@ enum cpu_idle_type {
* already in a wake queue, the wakeup will happen soon and the second * already in a wake queue, the wakeup will happen soon and the second
* waker can just skip it. * waker can just skip it.
* *
* The WAKE_Q macro declares and initializes the list head. * The DEFINE_WAKE_Q macro declares and initializes the list head.
* wake_up_q() does NOT reinitialize the list; it's expected to be * wake_up_q() does NOT reinitialize the list; it's expected to be
* called near the end of a function, where the fact that the queue is * called near the end of a function, where the fact that the queue is
* not used again will be easy to see by inspection. * not used again will be easy to see by inspection.
...@@ -1009,7 +1009,7 @@ struct wake_q_head { ...@@ -1009,7 +1009,7 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
#define WAKE_Q(name) \ #define DEFINE_WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first } struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head, extern void wake_q_add(struct wake_q_head *head,
......
...@@ -967,7 +967,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, ...@@ -967,7 +967,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
struct timespec ts; struct timespec ts;
struct posix_msg_tree_node *new_leaf = NULL; struct posix_msg_tree_node *new_leaf = NULL;
int ret = 0; int ret = 0;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
if (u_abs_timeout) { if (u_abs_timeout) {
int res = prepare_timeout(u_abs_timeout, &expires, &ts); int res = prepare_timeout(u_abs_timeout, &expires, &ts);
...@@ -1151,7 +1151,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, ...@@ -1151,7 +1151,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
msg_ptr = wait.msg; msg_ptr = wait.msg;
} }
} else { } else {
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
msg_ptr = msg_get(info); msg_ptr = msg_get(info);
......
...@@ -235,7 +235,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) ...@@ -235,7 +235,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{ {
struct msg_msg *msg, *t; struct msg_msg *msg, *t;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
expunge_all(msq, -EIDRM, &wake_q); expunge_all(msq, -EIDRM, &wake_q);
ss_wakeup(msq, &wake_q, true); ss_wakeup(msq, &wake_q, true);
...@@ -397,7 +397,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, ...@@ -397,7 +397,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
goto out_up; goto out_up;
case IPC_SET: case IPC_SET:
{ {
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
if (msqid64.msg_qbytes > ns->msg_ctlmnb && if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
!capable(CAP_SYS_RESOURCE)) { !capable(CAP_SYS_RESOURCE)) {
...@@ -634,7 +634,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, ...@@ -634,7 +634,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
struct msg_msg *msg; struct msg_msg *msg;
int err; int err;
struct ipc_namespace *ns; struct ipc_namespace *ns;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
ns = current->nsproxy->ipc_ns; ns = current->nsproxy->ipc_ns;
...@@ -850,7 +850,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl ...@@ -850,7 +850,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
struct msg_queue *msq; struct msg_queue *msq;
struct ipc_namespace *ns; struct ipc_namespace *ns;
struct msg_msg *msg, *copy = NULL; struct msg_msg *msg, *copy = NULL;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
ns = current->nsproxy->ipc_ns; ns = current->nsproxy->ipc_ns;
......
...@@ -1298,7 +1298,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, ...@@ -1298,7 +1298,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
struct task_struct *new_owner; struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state; struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval; u32 uninitialized_var(curval), newval;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
bool deboost; bool deboost;
int ret = 0; int ret = 0;
...@@ -1415,7 +1415,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) ...@@ -1415,7 +1415,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
struct futex_q *this, *next; struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT; union futex_key key = FUTEX_KEY_INIT;
int ret; int ret;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
if (!bitset) if (!bitset)
return -EINVAL; return -EINVAL;
...@@ -1469,7 +1469,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, ...@@ -1469,7 +1469,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
struct futex_hash_bucket *hb1, *hb2; struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next; struct futex_q *this, *next;
int ret, op_ret; int ret, op_ret;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
retry: retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
...@@ -1708,7 +1708,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, ...@@ -1708,7 +1708,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
struct futex_pi_state *pi_state = NULL; struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2; struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next; struct futex_q *this, *next;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
if (requeue_pi) { if (requeue_pi) {
/* /*
......
...@@ -840,7 +840,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne ...@@ -840,7 +840,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
{ {
struct task_struct *next = NULL; struct task_struct *next = NULL;
unsigned long owner, flags; unsigned long owner, flags;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
mutex_release(&lock->dep_map, 1, ip); mutex_release(&lock->dep_map, 1, ip);
......
...@@ -1382,7 +1382,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock, ...@@ -1382,7 +1382,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock, bool (*slowfn)(struct rt_mutex *lock,
struct wake_q_head *wqh)) struct wake_q_head *wqh))
{ {
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
rt_mutex_deadlock_account_unlock(current); rt_mutex_deadlock_account_unlock(current);
......
...@@ -225,7 +225,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) ...@@ -225,7 +225,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
long count, adjustment = -RWSEM_ACTIVE_READ_BIAS; long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
struct rwsem_waiter waiter; struct rwsem_waiter waiter;
struct task_struct *tsk = current; struct task_struct *tsk = current;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
waiter.task = tsk; waiter.task = tsk;
waiter.type = RWSEM_WAITING_FOR_READ; waiter.type = RWSEM_WAITING_FOR_READ;
...@@ -461,7 +461,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state) ...@@ -461,7 +461,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
bool waiting = true; /* any queued threads before us */ bool waiting = true; /* any queued threads before us */
struct rwsem_waiter waiter; struct rwsem_waiter waiter;
struct rw_semaphore *ret = sem; struct rw_semaphore *ret = sem;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
/* undo write bias from down_write operation, stop active locking */ /* undo write bias from down_write operation, stop active locking */
count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count); count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
...@@ -495,7 +495,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state) ...@@ -495,7 +495,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
* wake any read locks that were queued ahead of us. * wake any read locks that were queued ahead of us.
*/ */
if (count > RWSEM_WAITING_BIAS) { if (count > RWSEM_WAITING_BIAS) {
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q); __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
/* /*
...@@ -571,7 +571,7 @@ __visible ...@@ -571,7 +571,7 @@ __visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{ {
unsigned long flags; unsigned long flags;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
/* /*
* If a spinner is present, it is not necessary to do the wakeup. * If a spinner is present, it is not necessary to do the wakeup.
...@@ -625,7 +625,7 @@ __visible ...@@ -625,7 +625,7 @@ __visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{ {
unsigned long flags; unsigned long flags;
WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
raw_spin_lock_irqsave(&sem->wait_lock, flags); raw_spin_lock_irqsave(&sem->wait_lock, flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment