Commit a08727ba authored by Linus Torvalds

Make workqueue bit operations work on "atomic_long_t"

On architectures where the atomicity of the bit operations is handled by
external means (i.e. a separate spinlock to protect concurrent accesses),
just doing a direct assignment on the workqueue data field (as done by
commit 4594bf15) can cause the
assignment to be lost due to lack of serialization with the bitops on
the same word.

So we need to serialize the assignment with the locks on those
architectures (notably older ARM chips, PA-RISC and sparc32).

So rather than using an "unsigned long", let's use "atomic_long_t",
which already has a safe assignment operation (atomic_long_set()) on
such architectures.

This requires that the atomic operations use the same atomicity locks as
the bit operations do, but that is largely the case anyway.  Sparc32
will probably need fixing.

Architectures (including modern ARM with LL/SC) that implement sane
atomic operations for SMP won't see any of this matter.
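
To make the race concrete, here is a hypothetical user-space sketch (not the kernel's code): the pthread mutex stands in for the external spinlock such architectures use to make their bitops atomic, and every name in it (bitop_lock, emulated_set_bit, emulated_atomic_long_set, ...) is invented for illustration. set_bit() on these machines is a read-modify-write done under the lock; a plain store that does not take the lock can land between that load and store and be silently overwritten, which is exactly the lost assignment described above.

/*
 * Illustrative user-space sketch only -- NOT kernel code.
 * Build with: cc -pthread <file>.c
 */
#include <pthread.h>
#include <stdio.h>

#define WORK_STRUCT_PENDING	0
#define WORK_STRUCT_FLAG_MASK	3UL

static pthread_mutex_t bitop_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the arch's bitop spinlock */
static unsigned long work_data;					/* plays the role of work->data */

/* set_bit() on a locked-bitop architecture: a read-modify-write done
 * entirely under the external lock. */
static void emulated_set_bit(int nr, unsigned long *addr)
{
	pthread_mutex_lock(&bitop_lock);
	*addr |= 1UL << nr;
	pthread_mutex_unlock(&bitop_lock);
}

/* What atomic_long_set() amounts to on such architectures: the store takes
 * the same lock, so it cannot slip in between another CPU's load and store
 * inside emulated_set_bit() and then be overwritten. */
static void emulated_atomic_long_set(unsigned long *addr, unsigned long val)
{
	pthread_mutex_lock(&bitop_lock);
	*addr = val;
	pthread_mutex_unlock(&bitop_lock);
}

int main(void)
{
	unsigned long fake_wq = 0x1000;	/* pretend workqueue pointer, low bits clear */

	/* queue_work() path: mark the item pending with a locked bitop. */
	emulated_set_bit(WORK_STRUCT_PENDING, &work_data);

	/*
	 * set_wq_data() path.  The old code did the equivalent of
	 * "work_data = fake_wq | flags;" with no lock; on these architectures
	 * that store can land in the middle of a concurrent locked bitop and
	 * be silently undone.  The serialized store below cannot.
	 */
	emulated_atomic_long_set(&work_data,
				 fake_wq | (work_data & WORK_STRUCT_FLAG_MASK));

	printf("work_data = %#lx\n", work_data);
	return 0;
}

With LL/SC or cmpxchg-based atomics the plain store and the locked store end up equally safe, which is why architectures with sane SMP atomics are unaffected.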

Cc: Russell King <rmk+lkml@arm.linux.org.uk>
Cc: David Howells <dhowells@redhat.com>
Cc: David Miller <davem@davemloft.com>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Linux Arch Maintainers <linux-arch@vger.kernel.org>
Cc: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 2f77d107
include/linux/workqueue.h
@@ -8,16 +8,21 @@
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <asm/atomic.h>
 
 struct workqueue_struct;
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
 
+/*
+ * The first word is the work queue pointer and the flags rolled into
+ * one
+ */
+#define work_data_bits(work) ((unsigned long *)(&(work)->data))
+
 struct work_struct {
-	/* the first word is the work queue pointer and the flags rolled into
-	 * one */
-	unsigned long management;
+	atomic_long_t data;
 #define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
 #define WORK_STRUCT_NOAUTOREL 1		/* F if work item automatically released on exec */
 #define WORK_STRUCT_FLAG_MASK (3UL)
@@ -26,6 +31,9 @@ struct work_struct {
 	work_func_t func;
 };
 
+#define WORK_DATA_INIT(autorelease) \
+	ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
+
 struct delayed_work {
 	struct work_struct work;
 	struct timer_list timer;
@@ -36,13 +44,13 @@ struct execute_work {
 };
 
 #define __WORK_INITIALIZER(n, f) {				\
-	.management = 0,					\
+	.data = WORK_DATA_INIT(0),				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	}
 
 #define __WORK_INITIALIZER_NAR(n, f) {				\
-	.management = (1 << WORK_STRUCT_NOAUTOREL),		\
+	.data = WORK_DATA_INIT(1),				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	}
@@ -82,17 +90,21 @@ struct execute_work {
 /*
  * initialize all of a work item in one go
+ *
+ * NOTE! No point in using "atomic_long_set()": using a direct
+ * assignment of the work data initializer allows the compiler
+ * to generate better code.
  */
 #define INIT_WORK(_work, _func)					\
 	do {							\
-		(_work)->management = 0;			\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT(0);	\
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
 
 #define INIT_WORK_NAR(_work, _func)				\
 	do {							\
-		(_work)->management = (1 << WORK_STRUCT_NOAUTOREL);	\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT(1);	\
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
@@ -114,7 +126,7 @@ struct execute_work {
  * @work: The work item in question
  */
 #define work_pending(work) \
-	test_bit(WORK_STRUCT_PENDING, &(work)->management)
+	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 /**
  * delayed_work_pending - Find out whether a delayable work item is currently
@@ -143,7 +155,7 @@ struct execute_work {
  * This should also be used to release a delayed work item.
  */
 #define work_release(work) \
-	clear_bit(WORK_STRUCT_PENDING, &(work)->management)
+	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 extern struct workqueue_struct *__create_workqueue(const char *name,
@@ -188,7 +200,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
 	ret = del_timer_sync(&work->timer);
 	if (ret)
-		clear_bit(WORK_STRUCT_PENDING, &work->work.management);
+		work_release(&work->work);
 	return ret;
 }
kernel/workqueue.c
@@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq)
 	BUG_ON(!work_pending(work));
 
 	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
-	new |= work->management & WORK_STRUCT_FLAG_MASK;
-	work->management = new;
+	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+	atomic_long_set(&work->data, new);
 }
 
 static inline void *get_wq_data(struct work_struct *work)
 {
-	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
@@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
 		list_del_init(&work->entry);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 			work_release(work);
 		f(work);
@@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	if (delay == 0)
 		return queue_work(wq, work);
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
@@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
@@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
 		BUG_ON(get_wq_data(work) != cwq);
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 			work_release(work);
 		f(work);