Commit 7224b31b authored by Linus Torvalds

Merge branch 'for-3.13-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
 "This contains one important fix.  The NUMA support added a while back
  broke ordering guarantees on ordered workqueues.  Ordering was
  enforced by having a single frontend interface with @max_active == 1,
  but the NUMA support puts multiple interfaces on unbound workqueues
  on NUMA machines, thus breaking the ordering guarantee.  This is
  fixed by disabling NUMA support on ordered workqueues.

  The above and a couple of other patches were sitting in for-3.12-fixes
  but I forgot to push that out, so they ended up waiting a bit too
  long.  My apologies.

  Other fixes are minor"

* 'for-3.13-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: fix pool ID allocation leakage and remove BUILD_BUG_ON() in init_workqueues
  workqueue: fix comment typo for __queue_work()
  workqueue: fix ordered workqueues in NUMA setups
  workqueue: swap set_cpus_allowed_ptr() and PF_NO_SETAFFINITY
parents de92a058 4e8b22bd
@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 /* I: attributes used when instantiating standard unbound pools on demand */
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
+/* I: attributes used when instantiating ordered pools on demand */
+static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -518,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
-/* allocate ID and assign it to @pool */
+/**
+ * worker_pool_assign_id - allocate ID and assign it to @pool
+ * @pool: the pool pointer of interest
+ *
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
+ * successfully, -errno on failure.
+ */
 static int worker_pool_assign_id(struct worker_pool *pool)
 {
 	int ret;
 
 	lockdep_assert_held(&wq_pool_mutex);
 
-	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
+			GFP_KERNEL);
 	if (ret >= 0) {
 		pool->id = ret;
 		return 0;
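
The key change in the hunk above is idr_alloc()'s fourth argument: a
non-zero @end makes the allocation range [start, end) instead of
unbounded, so every pool ID stays below WORK_OFFQ_POOL_NONE and can never
spill into the bits that work->data reserves above the pool-ID field.  A
hypothetical sketch of the same pattern (demo_* names are made up; the
idr API is real):

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static DEFINE_IDR(demo_idr);

static int demo_assign_id(void *object, int limit)
{
        int id;

        /* smallest free ID in [0, limit); -ENOSPC once the range is full */
        id = idr_alloc(&demo_idr, object, 0, limit, GFP_KERNEL);
        if (id < 0)
                return id;

        pr_info("assigned id %d (guaranteed < %d)\n", id, limit);
        return 0;
}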
@@ -1320,7 +1330,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	/* if dying, only works from the same workqueue are allowed */
+	/* if draining, only works from the same workqueue are allowed */
 	if (unlikely(wq->flags & __WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
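
The corrected comment refers to the "chained work" exception: while
drain_workqueue() is in progress, queueing from outside the workqueue
trips the WARN_ON_ONCE(), but a work item already executing on that
workqueue may legitimately re-queue onto it.  A hypothetical module
sketch (names are made up):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct first, second;

static void second_fn(struct work_struct *work)
{
        pr_info("chained work ran before the drain completed\n");
}

static void first_fn(struct work_struct *work)
{
        /* chained: queued from a demo_wq worker, allowed even while draining */
        queue_work(demo_wq, &second);
}

static int __init demo_init(void)
{
        demo_wq = alloc_workqueue("demo_drain", 0, 0);
        if (!demo_wq)
                return -ENOMEM;

        INIT_WORK(&first, first_fn);
        INIT_WORK(&second, second_fn);

        queue_work(demo_wq, &first);
        drain_workqueue(demo_wq);       /* waits for first *and* the chained second */
        destroy_workqueue(demo_wq);
        return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");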
@@ -1736,16 +1746,17 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (IS_ERR(worker->task))
 		goto fail;
 
+	set_user_nice(worker->task, pool->attrs->nice);
+
+	/* prevent userland from meddling with cpumask of workqueue workers */
+	worker->task->flags |= PF_NO_SETAFFINITY;
+
 	/*
 	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
 	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
 	 */
-	set_user_nice(worker->task, pool->attrs->nice);
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
-	/* prevent userland from meddling with cpumask of workqueue workers */
-	worker->task->flags |= PF_NO_SETAFFINITY;
-
 	/*
 	 * The caller is responsible for ensuring %POOL_DISASSOCIATED
 	 * remains stable across this function.  See the comments above the
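
The reordering in create_worker() is not cosmetic: with
PF_NO_SETAFFINITY raised before set_cpus_allowed_ptr() is called, there
is no window in which userland can retarget the half-built worker with
sched_setaffinity() after the kernel has applied the pool's cpumask.
(That reading is inferred from the "swap set_cpus_allowed_ptr() and
PF_NO_SETAFFINITY" title in the patch list above; the patch's own
changelog is not quoted on this page.)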
@@ -4106,7 +4117,7 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
 	bool highpri = wq->flags & WQ_HIGHPRI;
-	int cpu;
+	int cpu, ret;
 
 	if (!(wq->flags & WQ_UNBOUND)) {
 		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4126,6 +4137,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
+	} else if (wq->flags & __WQ_ORDERED) {
+		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+		/* there should only be single pwq for ordering guarantee */
+		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+		     "ordering guarantee broken for workqueue %s\n", wq->name);
+		return ret;
 	} else {
 		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
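
The WARN() condition above is a hand-rolled "exactly one element" test:
in a circular doubly linked list, the head's next and prev both pointing
at the same node means the list is a singleton, so the ordered workqueue
really does have a single pwq.  A standalone plain-C sketch of that
invariant (the kernel expresses the same test as list_is_singular() in
<linux/list.h>):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
        h->next = h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/* true iff the list contains exactly one element */
static bool list_is_singular(const struct list_head *h)
{
        return h->next != h && h->next == h->prev;
}

int main(void)
{
        struct list_head pwqs, dfl_pwq_node, extra_node;

        list_init(&pwqs);
        list_add_tail(&dfl_pwq_node, &pwqs);
        assert(list_is_singular(&pwqs));   /* one pwq: ordering is safe */

        list_add_tail(&extra_node, &pwqs);
        assert(!list_is_singular(&pwqs));  /* two pwqs: ordering broken */

        puts("singleton check behaves as expected");
        return 0;
}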
@@ -5009,10 +5027,6 @@ static int __init init_workqueues(void)
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;
 
-	/* make sure we have enough bits for OFFQ pool ID */
-	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-		     WORK_CPU_END * NR_STD_WORKER_POOLS);
-
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
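
With pool IDs now capped at allocation time (the worker_pool_assign_id()
hunk above passes WORK_OFFQ_POOL_NONE as idr_alloc()'s exclusive upper
bound), an over-large ID can no longer leak into the work->data bits
above the pool-ID field; exhaustion instead surfaces as -ENOSPC from
idr_alloc().  That appears to be why the compile-time estimate based on
WORK_CPU_END * NR_STD_WORKER_POOLS is dropped as redundant.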
@@ -5051,13 +5065,23 @@ static int __init init_workqueues(void)
 		}
 	}
 
-	/* create default unbound wq attrs */
+	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
 
 		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
 		attrs->nice = std_nice[i];
 		unbound_std_wq_attrs[i] = attrs;
+
+		/*
+		 * An ordered wq should have only one pwq as ordering is
+		 * guaranteed by max_active which is enforced by pwqs.
+		 * Turn off NUMA so that dfl_pwq is used for all nodes.
+		 */
+		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		attrs->nice = std_nice[i];
+		attrs->no_numa = true;
+		ordered_wq_attrs[i] = attrs;
 	}
 
 	system_wq = alloc_workqueue("events", 0, 0);
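
To see why no_numa restores the single pwq that ordering needs, here is
a deliberately simplified, hypothetical model (the demo_* types are made
up; in the real kernel the lookup lives in unbound_pwq_by_node() and the
numa_pwq_tbl of workqueue_struct): with no_numa set, every node resolves
to dfl_pwq, so all queued work funnels through one queue.

#include <stdbool.h>
#include <stdio.h>

struct pool_workqueue { int id; };

struct demo_wq {
        bool no_numa;                        /* true for ordered workqueues */
        struct pool_workqueue *dfl_pwq;      /* the single default pwq */
        struct pool_workqueue *numa_pwq[2];  /* hypothetical two-node box */
};

/* simplified stand-in for the kernel's per-node pwq lookup */
static struct pool_workqueue *demo_pwq_for_node(struct demo_wq *wq, int node)
{
        if (wq->no_numa || !wq->numa_pwq[node])
                return wq->dfl_pwq;          /* every node shares one queue */
        return wq->numa_pwq[node];
}

int main(void)
{
        struct pool_workqueue dfl = { .id = 0 }, n0 = { .id = 1 }, n1 = { .id = 2 };
        struct demo_wq ordered = { .no_numa = true, .dfl_pwq = &dfl,
                                   .numa_pwq = { &n0, &n1 } };

        /* both nodes resolve to the same pwq, so queue order is preserved */
        printf("node0 -> pwq %d, node1 -> pwq %d\n",
               demo_pwq_for_node(&ordered, 0)->id,
               demo_pwq_for_node(&ordered, 1)->id);
        return 0;
}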