Commit da4d34b6 authored by Linus Torvalds

Merge tag 'io_uring-5.15-2021-10-22' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Two fixes for the max workers limit API that was introduced this
  series: one fix for an issue with that code, and one fixing a linked
  timeout regression in this series"

* tag 'io_uring-5.15-2021-10-22' of git://git.kernel.dk/linux-block:
  io_uring: apply worker limits to previous users
  io_uring: fix ltimeout unprep
  io_uring: apply max_workers limit to all future users
  io-wq: max_worker fixes
parents 5ab2ed0a b22fa62a
...@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) ...@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
pr_warn_once("io-wq is not configured for unbound workers"); pr_warn_once("io-wq is not configured for unbound workers");
raw_spin_lock(&wqe->lock); raw_spin_lock(&wqe->lock);
if (acct->nr_workers == acct->max_workers) { if (acct->nr_workers >= acct->max_workers) {
raw_spin_unlock(&wqe->lock); raw_spin_unlock(&wqe->lock);
return true; return true;
} }
...@@ -1291,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count) ...@@ -1291,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
rcu_read_lock(); rcu_read_lock();
for_each_node(node) { for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
struct io_wqe_acct *acct; struct io_wqe_acct *acct;
raw_spin_lock(&wqe->lock);
for (i = 0; i < IO_WQ_ACCT_NR; i++) { for (i = 0; i < IO_WQ_ACCT_NR; i++) {
acct = &wq->wqes[node]->acct[i]; acct = &wqe->acct[i];
prev = max_t(int, acct->max_workers, prev); prev = max_t(int, acct->max_workers, prev);
if (new_count[i]) if (new_count[i])
acct->max_workers = new_count[i]; acct->max_workers = new_count[i];
new_count[i] = prev; new_count[i] = prev;
} }
raw_spin_unlock(&wqe->lock);
} }
rcu_read_unlock(); rcu_read_unlock();
return 0; return 0;
......
...@@ -456,6 +456,8 @@ struct io_ring_ctx { ...@@ -456,6 +456,8 @@ struct io_ring_ctx {
struct work_struct exit_work; struct work_struct exit_work;
struct list_head tctx_list; struct list_head tctx_list;
struct completion ref_comp; struct completion ref_comp;
u32 iowq_limits[2];
bool iowq_limits_set;
}; };
}; };
...@@ -1368,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req) ...@@ -1368,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req)
} }
} }
static inline void io_unprep_linked_timeout(struct io_kiocb *req)
{
req->flags &= ~REQ_F_LINK_TIMEOUT;
}
static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{ {
if (WARN_ON_ONCE(!req->link)) if (WARN_ON_ONCE(!req->link))
...@@ -6983,7 +6980,7 @@ static void __io_queue_sqe(struct io_kiocb *req) ...@@ -6983,7 +6980,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
switch (io_arm_poll_handler(req)) { switch (io_arm_poll_handler(req)) {
case IO_APOLL_READY: case IO_APOLL_READY:
if (linked_timeout) if (linked_timeout)
io_unprep_linked_timeout(req); io_queue_linked_timeout(linked_timeout);
goto issue_sqe; goto issue_sqe;
case IO_APOLL_ABORTED: case IO_APOLL_ABORTED:
/* /*
...@@ -9638,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx) ...@@ -9638,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
ret = io_uring_alloc_task_context(current, ctx); ret = io_uring_alloc_task_context(current, ctx);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
tctx = current->io_uring; tctx = current->io_uring;
if (ctx->iowq_limits_set) {
unsigned int limits[2] = { ctx->iowq_limits[0],
ctx->iowq_limits[1], };
ret = io_wq_max_workers(tctx->io_wq, limits);
if (ret)
return ret;
}
} }
if (!xa_load(&tctx->xa, (unsigned long)ctx)) { if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
node = kmalloc(sizeof(*node), GFP_KERNEL); node = kmalloc(sizeof(*node), GFP_KERNEL);
...@@ -10643,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx) ...@@ -10643,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
void __user *arg) void __user *arg)
__must_hold(&ctx->uring_lock)
{ {
struct io_tctx_node *node;
struct io_uring_task *tctx = NULL; struct io_uring_task *tctx = NULL;
struct io_sq_data *sqd = NULL; struct io_sq_data *sqd = NULL;
__u32 new_count[2]; __u32 new_count[2];
...@@ -10674,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, ...@@ -10674,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
tctx = current->io_uring; tctx = current->io_uring;
} }
ret = -EINVAL; BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
if (!tctx || !tctx->io_wq)
goto err;
memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
ctx->iowq_limits_set = true;
ret = -EINVAL;
if (tctx && tctx->io_wq) {
ret = io_wq_max_workers(tctx->io_wq, new_count); ret = io_wq_max_workers(tctx->io_wq, new_count);
if (ret) if (ret)
goto err; goto err;
} else {
memset(new_count, 0, sizeof(new_count));
}
if (sqd) { if (sqd) {
mutex_unlock(&sqd->lock); mutex_unlock(&sqd->lock);
...@@ -10690,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, ...@@ -10690,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
if (copy_to_user(arg, new_count, sizeof(new_count))) if (copy_to_user(arg, new_count, sizeof(new_count)))
return -EFAULT; return -EFAULT;
/* that's it for SQPOLL, only the SQPOLL task creates requests */
if (sqd)
return 0;
/* now propagate the restriction to all registered users */
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
struct io_uring_task *tctx = node->task->io_uring;
if (WARN_ON_ONCE(!tctx->io_wq))
continue;
for (i = 0; i < ARRAY_SIZE(new_count); i++)
new_count[i] = ctx->iowq_limits[i];
/* ignore errors, it always returns zero anyway */
(void)io_wq_max_workers(tctx->io_wq, new_count);
}
return 0; return 0;
err: err:
if (sqd) { if (sqd) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment