Commit b226c9e1 authored by Linus Torvalds

Merge tag 'for-linus-20191115' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should make it into this release. This contains:

   - io_uring:
        - The timeout command assumes sequence == 0 means that we want
          one completion, but this kind of overloading is unfortunate as
          it prevents users from doing a pure time-based wait. Since
          this operation was introduced in this cycle, let's correct it
          now, while we can (a userspace sketch of the new semantics
          follows the shortlog below). (me)
        - One-liner to fix an issue with dependent links and fixed
          buffer reads. The actual IO completed fine, but the link got
          severed since we stored the wrong expected value (sketched
          after the diffs below). (me)
        - Add TIMEOUT to list of opcodes that don't need a file. (Pavel)

   - rsxx missing workqueue destroy calls. Old bug. (Chuhong)

   - Fix blk-iocost active list check (Jiufei)

   - Fix an "impossible-to-hit" overflow merge condition that still hit
     some folks very rarely (Junichi)

   - Fix bfq hang issue from 5.3. This didn't get marked for stable, but
     will go into stable post this merge (Paolo)"

* tag 'for-linus-20191115' of git://git.kernel.dk/linux-block:
  rsxx: add missed destroy_workqueue calls in remove
  iocost: check active_list of all the ancestors in iocg_activate()
  block, bfq: deschedule empty bfq_queues not referred by any process
  io_uring: ensure registered buffer import returns the IO length
  io_uring: Fix getting file for timeout
  block: check bi_size overflow before merge
  io_uring: make timeout sequence == 0 mean no sequence
parents 875fef49 dcb77e4b
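
Before the diffs, a quick illustration of the new timeout semantics (a
hedged userspace sketch, assuming liburing with the io_uring_prep_timeout()
helper; error handling trimmed). With this fix, a count of 0 arms a pure
time-based wait instead of being silently rewritten to "wait for one
completion". The timeout sqe also takes no file descriptor, which is why
IORING_OP_TIMEOUT joins the no-file list in the last hunk.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* count == 0: fire after ~1s regardless of other completions */
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		/* a pure timeout completes with res == -ETIME */
		printf("timeout cqe res = %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

Prior to the fix, the same sqe behaved as if count were 1, so the
one-second sleep could be cut short by any other completion on the ring.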
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
 	}
 }
 
+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+	/*
+	 * To prevent bfqq's service guarantees from being violated,
+	 * bfqq may be left busy, i.e., queued for service, even if
+	 * empty (see comments in __bfq_bfqq_expire() for
+	 * details). But, if no process will send requests to bfqq any
+	 * longer, then there is no point in keeping bfqq queued for
+	 * service. In addition, keeping bfqq queued for service, but
+	 * with no process ref any longer, may have caused bfqq to be
+	 * freed when dequeued from service. But this is assumed to
+	 * never happen.
+	 */
+	if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+	    bfqq != bfqd->in_service_queue)
+		bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+	bfq_put_queue(bfqq);
+}
+
 static void
 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	 */
 	new_bfqq->pid = -1;
 	bfqq->bic = NULL;
-	/* release process reference to bfqq */
-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqd, bfqq);
 }
 
 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
 	bfq_put_cooperator(bfqq);
 
-	bfq_put_queue(bfqq); /* release process reference */
+	bfq_release_process_ref(bfqd, bfqq);
 }
 
 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
 
 	bfqq = bic_to_bfqq(bic, false);
 	if (bfqq) {
-		/* release process reference on this queue */
-		bfq_put_queue(bfqq);
+		bfq_release_process_ref(bfqd, bfqq);
 		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
 		bic_set_bfqq(bic, bfqq, false);
 	}
@@ -5963,7 +5983,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 
 	bfq_put_cooperator(bfqq);
 
-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqq->bfqd, bfqq);
 
 	return NULL;
 }
--- a/block/bio.c
+++ b/block/bio.c
@@ -751,7 +751,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;
 
-	if (bio->bi_vcnt > 0) {
+	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
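
The new condition defers to the bio_full() helper, which refuses a merge
both when the bio has no free bvecs and when adding len bytes would
overflow the 32-bit bi_size counter; the latter is the "impossible-to-hit"
case from the pull message. Roughly what the helper looks like as of this
series (the in-tree definition in include/linux/bio.h is authoritative):

static inline bool bio_full(struct bio *bio, unsigned len)
{
	/* no room for another bio_vec */
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	/* bi_size is a u32; adding len would wrap it */
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}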
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 	atomic64_set(&iocg->active_period, cur_period);
 
 	/* already activated or breaking leaf-only constraint? */
-	for (i = iocg->level; i > 0; i--)
-		if (!list_empty(&iocg->active_list))
-			goto fail_unlock;
+	if (!list_empty(&iocg->active_list))
+		goto succeed_unlock;
+	for (i = iocg->level - 1; i > 0; i--)
+		if (!list_empty(&iocg->ancestors[i]->active_list))
+			goto fail_unlock;
+
 	if (iocg->child_active_sum)
 		goto fail_unlock;
 
@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 		ioc_start_period(ioc, now);
 	}
 
+succeed_unlock:
 	spin_unlock_irq(&ioc->lock);
 	return true;
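
Why the old loop was wrong: it iterated i over the levels but tested the
iocg's own active_list on every pass, so an already-active iocg took the
failure path and genuinely active ancestors were never examined. A
standalone toy model of the corrected walk (plain C with hypothetical
toy_* names, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define MAX_LEVELS 8

struct toy_iocg {
	int level;			/* 0 == root */
	bool active;			/* models !list_empty(&active_list) */
	struct toy_iocg *ancestors[MAX_LEVELS];	/* ancestors[i]: level i */
};

static bool toy_iocg_activate(struct toy_iocg *iocg)
{
	/* already activated: nothing to do, report success */
	if (iocg->active)
		return true;

	/* an ancestor on the active list breaks the leaf-only constraint */
	for (int i = iocg->level - 1; i > 0; i--)
		if (iocg->ancestors[i]->active)
			return false;

	iocg->active = true;
	return true;
}

int main(void)
{
	struct toy_iocg parent = { .level = 1 };
	struct toy_iocg leaf = { .level = 2, .ancestors = { NULL, &parent } };

	printf("%d\n", toy_iocg_activate(&leaf));	/* 1: activates */
	printf("%d\n", toy_iocg_activate(&leaf));	/* 1: already active */
	return 0;
}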
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 
 	cancel_work_sync(&card->event_work);
+	destroy_workqueue(card->event_wq);
 
 	rsxx_destroy_dev(card);
 	rsxx_dma_destroy(card);
+	destroy_workqueue(card->creg_ctrl.creg_wq);
 
 	spin_lock_irqsave(&card->irq_lock, flags);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
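
The rsxx change is the usual create/destroy pairing: a workqueue allocated
in probe must be torn down in remove once no work can still be queued,
otherwise it leaks. A generic, hedged sketch of the pattern (hypothetical
"foo" driver, not rsxx code):

#include <linux/workqueue.h>

struct foo_card {
	struct workqueue_struct *event_wq;
	struct work_struct event_work;
};

static int foo_setup(struct foo_card *card)
{
	card->event_wq = create_singlethread_workqueue("foo_event");
	if (!card->event_wq)
		return -ENOMEM;
	return 0;
}

static void foo_teardown(struct foo_card *card)
{
	/* make sure event_work is idle, then free the wq it ran on */
	cancel_work_sync(&card->event_work);
	destroy_workqueue(card->event_wq);
}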
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -326,6 +326,7 @@ struct io_kiocb {
 #define REQ_F_TIMEOUT		1024	/* timeout request */
 #define REQ_F_ISREG		2048	/* regular file */
 #define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ	8192	/* no timeout sequence */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -453,9 +454,13 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-	if (req && !__io_sequence_defer(ctx, req)) {
-		list_del_init(&req->list);
-		return req;
+	if (req) {
+		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+			return NULL;
+		if (!__io_sequence_defer(ctx, req)) {
+			list_del_init(&req->list);
+			return req;
+		}
 	}
 
 	return NULL;
@@ -1225,7 +1230,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
 		}
 	}
 
-	return 0;
+	return len;
 }
 
 static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
@@ -1941,18 +1946,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
 		return -EFAULT;
 
+	req->flags |= REQ_F_TIMEOUT;
+
 	/*
 	 * sqe->off holds how many events that need to occur for this
-	 * timeout event to be satisfied.
+	 * timeout event to be satisfied. If it isn't set, then this is
+	 * a pure timeout request, sequence isn't used.
 	 */
 	count = READ_ONCE(sqe->off);
-	if (!count)
-		count = 1;
+	if (!count) {
+		req->flags |= REQ_F_TIMEOUT_NOSEQ;
+		spin_lock_irq(&ctx->completion_lock);
+		entry = ctx->timeout_list.prev;
+		goto add;
+	}
 
 	req->sequence = ctx->cached_sq_head + count - 1;
 	/* reuse it to store the count */
 	req->submit.sequence = count;
-	req->flags |= REQ_F_TIMEOUT;
 
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
@@ -1964,6 +1975,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		unsigned nxt_sq_head;
 		long long tmp, tmp_nxt;
 
+		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+			continue;
+
 		/*
 		 * Since cached_sq_head + count - 1 can overflow, use type long
 		 * long to store it.
@@ -1990,6 +2004,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		nxt->sequence++;
 	}
 	req->sequence -= span;
+add:
 	list_add(&req->list, entry);
 	spin_unlock_irq(&ctx->completion_lock);
 
@@ -2283,6 +2298,7 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
 	switch (op) {
 	case IORING_OP_NOP:
 	case IORING_OP_POLL_REMOVE:
+	case IORING_OP_TIMEOUT:
 		return false;
 	default:
 		return true;
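
Finally, the io_import_fixed() change above: for linked requests, io_uring
records the expected transfer length and severs the chain when a
completion doesn't match it, so returning 0 for a successful
registered-buffer import made every linked fixed-buffer read look like a
short transfer. A hedged userspace sketch of the affected pattern (assumes
liburing; "data.bin" and the 4096-byte size are placeholders, error
handling trimmed):

#include <liburing.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct iovec iov = { .iov_base = malloc(4096), .iov_len = 4096 };
	int fd = open("data.bin", O_RDONLY);

	io_uring_queue_init(8, &ring, 0);
	io_uring_register_buffers(&ring, &iov, 1);

	/* fixed-buffer read, first half of a link chain */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, 4096, 0, 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* dependent request; severed before the fix even on success */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);

	io_uring_submit(&ring);
	/* ... reap two CQEs ... */
	io_uring_queue_exit(&ring);
	return 0;
}

Before the fix, the read itself completed with the right byte count, but
the dependent NOP came back -ECANCELED because the stored expected value
was 0.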