Commit 277100b3 authored by Linus Torvalds

Merge tag 'block-6.9-20240315' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Revert of a change for mq-deadline that went into the 6.8 release,
   causing a performance regression for some (Bart)

 - Revert of the interruptible discard handling. This needs more work
   since the ioctl and fs path aren't properly split, and will happen
   for the 6.10 kernel release. For 6.9, do the minimal revert
   (Christoph)

 - Fix for an issue with the timestamp caching code (me)

 - kerneldoc fix (Jiapeng)

* tag 'block-6.9-20240315' of git://git.kernel.dk/linux:
  block: fix mismatched kerneldoc function name
  Revert "blk-lib: check for kill signal"
  Revert "block/mq-deadline: use correct way to throttling write requests"
  block: limit block time caching to in_task() context
parents c8e76996 4c4ab8ae
@@ -35,26 +35,6 @@ static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
 	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
 }
 
-static void await_bio_endio(struct bio *bio)
-{
-	complete(bio->bi_private);
-	bio_put(bio);
-}
-
-/*
- * await_bio_chain - ends @bio and waits for every chained bio to complete
- */
-static void await_bio_chain(struct bio *bio)
-{
-	DECLARE_COMPLETION_ONSTACK_MAP(done,
-			bio->bi_bdev->bd_disk->lockdep_map);
-
-	bio->bi_private = &done;
-	bio->bi_end_io = await_bio_endio;
-	bio_endio(bio);
-	blk_wait_io(&done);
-}
-
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
 {
@@ -97,10 +77,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 * is disabled.
 		 */
 		cond_resched();
-		if (fatal_signal_pending(current)) {
-			await_bio_chain(bio);
-			return -EINTR;
-		}
 	}
 
 	*biop = bio;
@@ -167,10 +143,6 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
 		nr_sects -= len;
 		sector += len;
 		cond_resched();
-		if (fatal_signal_pending(current)) {
-			await_bio_chain(bio);
-			return -EINTR;
-		}
 	}
 
 	*biop = bio;
@@ -215,10 +187,6 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
 			break;
 		}
 		cond_resched();
-		if (fatal_signal_pending(current)) {
-			await_bio_chain(bio);
-			return -EINTR;
-		}
 	}
 
 	*biop = bio;
@@ -309,7 +277,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		bio_put(bio);
 	}
 	blk_finish_plug(&plug);
-	if (ret && ret != -EINTR && try_write_zeroes) {
+	if (ret && try_write_zeroes) {
 		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
 			try_write_zeroes = false;
 			goto retry;
@@ -361,12 +329,6 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
 		sector += len;
 		nr_sects -= len;
 		cond_resched();
-		if (fatal_signal_pending(current)) {
-			await_bio_chain(bio);
-			ret = -EINTR;
-			bio = NULL;
-			break;
-		}
 	}
 	if (bio) {
 		ret = submit_bio_wait(bio);
...
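For reference, the two helpers this revert deletes implement "end the tail bio, then wait for the whole chain": every previously submitted bio in the chain holds a reference on the not-yet-submitted tail, so the registered end_io only fires once all of them have completed. A commented sketch, mirroring the removed lines above (the comments are explanatory additions, not part of the original patch):

/* Completion callback: wake the waiter, then drop the final bio reference. */
static void await_bio_endio(struct bio *bio)
{
	complete(bio->bi_private);
	bio_put(bio);
}

/*
 * End @bio and wait for every chained bio to complete. Earlier bios in the
 * chain hold references on @bio, so the bio_endio() call below does not
 * invoke await_bio_endio() until the whole chain has finished; blk_wait_io()
 * then returns and the caller can bail out with -EINTR.
 */
static void await_bio_chain(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = await_bio_endio;
	bio_endio(bio);
	blk_wait_io(&done);
}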
@@ -267,7 +267,7 @@ int queue_limits_commit_update(struct request_queue *q,
 EXPORT_SYMBOL_GPL(queue_limits_commit_update);
 
 /**
- * queue_limits_commit_set - apply queue limits to queue
+ * queue_limits_set - apply queue limits to queue
  * @q: queue to update
  * @lim: limits to apply
  *
...
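The kerneldoc change is purely cosmetic: scripts/kernel-doc warns when the name on the first comment line does not match the function that follows, and the comment here said queue_limits_commit_set while documenting queue_limits_set. Roughly, the corrected block sits above the prototype like this (the description text is paraphrased for illustration, not copied from the tree):

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply @lim to @q. The name on the first kerneldoc line has to match the
 * function being documented, otherwise kernel-doc reports a mismatched
 * prototype.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);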
@@ -534,7 +534,7 @@ static inline u64 blk_time_get_ns(void)
 {
 	struct blk_plug *plug = current->plug;
 
-	if (!plug)
+	if (!plug || !in_task())
 		return ktime_get_ns();
 
 	/*
...
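The in_task() check makes the plug-cached timestamp a process-context-only optimization: from hard or soft interrupt context, current->plug belongs to whichever task was interrupted, so the helper now always takes a fresh reading there. A minimal sketch of the resulting helper; the cached-path body below is an approximation for illustration, not a verbatim copy of blk.h:

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	/*
	 * current->plug is only meaningful for the task that owns it; in
	 * interrupt context it may be the interrupted task's plug, so only
	 * consult the cached value when running in plain task context.
	 */
	if (!plug || !in_task())
		return ktime_get_ns();

	/* Approximate cached path: take one reading per plug and reuse it. */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}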
@@ -646,9 +646,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
-	unsigned int shift = tags->bitmap_tags.sb.shift;
 
-	dd->async_depth = max(1U, 3 * (1U << shift) / 4);
+	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
 
 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
 }
...
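The revert restores the original throttling formula, basing async_depth on the queue's nr_requests rather than on the sbitmap word shift, which typically evaluated to a much smaller depth and over-throttled writes. A small stand-alone illustration of the restored arithmetic; the queue depths below are chosen for illustration only:

#include <stdio.h>

/* Restored formula: async_depth = max(1, 3 * nr_requests / 4). */
static unsigned long async_depth(unsigned long nr_requests)
{
	unsigned long depth = 3 * nr_requests / 4;

	return depth ? depth : 1;
}

int main(void)
{
	/* Example queue depths, chosen for illustration. */
	unsigned long depths[] = { 1, 64, 256 };

	for (int i = 0; i < 3; i++)
		printf("nr_requests=%lu -> async_depth=%lu\n",
		       depths[i], async_depth(depths[i]));
	return 0;
}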