Commit e821bcec authored by Jens Axboe

Merge branch 'for-6.11/block-limits' into for-6.11/block

Merge in queue limits cleanups.

* for-6.11/block-limits:
  block: move the raid_partial_stripes_expensive flag into the features field
  block: remove the discard_alignment flag
  block: move the misaligned flag into the features field
  block: renumber and rename the cache disabled flag
  block: fix spelling and grammar for in writeback_cache_control.rst
  block: remove the unused blk_bounce enum
parents 5ddb88f2 7d4dec52
Documentation/block/writeback_cache_control.rst
@@ -70,8 +70,8 @@ flag in the features field of the queue_limits structure.
 Implementation details for bio based block drivers
 --------------------------------------------------
-For bio based drivers the REQ_PREFLUSH and REQ_FUA bit are simplify passed on
-to the driver if the drivers sets the BLK_FEAT_WRITE_CACHE flag and the drivers
+For bio based drivers the REQ_PREFLUSH and REQ_FUA bit are simply passed on to
+the driver if the driver sets the BLK_FEAT_WRITE_CACHE flag and the driver
 needs to handle them.
 *NOTE*: The REQ_FUA bit also gets passed on when the BLK_FEAT_FUA flags is
@@ -89,7 +89,7 @@ When the BLK_FEAT_WRITE_CACHE flag is set, REQ_OP_WRITE | REQ_PREFLUSH requests
 with a payload are automatically turned into a sequence of a REQ_OP_FLUSH
 request followed by the actual write by the block layer.
-When the BLK_FEAT_FUA flags is set, the REQ_FUA bit simplify passed on for the
+When the BLK_FEAT_FUA flags is set, the REQ_FUA bit is simply passed on for the
 REQ_OP_WRITE request, else a REQ_OP_FLUSH request is sent by the block layer
 after the completion of the write request for bio submissions with the REQ_FUA
 bit set.
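
As a rough illustration of the contract described above, a bio based driver that opts in via the features field might look like the following sketch. struct my_dev, my_flush_cache(), my_do_io() and my_probe() are hypothetical stand-ins, not part of this series:

#include <linux/blkdev.h>

struct my_dev;						/* hypothetical device */
static void my_flush_cache(struct my_dev *dev);		/* hypothetical */
static void my_do_io(struct my_dev *dev, struct bio *bio);	/* hypothetical */

static void my_submit_bio(struct bio *bio)
{
	struct my_dev *dev = bio->bi_bdev->bd_disk->private_data;

	/* Only seen because the limits declare BLK_FEAT_WRITE_CACHE:
	 * get previously completed writes onto stable storage first. */
	if (bio->bi_opf & REQ_PREFLUSH)
		my_flush_cache(dev);

	my_do_io(dev, bio);

	/* Only seen because the limits also declare BLK_FEAT_FUA: this
	 * bio's own data must be durable before completion.  A full
	 * cache flush is a correct (if heavy) way to honor that. */
	if (bio->bi_opf & REQ_FUA)
		my_flush_cache(dev);

	bio_endio(bio);
}

static int my_probe(void)
{
	struct queue_limits lim = {
		.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
	};
	struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);

	return IS_ERR(disk) ? PTR_ERR(disk) : 0;
}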
block/blk-settings.c
@@ -266,7 +266,7 @@ static int blk_validate_limits(struct queue_limits *lim)
 	if (lim->alignment_offset) {
 		lim->alignment_offset &= (lim->physical_block_size - 1);
-		lim->misaligned = 0;
+		lim->features &= ~BLK_FEAT_MISALIGNED;
 	}
 	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
@@ -477,6 +477,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (!(b->features & BLK_FEAT_POLL))
 		t->features &= ~BLK_FEAT_POLL;
 
+	t->flags |= (b->flags & BLK_FEAT_MISALIGNED);
+
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_user_sectors = min_not_zero(t->max_user_sectors,
 			b->max_user_sectors);
@@ -501,8 +503,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
 
-	t->misaligned |= b->misaligned;
-
 	alignment = queue_limit_alignment_offset(b, start);
 
 	/* Bottom device has different alignment.  Check that it is
@@ -516,7 +516,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		/* Verify that top and bottom intervals line up */
 		if (max(top, bottom) % min(top, bottom)) {
-			t->misaligned = 1;
+			t->flags |= BLK_FEAT_MISALIGNED;
 			ret = -1;
 		}
 	}
@@ -538,42 +538,38 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	/* Physical block size a multiple of the logical block size? */
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
-		t->misaligned = 1;
+		t->flags |= BLK_FEAT_MISALIGNED;
 		ret = -1;
 	}
 
 	/* Minimum I/O a multiple of the physical block size? */
 	if (t->io_min & (t->physical_block_size - 1)) {
 		t->io_min = t->physical_block_size;
-		t->misaligned = 1;
+		t->flags |= BLK_FEAT_MISALIGNED;
 		ret = -1;
 	}
 
 	/* Optimal I/O a multiple of the physical block size? */
 	if (t->io_opt & (t->physical_block_size - 1)) {
 		t->io_opt = 0;
-		t->misaligned = 1;
+		t->flags |= BLK_FEAT_MISALIGNED;
 		ret = -1;
 	}
 
 	/* chunk_sectors a multiple of the physical block size? */
 	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
 		t->chunk_sectors = 0;
-		t->misaligned = 1;
+		t->flags |= BLK_FEAT_MISALIGNED;
 		ret = -1;
 	}
 
-	t->raid_partial_stripes_expensive =
-		max(t->raid_partial_stripes_expensive,
-		    b->raid_partial_stripes_expensive);
-
 	/* Find lowest common alignment_offset */
 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
 		% max(t->physical_block_size, t->io_min);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
 	if (t->alignment_offset & (t->logical_block_size - 1)) {
-		t->misaligned = 1;
+		t->flags |= BLK_FEAT_MISALIGNED;
 		ret = -1;
 	}
@@ -585,16 +581,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (b->discard_granularity) {
 		alignment = queue_limit_discard_alignment(b, start);
 
-		if (t->discard_granularity != 0 &&
-		    t->discard_alignment != alignment) {
-			top = t->discard_granularity + t->discard_alignment;
-			bottom = b->discard_granularity + alignment;
-
-			/* Verify that top and bottom intervals line up */
-			if ((max(top, bottom) % min(top, bottom)) != 0)
-				t->discard_misaligned = 1;
-		}
-
 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
 						      b->max_discard_sectors);
 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
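
For context, blk_stack_limits() is what stacking drivers call once per component device, and after this change a bottom device's misalignment travels through the flags word instead of the removed misaligned field. A hedged sketch of the calling pattern; struct my_part and the my_parts list are illustrative, not kernel API:

#include <linux/blkdev.h>

struct my_part {
	struct list_head	list;
	struct block_device	*bdev;
};
static LIST_HEAD(my_parts);

static void my_stack_limits(struct queue_limits *t)
{
	struct my_part *p;

	blk_set_stacking_limits(t);
	list_for_each_entry(p, &my_parts, list) {
		struct queue_limits *b = &bdev_get_queue(p->bdev)->limits;

		/* Returns -1 (and now sets BLK_FEAT_MISALIGNED in
		 * t->flags) when the bottom device cannot be aligned. */
		if (blk_stack_limits(t, b, get_start_sect(p->bdev)) < 0)
			pr_warn("%pg: device is misaligned\n", p->bdev);
	}
}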
@@ -736,7 +722,7 @@ int bdev_alignment_offset(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	if (q->limits.misaligned)
+	if (q->limits.flags & BLK_FEAT_MISALIGNED)
 		return -1;
 	if (bdev_is_partition(bdev))
 		return queue_limit_alignment_offset(&q->limits,
block/blk-sysfs.c
@@ -429,7 +429,7 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
 static ssize_t queue_wc_show(struct request_queue *q, char *page)
 {
-	if (q->limits.features & BLK_FLAGS_WRITE_CACHE_DISABLED)
+	if (q->limits.features & BLK_FLAG_WRITE_CACHE_DISABLED)
 		return sprintf(page, "write through\n");
 	return sprintf(page, "write back\n");
 }
@@ -452,9 +452,9 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
 	lim = queue_limits_start_update(q);
 	if (disable)
-		lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED;
+		lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
 	else
-		lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED;
+		lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
 	err = queue_limits_commit_update(q, &lim);
 	if (err)
 		return err;
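
The store path above doubles as a compact example of the snapshot/modify/commit limits API this series builds on. A condensed sketch of the same pattern from driver context, assuming a valid struct request_queue *q:

	struct queue_limits lim;
	int err;

	/* Snapshot the current limits; this takes q->limits_lock. */
	lim = queue_limits_start_update(q);
	lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;	/* force write through */
	/* Revalidates and publishes atomically, then drops the lock. */
	err = queue_limits_commit_update(q, &lim);
	if (err)
		pr_err("limits update failed: %d\n", err);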
drivers/md/bcache/super.c
@@ -1416,8 +1416,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
 	}
 
 	if (bdev_io_opt(dc->bdev))
-		dc->partial_stripes_expensive =
-			q->limits.raid_partial_stripes_expensive;
+		dc->partial_stripes_expensive = q->limits.features &
+			BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
 
 	ret = bcache_device_init(&dc->disk, block_size,
 			bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
drivers/md/dm-cache-target.c
@@ -3403,7 +3403,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 	limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
 	limits->discard_granularity = origin_limits->discard_granularity;
 	limits->discard_alignment = origin_limits->discard_alignment;
-	limits->discard_misaligned = origin_limits->discard_misaligned;
 }
 
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
drivers/md/dm-clone-target.c
@@ -2059,7 +2059,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
 	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
 	limits->discard_granularity = dest_limits->discard_granularity;
 	limits->discard_alignment = dest_limits->discard_alignment;
-	limits->discard_misaligned = dest_limits->discard_misaligned;
 	limits->max_discard_segments = dest_limits->max_discard_segments;
 }
drivers/md/dm-table.c
@@ -1808,7 +1808,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		limits->max_hw_discard_sectors = 0;
 		limits->discard_granularity = 0;
 		limits->discard_alignment = 0;
-		limits->discard_misaligned = 0;
 	}
 
 	if (!dm_table_supports_write_zeroes(t))
drivers/md/raid5.c
@@ -7711,7 +7711,7 @@ static int raid5_set_limits(struct mddev *mddev)
 	blk_set_stacking_limits(&lim);
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
-	lim.raid_partial_stripes_expensive = 1;
+	lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
 	lim.discard_granularity = stripe;
 	lim.max_write_zeroes_sectors = 0;
 	mddev_stack_rdev_limits(mddev, &lim, 0);
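
The raid5 hunk shows the producer side of the new bit and the earlier bcache hunk its consumer side. A short sketch of the round trip, assuming lim is the producer's limits and q is the stacked queue a consumer sees (the bit is in BLK_FEAT_INHERIT_MASK below, so stacking preserves it):

	/* producer (e.g. raid5), before the gendisk is created: */
	lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;

	/* consumer (e.g. bcache), reading it back off the queue: */
	bool expensive = q->limits.features &
			 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;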
include/linux/blkdev.h
@@ -328,6 +328,9 @@ enum {
 	/* bounce all highmem pages */
 	BLK_FEAT_BOUNCE_HIGH			= (1u << 14),
+
+	/* undocumented magic for bcache */
+	BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE	= (1u << 15),
 };
 
 /*
@@ -335,21 +338,16 @@ enum {
  */
 #define BLK_FEAT_INHERIT_MASK \
 	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
-	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH)
+	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
+	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
 
 /* internal flags in queue_limits.flags */
 enum {
-	/* do not send FLUSH or FUA command despite advertised write cache */
-	BLK_FLAGS_WRITE_CACHE_DISABLED		= (1u << 31),
-};
-
-/*
- * BLK_BOUNCE_NONE:	never bounce (default)
- * BLK_BOUNCE_HIGH:	bounce all highmem pages
- */
-enum blk_bounce {
-	BLK_BOUNCE_NONE,
-	BLK_BOUNCE_HIGH,
+	/* do not send FLUSH/FUA commands despite advertising a write cache */
+	BLK_FLAG_WRITE_CACHE_DISABLED		= (1u << 0),
+
+	/* I/O topology is misaligned */
+	BLK_FEAT_MISALIGNED			= (1u << 1),
 };
 
 struct queue_limits {
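
One naming wrinkle worth noting in this hunk: despite its FEAT prefix, BLK_FEAT_MISALIGNED is defined in the internal flags enum and every user in this merge tests it against queue_limits.flags, not features (see the t->flags and q->limits.flags hunks in blk-settings.c). A minimal sketch of the resulting convention; my_bdev_misaligned() is a hypothetical helper, not kernel API:

#include <linux/blkdev.h>

static inline bool my_bdev_misaligned(struct block_device *bdev)
{
	/* features: driver-declared capabilities (BLK_FEAT_WRITE_CACHE, ...)
	 * flags:    block layer internal state (BLK_FLAG_WRITE_CACHE_DISABLED,
	 *           and BLK_FEAT_MISALIGNED despite its name) */
	return bdev_get_queue(bdev)->limits.flags & BLK_FEAT_MISALIGNED;
}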
@@ -383,9 +381,6 @@ struct queue_limits {
 	unsigned short		max_integrity_segments;
 	unsigned short		max_discard_segments;
 
-	unsigned char		misaligned;
-	unsigned char		discard_misaligned;
-	unsigned char		raid_partial_stripes_expensive;
-
 	unsigned int		max_open_zones;
 	unsigned int		max_active_zones;
@@ -1347,7 +1342,7 @@ static inline bool bdev_stable_writes(struct block_device *bdev)
 static inline bool blk_queue_write_cache(struct request_queue *q)
 {
 	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
-		!(q->limits.flags & BLK_FLAGS_WRITE_CACHE_DISABLED);
+		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
 }
 
 static inline bool bdev_write_cache(struct block_device *bdev)
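
blk_queue_write_cache() backs the bdev_write_cache() helper that upper layers consult before paying for cache control. A hedged consumer-side sketch; my_write_opf() is hypothetical:

#include <linux/blkdev.h>

static blk_opf_t my_write_opf(struct block_device *bdev)
{
	blk_opf_t opf = REQ_OP_WRITE;

	/* Without an (enabled) write back cache, writes complete on media
	 * and PREFLUSH/FUA would be pure overhead. */
	if (bdev_write_cache(bdev))
		opf |= REQ_PREFLUSH | REQ_FUA;
	return opf;
}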