Commit 3720281d authored by Mike Snitzer

dm: optimize max_io_len() by inlining max_io_len_target_boundary()

Saves redundant dm_target_offset() math.

Also, reverse argument order for max_io_len() to be consistent with
other similar functions.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 094ee64d
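
For context, the "redundant math" is the target-relative offset: before this patch max_io_len() derived it once inside max_io_len_target_boundary() and then again for blk_max_size_offset(). The standalone userspace sketch below is only an illustration of that duplication, not the kernel code itself; dm_target_offset() is reproduced assuming its usual "(sector) - (ti)->begin" definition, and queue_limit() is a made-up stand-in for blk_max_size_offset().

/*
 * Standalone sketch (not kernel code) of the math this patch removes.
 * Assumptions: sector_t modeled as a 64-bit sector count, dm_target_offset()
 * as the conventional "sector - ti->begin" macro, and queue_limit() as a
 * stand-in for blk_max_size_offset() that clamps to a fixed 1024-sector chunk.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct dm_target {
	sector_t begin;	/* first sector of the target within the mapped device */
	sector_t len;	/* length of the target in sectors */
};

#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

/* Stand-in for blk_max_size_offset(): sectors left in a 1024-sector chunk. */
static sector_t queue_limit(sector_t offset)
{
	sector_t chunk = 1024;

	return chunk - (offset & (chunk - 1));
}

/* Before the patch: dm_target_offset() is evaluated twice per call. */
static sector_t max_io_len_old(sector_t sector, struct dm_target *ti)
{
	sector_t len = ti->len - dm_target_offset(ti, sector);		/* 1st */
	sector_t max_len = queue_limit(dm_target_offset(ti, sector));	/* 2nd */

	return len < max_len ? len : max_len;
}

/* After the patch: the offset is computed once and reused. */
static sector_t max_io_len_new(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = ti->len - target_offset;
	sector_t max_len = queue_limit(target_offset);

	return len < max_len ? len : max_len;
}

int main(void)
{
	struct dm_target ti = { .begin = 2048, .len = 8192 };
	sector_t sector = 2600;

	printf("old: %llu  new: %llu\n",
	       (unsigned long long)max_io_len_old(sector, &ti),
	       (unsigned long long)max_io_len_new(&ti, sector));
	return 0;
}

The sketch also mirrors the argument-order change noted above: the old helper takes (sector, ti), the new one takes (ti, sector).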
@@ -1041,16 +1041,16 @@ static void clone_endio(struct bio *bio)
  * Return maximum size of I/O possible at the supplied sector up to the current
  * target boundary.
  */
-static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
+static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
+						  sector_t target_offset)
 {
-	sector_t target_offset = dm_target_offset(ti, sector);
-
 	return ti->len - target_offset;
 }
 
-static sector_t max_io_len(sector_t sector, struct dm_target *ti)
+static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 {
-	sector_t len = max_io_len_target_boundary(sector, ti);
+	sector_t target_offset = dm_target_offset(ti, sector);
+	sector_t len = max_io_len_target_boundary(ti, target_offset);
 	sector_t max_len;
 
 	/*
@@ -1060,7 +1060,7 @@ static sector_t max_io_len(sector_t sector, struct dm_target *ti)
 	 * - blk_max_size_offset() also respects q->limits.max_sectors
 	 */
 	max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue,
-				      dm_target_offset(ti, sector));
+				      target_offset);
 	if (len > max_len)
 		len = max_len;
 
@@ -1115,7 +1115,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		goto out;
 	if (!ti->type->direct_access)
 		goto out;
-	len = max_io_len(sector, ti) / PAGE_SECTORS;
+	len = max_io_len(ti, sector) / PAGE_SECTORS;
 	if (len < 1)
 		goto out;
 	nr_pages = min(len, nr_pages);
@@ -1497,7 +1497,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
 	if (!num_bios)
 		return -EOPNOTSUPP;
 
-	len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+	len = min_t(sector_t, ci->sector_count,
+		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
 
 	__send_duplicate_bios(ci, ti, num_bios, &len);
 
@@ -1578,7 +1579,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 	if (__process_abnormal_io(ci, ti, &r))
 		return r;
 
-	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
+	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
 
 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
 	if (r < 0)