Commit c75981a1 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-6.5/dm-fixes' of...

Merge tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix double free on memory allocation failure in DM integrity target's
   integrity_recalc()

 - Fix locking in DM raid target's raid_ctr() and around call to
   md_stop()

 - Fix DM cache target's cleaner policy to always allow work to be
   queued for writeback, even if the cache isn't idle.

* tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
  dm raid: protect md_stop() with 'reconfig_mutex'
  dm raid: clean up four equivalent goto tags in raid_ctr()
  dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths
  dm integrity: fix double free on memory allocation failure
parents 6fb9f7f8 1e4ab7b4
...@@ -857,7 +857,13 @@ struct smq_policy { ...@@ -857,7 +857,13 @@ struct smq_policy {
struct background_tracker *bg_work; struct background_tracker *bg_work;
bool migrations_allowed; bool migrations_allowed:1;
/*
* If this is set the policy will try and clean the whole cache
* even if the device is not idle.
*/
bool cleaner:1;
}; };
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
...@@ -1138,7 +1144,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle) ...@@ -1138,7 +1144,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the * Cache entries may not be populated. So we cannot rely on the
* size of the clean queue. * size of the clean queue.
*/ */
if (idle) { if (idle || mq->cleaner) {
/* /*
* We'd like to clean everything. * We'd like to clean everything.
*/ */
...@@ -1722,11 +1728,9 @@ static void calc_hotspot_params(sector_t origin_size, ...@@ -1722,11 +1728,9 @@ static void calc_hotspot_params(sector_t origin_size,
*hotspot_block_size /= 2u; *hotspot_block_size /= 2u;
} }
static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, static struct dm_cache_policy *
sector_t origin_size, __smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
sector_t cache_block_size, bool mimic_mq, bool migrations_allowed, bool cleaner)
bool mimic_mq,
bool migrations_allowed)
{ {
unsigned int i; unsigned int i;
unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS; unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
...@@ -1813,6 +1817,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, ...@@ -1813,6 +1817,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
goto bad_btracker; goto bad_btracker;
mq->migrations_allowed = migrations_allowed; mq->migrations_allowed = migrations_allowed;
mq->cleaner = cleaner;
return &mq->policy; return &mq->policy;
...@@ -1836,21 +1841,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, ...@@ -1836,21 +1841,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
sector_t origin_size, sector_t origin_size,
sector_t cache_block_size) sector_t cache_block_size)
{ {
return __smq_create(cache_size, origin_size, cache_block_size, false, true); return __smq_create(cache_size, origin_size, cache_block_size,
false, true, false);
} }
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
sector_t origin_size, sector_t origin_size,
sector_t cache_block_size) sector_t cache_block_size)
{ {
return __smq_create(cache_size, origin_size, cache_block_size, true, true); return __smq_create(cache_size, origin_size, cache_block_size,
true, true, false);
} }
static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size, static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
sector_t origin_size, sector_t origin_size,
sector_t cache_block_size) sector_t cache_block_size)
{ {
return __smq_create(cache_size, origin_size, cache_block_size, false, false); return __smq_create(cache_size, origin_size, cache_block_size,
false, false, true);
} }
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
......
...@@ -2676,6 +2676,7 @@ static void integrity_recalc(struct work_struct *w) ...@@ -2676,6 +2676,7 @@ static void integrity_recalc(struct work_struct *w)
recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO); recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
if (!recalc_tags) { if (!recalc_tags) {
vfree(recalc_buffer); vfree(recalc_buffer);
recalc_buffer = NULL;
goto oom; goto oom;
} }
......
...@@ -3251,8 +3251,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3251,8 +3251,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = md_start(&rs->md); r = md_start(&rs->md);
if (r) { if (r) {
ti->error = "Failed to start raid array"; ti->error = "Failed to start raid array";
mddev_unlock(&rs->md); goto bad_unlock;
goto bad_md_start;
} }
/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
...@@ -3260,8 +3259,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3260,8 +3259,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
if (r) { if (r) {
ti->error = "Failed to set raid4/5/6 journal mode"; ti->error = "Failed to set raid4/5/6 journal mode";
mddev_unlock(&rs->md); goto bad_unlock;
goto bad_journal_mode_set;
} }
} }
...@@ -3272,14 +3270,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3272,14 +3270,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (rs_is_raid456(rs)) { if (rs_is_raid456(rs)) {
r = rs_set_raid456_stripe_cache(rs); r = rs_set_raid456_stripe_cache(rs);
if (r) if (r)
goto bad_stripe_cache; goto bad_unlock;
} }
/* Now do an early reshape check */ /* Now do an early reshape check */
if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
r = rs_check_reshape(rs); r = rs_check_reshape(rs);
if (r) if (r)
goto bad_check_reshape; goto bad_unlock;
/* Restore new, ctr requested layout to perform check */ /* Restore new, ctr requested layout to perform check */
rs_config_restore(rs, &rs_layout); rs_config_restore(rs, &rs_layout);
...@@ -3288,7 +3286,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3288,7 +3286,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = rs->md.pers->check_reshape(&rs->md); r = rs->md.pers->check_reshape(&rs->md);
if (r) { if (r) {
ti->error = "Reshape check failed"; ti->error = "Reshape check failed";
goto bad_check_reshape; goto bad_unlock;
} }
} }
} }
...@@ -3299,11 +3297,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3299,11 +3297,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_unlock(&rs->md); mddev_unlock(&rs->md);
return 0; return 0;
bad_md_start: bad_unlock:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
md_stop(&rs->md); md_stop(&rs->md);
mddev_unlock(&rs->md);
bad: bad:
raid_set_free(rs); raid_set_free(rs);
...@@ -3314,7 +3310,9 @@ static void raid_dtr(struct dm_target *ti) ...@@ -3314,7 +3310,9 @@ static void raid_dtr(struct dm_target *ti)
{ {
struct raid_set *rs = ti->private; struct raid_set *rs = ti->private;
mddev_lock_nointr(&rs->md);
md_stop(&rs->md); md_stop(&rs->md);
mddev_unlock(&rs->md);
raid_set_free(rs); raid_set_free(rs);
} }
......
...@@ -6247,6 +6247,8 @@ static void __md_stop(struct mddev *mddev) ...@@ -6247,6 +6247,8 @@ static void __md_stop(struct mddev *mddev)
void md_stop(struct mddev *mddev) void md_stop(struct mddev *mddev)
{ {
lockdep_assert_held(&mddev->reconfig_mutex);
/* stop the array and free an attached data structures. /* stop the array and free an attached data structures.
* This is called from dm-raid * This is called from dm-raid
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment