Commit 4c0cfebd authored by Theodore Ts'o's avatar Theodore Ts'o

ext4: clean up mballoc criteria comments

Line wrap and slightly clarify the comments describing mballoc's
criteria.

Define EXT4_MB_NUM_CRS as part of the enum, so that it will
automatically get updated when criteria is added or removed.

Also fix a potential uninitialized use of the 'cr' variable if
CONFIG_EXT4_DEBUG is enabled.
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent ab8627e1
...@@ -128,47 +128,52 @@ enum SHIFT_DIRECTION { ...@@ -128,47 +128,52 @@ enum SHIFT_DIRECTION {
}; };
/* /*
* Number of criterias defined. For each criteria, mballoc has slightly * For each criteria, mballoc has slightly different way of finding
* different way of finding the required blocks nad usually, higher the * the required blocks nad usually, higher the criteria the slower the
* criteria the slower the allocation. We start at lower criterias and keep * allocation. We start at lower criterias and keep falling back to
* falling back to higher ones if we are not able to find any blocks. * higher ones if we are not able to find any blocks. Lower (earlier)
*/ * criteria are faster.
#define EXT4_MB_NUM_CRS 5
/*
* All possible allocation criterias for mballoc. Lower are faster.
*/ */
enum criteria { enum criteria {
/* /*
* Used when number of blocks needed is a power of 2. This doesn't * Used when number of blocks needed is a power of 2. This
* trigger any disk IO except prefetch and is the fastest criteria. * doesn't trigger any disk IO except prefetch and is the
* fastest criteria.
*/ */
CR_POWER2_ALIGNED, CR_POWER2_ALIGNED,
/* /*
* Tries to lookup in-memory data structures to find the most suitable * Tries to lookup in-memory data structures to find the most
* group that satisfies goal request. No disk IO except block prefetch. * suitable group that satisfies goal request. No disk IO
* except block prefetch.
*/ */
CR_GOAL_LEN_FAST, CR_GOAL_LEN_FAST,
/* /*
* Same as CR_GOAL_LEN_FAST but is allowed to reduce the goal length to * Same as CR_GOAL_LEN_FAST but is allowed to reduce the goal
* the best available length for faster allocation. * length to the best available length for faster allocation.
*/ */
CR_BEST_AVAIL_LEN, CR_BEST_AVAIL_LEN,
/* /*
* Reads each block group sequentially, performing disk IO if necessary, to * Reads each block group sequentially, performing disk IO if
* find find_suitable block group. Tries to allocate goal length but might trim * necessary, to find find_suitable block group. Tries to
* the request if nothing is found after enough tries. * allocate goal length but might trim the request if nothing
* is found after enough tries.
*/ */
CR_GOAL_LEN_SLOW, CR_GOAL_LEN_SLOW,
/* /*
* Finds the first free set of blocks and allocates those. This is only * Finds the first free set of blocks and allocates
* used in rare cases when CR_GOAL_LEN_SLOW also fails to allocate * those. This is only used in rare cases when
* anything. * CR_GOAL_LEN_SLOW also fails to allocate anything.
*/ */
CR_ANY_FREE, CR_ANY_FREE,
/*
* Number of criterias defined.
*/
EXT4_MB_NUM_CRS
}; };
/* criteria below which we use fast block scanning and avoid unnecessary IO */ /* criteria below which we use fast block scanning and avoid unnecessary IO */
......
...@@ -1035,11 +1035,9 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context ...@@ -1035,11 +1035,9 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
if (num_stripe_clusters > 0) { if (num_stripe_clusters > 0) {
/* /*
* Try to round up the adjusted goal to stripe size * Try to round up the adjusted goal length to
* (in cluster units) multiple for efficiency. * stripe size (in cluster units) multiple for
* * efficiency.
* XXX: Is s->stripe always a power of 2? In that case
* we can use the faster round_up() variant.
*/ */
ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
num_stripe_clusters); num_stripe_clusters);
...@@ -2758,7 +2756,7 @@ static noinline_for_stack int ...@@ -2758,7 +2756,7 @@ static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac) ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{ {
ext4_group_t prefetch_grp = 0, ngroups, group, i; ext4_group_t prefetch_grp = 0, ngroups, group, i;
enum criteria cr, new_cr; enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
int err = 0, first_err = 0; int err = 0, first_err = 0;
unsigned int nr = 0, prefetch_ios = 0; unsigned int nr = 0, prefetch_ios = 0;
struct ext4_sb_info *sbi; struct ext4_sb_info *sbi;
...@@ -2815,12 +2813,13 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) ...@@ -2815,12 +2813,13 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
spin_unlock(&sbi->s_md_lock); spin_unlock(&sbi->s_md_lock);
} }
/* Let's just scan groups to find more-less suitable blocks */
cr = ac->ac_2order ? CR_POWER2_ALIGNED : CR_GOAL_LEN_FAST;
/* /*
* cr == CR_POWER2_ALIGNED try to get exact allocation, * Let's just scan groups to find more-less suitable blocks We
* cr == CR_ANY_FREE try to get anything * start with CR_GOAL_LEN_FAST, unless it is power of 2
* aligned, in which case let's do that faster approach first.
*/ */
if (ac->ac_2order)
cr = CR_POWER2_ALIGNED;
repeat: repeat:
for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
ac->ac_criteria = cr; ac->ac_criteria = cr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment