Commit ea0ea2bc authored by Shaohua Li, committed by Jens Axboe

blk-throttle: cap discard request size

A discard request is usually very large and can easily consume a cgroup's entire
bandwidth budget. The size of a discard request doesn't reflect the amount of data
actually written, so it doesn't make sense to charge the full size against the
bandwidth budget. Jens pointed out that treating the size as 0 doesn't make sense
either, because a discard request does have a cost; it just isn't easy to determine
the actual cost. This patch simply accounts each discard request as one sector.
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6470812e
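
For illustration, here is a minimal user-space sketch (not the kernel code itself) of the accounting rule this patch introduces: a discard is charged as a single 512-byte sector, while other bios are charged their actual payload size. The fake_bio struct and the constants below are made up for the example; only the one-sector cap mirrors throtl_bio_data_size().

/* Illustrative sketch only: the struct and op codes are invented for this
 * example; the 512-byte cap for discards is the rule added by the patch. */
#include <stdio.h>

#define FAKE_OP_WRITE   0
#define FAKE_OP_DISCARD 1

struct fake_bio {
	int op;                  /* FAKE_OP_* */
	unsigned int bi_size;    /* payload size in bytes */
};

/* Same idea as throtl_bio_data_size(): a discard is charged one sector. */
static unsigned int accounted_size(const struct fake_bio *bio)
{
	if (bio->op == FAKE_OP_DISCARD)
		return 512;
	return bio->bi_size;
}

int main(void)
{
	struct fake_bio discard = { FAKE_OP_DISCARD, 1U << 30 }; /* 1 GiB discard */
	struct fake_bio write   = { FAKE_OP_WRITE,   1U << 20 }; /* 1 MiB write */

	printf("discard charged: %u bytes\n", accounted_size(&discard)); /* 512 */
	printf("write charged:   %u bytes\n", accounted_size(&write));   /* 1048576 */
	return 0;
}

With the cap in place, even a multi-gigabyte discard consumes only 512 bytes of the cgroup's bandwidth budget, so it can no longer starve regular reads and writes, while still counting as one request toward the IOPS limit (io_disp is incremented unchanged in the diff below).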
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 	}							\
 } while (0)
 
+static inline unsigned int throtl_bio_data_size(struct bio *bio)
+{
+	/* assume it's one sector */
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+		return 512;
+	return bio->bi_iter.bi_size;
+}
+
 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 {
 	INIT_LIST_HEAD(&qn->node);
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return true;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 
 	if (!jiffy_wait)
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->bytes_disp[rw] += bio_size;
 	tg->io_disp[rw]++;
-	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->last_bytes_disp[rw] += bio_size;
 	tg->last_io_disp[rw]++;
 
 	/*