Commit f5d632d1 authored by Jens Axboe

block: shrink rq_map_data a bit

We don't need full ints for several of these members. Change the
page_order and nr_entries to unsigned shorts, and the true/false from_user
and null_mapped to booleans.

This shrinks the struct from 32 to 24 bytes on 64-bit archs.
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d322f355
@@ -158,7 +158,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
 	if (map_data) {
-		nr_pages = 1 << map_data->page_order;
+		nr_pages = 1U << map_data->page_order;
 		i = map_data->offset / PAGE_SIZE;
 	}
 	while (len) {
@@ -963,11 +963,11 @@ blk_status_t blk_insert_cloned_request(struct request *rq);
/*
 * rq_map_data - describes a caller-provided page array used when mapping
 * user data into a request (see bio_copy_user_iov()).
 *
 * Fields are ordered largest-first so the two shorts and two bools pack
 * into a single word: 24 bytes on 64-bit archs (down from 32 with the
 * old all-int layout).
 */
struct rq_map_data {
	struct page **pages;		/* caller-supplied page array */
	unsigned long offset;		/* byte offset; page index derived via offset / PAGE_SIZE */
	unsigned short page_order;	/* each entry spans (1U << page_order) pages */
	unsigned short nr_entries;	/* number of entries in @pages */
	bool null_mapped;		/* NOTE(review): presumably "no user copy-back" flag — confirm at call sites */
	bool from_user;			/* NOTE(review): presumably "data originates from user space" — confirm at call sites */
};
int blk_rq_map_user(struct request_queue *, struct request *,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment