Commit ae9f5cce authored by Ming Lei, committed by Jens Axboe

block: ublk_drv: cleanup 'struct ublk_map_data'

'struct ublk_map_data' is passed to ublk_copy_user_pages()
for copying data between userspace buffer and request pages.

Here what matters is userspace buffer address/len and 'struct request',
so replace ->io field with user buffer address, and rename max_bytes
as len.

Meantime remove 'ubq' field from ublk_map_data, since it isn't used
any more.

Then code becomes more readable.
Reviewed-by: Ziyang Zhang <ZiyangZhang@linux.alibaba.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 96cf2f54
...@@ -420,10 +420,9 @@ static const struct block_device_operations ub_fops = { ...@@ -420,10 +420,9 @@ static const struct block_device_operations ub_fops = {
#define UBLK_MAX_PIN_PAGES 32 #define UBLK_MAX_PIN_PAGES 32
struct ublk_map_data { struct ublk_map_data {
const struct ublk_queue *ubq;
const struct request *rq; const struct request *rq;
const struct ublk_io *io; unsigned long ubuf;
unsigned max_bytes; unsigned int len;
}; };
struct ublk_io_iter { struct ublk_io_iter {
...@@ -483,14 +482,14 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data, ...@@ -483,14 +482,14 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm) static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
{ {
const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0; const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
const unsigned long start_vm = data->io->addr; const unsigned long start_vm = data->ubuf;
unsigned int done = 0; unsigned int done = 0;
struct ublk_io_iter iter = { struct ublk_io_iter iter = {
.pg_off = start_vm & (PAGE_SIZE - 1), .pg_off = start_vm & (PAGE_SIZE - 1),
.bio = data->rq->bio, .bio = data->rq->bio,
.iter = data->rq->bio->bi_iter, .iter = data->rq->bio->bi_iter,
}; };
const unsigned int nr_pages = round_up(data->max_bytes + const unsigned int nr_pages = round_up(data->len +
(start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT; (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
while (done < nr_pages) { while (done < nr_pages) {
...@@ -503,13 +502,13 @@ static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm) ...@@ -503,13 +502,13 @@ static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
iter.pages); iter.pages);
if (iter.nr_pages <= 0) if (iter.nr_pages <= 0)
return done == 0 ? iter.nr_pages : done; return done == 0 ? iter.nr_pages : done;
len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm); len = ublk_copy_io_pages(&iter, data->len, to_vm);
for (i = 0; i < iter.nr_pages; i++) { for (i = 0; i < iter.nr_pages; i++) {
if (to_vm) if (to_vm)
set_page_dirty(iter.pages[i]); set_page_dirty(iter.pages[i]);
put_page(iter.pages[i]); put_page(iter.pages[i]);
} }
data->max_bytes -= len; data->len -= len;
done += iter.nr_pages; done += iter.nr_pages;
} }
...@@ -538,15 +537,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req, ...@@ -538,15 +537,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
*/ */
if (ublk_need_map_req(req)) { if (ublk_need_map_req(req)) {
struct ublk_map_data data = { struct ublk_map_data data = {
.ubq = ubq,
.rq = req, .rq = req,
.io = io, .ubuf = io->addr,
.max_bytes = rq_bytes, .len = rq_bytes,
}; };
ublk_copy_user_pages(&data, true); ublk_copy_user_pages(&data, true);
return rq_bytes - data.max_bytes; return rq_bytes - data.len;
} }
return rq_bytes; return rq_bytes;
} }
...@@ -559,17 +557,16 @@ static int ublk_unmap_io(const struct ublk_queue *ubq, ...@@ -559,17 +557,16 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
if (ublk_need_unmap_req(req)) { if (ublk_need_unmap_req(req)) {
struct ublk_map_data data = { struct ublk_map_data data = {
.ubq = ubq,
.rq = req, .rq = req,
.io = io, .ubuf = io->addr,
.max_bytes = io->res, .len = io->res,
}; };
WARN_ON_ONCE(io->res > rq_bytes); WARN_ON_ONCE(io->res > rq_bytes);
ublk_copy_user_pages(&data, false); ublk_copy_user_pages(&data, false);
return io->res - data.max_bytes; return io->res - data.len;
} }
return rq_bytes; return rq_bytes;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment