Commit 981f95a5 authored by Ming Lei, committed by Jens Axboe

ublk: cleanup ublk_copy_user_pages

Clean up ublk_copy_user_pages() by using iov_iter_get_pages2(); the code
becomes much simpler and more readable than before.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230519065030.351216-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f236a214
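For context before the diff: the cleanup drops the open-coded get_user_pages_fast() batching in favor of the iov_iter API. Callers build an iov_iter over the single user buffer with import_single_range(), and ublk_copy_user_pages() then pins the backing pages in batches with iov_iter_get_pages2(). Below is a minimal, self-contained sketch of that pin/copy/release pattern; the helper name sketch_pin_user_buffer() and its batch size are illustrative only, not part of the driver.

/*
 * Minimal sketch (not driver code): pin the pages behind one user
 * buffer in batches via the iov_iter API, mirroring the pattern this
 * patch adopts. Helper name and batch size are illustrative.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

#define SKETCH_MAX_PIN_PAGES        32

static size_t sketch_pin_user_buffer(void __user *ubuf, size_t len, int dir)
{
        struct page *pages[SKETCH_MAX_PIN_PAGES];
        struct iov_iter uiter;
        struct iovec iov;
        size_t done = 0;

        /* dir is ITER_DEST when the user buffer is the copy destination */
        if (import_single_range(dir, ubuf, len, &iov, &uiter))
                return 0;

        while (iov_iter_count(&uiter)) {
                size_t off;             /* offset into the first pinned page */
                ssize_t bytes;
                unsigned int nr_pages, i;

                /* pin up to 32 pages; this also advances the iterator */
                bytes = iov_iter_get_pages2(&uiter, pages,
                                iov_iter_count(&uiter),
                                SKETCH_MAX_PIN_PAGES, &off);
                if (bytes <= 0)
                        break;

                /* ... copy 'bytes' bytes starting at pages[0] + off ... */

                /* dirty (if written) and release every pinned page */
                nr_pages = DIV_ROUND_UP(bytes + off, PAGE_SIZE);
                for (i = 0; i < nr_pages; i++) {
                        if (dir == ITER_DEST)
                                set_page_dirty(pages[i]);
                        put_page(pages[i]);
                }
                done += bytes;
        }
        return done;
}

iov_iter_get_pages2() returns how many bytes the pinned pages cover and the offset into the first page, and it advances the iterator itself, so the caller no longer has to track pg_off/nr_pages by hand as the old ublk_map_data code did.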
@@ -412,49 +412,39 @@ static const struct block_device_operations ub_fops = {
 
 #define UBLK_MAX_PIN_PAGES        32
 
-struct ublk_map_data {
-        const struct request *rq;
-        unsigned long        ubuf;
-        unsigned int        len;
-};
-
 struct ublk_io_iter {
         struct page *pages[UBLK_MAX_PIN_PAGES];
-        unsigned pg_off;        /* offset in the 1st page in pages */
-        int nr_pages;                /* how many page pointers in pages */
         struct bio *bio;
         struct bvec_iter iter;
 };
 
-static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
-                unsigned max_bytes, bool to_vm)
+/* return how many pages are copied */
+static void ublk_copy_io_pages(struct ublk_io_iter *data,
+                size_t total, size_t pg_off, int dir)
 {
-        const unsigned total = min_t(unsigned, max_bytes,
-                        PAGE_SIZE - data->pg_off +
-                        ((data->nr_pages - 1) << PAGE_SHIFT));
         unsigned done = 0;
         unsigned pg_idx = 0;
 
         while (done < total) {
                 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
-                const unsigned int bytes = min3(bv.bv_len, total - done,
-                                (unsigned)(PAGE_SIZE - data->pg_off));
+                unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
+                                (unsigned)(PAGE_SIZE - pg_off));
                 void *bv_buf = bvec_kmap_local(&bv);
                 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
 
-                if (to_vm)
-                        memcpy(pg_buf + data->pg_off, bv_buf, bytes);
+                if (dir == ITER_DEST)
+                        memcpy(pg_buf + pg_off, bv_buf, bytes);
                 else
-                        memcpy(bv_buf, pg_buf + data->pg_off, bytes);
+                        memcpy(bv_buf, pg_buf + pg_off, bytes);
 
                 kunmap_local(pg_buf);
                 kunmap_local(bv_buf);
 
                 /* advance page array */
-                data->pg_off += bytes;
-                if (data->pg_off == PAGE_SIZE) {
+                pg_off += bytes;
+                if (pg_off == PAGE_SIZE) {
                         pg_idx += 1;
-                        data->pg_off = 0;
+                        pg_off = 0;
                 }
                 done += bytes;
@@ -468,41 +458,40 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
                         data->iter = data->bio->bi_iter;
                 }
         }
-
-        return done;
 }
 
-static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
+/*
+ * Copy data between request pages and io_iter, and 'offset'
+ * is the start point of linear offset of request.
+ */
+static size_t ublk_copy_user_pages(const struct request *req,
+                struct iov_iter *uiter, int dir)
 {
-        const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
-        const unsigned long start_vm = data->ubuf;
-        unsigned int done = 0;
         struct ublk_io_iter iter = {
-                .pg_off        = start_vm & (PAGE_SIZE - 1),
-                .bio        = data->rq->bio,
-                .iter        = data->rq->bio->bi_iter,
+                .bio        = req->bio,
+                .iter        = req->bio->bi_iter,
         };
-        const unsigned int nr_pages = round_up(data->len +
-                        (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
+        size_t done = 0;
 
-        while (done < nr_pages) {
-                const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
-                                nr_pages - done);
-                unsigned i, len;
+        while (iov_iter_count(uiter) && iter.bio) {
+                unsigned nr_pages;
+                size_t len, off;
+                int i;
 
-                iter.nr_pages = get_user_pages_fast(start_vm +
-                                (done << PAGE_SHIFT), to_pin, gup_flags,
-                                iter.pages);
-                if (iter.nr_pages <= 0)
-                        return done == 0 ? iter.nr_pages : done;
+                len = iov_iter_get_pages2(uiter, iter.pages,
atan                                iov_iter_count(uiter),
+                                UBLK_MAX_PIN_PAGES, &off);
+                if (len <= 0)
+                        return done;
 
-                len = ublk_copy_io_pages(&iter, data->len, to_vm);
-                for (i = 0; i < iter.nr_pages; i++) {
-                        if (to_vm)
+                ublk_copy_io_pages(&iter, len, off, dir);
+                nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
+                for (i = 0; i < nr_pages; i++) {
+                        if (dir == ITER_DEST)
                                 set_page_dirty(iter.pages[i]);
                         put_page(iter.pages[i]);
                 }
-                data->len -= len;
-                done += iter.nr_pages;
+                done += len;
         }
 
         return done;
@@ -529,15 +518,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
          * context is pretty fast, see ublk_pin_user_pages
          */
         if (ublk_need_map_req(req)) {
-                struct ublk_map_data data = {
-                        .rq        = req,
-                        .ubuf        = io->addr,
-                        .len        = rq_bytes,
-                };
+                struct iov_iter iter;
+                struct iovec iov;
+                const int dir = ITER_DEST;
 
-                ublk_copy_user_pages(&data, true);
-
-                return rq_bytes - data.len;
+                import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
+                                &iov, &iter);
+                return ublk_copy_user_pages(req, &iter, dir);
         }
 
         return rq_bytes;
 }
@@ -549,17 +537,15 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
         const unsigned int rq_bytes = blk_rq_bytes(req);
 
         if (ublk_need_unmap_req(req)) {
-                struct ublk_map_data data = {
-                        .rq        = req,
-                        .ubuf        = io->addr,
-                        .len        = io->res,
-                };
+                struct iov_iter iter;
+                struct iovec iov;
+                const int dir = ITER_SOURCE;
 
                 WARN_ON_ONCE(io->res > rq_bytes);
 
-                ublk_copy_user_pages(&data, false);
-
-                return io->res - data.len;
+                import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
+                                &iov, &iter);
+                return ublk_copy_user_pages(req, &iter, dir);
         }
 
         return rq_bytes;
 }