Commit 2f3af723 authored by Ming Lei, committed by Jens Axboe

block: ublk_drv: add two helpers to clean up map/unmap request

Add two helpers for checking whether map/unmap is needed, since we may
have passthrough requests which need map or unmap in the future, such as
for supporting report zones.

Meanwhile, don't mark ublk_copy_user_pages as inline, since this function
is a bit fat now.
Reviewed-by: Ziyang Zhang <ZiyangZhang@linux.alibaba.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 23ef8220
drivers/block/ublk_drv.c

@@ -488,8 +488,7 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
 	return done;
 }
 
-static inline int ublk_copy_user_pages(struct ublk_map_data *data,
-		bool to_vm)
+static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
 {
 	const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
 	const unsigned long start_vm = data->io->addr;
@@ -525,6 +524,16 @@ static inline int ublk_copy_user_pages(struct ublk_map_data *data,
 	return done;
 }
 
+static inline bool ublk_need_map_req(const struct request *req)
+{
+	return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
+}
+
+static inline bool ublk_need_unmap_req(const struct request *req)
+{
+	return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
+}
+
 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
 		struct ublk_io *io)
 {
@@ -535,7 +544,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
 	 * context and the big benefit is that pinning pages in current
 	 * context is pretty fast, see ublk_pin_user_pages
 	 */
-	if (ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE) {
+	if (ublk_need_map_req(req)) {
 		struct ublk_map_data data = {
 			.ubq	= ubq,
 			.rq	= req,
@@ -556,7 +565,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
 {
 	const unsigned int rq_bytes = blk_rq_bytes(req);
 
-	if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
+	if (ublk_need_unmap_req(req)) {
 		struct ublk_map_data data = {
 			.ubq	= ubq,
 			.rq	= req,
@@ -770,7 +779,7 @@ static inline void __ublk_rq_task_work(struct request *req)
 		return;
 	}
 
-	if (ublk_need_get_data(ubq) && (req_op(req) == REQ_OP_WRITE)) {
+	if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
 		/*
 		 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
 		 * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
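The report-zones mention in the commit message is the point of the cleanup: a
zone-report passthrough request copies data back to user memory even though it
is not REQ_OP_READ, so the check is better kept in one helper than repeated at
every call site. Below is a minimal sketch of how ublk_need_unmap_req might
later grow to cover such a request; the choice of REQ_OP_DRV_IN as the
passthrough op is an assumption for illustration and is not part of this
commit:

static inline bool ublk_need_unmap_req(const struct request *req)
{
	/*
	 * Hypothetical extension: a passthrough op such as a zone report
	 * (assumed here to arrive as REQ_OP_DRV_IN) also carries data that
	 * must be copied back to the ublk server's user memory.
	 */
	return ublk_rq_has_data(req) &&
	       (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
}

With the checks centralized, a change like this touches one helper instead of
the open-coded conditions previously spread across ublk_map_io, ublk_unmap_io,
and __ublk_rq_task_work.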