Commit 9275c206 authored by Christoph Hellwig

nvme-pci: refactor nvme_unmap_data

Split out three helpers from nvme_unmap_data that will allow finer-grained
unwinding from nvme_map_data.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Marc Orr <marcorr@google.com>
parent bffcd507
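
Two of the three helpers split out below, nvme_free_prps and nvme_free_sgls, walk the same chained layout: the last slot of each descriptor page holds the DMA address of the next page, so the link must be read before the page holding it is returned to the pool. A minimal, self-contained sketch of that walk (all names invented; plain pointers and malloc/free stand in for DMA addresses and dma_pool_free):

#include <stdlib.h>

#define SLOTS_PER_PAGE 512	/* stand-in for NVME_CTRL_PAGE_SIZE / sizeof(__le64) */

/*
 * Free a chain of descriptor pages. The last slot of each page holds
 * the address of the next page, so the link is read before the page
 * holding it is freed -- the same ordering nvme_free_prps() relies on.
 */
static void free_chained_pages(void **page, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		void **next = page[SLOTS_PER_PAGE - 1];	/* read link first */

		free(page);	/* dma_pool_free() in the driver */
		page = next;
	}
}

int main(void)
{
	/* Build a two-page chain: page0's last slot points at page1. */
	void **page0 = calloc(SLOTS_PER_PAGE, sizeof(void *));
	void **page1 = calloc(SLOTS_PER_PAGE, sizeof(void *));

	page0[SLOTS_PER_PAGE - 1] = page1;
	free_chained_pages(page0, 2);
	return 0;
}
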
drivers/nvme/host/pci.c
@@ -543,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 	return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
 	int i;
 
-	if (iod->dma_len) {
-		dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-			       rq_dma_dir(req));
-		return;
+	for (i = 0; i < iod->npages; i++) {
+		__le64 *prp_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+		dma_addr = next_dma_addr;
 	}
+}
 
-	WARN_ON_ONCE(!iod->nents);
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+	const int last_sg = SGES_PER_PAGE - 1;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
+	int i;
+
+	for (i = 0; i < iod->npages; i++) {
+		struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
+
+		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+		dma_addr = next_dma_addr;
+	}
+}
+
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
 	if (is_pci_p2pdma_page(sg_page(iod->sg)))
 		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
 				    rq_dma_dir(req));
 	else
 		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-	if (iod->npages == 0)
-		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-			dma_addr);
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-	for (i = 0; i < iod->npages; i++) {
-		void *addr = nvme_pci_iod_list(req)[i];
+	if (iod->dma_len) {
+		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+			       rq_dma_dir(req));
+		return;
+	}
 
-		if (iod->use_sgl) {
-			struct nvme_sgl_desc *sg_list = addr;
-
-			next_dma_addr =
-				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-		} else {
-			__le64 *prp_list = addr;
-
-			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-		}
-
-		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-		dma_addr = next_dma_addr;
-	}
+	WARN_ON_ONCE(!iod->nents);
 
+	nvme_unmap_sg(dev, req);
+	if (iod->npages == 0)
+		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+			      iod->first_dma);
+	else if (iod->use_sgl)
+		nvme_free_sgls(dev, req);
+	else
+		nvme_free_prps(dev, req);
 	mempool_free(iod->sg, dev->iod_mempool);
 }
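
The payoff, per the commit message, is finer-grained unwinding from nvme_map_data: an error path can now release exactly the resources it has set up, instead of relying on the all-or-nothing nvme_unmap_data. A self-contained C sketch of that pattern (all names invented; this is not the driver's actual error path, just the shape the split enables):

#include <stdlib.h>

/* Invented stand-ins for the per-request mapping state (iod). */
struct mapping {
	void *sg;	/* scatterlist mapping, cf. nvme_unmap_sg()  */
	void *prps;	/* PRP list pages,      cf. nvme_free_prps() */
};

static void unmap_sg(struct mapping *m)  { free(m->sg); }
static void free_prps(struct mapping *m) { free(m->prps); }

/*
 * With one helper per resource, a setup routine can unwind exactly
 * as far as it got; a monolithic teardown would have to guess which
 * resources already exist.
 */
static int map_data(struct mapping *m)
{
	m->sg = malloc(64);
	if (!m->sg)
		return -1;

	m->prps = malloc(64);
	if (!m->prps)
		goto out_unmap_sg;	/* PRPs never built: skip free_prps() */

	return 0;

out_unmap_sg:
	unmap_sg(m);
	return -1;
}

int main(void)
{
	struct mapping m = { 0 };

	if (map_data(&m) == 0) {
		/* Success path: full teardown, like nvme_unmap_data(). */
		free_prps(&m);
		unmap_sg(&m);
	}
	return 0;
}

In the driver, this is what lets a failed mapping attempt call nvme_unmap_sg, nvme_free_prps, or nvme_free_sgls individually rather than funnelling every failure through the full teardown.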