Commit bb5c64c8 authored by Christophe JAILLET, committed by Jakub Kicinski

mlxsw: pci: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand-modified to replace GFP_ with the correct flag.
It has been compile-tested.

When memory is allocated in 'mlxsw_pci_queue_init()' and
'mlxsw_pci_fw_area_init()', GFP_KERNEL can be used because both functions
already use this flag and no lock is acquired.
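
For illustration, the converted allocation in 'mlxsw_pci_queue_init()' then
reads as follows (excerpt from the diff below; the GFP_KERNEL argument is the
hand-modified part):

	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;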

When memory is allocated in 'mlxsw_pci_mbox_alloc()', GFP_KERNEL can be used
because it is only called from the probe function and no lock is acquired in
between.
The call chain is:
  --> mlxsw_pci_probe()
    --> mlxsw_pci_cmd_init()
      --> mlxsw_pci_mbox_alloc()
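
The converted call (see the corresponding hunk in the diff below) therefore
also passes GFP_KERNEL:

	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);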

While at it, also replace the 'dma_set_mask()/dma_set_coherent_mask()'
sequence with the less verbose 'dma_set_mask_and_coherent()'.
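
In simplified form (error handling elided; see the last hunk of the diff),
the probe sequence becomes:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));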

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
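
For reference, such a script is typically applied with something like:

  spatch --sp-file pci-dma.cocci --in-place drivers/net/ethernet/mellanox/mlxsw/pci.c

(the .cocci file name here is arbitrary), after which the GFP_ placeholder
must be replaced by hand with the appropriate flag.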
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Tested-by: Ido Schimmel <idosch@nvidia.com>
Link: https://lore.kernel.org/r/20210114084757.490540-1-christophe.jaillet@wanadoo.fr
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

parent c612fe78
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -323,8 +323,8 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
 	struct pci_dev *pdev = mlxsw_pci->pdev;
 	dma_addr_t mapaddr;
 
-	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
-	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
+	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
 		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
 		return -EIO;
 	}
@@ -342,7 +342,7 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
 
 	if (!frag_len)
 		return;
-	pci_unmap_single(pdev, mapaddr, frag_len, direction);
+	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
 }
 
 static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
@@ -858,9 +858,9 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	tasklet_setup(&q->tasklet, q_ops->tasklet);
 
 	mem_item->size = MLXSW_PCI_AQ_SIZE;
-	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
-					     mem_item->size,
-					     &mem_item->mapaddr);
+	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+					   mem_item->size, &mem_item->mapaddr,
+					   GFP_KERNEL);
 	if (!mem_item->buf)
 		return -ENOMEM;
 
@@ -890,8 +890,8 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 err_q_ops_init:
 	kfree(q->elem_info);
 err_elem_info_alloc:
-	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
-			    mem_item->buf, mem_item->mapaddr);
+	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+			  mem_item->buf, mem_item->mapaddr);
 	return err;
 }
 
@@ -903,8 +903,8 @@ static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
 
 	q_ops->fini(mlxsw_pci, q);
 	kfree(q->elem_info);
-	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
-			    mem_item->buf, mem_item->mapaddr);
+	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+			  mem_item->buf, mem_item->mapaddr);
 }
 
 static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1273,9 +1273,9 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 
 		mem_item = &mlxsw_pci->fw_area.items[i];
 		mem_item->size = MLXSW_PCI_PAGE_SIZE;
-		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
-						     mem_item->size,
-						     &mem_item->mapaddr);
+		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+						   mem_item->size,
+						   &mem_item->mapaddr, GFP_KERNEL);
 		if (!mem_item->buf) {
 			err = -ENOMEM;
 			goto err_alloc;
@@ -1304,8 +1304,8 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 
 	for (i--; i >= 0; i--) {
 		mem_item = &mlxsw_pci->fw_area.items[i];
-		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
-				    mem_item->buf, mem_item->mapaddr);
+		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+				  mem_item->buf, mem_item->mapaddr);
 	}
 	kfree(mlxsw_pci->fw_area.items);
 	return err;
@@ -1321,8 +1321,8 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
 
 	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
 		mem_item = &mlxsw_pci->fw_area.items[i];
-		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
-				    mem_item->buf, mem_item->mapaddr);
+		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+				  mem_item->buf, mem_item->mapaddr);
 	}
 	kfree(mlxsw_pci->fw_area.items);
 }
@@ -1347,8 +1347,8 @@ static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
 	int err = 0;
 
 	mbox->size = MLXSW_CMD_MBOX_SIZE;
-	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
-					 &mbox->mapaddr);
+	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
+				       &mbox->mapaddr, GFP_KERNEL);
 	if (!mbox->buf) {
 		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
 		err = -ENOMEM;
@@ -1362,8 +1362,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
 {
 	struct pci_dev *pdev = mlxsw_pci->pdev;
 
-	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
-			    mbox->mapaddr);
+	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+			  mbox->mapaddr);
 }
 
 static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
@@ -1848,17 +1848,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_pci_request_regions;
 	}
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
-			goto err_pci_set_dma_mask;
-		}
-	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+			dev_err(&pdev->dev, "dma_set_mask failed\n");
 			goto err_pci_set_dma_mask;
 		}
 	}