Commit d51dc8dd authored by Gerd Bayer, committed by Paolo Abeni

Revert "s390/ism: fix receive message buffer allocation"

This reverts commit 58effa34.
Review was not finished on this patch, so it is not ready for upstreaming.
Signed-off-by: Gerd Bayer <gbayer@linux.ibm.com>
Link: https://lore.kernel.org/r/20240409113753.2181368-1-gbayer@linux.ibm.com
Fixes: 58effa34 ("s390/ism: fix receive message buffer allocation")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 33623113
@@ -14,8 +14,6 @@
 #include <linux/err.h>
 #include <linux/ctype.h>
 #include <linux/processor.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
 
 #include "ism.h"
 
@@ -294,15 +292,13 @@ static int ism_read_local_gid(struct ism_dev *ism)
 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
 	clear_bit(dmb->sba_idx, ism->sba_bitmap);
-	dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
-		       DMA_FROM_DEVICE);
-	folio_put(virt_to_folio(dmb->cpu_addr));
+	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+			  dmb->cpu_addr, dmb->dma_addr);
 }
 
 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
 	unsigned long bit;
-	int rc;
 
 	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
 		return -EINVAL;
@@ -319,30 +315,14 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
 		return -EINVAL;
 
-	dmb->cpu_addr =
-		folio_address(folio_alloc(GFP_KERNEL | __GFP_NOWARN |
-					  __GFP_NOMEMALLOC | __GFP_NORETRY,
-					  get_order(dmb->dmb_len)));
-
-	if (!dmb->cpu_addr) {
-		rc = -ENOMEM;
-		goto out_bit;
-	}
-	dmb->dma_addr = dma_map_page(&ism->pdev->dev,
-				     virt_to_page(dmb->cpu_addr), 0,
-				     dmb->dmb_len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
-		rc = -ENOMEM;
-		goto out_free;
-	}
-
-	return 0;
-
-out_free:
-	kfree(dmb->cpu_addr);
-out_bit:
-	clear_bit(dmb->sba_idx, ism->sba_bitmap);
-	return rc;
+	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+					   &dmb->dma_addr,
+					   GFP_KERNEL | __GFP_NOWARN |
+					   __GFP_NOMEMALLOC | __GFP_NORETRY);
+	if (!dmb->cpu_addr)
+		clear_bit(dmb->sba_idx, ism->sba_bitmap);
+
+	return dmb->cpu_addr ? 0 : -ENOMEM;
 }
 
 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
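The revert trades the two-step folio_alloc() + dma_map_page() scheme back for a single dma_alloc_coherent() call, which returns both the CPU address and the DMA address in one step, so the extra error-unwind labels disappear. A minimal sketch of that restored allocate/free pattern, using hypothetical example_* helpers in place of the driver's ism_alloc_dmb()/ism_free_dmb():

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical helper mirroring the restored ism_alloc_dmb() flow:
 * dma_alloc_coherent() hands back both the CPU and DMA addresses in
 * one call, so no separate mapping step or unwind label is needed.
 */
static int example_alloc_buffer(struct device *dev, size_t len,
				void **cpu_addr, dma_addr_t *dma_addr)
{
	*cpu_addr = dma_alloc_coherent(dev, len, dma_addr,
				       GFP_KERNEL | __GFP_NOWARN |
				       __GFP_NOMEMALLOC | __GFP_NORETRY);
	return *cpu_addr ? 0 : -ENOMEM;
}

/* Matching teardown, mirroring the restored ism_free_dmb(). */
static void example_free_buffer(struct device *dev, size_t len,
				void *cpu_addr, dma_addr_t dma_addr)
{
	dma_free_coherent(dev, len, cpu_addr, dma_addr);
}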