Commit b02bab6b authored by NeilBrown, committed by Vinod Koul

async_tx: use GFP_NOWAIT rather than GFP_NOIO

These async_XX functions are called from md/raid5 in an atomic
section, between get_cpu() and put_cpu(), so they must not sleep.
Use GFP_NOWAIT, which fails fast instead of sleeping in reclaim,
rather than GFP_NOIO.
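For illustration, a minimal sketch of the calling pattern the message
describes (sketch_run_ops is an invented name modeled on raid5's
raid_run_ops; this is not the md/raid5 source): get_cpu() disables
preemption, so nothing before the matching put_cpu() may sleep, and a
failed GFP_NOWAIT allocation simply routes the request to the
synchronous software path.

#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>

/* Illustrative only: how an async_tx-style helper must behave when
 * its caller holds the CPU via get_cpu()/put_cpu(). */
static void sketch_run_ops(struct dma_device *device)
{
	struct dmaengine_unmap_data *unmap = NULL;
	int cpu = get_cpu();	/* disables preemption: sleeping is a bug */

	if (device)
		/* GFP_NOWAIT returns NULL under memory pressure instead
		 * of sleeping in reclaim, as GFP_NOIO may do. */
		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

	if (unmap) {
		/* offload path: map pages, build and submit descriptor */
		dmaengine_unmap_put(unmap);
	} else {
		/* allocation failed or no channel: synchronous CPU path */
	}

	put_cpu();	/* re-enables preemption */
	(void)cpu;	/* raid5 uses this to index per-cpu scribble buffers */
}

This is why the change is safe: as the diff below shows, every call
site already treats unmap == NULL as "fall back to the CPU
implementation".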

Dan Williams writes: Longer term, async_tx needs to be merged into md
directly, as we can allocate this unmap data statically per-stripe
rather than per request.
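A hypothetical sketch of that direction, assuming a fixed upper bound
on disk count; struct stripe_unmap and SKETCH_MAX_DISKS are invented
names, and the embedding relies on addr[] being the trailing flexible
array member of struct dmaengine_unmap_data:

#include <linux/dmaengine.h>

/* Invented bound: async_gen_syndrome() already rejects disks > 255. */
#define SKETCH_MAX_DISKS 255

/*
 * Hypothetical per-stripe embedding: reserve the unmap data once when
 * the stripe is set up, so the I/O hot path never allocates at all.
 * The addr[] member provides backing storage for the trailing
 * flexible array dmaengine_unmap_data.addr[].
 */
struct stripe_unmap {
	struct dmaengine_unmap_data base;
	dma_addr_t addr[SKETCH_MAX_DISKS];
};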

Fixes: 7476bd79 ("async_pq: convert to dmaengine_unmap_data")
Cc: stable@vger.kernel.org (v3.13+)
Reported-and-tested-by: Stanislav Samsonov <slava@annapurnalabs.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 16605e8d
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {