Commit 5dc18f51 authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  dmatest: fix use after free in dmatest_exit
  ipu_idmac: fix spinlock type
  iop-adma, mv_xor: fix mem leak on self-test setup failure
  fsldma: fix off by one in dma_halt
  I/OAT: fail self-test if callback test reaches timeout
  I/OAT: update driver version and copyright dates
  I/OAT: list usage cleanup
  I/OAT: set tcp_dma_copybreak to 256k for I/OAT ver.3
  I/OAT: cancel watchdog before dma remove
  I/OAT: fail initialization on zero channels detection
  I/OAT: do not set DCACTRL_CMPL_WRITE_ENABLE for I/OAT ver.3
  I/OAT: add verification for proper APICID_TAG_MAP setting by BIOS
  dmaengine: update kerneldoc
parents fd6ec5f3 7cbd4877
 /*
- * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
...
@@ -430,13 +430,15 @@ late_initcall(dmatest_init);
 static void __exit dmatest_exit(void)
 {
 	struct dmatest_chan *dtc, *_dtc;
+	struct dma_chan *chan;
 
 	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
 		list_del(&dtc->node);
+		chan = dtc->chan;
 		dmatest_cleanup_channel(dtc);
 		pr_debug("dmatest: dropped channel %s\n",
-			 dma_chan_name(dtc->chan));
-		dma_release_channel(dtc->chan);
+			 dma_chan_name(chan));
+		dma_release_channel(chan);
 	}
 }
 module_exit(dmatest_exit);
...
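The use-after-free fixed above is that dtc->chan was still being read for the pr_debug() and dma_release_channel() calls after dmatest_cleanup_channel() had freed dtc; the patch simply caches the pointer first. A minimal userspace sketch of the same discipline, with invented names standing in for the driver's structures:

#include <stdio.h>
#include <stdlib.h>

struct channel { const char *name; };

struct test_chan {
	struct channel *chan;
	/* ...other per-test state released by the cleanup below... */
};

/* Stand-in for dmatest_cleanup_channel(): it frees the container. */
static void cleanup_test_chan(struct test_chan *tc)
{
	free(tc);
}

int main(void)
{
	struct channel ch = { .name = "dma0chan0" };
	struct test_chan *tc = malloc(sizeof(*tc));
	struct channel *chan;

	if (!tc)
		return 1;
	tc->chan = &ch;

	chan = tc->chan;        /* cache what we still need... */
	cleanup_test_chan(tc);  /* ...because this frees tc */
	printf("dropped channel %s\n", chan->name);  /* no tc dereference */
	return 0;
}

The general rule: copy out any field you still need before calling a cleanup routine that frees the enclosing object.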
@@ -158,7 +158,8 @@ static void dma_start(struct fsl_dma_chan *fsl_chan)
 
 static void dma_halt(struct fsl_dma_chan *fsl_chan)
 {
-	int i = 0;
+	int i;
+
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
 		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
 		32);
@@ -166,8 +167,11 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
 		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
 
-	while (!dma_is_idle(fsl_chan) && (i++ < 100))
+	for (i = 0; i < 100; i++) {
+		if (dma_is_idle(fsl_chan))
+			break;
 		udelay(10);
+	}
 	if (i >= 100 && !dma_is_idle(fsl_chan))
 		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
 }
...
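The rewritten polling loop makes the exit conditions unambiguous: leaving via break means the channel went idle with i < 100, while a genuine timeout leaves i == 100, which is exactly what the dev_err() check keys off. A simplified userspace sketch of the same bounded-poll shape; device_is_idle() is an invented stand-in for the driver's register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Pretend the engine reports idle on the fourth poll. */
static bool device_is_idle(void)
{
	static int polls;
	return ++polls > 3;
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (device_is_idle())
			break;          /* early exit: i stays below 100 */
		usleep(10);
	}

	if (i >= 100 && !device_is_idle())
		fprintf(stderr, "halt timeout!\n");
	else
		printf("idle after %d polls\n", i);
	return 0;
}

Re-checking the idle state in the final if mirrors the driver, so a device that goes idle on the very last poll is not reported as a timeout.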
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 Intel Corporation.
+ * Copyright(c) 2007 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
...
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 Intel Corporation.
+ * Copyright(c) 2007 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
...
@@ -49,6 +49,23 @@
 #define DCA_TAG_MAP_MASK 0xDF
 
+/* expected tag map bytes for I/OAT ver.2 */
+#define DCA2_TAG_MAP_BYTE0 0x80
+#define DCA2_TAG_MAP_BYTE1 0x0
+#define DCA2_TAG_MAP_BYTE2 0x81
+#define DCA2_TAG_MAP_BYTE3 0x82
+#define DCA2_TAG_MAP_BYTE4 0x82
+
+/* verify if tag map matches expected values */
+static inline int dca2_tag_map_valid(u8 *tag_map)
+{
+	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
+		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
+		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
+		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
+		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
+}
+
 /*
  * "Legacy" DCA systems do not implement the DCA register set in the
  * I/OAT device. Software needs direct support for their tag mappings.
@@ -452,6 +469,13 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 			ioatdca->tag_map[i] = 0;
 	}
 
+	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
+		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
+			"disabling DCA\n");
+		free_dca_provider(dca);
+		return NULL;
+	}
+
 	err = register_dca_provider(dca, &pdev->dev);
 	if (err) {
 		free_dca_provider(dca);
...
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2007 Intel Corporation.
+ * Copyright(c) 2004 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
...
@@ -189,11 +189,13 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->xfercap = xfercap;
 		ioat_chan->desccount = 0;
 		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-		if (ioat_chan->device->version != IOAT_VER_1_2) {
-			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
-					| IOAT_DMA_DCA_ANY_CPU,
-				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-		}
+		if (ioat_chan->device->version == IOAT_VER_2_0)
+			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
+			       IOAT_DMA_DCA_ANY_CPU,
+			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+		else if (ioat_chan->device->version == IOAT_VER_3_0)
+			writel(IOAT_DMA_DCA_ANY_CPU,
+			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
 		spin_lock_init(&ioat_chan->cleanup_lock);
 		spin_lock_init(&ioat_chan->desc_lock);
 		INIT_LIST_HEAD(&ioat_chan->free_desc);
@@ -1169,9 +1171,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 				 * up if the client is done with the descriptor
 				 */
 				if (async_tx_test_ack(&desc->async_tx)) {
-					list_del(&desc->node);
-					list_add_tail(&desc->node,
-						      &ioat_chan->free_desc);
+					list_move_tail(&desc->node,
+						       &ioat_chan->free_desc);
 				} else
 					desc->async_tx.cookie = 0;
 			} else {
@@ -1362,6 +1363,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	dma_cookie_t cookie;
 	int err = 0;
 	struct completion cmp;
+	unsigned long tmo;
 
 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -1413,9 +1415,10 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 	device->common.device_issue_pending(dma_chan);
 
-	wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+	if (tmo == 0 ||
+	    device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
 					!= DMA_SUCCESS) {
 		dev_err(&device->pdev->dev,
 			"Self-test copy timed out, disabling\n");
@@ -1657,6 +1660,13 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 		" %d channels, device version 0x%02x, driver version %s\n",
 		device->common.chancnt, device->version, IOAT_DMA_VERSION);
 
+	if (!device->common.chancnt) {
+		dev_err(&device->pdev->dev,
+			"Intel(R) I/OAT DMA Engine problem found: "
+			"zero channels detected\n");
+		goto err_setup_interrupts;
+	}
+
 	err = ioat_dma_setup_interrupts(device);
 	if (err)
 		goto err_setup_interrupts;
@@ -1696,6 +1706,9 @@ void ioat_dma_remove(struct ioatdma_device *device)
 	struct dma_chan *chan, *_chan;
 	struct ioat_dma_chan *ioat_chan;
 
+	if (device->version != IOAT_VER_3_0)
+		cancel_delayed_work(&device->work);
+
 	ioat_dma_remove_interrupts(device);
 
 	dma_async_device_unregister(&device->common);
@@ -1707,10 +1720,6 @@ void ioat_dma_remove(struct ioatdma_device *device)
 	pci_release_regions(device->pdev);
 	pci_disable_device(device->pdev);
 
-	if (device->version != IOAT_VER_3_0) {
-		cancel_delayed_work(&device->work);
-	}
-
 	list_for_each_entry_safe(chan, _chan,
 				 &device->common.channels, device_node) {
 		ioat_chan = to_ioat_chan(chan);
...
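The self-test hunk above now records the return value of wait_for_completion_timeout(), which is zero when the wait timed out, and fails the test in that case even if the copy status would otherwise read back as complete: a finished copy whose completion callback never ran still means the interrupt path is broken. A hedged sketch of just that decision logic; timed_wait() and check_tx_complete() are invented stand-ins for the timed wait and for the device_is_tx_complete() status poll:

#include <stdio.h>

enum tx_status { TX_SUCCESS, TX_IN_PROGRESS };

/* Invented stand-in for wait_for_completion_timeout(): returns the time
 * left, or 0 on timeout.  Here we pretend the callback never fired. */
static unsigned long timed_wait(unsigned long timeout_ms)
{
	(void)timeout_ms;
	return 0;
}

/* Invented stand-in for the status poll; the copy itself may have
 * finished even though no callback arrived. */
static enum tx_status check_tx_complete(void)
{
	return TX_SUCCESS;
}

int main(void)
{
	unsigned long tmo = timed_wait(3000);

	/* Fail on timeout *or* incomplete status: a finished copy with a
	 * dead interrupt/callback path is still a broken engine. */
	if (tmo == 0 || check_tx_complete() != TX_SUCCESS) {
		fprintf(stderr, "Self-test copy timed out, disabling\n");
		return 1;
	}
	printf("self-test passed\n");
	return 0;
}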
 /*
- * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
...
@@ -29,7 +29,7 @@
 #include <linux/pci_ids.h>
 #include <net/tcp.h>
 
-#define IOAT_DMA_VERSION "3.30"
+#define IOAT_DMA_VERSION "3.64"
 
 enum ioat_interrupt {
 	none = 0,
...
@@ -135,12 +135,14 @@ static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
 	#ifdef CONFIG_NET_DMA
 	switch (dev->version) {
 	case IOAT_VER_1_2:
-	case IOAT_VER_3_0:
 		sysctl_tcp_dma_copybreak = 4096;
 		break;
 	case IOAT_VER_2_0:
 		sysctl_tcp_dma_copybreak = 2048;
 		break;
+	case IOAT_VER_3_0:
+		sysctl_tcp_dma_copybreak = 262144;
+		break;
 	}
 	#endif
 }
...
 /*
- * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
...
 /*
- * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
...
@@ -928,19 +928,19 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
-		if (!xor_srcs[src_idx])
-			while (src_idx--) {
+		if (!xor_srcs[src_idx]) {
+			while (src_idx--)
 				__free_page(xor_srcs[src_idx]);
-				return -ENOMEM;
-			}
+			return -ENOMEM;
+		}
 	}
 
 	dest = alloc_page(GFP_KERNEL);
-	if (!dest)
-		while (src_idx--) {
+	if (!dest) {
+		while (src_idx--)
 			__free_page(xor_srcs[src_idx]);
-			return -ENOMEM;
-		}
+		return -ENOMEM;
+	}
 
 	/* Fill in src buffers */
 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
...
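This hunk and the identical mv_xor change below fix the same misplaced braces: the old code attached the while loop directly to the unbraced if and put return -ENOMEM inside the loop body, so at most one previously allocated page was freed before returning. The corrected shape braces the if, drains the unwind loop, and only then returns. A minimal userspace sketch of that unwind pattern, with malloc()/free() standing in for alloc_page()/__free_page():

#include <stdio.h>
#include <stdlib.h>

#define NUM_SRC 5

int main(void)
{
	void *srcs[NUM_SRC];
	void *dest;
	int src_idx;

	for (src_idx = 0; src_idx < NUM_SRC; src_idx++) {
		srcs[src_idx] = malloc(4096);
		if (!srcs[src_idx]) {
			/* unwind only what was actually allocated */
			while (src_idx--)
				free(srcs[src_idx]);
			return 1;
		}
	}

	dest = malloc(4096);
	if (!dest) {
		/* src_idx == NUM_SRC here, so this frees every source */
		while (src_idx--)
			free(srcs[src_idx]);
		return 1;
	}

	puts("all buffers allocated");

	while (src_idx--)
		free(srcs[src_idx]);
	free(dest);
	return 0;
}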
@@ -729,7 +729,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
 
 	ichan->status = IPU_CHANNEL_READY;
 
-	spin_unlock_irqrestore(ipu->lock, flags);
+	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	return 0;
 }
...
@@ -1019,19 +1019,19 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 
 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
-		if (!xor_srcs[src_idx])
-			while (src_idx--) {
+		if (!xor_srcs[src_idx]) {
+			while (src_idx--)
 				__free_page(xor_srcs[src_idx]);
-				return -ENOMEM;
-			}
+			return -ENOMEM;
+		}
 	}
 
 	dest = alloc_page(GFP_KERNEL);
-	if (!dest)
-		while (src_idx--) {
+	if (!dest) {
+		while (src_idx--)
 			__free_page(xor_srcs[src_idx]);
-			return -ENOMEM;
-		}
+		return -ENOMEM;
+	}
 
 	/* Fill in src buffers */
 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
...
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
  * @bytes_transferred: byte counter
  */
@@ -114,9 +113,6 @@ struct dma_chan_percpu {
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
@@ -213,8 +209,6 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
- * @refcount: reference count
- * @done: IO completion struct
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -227,6 +221,7 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_terminate_all: terminate all pending operations
+ * @device_is_tx_complete: poll for transaction completion
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
...