Commit e030dbf9 authored by Linus Torvalds

Merge branch 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop

* 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop: (28 commits)
  ioatdma: add the unisys "i/oat" pci vendor/device id
  ARM: Add drivers/dma to arch/arm/Kconfig
  iop3xx: surface the iop3xx DMA and AAU units to the iop-adma driver
  iop13xx: surface the iop13xx adma units to the iop-adma driver
  dmaengine: driver for the iop32x, iop33x, and iop13xx raid engines
  md: remove raid5 compute_block and compute_parity5
  md: handle_stripe5 - request io processing in raid5_run_ops
  md: handle_stripe5 - add request/completion logic for async expand ops
  md: handle_stripe5 - add request/completion logic for async read ops
  md: handle_stripe5 - add request/completion logic for async check ops
  md: handle_stripe5 - add request/completion logic for async compute ops
  md: handle_stripe5 - add request/completion logic for async write ops
  md: common infrastructure for running operations with raid5_run_ops
  md: raid5_run_ops - run stripe operations outside sh->lock
  raid5: replace custom debug PRINTKs with standard pr_debug
  raid5: refactor handle_stripe5 and handle_stripe6 (v3)
  async_tx: add the async_tx api
  xor: make 'xor_blocks' a library routine for use with async_tx
  dmaengine: make clients responsible for managing channels
  dmaengine: refactor dmaengine around dma_async_tx_descriptor
  ...
parents 12a22960 3039f073
@@ -433,6 +433,12 @@ tcp_workaround_signed_windows - BOOLEAN
	not receive a window scaling option from them.
	Default: 0
tcp_dma_copybreak - INTEGER
Lower limit, in bytes, of the size of socket reads that will be
offloaded to a DMA copy engine, if one is present in the system
and CONFIG_NET_DMA is enabled.
Default: 4096
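
For reference, this is a runtime sysctl; a hypothetical tuning line for
/etc/sysctl.conf (the value 8192 is chosen purely for illustration):

	net.ipv4.tcp_dma_copybreak = 8192

Reads below the limit are copied by the CPU as before; only reads of at
least this size are handed to the copy engine.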
CIPSOv4 Variables:
cipso_cache_enable - BOOLEAN
...
@@ -1042,6 +1042,8 @@ source "drivers/mmc/Kconfig"
source "drivers/rtc/Kconfig"
source "drivers/dma/Kconfig"
endmenu
source "fs/Kconfig"
...
@@ -25,6 +25,7 @@
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/hardware/iop_adma.h>
#define IOP13XX_UART_XTAL 33334000
#define IOP13XX_SETUP_DEBUG 0
@@ -236,19 +237,143 @@ static unsigned long iq8134x_probe_flash_size(void)
}
#endif
/* ADMA Channels */
static struct resource iop13xx_adma_0_resources[] = {
[0] = {
.start = IOP13XX_ADMA_PHYS_BASE(0),
.end = IOP13XX_ADMA_UPPER_PA(0),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_IOP13XX_ADMA0_EOT,
.end = IRQ_IOP13XX_ADMA0_EOT,
.flags = IORESOURCE_IRQ
},
[2] = {
.start = IRQ_IOP13XX_ADMA0_EOC,
.end = IRQ_IOP13XX_ADMA0_EOC,
.flags = IORESOURCE_IRQ
},
[3] = {
.start = IRQ_IOP13XX_ADMA0_ERR,
.end = IRQ_IOP13XX_ADMA0_ERR,
.flags = IORESOURCE_IRQ
}
};
static struct resource iop13xx_adma_1_resources[] = {
[0] = {
.start = IOP13XX_ADMA_PHYS_BASE(1),
.end = IOP13XX_ADMA_UPPER_PA(1),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_IOP13XX_ADMA1_EOT,
.end = IRQ_IOP13XX_ADMA1_EOT,
.flags = IORESOURCE_IRQ
},
[2] = {
.start = IRQ_IOP13XX_ADMA1_EOC,
.end = IRQ_IOP13XX_ADMA1_EOC,
.flags = IORESOURCE_IRQ
},
[3] = {
.start = IRQ_IOP13XX_ADMA1_ERR,
.end = IRQ_IOP13XX_ADMA1_ERR,
.flags = IORESOURCE_IRQ
}
};
static struct resource iop13xx_adma_2_resources[] = {
[0] = {
.start = IOP13XX_ADMA_PHYS_BASE(2),
.end = IOP13XX_ADMA_UPPER_PA(2),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_IOP13XX_ADMA2_EOT,
.end = IRQ_IOP13XX_ADMA2_EOT,
.flags = IORESOURCE_IRQ
},
[2] = {
.start = IRQ_IOP13XX_ADMA2_EOC,
.end = IRQ_IOP13XX_ADMA2_EOC,
.flags = IORESOURCE_IRQ
},
[3] = {
.start = IRQ_IOP13XX_ADMA2_ERR,
.end = IRQ_IOP13XX_ADMA2_ERR,
.flags = IORESOURCE_IRQ
}
};
static u64 iop13xx_adma_dmamask = DMA_64BIT_MASK;
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
};
static struct iop_adma_platform_data iop13xx_adma_1_data = {
.hw_id = 1,
.pool_size = PAGE_SIZE,
};
static struct iop_adma_platform_data iop13xx_adma_2_data = {
.hw_id = 2,
.pool_size = PAGE_SIZE,
};
/* The ids are fixed up later in iop13xx_platform_init */
static struct platform_device iop13xx_adma_0_channel = {
.name = "iop-adma",
.id = 0,
.num_resources = 4,
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
static struct platform_device iop13xx_adma_1_channel = {
.name = "iop-adma",
.id = 0,
.num_resources = 4,
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
static struct platform_device iop13xx_adma_2_channel = {
.name = "iop-adma",
.id = 0,
.num_resources = 4,
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.platform_data = (void *) &iop13xx_adma_2_data,
},
};
void __init iop13xx_map_io(void)
{
/* Initialize the Static Page Table maps */
iotable_init(iop13xx_std_desc, ARRAY_SIZE(iop13xx_std_desc));
}
static int init_uart;
static int init_i2c;
static int init_adma;
void __init iop13xx_platform_init(void)
{
int i;
u32 uart_idx, i2c_idx, adma_idx, plat_idx;
struct platform_device *iop13xx_devices[IQ81340_MAX_PLAT_DEVICES];
/* set the bases so we can read the device id */
@@ -294,6 +419,12 @@ void __init iop13xx_platform_init(void)
}
}
if (init_adma == IOP13XX_INIT_ADMA_DEFAULT) {
init_adma |= IOP13XX_INIT_ADMA_0;
init_adma |= IOP13XX_INIT_ADMA_1;
init_adma |= IOP13XX_INIT_ADMA_2;
}
plat_idx = 0;
uart_idx = 0;
i2c_idx = 0;
@@ -332,6 +463,56 @@ void __init iop13xx_platform_init(void)
}
}
/* initialize adma channel ids and capabilities */
adma_idx = 0;
for (i = 0; i < IQ81340_NUM_ADMA; i++) {
struct iop_adma_platform_data *plat_data;
if ((init_adma & (1 << i)) && IOP13XX_SETUP_DEBUG)
printk(KERN_INFO
"Adding adma%d to platform device list\n", i);
switch (init_adma & (1 << i)) {
case IOP13XX_INIT_ADMA_0:
iop13xx_adma_0_channel.id = adma_idx++;
iop13xx_devices[plat_idx++] = &iop13xx_adma_0_channel;
plat_data = &iop13xx_adma_0_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask);
dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
break;
case IOP13XX_INIT_ADMA_1:
iop13xx_adma_1_channel.id = adma_idx++;
iop13xx_devices[plat_idx++] = &iop13xx_adma_1_channel;
plat_data = &iop13xx_adma_1_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask);
dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
break;
case IOP13XX_INIT_ADMA_2:
iop13xx_adma_2_channel.id = adma_idx++;
iop13xx_devices[plat_idx++] = &iop13xx_adma_2_channel;
plat_data = &iop13xx_adma_2_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask);
dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask);
dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask);
dma_cap_set(DMA_PQ_ZERO_SUM, plat_data->cap_mask);
break;
}
}
#ifdef CONFIG_MTD_PHYSMAP
iq8134x_flash_resource.end = iq8134x_flash_resource.start +
iq8134x_probe_flash_size() - 1;
@@ -399,5 +580,35 @@ static int __init iop13xx_init_i2c_setup(char *str)
return 1;
}
static int __init iop13xx_init_adma_setup(char *str)
{
if (str) {
while (*str != '\0') {
switch (*str) {
case '0':
init_adma |= IOP13XX_INIT_ADMA_0;
break;
case '1':
init_adma |= IOP13XX_INIT_ADMA_1;
break;
case '2':
init_adma |= IOP13XX_INIT_ADMA_2;
break;
case ',':
case '=':
break;
default:
PRINTK("\"iop13xx_init_adma\" malformed"
" at character: \'%c\'", *str);
*(str + 1) = '\0';
init_adma = IOP13XX_INIT_ADMA_DEFAULT;
}
str++;
}
}
return 1;
}
__setup("iop13xx_init_adma", iop13xx_init_adma_setup);
__setup("iop13xx_init_uart", iop13xx_init_uart_setup); __setup("iop13xx_init_uart", iop13xx_init_uart_setup);
__setup("iop13xx_init_i2c", iop13xx_init_i2c_setup); __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup);
@@ -180,6 +180,8 @@ static void __init glantank_init_machine(void)
platform_device_register(&iop3xx_i2c1_device);
platform_device_register(&glantank_flash_device);
platform_device_register(&glantank_serial_device);
platform_device_register(&iop3xx_dma_0_channel);
platform_device_register(&iop3xx_dma_1_channel);
pm_power_off = glantank_power_off;
}
...
@@ -298,9 +298,14 @@ static void __init iq31244_init_machine(void)
platform_device_register(&iop3xx_i2c1_device);
platform_device_register(&iq31244_flash_device);
platform_device_register(&iq31244_serial_device);
platform_device_register(&iop3xx_dma_0_channel);
platform_device_register(&iop3xx_dma_1_channel);
if (is_ep80219())
pm_power_off = ep80219_power_off;
if (!is_80219())
platform_device_register(&iop3xx_aau_channel);
}
static int __init force_ep80219_setup(char *str)
...
@@ -181,6 +181,9 @@ static void __init iq80321_init_machine(void)
platform_device_register(&iop3xx_i2c1_device);
platform_device_register(&iq80321_flash_device);
platform_device_register(&iq80321_serial_device);
platform_device_register(&iop3xx_dma_0_channel);
platform_device_register(&iop3xx_dma_1_channel);
platform_device_register(&iop3xx_aau_channel);
}
MACHINE_START(IQ80321, "Intel IQ80321")
...
@@ -245,6 +245,8 @@ static void __init n2100_init_machine(void)
platform_device_register(&iop3xx_i2c0_device);
platform_device_register(&n2100_flash_device);
platform_device_register(&n2100_serial_device);
platform_device_register(&iop3xx_dma_0_channel);
platform_device_register(&iop3xx_dma_1_channel);
pm_power_off = n2100_power_off;
...
@@ -136,6 +136,9 @@ static void __init iq80331_init_machine(void)
platform_device_register(&iop33x_uart0_device);
platform_device_register(&iop33x_uart1_device);
platform_device_register(&iq80331_flash_device);
platform_device_register(&iop3xx_dma_0_channel);
platform_device_register(&iop3xx_dma_1_channel);
platform_device_register(&iop3xx_aau_channel);
}
MACHINE_START(IQ80331, "Intel IQ80331")
...
@@ -136,6 +136,9 @@ static void __init iq80332_init_machine(void)
platform_device_register(&iop33x_uart0_device);
platform_device_register(&iop33x_uart1_device);
platform_device_register(&iq80332_flash_device);
platform_device_register(&iop3xx_dma_0_channel);
platform_device_register(&iop3xx_dma_1_channel);
platform_device_register(&iop3xx_aau_channel);
}
MACHINE_START(IQ80332, "Intel IQ80332")
...
@@ -12,6 +12,7 @@ obj-$(CONFIG_ARCH_IOP32X) += setup.o
obj-$(CONFIG_ARCH_IOP32X) += time.o
obj-$(CONFIG_ARCH_IOP32X) += io.o
obj-$(CONFIG_ARCH_IOP32X) += cp6.o
obj-$(CONFIG_ARCH_IOP32X) += adma.o
# IOP33X
obj-$(CONFIG_ARCH_IOP33X) += gpio.o
@@ -21,6 +22,7 @@ obj-$(CONFIG_ARCH_IOP33X) += setup.o
obj-$(CONFIG_ARCH_IOP33X) += time.o
obj-$(CONFIG_ARCH_IOP33X) += io.o
obj-$(CONFIG_ARCH_IOP33X) += cp6.o
obj-$(CONFIG_ARCH_IOP33X) += adma.o
# IOP13XX
obj-$(CONFIG_ARCH_IOP13XX) += cp6.o
...
/*
* platform device definitions for the iop3xx dma/xor engines
* Copyright © 2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/platform_device.h>
#include <asm/hardware/iop3xx.h>
#include <linux/dma-mapping.h>
#include <asm/arch/adma.h>
#include <asm/hardware/iop_adma.h>
#ifdef CONFIG_ARCH_IOP32X
#define IRQ_DMA0_EOT IRQ_IOP32X_DMA0_EOT
#define IRQ_DMA0_EOC IRQ_IOP32X_DMA0_EOC
#define IRQ_DMA0_ERR IRQ_IOP32X_DMA0_ERR
#define IRQ_DMA1_EOT IRQ_IOP32X_DMA1_EOT
#define IRQ_DMA1_EOC IRQ_IOP32X_DMA1_EOC
#define IRQ_DMA1_ERR IRQ_IOP32X_DMA1_ERR
#define IRQ_AA_EOT IRQ_IOP32X_AA_EOT
#define IRQ_AA_EOC IRQ_IOP32X_AA_EOC
#define IRQ_AA_ERR IRQ_IOP32X_AA_ERR
#endif
#ifdef CONFIG_ARCH_IOP33X
#define IRQ_DMA0_EOT IRQ_IOP33X_DMA0_EOT
#define IRQ_DMA0_EOC IRQ_IOP33X_DMA0_EOC
#define IRQ_DMA0_ERR IRQ_IOP33X_DMA0_ERR
#define IRQ_DMA1_EOT IRQ_IOP33X_DMA1_EOT
#define IRQ_DMA1_EOC IRQ_IOP33X_DMA1_EOC
#define IRQ_DMA1_ERR IRQ_IOP33X_DMA1_ERR
#define IRQ_AA_EOT IRQ_IOP33X_AA_EOT
#define IRQ_AA_EOC IRQ_IOP33X_AA_EOC
#define IRQ_AA_ERR IRQ_IOP33X_AA_ERR
#endif
/* AAU and DMA Channels */
static struct resource iop3xx_dma_0_resources[] = {
[0] = {
.start = IOP3XX_DMA_PHYS_BASE(0),
.end = IOP3XX_DMA_UPPER_PA(0),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_DMA0_EOT,
.end = IRQ_DMA0_EOT,
.flags = IORESOURCE_IRQ
},
[2] = {
.start = IRQ_DMA0_EOC,
.end = IRQ_DMA0_EOC,
.flags = IORESOURCE_IRQ
},
[3] = {
.start = IRQ_DMA0_ERR,
.end = IRQ_DMA0_ERR,
.flags = IORESOURCE_IRQ
}
};
static struct resource iop3xx_dma_1_resources[] = {
[0] = {
.start = IOP3XX_DMA_PHYS_BASE(1),
.end = IOP3XX_DMA_UPPER_PA(1),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_DMA1_EOT,
.end = IRQ_DMA1_EOT,
.flags = IORESOURCE_IRQ
},
[2] = {
.start = IRQ_DMA1_EOC,
.end = IRQ_DMA1_EOC,
.flags = IORESOURCE_IRQ
},
[3] = {
.start = IRQ_DMA1_ERR,
.end = IRQ_DMA1_ERR,
.flags = IORESOURCE_IRQ
}
};
static struct resource iop3xx_aau_resources[] = {
[0] = {
.start = IOP3XX_AAU_PHYS_BASE,
.end = IOP3XX_AAU_UPPER_PA,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_AA_EOT,
.end = IRQ_AA_EOT,
.flags = IORESOURCE_IRQ
},
[2] = {
.start = IRQ_AA_EOC,
.end = IRQ_AA_EOC,
.flags = IORESOURCE_IRQ
},
[3] = {
.start = IRQ_AA_ERR,
.end = IRQ_AA_ERR,
.flags = IORESOURCE_IRQ
}
};
static u64 iop3xx_adma_dmamask = DMA_32BIT_MASK;
static struct iop_adma_platform_data iop3xx_dma_0_data = {
.hw_id = DMA0_ID,
.pool_size = PAGE_SIZE,
};
static struct iop_adma_platform_data iop3xx_dma_1_data = {
.hw_id = DMA1_ID,
.pool_size = PAGE_SIZE,
};
static struct iop_adma_platform_data iop3xx_aau_data = {
.hw_id = AAU_ID,
.pool_size = 3 * PAGE_SIZE,
};
struct platform_device iop3xx_dma_0_channel = {
.name = "iop-adma",
.id = 0,
.num_resources = 4,
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
struct platform_device iop3xx_dma_1_channel = {
.name = "iop-adma",
.id = 1,
.num_resources = 4,
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
struct platform_device iop3xx_aau_channel = {
.name = "iop-adma",
.id = 2,
.num_resources = 4,
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.platform_data = (void *) &iop3xx_aau_data,
},
};
static int __init iop3xx_adma_cap_init(void)
{
#ifdef CONFIG_ARCH_IOP32X /* the 32x DMA does not perform CRC32C */
dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
#else
dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_0_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
#endif
#ifdef CONFIG_ARCH_IOP32X /* the 32x DMA does not perform CRC32C */
dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
#else
dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_1_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
#endif
#ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */
dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#else
dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_ZERO_SUM, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#endif
return 0;
}
arch_initcall(iop3xx_adma_cap_init);
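
Note the arch_initcall() here: arch initcalls run before device initcalls,
so the capability masks above are populated before the iop-adma driver
probes these platform devices.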
#
# Generic algorithms support
#
config XOR_BLOCKS
tristate
#
# async_tx api: hardware offloaded memory transfer/transform support
#
source "crypto/async_tx/Kconfig"
#
# Cryptographic API Configuration
#
menu "Cryptographic options" menu "Cryptographic options"
config CRYPTO config CRYPTO
......
@@ -50,3 +50,9 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
#
# generic algorithms and the async_tx api
#
obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
config ASYNC_CORE
tristate
config ASYNC_MEMCPY
tristate
select ASYNC_CORE
config ASYNC_XOR
tristate
select ASYNC_CORE
select XOR_BLOCKS
config ASYNC_MEMSET
tristate
select ASYNC_CORE
obj-$(CONFIG_ASYNC_CORE) += async_tx.o
obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
obj-$(CONFIG_ASYNC_XOR) += async_xor.o
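
Combined with the MD_RAID456 selects added later in this merge, a build
with raid456 built in resolves to roughly this configuration fragment
(illustrative, not taken from a real .config):

	CONFIG_XOR_BLOCKS=y
	CONFIG_ASYNC_CORE=y
	CONFIG_ASYNC_MEMCPY=y
	CONFIG_ASYNC_XOR=y
	CONFIG_MD_RAID456=y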
/*
* copy offload engine support
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <dan.j.williams@intel.com>
*
* with architecture considerations by:
* Neil Brown <neilb@suse.de>
* Jeff Garzik <jeff@garzik.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/async_tx.h>
/**
* async_memcpy - attempt to copy memory with a dma engine.
* @dest: destination page
* @src: source page
* @dest_offset: offset into 'dest' to start the transaction
* @src_offset: offset into 'src' to start the transaction
* @len: length in bytes
* @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
* ASYNC_TX_KMAP_SRC, ASYNC_TX_KMAP_DST
* @depend_tx: memcpy depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
*/
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
unsigned int src_offset, size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
struct dma_device *device = chan ? chan->device : NULL;
int int_en = cb_fn ? 1 : 0;
struct dma_async_tx_descriptor *tx = device ?
device->device_prep_dma_memcpy(chan, len,
int_en) : NULL;
if (tx) { /* run the memcpy asynchronously */
dma_addr_t addr;
enum dma_data_direction dir;
pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
DMA_NONE : DMA_FROM_DEVICE;
addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
tx->tx_set_dest(addr, tx, 0);
dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
DMA_NONE : DMA_TO_DEVICE;
addr = dma_map_page(device->dev, src, src_offset, len, dir);
tx->tx_set_src(addr, tx, 0);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
} else { /* run the memcpy synchronously */
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
/* wait for any prerequisite operations */
if (depend_tx) {
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(depend_tx->ack);
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
__FUNCTION__);
}
if (flags & ASYNC_TX_KMAP_DST)
dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
else
dest_buf = page_address(dest) + dest_offset;
if (flags & ASYNC_TX_KMAP_SRC)
src_buf = kmap_atomic(src, KM_USER0) + src_offset;
else
src_buf = page_address(src) + src_offset;
memcpy(dest_buf, src_buf, len);
if (flags & ASYNC_TX_KMAP_DST)
kunmap_atomic(dest_buf, KM_USER0);
if (flags & ASYNC_TX_KMAP_SRC)
kunmap_atomic(src_buf, KM_USER0);
async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
static int __init async_memcpy_init(void)
{
return 0;
}
static void __exit async_memcpy_exit(void)
{
do { } while (0);
}
module_init(async_memcpy_init);
module_exit(async_memcpy_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");
/*
* memory fill offload engine support
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <dan.j.williams@intel.com>
*
* with architecture considerations by:
* Neil Brown <neilb@suse.de>
* Jeff Garzik <jeff@garzik.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/async_tx.h>
/**
* async_memset - attempt to fill memory with a dma engine.
* @dest: destination page
* @val: fill value
* @offset: offset into 'dest' to start the transaction
* @len: length in bytes
* @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: memset depends on the result of this transaction
* @cb_fn: function to call when the memset completes
* @cb_param: parameter to pass to the callback routine
*/
struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset,
size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
struct dma_device *device = chan ? chan->device : NULL;
int int_en = cb_fn ? 1 : 0;
struct dma_async_tx_descriptor *tx = device ?
device->device_prep_dma_memset(chan, val, len,
int_en) : NULL;
if (tx) { /* run the memset asynchronously */
dma_addr_t dma_addr;
enum dma_data_direction dir;
pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
DMA_NONE : DMA_FROM_DEVICE;
dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
tx->tx_set_dest(dma_addr, tx, 0);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
} else { /* run the memset synchronously */
void *dest_buf;
pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
dest_buf = (void *) (((char *) page_address(dest)) + offset);
/* wait for any prerequisite operations */
if (depend_tx) {
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(depend_tx->ack);
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
__FUNCTION__);
}
memset(dest_buf, val, len);
async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memset);
static int __init async_memset_init(void)
{
return 0;
}
static void __exit async_memset_exit(void)
{
do { } while (0);
}
module_init(async_memset_init);
module_exit(async_memset_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memset api");
MODULE_LICENSE("GPL");
...
/*
* xor offload engine api
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <dan.j.williams@intel.com>
*
* with architecture considerations by:
* Neil Brown <neilb@suse.de>
* Jeff Garzik <jeff@garzik.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
static void
do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, unsigned int src_cnt, size_t len,
enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
dma_addr_t dma_addr;
enum dma_data_direction dir;
int i;
pr_debug("%s: len: %zu\n", __FUNCTION__, len);
dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
DMA_NONE : DMA_FROM_DEVICE;
dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
tx->tx_set_dest(dma_addr, tx, 0);
dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
DMA_NONE : DMA_TO_DEVICE;
for (i = 0; i < src_cnt; i++) {
dma_addr = dma_map_page(device->dev, src_list[i],
offset, len, dir);
tx->tx_set_src(dma_addr, tx, i);
}
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
}
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
unsigned int src_cnt, size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
void *_dest;
int i;
pr_debug("%s: len: %zu\n", __FUNCTION__, len);
/* reuse the 'src_list' array to convert to buffer pointers */
for (i = 0; i < src_cnt; i++)
src_list[i] = (struct page *)
(page_address(src_list[i]) + offset);
/* set destination address */
_dest = page_address(dest) + offset;
if (flags & ASYNC_TX_XOR_ZERO_DST)
memset(_dest, 0, len);
xor_blocks(src_cnt, len, _dest,
(void **) src_list);
async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
/**
* async_xor - attempt to xor a set of blocks with a dma engine.
* xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
* flag must be set to not include dest data in the calculation. The
* assumption with dma engines is that they only use the destination
* buffer as a source when it is explicitly specified in the source list.
* @dest: destination page
* @src_list: array of source pages (if the dest is also a source it must be
* at index zero). The contents of this array may be overwritten.
* @offset: offset in pages to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
* ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
*/
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_async_tx_callback _cb_fn;
void *_cb_param;
unsigned long local_flags;
int xor_src_cnt;
int i = 0, src_off = 0, int_en;
BUG_ON(src_cnt <= 1);
while (src_cnt) {
local_flags = flags;
if (device) { /* run the xor asynchronously */
xor_src_cnt = min(src_cnt, device->max_xor);
/* if we are submitting additional xors
* only set the callback on the last transaction
*/
if (src_cnt > xor_src_cnt) {
local_flags &= ~ASYNC_TX_ACK;
_cb_fn = NULL;
_cb_param = NULL;
} else {
_cb_fn = cb_fn;
_cb_param = cb_param;
}
int_en = _cb_fn ? 1 : 0;
tx = device->device_prep_dma_xor(
chan, xor_src_cnt, len, int_en);
if (tx) {
do_async_xor(tx, device, chan, dest,
&src_list[src_off], offset, xor_src_cnt, len,
local_flags, depend_tx, _cb_fn,
_cb_param);
} else /* fall through */
goto xor_sync;
} else { /* run the xor synchronously */
xor_sync:
/* in the sync case the dest is an implied source
* (assumes the dest is at the src_off index)
*/
if (flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_off++;
}
/* process up to 'MAX_XOR_BLOCKS' sources */
xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
/* if we are submitting additional xors
* only set the callback on the last transaction
*/
if (src_cnt > xor_src_cnt) {
local_flags &= ~ASYNC_TX_ACK;
_cb_fn = NULL;
_cb_param = NULL;
} else {
_cb_fn = cb_fn;
_cb_param = cb_param;
}
/* wait for any prerequisite operations */
if (depend_tx) {
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(depend_tx->ack);
if (dma_wait_for_async_tx(depend_tx) ==
DMA_ERROR)
panic("%s: DMA_ERROR waiting for "
"depend_tx\n",
__FUNCTION__);
}
do_sync_xor(dest, &src_list[src_off], offset,
xor_src_cnt, len, local_flags, depend_tx,
_cb_fn, _cb_param);
}
/* the previous tx is hidden from the client,
* so ack it
*/
if (i && depend_tx)
async_tx_ack(depend_tx);
depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
src_off += xor_src_cnt;
/* unconditionally preserve the destination */
flags &= ~ASYNC_TX_XOR_ZERO_DST;
/* use the intermediate result as a source, but remember
* it's dropped, because it's implied, in the sync case
*/
src_list[--src_off] = dest;
src_cnt++;
flags |= ASYNC_TX_XOR_DROP_DST;
} else
src_cnt = 0;
i++;
}
return tx;
}
EXPORT_SYMBOL_GPL(async_xor);
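
/* page_is_zero below relies on an overlapping-compare trick: if the first
 * word is zero and the region equals itself shifted by four bytes, then
 * by induction every byte in the region is zero.
 */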
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
char *a = page_address(p) + offset;
return ((*(u32 *) a) == 0 &&
memcmp(a, a + 4, len - 4) == 0);
}
/**
* async_xor_zero_sum - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
* @src_list: array of source pages. The dest page must be listed as a source
* at index zero. The contents of this array may be overwritten.
* @offset: offset in pages to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
* @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
*/
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
unsigned int offset, int src_cnt, size_t len,
u32 *result, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
struct dma_device *device = chan ? chan->device : NULL;
int int_en = cb_fn ? 1 : 0;
struct dma_async_tx_descriptor *tx = device ?
device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
int_en) : NULL;
int i;
BUG_ON(src_cnt <= 1);
if (tx) {
dma_addr_t dma_addr;
enum dma_data_direction dir;
pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
DMA_NONE : DMA_TO_DEVICE;
for (i = 0; i < src_cnt; i++) {
dma_addr = dma_map_page(device->dev, src_list[i],
offset, len, dir);
tx->tx_set_src(dma_addr, tx, i);
}
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
} else {
unsigned long xor_flags = flags;
pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
xor_flags |= ASYNC_TX_XOR_DROP_DST;
xor_flags &= ~ASYNC_TX_ACK;
tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
depend_tx, NULL, NULL);
if (tx) {
if (dma_wait_for_async_tx(tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for tx\n",
__FUNCTION__);
async_tx_ack(tx);
}
*result = page_is_zero(dest, offset, len) ? 0 : 1;
tx = NULL;
async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_xor_zero_sum);
static int __init async_xor_init(void)
{
return 0;
}
static void __exit async_xor_exit(void)
{
do { } while (0);
}
module_init(async_xor_init);
module_exit(async_xor_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");
@@ -26,32 +26,32 @@
static struct xor_block_template *active_template;
void
xor_blocks(unsigned int src_count, unsigned int bytes, void *dest, void **srcs)
{
unsigned long *p1, *p2, *p3, *p4;
p1 = (unsigned long *) srcs[0];
if (src_count == 1) {
active_template->do_2(bytes, dest, p1);
return;
}
p2 = (unsigned long *) srcs[1];
if (src_count == 2) {
active_template->do_3(bytes, dest, p1, p2);
return;
}
p3 = (unsigned long *) srcs[2];
if (src_count == 3) {
active_template->do_4(bytes, dest, p1, p2, p3);
return;
}
p4 = (unsigned long *) srcs[3];
active_template->do_5(bytes, dest, p1, p2, p3, p4);
}
EXPORT_SYMBOL(xor_blocks);
/* Set of all registered templates. */
static struct xor_block_template *template_list;
@@ -78,7 +78,7 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
now = jiffies;
count = 0;
while (jiffies == now) {
mb(); /* prevent loop optimization */
tmpl->do_2(BENCH_SIZE, b1, b2);
mb();
count++;
@@ -91,26 +91,26 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
speed = max * (HZ * BENCH_SIZE / 1024);
tmpl->speed = speed;
printk(KERN_INFO " %-10s: %5d.%03d MB/sec\n", tmpl->name,
speed / 1000, speed % 1000);
}
static int __init
calibrate_xor_blocks(void)
{
void *b1, *b2;
struct xor_block_template *f, *fastest;
b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
if (!b1) {
printk(KERN_WARNING "xor: Yikes! No memory available.\n");
return -ENOMEM;
}
b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
/*
* If this arch/cpu has a short-circuited selection, don't loop through
* all the possible functions, just test the best one
*/
fastest = NULL;
@@ -122,11 +122,12 @@ calibrate_xor_block(void)
#define xor_speed(templ) do_xor_speed((templ), b1, b2)
if (fastest) {
printk(KERN_INFO "xor: automatically using best "
"checksumming function: %s\n",
fastest->name);
xor_speed(fastest);
} else {
printk(KERN_INFO "xor: measuring software checksum speed\n");
XOR_TRY_TEMPLATES;
fastest = template_list;
for (f = fastest; f; f = f->next)
@@ -134,7 +135,7 @@ calibrate_xor_block(void)
fastest = f;
}
printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
fastest->name, fastest->speed / 1000, fastest->speed % 1000);
#undef xor_speed
@@ -147,8 +148,8 @@ calibrate_xor_block(void)
static __exit void xor_exit(void) { }
EXPORT_SYMBOL(xor_block);
MODULE_LICENSE("GPL");
/* when built-in xor.o must initialize before drivers/md/md.o */
core_initcall(calibrate_xor_blocks);
module_exit(xor_exit);
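
The interface change is easiest to see at a call site; a hypothetical
before/after (pointer names assumed):

	void *ptr[3] = { dest, src1, src2 };
	xor_block(3, PAGE_SIZE, ptr);		/* old: dest hidden in ptr[0] */

	void *srcs[2] = { src1, src2 };
	xor_blocks(2, PAGE_SIZE, dest, srcs);	/* new: explicit dest, sources only */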
@@ -8,8 +8,8 @@ menu "DMA Engine support"
config DMA_ENGINE
bool "Support for DMA engines"
---help---
DMA engines offload bulk memory operations from the CPU to dedicated
hardware, allowing the operations to happen asynchronously.
comment "DMA Clients"
@@ -32,4 +32,12 @@ config INTEL_IOATDMA
---help---
Enable support for the Intel(R) I/OAT DMA engine.
config INTEL_IOP_ADMA
tristate "Intel IOP ADMA support"
depends on DMA_ENGINE && (ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX)
select ASYNC_CORE
default m
---help---
Enable support for the Intel(R) IOP Series RAID engines.
endmenu
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
...
...
@@ -30,9 +30,6 @@
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
extern struct list_head dma_device_list;
extern struct list_head dma_client_list;
/**
* struct ioat_device - internal representation of an IOAT device
* @pdev: PCI-Express device
@@ -105,21 +102,20 @@ struct ioat_dma_chan {
/**
* struct ioat_desc_sw - wrapper around hardware descriptor
* @hw: hardware DMA descriptor
* @node: this descriptor will either be on the free list,
* or attached to a transaction list (async_tx.tx_list)
* @tx_cnt: number of descriptors required to complete the transaction
* @async_tx: the generic software descriptor for all engines
*/
struct ioat_desc_sw {
struct ioat_dma_descriptor *hw;
struct list_head node;
int tx_cnt;
DECLARE_PCI_UNMAP_ADDR(src)
DECLARE_PCI_UNMAP_LEN(src_len)
DECLARE_PCI_UNMAP_ADDR(dst)
DECLARE_PCI_UNMAP_LEN(dst_len)
struct dma_async_tx_descriptor async_tx;
};
#endif /* IOATDMA_H */
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef IOATDMA_IO_H
#define IOATDMA_IO_H
#include <asm/io.h>
/*
* device and per-channel MMIO register read and write functions
* this is a lot of annoying inline functions, but it's typesafe
*/
static inline u8 ioatdma_read8(struct ioat_device *device,
unsigned int offset)
{
return readb(device->reg_base + offset);
}
static inline u16 ioatdma_read16(struct ioat_device *device,
unsigned int offset)
{
return readw(device->reg_base + offset);
}
static inline u32 ioatdma_read32(struct ioat_device *device,
unsigned int offset)
{
return readl(device->reg_base + offset);
}
static inline void ioatdma_write8(struct ioat_device *device,
unsigned int offset, u8 value)
{
writeb(value, device->reg_base + offset);
}
static inline void ioatdma_write16(struct ioat_device *device,
unsigned int offset, u16 value)
{
writew(value, device->reg_base + offset);
}
static inline void ioatdma_write32(struct ioat_device *device,
unsigned int offset, u32 value)
{
writel(value, device->reg_base + offset);
}
static inline u8 ioatdma_chan_read8(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readb(chan->reg_base + offset);
}
static inline u16 ioatdma_chan_read16(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readw(chan->reg_base + offset);
}
static inline u32 ioatdma_chan_read32(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readl(chan->reg_base + offset);
}
static inline void ioatdma_chan_write8(struct ioat_dma_chan *chan,
unsigned int offset, u8 value)
{
writeb(value, chan->reg_base + offset);
}
static inline void ioatdma_chan_write16(struct ioat_dma_chan *chan,
unsigned int offset, u16 value)
{
writew(value, chan->reg_base + offset);
}
static inline void ioatdma_chan_write32(struct ioat_dma_chan *chan,
unsigned int offset, u32 value)
{
writel(value, chan->reg_base + offset);
}
#if (BITS_PER_LONG == 64)
static inline u64 ioatdma_chan_read64(struct ioat_dma_chan *chan,
unsigned int offset)
{
return readq(chan->reg_base + offset);
}
static inline void ioatdma_chan_write64(struct ioat_dma_chan *chan,
unsigned int offset, u64 value)
{
writeq(value, chan->reg_base + offset);
}
#endif
#endif /* IOATDMA_IO_H */
...
@@ -109,6 +109,8 @@ config MD_RAID10
config MD_RAID456
tristate "RAID-4/RAID-5/RAID-6 mode"
depends on BLK_DEV_MD
select ASYNC_MEMCPY
select ASYNC_XOR
---help---
A RAID-5 set of N drives with a capacity of C MB per drive provides
the capacity of C * (N - 1) MB, and protects against a failure
...
@@ -18,7 +18,7 @@ raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
hostprogs-y := mktables
# Note: link order is important. All raid personalities
# must come before md.o, as they each initialise
# themselves, and md.o may use the personalities when it
# is auto-initialised.
@@ -26,7 +26,7 @@ obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
...
@@ -5814,7 +5814,7 @@ static __exit void md_exit(void)
}
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
...
...
...
@@ -166,12 +166,22 @@ static inline int iop13xx_cpu_id(void)
#define IOP13XX_INIT_I2C_1 (1 << 1)
#define IOP13XX_INIT_I2C_2 (1 << 2)
/* ADMA selection flags */
/* INIT_ADMA_DEFAULT = Rely on CONFIG_IOP13XX_ADMA* */
#define IOP13XX_INIT_ADMA_DEFAULT (0)
#define IOP13XX_INIT_ADMA_0 (1 << 0)
#define IOP13XX_INIT_ADMA_1 (1 << 1)
#define IOP13XX_INIT_ADMA_2 (1 << 2)
/* Platform devices */
#define IQ81340_NUM_UART 2
#define IQ81340_NUM_I2C 3
#define IQ81340_NUM_PHYS_MAP_FLASH 1
#define IQ81340_NUM_ADMA 3
#define IQ81340_MAX_PLAT_DEVICES (IQ81340_NUM_UART + \
IQ81340_NUM_I2C + \
IQ81340_NUM_PHYS_MAP_FLASH + \
IQ81340_NUM_ADMA)
/*========================== PMMR offsets for key registers ============*/
#define IOP13XX_ATU0_PMMR_OFFSET 0x00048000
@@ -444,22 +454,6 @@ static inline int iop13xx_cpu_id(void)
/*==============================ADMA UNITS===============================*/
#define IOP13XX_ADMA_PHYS_BASE(chan) IOP13XX_REG_ADDR32_PHYS((chan << 9))
#define IOP13XX_ADMA_UPPER_PA(chan) (IOP13XX_ADMA_PHYS_BASE(chan) + 0xc0)
#define IOP13XX_ADMA_OFFSET(chan, ofs) IOP13XX_REG_ADDR32((chan << 9) + (ofs))
#define IOP13XX_ADMA_ACCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x0)
#define IOP13XX_ADMA_ACSR(chan) IOP13XX_ADMA_OFFSET(chan, 0x4)
#define IOP13XX_ADMA_ADAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x8)
#define IOP13XX_ADMA_IIPCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x18)
#define IOP13XX_ADMA_IIPAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x1c)
#define IOP13XX_ADMA_IIPUAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x20)
#define IOP13XX_ADMA_ANDAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x24)
#define IOP13XX_ADMA_ADCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x28)
#define IOP13XX_ADMA_CARMD(chan) IOP13XX_ADMA_OFFSET(chan, 0x2c)
#define IOP13XX_ADMA_ABCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x30)
#define IOP13XX_ADMA_DLADR(chan) IOP13XX_ADMA_OFFSET(chan, 0x34)
#define IOP13XX_ADMA_DUADR(chan) IOP13XX_ADMA_OFFSET(chan, 0x38)
#define IOP13XX_ADMA_SLAR(src, chan) IOP13XX_ADMA_OFFSET(chan, 0x3c + (src <<3))
#define IOP13XX_ADMA_SUAR(src, chan) IOP13XX_ADMA_OFFSET(chan, 0x40 + (src <<3))
/*==============================XSI BRIDGE===============================*/
#define IOP13XX_XBG_BECSR IOP13XX_REG_ADDR32(0x178c)
...
#ifndef IOP32X_ADMA_H
#define IOP32X_ADMA_H
#include <asm/hardware/iop3xx-adma.h>
#endif
#ifndef IOP33X_ADMA_H
#define IOP33X_ADMA_H
#include <asm/hardware/iop3xx-adma.h>
#endif
...
@@ -144,24 +144,9 @@ extern int init_atu;
#define IOP3XX_IAR (volatile u32 *)IOP3XX_REG_ADDR(0x0380)
/* DMA Controller */
#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \
(0x400 + (chan << 6)))
#define IOP3XX_DMA_UPPER_PA(chan) (IOP3XX_DMA_PHYS_BASE(chan) + 0x27)
#define IOP3XX_DMA0_NDAR (volatile u32 *)IOP3XX_REG_ADDR(0x0410)
#define IOP3XX_DMA0_PADR (volatile u32 *)IOP3XX_REG_ADDR(0x0414)
#define IOP3XX_DMA0_PUADR (volatile u32 *)IOP3XX_REG_ADDR(0x0418)
#define IOP3XX_DMA0_LADR (volatile u32 *)IOP3XX_REG_ADDR(0x041c)
#define IOP3XX_DMA0_BCR (volatile u32 *)IOP3XX_REG_ADDR(0x0420)
#define IOP3XX_DMA0_DCR (volatile u32 *)IOP3XX_REG_ADDR(0x0424)
#define IOP3XX_DMA1_CCR (volatile u32 *)IOP3XX_REG_ADDR(0x0440)
#define IOP3XX_DMA1_CSR (volatile u32 *)IOP3XX_REG_ADDR(0x0444)
#define IOP3XX_DMA1_DAR (volatile u32 *)IOP3XX_REG_ADDR(0x044c)
#define IOP3XX_DMA1_NDAR (volatile u32 *)IOP3XX_REG_ADDR(0x0450)
#define IOP3XX_DMA1_PADR (volatile u32 *)IOP3XX_REG_ADDR(0x0454)
#define IOP3XX_DMA1_PUADR (volatile u32 *)IOP3XX_REG_ADDR(0x0458)
#define IOP3XX_DMA1_LADR (volatile u32 *)IOP3XX_REG_ADDR(0x045c)
#define IOP3XX_DMA1_BCR (volatile u32 *)IOP3XX_REG_ADDR(0x0460)
#define IOP3XX_DMA1_DCR (volatile u32 *)IOP3XX_REG_ADDR(0x0464)
/* Peripheral bus interface */
#define IOP3XX_PBCR (volatile u32 *)IOP3XX_REG_ADDR(0x0680)
@@ -210,48 +195,8 @@ extern int init_atu;
#define IOP_TMR_RATIO_1_1 0x00
/* Application accelerator unit */
#define IOP3XX_AAU_PHYS_BASE (IOP3XX_PERIPHERAL_PHYS_BASE + 0x800)
#define IOP3XX_AAU_UPPER_PA (IOP3XX_AAU_PHYS_BASE + 0xa7)
#define IOP3XX_AAU_ADAR (volatile u32 *)IOP3XX_REG_ADDR(0x0808)
#define IOP3XX_AAU_ANDAR (volatile u32 *)IOP3XX_REG_ADDR(0x080c)
#define IOP3XX_AAU_SAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0810)
#define IOP3XX_AAU_SAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0814)
#define IOP3XX_AAU_SAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0818)
#define IOP3XX_AAU_SAR4 (volatile u32 *)IOP3XX_REG_ADDR(0x081c)
#define IOP3XX_AAU_DAR (volatile u32 *)IOP3XX_REG_ADDR(0x0820)
#define IOP3XX_AAU_ABCR (volatile u32 *)IOP3XX_REG_ADDR(0x0824)
#define IOP3XX_AAU_ADCR (volatile u32 *)IOP3XX_REG_ADDR(0x0828)
#define IOP3XX_AAU_SAR5 (volatile u32 *)IOP3XX_REG_ADDR(0x082c)
#define IOP3XX_AAU_SAR6 (volatile u32 *)IOP3XX_REG_ADDR(0x0830)
#define IOP3XX_AAU_SAR7 (volatile u32 *)IOP3XX_REG_ADDR(0x0834)
#define IOP3XX_AAU_SAR8 (volatile u32 *)IOP3XX_REG_ADDR(0x0838)
#define IOP3XX_AAU_EDCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x083c)
#define IOP3XX_AAU_SAR9 (volatile u32 *)IOP3XX_REG_ADDR(0x0840)
#define IOP3XX_AAU_SAR10 (volatile u32 *)IOP3XX_REG_ADDR(0x0844)
#define IOP3XX_AAU_SAR11 (volatile u32 *)IOP3XX_REG_ADDR(0x0848)
#define IOP3XX_AAU_SAR12 (volatile u32 *)IOP3XX_REG_ADDR(0x084c)
#define IOP3XX_AAU_SAR13 (volatile u32 *)IOP3XX_REG_ADDR(0x0850)
#define IOP3XX_AAU_SAR14 (volatile u32 *)IOP3XX_REG_ADDR(0x0854)
#define IOP3XX_AAU_SAR15 (volatile u32 *)IOP3XX_REG_ADDR(0x0858)
#define IOP3XX_AAU_SAR16 (volatile u32 *)IOP3XX_REG_ADDR(0x085c)
#define IOP3XX_AAU_EDCR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0860)
#define IOP3XX_AAU_SAR17 (volatile u32 *)IOP3XX_REG_ADDR(0x0864)
#define IOP3XX_AAU_SAR18 (volatile u32 *)IOP3XX_REG_ADDR(0x0868)
#define IOP3XX_AAU_SAR19 (volatile u32 *)IOP3XX_REG_ADDR(0x086c)
#define IOP3XX_AAU_SAR20 (volatile u32 *)IOP3XX_REG_ADDR(0x0870)
#define IOP3XX_AAU_SAR21 (volatile u32 *)IOP3XX_REG_ADDR(0x0874)
#define IOP3XX_AAU_SAR22 (volatile u32 *)IOP3XX_REG_ADDR(0x0878)
#define IOP3XX_AAU_SAR23 (volatile u32 *)IOP3XX_REG_ADDR(0x087c)
#define IOP3XX_AAU_SAR24 (volatile u32 *)IOP3XX_REG_ADDR(0x0880)
#define IOP3XX_AAU_EDCR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0884)
#define IOP3XX_AAU_SAR25 (volatile u32 *)IOP3XX_REG_ADDR(0x0888)
#define IOP3XX_AAU_SAR26 (volatile u32 *)IOP3XX_REG_ADDR(0x088c)
#define IOP3XX_AAU_SAR27 (volatile u32 *)IOP3XX_REG_ADDR(0x0890)
#define IOP3XX_AAU_SAR28 (volatile u32 *)IOP3XX_REG_ADDR(0x0894)
#define IOP3XX_AAU_SAR29 (volatile u32 *)IOP3XX_REG_ADDR(0x0898)
#define IOP3XX_AAU_SAR30 (volatile u32 *)IOP3XX_REG_ADDR(0x089c)
#define IOP3XX_AAU_SAR31 (volatile u32 *)IOP3XX_REG_ADDR(0x08a0)
#define IOP3XX_AAU_SAR32 (volatile u32 *)IOP3XX_REG_ADDR(0x08a4)
/* I2C bus interface unit */
#define IOP3XX_ICR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1680)
@@ -329,6 +274,9 @@ static inline void write_tisr(u32 val)
	asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val));
}
+extern struct platform_device iop3xx_dma_0_channel;
+extern struct platform_device iop3xx_dma_1_channel;
+extern struct platform_device iop3xx_aau_channel;
extern struct platform_device iop3xx_i2c0_device;
extern struct platform_device iop3xx_i2c1_device;
...
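Note: the new IOP3XX_DMA_PHYS_BASE(chan) macro collapses the per-channel register blocks removed above into one expression: chan << 6 is a 0x40 stride, so channel 0 lands on the old 0x400 (DMA0) block and channel 1 on 0x440 (DMA1). A small userspace check of that math; the peripheral base value below is illustrative, not taken from this diff:

/* Userspace check: IOP3XX_DMA_PHYS_BASE(chan) reproduces the old fixed
 * register bases, since chan << 6 == chan * 0x40.
 */
#include <assert.h>
#include <stdio.h>

#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000UL	/* illustrative base */
#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \
					(0x400 + ((chan) << 6)))
#define IOP3XX_DMA_UPPER_PA(chan) (IOP3XX_DMA_PHYS_BASE(chan) + 0x27)

int main(void)
{
	/* chan 0 -> old DMA0 block at +0x400, chan 1 -> old DMA1 at +0x440 */
	assert(IOP3XX_DMA_PHYS_BASE(0) == IOP3XX_PERIPHERAL_PHYS_BASE + 0x400);
	assert(IOP3XX_DMA_PHYS_BASE(1) == IOP3XX_PERIPHERAL_PHYS_BASE + 0x440);

	printf("chan 1 registers: %#lx..%#lx\n",
	       IOP3XX_DMA_PHYS_BASE(1), IOP3XX_DMA_UPPER_PA(1));
	return 0;
}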
/*
* Copyright © 2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef IOP_ADMA_H
#define IOP_ADMA_H
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#define IOP_ADMA_SLOT_SIZE 32
#define IOP_ADMA_THRESHOLD 4
/**
* struct iop_adma_device - internal representation of an ADMA device
* @pdev: Platform device
* @id: HW ADMA Device selector
* @dma_desc_pool: base of DMA descriptor region (DMA address)
* @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
* @common: embedded struct dma_device
*/
struct iop_adma_device {
struct platform_device *pdev;
int id;
dma_addr_t dma_desc_pool;
void *dma_desc_pool_virt;
struct dma_device common;
};
/**
* struct iop_adma_chan - internal representation of an ADMA channel
* @pending: allows batching of hardware operations
* @completed_cookie: identifier for the most recently completed operation
* @lock: serializes enqueue/dequeue operations to the slot pool
* @mmr_base: memory mapped register base
* @chain: device chain view of the descriptors
* @device: parent device
* @common: common dmaengine channel object members
* @last_used: place holder for allocation to continue from where it left off
* @all_slots: complete domain of slots usable by the channel
* @cleanup_watchdog: workaround missed interrupts on iop3xx
* @slots_allocated: records the actual size of the descriptor slot pool
* @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
*/
struct iop_adma_chan {
int pending;
dma_cookie_t completed_cookie;
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
struct list_head chain;
struct iop_adma_device *device;
struct dma_chan common;
struct iop_adma_desc_slot *last_used;
struct list_head all_slots;
struct timer_list cleanup_watchdog;
int slots_allocated;
struct tasklet_struct irq_tasklet;
};
/**
* struct iop_adma_desc_slot - IOP-ADMA software descriptor
* @slot_node: node on the iop_adma_chan.all_slots list
* @chain_node: node on the iop_adma_chan.chain list
* @hw_desc: virtual address of the hardware descriptor chain
* @phys: hardware address of the hardware descriptor chain
* @group_head: first operation in a transaction
* @slot_cnt: total slots used in a transaction (group of operations)
* @slots_per_op: number of slots per operation
* @idx: pool index
* @unmap_src_cnt: number of xor sources
* @unmap_len: transaction bytecount
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction,
* for example transfer lengths larger than the supported hw max
* @xor_check_result: result of zero sum
* @crc32_result: result of crc calculation
*/
struct iop_adma_desc_slot {
struct list_head slot_node;
struct list_head chain_node;
void *hw_desc;
struct iop_adma_desc_slot *group_head;
u16 slot_cnt;
u16 slots_per_op;
u16 idx;
u16 unmap_src_cnt;
size_t unmap_len;
struct dma_async_tx_descriptor async_tx;
union {
u32 *xor_check_result;
u32 *crc32_result;
};
};
struct iop_adma_platform_data {
int hw_id;
dma_cap_mask_t cap_mask;
size_t pool_size;
};
#define to_iop_sw_desc(addr_hw_desc) \
container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
#define iop_hw_desc_slot_idx(hw_desc, idx) \
( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) )
#endif
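Note: the slot-pool macros above encode IOP_ADMA_SLOT_SIZE directly in the shift (idx << 5 is idx * 32). A minimal userspace sketch of that math, with the macro body copied from the header and a hypothetical 16-slot buffer standing in for dma_desc_pool_virt:

/* Userspace model of the slot-index math from iop_adma.h above.
 * The pool buffer and indices are illustrative.
 */
#include <assert.h>
#include <stdio.h>

#define IOP_ADMA_SLOT_SIZE 32

#define iop_hw_desc_slot_idx(hw_desc, idx) \
	((void *)(((unsigned long)(hw_desc)) + ((idx) << 5)))

int main(void)
{
	static char pool[16 * IOP_ADMA_SLOT_SIZE];	/* stand-in pool */
	int idx;

	/* idx << 5 advances exactly one IOP_ADMA_SLOT_SIZE stride per slot */
	for (idx = 0; idx < 16; idx++)
		assert(iop_hw_desc_slot_idx(pool, idx) ==
		       (void *)(pool + idx * IOP_ADMA_SLOT_SIZE));

	printf("slot 3 starts %ld bytes into the pool\n",
	       (long)((char *)iop_hw_desc_slot_idx(pool, 3) - pool));
	return 0;
}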
@@ -479,6 +479,9 @@
#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
+#define PCI_VENDOR_ID_UNISYS 0x1018
+#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C
#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005
...
@@ -3,9 +3,10 @@
#include <linux/raid/md.h>
-#define MAX_XOR_BLOCKS 5
+#define MAX_XOR_BLOCKS 4

-extern void xor_block(unsigned int count, unsigned int bytes, void **ptr);
+extern void xor_blocks(unsigned int count, unsigned int bytes,
+		       void *dest, void **srcs);

struct xor_block_template {
	struct xor_block_template *next;
...
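Note: the hunk above renames xor_block() to xor_blocks() and splits the destination out of the source array, so callers pass dest separately and srcs holds only source blocks. A minimal userspace model of the new calling convention; xor_blocks_model() is a hypothetical stand-in, and the byte-wise loop is illustrative rather than the kernel implementation, which dispatches to optimized xor_block_template routines:

/* Userspace model of the new xor_blocks() signature. */
#include <stdio.h>

#define MAX_XOR_BLOCKS 4

static void xor_blocks_model(unsigned int count, unsigned int bytes,
			     void *dest, void **srcs)
{
	unsigned char *d = dest;
	unsigned int i, b;

	/* XOR each source block into dest; dest is no longer srcs[0] */
	for (i = 0; i < count; i++) {
		unsigned char *s = srcs[i];
		for (b = 0; b < bytes; b++)
			d[b] ^= s[b];
	}
}

int main(void)
{
	unsigned char d[4] = { 0, 0, 0, 0 };
	unsigned char s0[4] = { 1, 2, 3, 4 };
	unsigned char s1[4] = { 1, 2, 3, 4 };
	void *srcs[MAX_XOR_BLOCKS] = { s0, s1 };

	xor_blocks_model(2, sizeof(d), d, srcs);
	printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);	/* all zero */
	return 0;
}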
@@ -1116,6 +1116,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
+	struct sk_buff *skb;

	lock_sock(sk);
@@ -1142,16 +1143,26 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
-	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
-		preempt_enable_no_resched();
-		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
-	} else
-		preempt_enable_no_resched();
+	skb = skb_peek_tail(&sk->sk_receive_queue);
+	{
+		int available = 0;
+
+		if (skb)
+			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+		if ((available < target) &&
+		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+		    !sysctl_tcp_low_latency &&
+		    __get_cpu_var(softnet_data).net_dma) {
+			preempt_enable_no_resched();
+			tp->ucopy.pinned_list =
+				dma_pin_iovec_pages(msg->msg_iov, len);
+		} else {
+			preempt_enable_no_resched();
+		}
+	}
#endif

	do {
-		struct sk_buff *skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
@@ -1439,7 +1450,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
-		struct sk_buff *skb;
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
...
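Note: the tcp_recvmsg() change above only pins the user iovec for DMA offload when the receive queue cannot already satisfy the read (available < target), on top of the existing copybreak, MSG_PEEK, and low-latency checks. A standalone sketch of that gating decision; should_pin_for_dma() is a hypothetical helper, and the values in main() are illustrative:

/* Standalone model of the early-pinning gate added to tcp_recvmsg(). */
#include <stdbool.h>
#include <stdio.h>

static bool should_pin_for_dma(int available, int target, unsigned long len,
			       bool msg_peek, bool low_latency,
			       unsigned long dma_copybreak)
{
	return available < target &&	/* queue can't satisfy the read yet */
	       len > dma_copybreak &&	/* read is big enough to offload */
	       !msg_peek &&		/* peeks must not consume via DMA */
	       !low_latency;		/* sysctl_tcp_low_latency opts out */
}

int main(void)
{
	/* 2 KiB queued against an 8 KiB target, 64 KiB read, 4 KiB copybreak */
	printf("pin: %d\n",
	       should_pin_for_dma(2048, 8192, 65536, false, false, 4096));
	return 0;
}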