Commit 9a583e9f authored by James Bottomley, committed by James Bottomley

qla2xxx: DMA pool/api usage

From: Andrew Vasquez <andrew.vasquez@qlogic.com>

Rework driver DMA allocations to use the DMA pool APIs
to minimize potential run-time allocation failures.
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>

Fixed up rejections and replaced the schedule_timeout calls in the patch with msleep.
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent ff78254e
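For readers unfamiliar with the two APIs the patch moves to, the sketch below shows the allocation pattern in isolation: dma_alloc_coherent() for large, long-lived buffers (the request/response rings, gid_list, rlc_rsp) and a dma_pool for the small per-command buffers that previously went through pci_alloc_consistent() at run time, plus the msleep()-based retry used when an allocation fails. All names here (my_hba, MY_RING_SIZE, my_hba_mem_alloc, ...) are illustrative placeholders, not symbols from the driver; treat this as a minimal sketch of the kernel DMA-pool idiom under those assumptions, not the driver's actual code.

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical per-adapter state, loosely modelled on scsi_qla_host_t. */
struct my_hba {
    struct pci_dev *pdev;
    struct dma_pool *s_dma_pool;    /* small (<= 256 byte) DMA buffers */
    void *ring;                     /* large coherent allocation */
    dma_addr_t ring_dma;
};

#define MY_POOL_SIZE    256
#define MY_RING_SIZE    (4096 * 4)

static int my_hba_mem_alloc(struct my_hba *ha)
{
    int retry = 10;

    do {
        /* Large, long-lived buffers still come from dma_alloc_coherent(). */
        ha->ring = dma_alloc_coherent(&ha->pdev->dev, MY_RING_SIZE,
            &ha->ring_dma, GFP_KERNEL);

        /*
         * Small per-command buffers come from a dma_pool created once at
         * attach time, so run-time paths only carve a chunk out of an
         * already-mapped page instead of hitting the page allocator.
         */
        ha->s_dma_pool = dma_pool_create("my_hba", &ha->pdev->dev,
            MY_POOL_SIZE, 8, 0);

        if (ha->ring && ha->s_dma_pool)
            return 0;

        /* Back off and retry, sleeping instead of busy-waiting. */
        if (ha->ring) {
            dma_free_coherent(&ha->pdev->dev, MY_RING_SIZE,
                ha->ring, ha->ring_dma);
            ha->ring = NULL;
        }
        if (ha->s_dma_pool) {
            dma_pool_destroy(ha->s_dma_pool);
            ha->s_dma_pool = NULL;
        }
        msleep(100);
    } while (--retry);

    return -ENOMEM;
}

/* Run-time use: grab a small buffer from the pool, use it, put it back. */
static int my_hba_small_cmd(struct my_hba *ha)
{
    dma_addr_t buf_dma;
    void *buf;

    buf = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &buf_dma);
    if (!buf)
        return -ENOMEM;

    /* ... hand buf_dma to the firmware and wait for completion ... */

    dma_pool_free(ha->s_dma_pool, buf, buf_dma);
    return 0;
}

Pre-creating the pool at attach time is what minimizes the run-time allocation failures the commit message refers to: the hot path never has to ask the allocator for a fresh coherent page just to issue a small mailbox or inquiry command.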
@@ -26,8 +26,10 @@
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/dmapool.h>
 #include <linux/mempool.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -2029,6 +2031,16 @@ struct qla_board_info {
     struct qla_fw_info *fw_info;
 };

+/* Return data from MBC_GET_ID_LIST call. */
+struct gid_list_info {
+    uint8_t  al_pa;
+    uint8_t  area;
+    uint8_t  domain;
+    uint8_t  loop_id_2100;  /* ISP2100/ISP2200 -- 4 bytes. */
+    uint16_t loop_id;       /* ISP23XX         -- 6 bytes. */
+};
+#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
+
 /*
  * Linux Host Adapter structure
  */
@@ -2240,8 +2252,6 @@ typedef struct scsi_qla_host {
     struct io_descriptor io_descriptors[MAX_IO_DESCRIPTORS];
     uint16_t iodesc_signature;
-    port_database_t *iodesc_pd;
-    dma_addr_t iodesc_pd_dma;

     /* OS target queue pointers. */
     os_tgt_t *otgt[MAX_FIBRE_DEVICES];
@@ -2276,10 +2286,22 @@ typedef struct scsi_qla_host {
     uint32_t timer_active;
     struct timer_list timer;

-    /* Firmware Initialization Control Block data */
-    dma_addr_t init_cb_dma;          /* Physical address. */
+    dma_addr_t gid_list_dma;
+    struct gid_list_info *gid_list;
+
+    dma_addr_t rlc_rsp_dma;
+    rpt_lun_cmd_rsp_t *rlc_rsp;
+
+    /* Small DMA pool allocations -- maximum 256 bytes in length. */
+#define DMA_POOL_SIZE 256
+    struct dma_pool *s_dma_pool;
+
+    dma_addr_t init_cb_dma;
     init_cb_t *init_cb;

+    dma_addr_t iodesc_pd_dma;
+    port_database_t *iodesc_pd;
+
     /* These are used by mailbox operations. */
     volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
......
@@ -51,8 +51,7 @@ static void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 static void qla2x00_lun_discovery(scsi_qla_host_t *, fc_port_t *);
 static int qla2x00_rpt_lun_discovery(scsi_qla_host_t *, fc_port_t *,
     inq_cmd_rsp_t *, dma_addr_t);
-static int qla2x00_report_lun(scsi_qla_host_t *, fc_port_t *,
-    rpt_lun_cmd_rsp_t *, dma_addr_t);
+static int qla2x00_report_lun(scsi_qla_host_t *, fc_port_t *);
 static fc_lun_t *qla2x00_cfg_lun(scsi_qla_host_t *, fc_port_t *, uint16_t,
     inq_cmd_rsp_t *, dma_addr_t);
 static fc_lun_t * qla2x00_add_lun(fc_port_t *, uint16_t);
@@ -690,8 +689,9 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
         return;

     /* Attempt to claim larger area for request queue. */
-    request_ring = pci_alloc_consistent(ha->pdev,
-        (request_q_length + 1) * sizeof(request_t), &request_dma);
+    request_ring = dma_alloc_coherent(&ha->pdev->dev,
+        (request_q_length + 1) * sizeof(request_t), &request_dma,
+        GFP_KERNEL);
     if (request_ring == NULL)
         return;
@@ -702,7 +702,7 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
         "(%d -> %d)...\n", ha->request_q_length, request_q_length);

     /* Clear old allocations. */
-    pci_free_consistent(ha->pdev,
+    dma_free_coherent(&ha->pdev->dev,
         (ha->request_q_length + 1) * sizeof(request_t), ha->request_ring,
         ha->request_dma);
@@ -1710,50 +1710,28 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
     uint16_t index;
     uint16_t entries;
-    struct dev_id {
-        uint8_t  al_pa;
-        uint8_t  area;
-        uint8_t  domain;
-        uint8_t  loop_id_2100;  /* ISP2100/ISP2200 -- 4 bytes. */
-        uint16_t loop_id;       /* ISP23XX         -- 6 bytes. */
-    } *id_list;
-#define MAX_ID_LIST_SIZE (sizeof(struct dev_id) * MAX_FIBRE_DEVICES)
-    dma_addr_t id_list_dma;
     char *id_iter;
     uint16_t loop_id;
     uint8_t domain, area, al_pa;

     found_devs = 0;
     new_fcport = NULL;
     entries = MAX_FIBRE_DEVICES;

-    id_list = pci_alloc_consistent(ha->pdev, MAX_ID_LIST_SIZE,
-        &id_list_dma);
-    if (id_list == NULL) {
-        DEBUG2(printk("scsi(%ld): Failed to allocate memory, No local "
-            "loop\n", ha->host_no));
-        qla_printk(KERN_WARNING, ha,
-            "Memory Allocation failed - port_list");
-        ha->mem_err++;
-        return (QLA_MEMORY_ALLOC_FAILED);
-    }
-    memset(id_list, 0, MAX_ID_LIST_SIZE);
-
     DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", ha->host_no));
     DEBUG3(qla2x00_get_fcal_position_map(ha, NULL));

     /* Get list of logged in devices. */
-    rval = qla2x00_get_id_list(ha, id_list, id_list_dma, &entries);
-    if (rval != QLA_SUCCESS) {
+    memset(ha->gid_list, 0, GID_LIST_SIZE);
+    rval = qla2x00_get_id_list(ha, ha->gid_list, ha->gid_list_dma,
+        &entries);
+    if (rval != QLA_SUCCESS)
         goto cleanup_allocation;
-    }

     DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
         ha->host_no, entries));
-    DEBUG3(qla2x00_dump_buffer((uint8_t *)id_list,
-        entries * sizeof(struct dev_id)));
+    DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
+        entries * sizeof(struct gid_list_info)));

     /* Allocate temporary fcport for any new fcports discovered. */
     new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
@@ -1781,18 +1759,18 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
     }

     /* Add devices to port list. */
-    id_iter = (char *)id_list;
+    id_iter = (char *)ha->gid_list;
     for (index = 0; index < entries; index++) {
-        domain = ((struct dev_id *)id_iter)->domain;
-        area = ((struct dev_id *)id_iter)->area;
-        al_pa = ((struct dev_id *)id_iter)->al_pa;
+        domain = ((struct gid_list_info *)id_iter)->domain;
+        area = ((struct gid_list_info *)id_iter)->area;
+        al_pa = ((struct gid_list_info *)id_iter)->al_pa;
         if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-            loop_id =
-                (uint16_t)((struct dev_id *)id_iter)->loop_id_2100;
+            loop_id = (uint16_t)
+                ((struct gid_list_info *)id_iter)->loop_id_2100;
             id_iter += 4;
         } else {
-            loop_id =
-                le16_to_cpu(((struct dev_id *)id_iter)->loop_id);
+            loop_id = le16_to_cpu(
+                ((struct gid_list_info *)id_iter)->loop_id);
             id_iter += 6;
         }
@@ -1863,8 +1841,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
     }

 cleanup_allocation:
-    pci_free_consistent(ha->pdev, MAX_ID_LIST_SIZE, id_list, id_list_dma);
-
     if (new_fcport)
         kfree(new_fcport);
@@ -1972,7 +1948,7 @@ qla2x00_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport)
     dma_addr_t inq_dma;
     uint16_t lun;

-    inq = pci_alloc_consistent(ha->pdev, sizeof(inq_cmd_rsp_t), &inq_dma);
+    inq = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &inq_dma);
     if (inq == NULL) {
         qla_printk(KERN_WARNING, ha,
             "Memory Allocation failed - INQ\n");
@@ -1988,7 +1964,7 @@ qla2x00_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport)
         }
     }

-    pci_free_consistent(ha->pdev, sizeof(inq_cmd_rsp_t), inq, inq_dma);
+    dma_pool_free(ha->s_dma_pool, inq, inq_dma);
 }

 /*
@@ -2012,8 +1988,6 @@ qla2x00_rpt_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport,
     int rval;
     uint32_t len, cnt;
     uint16_t lun;
-    rpt_lun_cmd_rsp_t *rlc;
-    dma_addr_t rlc_dma;

     /* Assume a failed status */
     rval = QLA_FUNCTION_FAILED;
@@ -2022,30 +1996,19 @@ qla2x00_rpt_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport,
     if ((fcport->flags & FCF_RLC_SUPPORT) == 0)
         return (rval);

-    rlc = pci_alloc_consistent(ha->pdev, sizeof(rpt_lun_cmd_rsp_t),
-        &rlc_dma);
-    if (rlc == NULL) {
-        qla_printk(KERN_WARNING, ha,
-            "Memory Allocation failed - RLC");
-        return QLA_MEMORY_ALLOC_FAILED;
-    }
-
-    rval = qla2x00_report_lun(ha, fcport, rlc, rlc_dma);
-    if (rval != QLA_SUCCESS) {
-        pci_free_consistent(ha->pdev, sizeof(rpt_lun_cmd_rsp_t), rlc,
-            rlc_dma);
+    rval = qla2x00_report_lun(ha, fcport);
+    if (rval != QLA_SUCCESS)
         return (rval);
-    }

     /* Always add a fc_lun_t structure for lun 0 -- mid-layer requirement */
     qla2x00_add_lun(fcport, 0);

     /* Configure LUN list. */
-    len = be32_to_cpu(rlc->list.hdr.len);
+    len = be32_to_cpu(ha->rlc_rsp->list.hdr.len);
     len /= 8;
     for (cnt = 0; cnt < len; cnt++) {
-        lun = CHAR_TO_SHORT(rlc->list.lst[cnt].lsb,
-            rlc->list.lst[cnt].msb.b);
+        lun = CHAR_TO_SHORT(ha->rlc_rsp->list.lst[cnt].lsb,
+            ha->rlc_rsp->list.lst[cnt].msb.b);

         DEBUG3(printk("scsi(%ld): RLC lun = (%d)\n", ha->host_no, lun));
@@ -2056,8 +2019,6 @@ qla2x00_rpt_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport,
     }
     atomic_set(&fcport->state, FCS_ONLINE);

-    pci_free_consistent(ha->pdev, sizeof(rpt_lun_cmd_rsp_t), rlc, rlc_dma);
-
     return (rval);
 }
@@ -2068,8 +2029,6 @@ qla2x00_rpt_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport,
  * Input:
  *    ha:      adapter state pointer.
  *    fcport:  FC port structure pointer.
- *    mem:     pointer to dma memory object for report LUN IOCB
- *             packet.
  *
  * Returns:
  *    qla2x00 local function return status code.
@@ -2078,15 +2037,18 @@ qla2x00_rpt_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport,
  *    Kernel context.
  */
 static int
-qla2x00_report_lun(scsi_qla_host_t *ha,
-    fc_port_t *fcport, rpt_lun_cmd_rsp_t *rlc, dma_addr_t rlc_dma)
+qla2x00_report_lun(scsi_qla_host_t *ha, fc_port_t *fcport)
 {
     int rval;
     uint16_t retries;
     uint16_t comp_status;
     uint16_t scsi_status;
+    rpt_lun_cmd_rsp_t *rlc;
+    dma_addr_t rlc_dma;

     rval = QLA_FUNCTION_FAILED;
+    rlc = ha->rlc_rsp;
+    rlc_dma = ha->rlc_rsp_dma;
+
     for (retries = 3; retries; retries--) {
         memset(rlc, 0, sizeof(rpt_lun_cmd_rsp_t));
......
@@ -1403,7 +1403,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
     DEBUG11(printk("qla2x00_get_port_database(%ld): entered.\n",
         ha->host_no);)

-    pd = pci_alloc_consistent(ha->pdev, PORT_DATABASE_SIZE, &pd_dma);
+    pd = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pd_dma);
     if (pd == NULL) {
         DEBUG2_3_11(printk("qla2x00_get_port_database(%ld): **** "
             "Mem Alloc Failed ****", ha->host_no);)
@@ -1464,7 +1464,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
         fcport->port_type = FCT_TARGET;

 gpd_error_out:
-    pci_free_consistent(ha->pdev, PORT_DATABASE_SIZE, pd, pd_dma);
+    dma_pool_free(ha->s_dma_pool, pd, pd_dma);

     if (rval != QLA_SUCCESS) {
         /*EMPTY*/
@@ -1617,14 +1617,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
     mbx_cmd_t mc;
     mbx_cmd_t *mcp = &mc;
     link_stat_t *stat_buf;
-    dma_addr_t phys_address = 0;
+    dma_addr_t stat_buf_dma;

     DEBUG11(printk("qla2x00_get_link_status(%ld): entered.\n",
         ha->host_no);)

-    stat_buf = pci_alloc_consistent(ha->pdev, sizeof(link_stat_t),
-        &phys_address);
+    stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
     if (stat_buf == NULL) {
         DEBUG2_3_11(printk("qla2x00_get_link_status(%ld): Failed to "
             "allocate memory.\n", ha->host_no));
@@ -1641,10 +1639,10 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
     } else {
         mcp->mb[1] = loop_id << 8;
     }
-    mcp->mb[2] = MSW(phys_address);
-    mcp->mb[3] = LSW(phys_address);
-    mcp->mb[6] = MSW(MSD(phys_address));
-    mcp->mb[7] = LSW(MSD(phys_address));
+    mcp->mb[2] = MSW(stat_buf_dma);
+    mcp->mb[3] = LSW(stat_buf_dma);
+    mcp->mb[6] = MSW(MSD(stat_buf_dma));
+    mcp->mb[7] = LSW(MSD(stat_buf_dma));
     mcp->in_mb = MBX_0;
     mcp->tov = 30;
@@ -1688,8 +1686,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
         rval = BIT_1;
     }

-    pci_free_consistent(ha->pdev, sizeof(link_stat_t), stat_buf,
-        phys_address);
+    dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma);

     return rval;
 }
@@ -2409,7 +2406,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
     char *pmap;
     dma_addr_t pmap_dma;

-    pmap = pci_alloc_consistent(ha->pdev, FCAL_MAP_SIZE, &pmap_dma);
+    pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma);
     if (pmap == NULL) {
         DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
             __func__, ha->host_no));
@@ -2438,7 +2435,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
         if (pos_map)
             memcpy(pos_map, pmap, FCAL_MAP_SIZE);
     }
-    pci_free_consistent(ha->pdev, FCAL_MAP_SIZE, pmap, pmap_dma);
+    dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);

     if (rval != QLA_SUCCESS) {
         DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
......
@@ -1818,28 +1818,27 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
      * assist in setting the proper dma mask.
      */
     if (sizeof(dma_addr_t) > 4) {
-        /* Update our PCI device dma_mask for full 64 bits */
-        if (pci_set_dma_mask(ha->pdev, 0xffffffffffffffffULL) == 0) {
+        if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
             ha->flags.enable_64bit_addressing = 1;
             ha->calc_request_entries = qla2x00_calc_iocbs_64;
             ha->build_scsi_iocbs = qla2x00_build_scsi_iocbs_64;

             if (pci_set_consistent_dma_mask(ha->pdev,
-                0xffffffffffffffffULL)) {
+                DMA_64BIT_MASK)) {
                 qla_printk(KERN_DEBUG, ha,
                     "Failed to set 64 bit PCI consistent mask; "
                     "using 32 bit.\n");
                 pci_set_consistent_dma_mask(ha->pdev,
-                    0xffffffffULL);
+                    DMA_32BIT_MASK);
             }
         } else {
             qla_printk(KERN_DEBUG, ha,
                 "Failed to set 64 bit PCI DMA mask, falling back "
                 "to 32 bit MASK.\n");
-            pci_set_dma_mask(ha->pdev, 0xffffffff);
+            pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
         }
     } else {
-        pci_set_dma_mask(ha->pdev, 0xffffffff);
+        pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
     }
 }
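As an aside on the hunk above: besides the allocator changes, the patch swaps the open-coded 0xffffffff... literals for the DMA_64BIT_MASK/DMA_32BIT_MASK constants. A stripped-down sketch of that mask-selection pattern outside the driver might look like the following; my_config_dma_addressing and pdev are placeholders, while pci_set_dma_mask()/pci_set_consistent_dma_mask() and the two mask constants are the PCI DMA API of that kernel era.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Prefer 64-bit DMA when dma_addr_t can hold it, fall back to 32-bit. */
static void my_config_dma_addressing(struct pci_dev *pdev)
{
    if (sizeof(dma_addr_t) > 4 &&
        pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
        /* Streaming DMA can use 64-bit addresses... */
        if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
            /* ...but coherent allocations may still need to stay below 4GB. */
            pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
        return;
    }
    pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}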
@@ -2813,6 +2812,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha)
 static uint8_t
 qla2x00_mem_alloc(scsi_qla_host_t *ha)
 {
+    char name[16];
     uint8_t status = 1;
     int retry = 10;
@@ -2823,9 +2823,9 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
          * bug where available mem is not allocated until after a
          * little delay and a retry.
          */
-        ha->request_ring = pci_alloc_consistent(ha->pdev,
-            ((ha->request_q_length + 1) * (sizeof(request_t))),
-            &ha->request_dma);
+        ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
+            (ha->request_q_length + 1) * sizeof(request_t),
+            &ha->request_dma, GFP_KERNEL);
         if (ha->request_ring == NULL) {
             qla_printk(KERN_WARNING, ha,
                 "Memory Allocation failed - request_ring\n");
@@ -2836,9 +2836,9 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
             continue;
         }

-        ha->response_ring = pci_alloc_consistent(ha->pdev,
-            ((ha->response_q_length + 1) * (sizeof(response_t))),
-            &ha->response_dma);
+        ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
+            (ha->response_q_length + 1) * sizeof(response_t),
+            &ha->response_dma, GFP_KERNEL);
         if (ha->response_ring == NULL) {
             qla_printk(KERN_WARNING, ha,
                 "Memory Allocation failed - response_ring\n");
@@ -2849,19 +2849,58 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
             continue;
         }

-        /* get consistent memory allocated for init control block */
-        ha->init_cb = pci_alloc_consistent(ha->pdev,
-            sizeof(init_cb_t), &ha->init_cb_dma);
-        if (ha->init_cb == NULL) {
+        ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
+            &ha->gid_list_dma, GFP_KERNEL);
+        if (ha->gid_list == NULL) {
             qla_printk(KERN_WARNING, ha,
-                "Memory Allocation failed - init_cb\n");
+                "Memory Allocation failed - gid_list\n");

             qla2x00_mem_free(ha);
             msleep(100);

             continue;
         }
-        memset(ha->init_cb, 0, sizeof(init_cb_t));
+
+        ha->rlc_rsp = dma_alloc_coherent(&ha->pdev->dev,
+            sizeof(rpt_lun_cmd_rsp_t), &ha->rlc_rsp_dma, GFP_KERNEL);
+        if (ha->rlc_rsp == NULL) {
+            qla_printk(KERN_WARNING, ha,
+                "Memory Allocation failed - rlc");
+
+            qla2x00_mem_free(ha);
+            msleep(100);
+
+            continue;
+        }
+
+        snprintf(name, sizeof(name), "qla2xxx_%ld", ha->host_no);
+        ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+            DMA_POOL_SIZE, 8, 0);
+        if (ha->s_dma_pool == NULL) {
+            qla_printk(KERN_WARNING, ha,
+                "Memory Allocation failed - s_dma_pool\n");
+
+            qla2x00_mem_free(ha);
+            msleep(100);
+
+            continue;
+        }
+
+        /* Get consistent memory allocated for Get Port Database cmd */
+        ha->iodesc_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+            &ha->iodesc_pd_dma);
+        if (ha->iodesc_pd == NULL) {
+            /* error */
+            qla_printk(KERN_WARNING, ha,
+                "Memory Allocation failed - iodesc_pd\n");
+
+            qla2x00_mem_free(ha);
+            msleep(100);
+
+            continue;
+        }
+        memset(ha->iodesc_pd, 0, PORT_DATABASE_SIZE);

         /* Allocate ioctl related memory. */
         if (qla2x00_alloc_ioctl_mem(ha)) {
@@ -2888,8 +2927,9 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
         /* Allocate memory for SNS commands */
         if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
             /* Get consistent memory allocated for SNS commands */
-            ha->sns_cmd = pci_alloc_consistent(ha->pdev,
-                sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma);
+            ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
+                sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma,
+                GFP_KERNEL);
             if (ha->sns_cmd == NULL) {
                 /* error */
                 qla_printk(KERN_WARNING, ha,
@@ -2903,8 +2943,8 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
             memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
         } else {
             /* Get consistent memory allocated for MS IOCB */
-            ha->ms_iocb = pci_alloc_consistent(ha->pdev,
-                sizeof(ms_iocb_entry_t), &ha->ms_iocb_dma);
+            ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+                &ha->ms_iocb_dma);
             if (ha->ms_iocb == NULL) {
                 /* error */
                 qla_printk(KERN_WARNING, ha,
@@ -2921,8 +2961,9 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
              * Get consistent memory allocated for CT SNS
              * commands
              */
-            ha->ct_sns = pci_alloc_consistent(ha->pdev,
-                sizeof(struct ct_sns_pkt), &ha->ct_sns_dma);
+            ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
+                sizeof(struct ct_sns_pkt), &ha->ct_sns_dma,
+                GFP_KERNEL);
             if (ha->ct_sns == NULL) {
                 /* error */
                 qla_printk(KERN_WARNING, ha,
@@ -3005,52 +3046,67 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
     /* free sp pool */
     qla2x00_free_sp_pool(ha);

-    if (ha->iodesc_pd) {
-        pci_free_consistent(ha->pdev, PORT_DATABASE_SIZE,
-            ha->iodesc_pd, ha->iodesc_pd_dma);
-    }
+    if (ha->sns_cmd)
+        dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+            ha->sns_cmd, ha->sns_cmd_dma);

-    if (ha->sns_cmd) {
-        pci_free_consistent(ha->pdev,
-            sizeof(struct sns_cmd_pkt), ha->sns_cmd, ha->sns_cmd_dma);
-    }
+    if (ha->ct_sns)
+        dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+            ha->ct_sns, ha->ct_sns_dma);

-    if (ha->ct_sns) {
-        pci_free_consistent(ha->pdev,
-            sizeof(struct ct_sns_pkt), ha->ct_sns, ha->ct_sns_dma);
-    }
-
-    if (ha->ms_iocb) {
-        pci_free_consistent(ha->pdev,
-            sizeof(ms_iocb_entry_t), ha->ms_iocb, ha->ms_iocb_dma);
-    }
+    if (ha->ms_iocb)
+        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);

-    if (ha->init_cb) {
-        pci_free_consistent(ha->pdev,
-            sizeof(init_cb_t), ha->init_cb, ha->init_cb_dma);
-    }
+    if (ha->iodesc_pd)
+        dma_pool_free(ha->s_dma_pool, ha->iodesc_pd, ha->iodesc_pd_dma);

-    if (ha->request_ring) {
-        pci_free_consistent(ha->pdev,
-            ((ha->request_q_length + 1) * (sizeof(request_t))),
-            ha->request_ring, ha->request_dma);
-    }
+    if (ha->init_cb)
+        dma_pool_free(ha->s_dma_pool, ha->init_cb, ha->init_cb_dma);
+
+    if (ha->s_dma_pool)
+        dma_pool_destroy(ha->s_dma_pool);
+
+    if (ha->rlc_rsp)
+        dma_free_coherent(&ha->pdev->dev,
+            sizeof(rpt_lun_cmd_rsp_t), ha->rlc_rsp,
+            ha->rlc_rsp_dma);
+
+    if (ha->gid_list)
+        dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
+            ha->gid_list_dma);

-    if (ha->response_ring) {
-        pci_free_consistent(ha->pdev,
-            ((ha->response_q_length + 1) * (sizeof(response_t))),
+    if (ha->response_ring)
+        dma_free_coherent(&ha->pdev->dev,
+            (ha->response_q_length + 1) * sizeof(response_t),
             ha->response_ring, ha->response_dma);
-    }

-    ha->iodesc_pd = NULL;
-    ha->iodesc_pd_dma = 0;
+    if (ha->request_ring)
+        dma_free_coherent(&ha->pdev->dev,
+            (ha->request_q_length + 1) * sizeof(request_t),
+            ha->request_ring, ha->request_dma);

+    ha->sns_cmd = NULL;
+    ha->sns_cmd_dma = 0;
     ha->ct_sns = NULL;
+    ha->ct_sns_dma = 0;
     ha->ms_iocb = NULL;
+    ha->ms_iocb_dma = 0;
+    ha->iodesc_pd = NULL;
+    ha->iodesc_pd_dma = 0;
     ha->init_cb = NULL;
-    ha->request_ring = NULL;
-    ha->request_dma = 0;
+    ha->init_cb_dma = 0;
+
+    ha->s_dma_pool = NULL;
+
+    ha->rlc_rsp = NULL;
+    ha->rlc_rsp_dma = 0;
+    ha->gid_list = NULL;
+    ha->gid_list_dma = 0;
     ha->response_ring = NULL;
     ha->response_dma = 0;
+    ha->request_ring = NULL;
+    ha->request_dma = 0;

     list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
         fcport = list_entry(fcpl, fc_port_t, list);
@@ -3069,16 +3125,15 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
     }
     INIT_LIST_HEAD(&ha->fcports);

-    if (ha->fw_dump) {
+    if (ha->fw_dump)
         free_pages((unsigned long)ha->fw_dump, ha->fw_dump_order);
-        ha->fw_dump = NULL;
-    }

-    if (ha->fw_dump_buffer) {
+    if (ha->fw_dump_buffer)
         vfree(ha->fw_dump_buffer);
+
+    ha->fw_dump = NULL;
     ha->fw_dump_reading = 0;
     ha->fw_dump_buffer = NULL;
-    }
 }

 /*
......