Commit c9b8af00 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (154 commits)
  [SCSI] osd: Remove out-of-tree left overs
  [SCSI] libosd: Use REQ_QUIET requests.
  [SCSI] osduld: use filp_open() when looking up an osd-device
  [SCSI] libosd: Define an osd_dev wrapper to retrieve the request_queue
  [SCSI] libosd: osd_req_{read,write} takes a length parameter
  [SCSI] libosd: Let _osd_req_finalize_data_integrity receive number of out_bytes
  [SCSI] libosd: osd_req_{read,write}_kern new API
  [SCSI] libosd: Better printout of OSD target system information
  [SCSI] libosd: OSD2r05: Attribute definitions
  [SCSI] libosd: OSD2r05: Additional command enums
  [SCSI] mpt fusion: fix up doc book comments
  [SCSI] mpt fusion: Added support for Broadcast primitives Event handling
  [SCSI] mpt fusion: Queue full event handling
  [SCSI] mpt fusion: RAID device handling and Dual port Raid support is added
  [SCSI] mpt fusion: Put IOC into ready state if it not already in ready state
  [SCSI] mpt fusion: Code Cleanup patch
  [SCSI] mpt fusion: Rescan SAS topology added
  [SCSI] mpt fusion: SAS topology scan changes, expander events
  [SCSI] mpt fusion: Firmware event implementation using seperate WorkQueue
  [SCSI] mpt fusion: rewrite of ioctl_cmds internal generated function
  ...
parents c59a264c 82681a31
@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	/*
-	 * mgmt tasks do not need special cleanup and we do not
-	 * allocate anything in the init task callout
-	 */
-	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+	/* mgmt tasks do not need special cleanup */
+	if (!task->sc)
 		return;
 
 	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 }
 
 static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
+iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+		      int non_blocking)
 {
 	int err;
 	struct iser_conn *ib_conn;
...
@@ -58,6 +58,7 @@
 #define MPT_DEBUG_FC 0x00080000
 #define MPT_DEBUG_SAS 0x00100000
 #define MPT_DEBUG_SAS_WIDE 0x00200000
+#define MPT_DEBUG_36GB_MEM 0x00400000
 
 /*
  * CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
 #define dsaswideprintk(IOC, CMD) \
 	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
 
+#define d36memprintk(IOC, CMD) \
+	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
 
 /*
...
@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * A slightly different algorithm is required for
 	 * 64bit SGEs.
 	 */
-	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
+	scale = ioc->req_sz/ioc->SGE_size;
+	if (ioc->sg_addr_size == sizeof(u64)) {
 		numSGE = (scale - 1) *
 			(ioc->facts.MaxChainDepth-1) + scale +
-			(ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-			sizeof(u32));
+			(ioc->req_sz - 60) / ioc->SGE_size;
 	} else {
 		numSGE = 1 + (scale - 1) *
 			(ioc->facts.MaxChainDepth-1) + scale +
-			(ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-			sizeof(u32));
+			(ioc->req_sz - 64) / ioc->SGE_size;
 	}
 
 	if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Clear the TM flags
 	 */
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	hd->resetPending = 0;
 	hd->abortSCpnt = NULL;
 
 	/* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hd->timer.data = (unsigned long) hd;
 	hd->timer.function = mptscsih_timer_expired;
 
-	init_waitqueue_head(&hd->scandv_waitq);
-	hd->scandv_wait_done = 0;
 	hd->last_queue_full = 0;
 
 	sh->transportt = mptfc_transport_template;
...
@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
 	struct list_head list;
 	EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
 	u8 target_reset_issued;
+	unsigned long time_count;
 };
 
 enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
 	MPTSAS_DEL_DEVICE,
 	MPTSAS_ADD_RAID,
 	MPTSAS_DEL_RAID,
+	MPTSAS_ADD_PHYSDISK,
+	MPTSAS_ADD_PHYSDISK_REPROBE,
+	MPTSAS_DEL_PHYSDISK,
+	MPTSAS_DEL_PHYSDISK_REPROBE,
 	MPTSAS_ADD_INACTIVE_VOLUME,
 	MPTSAS_IGNORE_EVENT,
 };
 
+struct mptsas_mapping{
+	u8 id;
+	u8 channel;
+};
+
+struct mptsas_device_info {
+	struct list_head list;
+	struct mptsas_mapping os;	/* operating system mapping*/
+	struct mptsas_mapping fw;	/* firmware mapping */
+	u64 sas_address;
+	u32 device_info;	/* specific bits for devices */
+	u16 slot;		/* enclosure slot id */
+	u64 enclosure_logical_id;	/*enclosure address */
+	u8 is_logical_volume;	/* is this logical volume */
+	/* this belongs to volume */
+	u8 is_hidden_raid_component;
+	/* this valid when is_hidden_raid_component set */
+	u8 volume_id;
+	/* cached data for a removed device */
+	u8 is_cached;
+};
+
 struct mptsas_hotplug_event {
-	struct work_struct work;
 	MPT_ADAPTER *ioc;
 	enum mptsas_hotplug_action event_type;
 	u64 sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
 	u8 id;
 	u32 device_info;
 	u16 handle;
-	u16 parent_handle;
 	u8 phy_id;
-	u8 phys_disk_num_valid;	/* hrc (hidden raid component) */
 	u8 phys_disk_num;	/* hrc - unique index*/
-	u8 hidden_raid_component;	/* hrc - don't expose*/
+	struct scsi_device *sdev;
+};
+
+struct fw_event_work {
+	struct list_head list;
+	struct delayed_work work;
+	MPT_ADAPTER *ioc;
+	u32 event;
+	u8 retries;
+	u8 event_data[1];
 };
 
 struct mptsas_discovery_event {
...
@@ -60,6 +60,7 @@
 #define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
 #define MPT_SCANDV_ISSUE_SENSE (0x00000010)
 #define MPT_SCANDV_FALLBACK (0x00000020)
+#define MPT_SCANDV_BUSY (0x00000040)
 
 #define MPT_SCANDV_MAX_RETRIES (10)
@@ -89,6 +90,7 @@
 #endif
 
 typedef struct _internal_cmd {
 	char *data;		/* data pointer */
 	dma_addr_t data_dma;	/* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
 extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
 extern const char * mptscsih_info(struct Scsi_Host *SChost);
 extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
+	u8 id, int lun, int ctx2abort, ulong timeout);
 extern void mptscsih_slave_destroy(struct scsi_device *device);
 extern int mptscsih_slave_configure(struct scsi_device *device);
 extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 extern void mptscsih_timer_expired(unsigned long data);
-extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
+extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
 
 	flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
 		(IOCPage4Ptr->Header.PageLength + ii) * 4;
-	mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+	ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
 
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
 	spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
 }
 
-static int
+int
 mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 {
+	MPT_ADAPTER *ioc = hd->ioc;
 	MpiRaidActionRequest_t *pReq;
 	MPT_FRAME_HDR *mf;
-	MPT_ADAPTER *ioc = hd->ioc;
+	int ret;
+	unsigned long timeleft;
+
+	mutex_lock(&ioc->internal_cmds.mutex);
 
 	/* Get and Populate a free Frame
 	 */
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-		ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
-			ioc->name));
-		return -EAGAIN;
+		dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
+			"%s: no msg frames!\n", ioc->name, __func__));
+		ret = -EAGAIN;
+		goto out;
 	}
 	pReq = (MpiRaidActionRequest_t *)mf;
 	if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 	pReq->Reserved2 = 0;
 	pReq->ActionDataWord = 0; /* Reserved for this action */
 
-	mpt_add_sge((char *)&pReq->ActionDataSGE,
+	ioc->add_sge((char *)&pReq->ActionDataSGE,
 		MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
 
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
 		ioc->name, pReq->Action, channel, id));
 
-	hd->pLocal = NULL;
-	hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
-	hd->scandv_wait_done = 0;
-
-	/* Save cmd pointer, for resource free if timeout or
-	 * FW reload occurs
-	 */
-	hd->cmdPtr = mf;
-
-	add_timer(&hd->timer);
+	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
 	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-	wait_event(hd->scandv_waitq, hd->scandv_wait_done);
+	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
+	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		ret = -ETIME;
+		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
+			ioc->name, __func__));
+		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out;
+		if (!timeleft) {
+			printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+				ioc->name, __func__);
+			mpt_HardResetHandler(ioc, CAN_SLEEP);
+			mpt_free_msg_frame(ioc, mf);
+		}
+		goto out;
+	}
 
-	if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
-		return -1;
+	ret = ioc->internal_cmds.completion_code;
 
-	return 0;
+ out:
+	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+	mutex_unlock(&ioc->internal_cmds.mutex);
+	return ret;
 }
 
 static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * A slightly different algorithm is required for
 	 * 64bit SGEs.
 	 */
-	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
+	scale = ioc->req_sz/ioc->SGE_size;
+	if (ioc->sg_addr_size == sizeof(u64)) {
 		numSGE = (scale - 1) *
 			(ioc->facts.MaxChainDepth-1) + scale +
-			(ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-			sizeof(u32));
+			(ioc->req_sz - 60) / ioc->SGE_size;
 	} else {
 		numSGE = 1 + (scale - 1) *
 			(ioc->facts.MaxChainDepth-1) + scale +
-			(ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-			sizeof(u32));
+			(ioc->req_sz - 64) / ioc->SGE_size;
 	}
 
 	if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Clear the TM flags
 	 */
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	hd->resetPending = 0;
 	hd->abortSCpnt = NULL;
 
 	/* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		mpt_saf_te));
 	ioc->spi_data.noQas = 0;
 
-	init_waitqueue_head(&hd->scandv_waitq);
-	hd->scandv_wait_done = 0;
 	hd->last_queue_full = 0;
 	hd->spi_pending = 0;
 
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * issue internal bus reset
 	 */
 	if (ioc->spi_data.bus_reset)
-		mptscsih_TMHandler(hd,
+		mptscsih_IssueTaskMgmt(hd,
 			MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
 			0, 0, 0, 0, 5);
...
@@ -2264,6 +2264,17 @@ config BNX2
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2.  This is recommended.
 
+config CNIC
+	tristate "Broadcom CNIC support"
+	depends on BNX2
+	depends on UIO
+	help
+	  This driver supports offload features of Broadcom NetXtremeII
+	  gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cnic.  This is recommended.
+
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
 	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
...
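A note for readers building the new driver: below is a .config fragment consistent with the dependencies declared in the CNIC entry above. The "=m" values are illustrative only and are not part of this merge.

	CONFIG_UIO=m
	CONFIG_BNX2=m
	CONFIG_CNIC=m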
@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x.o
 bnx2x-objs := bnx2x_main.o bnx2x_link.o
 spidernet-y += spider_net.o spider_net_ethtool.o
...
@@ -49,6 +49,10 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
 #include "bnx2.h"
 #include "bnx2_fw.h"
 
@@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 	spin_unlock_bh(&bp->indirect_lock);
 }
 
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
struct bnx2 *bp = netdev_priv(dev);
struct drv_ctl_io *io = &info->data.io;
switch (info->cmd) {
case DRV_CTL_IO_WR_CMD:
bnx2_reg_wr_ind(bp, io->offset, io->data);
break;
case DRV_CTL_IO_RD_CMD:
io->data = bnx2_reg_rd_ind(bp, io->offset);
break;
case DRV_CTL_CTX_WR_CMD:
bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
break;
default:
return -EINVAL;
}
return 0;
}
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
int sb_id;
if (bp->flags & BNX2_FLAG_USING_MSIX) {
cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
bnapi->cnic_present = 0;
sb_id = bp->irq_nvecs;
cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
} else {
cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
bnapi->cnic_tag = bnapi->last_status_idx;
bnapi->cnic_present = 1;
sb_id = 0;
cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
}
cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
cp->irq_arr[0].status_blk = (void *)
((unsigned long) bnapi->status_blk.msi +
(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
cp->irq_arr[0].status_blk_num = sb_id;
cp->num_irq = 1;
}
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
void *data)
{
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
if (ops == NULL)
return -EINVAL;
if (cp->drv_state & CNIC_DRV_STATE_REGD)
return -EBUSY;
bp->cnic_data = data;
rcu_assign_pointer(bp->cnic_ops, ops);
cp->num_irq = 0;
cp->drv_state = CNIC_DRV_STATE_REGD;
bnx2_setup_cnic_irq_info(bp);
return 0;
}
static int bnx2_unregister_cnic(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
cp->drv_state = 0;
bnapi->cnic_present = 0;
rcu_assign_pointer(bp->cnic_ops, NULL);
synchronize_rcu();
return 0;
}
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
cp->drv_owner = THIS_MODULE;
cp->chip_id = bp->chip_id;
cp->pdev = bp->pdev;
cp->io_base = bp->regview;
cp->drv_ctl = bnx2_drv_ctl;
cp->drv_register_cnic = bnx2_register_cnic;
cp->drv_unregister_cnic = bnx2_unregister_cnic;
return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
rcu_read_unlock();
}
static void
bnx2_cnic_start(struct bnx2 *bp)
{
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
bnapi->cnic_tag = bnapi->last_status_idx;
}
info.cmd = CNIC_CTL_START_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
rcu_read_unlock();
}
#else
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
#endif
 static int
 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 {
@@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp)
 static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
+	bnx2_cnic_stop(bp);
 	bnx2_disable_int_sync(bp);
 	if (netif_running(bp->dev)) {
 		bnx2_napi_disable(bp);
@@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp)
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
+			bnx2_cnic_start(bp);
 		}
 	}
 }
@@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	if (bnx2_has_fast_work(bnapi))
 		return 1;
 
+#ifdef BCM_CNIC
+	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+		return 1;
+#endif
+
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
 		return 1;
@@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
 	bp->idle_chk_status_idx = bnapi->last_status_idx;
 }
 
#ifdef BCM_CNIC
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
struct cnic_ops *c_ops;
if (!bnapi->cnic_present)
return;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops)
bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
bnapi->status_blk.msi);
rcu_read_unlock();
}
#endif
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
+#ifdef BCM_CNIC
+		bnx2_poll_cnic(bp, bnapi);
+#endif
+
 		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
@@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp)
 	val = REG_RD(bp, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
-	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
 		val |= BNX2_MQ_CONFIG_HALT_DIS;
+	}
 
 	REG_WR(bp, BNX2_MQ_CONFIG, val);
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
 	dev->mem_end = dev->mem_start + mem_len;
 	dev->irq = pdev->irq;
...
@@ -361,6 +361,9 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX 0x00000004
+#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16
+#define BNX2_L2CTX_STATUSB_NUM(sb_id) \
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ 0x00000008
 #define BNX2_L2CTX_NX_BSEQ 0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
 #define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
 
 #define BNX2_RXP_SCRATCH 0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024
 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038
 #define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c
 #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128
@@ -6678,6 +6682,11 @@ struct bnx2_napi {
 	u32 last_status_idx;
 	u32 int_num;
 
+#ifdef BCM_CNIC
+	u32 cnic_tag;
+	int cnic_present;
+#endif
+
 	struct bnx2_rx_ring_info rx_ring;
 	struct bnx2_tx_ring_info tx_ring;
 };
@@ -6727,6 +6736,11 @@ struct bnx2 {
 	int tx_ring_size;
 	u32 tx_wake_thresh;
 
+#ifdef BCM_CNIC
+	struct cnic_ops *cnic_ops;
+	void *cnic_data;
+#endif
+
 	/* End of fields used in the performance code paths. */
 
 	unsigned int current_interval;
@@ -6885,6 +6899,10 @@ struct bnx2 {
 
 	u32 idle_chk_status_idx;
 
+#ifdef BCM_CNIC
+	struct cnic_eth_dev cnic_eth_dev;
+#endif
+
 	const struct firmware *mips_firmware;
 	const struct firmware *rv2p_firmware;
 };
...
/* cnic.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_H
#define CNIC_H
#define KWQ_PAGE_CNT 4
#define KCQ_PAGE_CNT 16
#define KWQ_CID 24
#define KCQ_CID 25
/*
* krnlq_context definition
*/
#define L5_KRNLQ_FLAGS 0x00000000
#define L5_KRNLQ_SIZE 0x00000000
#define L5_KRNLQ_TYPE 0x00000000
#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
#define KRNLQ_TYPE_TYPE (0xf<<28)
#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
#define L5_KRNLQ_HOST_QIDX 0x00000004
#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
#define L5_KRNLQ_NX_PG_QIDX 0x00000018
#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
#define L5_KRNLQ_QIDX_INCR 0x0000001c
#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
#define BNX2_PG_CTX_MAP 0x1a0034
#define BNX2_ISCSI_CTX_MAP 0x1a0074
struct cnic_redirect_entry {
struct dst_entry *old_dst;
struct dst_entry *new_dst;
};
#define MAX_COMPLETED_KCQE 64
#define MAX_CNIC_L5_CONTEXT 256
#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
#define MAX_ISCSI_TBL_SZ 256
#define CNIC_LOCAL_PORT_MIN 60000
#define CNIC_LOCAL_PORT_MAX 61000
#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
#define MAX_KWQE_CNT (KWQE_CNT - 1)
#define MAX_KCQE_CNT (KCQE_CNT - 1)
#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
(MAX_KCQE_CNT - 1)) ? \
(x) + 2 : (x) + 1
#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA(cp, x) \
&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
#define DEF_IPID_COUNT 0xc001
#define DEF_KA_TIMEOUT 10000
#define DEF_KA_INTERVAL 300000
#define DEF_KA_MAX_PROBE_COUNT 3
#define DEF_TOS 0
#define DEF_TTL 0xfe
#define DEF_SND_SEQ_SCALE 0
#define DEF_RCV_BUF 0xffff
#define DEF_SND_BUF 0xffff
#define DEF_SEED 0
#define DEF_MAX_RT_TIME 500
#define DEF_MAX_DA_COUNT 2
#define DEF_SWS_TIMER 1000
#define DEF_MAX_CWND 0xffff
struct cnic_ctx {
u32 cid;
void *ctx;
dma_addr_t mapping;
};
#define BNX2_MAX_CID 0x2000
struct cnic_dma {
int num_pages;
void **pg_arr;
dma_addr_t *pg_map_arr;
int pgtbl_size;
u32 *pgtbl;
dma_addr_t pgtbl_map;
};
struct cnic_id_tbl {
spinlock_t lock;
u32 start;
u32 max;
u32 next;
unsigned long *table;
};
#define CNIC_KWQ16_DATA_SIZE 128
struct kwqe_16_data {
u8 data[CNIC_KWQ16_DATA_SIZE];
};
struct cnic_iscsi {
struct cnic_dma task_array_info;
struct cnic_dma r2tq_info;
struct cnic_dma hq_info;
};
struct cnic_context {
u32 cid;
struct kwqe_16_data *kwqe_data;
dma_addr_t kwqe_data_mapping;
wait_queue_head_t waitq;
int wait_cond;
unsigned long timestamp;
u32 ctx_flags;
#define CTX_FL_OFFLD_START 0x00000001
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
} proto;
};
struct cnic_local {
spinlock_t cnic_ulp_lock;
void *ulp_handle[MAX_CNIC_ULP_TYPE];
unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
#define ULP_F_INIT 0
#define ULP_F_START 1
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
/* protected by ulp_lock */
u32 cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x00000001
struct cnic_dev *dev;
struct cnic_eth_dev *ethdev;
void *l2_ring;
dma_addr_t l2_ring_map;
int l2_ring_size;
int l2_rx_ring_size;
void *l2_buf;
dma_addr_t l2_buf_map;
int l2_buf_size;
int l2_single_buf_size;
u16 *rx_cons_ptr;
u16 *tx_cons_ptr;
u16 rx_cons;
u16 tx_cons;
u32 kwq_cid_addr;
u32 kcq_cid_addr;
struct cnic_dma kwq_info;
struct kwqe **kwq;
struct cnic_dma kwq_16_data_info;
u16 max_kwq_idx;
u16 kwq_prod_idx;
u32 kwq_io_addr;
u16 *kwq_con_idx_ptr;
u16 kwq_con_idx;
struct cnic_dma kcq_info;
struct kcqe **kcq;
u16 kcq_prod_idx;
u32 kcq_io_addr;
void *status_blk;
struct status_block_msix *bnx2_status_blk;
struct host_status_block *bnx2x_status_blk;
u32 status_blk_num;
u32 int_num;
u32 last_status_idx;
struct tasklet_struct cnic_irq_task;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
struct cnic_sock *csk_tbl;
struct cnic_id_tbl csk_port_tbl;
struct cnic_dma conn_buf_info;
struct cnic_dma gbl_buf_info;
struct cnic_iscsi *iscsi_tbl;
struct cnic_context *ctx_tbl;
struct cnic_id_tbl cid_tbl;
int max_iscsi_conn;
atomic_t iscsi_conn;
/* per connection parameters */
int num_iscsi_tasks;
int num_ccells;
int task_array_size;
int r2tq_size;
int hq_size;
int num_cqs;
struct cnic_ctx *ctx_arr;
int ctx_blks;
int ctx_blk_size;
int cids_per_blk;
u32 chip_id;
int func;
u32 shmem_base;
u32 uio_dev;
struct uio_info *cnic_uinfo;
struct cnic_ops *cnic_ops;
int (*start_hw)(struct cnic_dev *);
void (*stop_hw)(struct cnic_dev *);
void (*setup_pgtbl)(struct cnic_dev *,
struct cnic_dma *);
int (*alloc_resc)(struct cnic_dev *);
void (*free_resc)(struct cnic_dev *);
int (*start_cm)(struct cnic_dev *);
void (*stop_cm)(struct cnic_dev *);
void (*enable_int)(struct cnic_dev *);
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
void (*close_conn)(struct cnic_sock *, u32 opcode);
u16 (*next_idx)(u16);
u16 (*hw_idx)(u16);
};
struct bnx2x_bd_chain_next {
u32 addr_lo;
u32 addr_hi;
u8 reserved[8];
};
#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
#define CDU_REGION_NUMBER_XCM_AG 2
#define CDU_REGION_NUMBER_UCM_AG 4
#endif
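The KWQ_PG()/KWQ_IDX() macros above split a kernel work queue ring index into a page number and an entry offset within that page. A minimal, self-contained sketch of the arithmetic follows; it assumes a 4 KB BCM_PAGE_SIZE (so BCM_PAGE_BITS is 12) and a 32-byte struct kwqe (so KWQE_CNT is 128). This is an illustration only, not driver code.

#include <stdio.h>

/* Assumed values: BCM_PAGE_SIZE = 4096, struct kwqe = 8 x u32 = 32 bytes. */
#define BCM_PAGE_BITS	12
#define KWQE_CNT	128			/* 4096 / 32 */
#define MAX_KWQE_CNT	(KWQE_CNT - 1)
/* Same expressions as in the header above: (BCM_PAGE_BITS - 5) is log2(128). */
#define KWQ_PG(x)	(((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KWQ_IDX(x)	((x) & MAX_KWQE_CNT)

int main(void)
{
	unsigned int x = 300;	/* 300 = 2 * 128 + 44 */

	/* Prints: index 300 -> page 2, entry 44 */
	printf("index %u -> page %u, entry %u\n", x, KWQ_PG(x), KWQ_IDX(x));
	return 0;
}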
/* cnic_if.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_IF_H
#define CNIC_IF_H
#define CNIC_MODULE_VERSION "2.0.0"
#define CNIC_MODULE_RELDATE "May 21, 2009"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
#define CNIC_ULP_L4 2
#define MAX_CNIC_ULP_TYPE_EXT 2
#define MAX_CNIC_ULP_TYPE 3
struct kwqe {
u32 kwqe_op_flag;
#define KWQE_OPCODE_MASK 0x00ff0000
#define KWQE_OPCODE_SHIFT 16
#define KWQE_FLAGS_LAYER_SHIFT 28
#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
u32 kwqe_info4;
u32 kwqe_info5;
u32 kwqe_info6;
};
struct kwqe_16 {
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
};
struct kcqe {
u32 kcqe_info0;
u32 kcqe_info1;
u32 kcqe_info2;
u32 kcqe_info3;
u32 kcqe_info4;
u32 kcqe_info5;
u32 kcqe_info6;
u32 kcqe_op_flag;
#define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
#define KCQE_FLAGS_LAYER_MASK (0x7<<28)
#define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
#define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
#define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
#define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
#define KCQE_FLAGS_NEXT (1<<31)
#define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
#define KCQE_FLAGS_OPCODE_SHIFT (16)
#define KCQE_OPCODE(op) \
(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
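The flag layout above packs the event layer in bits 31:28 and the opcode in bits 23:16 of kcqe_op_flag. A small hypothetical helper (not part of the driver) showing how a consumer could split the word using the macros defined in this header:

/* Hypothetical helper, for illustration only. */
static inline void cnic_split_kcqe_flags(const struct kcqe *kcqe,
					 u32 *layer, u32 *opcode, int *more)
{
	*layer  = kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;	/* e.g. KCQE_FLAGS_LAYER_MASK_L5_ISCSI */
	*opcode = KCQE_OPCODE(kcqe->kcqe_op_flag);		/* bits 23:16 */
	*more   = !!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT);	/* another KCQE follows */
}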
#define MAX_CNIC_CTL_DATA 64
#define MAX_DRV_CTL_DATA 64
#define CNIC_CTL_STOP_CMD 1
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
#define DRV_CTL_CTX_WR_CMD 0x103
#define DRV_CTL_CTXTBL_WR_CMD 0x104
#define DRV_CTL_COMPLETION_CMD 0x105
struct cnic_ctl_completion {
u32 cid;
};
struct drv_ctl_completion {
u32 comp_count;
};
struct cnic_ctl_info {
int cmd;
union {
struct cnic_ctl_completion comp;
char bytes[MAX_CNIC_CTL_DATA];
} data;
};
struct drv_ctl_io {
u32 cid_addr;
u32 offset;
u32 data;
dma_addr_t dma_addr;
};
struct drv_ctl_info {
int cmd;
union {
struct drv_ctl_completion comp;
struct drv_ctl_io io;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
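struct drv_ctl_info is the envelope the CNIC core hands to the ethernet driver's drv_ctl() callback; bnx2_drv_ctl() earlier in this merge services the IO read/write and context write commands. A hypothetical caller-side sketch follows; the helper name and usage are illustrative and are not taken from cnic.c.

/* Hypothetical sketch: ask the ethernet driver to perform an indirect
 * register write on behalf of CNIC. */
static int cnic_drv_reg_wr(struct cnic_eth_dev *ethdev, struct net_device *dev,
			   u32 off, u32 val)
{
	struct drv_ctl_info info = { .cmd = DRV_CTL_IO_WR_CMD };

	info.data.io.offset = off;
	info.data.io.data = val;
	return ethdev->drv_ctl(dev, &info);
}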
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
int (*cnic_handler)(void *, void *);
int (*cnic_ctl)(void *, struct cnic_ctl_info *);
};
#define MAX_CNIC_VEC 8
struct cnic_irq {
unsigned int vector;
void *status_blk;
u32 status_blk_num;
u32 irq_flags;
#define CNIC_IRQ_FL_MSIX 0x00000001
};
struct cnic_eth_dev {
struct module *drv_owner;
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
void __iomem *io_base;
u32 ctx_tbl_offset;
u32 ctx_tbl_len;
int ctx_blk_size;
u32 starting_cid;
u32 max_iscsi_conn;
u32 max_fcoe_conn;
u32 max_rdma_conn;
u32 reserved0[2];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
int (*drv_register_cnic)(struct net_device *,
struct cnic_ops *, void *);
int (*drv_unregister_cnic)(struct net_device *);
int (*drv_submit_kwqes_32)(struct net_device *,
struct kwqe *[], u32);
int (*drv_submit_kwqes_16)(struct net_device *,
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
};
struct cnic_sockaddr {
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} local;
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} remote;
};
struct cnic_sock {
struct cnic_dev *dev;
void *context;
u32 src_ip[4];
u32 dst_ip[4];
u16 src_port;
u16 dst_port;
u16 vlan_id;
unsigned char old_ha[6];
unsigned char ha[6];
u32 mtu;
u32 cid;
u32 l5_cid;
u32 pg_cid;
int ulp_type;
u32 ka_timeout;
u32 ka_interval;
u8 ka_max_probe_count;
u8 tos;
u8 ttl;
u8 snd_seq_scale;
u32 rcv_buf;
u32 snd_buf;
u32 seed;
unsigned long tcp_flags;
#define SK_TCP_NO_DELAY_ACK 0x1
#define SK_TCP_KEEP_ALIVE 0x2
#define SK_TCP_NAGLE 0x4
#define SK_TCP_TIMESTAMP 0x8
#define SK_TCP_SACK 0x10
#define SK_TCP_SEG_SCALING 0x20
unsigned long flags;
#define SK_F_INUSE 0
#define SK_F_OFFLD_COMPLETE 1
#define SK_F_OFFLD_SCHED 2
#define SK_F_PG_OFFLD_COMPLETE 3
#define SK_F_CONNECT_START 4
#define SK_F_IPV6 5
#define SK_F_CLOSING 7
atomic_t ref_count;
u32 state;
struct kwqe kwqe1;
struct kwqe kwqe2;
struct kwqe kwqe3;
};
struct cnic_dev {
struct net_device *netdev;
struct pci_dev *pcidev;
void __iomem *regview;
struct list_head list;
int (*register_device)(struct cnic_dev *dev, int ulp_type,
void *ulp_ctx);
int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes);
int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
u32 num_wqes);
int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
void *);
int (*cm_destroy)(struct cnic_sock *);
int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
int (*cm_abort)(struct cnic_sock *);
int (*cm_close)(struct cnic_sock *);
struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
unsigned long flags;
#define CNIC_F_CNIC_UP 1
#define CNIC_F_BNX2_CLASS 3
#define CNIC_F_BNX2X_CLASS 4
atomic_t ref_count;
u8 mac_addr[6];
int max_iscsi_conn;
int max_fcoe_conn;
int max_rdma_conn;
void *cnic_priv;
};
#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
#define CNIC_RD(dev, off) readl(dev->regview + off)
#define CNIC_RD16(dev, off) readw(dev->regview + off)
struct cnic_ulp_ops {
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
void (*cnic_init)(struct cnic_dev *dev);
void (*cnic_exit)(struct cnic_dev *dev);
void (*cnic_start)(void *ulp_ctx);
void (*cnic_stop)(void *ulp_ctx);
void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
u32 num_cqes);
void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
void (*cm_connect_complete)(struct cnic_sock *);
void (*cm_close_complete)(struct cnic_sock *);
void (*cm_abort_complete)(struct cnic_sock *);
void (*cm_remote_close)(struct cnic_sock *);
void (*cm_remote_abort)(struct cnic_sock *);
void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
struct module *owner;
};
extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
extern int cnic_unregister_driver(int ulp_type);
#endif
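To show how this interface is meant to be consumed, here is a hypothetical sketch of an upper-layer protocol (ULP) module attaching to the CNIC core through cnic_register_driver()/cnic_unregister_driver() declared above. All names prefixed my_ulp_ are illustrative and are not taken from bnx2i.

/* Hypothetical ULP-side sketch, for illustration only. */
#include <linux/module.h>
#include "cnic_if.h"

static void my_ulp_cnic_init(struct cnic_dev *dev)
{
	/* allocate per-device ULP state, then call dev->register_device() */
}

static void my_ulp_cnic_exit(struct cnic_dev *dev)
{
	/* tear down per-device ULP state */
}

static struct cnic_ulp_ops my_ulp_ops = {
	.cnic_init	= my_ulp_cnic_init,
	.cnic_exit	= my_ulp_cnic_exit,
	.owner		= THIS_MODULE,
};

static int __init my_ulp_init(void)
{
	return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
}

static void __exit my_ulp_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}

module_init(my_ulp_init);
module_exit(my_ulp_exit);
MODULE_LICENSE("GPL");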
@@ -11,6 +11,24 @@
 
 #include "zfcp_ext.h"
 
+#define ZFCP_MODEL_PRIV 0x4
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+/**
+ * zfcp_ccw_priv_sch - check if subchannel is privileged
+ * @adapter: Adapter/Subchannel to check
+ */
+int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
+{
+	return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
+}
+
 /**
  * zfcp_ccw_probe - probe function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
@@ -176,8 +194,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 					"ccnoti4", NULL);
 		break;
 	case CIO_BOXED:
-		dev_warn(&adapter->ccw_device->dev,
-			 "The ccw device did not respond in time.\n");
+		dev_warn(&adapter->ccw_device->dev, "The FCP device "
+			 "did not respond within the specified time\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
 		break;
 	}
@@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 	up(&zfcp_data.config_sema);
 }
 
-static struct ccw_device_id zfcp_ccw_device_id[] = {
-	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
-	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
-	{},
-};
-MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
-
 static struct ccw_driver zfcp_ccw_driver = {
 	.owner = THIS_MODULE,
 	.name = "zfcp",
...
@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 	}
 
 	response->fsf_command = fsf_req->fsf_command;
-	response->fsf_reqid = (unsigned long)fsf_req;
+	response->fsf_reqid = fsf_req->req_id;
 	response->fsf_seqno = fsf_req->seq_no;
 	response->fsf_issued = fsf_req->issued;
 	response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
 	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
-	r->fsf_reqid = (unsigned long)fsf_req;
+	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
 	r->s_id = fc_host_port_id(adapter->scsi_host);
 	r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
-	r->fsf_reqid = (unsigned long)fsf_req;
+	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
 	r->s_id = wka_port->d_id;
 	r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
 	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
-	rec->fsf_reqid = (unsigned long)fsf_req;
+	rec->fsf_reqid = fsf_req->req_id;
 	rec->fsf_seqno = fsf_req->seq_no;
 	rec->s_id = s_id;
 	rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
 					  ZFCP_DBF_SCSI_FCP_SNS_INFO);
 		}
 
-		rec->fsf_reqid = (unsigned long)fsf_req;
+		rec->fsf_reqid = fsf_req->req_id;
 		rec->fsf_seqno = fsf_req->seq_no;
 		rec->fsf_issued = fsf_req->issued;
 	}
...
@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
 
 /* zfcp_ccw.c */
 extern int zfcp_ccw_register(void);
+extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
 extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
 
 /* zfcp_cfdc.c */
...
bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o