Commit ed09441d authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (39 commits)
  [SCSI] sd: fix compile failure with CONFIG_BLK_DEV_INTEGRITY=n
  libiscsi: fix locking in iscsi_eh_device_reset
  libiscsi: check reason why we are stopping iscsi session to determine error value
  [SCSI] iscsi_tcp: return a descriptive error value during connection errors
  [SCSI] libiscsi: rename host reset to target reset
  [SCSI] iscsi class: fix endpoint id handling
  [SCSI] libiscsi: Support drivers initiating session removal
  [SCSI] libiscsi: fix data corruption when target has to resend data-in packets
  [SCSI] sd: Switch kernel printing level for DIF messages
  [SCSI] sd: Correctly handle all combinations of DIF and DIX
  [SCSI] sd: Always print actual protection_type
  [SCSI] sd: Issue correct protection operation
  [SCSI] scsi_error: fix target reset handling
  [SCSI] lpfc 8.2.8 v2 : Add statistical reporting control and additional fc vendor events
  [SCSI] lpfc 8.2.8 v2 : Add sysfs control of target queue depth handling
  [SCSI] lpfc 8.2.8 v2 : Revert target busy in favor of transport disrupted
  [SCSI] scsi_dh_alua: remove REQ_NOMERGE
  [SCSI] lpfc 8.2.8 : update driver version to 8.2.8
  [SCSI] lpfc 8.2.8 : Add MSI-X support
  [SCSI] lpfc 8.2.8 : Update driver to use new Host byte error code DID_TRANSPORT_DISRUPTED
  ...
parents b225ee5b 4c393e6e
@@ -1075,8 +1075,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
    /*
     * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
     */
-   if (bio_rw_ahead(bio) || bio_failfast(bio))
-       req->cmd_flags |= REQ_FAILFAST;
+   if (bio_rw_ahead(bio))
+       req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+                  REQ_FAILFAST_DRIVER);
+   if (bio_failfast_dev(bio))
+       req->cmd_flags |= REQ_FAILFAST_DEV;
+   if (bio_failfast_transport(bio))
+       req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+   if (bio_failfast_driver(bio))
+       req->cmd_flags |= REQ_FAILFAST_DRIVER;

    /*
     * REQ_BARRIER implies no merging, but lets make it explicit
...
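For orientation on the failfast rework shown above: the single failfast bit is split into device, transport and driver classes, and the dm-mpath and md/multipath hunks further down switch their bios to the transport class only. A minimal sketch of what a submitter looks like under the new scheme (hypothetical helper written for this note, using only the flag names visible in this diff):

static void submit_bio_failfast_transport(struct bio *bio)
{
    /* Illustration only: fail fast on transport errors (another path can
     * be tried), while device- and driver-class errors are still retried. */
    bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
    generic_make_request(bio);
}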
@@ -378,6 +378,7 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
    struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

+   iscsi_session_teardown(cls_session);
    iscsi_host_remove(shost);
    iscsi_host_free(shost);
}
@@ -597,7 +598,7 @@ static struct scsi_host_template iscsi_iser_sht = {
    .cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
    .eh_abort_handler       = iscsi_eh_abort,
    .eh_device_reset_handler= iscsi_eh_device_reset,
-   .eh_host_reset_handler  = iscsi_eh_host_reset,
+   .eh_target_reset_handler= iscsi_eh_target_reset,
    .use_clustering         = DISABLE_CLUSTERING,
    .proc_name              = "iscsi_iser",
    .this_id                = -1,
...
@@ -849,7 +849,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
    dm_bio_record(&mpio->details, bio);
    map_context->ptr = mpio;
-   bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+   bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
    r = map_io(m, bio, mpio, 0);
    if (r < 0 || r == DM_MAPIO_REQUEUE)
        mempool_free(mpio, m->mpio_pool);
...
@@ -167,7 +167,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
    mp_bh->bio = *bio;
    mp_bh->bio.bi_sector += multipath->rdev->data_offset;
    mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-   mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
+   mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
    mp_bh->bio.bi_end_io = multipath_end_request;
    mp_bh->bio.bi_private = mp_bh;
    generic_make_request(&mp_bh->bio);
@@ -393,7 +393,7 @@ static void multipathd (mddev_t *mddev)
            *bio = *(mp_bh->master_bio);
            bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
            bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-           bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+           bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
            bio->bi_end_io = multipath_end_request;
            bio->bi_private = mp_bh;
            generic_make_request(bio);
...
@@ -544,7 +544,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
    }
    cqr->retries = DIAG_MAX_RETRIES;
    cqr->buildclk = get_clock();
-   if (req->cmd_flags & REQ_FAILFAST)
+   if (blk_noretry_request(req))
        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->startdev = memdev;
    cqr->memdev = memdev;
...
@@ -1700,7 +1700,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
            recid++;
        }
    }
-   if (req->cmd_flags & REQ_FAILFAST)
+   if (blk_noretry_request(req))
        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->startdev = startdev;
    cqr->memdev = startdev;
...
@@ -355,7 +355,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
            recid++;
        }
    }
-   if (req->cmd_flags & REQ_FAILFAST)
+   if (blk_noretry_request(req))
        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->startdev = memdev;
    cqr->memdev = memdev;
...
@@ -1364,7 +1364,8 @@ EXPORT_SYMBOL(scsi_print_sense);
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
-"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)

static const char * const driverbyte_table[]={
...
@@ -109,7 +109,8 @@ static struct request *get_alua_req(struct scsi_device *sdev,
    }

    rq->cmd_type = REQ_TYPE_BLOCK_PC;
-   rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+   rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+            REQ_FAILFAST_DRIVER;
    rq->retries = ALUA_FAILOVER_RETRIES;
    rq->timeout = ALUA_FAILOVER_TIMEOUT;
...
@@ -303,7 +303,8 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
    rq->cmd[4] = len;
    rq->cmd_type = REQ_TYPE_BLOCK_PC;
-   rq->cmd_flags |= REQ_FAILFAST;
+   rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+            REQ_FAILFAST_DRIVER;
    rq->timeout = CLARIION_TIMEOUT;
    rq->retries = CLARIION_RETRIES;
...
@@ -112,7 +112,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
        return SCSI_DH_RES_TEMP_UNAVAIL;

    req->cmd_type = REQ_TYPE_BLOCK_PC;
-   req->cmd_flags |= REQ_FAILFAST;
+   req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+             REQ_FAILFAST_DRIVER;
    req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
    req->cmd[0] = TEST_UNIT_READY;
    req->timeout = HP_SW_TIMEOUT;
@@ -204,7 +205,8 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
        return SCSI_DH_RES_TEMP_UNAVAIL;

    req->cmd_type = REQ_TYPE_BLOCK_PC;
-   req->cmd_flags |= REQ_FAILFAST;
+   req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+             REQ_FAILFAST_DRIVER;
    req->cmd_len = COMMAND_SIZE(START_STOP);
    req->cmd[0] = START_STOP;
    req->cmd[4] = 1;    /* Start spin cycle */
...
@@ -226,7 +226,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
    }

    rq->cmd_type = REQ_TYPE_BLOCK_PC;
-   rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+   rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+            REQ_FAILFAST_DRIVER;
    rq->retries = RDAC_RETRIES;
    rq->timeout = RDAC_TIMEOUT;
...
@@ -2031,8 +2031,6 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
        spin_unlock_irqrestore(shost->host_lock, flags);
    } else
        ibmvfc_issue_fc_host_lip(shost);
-
-   scsi_target_unblock(&rport->dev);
    LEAVE;
}
...
@@ -523,22 +523,20 @@ iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
}

/**
- * iscsi_data_rsp - SCSI Data-In Response processing
+ * iscsi_data_in - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @task: scsi command task
 **/
static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
{
    struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
    struct iscsi_tcp_task *tcp_task = task->dd_data;
    struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
-   struct iscsi_session *session = conn->session;
-   struct scsi_cmnd *sc = task->sc;
    int datasn = be32_to_cpu(rhdr->datasn);
-   unsigned total_in_length = scsi_in(sc)->length;
+   unsigned total_in_length = scsi_in(task->sc)->length;

-   iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+   iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
    if (tcp_conn->in.datalen == 0)
        return 0;
@@ -558,23 +556,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
        return ISCSI_ERR_DATA_OFFSET;
    }

-   if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
-       sc->result = (DID_OK << 16) | rhdr->cmd_status;
-       conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
-       if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
-                  ISCSI_FLAG_DATA_OVERFLOW)) {
-           int res_count = be32_to_cpu(rhdr->residual_count);
-
-           if (res_count > 0 &&
-               (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
-                res_count <= total_in_length))
-               scsi_in(sc)->resid = res_count;
-           else
-               sc->result = (DID_BAD_TARGET << 16) |
-                   rhdr->cmd_status;
-       }
-   }
-
    conn->datain_pdus_cnt++;
    return 0;
}
@@ -774,7 +755,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
        if (!task)
            rc = ISCSI_ERR_BAD_ITT;
        else
-           rc = iscsi_data_rsp(conn, task);
+           rc = iscsi_data_in(conn, task);
        if (rc) {
            spin_unlock(&conn->session->lock);
            break;
@@ -998,7 +979,7 @@ iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
error:
    debug_tcp("Error receiving PDU, errno=%d\n", rc);
-   iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+   iscsi_conn_failure(conn, rc);
    return 0;
}
@@ -1117,8 +1098,10 @@ iscsi_xmit(struct iscsi_conn *conn)
    while (1) {
        rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
-       if (rc < 0)
+       if (rc < 0) {
+           rc = ISCSI_ERR_XMIT_FAILED;
            goto error;
+       }
        if (rc == 0)
            break;
@@ -1127,7 +1110,7 @@ iscsi_xmit(struct iscsi_conn *conn)
        if (segment->total_copied >= segment->total_size) {
            if (segment->done != NULL) {
                rc = segment->done(tcp_conn, segment);
-               if (rc < 0)
+               if (rc != 0)
                    goto error;
            }
        }
@@ -1142,8 +1125,8 @@ iscsi_xmit(struct iscsi_conn *conn)
    /* Transmit error. We could initiate error recovery
     * here. */
    debug_tcp("Error sending PDU, errno=%d\n", rc);
-   iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-   return rc;
+   iscsi_conn_failure(conn, rc);
+   return -EIO;
}

/**
@@ -1904,6 +1887,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
    struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

    iscsi_r2tpool_free(cls_session->dd_data);
+   iscsi_session_teardown(cls_session);

    iscsi_host_remove(shost);
    iscsi_host_free(shost);
@@ -1927,7 +1911,7 @@ static struct scsi_host_template iscsi_sht = {
    .cmd_per_lun        = ISCSI_DEF_CMD_PER_LUN,
    .eh_abort_handler   = iscsi_eh_abort,
    .eh_device_reset_handler= iscsi_eh_device_reset,
-   .eh_host_reset_handler  = iscsi_eh_host_reset,
+   .eh_target_reset_handler= iscsi_eh_target_reset,
    .use_clustering     = DISABLE_CLUSTERING,
    .slave_configure    = iscsi_tcp_slave_configure,
    .proc_name      = "iscsi_tcp",
...
@@ -34,7 +34,14 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT  2250    /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN      100 /* vport symbolic name length */
+#define LPFC_TGTQ_INTERVAL  40000   /* Min amount of time between tgt
+                       queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT  5   /* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH 100
+#define LPFC_MAX_TGT_QDEPTH 0xFFFF
+
+#define LPFC_MAX_BUCKET_COUNT   20  /* Maximum no. of buckets for stat data
+                       collection. */
/*
 * Following time intervals are used of adjusting SCSI device
 * queue depths when there are driver resource error or Firmware
@@ -49,6 +56,9 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL   5   /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT    30  /* Heart beat timeout in seconds. */

+/* Error Attention event polling interval */
+#define LPFC_ERATT_POLL_INTERVAL    5 /* EATT poll interval in seconds */
+
/* Define macros for 64 bit support */
#define putPaddrLow(addr)    ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr)   ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -60,6 +70,9 @@ struct lpfc_sli2_slim;
#define MAX_HBAEVT  32

+/* Number of MSI-X vectors the driver uses */
+#define LPFC_MSIX_VECTORS   2
+
/* lpfc wait event data ready flag */
#define LPFC_DATA_READY     (1<<0)
@@ -357,6 +370,7 @@ struct lpfc_vport {
    uint32_t cfg_log_verbose;
    uint32_t cfg_max_luns;
    uint32_t cfg_enable_da_id;
+   uint32_t cfg_max_scsicmpl_time;

    uint32_t dev_loss_tmo_changed;
@@ -369,6 +383,8 @@ struct lpfc_vport {
    struct lpfc_debugfs_trc *disc_trc;
    atomic_t disc_trc_cnt;
#endif
+   uint8_t stat_data_enabled;
+   uint8_t stat_data_blocked;
};

struct hbq_s {
@@ -407,10 +423,11 @@ struct lpfc_hba {
    struct lpfc_sli sli;
    uint32_t sli_rev;       /* SLI2 or SLI3 */
    uint32_t sli3_options;      /* Mask of enabled SLI3 options */
-#define LPFC_SLI3_ENABLED       0x01
-#define LPFC_SLI3_HBQ_ENABLED       0x02
-#define LPFC_SLI3_NPIV_ENABLED      0x04
-#define LPFC_SLI3_VPORT_TEARDOWN    0x08
+#define LPFC_SLI3_HBQ_ENABLED       0x01
+#define LPFC_SLI3_NPIV_ENABLED      0x02
+#define LPFC_SLI3_VPORT_TEARDOWN    0x04
+#define LPFC_SLI3_CRP_ENABLED       0x08
+#define LPFC_SLI3_INB_ENABLED       0x10

    uint32_t iocb_cmd_size;
    uint32_t iocb_rsp_size;
@@ -422,10 +439,20 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2   /* Fabric supports NPIV */
#define LS_IGNORE_ERATT       0x4   /* intr handler should ignore ERATT */

-   struct lpfc_sli2_slim *slim2p;
-   struct lpfc_dmabuf hbqslimp;
-   dma_addr_t slim2p_mapping;
+   uint32_t hba_flag;  /* hba generic flags */
+#define HBA_ERATT_HANDLED   0x1 /* This flag is set when eratt handled */
+
+   struct lpfc_dmabuf slim2p;
+
+   MAILBOX_t *mbox;
+   uint32_t *inb_ha_copy;
+   uint32_t *inb_counter;
+   uint32_t inb_last_counter;
+   uint32_t ha_copy;
+   struct _PCB *pcb;
+   struct _IOCB *IOCBs;
+
+   struct lpfc_dmabuf hbqslimp;

    uint16_t pci_cfg_value;
@@ -492,7 +519,7 @@ struct lpfc_hba {
    wait_queue_head_t work_waitq;
    struct task_struct *worker_thread;
-   long data_flags;
+   unsigned long data_flags;

    uint32_t hbq_in_use;        /* HBQs in use flag */
    struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
@@ -514,6 +541,7 @@ struct lpfc_hba {
    void __iomem *HCregaddr;    /* virtual address for host ctl reg */

    struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+   struct lpfc_pgp   *port_gp;
    uint32_t __iomem  *hbq_put;     /* Address in SLIM to HBQ put ptrs */
    uint32_t          *hbq_get;     /* Host mem address of HBQ get ptrs */
@@ -536,6 +564,7 @@ struct lpfc_hba {
    uint8_t soft_wwn_enable;

    struct timer_list fcp_poll_timer;
+   struct timer_list eratt_poll;

    /*
     * stat counters
@@ -565,7 +594,7 @@ struct lpfc_hba {
    struct fc_host_statistics link_stats;
    enum intr_type_t intr_type;
-   struct msix_entry msix_entries[1];
+   struct msix_entry msix_entries[LPFC_MSIX_VECTORS];

    struct list_head port_list;
    struct lpfc_vport *pport;   /* physical lpfc_vport pointer */
@@ -605,6 +634,7 @@ struct lpfc_hba {
    unsigned long last_completion_time;
    struct timer_list hb_tmofunc;
    uint8_t hb_outstanding;
+   enum hba_temp_state over_temp_state;
    /* ndlp reference management */
    spinlock_t ndlp_lock;
    /*
@@ -613,7 +643,19 @@ struct lpfc_hba {
     */
#define QUE_BUFTAG_BIT  (1<<31)
    uint32_t buffer_tag_count;
-   enum hba_temp_state over_temp_state;
+   int wait_4_mlo_maint_flg;
+   wait_queue_head_t wait_4_mlo_m_q;
+   /* data structure used for latency data collection */
+#define LPFC_NO_BUCKET     0
+#define LPFC_LINEAR_BUCKET 1
+#define LPFC_POWER2_BUCKET 2
+   uint8_t  bucket_type;
+   uint32_t bucket_base;
+   uint32_t bucket_step;
+
+/* Maximum number of events that can be outstanding at any time*/
+#define LPFC_MAX_EVT_COUNT 512
+   atomic_t fast_event_count;
};

static inline struct Scsi_Host *
@@ -650,15 +692,25 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
    return;
}

-#define FC_REG_DUMP_EVENT       0x10    /* Register for Dump events */
-#define FC_REG_TEMPERATURE_EVENT    0x20    /* Register for temperature
-                           event */
+static inline void
+lpfc_sli_read_hs(struct lpfc_hba *phba)
+{
+   /*
+    * There was a link/board error. Read the status register to retrieve
+    * the error event and process it.
+    */
+   phba->sli.slistat.err_attn_event++;
+
+   /* Save status info */
+   phba->work_hs = readl(phba->HSregaddr);
+   phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+   phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+
+   /* Clear chip Host Attention error bit */
+   writel(HA_ERATT, phba->HAregaddr);
+   readl(phba->HAregaddr); /* flush */
+   phba->pport->stopped = 1;
+
+   return;
+}

-struct temp_event {
-   uint32_t event_type;
-   uint32_t event_code;
-   uint32_t data;
-};
-#define LPFC_CRIT_TEMP      0x1
-#define LPFC_THRESHOLD_TEMP 0x2
-#define LPFC_NORMAL_TEMP    0x3
@@ -18,7 +18,7 @@
 * included with this package.                                     *
 *******************************************************************/

-typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
+typedef int (*node_filter)(struct lpfc_nodelist *, void *);

struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
@@ -26,11 +26,11 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);

void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
-        struct lpfc_dmabuf *mp);
+int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
+void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -43,7 +43,7 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);

struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
-void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
+void lpfc_cleanup_rpis(struct lpfc_vport *, int);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -135,7 +135,7 @@ void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
-void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
+void lpfc_fdmi_timeout_handler(struct lpfc_vport *);

int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -155,6 +155,8 @@ int lpfc_sli_queue_setup(struct lpfc_hba *);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
irqreturn_t lpfc_intr_handler(int, void *);
+irqreturn_t lpfc_sp_intr_handler(int, void *);
+irqreturn_t lpfc_fp_intr_handler(int, void *);

void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -175,11 +177,12 @@ void lpfc_mem_free(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);

void lpfc_poll_timeout(unsigned long ptr);
-void lpfc_poll_start_timer(struct lpfc_hba * phba);
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
+void lpfc_poll_start_timer(struct lpfc_hba *);
+void lpfc_poll_eratt(unsigned long);
+void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
-void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
-uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
+void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
+uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);

void lpfc_reset_barrier(struct lpfc_hba * phba);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -187,11 +190,13 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
int lpfc_sli_brdrestart(struct lpfc_hba *);
int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_config_port(struct lpfc_hba *, int);
int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+int lpfc_sli_check_eratt(struct lpfc_hba *);
int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
                    struct lpfc_sli_ring *, uint32_t);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -199,6 +204,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
            struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
                 struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -226,17 +232,13 @@ struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
                     struct lpfc_name *);

-int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
-                uint32_t timeout);
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);

-int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
-                struct lpfc_sli_ring * pring,
-                struct lpfc_iocbq * piocb,
-                struct lpfc_iocbq * prspiocbq,
-                uint32_t timeout);
-void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
-                struct lpfc_iocbq * cmdiocb,
-                struct lpfc_iocbq * rspiocb);
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+                struct lpfc_iocbq *, struct lpfc_iocbq *,
+                uint32_t);
+void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
+                struct lpfc_iocbq *);

void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -269,7 +271,7 @@ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
int  lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
-void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
void destroy_port(struct lpfc_vport *);
int lpfc_get_instance(void);
void lpfc_host_attrib_init(struct Scsi_Host *);
@@ -290,6 +292,13 @@ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_adjust_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+void lpfc_scsi_dev_block(struct lpfc_hba *);
+
+void
+lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+               struct lpfc_iocbq *);
+struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
+void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);

#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)

#define HBA_EVENT_RSCN                   5
...
@@ -34,6 +34,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -134,25 +135,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        }
        list_del(&head);
    } else {
-       struct lpfc_iocbq  *next;
-
-       list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
+       INIT_LIST_HEAD(&head);
+       list_add_tail(&head, &piocbq->list);
+       list_for_each_entry(iocbq, &head, list) {
            icmd = &iocbq->iocb;
            if (icmd->ulpBdeCount == 0)
-               lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0);
+               lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
            for (i = 0; i < icmd->ulpBdeCount; i++) {
                paddr = getPaddr(icmd->un.cont64[i].addrHigh,
                         icmd->un.cont64[i].addrLow);
                mp = lpfc_sli_ringpostbuf_get(phba, pring,
                                  paddr);
                size = icmd->un.cont64[i].tus.f.bdeSize;
-               lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
+               lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
                lpfc_in_buf_free(phba, mp);
            }
-           list_del(&iocbq->list);
-           lpfc_sli_release_iocbq(phba, iocbq);
            lpfc_post_buffer(phba, pring, i);
        }
+       list_del(&head);
    }
}
@@ -212,7 +212,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
        else
            list_add_tail(&mp->list, &mlist->list);

-       bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+       bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
        /* build buffer ptr list for IOCB */
        bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
@@ -283,7 +283,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
    icmd->un.genreq64.bdl.ulpIoTag32 = 0;
    icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
    icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
-   icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+   icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
    icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));

    if (usr_flg)
@@ -861,7 +861,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
            retry++;
            lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                    "0216 Retrying NS cmd %x\n", cmdcode);
+                    "0250 Retrying NS cmd %x\n", cmdcode);
            rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
            if (rc == 0)
                goto out;
...
@@ -37,6 +37,7 @@ enum lpfc_work_type {
    LPFC_EVT_KILL,
    LPFC_EVT_ELS_RETRY,
    LPFC_EVT_DEV_LOSS,
+   LPFC_EVT_FASTPATH_MGMT_EVT,
};

/* structure used to queue event to the discovery tasklet */
@@ -47,6 +48,24 @@ struct lpfc_work_evt {
    enum lpfc_work_type   evt;
};

struct lpfc_scsi_check_condition_event;
struct lpfc_scsi_varqueuedepth_event;
struct lpfc_scsi_event_header;
struct lpfc_fabric_event_header;
struct lpfc_fcprdchkerr_event;
/* structure used for sending events from fast path */
struct lpfc_fast_path_event {
struct lpfc_work_evt work_evt;
struct lpfc_vport *vport;
union {
struct lpfc_scsi_check_condition_event check_cond_evt;
struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
struct lpfc_scsi_event_header scsi_evt;
struct lpfc_fabric_event_header fabric_evt;
struct lpfc_fcprdchkerr_event read_check_error;
} un;
};
struct lpfc_nodelist {
    struct list_head nlp_listp;
@@ -88,6 +107,10 @@ struct lpfc_nodelist {
    unsigned long last_ramp_up_time;        /* jiffy of last ramp up */
    unsigned long last_q_full_time;     /* jiffy of last queue full */
    struct kref     kref;
+   atomic_t cmd_pending;
+   uint32_t cmd_qdepth;
+   unsigned long last_change_time;
+   struct lpfc_scsicmd_bkt *lat_data;  /* Latency data */
};

/* Defines for nlp_flag (uint32) */
...
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -39,7 +40,21 @@

#define LPFC_MEM_POOL_SIZE  64      /* max elem in non-DMA safety pool */

/**
* lpfc_mem_alloc: create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
* Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
* lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
* allocation fails, frees all successfully allocated memory before returning.
*
* Returns:
* 0 on success
* -ENOMEM on failure (if any memory allocations fail)
**/
int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
@@ -120,6 +135,16 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
    return -ENOMEM;
}
/**
* lpfc_mem_free: Frees all PCI and memory allocated by lpfc_mem_alloc
* @phba: HBA to free memory for
*
* Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
* lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
* lpfc_nodelist. Also frees the VPI bitmask.
*
* Returns: None
**/
void
lpfc_mem_free(struct lpfc_hba * phba)
{
@@ -181,12 +206,29 @@ lpfc_mem_free(struct lpfc_hba * phba)
    phba->lpfc_scsi_dma_buf_pool = NULL;
    phba->lpfc_mbuf_pool = NULL;

    /* Free the iocb lookup array */
    kfree(psli->iocbq_lookup);
    psli->iocbq_lookup = NULL;
}
/**
* lpfc_mbuf_alloc: Allocate an mbuf from the lpfc_mbuf_pool PCI pool
* @phba: HBA which owns the pool to allocate from
* @mem_flags: indicates if this is a priority (MEM_PRI) allocation
* @handle: used to return the DMA-mapped address of the mbuf
*
* Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
* Allocates from generic pci_pool_alloc function first and if that fails and
* mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
* HBA's pool.
*
* Notes: Not interrupt-safe. Must be called with no locks held. Takes
* phba->hbalock.
*
* Returns:
* pointer to the allocated mbuf on success
* NULL on failure
**/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
@@ -206,6 +248,20 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
    return ret;
}
/**
 * __lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
* @phba: HBA which owns the pool to return to
* @virt: mbuf to free
* @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
*
* Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
* it is below its max_count, frees the mbuf otherwise.
*
* Notes: Must be called with phba->hbalock held to synchronize access to
* lpfc_mbuf_safety_pool.
*
* Returns: None
**/
void
__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
@@ -221,7 +277,21 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
    return;
}
/**
 * lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
* @phba: HBA which owns the pool to return to
* @virt: mbuf to free
* @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
*
* Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
* it is below its max_count, frees the mbuf otherwise.
*
* Notes: Takes phba->hbalock. Can be called with or without other locks held.
*
* Returns: None
**/
void
lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
    unsigned long iflags;
@@ -232,6 +302,19 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
    return;
}
/**
* lpfc_els_hbq_alloc: Allocate an HBQ buffer
* @phba: HBA to allocate HBQ buffer for
*
* Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
 * pool along with a non-DMA-mapped container for it.
*
* Notes: Not interrupt-safe. Must be called with no locks held.
*
* Returns:
* pointer to HBQ on success
* NULL on failure
**/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
@@ -251,6 +334,18 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
    return hbqbp;
}
/**
 * lpfc_els_hbq_free: Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
* @phba: HBA buffer was allocated for
* @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
*
* Description: Frees both the container and the DMA-mapped buffer returned by
* lpfc_els_hbq_alloc.
*
* Notes: Can be called with or without locks held.
*
* Returns: None
**/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
@@ -259,7 +354,18 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
    return;
}

-/* This is ONLY called for the LPFC_ELS_HBQ */
+/**
* lpfc_in_buf_free: Free a DMA buffer
* @phba: HBA buffer is associated with
* @mp: Buffer to free
*
* Description: Frees the given DMA buffer in the appropriate way given if the
* HBA is running in SLI3 mode with HBQs enabled.
*
* Notes: Takes phba->hbalock. Can be called with or without other locks held.
*
* Returns: None
**/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
...
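The kernel-doc added above spells out the mbuf pool contract: lpfc_mbuf_alloc() tries pci_pool_alloc() first and only dips into the driver's safety pool when MEM_PRI is set, and lpfc_mbuf_free() takes phba->hbalock itself. A minimal usage sketch, purely illustrative and assuming only the signatures shown in this diff:

static int lpfc_mbuf_example(struct lpfc_hba *phba)
{
    dma_addr_t phys;
    void *virt;

    /* Illustration only: allocate a DMA-mapped mbuf, allow the safety
     * pool to back the allocation, and return it when done. */
    virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
    if (!virt)
        return -ENOMEM;
    /* ... build a payload at virt, hand phys to the HBA ... */
    lpfc_mbuf_free(phba, virt, phys);
    return 0;
}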
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
/* Event definitions for RegisterForEvent */
#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
#define FC_REG_CT_EVENT 0x0004 /* CT request events */
#define FC_REG_DUMP_EVENT 0x0008 /* Dump events */
#define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */
#define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */
#define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */
#define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */
#define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */
#define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */
#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
FC_REG_RSCN_EVENT | \
FC_REG_CT_EVENT | \
FC_REG_DUMP_EVENT | \
FC_REG_TEMPERATURE_EVENT | \
FC_REG_ELS_EVENT | \
FC_REG_FABRIC_EVENT | \
FC_REG_SCSI_EVENT | \
FC_REG_BOARD_EVENT | \
FC_REG_ADAPTER_EVENT)
/* Temperature events */
#define LPFC_CRIT_TEMP 0x1
#define LPFC_THRESHOLD_TEMP 0x2
#define LPFC_NORMAL_TEMP 0x3
/*
 * All net link event payloads will begin with an event type
* and subcategory. The event type must come first.
* The subcategory further defines the data that follows in the rest
* of the payload. Each category will have its own unique header plus
 * any additional data unique to the subcategory.
* The payload sent via the fc transport is one-way driver->application.
*/
/* els event header */
struct lpfc_els_event_header {
uint32_t event_type;
uint32_t subcategory;
uint8_t wwpn[8];
uint8_t wwnn[8];
};
/* subcategory codes for FC_REG_ELS_EVENT */
#define LPFC_EVENT_PLOGI_RCV 0x01
#define LPFC_EVENT_PRLO_RCV 0x02
#define LPFC_EVENT_ADISC_RCV 0x04
#define LPFC_EVENT_LSRJT_RCV 0x08
/* special els lsrjt event */
struct lpfc_lsrjt_event {
struct lpfc_els_event_header header;
uint32_t command;
uint32_t reason_code;
uint32_t explanation;
};
/* fabric event header */
struct lpfc_fabric_event_header {
uint32_t event_type;
uint32_t subcategory;
uint8_t wwpn[8];
uint8_t wwnn[8];
};
/* subcategory codes for FC_REG_FABRIC_EVENT */
#define LPFC_EVENT_FABRIC_BUSY 0x01
#define LPFC_EVENT_PORT_BUSY 0x02
#define LPFC_EVENT_FCPRDCHKERR 0x04
/* special case fabric fcprdchkerr event */
struct lpfc_fcprdchkerr_event {
struct lpfc_fabric_event_header header;
uint32_t lun;
uint32_t opcode;
uint32_t fcpiparam;
};
/* scsi event header */
struct lpfc_scsi_event_header {
uint32_t event_type;
uint32_t subcategory;
uint32_t lun;
uint8_t wwpn[8];
uint8_t wwnn[8];
};
/* subcategory codes for FC_REG_SCSI_EVENT */
#define LPFC_EVENT_QFULL 0x0001
#define LPFC_EVENT_DEVBSY 0x0002
#define LPFC_EVENT_CHECK_COND 0x0004
#define LPFC_EVENT_LUNRESET 0x0008
#define LPFC_EVENT_TGTRESET 0x0010
#define LPFC_EVENT_BUSRESET 0x0020
#define LPFC_EVENT_VARQUEDEPTH 0x0040
/* special case scsi varqueuedepth event */
struct lpfc_scsi_varqueuedepth_event {
struct lpfc_scsi_event_header scsi_event;
uint32_t oldval;
uint32_t newval;
};
/* special case scsi check condition event */
struct lpfc_scsi_check_condition_event {
struct lpfc_scsi_event_header scsi_event;
uint8_t sense_key;
uint8_t asc;
uint8_t ascq;
};
/* event codes for FC_REG_BOARD_EVENT */
#define LPFC_EVENT_PORTINTERR 0x01
/* board event header */
struct lpfc_board_event_header {
uint32_t event_type;
uint32_t subcategory;
};
/* event codes for FC_REG_ADAPTER_EVENT */
#define LPFC_EVENT_ARRIVAL 0x01
/* adapter event header */
struct lpfc_adapter_event_header {
uint32_t event_type;
uint32_t subcategory;
};
/* event codes for temp_event */
#define LPFC_CRIT_TEMP 0x1
#define LPFC_THRESHOLD_TEMP 0x2
#define LPFC_NORMAL_TEMP 0x3
struct temp_event {
uint32_t event_type;
uint32_t event_code;
uint32_t data;
};
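The comment above pins down the payload layout: every event begins with event_type and subcategory, followed by the category-specific fields. A hedged sketch of filling one of these payloads (hypothetical helper for illustration only; how the driver actually posts the event to the fc transport lives in the collapsed lpfc diffs and is not shown here):

static void example_fill_rdchkerr(struct lpfc_fcprdchkerr_event *evt,
                  const uint8_t *wwpn, const uint8_t *wwnn,
                  uint32_t lun, uint32_t opcode)
{
    /* Illustration only: the event type always comes first. */
    evt->header.event_type = FC_REG_FABRIC_EVENT;
    evt->header.subcategory = LPFC_EVENT_FCPRDCHKERR;
    memcpy(evt->header.wwpn, wwpn, 8);
    memcpy(evt->header.wwnn, wwnn, 8);
    evt->lun = lun;
    evt->opcode = opcode;
    evt->fcpiparam = 0;
}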
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -1003,20 +1004,8 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
        spin_unlock_irq(shost->host_lock);
-       if (vport->num_disc_nodes) {
+       if (vport->num_disc_nodes)
            lpfc_more_adisc(vport);
-           if ((vport->num_disc_nodes == 0) &&
-               (vport->fc_npr_cnt))
-               lpfc_els_disc_plogi(vport);
-           if (vport->num_disc_nodes == 0) {
-               spin_lock_irq(shost->host_lock);
-               vport->fc_flag &= ~FC_NDISC_ACTIVE;
-               spin_unlock_irq(shost->host_lock);
-               lpfc_can_disctmo(vport);
-               lpfc_end_rscn(vport);
-           }
-       }
    }
    return ndlp->nlp_state;
}
@@ -1865,8 +1854,13 @@ static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
            void *arg, uint32_t evt)
{
+   struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+   if (ndlp->nlp_DID == Fabric_DID) {
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+       spin_unlock_irq(shost->host_lock);
+   }
    lpfc_unreg_rpi(vport, ndlp);
-   /* This routine does nothing, just return the current state */
    return ndlp->nlp_state;
}
@@ -2155,7 +2149,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_nlp_put(ndlp);
    } else {
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-           "0212 DSM out state %d on NPort free\n", rc);
+           "0213 DSM out state %d on NPort free\n", rc);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
            "DSM out: ste:%d did:x%x flg:x%x",
...
@@ -107,6 +107,10 @@ struct fcp_cmnd {
};

+struct lpfc_scsicmd_bkt {
+   uint32_t cmd_count;
+};
+
struct lpfc_scsi_buf {
    struct list_head list;
    struct scsi_cmnd *pCmd;
@@ -139,6 +143,7 @@ struct lpfc_scsi_buf {
     */
    struct lpfc_iocbq cur_iocbq;
    wait_queue_head_t *waitq;
+   unsigned long start_time;
};

#define LPFC_SCSI_DMA_EXT_SIZE 264
...
@@ -233,6 +233,7 @@ struct lpfc_sli {
#define LPFC_SLI2_ACTIVE          0x200 /* SLI2 overlay in firmware is active */
#define LPFC_PROCESS_LA           0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO        0x800 /* Don't allow mgmt mbx or iocb cmds */
+#define LPFC_MENLO_MAINT          0x1000 /* need for menl fw download */

    struct lpfc_sli_ring ring[LPFC_MAX_RING];
    int fcp_ring;   /* ring used for FCP initiator commands */
...
...@@ -18,9 +18,11 @@ ...@@ -18,9 +18,11 @@
* included with this package. * * included with this package. *
*******************************************************************/ *******************************************************************/
#define LPFC_DRIVER_VERSION "8.2.7" #define LPFC_DRIVER_VERSION "8.2.8"
#define LPFC_DRIVER_NAME "lpfc" #define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION LPFC_DRIVER_VERSION
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h" #include "lpfc_hw.h"
#include "lpfc_sli.h" #include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h" #include "lpfc_disc.h"
#include "lpfc_scsi.h" #include "lpfc_scsi.h"
#include "lpfc.h" #include "lpfc.h"
...@@ -204,6 +205,77 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport) ...@@ -204,6 +205,77 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
return 1; return 1;
} }
/**
* lpfc_discovery_wait: Wait for driver discovery to quiesce.
* @vport: The virtual port for which this call is being executed.
*
* The driver calls this routine from lpfc_vport_delete to enforce
* synchronous execution of the vport delete relative to discovery
* activity: lpfc_vport_delete should not return until it can
* reasonably guarantee that discovery has quiesced. After the FDISC
* LOGO, the driver must wait until SAN teardown is complete and all
* resources have been recovered before allowing cleanup.
*
* This routine does not require any locks to be held.
**/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
uint32_t wait_flags = 0;
unsigned long wait_time_max;
unsigned long start_time;
wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
/*
* The time constraint on this loop is a balance between the fabric
* RA_TOV value and dev_loss tmo. The driver's devloss_tmo is 10,
* giving this loop at least a 3x multiplier.
*/
wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
wait_time_max += jiffies;
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
if ((vport->num_disc_nodes > 0) ||
(vport->fc_flag & wait_flags) ||
((vport->port_state > LPFC_VPORT_FAILED) &&
(vport->port_state < LPFC_VPORT_READY))) {
lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
"1833 Vport discovery quiesce Wait:"
" vpi x%x state x%x fc_flags x%x"
" num_nodes x%x, waiting 1000 msecs"
" total wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag, vport->num_disc_nodes,
jiffies_to_msecs(jiffies - start_time));
msleep(1000);
} else {
/* Base case. Wait variants satisfied. Break out */
lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
"1834 Vport discovery quiesced:"
" vpi x%x state x%x fc_flags x%x"
" wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag,
jiffies_to_msecs(jiffies
- start_time));
break;
}
}
if (time_after(jiffies, wait_time_max))
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1835 Vport discovery quiesce failed:"
" vpi x%x state x%x fc_flags x%x"
" wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
}
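For a sense of scale, here is the arithmetic behind wait_time_max as a minimal worked example; the fc_ratov value of 10 seconds is assumed purely for illustration (the real value comes from the fabric/adapter settings), and this snippet is not part of the patch:
/* Illustrative only -- assume phba->fc_ratov == 10 for the arithmetic. */
unsigned long budget_ms = ((10 * 3) + 3) * 1000;	/* 33,000 ms */
/* The loop above sleeps 1000 ms per pass, so discovery gets roughly
 * 33 polls before the "1835" quiesce-failed message is logged. */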
int int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable) lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{ {
...@@ -506,8 +578,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport) ...@@ -506,8 +578,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* initiated after we've disposed of all other resources associated * initiated after we've disposed of all other resources associated
* with the port. * with the port.
*/ */
if (!scsi_host_get(shost) || !scsi_host_get(shost)) if (!scsi_host_get(shost))
return VPORT_INVAL;
if (!scsi_host_get(shost)) {
scsi_host_put(shost);
return VPORT_INVAL; return VPORT_INVAL;
}
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING; vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
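The point of splitting the two scsi_host_get() calls above is error-path bookkeeping; the note below is my reading of the hunk rather than text from the patch:
/* Old form: if the first scsi_host_get() succeeds and the second fails,
 * the first reference is never released.  New form: the second failure
 * path does scsi_host_put() on the reference already taken before
 * returning VPORT_INVAL, so no reference can leak. */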
...@@ -597,11 +673,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) ...@@ -597,11 +673,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
} }
vport->unreg_vpi_cmpl = VPORT_INVAL; vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000); timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
goto skip_logo;
if (!lpfc_issue_els_npiv_logo(vport, ndlp)) if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout); timeout = schedule_timeout(timeout);
} }
if (!(phba->pport->load_flag & FC_UNLOADING))
lpfc_discovery_wait(vport);
skip_logo: skip_logo:
lpfc_cleanup(vport); lpfc_cleanup(vport);
lpfc_sli_host_down(vport); lpfc_sli_host_down(vport);
...@@ -615,8 +696,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport) ...@@ -615,8 +696,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
* does the scsi_host_put() to release the vport. * does the scsi_host_put() to release the vport.
*/ */
lpfc_mbx_unreg_vpi(vport); if (lpfc_mbx_unreg_vpi(vport))
} scsi_host_put(shost);
} else
scsi_host_put(shost);
lpfc_free_vpi(phba, vport->vpi); lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0; vport->work_port_events = 0;
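The extra shost reference taken earlier in lpfc_vport_delete must be dropped exactly once; the sketch below is an interpretation of the hunk above, not text from the patch:
/* Who releases the extra shost reference:
 *  - unreg_vpi mailbox issued successfully -> lpfc_mbx_cmpl_unreg_vpi()
 *  - lpfc_mbx_unreg_vpi() fails            -> scsi_host_put() here, since
 *                                             the completion never runs
 *  - unreg_vpi path not taken at all       -> scsi_host_put() in the else
 */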
...@@ -663,3 +746,82 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) ...@@ -663,3 +746,82 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
scsi_host_put(lpfc_shost_from_vport(vports[i])); scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports); kfree(vports);
} }
/**
* lpfc_vport_reset_stat_data: Reset the statistical data for the vport.
* @vport: Pointer to vport object.
*
* This function resets the statistical data for the vport. It is
* called with the host_lock held.
**/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
if (ndlp->lat_data)
memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
sizeof(struct lpfc_scsicmd_bkt));
}
}
/**
* lpfc_alloc_bucket: Allocate the data buffers used for collecting
* statistical data.
* @vport: Pointer to vport object.
*
* This function allocates the data buffers required for all the FC
* nodes of the vport to collect statistical data.
**/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
sizeof(struct lpfc_scsicmd_bkt),
GFP_ATOMIC);
if (!ndlp->lat_data)
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0287 lpfc_alloc_bucket failed to "
"allocate statistical data buffer DID "
"0x%x\n", ndlp->nlp_DID);
}
}
}
/**
* lpfc_free_bucket: Free the data buffers used for collecting
* statistical data.
* @vport: Pointer to vport object.
*
* This function frees the statistical data buffers of all the FC
* nodes of the vport.
**/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;
}
}
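Taken together, the three helpers form a small lifecycle for the per-node latency buckets. A minimal usage sketch follows; the calling context is assumed (the real callers live in diffs not shown here), and the host_lock around the reset follows the kernel-doc above:
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

lpfc_alloc_bucket(vport);		/* allocate bucket arrays for mapped nodes */

spin_lock_irq(shost->host_lock);
lpfc_vport_reset_stat_data(vport);	/* zero the counters under host_lock */
spin_unlock_irq(shost->host_lock);

/* ... run I/O and collect latency statistics ... */

lpfc_free_bucket(vport);		/* release the per-node bucket arrays */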
...@@ -112,4 +112,8 @@ struct vport_cmd_tag { ...@@ -112,4 +112,8 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport, void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state); enum fc_vport_state new_state);
void lpfc_vport_reset_stat_data(struct lpfc_vport *);
void lpfc_alloc_bucket(struct lpfc_vport *);
void lpfc_free_bucket(struct lpfc_vport *);
#endif /* H_LPFC_VPORT */ #endif /* H_LPFC_VPORT */
...@@ -1006,7 +1006,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) ...@@ -1006,7 +1006,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
} }
qla2x00_abort_fcport_cmds(fcport); qla2x00_abort_fcport_cmds(fcport);
scsi_target_unblock(&rport->dev);
} }
static int static int
......
...@@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, ...@@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
ha->host_no, cmd->device->channel, ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun)); cmd->device->id, cmd->device->lun));
cmd->result = DID_BUS_BUSY << 16; cmd->result = DID_TRANSPORT_DISRUPTED << 16;
/* /*
* Mark device missing so that we won't continue to send * Mark device missing so that we won't continue to send
...@@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, ...@@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
qla4xxx_mark_device_missing(ha, ddb_entry); qla4xxx_mark_device_missing(ha, ddb_entry);
cmd->result = DID_BUS_BUSY << 16; cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break; break;
case SCS_QUEUE_FULL: case SCS_QUEUE_FULL:
......
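For context on the << 16 in the lines above: the SCSI midlayer packs the host byte into bits 16-23 of cmd->result, which is why the driver shifts its status code rather than assigning it directly. This is general midlayer convention, not something introduced by this patch:
/* cmd->result layout (SCSI midlayer convention):
 *   driver_byte << 24 | host_byte << 16 | msg_byte << 8 | status_byte
 */
cmd->result = DID_TRANSPORT_DISRUPTED << 16;	/* set the host byte only */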
...@@ -59,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost, ...@@ -59,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q); struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q, int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q); struct list_head *done_q);
int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */ /* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev); extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
......