Commit 2b541f8f authored by Dave C Boutcher, committed by James Bottomley

[SCSI] ibmvscsi: handle re-enable firmware message

New versions of the Power5 firmware can send a "re-enable" message to
the virtual scsi adapter.  This fix makes us handle the message
correctly.  Without it, the driver goes catatonic and the system crashes
unpleasantly.
Signed-off-by: Dave Boutcher <sleddog@us.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 2dbb04c6
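
In outline, the new handling of the 0xFF ("connection closed") CRQ message works as follows. This is a condensed paraphrase of the ibmvscsi.c hunks further down, not additional driver code:

	/* Condensed sketch of the new 0xFF handling in ibmvscsi_handle_crq() */
	case 0xFF:	/* hypervisor closed the connection */
		scsi_block_requests(hostdata->host);	/* hold off new commands */
		if (crq->format == 0x06) {
			/* firmware requested a re-enable: requeue in-flight I/O */
			purge_requests(hostdata, DID_REQUEUE);
			if (ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata) == 0)
				ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
		} else {
			/* hard failure: fail in-flight I/O and reset the queue */
			atomic_set(&hostdata->request_limit, -1);
			purge_requests(hostdata, DID_ERROR);
			ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
		}
		scsi_unblock_requests(hostdata->host);
		return;

The distinction between the two purge modes matters: DID_REQUEUE makes the SCSI midlayer retry the purged commands transparently once the connection is back, while DID_ERROR fails them back up the stack.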
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -87,7 +87,7 @@ static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = 50;
 
-#define IBMVSCSI_VERSION "1.5.7"
+#define IBMVSCSI_VERSION "1.5.8"
 
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
@@ -534,7 +534,6 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 				   struct ibmvscsi_host_data *hostdata)
 {
-	struct scsi_cmnd *cmnd;
 	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
 	int rc;
 
@@ -544,19 +543,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	 * can handle more requests (can_queue) when we actually can't
 	 */
 	if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
-	    (atomic_dec_if_positive(&hostdata->request_limit) < 0)) {
-		/* See if the adapter is disabled */
-		if (atomic_read(&hostdata->request_limit) < 0)
-			goto send_error;
-
-		printk(KERN_WARNING
-		       "ibmvscsi: Warning, request_limit exceeded\n");
-		unmap_cmd_data(&evt_struct->iu.srp.cmd,
-			       evt_struct,
-			       hostdata->dev);
-		free_event_struct(&hostdata->pool, evt_struct);
-		return SCSI_MLQUEUE_HOST_BUSY;
-	}
+	    (atomic_dec_if_positive(&hostdata->request_limit) < 0))
+		goto send_error;
 
 	/* Copy the IU into the transfer area */
 	*evt_struct->xfer_iu = evt_struct->iu;
@@ -572,7 +560,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	    ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
 		list_del(&evt_struct->list);
 
-		printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n",
+		printk(KERN_ERR "ibmvscsi: send error %d\n",
 		       rc);
 		goto send_error;
 	}
@@ -582,14 +570,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
  send_error:
 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
-	if ((cmnd = evt_struct->cmnd) != NULL) {
-		cmnd->result = DID_ERROR << 16;
-		evt_struct->cmnd_done(cmnd);
-	} else if (evt_struct->done)
-		evt_struct->done(evt_struct);
-
 	free_event_struct(&hostdata->pool, evt_struct);
-	return 0;
+	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
 /**
@@ -802,7 +784,8 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	case SRP_LOGIN_RSP_TYPE:	/* it worked! */
 		break;
 	case SRP_LOGIN_REJ_TYPE:	/* refused! */
-		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REQ rejected\n");
+		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
+		       evt_struct->xfer_iu->srp.login_rej.reason);
 		/* Login failed.  */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
@@ -834,6 +817,9 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 		return;
 	}
 
+	/* If we had any pending I/Os, kick them */
+	scsi_unblock_requests(hostdata->host);
+
 	send_mad_adapter_info(hostdata);
 	return;
 }
@@ -862,6 +848,7 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 					init_timeout * HZ);
 
 	login = &evt_struct->iu.srp.login_req;
+	memset(login, 0x00, sizeof(struct srp_login_req));
 	login->type = SRP_LOGIN_REQ_TYPE;
 	login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
 	login->required_buffer_formats = 0x0006;
@@ -1122,7 +1109,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
  * purge_requests: Our virtual adapter just shut down.  purge any sent requests
  * @hostdata:    the adapter
  */
-static void purge_requests(struct ibmvscsi_host_data *hostdata)
+static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 {
 	struct srp_event_struct *tmp_evt, *pos;
 	unsigned long flags;
@@ -1131,7 +1118,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
 	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
 		list_del(&tmp_evt->list);
 		if (tmp_evt->cmnd) {
-			tmp_evt->cmnd->result = (DID_ERROR << 16);
+			tmp_evt->cmnd->result = (error_code << 16);
 			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
 				       tmp_evt,
 				       tmp_evt->hostdata->dev);
@@ -1186,12 +1173,30 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
 		}
 		return;
-	case 0xFF:	/* Hypervisor telling us the connection is closed */
-		printk(KERN_INFO "ibmvscsi: Virtual adapter failed!\n");
-
-		atomic_set(&hostdata->request_limit, -1);
-		purge_requests(hostdata);
-		ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+	case 0xFF:	/* Hypervisor telling us the connection is closed */
+		scsi_block_requests(hostdata->host);
+		if (crq->format == 0x06) {
+			/* We need to re-setup the interpartition connection */
+			printk(KERN_INFO
+			       "ibmvscsi: Re-enabling adapter!\n");
+			purge_requests(hostdata, DID_REQUEUE);
+			if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
+							hostdata) == 0)
+				if (ibmvscsi_send_crq(hostdata,
+						      0xC001000000000000LL, 0))
+					printk(KERN_ERR
+					       "ibmvscsi: transmit error after"
+					       " enable\n");
+		} else {
+			printk(KERN_INFO
+			       "ibmvscsi: Virtual adapter failed rc %d!\n",
+			       crq->format);
+
+			atomic_set(&hostdata->request_limit, -1);
+			purge_requests(hostdata, DID_ERROR);
+			ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+		}
+		scsi_unblock_requests(hostdata->host);
 		return;
 	case 0x80:		/* real payload */
 		break;
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -103,6 +103,9 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
 int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
 			     struct ibmvscsi_host_data *hostdata);
 
+int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+				struct ibmvscsi_host_data *hostdata);
+
 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			 struct ibmvscsi_host_data *hostdata);
 int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -123,6 +123,19 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
 	return 0;
 }
 
+/**
+ * reenable_crq_queue: - reenables a crq after a failure
+ * @queue:	crq_queue to initialize and register
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ * no-op for iSeries
+ */
+int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+				struct ibmvscsi_host_data *hostdata)
+{
+	return 0;
+}
+
 /**
  * ibmvscsi_send_crq: - Send a CRQ
  * @hostdata:	the adapter
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -280,6 +280,28 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	return -1;
 }
 
+/**
+ * reenable_crq_queue: - reenables a crq after a failure
+ * @queue:	crq_queue to initialize and register
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ */
+int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+				struct ibmvscsi_host_data *hostdata)
+{
+	int rc;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	/* Re-enable the CRQ */
+	do {
+		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+	} while ((rc == H_InProgress) || (rc == H_Busy) || (H_isLongBusy(rc)));
+
+	if (rc)
+		printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);
+
+	return rc;
+}
+
 /**
  * reset_crq_queue: - resets a crq after a failure
  * @queue:	crq_queue to initialize and register
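
The busy-retry loop in ibmvscsi_reenable_crq_queue() is the usual idiom for PAPR hypervisor calls, which can return transient busy codes before an operation completes. A generic version might look like the sketch below (illustrative only; hcall_retry is not part of the driver):

	/* Illustrative helper, not driver code: repeat an hcall while the
	 * hypervisor reports a transient busy condition. */
	static long hcall_retry(unsigned long opcode, unsigned long arg)
	{
		long rc;

		do {
			rc = plpar_hcall_norets(opcode, arg);
		} while (rc == H_InProgress || rc == H_Busy || H_isLongBusy(rc));

		return rc;	/* 0 (H_Success) on success */
	}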