Commit a50e2cc7 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

parents c7868048 c9526497
@@ -100,7 +100,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
void ibmvscsi_release_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata,
int max_requests);
void ibmvscsi_reset_crq_queue(struct crq_queue *queue,
int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata);
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
@@ -117,9 +117,10 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
*
* no-op for iSeries
*/
void ibmvscsi_reset_crq_queue(struct crq_queue *queue,
int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
{
return 0;
}
/**
@@ -230,6 +230,11 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
if (rc == H_Resource)
/* maybe kexecing and resource is busy. try a reset */
rc = ibmvscsi_reset_crq_queue(queue,
hostdata);
if (rc == 2) {
/* Adapter is good, but other end is not ready */
printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
@@ -281,7 +286,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
* @hostdata: ibmvscsi_host_data of host
*
*/
void ibmvscsi_reset_crq_queue(struct crq_queue *queue,
int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
{
int rc;
@@ -309,4 +314,5 @@ void ibmvscsi_reset_crq_queue(struct crq_queue *queue,
printk(KERN_WARNING
"ibmvscsi: couldn't register crq--rc 0x%x\n", rc);
}
return rc;
}
@@ -664,7 +664,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
sg->offset;
} else
buf = cmd->request_buffer;
memset(cmd->request_buffer, 0, cmd->cmnd[4]);
memset(buf, 0, cmd->cmnd[4]);
if (cmd->use_sg) {
struct scatterlist *sg;
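
A note on the memset() change above: when use_sg is set, cmd->request_buffer points at the scatterlist array rather than at the data, so the bytes to clear have to be reached through the mapped buffer. A rough sketch of the surrounding selection logic, with the kmap details assumed (only the memset() target comes from the hunk):

char *buf;

if (cmd->use_sg) {
	struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
	/* data lives in the sg page; request_buffer is the sg list here */
	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
} else
	buf = cmd->request_buffer;	/* no scatter-gather: request_buffer is the data */

memset(buf, 0, cmd->cmnd[4]);	/* clear via buf, never via request_buffer */
/* (the driver kunmaps the page afterwards when use_sg is set) */
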
@@ -2476,17 +2476,9 @@ typedef struct scsi_qla_host {
*/
#define LOOP_TRANSITION(ha) \
(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
#define LOOP_NOT_READY(ha) \
((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
atomic_read(&ha->loop_state) == LOOP_DOWN)
#define LOOP_RDY(ha) (!LOOP_NOT_READY(ha))
#define TGT_Q(ha, t) (ha->otgt[t])
#define to_qla_host(x) ((scsi_qla_host_t *) (x)->hostdata)
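
Because the qla_def.h hunk above shows the old and new macro bodies interleaved, the consolidated LOOP_TRANSITION() presumably ends up as sketched below; this is a reconstruction from the lines shown, with LOOP_NOT_READY() and LOOP_RDY() dropped and the loop-state test folded in:

/* reconstructed result, not copied verbatim from the header */
#define LOOP_TRANSITION(ha) \
	(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
	 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
	 atomic_read(&ha->loop_state) == LOOP_DOWN)
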
@@ -1259,7 +1259,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
rval = qla2x00_get_adapter_id(ha,
&loop_id, &al_pa, &area, &domain, &topo);
if (rval != QLA_SUCCESS) {
if (LOOP_NOT_READY(ha) || atomic_read(&ha->loop_down_timer) ||
if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
__func__, ha->host_no));
@@ -1796,7 +1796,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
}
if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
if (LOOP_NOT_READY(ha)) {
if (LOOP_TRANSITION(ha)) {
rval = QLA_FUNCTION_FAILED;
} else {
rval = qla2x00_configure_fabric(ha);
@@ -2369,7 +2369,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
if (qla2x00_is_reserved_id(ha, loop_id))
continue;
if (atomic_read(&ha->loop_down_timer) || LOOP_NOT_READY(ha))
if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha))
break;
if (swl != NULL) {
@@ -909,6 +909,21 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
resid = resid_len;
cp->resid = resid;
CMD_RESID_LEN(cp) = resid;
if (!lscsi_status &&
((unsigned)(cp->request_bufflen - resid) <
cp->underflow)) {
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d:%d): Mid-layer underflow "
"detected (%x of %x bytes)...returning "
"error status.\n", ha->host_no,
cp->device->channel, cp->device->id,
cp->device->lun, resid,
cp->request_bufflen);
cp->result = DID_ERROR << 16;
break;
}
}
cp->result = DID_OK << 16 | lscsi_status;
@@ -422,10 +422,15 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
**/
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
struct completion *eh_action;
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s scmd: %p result: %x\n",
__FUNCTION__, scmd, scmd->result));
complete(scmd->device->host->eh_action);
eh_action = scmd->device->host->eh_action;
if (eh_action)
complete(eh_action);
}
/**
@@ -1085,6 +1085,26 @@ static void scsi_generic_done(struct scsi_cmnd *cmd)
scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}
void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
{
struct request *req = cmd->request;
BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
cmd->cmd_len = req->cmd_len;
if (!req->data_len)
cmd->sc_data_direction = DMA_NONE;
else if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
cmd->transfersize = req->data_len;
cmd->allowed = retries;
cmd->timeout_per_command = req->timeout;
}
EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
@@ -1220,18 +1240,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
goto kill;
}
} else {
memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
cmd->cmd_len = req->cmd_len;
if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else if (req->data_len)
cmd->sc_data_direction = DMA_FROM_DEVICE;
else
cmd->sc_data_direction = DMA_NONE;
cmd->transfersize = req->data_len;
cmd->allowed = 3;
cmd->timeout_per_command = req->timeout;
scsi_setup_blk_pc_cmnd(cmd, 3);
cmd->done = scsi_generic_done;
}
}
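
The scsi_setup_blk_pc_cmnd() helper added above centralises the block-PC (REQ_BLOCK_PC / SG_IO) command setup that sd, sr and st each used to open-code, as the hunks that follow show. A minimal sketch of a caller; the driver and its my_intr() completion routine are hypothetical, and only the helper's name and behaviour come from the hunk above:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>

/* hypothetical driver glue; only scsi_setup_blk_pc_cmnd() comes from the patch */
static void my_intr(struct scsi_cmnd *SCpnt);	/* completion callback (assumed) */

static int my_init_command(struct scsi_cmnd *SCpnt)
{
	struct request *rq = SCpnt->request;

	if (!blk_pc_request(rq))
		return 0;	/* not a block-PC request, nothing to set up here */

	/* copies rq->cmd into cmnd, sets cmd_len, the DMA direction,
	 * transfersize, allowed retries and timeout_per_command */
	scsi_setup_blk_pc_cmnd(SCpnt, 3 /* retries */);
	SCpnt->done = my_intr;
	return 1;
}
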
@@ -245,24 +245,10 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
* SG_IO from block layer already setup, just copy cdb basically
*/
if (blk_pc_request(rq)) {
if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
SCpnt->cmd_len = rq->cmd_len;
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = DMA_TO_DEVICE;
else if (rq->data_len)
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
else
SCpnt->sc_data_direction = DMA_NONE;
this_count = rq->data_len;
scsi_setup_blk_pc_cmnd(SCpnt, SD_PASSTHROUGH_RETRIES);
if (rq->timeout)
timeout = rq->timeout;
SCpnt->transfersize = rq->data_len;
SCpnt->allowed = SD_PASSTHROUGH_RETRIES;
goto queue;
}
@@ -320,25 +320,11 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
* these are already setup, just copy cdb basically
*/
if (SCpnt->request->flags & REQ_BLOCK_PC) {
struct request *rq = SCpnt->request;
scsi_setup_blk_pc_cmnd(SCpnt, MAX_RETRIES);
if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
SCpnt->cmd_len = rq->cmd_len;
if (!rq->data_len)
SCpnt->sc_data_direction = DMA_NONE;
else if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = DMA_TO_DEVICE;
else
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
this_count = rq->data_len;
if (rq->timeout)
timeout = rq->timeout;
if (SCpnt->timeout_per_command)
timeout = SCpnt->timeout_per_command;
SCpnt->transfersize = rq->data_len;
goto queue;
}
@@ -4194,27 +4194,10 @@ static void st_intr(struct scsi_cmnd *SCpnt)
*/
static int st_init_command(struct scsi_cmnd *SCpnt)
{
struct request *rq;
if (!(SCpnt->request->flags & REQ_BLOCK_PC))
return 0;
rq = SCpnt->request;
if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
SCpnt->cmd_len = rq->cmd_len;
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = DMA_TO_DEVICE;
else if (rq->data_len)
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
else
SCpnt->sc_data_direction = DMA_NONE;
SCpnt->timeout_per_command = rq->timeout;
SCpnt->transfersize = rq->data_len;
scsi_setup_blk_pc_cmnd(SCpnt, 0);
SCpnt->done = st_intr;
return 1;
}
@@ -1405,7 +1405,6 @@ static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
goal->iu = 0;
goal->dt = 0;
goal->qas = 0;
goal->period = 0;
goal->offset = 0;
return;
}
@@ -1465,7 +1464,8 @@ static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgp
* Many devices implement PPR in a buggy way, so only use it if we
* really want to.
*/
if (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)) {
if (goal->offset &&
(goal->iu || goal->dt || goal->qas || (goal->period < 0xa))) {
nego = NS_PPR;
} else if (spi_width(starget) != goal->width) {
nego = NS_WIDE;
@@ -151,5 +151,6 @@ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
extern void scsi_put_command(struct scsi_cmnd *);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries);
#endif /* _SCSI_SCSI_CMND_H */