Commit b6faaaf7 authored by Quinn Tran, committed by Martin K. Petersen

scsi: qla2xxx: Serialize mailbox request

For driver MBX submission, use mbox_busy to serialize requests.  For user-space
MBX submission, use the optrom mutex to serialize requests.
Signed-off-by: Quinn Tran <quinn.tran@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 8852f5b1
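
Every user-space entry point touched below follows the same pattern: take ha->optrom_mutex, re-check that the chip is up while holding it, and drop the mutex on every exit path. A minimal sketch of that pattern, assuming a simplified handler (the handler name and do_hardware_access() are hypothetical placeholders, not functions from the driver):

    /* Sketch only: illustrates the locking pattern, not actual driver code. */
    static ssize_t example_sysfs_op(scsi_qla_host_t *vha, char *buf,
                                    size_t count)
    {
            struct qla_hw_data *ha = vha->hw;
            ssize_t ret;

            mutex_lock(&ha->optrom_mutex);
            /* Re-check under the mutex: a reset may have started meanwhile. */
            if (qla2x00_chip_is_down(vha)) {
                    mutex_unlock(&ha->optrom_mutex);
                    return -EAGAIN;
            }

            ret = do_hardware_access(vha, buf, count);  /* hypothetical helper */

            mutex_unlock(&ha->optrom_mutex);
            return ret;
    }

The diff below applies this shape to each sysfs handler and, in the second half, makes the mailbox path reject a request while another one owns the mailbox.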
@@ -158,9 +158,17 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
     if (!capable(CAP_SYS_ADMIN))
         return 0;
+    mutex_lock(&ha->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&ha->optrom_mutex);
+        return -EAGAIN;
+    }
     if (IS_NOCACHE_VPD_TYPE(ha))
         ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
             ha->nvram_size);
+    mutex_unlock(&ha->optrom_mutex);
     return memory_read_from_buffer(buf, count, &off, ha->nvram,
         ha->nvram_size);
 }
@@ -208,10 +216,17 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
         return -EAGAIN;
     }
+    mutex_lock(&ha->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&vha->hw->optrom_mutex);
+        return -EAGAIN;
+    }
     /* Write NVRAM. */
     ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
     ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
         count);
+    mutex_unlock(&ha->optrom_mutex);
     ql_dbg(ql_dbg_user, vha, 0x7060,
         "Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +337,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
         size = ha->optrom_size - start;
     mutex_lock(&ha->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&ha->optrom_mutex);
+        return -EAGAIN;
+    }
     switch (val) {
     case 0:
         if (ha->optrom_state != QLA_SREADING &&
@@ -499,8 +518,14 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
             qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
             faddr = ha->flt_region_vpd_sec << 2;
+        mutex_lock(&ha->optrom_mutex);
+        if (qla2x00_chip_is_down(vha)) {
+            mutex_unlock(&ha->optrom_mutex);
+            return -EAGAIN;
+        }
         ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
             ha->vpd_size);
+        mutex_unlock(&ha->optrom_mutex);
     }
     return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
 }
@@ -518,9 +543,6 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
     if (unlikely(pci_channel_offline(ha->pdev)))
         return 0;
-    if (qla2x00_chip_is_down(vha))
-        return 0;
     if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
         !ha->isp_ops->write_nvram)
         return 0;
@@ -531,16 +553,25 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
         return -EAGAIN;
     }
+    mutex_lock(&ha->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&ha->optrom_mutex);
+        return -EAGAIN;
+    }
     /* Write NVRAM. */
     ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
     ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
     /* Update flash version information for 4Gb & above. */
-    if (!IS_FWI2_CAPABLE(ha))
+    if (!IS_FWI2_CAPABLE(ha)) {
+        mutex_unlock(&ha->optrom_mutex);
         return -EINVAL;
+    }
     tmp_data = vmalloc(256);
     if (!tmp_data) {
+        mutex_unlock(&ha->optrom_mutex);
         ql_log(ql_log_warn, vha, 0x706b,
             "Unable to allocate memory for VPD information update.\n");
         return -ENOMEM;
@@ -548,6 +579,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
     ha->isp_ops->get_flash_version(vha, tmp_data);
     vfree(tmp_data);
+    mutex_unlock(&ha->optrom_mutex);
     return count;
 }
@@ -573,10 +606,15 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
     if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
         return 0;
-    if (qla2x00_chip_is_down(vha))
+    mutex_lock(&vha->hw->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         return 0;
+    }
     rval = qla2x00_read_sfp_dev(vha, buf, count);
+    mutex_unlock(&vha->hw->optrom_mutex);
     if (rval)
         return -EIO;
@@ -785,9 +823,11 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
     if (unlikely(pci_channel_offline(ha->pdev)))
         return 0;
-    if (qla2x00_chip_is_down(vha))
+    mutex_lock(&vha->hw->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         return 0;
+    }
     if (ha->xgmac_data)
         goto do_read;
@@ -795,6 +835,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
     ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
         &ha->xgmac_data_dma, GFP_KERNEL);
     if (!ha->xgmac_data) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         ql_log(ql_log_warn, vha, 0x7076,
             "Unable to allocate memory for XGMAC read-data.\n");
         return 0;
@@ -806,6 +847,8 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
     rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
         XGMAC_DATA_SIZE, &actual_size);
+    mutex_unlock(&vha->hw->optrom_mutex);
     if (rval != QLA_SUCCESS) {
         ql_log(ql_log_warn, vha, 0x7077,
             "Unable to read XGMAC data (%x).\n", rval);
@@ -842,13 +885,16 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
     if (ha->dcbx_tlv)
         goto do_read;
-    if (qla2x00_chip_is_down(vha))
+    mutex_lock(&vha->hw->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         return 0;
+    }
     ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
         &ha->dcbx_tlv_dma, GFP_KERNEL);
     if (!ha->dcbx_tlv) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         ql_log(ql_log_warn, vha, 0x7078,
             "Unable to allocate memory for DCBX TLV read-data.\n");
         return -ENOMEM;
@@ -859,6 +905,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
     rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
         DCBX_TLV_DATA_SIZE);
+    mutex_unlock(&vha->hw->optrom_mutex);
     if (rval != QLA_SUCCESS) {
         ql_log(ql_log_warn, vha, 0x7079,
             "Unable to read DCBX TLV (%x).\n", rval);
@@ -1184,15 +1233,17 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
     if (IS_QLA2100(ha) || IS_QLA2200(ha))
         return -EPERM;
+    if (sscanf(buf, "%d", &val) != 1)
+        return -EINVAL;
+    mutex_lock(&vha->hw->optrom_mutex);
     if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         ql_log(ql_log_warn, vha, 0x707a,
             "Abort ISP active -- ignoring beacon request.\n");
         return -EBUSY;
     }
-    if (sscanf(buf, "%d", &val) != 1)
-        return -EINVAL;
     if (val)
         rval = ha->isp_ops->beacon_on(vha);
     else
@@ -1201,6 +1252,8 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
     if (rval != QLA_SUCCESS)
         count = 0;
+    mutex_unlock(&vha->hw->optrom_mutex);
     return count;
 }
@@ -1370,18 +1423,24 @@ qla2x00_thermal_temp_show(struct device *dev,
 {
     scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
     uint16_t temp = 0;
+    int rc;
+    mutex_lock(&vha->hw->optrom_mutex);
     if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
         goto done;
     }
     if (vha->hw->flags.eeh_busy) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
         goto done;
     }
-    if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+    rc = qla2x00_get_thermal_temp(vha, &temp);
+    mutex_unlock(&vha->hw->optrom_mutex);
+    if (rc == QLA_SUCCESS)
         return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
 done:
@@ -1402,13 +1461,24 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
         return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
     }
-    if (qla2x00_chip_is_down(vha))
+    mutex_lock(&vha->hw->optrom_mutex);
+    if (qla2x00_chip_is_down(vha)) {
+        mutex_unlock(&vha->hw->optrom_mutex);
         ql_log(ql_log_warn, vha, 0x707c,
             "ISP reset active.\n");
-    else if (!vha->hw->flags.eeh_busy)
-        rval = qla2x00_get_firmware_state(vha, state);
-    if (rval != QLA_SUCCESS)
+        goto out;
+    } else if (vha->hw->flags.eeh_busy) {
+        mutex_unlock(&vha->hw->optrom_mutex);
+        goto out;
+    }
+    rval = qla2x00_get_firmware_state(vha, state);
+    mutex_unlock(&vha->hw->optrom_mutex);
+out:
+    if (rval != QLA_SUCCESS) {
         memset(state, -1, sizeof(state));
+        rval = qla2x00_get_firmware_state(vha, state);
+    }
     return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
         state[0], state[1], state[2], state[3], state[4], state[5]);
...
@@ -189,7 +189,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
         goto premature_exit;
     }
-    ha->flags.mbox_busy = 1;
     /* Save mailbox command for debug */
     ha->mcp = mcp;
@@ -198,12 +198,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
     spin_lock_irqsave(&ha->hardware_lock, flags);
-    if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+    if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+        ha->flags.mbox_busy) {
         rval = QLA_ABORTED;
-        ha->flags.mbox_busy = 0;
         spin_unlock_irqrestore(&ha->hardware_lock, flags);
         goto premature_exit;
     }
+    ha->flags.mbox_busy = 1;
     /* Load mailbox registers. */
     if (IS_P3P_TYPE(ha))
@@ -254,9 +255,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
     if (IS_P3P_TYPE(ha)) {
         if (RD_REG_DWORD(&reg->isp82.hint) &
             HINT_MBX_INT_PENDING) {
+            ha->flags.mbox_busy = 0;
             spin_unlock_irqrestore(&ha->hardware_lock,
                 flags);
-            ha->flags.mbox_busy = 0;
             atomic_dec(&ha->num_pend_mbx_stage2);
             ql_dbg(ql_dbg_mbx, vha, 0x1010,
                 "Pending mailbox timeout, exiting.\n");
@@ -274,6 +276,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
         atomic_inc(&ha->num_pend_mbx_stage3);
         if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
             mcp->tov * HZ)) {
+            if (chip_reset != ha->chip_reset) {
+                spin_lock_irqsave(&ha->hardware_lock, flags);
+                ha->flags.mbox_busy = 0;
+                spin_unlock_irqrestore(&ha->hardware_lock,
+                    flags);
+                atomic_dec(&ha->num_pend_mbx_stage2);
+                atomic_dec(&ha->num_pend_mbx_stage3);
+                rval = QLA_ABORTED;
+                goto premature_exit;
+            }
             ql_dbg(ql_dbg_mbx, vha, 0x117a,
                 "cmd=%x Timeout.\n", command);
             spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +294,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
         } else if (ha->flags.purge_mbox ||
             chip_reset != ha->chip_reset) {
+            spin_lock_irqsave(&ha->hardware_lock, flags);
             ha->flags.mbox_busy = 0;
+            spin_unlock_irqrestore(&ha->hardware_lock, flags);
             atomic_dec(&ha->num_pend_mbx_stage2);
             atomic_dec(&ha->num_pend_mbx_stage3);
             rval = QLA_ABORTED;
@@ -300,9 +314,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
         if (IS_P3P_TYPE(ha)) {
             if (RD_REG_DWORD(&reg->isp82.hint) &
                 HINT_MBX_INT_PENDING) {
+                ha->flags.mbox_busy = 0;
                 spin_unlock_irqrestore(&ha->hardware_lock,
                     flags);
-                ha->flags.mbox_busy = 0;
                 atomic_dec(&ha->num_pend_mbx_stage2);
                 ql_dbg(ql_dbg_mbx, vha, 0x1012,
                     "Pending mailbox timeout, exiting.\n");
@@ -320,7 +334,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
         while (!ha->flags.mbox_int) {
             if (ha->flags.purge_mbox ||
                 chip_reset != ha->chip_reset) {
+                spin_lock_irqsave(&ha->hardware_lock, flags);
                 ha->flags.mbox_busy = 0;
+                spin_unlock_irqrestore(&ha->hardware_lock,
+                    flags);
                 atomic_dec(&ha->num_pend_mbx_stage2);
                 rval = QLA_ABORTED;
                 goto premature_exit;
@@ -363,7 +380,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
     clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
     if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+        spin_lock_irqsave(&ha->hardware_lock, flags);
         ha->flags.mbox_busy = 0;
+        spin_unlock_irqrestore(&ha->hardware_lock, flags);
         /* Setting Link-Down error */
         mcp->mb[0] = MBS_LINK_DOWN_ERROR;
         ha->mcp = NULL;
@@ -436,7 +456,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
              * then only PCI ERR flag would be set.
              * we will do premature exit for above case.
              */
+            spin_lock_irqsave(&ha->hardware_lock, flags);
             ha->flags.mbox_busy = 0;
+            spin_unlock_irqrestore(&ha->hardware_lock,
+                flags);
             rval = QLA_FUNCTION_TIMEOUT;
             goto premature_exit;
         }
@@ -451,8 +474,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
             rval = QLA_FUNCTION_TIMEOUT;
         }
     }
+    spin_lock_irqsave(&ha->hardware_lock, flags);
     ha->flags.mbox_busy = 0;
+    spin_unlock_irqrestore(&ha->hardware_lock, flags);
     /* Clean up */
     ha->mcp = NULL;
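
On the driver side, qla2x00_mailbox_command() now tests and sets mbox_busy while holding hardware_lock, so a second mailbox request submitted while one is in flight is aborted instead of racing on the mailbox registers. An abridged sketch of that handshake, assembled from the hunks above (error paths, P3P handling, and the completion wait are elided; this is not the complete function):

    /* Abridged sketch of the mbox_busy handshake shown in the diff above. */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
        ha->flags.mbox_busy) {
            /* Mailbox already owned, or purged by a chip reset: bail out. */
            rval = QLA_ABORTED;
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
            goto premature_exit;
    }
    ha->flags.mbox_busy = 1;    /* claim the mailbox while holding the lock */

    /* ... load the mailbox registers, drop hardware_lock, and wait for the
     * command to complete or time out ... */

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->flags.mbox_busy = 0;    /* release ownership under the same lock */
    spin_unlock_irqrestore(&ha->hardware_lock, flags);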
...
@@ -1491,27 +1491,14 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
     struct qla_hw_data *ha = tgt->ha;
     unsigned long flags;
+    mutex_lock(&ha->optrom_mutex);
     mutex_lock(&qla_tgt_mutex);
-    if (!vha->fc_vport) {
-        struct Scsi_Host *sh = vha->host;
-        struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
-        bool npiv_vports;
-        spin_lock_irqsave(sh->host_lock, flags);
-        npiv_vports = (fc_host->npiv_vports_inuse);
-        spin_unlock_irqrestore(sh->host_lock, flags);
-        if (npiv_vports) {
-            mutex_unlock(&qla_tgt_mutex);
-            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
-                "NPIV is in use. Can not stop target\n");
-            return -EPERM;
-        }
-    }
     if (tgt->tgt_stop || tgt->tgt_stopped) {
         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
             "Already in tgt->tgt_stop or tgt_stopped state\n");
         mutex_unlock(&qla_tgt_mutex);
+        mutex_unlock(&ha->optrom_mutex);
         return -EPERM;
     }
@@ -1549,6 +1536,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
     /* Wait for sessions to clear out (just in case) */
     wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+    mutex_unlock(&ha->optrom_mutex);
     return 0;
 }
 EXPORT_SYMBOL(qlt_stop_phase1);
@@ -6595,6 +6584,9 @@ qlt_enable_vha(struct scsi_qla_host *vha)
     qlt_set_mode(vha);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
+    mutex_lock(&ha->optrom_mutex);
+    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+        "%s.\n", __func__);
     if (vha->vp_idx) {
         qla24xx_disable_vp(vha);
         qla24xx_enable_vp(vha);
@@ -6603,6 +6595,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
         qla2xxx_wake_dpc(base_vha);
         qla2x00_wait_for_hba_online(base_vha);
     }
+    mutex_unlock(&ha->optrom_mutex);
 }
 EXPORT_SYMBOL(qlt_enable_vha);