Commit 9333d775 authored by Venkat Gopalakrishnan, committed by Martin K. Petersen

scsi: ufs: Fix irq return code

Return IRQ_HANDLED only if the IRQ is really handled; this helps catch
spurious interrupts that go unhandled.

Link: https://lore.kernel.org/r/1573798172-20534-6-git-send-email-cang@codeaurora.org
Reviewed-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Venkat Gopalakrishnan <venkatg@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent cddaebaf
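
For context, here is a minimal sketch (not taken from this patch) of the irqreturn_t convention the change moves ufshcd toward: each sub-handler reports whether it actually serviced an event, and the top-level ISR only claims the interrupt when at least one of them did. All names below (demo_dev, demo_isr, the status-register layout) are hypothetical.

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device; the status register is assumed to live at offset 0. */
struct demo_dev {
	void __iomem *regs;
};

/* Sub-handler: reports whether anything was really serviced. */
static irqreturn_t demo_handle_events(struct demo_dev *ddev, u32 status)
{
	irqreturn_t retval = IRQ_NONE;

	if (status & BIT(0)) {		/* event A pending */
		/* ... service event A ... */
		retval |= IRQ_HANDLED;
	}
	if (status & BIT(1)) {		/* event B pending */
		/* ... service event B ... */
		retval |= IRQ_HANDLED;
	}
	return retval;
}

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_dev *ddev = data;
	u32 status = readl(ddev->regs);

	if (!status)
		return IRQ_NONE;	/* nothing for us; lets the core spot spurious IRQs */

	writel(status, ddev->regs);	/* ack only what was observed */
	return demo_handle_events(ddev, status);
}

Returning IRQ_NONE when nothing was serviced lets the IRQ core's spurious-interrupt accounting (note_interrupt()) notice a misbehaving line, which is what the patch enables for ufshcd_intr() below.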
@@ -240,7 +240,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
 	END_FIX
 };
 
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -4799,19 +4799,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
  * @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE - If invalid interrupt
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
 	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
 		hba->active_uic_cmd->argument2 |=
 			ufshcd_get_uic_cmd_result(hba);
 		hba->active_uic_cmd->argument3 =
 			ufshcd_get_dme_attr_val(hba);
 		complete(&hba->active_uic_cmd->done);
+		retval = IRQ_HANDLED;
 	}
 
-	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
 		complete(hba->uic_async_done);
+		retval = IRQ_HANDLED;
+	}
+
+	return retval;
 }
 
 /**
@@ -4867,8 +4877,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 /**
  * ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE - If invalid interrupt
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 {
 	unsigned long completed_reqs;
 	u32 tr_doorbell;
@@ -4887,7 +4901,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 
-	__ufshcd_transfer_req_compl(hba, completed_reqs);
+	if (completed_reqs) {
+		__ufshcd_transfer_req_compl(hba, completed_reqs);
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
 }
 
 /**
@@ -5406,10 +5425,15 @@ static void ufshcd_err_handler(struct work_struct *work)
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE - If invalid interrupt
  */
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
+	irqreturn_t retval = IRQ_NONE;
 
 	/* PHY layer lane error */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
@@ -5422,11 +5446,13 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
 		 */
 		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
 		ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+		retval |= IRQ_HANDLED;
 	}
 
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-	if (reg)
+	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
 
 	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@ -5439,28 +5465,37 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
 		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
 			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
 		}
+		retval |= IRQ_HANDLED;
+	}
 
 	/* UIC NL/TL/DME errors needs software retry */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
-	if (reg) {
+	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
-	if (reg) {
+	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-	if (reg) {
+	if ((reg & UIC_DME_ERROR) &&
+	    (reg & UIC_DME_ERROR_CODE_MASK)) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
 		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+		retval |= IRQ_HANDLED;
 	}
 
 	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
 		__func__, hba->uic_error);
+	return retval;
 }
 
 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
@@ -5483,10 +5518,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
 /**
  * ufshcd_check_errors - Check for errors that need s/w attention
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE - If invalid interrupt
  */
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 {
 	bool queue_eh_work = false;
+	irqreturn_t retval = IRQ_NONE;
 
 	if (hba->errors & INT_FATAL_ERRORS) {
 		ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
@@ -5495,7 +5535,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 	if (hba->errors & UIC_ERROR) {
 		hba->uic_error = 0;
-		ufshcd_update_uic_error(hba);
+		retval = ufshcd_update_uic_error(hba);
 		if (hba->uic_error)
 			queue_eh_work = true;
 	}
@@ -5543,6 +5583,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 			}
 			schedule_work(&hba->eh_work);
 		}
+		retval |= IRQ_HANDLED;
 	}
 	/*
 	 * if (!queue_eh_work) -
@@ -5550,44 +5591,62 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 	 * itself without s/w intervention or errors that will be
 	 * handled by the SCSI core layer.
 	 */
+	return retval;
 }
 
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE - If invalid interrupt
  */
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
 	u32 tm_doorbell;
 
 	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-	wake_up(&hba->tm_wq);
+	if (hba->tm_condition) {
+		wake_up(&hba->tm_wq);
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
 }
 
 /**
  * ufshcd_sl_intr - Interrupt service routine
  * @hba: per adapter instance
  * @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE - If invalid interrupt
  */
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
+	irqreturn_t retval = IRQ_NONE;
+
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
 
 	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
 		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
 
 	if (hba->errors)
-		ufshcd_check_errors(hba);
+		retval |= ufshcd_check_errors(hba);
 
 	if (intr_status & UFSHCD_UIC_MASK)
-		ufshcd_uic_cmd_compl(hba, intr_status);
+		retval |= ufshcd_uic_cmd_compl(hba, intr_status);
 
 	if (intr_status & UTP_TASK_REQ_COMPL)
-		ufshcd_tmc_handler(hba);
+		retval |= ufshcd_tmc_handler(hba);
 
 	if (intr_status & UTP_TRANSFER_REQ_COMPL)
-		ufshcd_transfer_req_compl(hba);
+		retval |= ufshcd_transfer_req_compl(hba);
+
+	return retval;
 }
 
 /**
@@ -5595,7 +5654,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  * @irq: irq number
  * @__hba: pointer to adapter instance
  *
- * Returns IRQ_HANDLED - If interrupt is valid
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
  *  IRQ_NONE - If invalid interrupt
  */
 static irqreturn_t ufshcd_intr(int irq, void *__hba)
@@ -5619,14 +5679,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 		if (intr_status)
 			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
-		if (enabled_intr_status) {
-			ufshcd_sl_intr(hba, enabled_intr_status);
-			retval = IRQ_HANDLED;
-		}
+		if (enabled_intr_status)
+			retval |= ufshcd_sl_intr(hba, enabled_intr_status);
 
 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	} while (intr_status && --retries);
 
+	if (retval == IRQ_NONE) {
+		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+					__func__, intr_status);
+		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+	}
+
 	spin_unlock(hba->host->host_lock);
 	return retval;
 }
...

@@ -195,7 +195,7 @@ enum {
 /* UECDL - Host UIC Error Code Data Link Layer 3Ch */
 #define UIC_DATA_LINK_LAYER_ERROR		0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK	0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK	0xFFFF
 #define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP	0x2
 #define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP	0x4
 #define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP	0x8

...