Commit d6d189ce authored by Bart Van Assche; committed by Martin K. Petersen

scsi: lpfc: Change smp_processor_id() into raw_smp_processor_id()

Avoid the kernel warning that appears when smp_processor_id() is called
from preemptible context with preempt debugging (CONFIG_DEBUG_PREEMPT)
enabled. At these call sites the CPU number is only used as a hint, so
the result of raw_smp_processor_id() is good enough.

Cc: James Smart <james.smart@broadcom.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent d8c2040b
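
Background on the change (a minimal sketch, not part of the patch): with
CONFIG_DEBUG_PREEMPT enabled, smp_processor_id() routes through a debug
check that warns when the caller is preemptible, because the returned CPU
number can become stale the moment the task migrates. raw_smp_processor_id()
performs no such check and is acceptable when the value is only advisory,
as in the statistics, trace, and queue-selection call sites below. The
helper name example_record_cpu() is illustrative only.

#include <linux/smp.h>	/* smp_processor_id(), raw_smp_processor_id(), get_cpu() */

/* Illustrative helper, not part of the lpfc driver. */
static void example_record_cpu(unsigned int *out)
{
	unsigned int cpu;

	/*
	 * With CONFIG_DEBUG_PREEMPT this warns if the caller is
	 * preemptible: the task may migrate right after the read,
	 * so the returned CPU number cannot be relied upon.
	 */
	cpu = smp_processor_id();

	/*
	 * Same read without the debug check. Appropriate when the
	 * value is only advisory (statistics, trace messages, hardware
	 * queue selection), as in the call sites converted below.
	 */
	cpu = raw_smp_processor_id();

	/*
	 * If the code truly must stay on the CPU it read, pin the task
	 * instead of bypassing the check:
	 */
	cpu = get_cpu();	/* disables preemption */
	*out = cpu;		/* ... per-CPU work ... */
	put_cpu();		/* re-enables preemption */
}
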
@@ -229,7 +229,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 	if (qhandle == NULL)
 		return -ENOMEM;
 
-	qhandle->cpu_id = smp_processor_id();
+	qhandle->cpu_id = raw_smp_processor_id();
 	qhandle->qidx = qidx;
 	/*
 	 * NVME qidx == 0 is the admin queue, so both admin queue
@@ -1143,7 +1143,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
 		uint32_t cpu;
 		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT) {
 			if (lpfc_ncmd->cpu != cpu)
 				lpfc_printf_vlog(vport,
@@ -1561,7 +1561,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
 		idx = lpfc_queue_info->index;
 	} else {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
 	}
@@ -1641,7 +1641,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
 
 	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT) {
 			lpfc_ncmd->cpu = cpu;
 			if (idx != cpu)
...
@@ -433,7 +433,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	 * Use the CPU context list, from the MRQ the IO was received on
 	 * (ctxp->idx), to save context structure.
 	 */
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
@@ -763,7 +763,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	}
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		id = smp_processor_id();
+		id = raw_smp_processor_id();
 		if (id < LPFC_CHECK_CPU_CNT) {
 			if (ctxp->cpu != id)
 				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -904,7 +904,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
 
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		int id = smp_processor_id();
+		int id = raw_smp_processor_id();
 		if (id < LPFC_CHECK_CPU_CNT) {
 			if (rsp->hwqid != id)
 				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1118,7 +1118,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
-			 ctxp->oxid, ctxp->size, smp_processor_id());
+			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
 	if (!nvmebuf) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1594,7 +1594,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 		lpfc_nvmeio_data(phba,
 				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-				 xri, smp_processor_id(), 0);
+				 xri, raw_smp_processor_id(), 0);
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
@@ -1610,7 +1610,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-			 xri, smp_processor_id(), 1);
+			 xri, raw_smp_processor_id(), 1);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
@@ -2044,7 +2044,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	 * be empty, thus it would need to be replenished with the
 	 * context list from another CPU for this MRQ.
 	 */
-	current_cpu = smp_processor_id();
+	current_cpu = raw_smp_processor_id();
 	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
 	if (current_infop->nvmet_ctx_list_cnt) {
@@ -2074,7 +2074,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 #endif
 
 	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
-			 oxid, size, smp_processor_id());
+			 oxid, size, raw_smp_processor_id());
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
...
@@ -688,7 +688,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	uint32_t sgl_size, cpu, idx;
 	int tag;
 
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
 		tag = blk_mq_unique_tag(cmnd->request);
 		idx = blk_mq_unique_tag_to_hwq(tag);
@@ -3669,7 +3669,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
 			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
 	}
@@ -4464,7 +4464,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-		cpu = smp_processor_id();
+		cpu = raw_smp_processor_id();
 		if (cpu < LPFC_CHECK_CPU_CNT) {
 			struct lpfc_sli4_hdw_queue *hdwq =
 				&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
...
@@ -13542,7 +13542,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0390 Cannot schedule soft IRQ "
 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
+				cqid, cq->queue_id, raw_smp_processor_id());
 	}
 
 /**
@@ -14091,7 +14091,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0363 Cannot schedule soft IRQ "
 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
+				cqid, cq->queue_id, raw_smp_processor_id());
 	}
 
 /**
@@ -14230,7 +14230,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 
 	eqi = phba->sli4_hba.eq_info;
 	icnt = this_cpu_inc_return(eqi->icnt);
-	fpeq->last_cpu = smp_processor_id();
+	fpeq->last_cpu = raw_smp_processor_id();
 
 	if (icnt > LPFC_EQD_ISR_TRIGGER &&
 	    phba->cfg_irq_chann == 1 &&
...
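
For reference, a condensed sketch of the check that produces the warning,
based on check_preemption_disabled() in lib/smp_processor_id.c. This is
simplified, not verbatim kernel source; the function name carries a
_sketch suffix to make that explicit.

#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/bug.h>

/* Condensed and simplified; see lib/smp_processor_id.c for the real logic. */
static unsigned int debug_smp_processor_id_sketch(void)
{
	unsigned int cpu = raw_smp_processor_id();

	if (likely(preempt_count()))	/* preemption disabled: safe */
		return cpu;
	if (irqs_disabled())		/* cannot be preempted with IRQs off */
		return cpu;
	/* A task pinned to a single CPU cannot migrate: */
	if (cpumask_equal(current->cpus_ptr, cpumask_of(cpu)))
		return cpu;

	/* Preemptible and migratable: the CPU id may already be stale. */
	WARN(1, "using smp_processor_id() in preemptible code\n");
	return cpu;
}

Because none of the lpfc call sites in this patch depend on staying on the
CPU they read, switching them to raw_smp_processor_id() silences the check
without changing behavior.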