Commit 222e9239 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Resize cpu maps structures based on possible cpus

The work done to date sized per-cpu structures based on the number of present CPUs. These structures should instead be sized for the maximum possible CPU count, so that entries exist for CPUs that are not present at initialization but may come online later (e.g. CPU hotplug).

Convert the driver to use the possible CPU count when sizing its allocations.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 75508a8b
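
The change follows a common kernel pattern: per-CPU lookup tables are sized by the possible CPU count (so every CPU id that can ever appear has a slot), while iteration is restricted to CPUs that are actually present. The snippet below is a minimal sketch of that pattern, not lpfc code; the struct, variable, and function names (example_cpu_info, example_map, example_map_alloc, example_map_fill) are illustrative only.

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

/* Illustrative sketch only, not lpfc code. Assumes possible CPU ids are
 * dense (0 .. num_possible_cpus() - 1), as the patch itself does.
 */
struct example_cpu_info {
	u16 phys_id;
	u16 core_id;
};

static struct example_cpu_info *example_map;

static int example_map_alloc(void)
{
	/* Size the table so every possible CPU id has an entry. */
	example_map = kcalloc(num_possible_cpus(),
			      sizeof(*example_map), GFP_KERNEL);
	if (!example_map)
		return -ENOMEM;
	return 0;
}

static void example_map_fill(void)
{
	int cpu;

	/* Only present CPUs carry meaningful topology data. */
	for_each_present_cpu(cpu) {
		example_map[cpu].phys_id = topology_physical_package_id(cpu);
		example_map[cpu].core_id = topology_core_id(cpu);
	}
}

The sysfs change in lpfc_fcp_cpu_map_show() follows the same split: the display loop now walks the possible range and prints "CPU %02d not present" for ids with no present CPU, matching the larger map.
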
@@ -5176,16 +5176,22 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 	case 1:
 		len += snprintf(buf + len, PAGE_SIZE-len,
 				"fcp_cpu_map: HBA centric mapping (%d): "
-				"%d online CPUs\n",
-				phba->cfg_fcp_cpu_map,
-				phba->sli4_hba.num_online_cpu);
+				"%d of %d CPUs online from %d possible CPUs\n",
+				phba->cfg_fcp_cpu_map, num_online_cpus(),
+				num_present_cpus(),
+				phba->sli4_hba.num_possible_cpu);
 		break;
 	}

-	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+	while (phba->sli4_hba.curr_disp_cpu <
+	       phba->sli4_hba.num_possible_cpu) {
 		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];

-		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+		if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"CPU %02d not present\n",
+					phba->sli4_hba.curr_disp_cpu);
+		else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
 			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
 				len += snprintf(
 					buf + len, PAGE_SIZE - len,
@@ -5225,14 +5231,15 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 		/* display max number of CPUs keeping some margin */
 		if (phba->sli4_hba.curr_disp_cpu <
-		    phba->sli4_hba.num_present_cpu &&
+		    phba->sli4_hba.num_possible_cpu &&
 		    (len >= (PAGE_SIZE - 64))) {
-			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			len += snprintf(buf + len,
+					PAGE_SIZE - len, "more...\n");
 			break;
 		}
 	}

-	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
 		phba->sli4_hba.curr_disp_cpu = 0;

 	return len;
@@ -6373,8 +6373,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	u32 if_type;
 	u32 if_fam;

-	phba->sli4_hba.num_online_cpu = num_online_cpus();
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+	phba->sli4_hba.num_possible_cpu = num_possible_cpus();
 	phba->sli4_hba.curr_disp_cpu = 0;

 	/* Get all the module params for configuring this host */
@@ -6796,7 +6796,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_fcf_rr_bmask;
 	}

-	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
 					 sizeof(struct lpfc_vector_map_info),
 					 GFP_KERNEL);
 	if (!phba->sli4_hba.cpu_map) {
@@ -6868,8 +6868,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
 	kfree(phba->sli4_hba.cpu_map);
+	phba->sli4_hba.num_possible_cpu = 0;
 	phba->sli4_hba.num_present_cpu = 0;
-	phba->sli4_hba.num_online_cpu = 0;
 	phba->sli4_hba.curr_disp_cpu = 0;

 	/* Free memory allocated for fast-path work queue handles */
@@ -10519,15 +10519,14 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
 	int cpu;

 	/* Find the desired phys_id for the specified EQ */
-	cpup = phba->sli4_hba.cpu_map;
-	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 		if ((match == LPFC_FIND_BY_EQ) &&
 		    (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
 		    (cpup->eq == id))
 			return cpu;
 		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
 			return cpu;
-		cpup++;
 	}
 	return 0;
 }
@@ -10545,11 +10544,10 @@ lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq)
 	int cpu;

 	/* Find the desired phys_id for the specified EQ */
-	cpup = phba->sli4_hba.cpu_map;
-	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 		if (cpup->hdwq == hdwq)
 			return cpup->eq;
-		cpup++;
 	}
 	return 0;
 }
@@ -10569,16 +10567,14 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
 	struct lpfc_vector_map_info *cpup;
 	int idx;

-	cpup = phba->sli4_hba.cpu_map;
-	for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+	for_each_present_cpu(idx) {
+		cpup = &phba->sli4_hba.cpu_map[idx];
 		/* Does the cpup match the one we are looking for */
 		if ((cpup->phys_id == phys_id) &&
 		    (cpup->core_id == core_id) &&
-		    (cpu != idx)) {
+		    (cpu != idx))
 			return 1;
-		}
-		cpup++;
 	}
 	return 0;
 }
 #endif
@@ -10608,7 +10604,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	/* Init cpu_map array */
 	memset(phba->sli4_hba.cpu_map, 0xff,
 	       (sizeof(struct lpfc_vector_map_info) *
-	       phba->sli4_hba.num_present_cpu));
+	       phba->sli4_hba.num_possible_cpu));

 	max_phys_id = 0;
 	min_phys_id = 0xffff;
@@ -10617,8 +10613,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	phys_id = 0;

 	/* Update CPU map with physical id and core id of each CPU */
-	cpup = phba->sli4_hba.cpu_map;
-	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 #ifdef CONFIG_X86
 		cpuinfo = &cpu_data(cpu);
 		cpup->phys_id = cpuinfo->phys_proc_id;
@@ -10645,8 +10641,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 			max_core_id = cpup->core_id;
 		if (cpup->core_id < min_core_id)
 			min_core_id = cpup->core_id;
-
-		cpup++;
 	}

 	for_each_possible_cpu(i) {
@@ -1194,9 +1194,9 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
 	/* Cycle the the entire CPU context list for every MRQ */
 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
-		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+		for_each_present_cpu(j) {
+			infop = lpfc_get_ctx_list(phba, j, i);
 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
-			infop++; /* next */
 		}
 	}
 	kfree(phba->sli4_hba.nvmet_ctx_info);
@@ -1211,14 +1211,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	union lpfc_wqe128 *wqe;
 	struct lpfc_nvmet_ctx_info *last_infop;
 	struct lpfc_nvmet_ctx_info *infop;
-	int i, j, idx;
+	int i, j, idx, cpu;

 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
 			"6403 Allocate NVMET resources for %d XRIs\n",
 			phba->sli4_hba.nvmet_xri_cnt);

 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
-		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
 	if (!phba->sli4_hba.nvmet_ctx_info) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1246,13 +1246,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	 * of the IO completion. Thus a context that was allocated for MRQ A
 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
 	 */
-	infop = phba->sli4_hba.nvmet_ctx_info;
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for_each_possible_cpu(i) {
 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			infop = lpfc_get_ctx_list(phba, i, j);
 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
 			spin_lock_init(&infop->nvmet_ctx_list_lock);
 			infop->nvmet_ctx_list_cnt = 0;
-			infop++;
 		}
 	}
@@ -1262,8 +1261,10 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
 	 */
 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
-		last_infop = lpfc_get_ctx_list(phba, 0, j);
-		for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
+		last_infop = lpfc_get_ctx_list(phba,
+					       cpumask_first(cpu_present_mask),
+					       j);
+		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
 			infop = lpfc_get_ctx_list(phba, i, j);
 			infop->nvmet_ctx_next_cpu = last_infop;
 			last_infop = infop;
@@ -1274,6 +1275,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	 * received command on a per xri basis.
 	 */
 	idx = 0;
+	cpu = cpumask_first(cpu_present_mask);
 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
 		if (!ctx_buf) {
@@ -1327,7 +1329,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 		 * is MRQidx will be associated with CPUidx. This association
 		 * can change on the fly.
 		 */
-		infop = lpfc_get_ctx_list(phba, idx, idx);
+		infop = lpfc_get_ctx_list(phba, cpu, idx);
 		spin_lock(&infop->nvmet_ctx_list_lock);
 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
 		infop->nvmet_ctx_list_cnt++;
@@ -1335,11 +1337,18 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 		/* Spread ctx structures evenly across all MRQs */
 		idx++;
-		if (idx >= phba->cfg_nvmet_mrq)
+		if (idx >= phba->cfg_nvmet_mrq) {
 			idx = 0;
+			cpu = cpumask_first(cpu_present_mask);
+			continue;
+		}
+		cpu = cpumask_next(cpu, cpu_present_mask);
+		if (cpu == nr_cpu_ids)
+			cpu = cpumask_first(cpu_present_mask);
 	}

-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for_each_present_cpu(i) {
 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
 			infop = lpfc_get_ctx_list(phba, i, j);
 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
@@ -1839,7 +1848,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
 	else
 		get_infop = current_infop->nvmet_ctx_next_cpu;

-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
 		if (get_infop == current_infop) {
 			get_infop = get_infop->nvmet_ctx_next_cpu;
 			continue;
@@ -890,7 +890,7 @@ struct lpfc_sli4_hba {

 	/* CPU to vector mapping information */
 	struct lpfc_vector_map_info *cpu_map;
-	uint16_t num_online_cpu;
+	uint16_t num_possible_cpu;
 	uint16_t num_present_cpu;
 	uint16_t curr_disp_cpu;
 	struct lpfc_eq_intr_info __percpu *eq_info;
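
One detail worth calling out from the lpfc_nvmet_setup_io_context() hunks above: when contexts are spread round-robin across MRQs, the CPU cursor is advanced through the present-CPU mask and wrapped back to the first present CPU once the mask is exhausted. The helper below is a minimal sketch of that wrap-around idiom; example_next_present_cpu is a hypothetical name, not a driver symbol.

#include <linux/cpumask.h>

/* Advance to the next present CPU, wrapping to the first one at the end. */
static int example_next_present_cpu(int cpu)
{
	cpu = cpumask_next(cpu, cpu_present_mask);
	if (cpu >= nr_cpu_ids)	/* ran off the end of the present mask */
		cpu = cpumask_first(cpu_present_mask);
	return cpu;
}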