Commit 7aba3a85 authored by Dawei Li, committed by Andreas Larsson

sparc/srmmu: Remove on-stack cpumask var

In general it's preferable to avoid placing cpumasks on the stack, as
for large values of NR_CPUS these can consume significant amounts of
stack space and make stack overflows more likely.

Use cpumask_any_but() to avoid the need for a temporary cpumask on
the stack and simplify code.
Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Dawei Li <dawei.li@shingroup.cn>
Reviewed-by: Andreas Larsson <andreas@gaisler.com>
Link: https://lore.kernel.org/r/20240424025548.3765250-2-dawei.li@shingroup.cn
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
parent 48d85acd
...@@ -1653,13 +1653,15 @@ static void smp_flush_tlb_all(void) ...@@ -1653,13 +1653,15 @@ static void smp_flush_tlb_all(void)
local_ops->tlb_all(); local_ops->tlb_all();
} }
/*
 * Return true if any CPU other than the calling one currently has
 * @mm in its cpumask.  Uses cpumask_any_but() so no temporary cpumask
 * is needed on the stack.
 */
static bool any_other_mm_cpus(struct mm_struct *mm)
{
	unsigned int other = cpumask_any_but(mm_cpumask(mm), smp_processor_id());

	return other < nr_cpu_ids;
}
static void smp_flush_cache_mm(struct mm_struct *mm) static void smp_flush_cache_mm(struct mm_struct *mm)
{ {
if (mm->context != NO_CONTEXT) { if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask; if (any_other_mm_cpus(mm))
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc1(local_ops->cache_mm, (unsigned long)mm); xc1(local_ops->cache_mm, (unsigned long)mm);
local_ops->cache_mm(mm); local_ops->cache_mm(mm);
} }
...@@ -1668,10 +1670,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm) ...@@ -1668,10 +1670,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
static void smp_flush_tlb_mm(struct mm_struct *mm) static void smp_flush_tlb_mm(struct mm_struct *mm)
{ {
if (mm->context != NO_CONTEXT) { if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask; if (any_other_mm_cpus(mm)) {
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask)) {
xc1(local_ops->tlb_mm, (unsigned long)mm); xc1(local_ops->tlb_mm, (unsigned long)mm);
if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm), cpumask_copy(mm_cpumask(mm),
...@@ -1688,10 +1687,7 @@ static void smp_flush_cache_range(struct vm_area_struct *vma, ...@@ -1688,10 +1687,7 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) { if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask; if (any_other_mm_cpus(mm))
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc3(local_ops->cache_range, (unsigned long)vma, start, xc3(local_ops->cache_range, (unsigned long)vma, start,
end); end);
local_ops->cache_range(vma, start, end); local_ops->cache_range(vma, start, end);
...@@ -1705,10 +1701,7 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma, ...@@ -1705,10 +1701,7 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) { if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask; if (any_other_mm_cpus(mm))
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc3(local_ops->tlb_range, (unsigned long)vma, start, xc3(local_ops->tlb_range, (unsigned long)vma, start,
end); end);
local_ops->tlb_range(vma, start, end); local_ops->tlb_range(vma, start, end);
...@@ -1720,10 +1713,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page) ...@@ -1720,10 +1713,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) { if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask; if (any_other_mm_cpus(mm))
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc2(local_ops->cache_page, (unsigned long)vma, page); xc2(local_ops->cache_page, (unsigned long)vma, page);
local_ops->cache_page(vma, page); local_ops->cache_page(vma, page);
} }
...@@ -1734,10 +1724,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) ...@@ -1734,10 +1724,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) { if (mm->context != NO_CONTEXT) {
cpumask_t cpu_mask; if (any_other_mm_cpus(mm))
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc2(local_ops->tlb_page, (unsigned long)vma, page); xc2(local_ops->tlb_page, (unsigned long)vma, page);
local_ops->tlb_page(vma, page); local_ops->tlb_page(vma, page);
} }
...@@ -1759,10 +1746,7 @@ static void smp_flush_page_to_ram(unsigned long page) ...@@ -1759,10 +1746,7 @@ static void smp_flush_page_to_ram(unsigned long page)
static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{ {
cpumask_t cpu_mask; if (any_other_mm_cpus(mm))
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr); xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
local_ops->sig_insns(mm, insn_addr); local_ops->sig_insns(mm, insn_addr);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment