Commit a2055abe authored by Andy Lutomirski, committed by Ingo Molnar

x86/mm: Pass flush_tlb_info to flush_tlb_others() etc

Rather than passing all the contents of flush_tlb_info to
flush_tlb_others(), pass a pointer to the structure directly. For
consistency, this also removes the unnecessary cpu parameter from
uv_flush_tlb_others() to make its signature match the other
*flush_tlb_others() functions.

This serves two purposes:

 - It will dramatically simplify future patches that change struct
   flush_tlb_info, which I'm planning to do.

 - struct flush_tlb_info is an adequate description of what to do
   for a local flush, too, so by reusing it we can remove duplicated
   code between local and remote flushes in a future patch. A short
   sketch of the new calling convention follows below.
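
As a minimal sketch of the new calling convention (this mirrors the
flush_tlb_mm_range() hunk below rather than introducing any new API;
mm, start and end stand for whatever values the caller already has):

	struct flush_tlb_info info = {
		.mm	= mm,
		.start	= start,
		.end	= end,
	};

	/* One pointer replaces the old (mm, start, end) argument list. */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);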

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
[ Fix build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4241119e
@@ -312,11 +312,9 @@ static inline void __flush_tlb_single(unsigned long addr)
 }
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
-				    struct mm_struct *mm,
-				    unsigned long start,
-				    unsigned long end)
+				    const struct flush_tlb_info *info)
 {
-	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
+	PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
......
@@ -51,6 +51,7 @@ struct mm_struct;
 struct desc_struct;
 struct task_struct;
 struct cpumask;
+struct flush_tlb_info;
 
 /*
  * Wrapper type for pointers to code which uses the non-standard
@@ -223,9 +224,7 @@ struct pv_mmu_ops {
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
-				 struct mm_struct *mm,
-				 unsigned long start,
-				 unsigned long end);
+				 const struct flush_tlb_info *info);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
 	int (*pgd_alloc)(struct mm_struct *mm);
......
@@ -220,12 +220,18 @@ static inline void __flush_tlb_one(unsigned long addr)
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
+ * - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
  */
 
+struct flush_tlb_info {
+	struct mm_struct *mm;
+	unsigned long start;
+	unsigned long end;
+};
+
 #ifndef CONFIG_SMP
 
 /* "_up" is for UniProcessor.
@@ -279,9 +285,7 @@ static inline void flush_tlb_mm_range(struct mm_struct *mm,
 }
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   struct mm_struct *mm,
-					   unsigned long start,
-					   unsigned long end)
+					   const struct flush_tlb_info *info)
 {
 }
 
@@ -317,8 +321,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm,
-			     unsigned long start, unsigned long end);
+			     const struct flush_tlb_info *info);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -340,8 +343,8 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, start, end)	\
-	native_flush_tlb_others(mask, mm, start, end)
+#define flush_tlb_others(mask, info)	\
+	native_flush_tlb_others(mask, info)
 #endif
 
 #endif /* _ASM_X86_TLBFLUSH_H */
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_UV_UV_H
 #define _ASM_X86_UV_UV_H
 
+#include <asm/tlbflush.h>
+
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 
 struct cpumask;
@@ -15,10 +17,7 @@ extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-						 struct mm_struct *mm,
-						 unsigned long start,
-						 unsigned long end,
-						 unsigned int cpu);
+						 const struct flush_tlb_info *info);
 
 #else	/* X86_UV */
@@ -28,8 +27,8 @@ static inline int is_uv_hubless(void) { return 0; }
 static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
 static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
-		    unsigned long start, unsigned long end, unsigned int cpu)
+uv_flush_tlb_others(const struct cpumask *cpumask,
+		    const struct flush_tlb_info *info)
 { return cpumask; }
 
 #endif	/* X86_UV */
......
@@ -30,12 +30,6 @@
 #ifdef CONFIG_SMP
 
-struct flush_tlb_info {
-	struct mm_struct *flush_mm;
-	unsigned long flush_start;
-	unsigned long flush_end;
-};
-
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
 */
@@ -229,11 +223,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
  */
 static void flush_tlb_func(void *info)
 {
-	struct flush_tlb_info *f = info;
+	const struct flush_tlb_info *f = info;
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -243,15 +237,15 @@ static void flush_tlb_func(void *info)
 		return;
 	}
 
-	if (f->flush_end == TLB_FLUSH_ALL) {
+	if (f->end == TLB_FLUSH_ALL) {
 		local_flush_tlb();
 		trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
 	} else {
 		unsigned long addr;
 		unsigned long nr_pages =
-			(f->flush_end - f->flush_start) / PAGE_SIZE;
-		addr = f->flush_start;
-		while (addr < f->flush_end) {
+			(f->end - f->start) / PAGE_SIZE;
+		addr = f->start;
+		while (addr < f->end) {
 			__flush_tlb_single(addr);
 			addr += PAGE_SIZE;
 		}
@@ -260,33 +254,27 @@ static void flush_tlb_func(void *info)
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long start,
-			     unsigned long end)
+			     const struct flush_tlb_info *info)
 {
-	struct flush_tlb_info info;
-
-	info.flush_mm = mm;
-	info.flush_start = start;
-	info.flush_end = end;
-
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-	if (end == TLB_FLUSH_ALL)
+	if (info->end == TLB_FLUSH_ALL)
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
 	else
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
-				(end - start) >> PAGE_SHIFT);
+				(info->end - info->start) >> PAGE_SHIFT);
 
 	if (is_uv_system()) {
 		unsigned int cpu;
 
 		cpu = smp_processor_id();
-		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
+		cpumask = uv_flush_tlb_others(cpumask, info);
 		if (cpumask)
 			smp_call_function_many(cpumask, flush_tlb_func,
-						&info, 1);
+					       (void *)info, 1);
 		return;
 	}
-	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+	smp_call_function_many(cpumask, flush_tlb_func,
+			       (void *)info, 1);
 }
 
 /*
@@ -305,6 +293,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
 	unsigned long addr;
+	struct flush_tlb_info info;
 	/* do a global flush by default */
 	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
@@ -347,15 +336,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	}
 	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
 out:
+	info.mm = mm;
 	if (base_pages_to_flush == TLB_FLUSH_ALL) {
-		start = 0UL;
-		end = TLB_FLUSH_ALL;
+		info.start = 0UL;
+		info.end = TLB_FLUSH_ALL;
+	} else {
+		info.start = start;
+		info.end = end;
 	}
+
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, end);
+		flush_tlb_others(mm_cpumask(mm), &info);
 	preempt_enable();
 }
 
 static void do_flush_tlb_all(void *info)
 {
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -376,7 +370,7 @@ static void do_kernel_range_flush(void *info)
 	unsigned long addr;
 
 	/* flush range by one by one 'invlpg' */
-	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
 		__flush_tlb_single(addr);
 }
 
@@ -389,14 +383,20 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		on_each_cpu(do_flush_tlb_all, NULL, 1);
 	} else {
 		struct flush_tlb_info info;
-		info.flush_start = start;
-		info.flush_end = end;
+
+		info.start = start;
+		info.end = end;
 		on_each_cpu(do_kernel_range_flush, &info, 1);
 	}
 }
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
+	struct flush_tlb_info info = {
+		.mm = NULL,
+		.start = 0UL,
+		.end = TLB_FLUSH_ALL,
+	};
 	int cpu = get_cpu();
 
 	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
@@ -406,7 +406,7 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 	}
 
 	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-		flush_tlb_others(&batch->cpumask, NULL, 0, TLB_FLUSH_ALL);
+		flush_tlb_others(&batch->cpumask, &info);
 
 	cpumask_clear(&batch->cpumask);
 	put_cpu();
......
@@ -1121,11 +1121,9 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * done. The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-					  struct mm_struct *mm,
-					  unsigned long start,
-					  unsigned long end,
-					  unsigned int cpu)
+					  const struct flush_tlb_info *info)
 {
+	unsigned int cpu = smp_processor_id();
 	int locals = 0, remotes = 0, hubs = 0;
 	struct bau_desc *bau_desc;
 	struct cpumask *flush_mask;
@@ -1179,8 +1177,8 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-	if (!end || (end - start) <= PAGE_SIZE)
-		address = start;
+	if (!info->end || (info->end - info->start) <= PAGE_SIZE)
+		address = info->start;
 	else
 		address = TLB_FLUSH_ALL;
 
......
@@ -1366,8 +1366,7 @@ static void xen_flush_tlb_single(unsigned long addr)
 }
 
 static void xen_flush_tlb_others(const struct cpumask *cpus,
-				 struct mm_struct *mm, unsigned long start,
-				 unsigned long end)
+				 const struct flush_tlb_info *info)
 {
 	struct {
 		struct mmuext_op op;
@@ -1379,7 +1378,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	} *args;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
+	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
 
 	if (cpumask_empty(cpus))
 		return;		/* nothing to do */
@@ -1393,9 +1392,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+	if (info->end != TLB_FLUSH_ALL &&
+	    (info->end - info->start) <= PAGE_SIZE) {
 		args->op.cmd = MMUEXT_INVLPG_MULTI;
-		args->op.arg1.linear_addr = start;
+		args->op.arg1.linear_addr = info->start;
 	}
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
......