Commit 1299ef1d authored by Andy Lutomirski, committed by Ingo Molnar

x86/mm: Rename flush_tlb_single() and flush_tlb_one() to __flush_tlb_one_[user|kernel]()

flush_tlb_single() and flush_tlb_one() sound almost identical, but
they really mean "flush one user translation" and "flush one kernel
translation".  Rename them to flush_tlb_one_user() and
flush_tlb_one_kernel() to make the semantics more obvious.

[ I was looking at some PTI-related code, and the flush-one-address code
  is unnecessarily hard to understand because the names of the helpers are
  uninformative.  This came up during PTI review, but no one got around to
  doing it. ]
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Linux-MM <linux-mm@kvack.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/3303b02e3c3d049dc5235d5651e0ae6d29a34354.1517414378.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ea00f301
...@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void) ...@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
{ {
PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
} }
static inline void __flush_tlb_single(unsigned long addr) static inline void __flush_tlb_one_user(unsigned long addr)
{ {
PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
} }
static inline void flush_tlb_others(const struct cpumask *cpumask, static inline void flush_tlb_others(const struct cpumask *cpumask,
......
...@@ -217,7 +217,7 @@ struct pv_mmu_ops { ...@@ -217,7 +217,7 @@ struct pv_mmu_ops {
/* TLB operations */ /* TLB operations */
void (*flush_tlb_user)(void); void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void); void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(unsigned long addr); void (*flush_tlb_one_user)(unsigned long addr);
void (*flush_tlb_others)(const struct cpumask *cpus, void (*flush_tlb_others)(const struct cpumask *cpus,
const struct flush_tlb_info *info); const struct flush_tlb_info *info);
......
...@@ -61,7 +61,7 @@ void paging_init(void); ...@@ -61,7 +61,7 @@ void paging_init(void);
#define kpte_clear_flush(ptep, vaddr) \ #define kpte_clear_flush(ptep, vaddr) \
do { \ do { \
pte_clear(&init_mm, (vaddr), (ptep)); \ pte_clear(&init_mm, (vaddr), (ptep)); \
__flush_tlb_one((vaddr)); \ __flush_tlb_one_kernel((vaddr)); \
} while (0) } while (0)
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) ...@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
#else #else
#define __flush_tlb() __native_flush_tlb() #define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global() #define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) #define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif #endif
static inline bool tlb_defer_switch_to_init_mm(void) static inline bool tlb_defer_switch_to_init_mm(void)
...@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void) ...@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void)
/* /*
* flush one page in the user mapping * flush one page in the user mapping
*/ */
static inline void __native_flush_tlb_single(unsigned long addr) static inline void __native_flush_tlb_one_user(unsigned long addr)
{ {
u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
...@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void) ...@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void)
/* /*
* flush one page in the kernel mapping * flush one page in the kernel mapping
*/ */
static inline void __flush_tlb_one(unsigned long addr) static inline void __flush_tlb_one_kernel(unsigned long addr)
{ {
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
/*
* If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
* paravirt equivalent. Even with PCID, this is sufficient: we only
* use PCID if we also use global PTEs for the kernel mapping, and
* INVLPG flushes global translations across all address spaces.
*
* If PTI is on, then the kernel is mapped with non-global PTEs, and
* __flush_tlb_one_user() will flush the given address for the current
* kernel address space and for its usermode counterpart, but it does
* not flush it for other address spaces.
*/
__flush_tlb_one_user(addr);
if (!static_cpu_has(X86_FEATURE_PTI)) if (!static_cpu_has(X86_FEATURE_PTI))
return; return;
/* /*
* __flush_tlb_single() will have cleared the TLB entry for this ASID, * See above. We need to propagate the flush to all other address
* but since kernel space is replicated across all, we must also * spaces. In principle, we only need to propagate it to kernelmode
* invalidate all others. * address spaces, but the extra bookkeeping we would need is not
* worth it.
*/ */
invalidate_other_asid(); invalidate_other_asid();
} }
......
...@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void) ...@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void)
__native_flush_tlb_global(); __native_flush_tlb_global();
} }
static void native_flush_tlb_single(unsigned long addr) static void native_flush_tlb_one_user(unsigned long addr)
{ {
__native_flush_tlb_single(addr); __native_flush_tlb_one_user(addr);
} }
struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_enabled;
...@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { ...@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
.flush_tlb_user = native_flush_tlb, .flush_tlb_user = native_flush_tlb,
.flush_tlb_kernel = native_flush_tlb_global, .flush_tlb_kernel = native_flush_tlb_global,
.flush_tlb_single = native_flush_tlb_single, .flush_tlb_one_user = native_flush_tlb_one_user,
.flush_tlb_others = native_flush_tlb_others, .flush_tlb_others = native_flush_tlb_others,
.pgd_alloc = __paravirt_pgd_alloc, .pgd_alloc = __paravirt_pgd_alloc,
......
...@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) ...@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
* It's enough to flush this one mapping. * It's enough to flush this one mapping.
* (PGE mappings get flushed as well) * (PGE mappings get flushed as well)
*/ */
__flush_tlb_one(vaddr); __flush_tlb_one_kernel(vaddr);
} }
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
......
...@@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx, ...@@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
else else
pte_clear(&init_mm, addr, pte); pte_clear(&init_mm, addr, pte);
__flush_tlb_one(addr); __flush_tlb_one_kernel(addr);
} }
...@@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) ...@@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
return -1; return -1;
} }
__flush_tlb_one(f->addr); __flush_tlb_one_kernel(f->addr);
return 0; return 0;
} }
......
...@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) ...@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
* It's enough to flush this one mapping. * It's enough to flush this one mapping.
* (PGE mappings get flushed as well) * (PGE mappings get flushed as well)
*/ */
__flush_tlb_one(vaddr); __flush_tlb_one_kernel(vaddr);
} }
unsigned long __FIXADDR_TOP = 0xfffff000; unsigned long __FIXADDR_TOP = 0xfffff000;
......
...@@ -492,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, ...@@ -492,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* flush that changes context.tlb_gen from 2 to 3. If they get * flush that changes context.tlb_gen from 2 to 3. If they get
* processed on this CPU in reverse order, we'll see * processed on this CPU in reverse order, we'll see
* local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL. * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
* If we were to use __flush_tlb_single() and set local_tlb_gen to * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
* 3, we'd be break the invariant: we'd update local_tlb_gen above * 3, we'd be break the invariant: we'd update local_tlb_gen above
* 1 without the full flush that's needed for tlb_gen 2. * 1 without the full flush that's needed for tlb_gen 2.
* *
...@@ -513,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, ...@@ -513,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
addr = f->start; addr = f->start;
while (addr < f->end) { while (addr < f->end) {
__flush_tlb_single(addr); __flush_tlb_one_user(addr);
addr += PAGE_SIZE; addr += PAGE_SIZE;
} }
if (local) if (local)
...@@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *info) ...@@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *info)
/* flush range by one by one 'invlpg' */ /* flush range by one by one 'invlpg' */
for (addr = f->start; addr < f->end; addr += PAGE_SIZE) for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
__flush_tlb_one(addr); __flush_tlb_one_kernel(addr);
} }
void flush_tlb_kernel_range(unsigned long start, unsigned long end) void flush_tlb_kernel_range(unsigned long start, unsigned long end)
......
...@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, ...@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
local_flush_tlb(); local_flush_tlb();
stat->d_alltlb++; stat->d_alltlb++;
} else { } else {
__flush_tlb_single(msg->address); __flush_tlb_one_user(msg->address);
stat->d_onetlb++; stat->d_onetlb++;
} }
stat->d_requestee++; stat->d_requestee++;
......
...@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void) ...@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
preempt_enable(); preempt_enable();
} }
static void xen_flush_tlb_single(unsigned long addr) static void xen_flush_tlb_one_user(unsigned long addr)
{ {
struct mmuext_op *op; struct mmuext_op *op;
struct multicall_space mcs; struct multicall_space mcs;
trace_xen_mmu_flush_tlb_single(addr); trace_xen_mmu_flush_tlb_one_user(addr);
preempt_disable(); preempt_disable();
...@@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { ...@@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.flush_tlb_user = xen_flush_tlb, .flush_tlb_user = xen_flush_tlb,
.flush_tlb_kernel = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb,
.flush_tlb_single = xen_flush_tlb_single, .flush_tlb_one_user = xen_flush_tlb_one_user,
.flush_tlb_others = xen_flush_tlb_others, .flush_tlb_others = xen_flush_tlb_others,
.pgd_alloc = xen_pgd_alloc, .pgd_alloc = xen_pgd_alloc,
......
...@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb, ...@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
TP_printk("%s", "") TP_printk("%s", "")
); );
TRACE_EVENT(xen_mmu_flush_tlb_single, TRACE_EVENT(xen_mmu_flush_tlb_one_user,
TP_PROTO(unsigned long addr), TP_PROTO(unsigned long addr),
TP_ARGS(addr), TP_ARGS(addr),
TP_STRUCT__entry( TP_STRUCT__entry(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment