Commit cb32edf6 authored by Luis R. Rodriguez, committed by Ingo Molnar

x86/mm/pat: Wrap pat_enabled into a function API

We use pat_enabled in x86-specific code to see if PAT is enabled
or not but we're granting full access to it even though readers
do not need to set it. If, for instance, we granted access to it
to modules later they then could override the variable
setting... no bueno.

This renames pat_enabled to a new static variable __pat_enabled.
Folks are redirected to use pat_enabled() now.

Code that sets this can only be internal to pat.c. Apart from
the early kernel parameter "nopat" to disable PAT, we also have
a few cases that disable it later and make use of a helper
pat_disable(). It is wrapped under an ifdef but since that code
cannot run unless PAT was enabled, it's not required to wrap it
with ifdefs, unwrap that. Likewise, since "nopat" doesn't really
change non-PAT systems just remove that ifdef as well.

Although we could add and use an early_param_off(), these
helpers don't use __read_mostly but we want to keep
__read_mostly for __pat_enabled as this is a hot path -- upon
boot, for instance, a simple guest may see ~4k accesses to
pat_enabled(). Since __read_mostly early boot params are not
that common we don't add a helper for them just yet.
Signed-off-by: Luis R. Rodriguez <mcgrof@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Walls <awalls@md.metrocast.net>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Doug Ledford <dledford@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kyle McMartin <kyle@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1430425520-22275-3-git-send-email-mcgrof@do-not-panic.com
Link: http://lkml.kernel.org/r/1432628901-18044-13-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f9626104
...@@ -4,12 +4,7 @@ ...@@ -4,12 +4,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/pgtable_types.h> #include <asm/pgtable_types.h>
#ifdef CONFIG_X86_PAT bool pat_enabled(void);
extern int pat_enabled;
#else
static const int pat_enabled;
#endif
extern void pat_init(void); extern void pat_init(void);
void pat_init_cache_modes(void); void pat_init_cache_modes(void);
......
...@@ -558,7 +558,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size) ...@@ -558,7 +558,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size)
{ {
int ret; int ret;
if (pat_enabled || !mtrr_enabled()) if (pat_enabled() || !mtrr_enabled())
return 0; /* Success! (We don't need to do anything.) */ return 0; /* Success! (We don't need to do anything.) */
ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true); ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
......
...@@ -82,7 +82,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) ...@@ -82,7 +82,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
* MTRR is UC or WC. UC_MINUS gets the real intention, of the * MTRR is UC or WC. UC_MINUS gets the real intention, of the
* user, which is "WC if the MTRR is WC, UC if you can't do that." * user, which is "WC if the MTRR is WC, UC if you can't do that."
*/ */
if (!pat_enabled && pgprot_val(prot) == if (!pat_enabled() && pgprot_val(prot) ==
(__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC))) (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
prot = __pgprot(__PAGE_KERNEL | prot = __pgprot(__PAGE_KERNEL |
cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
......
...@@ -234,7 +234,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) ...@@ -234,7 +234,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{ {
/* /*
* Ideally, this should be: * Ideally, this should be:
* pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS; * pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
* *
* Till we fix all X drivers to use ioremap_wc(), we will use * Till we fix all X drivers to use ioremap_wc(), we will use
* UC MINUS. Drivers that are certain they need or can already * UC MINUS. Drivers that are certain they need or can already
...@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(ioremap_uc); ...@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(ioremap_uc);
*/ */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{ {
if (pat_enabled) if (pat_enabled())
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC, return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
__builtin_return_address(0)); __builtin_return_address(0));
else else
......
...@@ -1571,7 +1571,7 @@ int set_memory_wc(unsigned long addr, int numpages) ...@@ -1571,7 +1571,7 @@ int set_memory_wc(unsigned long addr, int numpages)
{ {
int ret; int ret;
if (!pat_enabled) if (!pat_enabled())
return set_memory_uc(addr, numpages); return set_memory_uc(addr, numpages);
ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
......
...@@ -36,12 +36,11 @@ ...@@ -36,12 +36,11 @@
#undef pr_fmt #undef pr_fmt
#define pr_fmt(fmt) "" fmt #define pr_fmt(fmt) "" fmt
#ifdef CONFIG_X86_PAT static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
int __read_mostly pat_enabled = 1;
static inline void pat_disable(const char *reason) static inline void pat_disable(const char *reason)
{ {
pat_enabled = 0; __pat_enabled = 0;
pr_info("x86/PAT: %s\n", reason); pr_info("x86/PAT: %s\n", reason);
} }
...@@ -51,13 +50,11 @@ static int __init nopat(char *str) ...@@ -51,13 +50,11 @@ static int __init nopat(char *str)
return 0; return 0;
} }
early_param("nopat", nopat); early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason) bool pat_enabled(void)
{ {
(void)reason; return !!__pat_enabled;
} }
#endif
int pat_debug_enable; int pat_debug_enable;
...@@ -201,7 +198,7 @@ void pat_init(void) ...@@ -201,7 +198,7 @@ void pat_init(void)
u64 pat; u64 pat;
bool boot_cpu = !boot_pat_state; bool boot_cpu = !boot_pat_state;
if (!pat_enabled) if (!pat_enabled())
return; return;
if (!cpu_has_pat) { if (!cpu_has_pat) {
...@@ -402,7 +399,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, ...@@ -402,7 +399,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
BUG_ON(start >= end); /* end is exclusive */ BUG_ON(start >= end); /* end is exclusive */
if (!pat_enabled) { if (!pat_enabled()) {
/* This is identical to page table setting without PAT */ /* This is identical to page table setting without PAT */
if (new_type) { if (new_type) {
if (req_type == _PAGE_CACHE_MODE_WC) if (req_type == _PAGE_CACHE_MODE_WC)
...@@ -477,7 +474,7 @@ int free_memtype(u64 start, u64 end) ...@@ -477,7 +474,7 @@ int free_memtype(u64 start, u64 end)
int is_range_ram; int is_range_ram;
struct memtype *entry; struct memtype *entry;
if (!pat_enabled) if (!pat_enabled())
return 0; return 0;
/* Low ISA region is always mapped WB. No need to track */ /* Low ISA region is always mapped WB. No need to track */
...@@ -625,7 +622,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) ...@@ -625,7 +622,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
u64 to = from + size; u64 to = from + size;
u64 cursor = from; u64 cursor = from;
if (!pat_enabled) if (!pat_enabled())
return 1; return 1;
while (cursor < to) { while (cursor < to) {
...@@ -661,7 +658,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, ...@@ -661,7 +658,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
* caching for the high addresses through the KEN pin, but * caching for the high addresses through the KEN pin, but
* we maintain the tradition of paranoia in this code. * we maintain the tradition of paranoia in this code.
*/ */
if (!pat_enabled && if (!pat_enabled() &&
!(boot_cpu_has(X86_FEATURE_MTRR) || !(boot_cpu_has(X86_FEATURE_MTRR) ||
boot_cpu_has(X86_FEATURE_K6_MTRR) || boot_cpu_has(X86_FEATURE_K6_MTRR) ||
boot_cpu_has(X86_FEATURE_CYRIX_ARR) || boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
...@@ -730,7 +727,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, ...@@ -730,7 +727,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
* the type requested matches the type of first page in the range. * the type requested matches the type of first page in the range.
*/ */
if (is_ram) { if (is_ram) {
if (!pat_enabled) if (!pat_enabled())
return 0; return 0;
pcm = lookup_memtype(paddr); pcm = lookup_memtype(paddr);
...@@ -844,7 +841,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, ...@@ -844,7 +841,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
return ret; return ret;
} }
if (!pat_enabled) if (!pat_enabled())
return 0; return 0;
/* /*
...@@ -872,7 +869,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, ...@@ -872,7 +869,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
{ {
enum page_cache_mode pcm; enum page_cache_mode pcm;
if (!pat_enabled) if (!pat_enabled())
return 0; return 0;
/* Set prot based on lookup */ /* Set prot based on lookup */
...@@ -913,7 +910,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, ...@@ -913,7 +910,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
pgprot_t pgprot_writecombine(pgprot_t prot) pgprot_t pgprot_writecombine(pgprot_t prot)
{ {
if (pat_enabled) if (pat_enabled())
return __pgprot(pgprot_val(prot) | return __pgprot(pgprot_val(prot) |
cachemode2protval(_PAGE_CACHE_MODE_WC)); cachemode2protval(_PAGE_CACHE_MODE_WC));
else else
...@@ -996,7 +993,7 @@ static const struct file_operations memtype_fops = { ...@@ -996,7 +993,7 @@ static const struct file_operations memtype_fops = {
static int __init pat_memtype_list_init(void) static int __init pat_memtype_list_init(void)
{ {
if (pat_enabled) { if (pat_enabled()) {
debugfs_create_file("pat_memtype_list", S_IRUSR, debugfs_create_file("pat_memtype_list", S_IRUSR,
arch_debugfs_dir, NULL, &memtype_fops); arch_debugfs_dir, NULL, &memtype_fops);
} }
......
...@@ -429,12 +429,12 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -429,12 +429,12 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
* Caller can followup with UC MINUS request and add a WC mtrr if there * Caller can followup with UC MINUS request and add a WC mtrr if there
* is a free mtrr slot. * is a free mtrr slot.
*/ */
if (!pat_enabled && write_combine) if (!pat_enabled() && write_combine)
return -EINVAL; return -EINVAL;
if (pat_enabled && write_combine) if (pat_enabled() && write_combine)
prot |= cachemode2protval(_PAGE_CACHE_MODE_WC); prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
else if (pat_enabled || boot_cpu_data.x86 > 3) else if (pat_enabled() || boot_cpu_data.x86 > 3)
/* /*
* ioremap() and ioremap_nocache() defaults to UC MINUS for now. * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
* To avoid attribute conflicts, request UC MINUS here * To avoid attribute conflicts, request UC MINUS here
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment