Commit 28003486 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "This update contains:

   - the manual revert of the SYSCALL32 changes which caused a
     regression

   - a fix for the MPX vma handling

   - three fixes for the ioremap 'is ram' checks

   - PAT warning fixes

   - a trivial fix for the size calculation of TLB tracepoints

   - handle old EFI structures gracefully

  This also contains a PAT fix from Jan plus a revert thereof.  Toshi
  explained why the code is correct"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/pat: Revert 'Adjust default caching mode translation tables'
  x86/asm/entry/32: Revert 'Do not use R9 in SYSCALL32' commit
  x86/mm: Fix newly introduced printk format warnings
  mm: Fix bugs in region_is_ram()
  x86/mm: Remove region_is_ram() call from ioremap
  x86/mm: Move warning from __ioremap_check_ram() to the call site
  x86/mm/pat, drivers/media/ivtv: Move the PAT warning and replace WARN() with pr_warn()
  x86/mm/pat, drivers/infiniband/ipath: Replace WARN() with pr_warn()
  x86/mm/pat: Adjust default caching mode translation tables
  x86/fpu: Disable dependent CPU features on "noxsave"
  x86/mpx: Do not set ->vm_ops on MPX VMAs
  x86/mm: Add parenthesis for TLB tracepoint size calculation
  efi: Handle memory error structures produced based on old versions of standard
parents 26ae19a3 1a4e8795
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
        movl    RDX(%rsp), %edx         /* arg3 */
        movl    RSI(%rsp), %ecx         /* arg4 */
        movl    RDI(%rsp), %r8d         /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
        .endm

        .macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:

 sysenter_auditsys:
        auditsys_entry_common
+       movl    %ebp, %r9d              /* reload 6th syscall arg */
        jmp     sysenter_dispatch

 sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
         * 32-bit zero extended:
         */
        ASM_STAC
-1:     movl    (%r8), %ebp
+1:     movl    (%r8), %r9d
        _ASM_EXTABLE(1b, ia32_badarg)
        ASM_CLAC
        orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
 cstar_do_call:
        /* 32-bit syscall -> 64-bit C ABI argument conversion */
        movl    %edi, %r8d              /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
+                                       /* r9 already loaded */ /* arg6 */
        xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
        movl    %ebx, %edi              /* arg1 */
        movl    %edx, %edx              /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
        call    *ia32_sys_call_table(, %rax, 8)
        movq    %rax, RAX(%rsp)
 1:
-       movl    RCX(%rsp), %ebp
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:

 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
+       movl    %r9d, R9(%rsp)          /* register to be clobbered by call */
        auditsys_entry_common
+       movl    R9(%rsp), %r9d          /* reload 6th syscall arg */
        jmp     cstar_dispatch

 sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
        testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jz      cstar_auditsys
 #endif
+       xchgl   %r9d, %ebp
        SAVE_EXTRA_REGS
        xorl    %eax, %eax              /* Do not leak kernel information */
        movq    %rax, R11(%rsp)
        movq    %rax, R10(%rsp)
-       movq    %rax, R9(%rsp)
+       movq    %r9, R9(%rsp)
        movq    %rax, R8(%rsp)
        movq    %rsp, %rdi              /* &pt_regs -> arg1 */
        call    syscall_trace_enter
+       movl    R9(%rsp), %r9d

        /* Reload arg registers from stack. (see sysenter_tracesys) */
        movl    RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
        movl    %eax, %eax              /* zero extension */

        RESTORE_EXTRA_REGS
+       xchgl   %ebp, %r9d
        jmp     cstar_do_call
 END(entry_SYSCALL_compat)
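For context on the register juggling above: in the x86-64 SysV calling convention %r9 is a caller-clobbered argument register while %rbp is preserved across calls, so once the sixth syscall argument is loaded straight into %r9d at CSTAR entry it has to be spilled to the R9 pt_regs slot around the C helpers (the audit entry code and syscall_trace_enter) and reloaded afterwards; the sysenter path can simply reload %r9d from %ebp, which survives those calls.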
...
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+       setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
        setup_clear_cpu_cap(X86_FEATURE_XSAVES);
        setup_clear_cpu_cap(X86_FEATURE_AVX);
        setup_clear_cpu_cap(X86_FEATURE_AVX2);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+       setup_clear_cpu_cap(X86_FEATURE_MPX);
        return 1;
 }
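The added clears cover features whose register state lives in the XSAVE area (AVX-512) or that otherwise require XSAVE support (MPX), so booting with "noxsave" no longer advertises CPU features the kernel cannot save and restore. A hypothetical table-driven variant of the same clears, shown only to make the dependency list explicit (not part of the patch):

#include <linux/kernel.h>          /* ARRAY_SIZE() */
#include <asm/cpufeature.h>        /* X86_FEATURE_*, setup_clear_cpu_cap() */

/* Hypothetical helper, not in the patch: the same dependent-feature clears,
 * expressed as a table. */
static void __init clear_xsave_dependent_features(void)
{
        static const unsigned int deps[] = {
                X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVEC, X86_FEATURE_XSAVES,
                X86_FEATURE_AVX, X86_FEATURE_AVX2, X86_FEATURE_AVX512F,
                X86_FEATURE_AVX512PF, X86_FEATURE_AVX512ER, X86_FEATURE_AVX512CD,
                X86_FEATURE_MPX,
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(deps); i++)
                setup_clear_cpu_cap(deps[i]);
}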
...
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;

-       WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
        return 0;
 }
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;
-       int ram_region;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
-       /* First check if whole region can be identified as RAM or not */
-       ram_region = region_is_ram(phys_addr, size);
-       if (ram_region > 0) {
-               WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
-                               (unsigned long int)phys_addr,
-                               (unsigned long int)last_addr);
+       pfn      = phys_addr >> PAGE_SHIFT;
+       last_pfn = last_addr >> PAGE_SHIFT;
+       if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+                                 __ioremap_check_ram) == 1) {
+               WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+                         &phys_addr, &last_addr);
                return NULL;
        }
-       /* If could not be identified(-1), check page by page */
-       if (ram_region < 0) {
-               pfn = phys_addr >> PAGE_SHIFT;
-               last_pfn = last_addr >> PAGE_SHIFT;
-               if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-                                         __ioremap_check_ram) == 1)
-                       return NULL;
-       }

        /*
         * Mappings have to be page-aligned
         */
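Note that the replacement WARN_ONCE() above uses the %pa printk specifier, which is what the "printk format warnings" fix in this series addresses: %pa takes a pointer to a phys_addr_t/resource_size_t and prints the value correctly on both 32-bit and 64-bit builds, so the (unsigned long int) casts are no longer needed. A minimal sketch of the convention, with made-up names (not part of the patch):

#include <linux/printk.h>
#include <linux/types.h>

/* Made-up helper: %pa dereferences the pointer it is given, so callers pass
 * &start and &end rather than casting the values to a fixed-width type. */
static void report_range(phys_addr_t start, resource_size_t size)
{
        resource_size_t end = start + size - 1;

        pr_info("range: %pa - %pa\n", &start, &end);
}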
...
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_MPX)
+               return "[mpx]";
+       return NULL;
+}
@@ -20,20 +20,6 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/mpx.h>

-static const char *mpx_mapping_name(struct vm_area_struct *vma)
-{
-       return "[mpx]";
-}
-
-static struct vm_operations_struct mpx_vma_ops = {
-       .name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
-{
-       return (vma->vm_ops == &mpx_vma_ops);
-}
-
 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
 {
        if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
 /*
  * This is really a simplified "vm_mmap". it only handles MPX
  * bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
  */
 static unsigned long mpx_mmap(unsigned long len)
 {
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
                ret = -ENOMEM;
                goto out;
        }
-       vma->vm_ops = &mpx_vma_ops;

        if (vm_flags & VM_LOCKED) {
                up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
                 * so stop immediately and return an error. This
                 * probably results in a SIGSEGV.
                 */
-               if (!is_mpx_vma(vma))
+               if (!(vma->vm_flags & VM_MPX))
                        return -EINVAL;

                len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
         * lots of tables even though we have no actual table
         * entries in use.
         */
-       while (next && is_mpx_vma(next))
+       while (next && (next->vm_flags & VM_MPX))
                next = next->vm_next;
-       while (prev && is_mpx_vma(prev))
+       while (prev && (prev->vm_flags & VM_MPX))
                prev = prev->vm_prev;
        /*
         * We know 'start' and 'end' lie within an area controlled
...
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
        } else {
                unsigned long addr;
                unsigned long nr_pages =
-                       f->flush_end - f->flush_start / PAGE_SIZE;
+                       (f->flush_end - f->flush_start) / PAGE_SIZE;
                addr = f->flush_start;
                while (addr < f->flush_end) {
                        __flush_tlb_single(addr);
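This one-line change is the "TLB tracepoint size calculation" fix from the pull message: without the parentheses, division binds tighter than subtraction, so the old expression computed flush_end - (flush_start / PAGE_SIZE) rather than the number of pages in the flush range. A standalone illustration with made-up values:

#include <stdio.h>

int main(void)
{
        /* hypothetical flush range: four 4 KiB pages */
        unsigned long flush_start = 0x10000, flush_end = 0x14000;
        unsigned long page_size = 0x1000;

        /* buggy precedence: prints 81904, clearly not a page count */
        printf("%lu\n", flush_end - flush_start / page_size);
        /* fixed: prints 4, the intended number of pages */
        printf("%lu\n", (flush_end - flush_start) / page_size);
        return 0;
}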
...
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
        return ret;
 }

-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+       int len)
 {
        struct cper_mem_err_compact cmem;

+       /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+       if (len == sizeof(struct cper_sec_mem_err_old) &&
+           (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+               pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+               return;
+       }
        if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
                printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
        if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
                struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
                printk("%s""section_type: memory error\n", newpfx);
-               if (gdata->error_data_length >= sizeof(*mem_err))
-                       cper_print_mem(newpfx, mem_err);
+               if (gdata->error_data_length >=
+                   sizeof(struct cper_sec_mem_err_old))
+                       cper_print_mem(newpfx, mem_err,
+                                      gdata->error_data_length);
                else
                        goto err_section_too_small;
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
...
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        u32 bar0 = 0, bar1 = 0;

 #ifdef CONFIG_X86_64
-       if (WARN(pat_enabled(),
-                "ipath needs PAT disabled, boot with nopat kernel parameter\n")) {
+       if (pat_enabled()) {
+               pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
                ret = -ENODEV;
                goto bail;
        }
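The driver side of the PAT warning cleanup follows one pattern: the WARN() splat is downgraded to a plain pr_warn(), and a pr_fmt() definition is added near the top of the file so every pr_*() message is automatically prefixed with the driver name (the ivtvfb change below does the same). A minimal standalone sketch of the pr_fmt() convention, with made-up module and function names:

/* pr_fmt() must be defined before the first include that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

static int __init pr_fmt_demo_init(void)
{
        pr_warn("needs PAT disabled\n");   /* logged as "<modname>: needs PAT disabled" */
        return 0;
}
module_init(pr_fmt_demo_init);

MODULE_LICENSE("GPL");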
...
@@ -38,6 +38,8 @@
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
 {
        int rc;

+#ifdef CONFIG_X86_64
+       if (pat_enabled()) {
+               pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
+               return -ENODEV;
+       }
+#endif
+
        if (itv->osd_info) {
                IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
                return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
        int registered = 0;
        int err;

-#ifdef CONFIG_X86_64
-       if (WARN(pat_enabled(),
-                "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
-               return -ENODEV;
-       }
-#endif
        if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
                printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
...
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
        __u64   mm_reg_addr;
 };

-/* Memory Error Section */
+/* Old Memory Error Section UEFI 2.1, 2.2 */
+struct cper_sec_mem_err_old {
+       __u64   validation_bits;
+       __u64   error_status;
+       __u64   physical_addr;
+       __u64   physical_addr_mask;
+       __u16   node;
+       __u16   card;
+       __u16   module;
+       __u16   bank;
+       __u16   device;
+       __u16   row;
+       __u16   column;
+       __u16   bit_pos;
+       __u64   requestor_id;
+       __u64   responder_id;
+       __u64   target_id;
+       __u8    error_type;
+};
+
+/* Memory Error Section UEFI >= 2.3 */
 struct cper_sec_mem_err {
        __u64   validation_bits;
        __u64   error_status;
...
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
 {
        struct resource *p;
        resource_size_t end = start + size - 1;
-       int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        const char *name = "System RAM";
        int ret = -1;

        read_lock(&resource_lock);
        for (p = iomem_resource.child; p ; p = p->sibling) {
-               if (end < p->start)
+               if (p->end < start)
                        continue;

                if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
                        ret = 1;
                        break;
                }
-               if (p->end < start)
+               if (end < p->start)
                        break;  /* not found */
        }
        read_unlock(&resource_lock);
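The two swapped tests above are the "Fix bugs in region_is_ram()" change: the children of iomem_resource are kept in ascending address order, so an entry that ends below the requested range should be skipped, while an entry that starts above it means nothing later can match and the walk can stop; the old code had the two conditions reversed. A simplified userspace sketch of the corrected walk, using illustrative types rather than the kernel API:

#include <stdbool.h>

/* Illustrative stand-in for a sorted, non-overlapping resource list. */
struct range_entry {
        unsigned long start, end;       /* inclusive bounds */
        bool is_ram;
        struct range_entry *sibling;    /* next entry, ascending order */
};

/* 1: range fully inside a RAM entry, 0: fully inside a non-RAM entry,
 * -1: not found or only partially covered.  Mirrors the fixed ordering:
 * skip entries entirely below the range, stop once entries start above it. */
static int range_is_ram(const struct range_entry *head,
                        unsigned long start, unsigned long end)
{
        const struct range_entry *p;

        for (p = head; p; p = p->sibling) {
                if (p->end < start)
                        continue;       /* entirely below: keep walking */
                if (p->start <= start && end <= p->end)
                        return p->is_ram ? 1 : 0;
                if (end < p->start)
                        break;          /* sorted: nothing later can overlap */
        }
        return -1;
}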
...