Commit dee7cbb2 authored by Venki Pallipadi, committed by Ingo Molnar

x86: PAT bug fix for attribute type check after reserve_memtype

Bug fixes for the reserve_memtype() calls in __ioremap() and pci_mmap_page_range().
If reserve_memtype() returns non-zero, it is an error and no subsequent free is
required. The check of the requested against the returned prot value should only
be done when reserve_memtype() returns success.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9307caca
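
Below is a rough, self-contained C sketch of the corrected call pattern described in the commit message: reserve the memory type first, bail out on a non-zero return (nothing was reserved, so nothing needs to be freed), and only compare the requested and granted attribute types on success. The reserve_memtype() stub, map_example(), and their parameters are illustrative stand-ins for this sketch, not the kernel implementation.

#include <stdio.h>

/* Stub standing in for the kernel's reserve_memtype(): assumed here to return
 * 0 on success (writing the granted type to *new_type) and non-zero on error. */
static int reserve_memtype(unsigned long start, unsigned long end,
                           unsigned long req_type, unsigned long *new_type)
{
        (void)start; (void)end;
        *new_type = req_type;   /* pretend the request was granted as-is */
        return 0;
}

/* Hypothetical caller showing the fixed error-handling order. */
static void *map_example(unsigned long phys_addr, unsigned long size,
                         unsigned long prot_val)
{
        unsigned long new_prot_val;
        int retval;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                /* Error path: the reservation failed, so no free is needed. */
                printf("reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /* Only on success is the requested-vs-returned attribute
                 * comparison meaningful; handle any mismatch here. */
        }

        /* ... proceed with the actual mapping ... */
        return NULL;    /* placeholder return for the sketch */
}

int main(void)
{
        map_example(0x1000, 0x1000, 0);
        return 0;
}
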
@@ -124,6 +124,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	struct vm_struct *area;
 	unsigned long new_prot_val;
 	pgprot_t prot;
+	int retval;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -163,8 +164,14 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-	if (reserve_memtype(phys_addr, phys_addr + size,
-			    prot_val, &new_prot_val)) {
+	retval = reserve_memtype(phys_addr, phys_addr + size,
+				 prot_val, &new_prot_val);
+	if (retval) {
+		printk("reserve_memtype returned %d\n", retval);
+		return NULL;
+	}
+
+	if (prot_val != new_prot_val) {
 		/*
 		 * Do not fallback to certain memory types with certain
 		 * requested type:
@@ -328,6 +328,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	unsigned long len = vma->vm_end - vma->vm_start;
 	unsigned long flags;
 	unsigned long new_flags;
+	int retval;
 
 	/* I/O space cannot be accessed via normal processor loads and
 	 * stores on this platform.
@@ -344,7 +345,11 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	vma->vm_page_prot = __pgprot(prot);
 
 	flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK;
-	if (reserve_memtype(addr, addr + len, flags, &new_flags)) {
+	retval = reserve_memtype(addr, addr + len, flags, &new_flags);
+	if (retval)
+		return retval;
+
+	if (flags != new_flags) {
 		/*
 		 * Do not fallback to certain memory types with certain
 		 * requested type: