Commit e045fb2a authored by venkatesh.pallipadi@intel.com, committed by Ingo Molnar

x86: PAT avoid aliasing in /dev/mem read/write

Add xlate and unxlate around /dev/mem read/write. This sets up the mapping
that can be used for /dev/mem read and write without aliasing worries.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e2beb3ea
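
The change pairs every /dev/mem access with an xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() call: RAM pages keep using __va(), while non-RAM pages get a transient ioremap() mapping that is torn down right after the access, so no long-lived alias with a conflicting memory type survives. A minimal sketch of the calling pattern (illustrative only; read_phys_byte() is a hypothetical helper, not part of this patch):

/*
 * Sketch of the access pattern introduced by this patch: translate the
 * physical address, touch it once, then undo the translation so that any
 * temporary ioremap() mapping is removed straight away.
 */
static int read_phys_byte(unsigned long phys, unsigned char *val)
{
	void *ptr;

	ptr = xlate_dev_mem_ptr(phys);	/* __va() for RAM, ioremap() otherwise */
	if (!ptr)
		return -EFAULT;

	*val = *(unsigned char *)ptr;

	unxlate_dev_mem_ptr(phys, ptr);	/* iounmap() only in the non-RAM case */
	return 0;
}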
@@ -336,6 +336,35 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+void *xlate_dev_mem_ptr(unsigned long phys)
+{
+	void *addr;
+	unsigned long start = phys & PAGE_MASK;
+
+	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+	if (page_is_ram(start >> PAGE_SHIFT))
+		return __va(phys);
+
+	addr = (void *)ioremap(start, PAGE_SIZE);
+	if (addr)
+		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+
+	return addr;
+}
+
+void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+	if (page_is_ram(phys >> PAGE_SHIFT))
+		return;
+
+	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+	return;
+}
+
 #ifdef CONFIG_X86_32
 int __initdata early_ioremap_debug;
...
@@ -134,6 +134,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif
 
+void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+}
+
 /*
  * This funcion reads the *physical* memory. The f_pos points directly to the
  * memory location.
@@ -176,17 +180,25 @@ static ssize_t read_mem(struct file * file, char __user * buf,
 		sz = min_t(unsigned long, sz, count);
 
+		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+			return -EPERM;
+
 		/*
 		 * On ia64 if a page has been mapped somewhere as
 		 * uncached, then it must also be accessed uncached
 		 * by the kernel or data corruption may occur
 		 */
 		ptr = xlate_dev_mem_ptr(p);
+		if (!ptr)
+			return -EFAULT;
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
-			return -EPERM;
-		if (copy_to_user(buf, ptr, sz))
+		if (copy_to_user(buf, ptr, sz)) {
+			unxlate_dev_mem_ptr(p, ptr);
 			return -EFAULT;
+		}
+
+		unxlate_dev_mem_ptr(p, ptr);
 
 		buf += sz;
 		p += sz;
 		count -= sz;
@@ -235,22 +247,32 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 		sz = min_t(unsigned long, sz, count);
 
+		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+			return -EPERM;
+
 		/*
 		 * On ia64 if a page has been mapped somewhere as
 		 * uncached, then it must also be accessed uncached
 		 * by the kernel or data corruption may occur
 		 */
 		ptr = xlate_dev_mem_ptr(p);
+		if (!ptr) {
+			if (written)
+				break;
+			return -EFAULT;
+		}
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
-			return -EPERM;
 		copied = copy_from_user(ptr, buf, sz);
 		if (copied) {
 			written += sz - copied;
+			unxlate_dev_mem_ptr(p, ptr);
 			if (written)
 				break;
 			return -EFAULT;
 		}
 
+		unxlate_dev_mem_ptr(p, ptr);
+
 		buf += sz;
 		p += sz;
 		count -= sz;
...
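
The generic driver gains a weak, empty unxlate_dev_mem_ptr() so architectures without special requirements need no change, while the strong x86 definition added above overrides it at link time. Roughly (an illustrative sketch of the weak/strong pairing; the two definitions live in separate translation units, as in the patch):

/* generic /dev/mem code: weak no-op default */
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/* x86 code: strong definition preferred by the linker; undoes the ioremap() */
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}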
+#ifndef _ASM_X86_IO_H
+#define _ASM_X86_IO_H
+
 #define ARCH_HAS_IOREMAP_WC
 
 #ifdef CONFIG_X86_32
@@ -5,7 +8,12 @@
 #else
 # include "io_64.h"
 #endif
 
+extern void *xlate_dev_mem_ptr(unsigned long phys);
+extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
+
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 				unsigned long prot_val);
 extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+
+#endif /* _ASM_X86_IO_H */
@@ -48,12 +48,6 @@
 #include <linux/vmalloc.h>
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */
...
@@ -307,12 +307,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
 extern int iommu_bio_merge;
 #define BIO_VMERGE_BOUNDARY iommu_bio_merge
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */
...