Commit a2249c27 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] drivers/char/mem.c cleanup

From: David Mosberger <davidm@napali.hpl.hp.com>

Here is a simplified version of the earlier /dev/mem cleanup.  This version
of the patch no longer pretends to support uncached accesses via
read()/write().  Instead, all it does is:

- consolidate much of the ugly code in uncached_access()

- move pgprot_noncached() macro to pgtable.h for i386, m68k, ppc, ppc64,
  and x86_64

- fix the ia64 implementation to use efi_mem_attributes() to determine
  the cacheability of an address

Eventually, we may want to replace the #ifdef CONFIG_IA64 with #ifdef
CONFIG_EFI, but we can do this "on demand", if and when EFI-based x86
machines become more common.
parent 329bc121
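
For orientation before reading the diff: the sketch below distills the pattern the patch converges on in mmap_mem() -- one uncached_access() decision per mapping, applied through the per-architecture pgprot_noncached() only where the architecture defines it. The helper names and flags match the diff; the stripped-down wrapper function (example_mmap_mem) is illustrative only and not part of the commit.

/* Sketch of the post-patch shape of the mmap path (illustrative only). */
static int example_mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        int uncached = uncached_access(file, offset);   /* decide once */

#ifdef pgprot_noncached
        if (uncached)           /* apply via the arch-provided macro, if any */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
        vma->vm_flags |= VM_RESERVED;   /* never swap these pages out */
        if (uncached)           /* device memory: keep it out of core dumps */
                vma->vm_flags |= VM_IO;

        if (remap_page_range(vma, vma->vm_start, offset,
                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
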
@@ -29,6 +29,10 @@
 #include <asm/io.h>
 #include <asm/pgalloc.h>
 
+#ifdef CONFIG_IA64
+# include <linux/efi.h>
+#endif
+
 #ifdef CONFIG_FB
 extern void fbmem_init(void);
 #endif
@@ -36,6 +40,45 @@ extern void fbmem_init(void);
 extern void tapechar_init(void);
 #endif
 
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ *
+ */
+static inline int uncached_access(struct file *file, unsigned long addr)
+{
+#if defined(__i386__)
+        /*
+         * On the PPro and successors, the MTRRs are used to set
+         * memory types for physical addresses outside main memory,
+         * so blindly setting PCD or PWT on those pages is wrong.
+         * For Pentiums and earlier, the surround logic should disable
+         * caching for the high addresses through the KEN pin, but
+         * we maintain the tradition of paranoia in this code.
+         */
+        if (file->f_flags & O_SYNC)
+                return 1;
+        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
+          && addr >= __pa(high_memory);
+#elif defined(CONFIG_IA64)
+        /*
+         * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
+         */
+        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#else
+        /*
+         * Accessing memory above the top the kernel knows about or through a file pointer
+         * that was marked O_SYNC will be done non-cached.
+         */
+        if (file->f_flags & O_SYNC)
+                return 1;
+        return addr >= __pa(high_memory);
+#endif
+}
+
 static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
                             const char * buf, size_t count, loff_t *ppos)
 {
@@ -116,77 +159,16 @@ static ssize_t write_mem(struct file * file, const char * buf,
 	return do_write_mem(file, __va(p), p, buf, count, ppos);
 }
-#ifndef pgprot_noncached
-
-/*
- * This should probably be per-architecture in <asm/pgtable.h>
- */
-static inline pgprot_t pgprot_noncached(pgprot_t _prot)
-{
-        unsigned long prot = pgprot_val(_prot);
-
-#if defined(__i386__) || defined(__x86_64__)
-        /* On PPro and successors, PCD alone doesn't always mean
-           uncached because of interactions with the MTRRs.  PCD | PWT
-           means definitely uncached. */
-        if (boot_cpu_data.x86 > 3)
-                prot |= _PAGE_PCD | _PAGE_PWT;
-#elif defined(__powerpc__)
-        prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
-#elif defined(__mc68000__) && defined(CONFIG_MMU)
-#ifdef SUN3_PAGE_NOCACHE
-        if (MMU_IS_SUN3)
-                prot |= SUN3_PAGE_NOCACHE;
-        else
-#endif
-        if (MMU_IS_851 || MMU_IS_030)
-                prot |= _PAGE_NOCACHE030;
-        /* Use no-cache mode, serialized */
-        else if (MMU_IS_040 || MMU_IS_060)
-                prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
-#endif
-
-        return __pgprot(prot);
-}
-#endif /* !pgprot_noncached */
-
-/*
- * Architectures vary in how they handle caching for addresses
- * outside of main memory.
- */
-static inline int noncached_address(unsigned long addr)
-{
-#if defined(__i386__)
-        /*
-         * On the PPro and successors, the MTRRs are used to set
-         * memory types for physical addresses outside main memory,
-         * so blindly setting PCD or PWT on those pages is wrong.
-         * For Pentiums and earlier, the surround logic should disable
-         * caching for the high addresses through the KEN pin, but
-         * we maintain the tradition of paranoia in this code.
-         */
-        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
-          && addr >= __pa(high_memory);
-#else
-        return addr >= __pa(high_memory);
-#endif
-}
-
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	int uncached;
 
-	/*
-	 * Accessing memory above the top the kernel knows about or
-	 * through a file pointer that was marked O_SYNC will be
-	 * done non-cached.
-	 */
-	if (noncached_address(offset) || (file->f_flags & O_SYNC))
+	uncached = uncached_access(file, offset);
+#ifdef pgprot_noncached
+	if (uncached)
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
 
 	/* Don't try to swap out physical pages.. */
 	vma->vm_flags |= VM_RESERVED;
@@ -194,7 +176,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 	/*
 	 * Don't dump addresses that are not real memory to a core file.
 	 */
-	if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
+	if (uncached)
 		vma->vm_flags |= VM_IO;
 	if (remap_page_range(vma, vma->vm_start, offset, vma->vm_end-vma->vm_start,
...
@@ -220,6 +220,13 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) { return test_and_clea
 static inline void ptep_set_wrprotect(pte_t *ptep)	{ clear_bit(_PAGE_BIT_RW, &ptep->pte_low); }
 static inline void ptep_mkdirty(pte_t *ptep)		{ set_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); }
 
+/*
+ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
+ * it, this is a no-op.
+ */
+#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)				\
+				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
...
@@ -153,6 +153,23 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
 #ifndef __ASSEMBLY__
 #include <asm-generic/pgtable.h>
 
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#ifdef SUN3_PAGE_NOCACHE
+# define __SUN3_PAGE_NOCACHE	SUN3_PAGE_NOCACHE
+#else
+# define __SUN3_PAGE_NOCACHE	0
+#endif
+#define pgprot_noncached(prot)						\
+	(MMU_IS_SUN3							\
+	 ? (__pgprot(pgprot_val(prot) | __SUN3_PAGE_NOCACHE))		\
+	 : ((MMU_IS_851 || MMU_IS_030)					\
+	    ? (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE030))		\
+	    : (MMU_IS_040 || MMU_IS_060)				\
+	      ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))	\
+	      : (prot)))
+
 typedef pte_t *pte_addr_t;
 
 #endif /* !__ASSEMBLY__ */
...
@@ -478,6 +478,11 @@ static inline void ptep_mkdirty(pte_t *ptep)
 	pte_update(ptep, 0, _PAGE_DIRTY);
 }
 
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
 #define pmd_page_kernel(pmd)	\
...
@@ -303,6 +303,11 @@ static inline void ptep_mkdirty(pte_t *ptep)
 	pte_update(ptep, 0, _PAGE_DIRTY);
 }
 
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
 /*
...
@@ -265,6 +265,11 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) { return test_and_clea
 static inline void ptep_set_wrprotect(pte_t *ptep)	{ clear_bit(_PAGE_BIT_RW, ptep); }
 static inline void ptep_mkdirty(pte_t *ptep)		{ set_bit(_PAGE_BIT_DIRTY, ptep); }
 
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
+
 #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
 static inline int pmd_large(pmd_t pte) {
 	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
...
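
And from the other side of the interface, a minimal userspace sketch of the path this patch touches: opening /dev/mem with O_SYNC is what makes uncached_access() return true on most architectures (on ia64 the EFI memory attributes decide instead), so the resulting mapping comes back uncached. PHYS_ADDR and MAP_LEN are hypothetical placeholders, not values from this commit; point them at a real, page-aligned MMIO region (and run as root) before trying this.

/* Userspace sketch: map a physical region uncached via /dev/mem. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PHYS_ADDR 0xfe000000UL  /* hypothetical MMIO base, page-aligned */
#define MAP_LEN   4096UL

int main(void)
{
        /* O_SYNC is the flag uncached_access() checks on most architectures. */
        int fd = open("/dev/mem", O_RDWR | O_SYNC);
        if (fd < 0) {
                perror("open /dev/mem");
                return 1;
        }

        void *p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, (off_t) PHYS_ADDR);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }

        /* On i386/x86_64 the PTEs now carry PCD|PWT, i.e. the mapping is uncached. */
        printf("first word at 0x%lx: 0x%lx\n",
               PHYS_ADDR, *(volatile unsigned long *) p);

        munmap(p, MAP_LEN);
        close(fd);
        return 0;
}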