Commit 8b150478 authored by Roland Dreier, committed by Paul Mackerras

[PATCH] ppc: make phys_mem_access_prot() work with pfns instead of addresses

Change the phys_mem_access_prot() function to take a pfn instead of an
address.  This allows mmap64() to work on /dev/mem for addresses above 4G
on 32-bit architectures.  We start with a pfn in mmap_mem(), so there's no
need to convert to an address; in fact, it's actively bad, since the
conversion can overflow when the address is above 4G.
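
A minimal userspace sketch (not from the patch) of the overflow being
avoided; uint32_t stands in for a 32-bit kernel's unsigned long, and the
pfn value is a made-up example for a page above 4G:

	/* Shifting a pfn for a page above 4G back into an address drops the high bits. */
	#include <stdio.h>
	#include <inttypes.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint32_t pfn = 0x123456;		/* physical address 0x123456000, above 4G */
		uint32_t addr = pfn << PAGE_SHIFT;	/* wraps to 0x23456000 */

		printf("pfn 0x%" PRIx32 " -> addr 0x%" PRIx32 "\n", pfn, addr);
		return 0;
	}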

Similarly fix the ppc32 page_is_ram() function to avoid a conversion to an
address by directly comparing to max_pfn.  Working with max_pfn instead of
high_memory fixes page_is_ram() to give the right answer for highmem pages.
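
For comparison, a hedged userspace model of the ppc32 check; lowmem_top,
total_pfns and highmem_pfn are invented stand-ins for __pa(high_memory),
max_pfn and a highmem page that the old test wrongly rejects:

	#include <stdio.h>
	#include <inttypes.h>

	#define PAGE_SHIFT 12

	static const uint32_t lowmem_top = 0x30000000;		      /* stand-in for __pa(high_memory): 768M lowmem */
	static const uint32_t total_pfns = 0x40000000 >> PAGE_SHIFT; /* stand-in for max_pfn: 1G of RAM */

	/* Old check: convert to an address and compare against lowmem only. */
	static int page_is_ram_old(uint32_t pfn)
	{
		return (pfn << PAGE_SHIFT) < lowmem_top;
	}

	/* New check: compare the pfn directly, so highmem pages count as RAM. */
	static int page_is_ram_new(uint32_t pfn)
	{
		return pfn < total_pfns;
	}

	int main(void)
	{
		uint32_t highmem_pfn = 0x38000000 >> PAGE_SHIFT;	/* a page at 896M, in highmem */

		printf("old: %d, new: %d\n",				/* prints "old: 0, new: 1" */
		       page_is_ram_old(highmem_pfn), page_is_ram_new(highmem_pfn));
		return 0;
	}
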
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent d49b3401
@@ -88,13 +88,13 @@ int page_is_ram(unsigned long pfn)
 }
 EXPORT_SYMBOL(page_is_ram);

-pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
 	if (ppc_md.phys_mem_access_prot)
-		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
+		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

-	if (!page_is_ram(addr >> PAGE_SHIFT))
+	if (!page_is_ram(pfn))
 		vma_prot = __pgprot(pgprot_val(vma_prot)
 				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
 	return vma_prot;

@@ -1594,16 +1594,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
  * above routine
  */
 pgprot_t pci_phys_mem_access_prot(struct file *file,
-				  unsigned long offset,
+				  unsigned long pfn,
 				  unsigned long size,
 				  pgprot_t protection)
 {
 	struct pci_dev *pdev = NULL;
 	struct resource *found = NULL;
 	unsigned long prot = pgprot_val(protection);
+	unsigned long offset = pfn << PAGE_SHIFT;
 	int i;

-	if (page_is_ram(offset >> PAGE_SHIFT))
+	if (page_is_ram(pfn))
 		return prot;

 	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

@@ -637,18 +637,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
  */
 int page_is_ram(unsigned long pfn)
 {
-	unsigned long paddr = (pfn << PAGE_SHIFT);
-
-	return paddr < __pa(high_memory);
+	return pfn < max_pfn;
 }

-pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
 	if (ppc_md.phys_mem_access_prot)
-		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
+		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

-	if (!page_is_ram(addr >> PAGE_SHIFT))
+	if (!page_is_ram(pfn))
 		vma_prot = __pgprot(pgprot_val(vma_prot)
 				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
 	return vma_prot;

@@ -726,16 +726,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
  * above routine
  */
 pgprot_t pci_phys_mem_access_prot(struct file *file,
-				  unsigned long offset,
+				  unsigned long pfn,
 				  unsigned long size,
 				  pgprot_t protection)
 {
 	struct pci_dev *pdev = NULL;
 	struct resource *found = NULL;
 	unsigned long prot = pgprot_val(protection);
+	unsigned long offset = pfn << PAGE_SHIFT;
 	int i;

-	if (page_is_ram(offset >> PAGE_SHIFT))
+	if (page_is_ram(pfn))
 		return __pgprot(prot);

 	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

@@ -231,9 +231,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
 #if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
-	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-
-	vma->vm_page_prot = phys_mem_access_prot(file, offset,
+	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
 #elif defined(pgprot_noncached)

@@ -918,7 +918,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 	}
 #endif
 #elif defined(__powerpc__)
-	vma->vm_page_prot = phys_mem_access_prot(file, off,
+	vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
 #elif defined(__alpha__)

@@ -144,7 +144,7 @@ struct machdep_calls {
 	/* Get access protection for /dev/mem */
 	pgprot_t (*phys_mem_access_prot)(struct file *file,
-					 unsigned long offset,
+					 unsigned long pfn,
 					 unsigned long size,
 					 pgprot_t vma_prot);

@@ -98,7 +98,7 @@ struct machdep_calls {
 	/* Get access protection for /dev/mem */
 	pgprot_t (*phys_mem_access_prot)(struct file *file,
-					 unsigned long offset,
+					 unsigned long pfn,
 					 unsigned long size,
 					 pgprot_t vma_prot);

@@ -126,7 +126,7 @@ extern void pcibios_add_platform_entries(struct pci_dev *dev);
 struct file;
 extern pgprot_t pci_phys_mem_access_prot(struct file *file,
-					 unsigned long offset,
+					 unsigned long pfn,
 					 unsigned long size,
 					 pgprot_t prot);

@@ -705,7 +705,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 #define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

 struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT

@@ -168,7 +168,7 @@ extern void pcibios_add_platform_entries(struct pci_dev *dev);
 struct file;
 extern pgprot_t pci_phys_mem_access_prot(struct file *file,
-					 unsigned long offset,
+					 unsigned long pfn,
 					 unsigned long size,
 					 pgprot_t prot);

@@ -471,7 +471,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 #define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

 struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT