Commit 40574c27 authored by David Mosberger

ia64: Add asm/cacheflush.h and asm/tlbflush.h.

parent 6db48033
@@ -42,7 +42,7 @@
extern void ia64_mca_check_errors( void );
#endif
struct pci_fixup pcibios_fixups[];
struct pci_fixup pcibios_fixups[1];
struct pci_ops *pci_root_ops;
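The one-line change in the hunk above replaces the unsized declaration of pcibios_fixups with a one-element definition. At file scope, struct pci_fixup pcibios_fixups[]; is a tentative definition whose incomplete array type is completed to a single zero-initialized element at the end of the translation unit anyway; writing [1] states that size explicitly and silences compilers that warn about having to assume it. A standalone sketch of the difference (the struct body is a hypothetical stand-in, not the real declaration):

        /* stand-in for the real struct pci_fixup: */
        struct pci_fixup { int pass; };

        struct pci_fixup tentative_fixups[];    /* completed to [1] at end of TU */
        struct pci_fixup explicit_fixups[1];    /* same storage, size stated up front */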
......
#ifndef _ASM_IA64_CACHEFLUSH_H
#define _ASM_IA64_CACHEFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/mm.h>

#include <asm/bitops.h>
#include <asm/page.h>
/*
 * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
 * to avoid them whenever possible.
 */

#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(vma, start, end)      do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)
#define flush_icache_page(vma,page)             do { } while (0)

#define flush_dcache_page(page)                 \
do {                                            \
        clear_bit(PG_arch_1, &page->flags);     \
} while (0)

extern void flush_icache_range (unsigned long start, unsigned long end);

#define flush_icache_user_range(vma, page, user_addr, len)                                      \
do {                                                                                            \
        unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);  \
        flush_icache_range(_addr, _addr + (len));                                               \
} while (0)
#endif /* _ASM_IA64_CACHEFLUSH_H */
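For context, flush_icache_user_range is the hook called after writing instructions into another process's page, e.g. when ptrace plants a breakpoint: it rebases the user-visible address onto the kernel's own mapping of the page and flushes only the bytes just written. A minimal hypothetical caller (this function and its name are illustrative, not part of the commit):

        #include <linux/mm.h>
        #include <linux/string.h>

        #include <asm/cacheflush.h>

        /* Sketch: copy an instruction into a mapped user text page, then make
         * the i-cache coherent for exactly the modified range. */
        static void
        poke_user_text (struct vm_area_struct *vma, struct page *page,
                        unsigned long user_addr, const void *insn, int len)
        {
                char *kaddr = (char *) page_address(page);

                memcpy(kaddr + (user_addr & ~PAGE_MASK), insn, len);
                flush_icache_user_range(vma, page, user_addr, len);
        }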
@@ -152,94 +152,4 @@ pte_free_kernel (pte_t *pte)
extern int do_check_pgt_cache (int, int);

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this macro to take care of any (delayed) i-cache flushing
 * that may be necessary.
 */
static inline void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
        unsigned long addr;
        struct page *page;

        if (!pte_exec(pte))
                return;                         /* not an executable page... */

        page = pte_page(pte);
        /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}
/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void __flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
# define flush_tlb_all()        smp_flush_tlb_all()
#else
# define flush_tlb_all()        __flush_tlb_all()
#endif

/*
 * Flush a specified user mapping
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
        if (mm) {
                mm->context = 0;

                if (mm == current->active_mm) {
                        /* This is called, e.g., as a result of exec().  */
                        get_new_mmu_context(mm);
                        reload_context(mm);
                }
        }
}
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
        flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
        if (vma->vm_mm == current->active_mm)
                asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
#endif
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
        struct vm_area_struct vma;

        if (rgn_index(start) != rgn_index(end))
                printk("flush_tlb_pgtables: can't flush across regions!!\n");
        vma.vm_mm = mm;
        flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
}
#endif /* _ASM_IA64_PGALLOC_H */
@@ -14,6 +14,7 @@
#include <linux/config.h>
#include <asm/cacheflush.h>
#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
@@ -290,30 +291,6 @@ ia64_phys_addr_valid (unsigned long addr)
# define pgprot_writecombine(prot)      __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
#endif

/*
 * Return the region index for virtual address ADDRESS.
 */
static inline unsigned long
rgn_index (unsigned long address)
{
        ia64_va a;

        a.l = address;
        return a.f.reg;
}

/*
 * Return the region offset for virtual address ADDRESS.
 */
static inline unsigned long
rgn_offset (unsigned long address)
{
        ia64_va a;

        a.l = address;
        return a.f.off;
}

static inline unsigned long
pgd_index (unsigned long address)
{
@@ -429,6 +406,31 @@ pte_same (pte_t a, pte_t b)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this macro to take care of any (delayed) i-cache flushing
 * that may be necessary.
 */
static inline void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
        unsigned long addr;
        struct page *page;

        if (!pte_exec(pte))
                return;                         /* not an executable page... */

        page = pte_page(pte);
        /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}
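Together with flush_dcache_page above, this implements a deferred i-cache flush: PG_arch_1 means "the i-cache is coherent with the d-cache for this page". Storing new data into a page clears the bit, and the flush is then paid at most once, and only if the page is ever mapped executable. An illustrative fault-path fragment (a sketch with hypothetical helper names, not code from this commit):

        /* Protocol around installing an executable PTE: */
        static void
        install_exec_pte (struct vm_area_struct *vma, unsigned long vaddr,
                          struct page *page, pte_t *ptep, pte_t pte)
        {
                flush_dcache_page(page);                /* fresh data: i-cache may be stale */
                set_pte(ptep, pte);                     /* map the page */
                update_mmu_cache(vma, vaddr, pte);      /* flush the i-cache once, if needed */
        }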
#define SWP_TYPE(entry) (((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry) (((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
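The swap-entry encoding keeps bit 0 (the _PAGE_P present bit) clear, so a swapped-out PTE can never be mistaken for a present one: the swap type occupies bits 1..8 and the offset starts at bit 9. A worked round trip, e.g. type = 3, offset = 0x1234:

        val               = (3 << 1) | (0x1234UL << 9)  /* = 0x246806, bit 0 clear */
        SWP_TYPE(entry)   = (0x246806 >> 1) & 0xff      /* = 3 */
        SWP_OFFSET(entry) = (0x246806 << 1) >> 10       /* = 0x1234 */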
@@ -440,33 +442,6 @@ extern void paging_init (void);
#define io_remap_page_range remap_page_range   /* XXX is this right? */

/*
 * Now for some cache flushing routines.  This is the kind of stuff that can be very
 * expensive, so try to avoid them whenever possible.
 */

/* Caches aren't brain-dead on the IA-64. */
#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(vma, start, end)      do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)
#define flush_icache_page(vma,page)             do { } while (0)

#define flush_dcache_page(page)                 \
do {                                            \
        clear_bit(PG_arch_1, &page->flags);     \
} while (0)

extern void flush_icache_range (unsigned long start, unsigned long end);

#define flush_icache_user_range(vma, page, user_addr, len)                                      \
do {                                                                                            \
        unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);  \
        flush_icache_range(_addr, _addr + (len));                                               \
} while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
......
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/page.h>
/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void __flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
# define flush_tlb_all()        smp_flush_tlb_all()
#else
# define flush_tlb_all()        __flush_tlb_all()
#endif
/*
 * Flush a specified user mapping
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
        if (mm) {
                mm->context = 0;

                if (mm == current->active_mm) {
                        /* This is called, e.g., as a result of exec().  */
                        get_new_mmu_context(mm);
                        reload_context(mm);
                }
        }
}
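Nothing is actually purged here: ia64 tags each TLB entry with a region ID (RID) derived from mm->context, so zeroing the context orphans every stale entry, since entries tagged with the old RID can no longer match. Only when the mm is currently active does the code immediately allocate a fresh context and load it into the region registers. A hypothetical call site, for illustration only:

        /* e.g. after exec() has torn down the old user mappings: */
        flush_tlb_mm(current->mm);      /* abandon the old RID; load a fresh one */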
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
        flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
        if (vma->vm_mm == current->active_mm)
                asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
#endif
}
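In the uniprocessor arm, the second operand of ptc.l carries log2 of the purge size in bits 7:2 of the register, which is why PAGE_SHIFT is shifted left by 2. A worked example with the default 16KB pages:

        PAGE_SHIFT = 14
        14 << 2    = 0x38       /* size field = 14, i.e. purge 2^14 bytes = one page */

On SMP the purely local ptc.l is not sufficient, since other CPUs may hold the same translation, so the code falls back to flush_tlb_range, which can reach them.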
/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
        struct vm_area_struct vma;

        if (REGION_NUMBER(start) != REGION_NUMBER(end))
                printk("flush_tlb_pgtables: can't flush across regions!!\n");
        vma.vm_mm = mm;
        flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
}
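Here ia64_thash(va) returns the address of the virtually mapped linear page-table entry for va, so the flushed range covers the cached translations of the page tables that map [start, end), not of the data pages themselves. Each region has its own linear page table, which is why a range straddling a region boundary cannot be expressed as one flush and only earns a printk. A worked size check, assuming the default 16KB pages and 8-byte short-format entries:

        user range      = 1MB
        PTEs covered    = 1MB / 16KB        /* = 64 entries */
        flushed span    = 64 * 8 bytes      /* = 512 bytes of linear page table */

so purging the page-table mapping is far cheaper than re-flushing the whole user range.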
#define flush_tlb_kernel_range(start, end)      flush_tlb_all() /* XXX fix me */
#endif /* _ASM_IA64_TLBFLUSH_H */