Commit cad6dc2f authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/sparcwork-2.6

into nuts.davemloft.net:/disk1/BK/sparc-2.6
parents 65d17d59 62f7176e
@@ -654,13 +654,13 @@ extern atomic_t dcpage_flushes_xcall;
 static __inline__ void __local_flush_dcache_page(struct page *page)
 {
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-	__flush_dcache_page(page->virtual,
+	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
 			     page_mapping(page) != NULL));
 #else
 	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page->virtual));
+		__flush_icache_page(__pa(page_address(page)));
 #endif
 }
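Note: page_address() is the generic replacement for the removed page->virtual field (see the asm-sparc64/page.h hunk below). A minimal sketch of what it reduces to, assuming a lowmem-only configuration like sparc64; this mirrors the generic lowmem path and is not code from this commit:

#include <linux/mm.h>

/* With WANT_PAGE_VIRTUAL gone, page_address() no longer loads a
 * cached pointer out of struct page; on a machine with no highmem
 * it is pure arithmetic into the kernel linear mapping.
 */
static inline void *sketch_page_address(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}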
@@ -675,6 +675,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	if (cpu == this_cpu) {
 		__local_flush_dcache_page(page);
 	} else if (cpu_online(cpu)) {
+		void *pg_addr = page_address(page);
 		u64 data0;
 
 		if (tlb_type == spitfire) {
@@ -683,14 +684,14 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 			if (page_mapping(page) != NULL)
 				data0 |= ((u64)1 << 32);
 			spitfire_xcall_deliver(data0,
-					       __pa(page->virtual),
-					       (u64) page->virtual,
+					       __pa(pg_addr),
+					       (u64) pg_addr,
 					       mask);
 		} else {
 			data0 =
 				((u64)&xcall_flush_dcache_page_cheetah);
 			cheetah_xcall_deliver(data0,
-					      __pa(page->virtual),
+					      __pa(pg_addr),
 					      0, mask);
 		}
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -703,6 +704,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
+	void *pg_addr = page_address(page);
 	cpumask_t mask = cpu_online_map;
 	u64 data0;
 	int this_cpu = get_cpu();
@@ -719,13 +721,13 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 		if (page_mapping(page) != NULL)
 			data0 |= ((u64)1 << 32);
 		spitfire_xcall_deliver(data0,
-				       __pa(page->virtual),
-				       (u64) page->virtual,
+				       __pa(pg_addr),
+				       (u64) pg_addr,
 				       mask);
 	} else {
 		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 		cheetah_xcall_deliver(data0,
-				      __pa(page->virtual),
+				      __pa(pg_addr),
 				      0, mask);
 	}
 #ifdef CONFIG_DEBUG_DCFLUSH
...
@@ -360,6 +360,8 @@ EXPORT_SYMBOL(__bzero_noasi);
 EXPORT_SYMBOL(phys_base);
 EXPORT_SYMBOL(pfn_base);
 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+EXPORT_SYMBOL(page_to_pfn);
+EXPORT_SYMBOL(pfn_to_page);
 
 /* No version information on this, heavily used in inline asm,
  * and will always be 'void __ret_efault(void)'.
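These two exports are needed because page_to_pfn()/pfn_to_page() become real functions in mm/init.c (next file); modules that used the old macros now resolve them as kernel symbols at load time. A hypothetical module fragment, not part of this commit, that would fail to link without the exports:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>

static int __init pfn_demo_init(void)
{
	/* This is now a call into the kernel, not inline arithmetic. */
	struct page *first = pfn_to_page(pfn_base);

	printk(KERN_INFO "pfn_base %lu -> page %p\n", pfn_base, first);
	return 0;
}

static void __exit pfn_demo_exit(void)
{
}

module_init(pfn_demo_init);
module_exit(pfn_demo_exit);
MODULE_LICENSE("GPL");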
...
@@ -137,13 +137,13 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #endif
 
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-	__flush_dcache_page(page->virtual,
+	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
 			     page_mapping(page) != NULL));
 #else
 	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page->virtual));
+		__flush_icache_page(__pa(page_address(page)));
 #endif
 }
@@ -344,6 +344,16 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	}
 }
 
+unsigned long page_to_pfn(struct page *page)
+{
+	return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+	return (mem_map + (pfn - pfn_base));
+}
+
 void show_mem(void)
 {
 	printk("Mem-info:\n");
...
@@ -19,8 +19,8 @@ extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
 #define bus_to_virt bus_to_virt_not_defined_use_pci_map
 
 /* BIO layer definitions. */
-extern unsigned long phys_base, kern_base, kern_size;
-#define page_to_phys(page)	((((page) - mem_map) << PAGE_SHIFT)+phys_base)
+extern unsigned long kern_base, kern_size;
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define BIO_VMERGE_BOUNDARY	8192
 
 /* Different PCI controllers we support have their PCI MEM space
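Dropping phys_base from this header is safe assuming phys_base == (pfn_base << PAGE_SHIFT), i.e. RAM starts on a page boundary, which is what allows pfn_base to be derived from phys_base in the first place. Expanding the new macro with the page_to_pfn() body from mm/init.c recovers the old one:

	page_to_phys(page)
	  = page_to_pfn(page) << PAGE_SHIFT
	  = ((page - mem_map) + pfn_base) << PAGE_SHIFT
	  = ((page - mem_map) << PAGE_SHIFT) + (pfn_base << PAGE_SHIFT)
	  = ((page - mem_map) << PAGE_SHIFT) + phys_base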
...
@@ -14,9 +14,6 @@
 #ifndef __ASSEMBLY__
 
-/* Sparc64 is slow at multiplication, we prefer to use some extra space. */
-#define WANT_PAGE_VIRTUAL 1
-
 extern void _clear_page(void *page);
 #define clear_page(X)	_clear_page((void *)(X))
 
 struct page;
@@ -111,17 +108,19 @@ typedef unsigned long iopgprot_t;
  */
 #define PAGE_OFFSET		_AC(0xFFFFF80000000000,UL)
 
+#ifndef __ASSEMBLY__
+
 #define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
 
 /* PFNs are real physical page numbers. However, mem_map only begins to record
  * per-page information starting at pfn_base. This is to handle systems where
- * the first physical page in the machine is at some huge physical address, such
- * as 4GB. This is common on a partitioned E10000, for example.
+ * the first physical page in the machine is at some huge physical address,
+ * such as 4GB. This is common on a partitioned E10000, for example.
  */
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *);
 
-#define pfn_to_page(pfn)	(mem_map + ((pfn)-(pfn_base)))
-#define page_to_pfn(page)	((unsigned long)(((page) - mem_map) + pfn_base))
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
 
 #define pfn_valid(pfn)		(((pfn)-(pfn_base)) < max_mapnr)
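virt_to_page() keeps its old shape but now bottoms out in the out-of-line pfn_to_page(). Continuing the illustrative numbers used earlier (8K pages, RAM at 4GB, pfn_base = 0x80000), for kaddr = PAGE_OFFSET + 0x100002000:

	__pa(kaddr)                = 0x100002000
	__pa(kaddr) >> PAGE_SHIFT  = 0x80001
	virt_to_page(kaddr)        = pfn_to_page(0x80001) = &mem_map[1]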
@@ -130,8 +129,6 @@ typedef unsigned long iopgprot_t;
 #define virt_to_phys __pa
 #define phys_to_virt __va
 
-#ifndef __ASSEMBLY__
-
 /* The following structure is used to hold the physical
  * memory configuration of the machine. This is filled in
  * probe_memory() and is later used by mem_init() to set up
...