Commit a4403298 authored by Keith M. Wesolowski

[SPARC32]: Support memory starting at physical address other than 0

From: Stefan Holst <mail@s-holst.de>

Allow physical memory to start at almost arbitrary addresses.  LEON
needs it, so do SPARCstation 10/20 without slot 0 populated.  Although
Sun do not support this configuration, at least some such systems can
boot with this patch.

Physical memory starting at or above 0xF4000000 is not supported.
parent 366bf3a0
......@@ -331,6 +331,7 @@ void __init setup_arch(char **cmdline_p)
if (highest_paddr < top)
highest_paddr = top;
}
pfn_base = phys_base >> PAGE_SHIFT;
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
......
......@@ -145,6 +145,7 @@ EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(sparc_valid_addr_bitmap);
EXPORT_SYMBOL(phys_base);
EXPORT_SYMBOL(pfn_base);
/* Atomic operations. */
EXPORT_SYMBOL(___atomic24_add);
......
......@@ -38,6 +38,7 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long *sparc_valid_addr_bitmap;
unsigned long phys_base;
unsigned long pfn_base;
unsigned long page_kernel;
......@@ -134,7 +135,7 @@ unsigned long calc_highpages(void)
unsigned long calc_max_low_pfn(void)
{
int i;
unsigned long tmp = (SRMMU_MAXMEM >> PAGE_SHIFT);
unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
unsigned long curr_pfn, last_pfn;
last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
......@@ -189,9 +190,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
*/
start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
/* Adjust up to the physical address where the kernel begins. */
start_pfn += phys_base;
/* Now shift down to get the real physical page frame number. */
start_pfn >>= PAGE_SHIFT;
......@@ -202,8 +200,8 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
max_low_pfn = max_pfn;
highstart_pfn = highend_pfn = max_pfn;
if (max_low_pfn > (SRMMU_MAXMEM >> PAGE_SHIFT)) {
highstart_pfn = (SRMMU_MAXMEM >> PAGE_SHIFT);
if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
max_low_pfn = calc_max_low_pfn();
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
calc_highpages() >> (20 - PAGE_SHIFT));
......@@ -230,7 +228,8 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
}
#endif
/* Initialize the boot-time allocator. */
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, phys_base>>PAGE_SHIFT, max_low_pfn);
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
max_low_pfn);
/* Now register the available physical memory with the
* allocator.
......@@ -267,8 +266,8 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
reserve_bootmem(initrd_start, size);
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
initrd_start += PAGE_OFFSET;
initrd_end += PAGE_OFFSET;
initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
}
#endif
/* Reserve the kernel text/data/bss. */
......@@ -432,7 +431,7 @@ void __init mem_init(void)
taint_real_pages();
max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
max_mapnr = last_valid_pfn - pfn_base;
high_memory = __va(max_low_pfn << PAGE_SHIFT);
num_physpages = totalram_pages = free_all_bootmem();
......@@ -474,11 +473,9 @@ void free_initmem (void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
unsigned long page;
struct page *p;
page = addr + phys_base;
p = virt_to_page(page);
p = virt_to_page(addr);
ClearPageReserved(p);
set_page_count(p, 1);
......
......@@ -213,7 +213,7 @@ static inline pte_t srmmu_pte_mkyoung(pte_t pte)
* and a page entry and page directory to the page they refer to.
*/
static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
{ return __pte(((page - mem_map) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
......@@ -245,7 +245,7 @@ static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
unsigned long ptp; /* Physical address, shifted right by 4 */
int i;
ptp = (ptep - mem_map) << (PAGE_SHIFT-4); /* watch for overflow */
ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
......@@ -480,7 +480,7 @@ srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
return NULL;
return mem_map + (__nocache_pa(pte) >> PAGE_SHIFT);
return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
}
static void srmmu_free_pte_fast(pte_t *pte)
......@@ -495,7 +495,7 @@ static void srmmu_pte_free(struct page *pte)
p = (unsigned long)page_address(pte); /* Cached address (for test) */
if (p == 0)
BUG();
p = ((pte - mem_map) << PAGE_SHIFT); /* Physical address */
p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
p = (unsigned long) __nocache_va(p); /* Nocached virtual */
srmmu_free_nocache(p, SRMMU_PTE_SZ_SOFT);
}
......@@ -1316,7 +1316,7 @@ void __init srmmu_paging_init(void)
for (znum = 0; znum < MAX_NR_ZONES; znum++)
zones_size[znum] = zholes_size[znum] = 0;
npages = max_low_pfn - (phys_base >> PAGE_SHIFT);
npages = max_low_pfn - pfn_base;
zones_size[ZONE_DMA] = npages;
zholes_size[ZONE_DMA] = npages - pages_avail;
......@@ -1326,13 +1326,9 @@ void __init srmmu_paging_init(void)
zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
free_area_init_node(0, &contig_page_data, NULL, zones_size,
phys_base >> PAGE_SHIFT, zholes_size);
pfn_base, zholes_size);
mem_map = contig_page_data.node_mem_map;
}
/* P3: easy to fix, todo. Current code is utterly broken, though. */
if (phys_base != 0)
panic("phys_base nonzero");
}
static void srmmu_mmu_info(struct seq_file *m)
......
......@@ -2088,7 +2088,7 @@ void __init sun4c_paging_init(void)
for (znum = 0; znum < MAX_NR_ZONES; znum++)
zones_size[znum] = zholes_size[znum] = 0;
npages = max_low_pfn - (phys_base >> PAGE_SHIFT);
npages = max_low_pfn - pfn_base;
zones_size[ZONE_DMA] = npages;
zholes_size[ZONE_DMA] = npages - pages_avail;
......@@ -2098,7 +2098,7 @@ void __init sun4c_paging_init(void)
zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
free_area_init_node(0, &contig_page_data, NULL, zones_size,
phys_base >> PAGE_SHIFT, zholes_size);
pfn_base, zholes_size);
mem_map = contig_page_data.node_mem_map;
}
......
......@@ -156,17 +156,22 @@ extern __inline__ int get_order(unsigned long size)
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
#define PAGE_OFFSET 0xf0000000
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#ifndef __ASSEMBLY__
extern unsigned long phys_base;
extern unsigned long pfn_base;
#endif
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + phys_base)
#define __va(x) ((void *)((unsigned long) (x) - phys_base + PAGE_OFFSET))
#define virt_to_phys __pa
#define phys_to_virt __va
#define virt_to_phys(x) __pa((unsigned long)(x))
#define phys_to_virt(x) __va((unsigned long)(x))
#define pfn_to_page(pfn) (mem_map + ((pfn)-(pfn_base)))
#define page_to_pfn(page) ((unsigned long)(((page) - mem_map) + pfn_base))
#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
......@@ -181,6 +181,7 @@ extern int num_contexts;
* hit for all __pa()/__va() operations.
*/
extern unsigned long phys_base;
extern unsigned long pfn_base;
/*
* BAD_PAGETABLE is used when we need a bogus page-table, while
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment