Commit 94c974a1 authored by Jeff Dike

Merge uml.karaya.com:/home/jdike/linux/2.5/linus-2.5

into uml.karaya.com:/home/jdike/linux/2.5/highmem-2.5
parents b2fd37fa 1b3205e2
@@ -32,6 +32,7 @@ bool 'Symmetric multi-processing support' CONFIG_UML_SMP
define_bool CONFIG_SMP $CONFIG_UML_SMP
int 'Nesting level' CONFIG_NEST_LEVEL 0
int 'Kernel address space size (in .5G units)' CONFIG_KERNEL_HALF_GIGS 1
+bool 'Highmem support' CONFIG_HIGHMEM
endmenu
mainmenu_option next_comment
...
@@ -31,6 +31,7 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_SMP is not set
CONFIG_NEST_LEVEL=0
CONFIG_KERNEL_HALF_GIGS=1
+# CONFIG_HIGHMEM is not set
#
# Loadable module support
...
@@ -34,6 +34,7 @@
struct mem_region {
char *driver;
+unsigned long start_pfn;
unsigned long start;
unsigned long len;
void *mem_map;
@@ -51,8 +52,8 @@ extern unsigned long task_size;
extern int init_mem_user(void);
extern int create_mem_file(unsigned long len);
extern void setup_range(int fd, char *driver, unsigned long start,
-unsigned long total, struct mem_region *region,
-void *reserved);
+unsigned long pfn, unsigned long total, int need_vm,
+struct mem_region *region, void *reserved);
extern void map(unsigned long virt, unsigned long p, unsigned long len,
int r, int w, int x);
extern int unmap(void *addr, int len);
@@ -62,8 +63,6 @@ extern void setup_memory(void *entry);
extern unsigned long find_iomem(char *driver, unsigned long *len_out);
extern int init_maps(struct mem_region *region);
extern int nregions(void);
-extern void setup_one_range(int n, int fd, char *driver, unsigned long start,
-unsigned long len, struct mem_region *region);
extern int reserve_vm(unsigned long start, unsigned long end, void *e);
extern unsigned long get_vm(unsigned long len);
extern void setup_physmem(unsigned long start, unsigned long usable,
...
@@ -27,6 +27,7 @@ extern unsigned long uml_physmem;
extern unsigned long uml_reserved;
extern unsigned long end_vm;
extern unsigned long start_vm;
+extern unsigned long highmem;
extern int tracing_pid;
extern int honeypot;
...
/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
@@ -27,13 +27,13 @@
#include "init.h"
unsigned long high_physmem;
unsigned long low_physmem;
unsigned long vm_start;
unsigned long vm_end;
+unsigned long highmem;
pgd_t swapper_pg_dir[1024];
unsigned long *empty_zero_page = NULL;
@@ -71,7 +71,10 @@ void mem_init(void)
{
unsigned long start;
-max_mapnr = num_physpages = max_low_pfn;
+max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;
+#ifdef CONFIG_HIGHMEM
+highmem_start_page = phys_page(__pa(high_physmem));
+#endif
/* clear the zero-page */
memset((void *) empty_zero_page, 0, PAGE_SIZE);
@@ -93,16 +96,189 @@ void mem_init(void)
}
/* this will put all low memory onto the freelists */
-totalram_pages += free_all_bootmem();
+totalram_pages = free_all_bootmem();
+totalhigh_pages = highmem >> PAGE_SHIFT;
+totalram_pages += totalhigh_pages;
+num_physpages = totalram_pages;
+max_mapnr = totalram_pages;
+max_pfn = totalram_pages;
printk(KERN_INFO "Memory: %luk available\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
kmalloc_ok = 1;
}
#if CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
void __init kmap_init(void)
{
unsigned long kmap_vstart;
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */
static void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
int i, j;
unsigned long vaddr;
vaddr = start;
i = __pgd_offset(vaddr);
j = __pmd_offset(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pmd = (pmd_t *)pgd;
for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd(_KERNPG_TABLE +
(unsigned long) __pa(pte)));
if (pte != pte_offset_kernel(pmd, 0))
BUG();
}
vaddr += PMD_SIZE;
}
j = 0;
}
}
int init_maps(struct mem_region *region)
{
struct page *p, *map;
int i, n, len;
if(region == &physmem_region){
region->mem_map = mem_map;
return(0);
}
else if(region->mem_map != NULL) return(0);
n = region->len >> PAGE_SHIFT;
len = n * sizeof(struct page);
if(kmalloc_ok){
map = kmalloc(len, GFP_KERNEL);
if(map == NULL) map = vmalloc(len);
}
else map = alloc_bootmem_low_pages(len);
if(map == NULL)
return(-ENOMEM);
for(i = 0; i < n; i++){
p = &map[i];
set_page_count(p, 0);
SetPageReserved(p);
INIT_LIST_HEAD(&p->list);
}
region->mem_map = map;
return(0);
}
static int setup_one_range(int fd, char *driver, unsigned long start,
unsigned long pfn, int len,
struct mem_region *region)
{
int i;
for(i = 0; i < NREGIONS; i++){
if(regions[i] == NULL) break;
}
if(i == NREGIONS){
printk("setup_range : no free regions\n");
return(-1);
}
if(fd == -1)
fd = create_mem_file(len);
if(region == NULL){
region = alloc_bootmem_low_pages(sizeof(*region));
if(region == NULL)
panic("Failed to allocate mem_region");
}
*region = ((struct mem_region) { driver : driver,
start_pfn : pfn,
start : start,
len : len,
fd : fd } );
regions[i] = region;
return(i);
}
#ifdef CONFIG_HIGHMEM
static void init_highmem(void)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long vaddr;
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
pmd = pmd_offset(pgd, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
kmap_init();
}
void setup_highmem(unsigned long len)
{
struct mem_region *region;
struct page *page, *map;
unsigned long phys;
int i, cur, index;
phys = physmem_size;
do {
cur = min(len, (unsigned long) REGION_SIZE);
i = setup_one_range(-1, NULL, -1, phys >> PAGE_SHIFT, cur,
NULL);
if(i == -1){
printk("setup_highmem - setup_one_range failed\n");
return;
}
region = regions[i];
index = phys / PAGE_SIZE;
region->mem_map = &mem_map[index];
map = region->mem_map;
for(i = 0; i < (cur >> PAGE_SHIFT); i++){
page = &map[i];
ClearPageReserved(page);
set_bit(PG_highmem, &page->flags);
atomic_set(&page->count, 1);
__free_page(page);
}
phys += cur;
len -= cur;
} while(len > 0);
}
#endif
void paging_init(void)
{
struct mem_region *region;
-unsigned long zones_size[MAX_NR_ZONES], start, end;
+unsigned long zones_size[MAX_NR_ZONES], start, end, vaddr;
int i, index;
empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
@@ -111,6 +287,7 @@ void paging_init(void)
zones_size[i] = 0;
zones_size[0] = (high_physmem >> PAGE_SHIFT) -
(uml_physmem >> PAGE_SHIFT);
+zones_size[2] = highmem >> PAGE_SHIFT;
free_area_init(zones_size);
start = phys_region_index(__pa(uml_physmem));
end = phys_region_index(__pa(high_physmem - 1));
@@ -120,6 +297,18 @@ void paging_init(void)
region->mem_map = &mem_map[index];
if(i > start) free_bootmem(__pa(region->start), region->len);
}
/*
* Fixed mappings, only the page table structure has to be
* created - mappings will be set by set_fixmap():
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
#if CONFIG_HIGHMEM
init_highmem();
setup_highmem(highmem);
#endif
}
pte_t __bad_page(void)
@@ -220,6 +409,8 @@ struct page *arch_validate(struct page *page, int mask, int order)
again:
if(page == NULL) return(page);
if(PageHighMem(page)) return(page);
addr = (unsigned long) page_address(page);
for(i = 0; i < (1 << order); i++){
current->thread.fault_addr = (void *) addr;
@@ -315,56 +506,24 @@
return(NREGIONS);
}
-int init_maps(struct mem_region *region)
-{
-struct page *p, *map;
-int i, n;
-if(region == &physmem_region){
-region->mem_map = mem_map;
-return(0);
-}
-else if(region->mem_map != NULL) return(0);
-n = region->len >> PAGE_SHIFT;
-map = kmalloc(n * sizeof(struct page), GFP_KERNEL);
-if(map == NULL) map = vmalloc(n * sizeof(struct page));
-if(map == NULL)
-return(-ENOMEM);
-for(i = 0; i < n; i++){
-p = &map[i];
-set_page_count(p, 0);
-SetPageReserved(p);
-INIT_LIST_HEAD(&p->list);
-}
-region->mem_map = map;
-return(0);
-}
-void setup_range(int fd, char *driver, unsigned long start,
-unsigned long len, struct mem_region *region, void *reserved)
+void setup_range(int fd, char *driver, unsigned long start, unsigned long pfn,
+unsigned long len, int need_vm, struct mem_region *region,
+void *reserved)
{
-int i, incr;
-i = 0;
+int i, cur;
do {
-for(; i < NREGIONS; i++){
-if(regions[i] == NULL) break;
-}
-if(i == NREGIONS){
-printk("setup_range : no free regions\n");
-return;
-}
-setup_one_range(i, fd, driver, start, len, region);
+cur = min(len, (unsigned long) REGION_SIZE);
+i = setup_one_range(fd, driver, start, pfn, cur, region);
region = regions[i];
-if(setup_region(region, reserved)){
+if(need_vm && setup_region(region, reserved)){
kfree(region);
regions[i] = NULL;
return;
}
-incr = min(len, (unsigned long) REGION_SIZE);
-start += incr;
-len -= incr;
+start += cur;
+if(pfn != -1) pfn += cur;
+len -= cur;
} while(len > 0);
}
@@ -399,8 +558,8 @@ int setup_iomem(void)
for(i = 0; i < num_iomem_regions; i++){
iomem = &iomem_regions[i];
-setup_range(iomem->fd, iomem->name, -1, iomem->size, NULL,
-NULL);
+setup_range(iomem->fd, iomem->name, -1, -1, iomem->size, 1,
+NULL, NULL);
}
return(0);
}
@@ -418,7 +577,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
{
struct mem_region *region = &physmem_region;
struct vm_reserved *reserved = &physmem_reserved;
-unsigned long cur;
+unsigned long cur, pfn = 0;
int do_free = 1, bootmap_size;
do {
@@ -430,7 +589,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
if((region == NULL) || (reserved == NULL))
panic("Couldn't allocate physmem region or vm "
"reservation\n");
-setup_range(-1, NULL, start, cur, region, reserved);
+setup_range(-1, NULL, start, pfn, cur, 1, region, reserved);
if(do_free){
unsigned long reserve = reserve_end - start;
@@ -443,6 +602,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
do_free = 0;
}
start += cur;
+pfn += cur >> PAGE_SHIFT;
len -= cur;
region = NULL;
reserved = NULL;
@@ -492,6 +652,56 @@ struct mem_region *page_region(struct page *page, int *index_out)
return(NULL);
}
unsigned long page_to_pfn(struct page *page)
{
struct mem_region *region = page_region(page, NULL);
return(region->start_pfn + (page - (struct page *) region->mem_map));
}
struct mem_region *pfn_to_region(unsigned long pfn, int *index_out)
{
struct mem_region *region;
int i;
for(i = 0; i < NREGIONS; i++){
region = regions[i];
if(region == NULL)
continue;
if((region->start_pfn <= pfn) &&
(region->start_pfn + (region->len >> PAGE_SHIFT) > pfn)){
if(index_out != NULL)
*index_out = i;
return(region);
}
}
return(NULL);
}
struct page *pfn_to_page(unsigned long pfn)
{
struct mem_region *region = pfn_to_region(pfn, NULL);
struct page *mem_map = (struct page *) region->mem_map;
return(&mem_map[pfn - region->start_pfn]);
}
unsigned long phys_to_pfn(unsigned long p)
{
struct mem_region *region = regions[phys_region_index(p)];
return(region->start_pfn + (phys_addr(p) >> PAGE_SHIFT));
}
unsigned long pfn_to_phys(unsigned long pfn)
{
int n;
struct mem_region *region = pfn_to_region(pfn, &n);
return(mk_phys((pfn - region->start_pfn) << PAGE_SHIFT, n));
}
struct page *page_mem_map(struct page *page)
{
return((struct page *) page_region(page, NULL)->mem_map);
@@ -535,7 +745,7 @@ struct page *phys_to_page(unsigned long phys)
return(mem_map + (phys_offset(phys) >> PAGE_SHIFT));
}
-int setup_mem_maps(void)
+static int setup_mem_maps(void)
{
struct mem_region *region;
int i;
@@ -594,7 +804,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
struct page *pte;
do {
-pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
+pte = alloc_pages(GFP_KERNEL, 0);
if (pte)
clear_highpage(pte);
else {
...
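The new start_pfn field is what lets each mem_region translate between page frame numbers and region-relative physical addresses, as the page_to_pfn()/pfn_to_phys() helpers above do. A minimal standalone sketch of that arithmetic follows; the two-region layout, the 32MB region size, and all names here are illustrative assumptions, not the kernel's actual structures:

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define REGION_SHIFT 25   /* assumed 32MB regions, standing in for REGION_SIZE */

    struct mem_region_example {
            unsigned long start_pfn;   /* first pfn covered by this region */
            unsigned long len;         /* region length in bytes */
    };

    /* phys encodes (region index << REGION_SHIFT) | offset, mirroring mk_phys()/phys_addr() */
    static unsigned long example_phys_to_pfn(struct mem_region_example *regions,
                                             unsigned long phys)
    {
            struct mem_region_example *r = &regions[phys >> REGION_SHIFT];
            return r->start_pfn + ((phys & ((1UL << REGION_SHIFT) - 1)) >> PAGE_SHIFT);
    }

    int main(void)
    {
            /* two regions: low physical memory, then a highmem region stacked above it */
            struct mem_region_example regions[2] = {
                    { 0,                          32UL << 20 },
                    { (32UL << 20) >> PAGE_SHIFT, 32UL << 20 },
            };
            /* a page 4MB into the second region lands at pfn 8192 + 1024 = 9216 */
            unsigned long phys = (1UL << REGION_SHIFT) + (4UL << 20);
            printf("pfn = %lu\n", example_phys_to_pfn(regions, phys));
            return 0;
    }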
@@ -77,25 +77,6 @@ int create_mem_file(unsigned long len)
return(fd);
}
-void setup_one_range(int n, int fd, char *driver, unsigned long start,
-unsigned long len, struct mem_region *region)
-{
-if(fd == -1)
-fd = create_mem_file(len);
-if(region == NULL){
-region = malloc(sizeof(*region));
-if(region == NULL){
-perror("Allocating mem_region");
-exit(1);
-}
-}
-*region = ((struct mem_region) { driver : driver,
-start : start,
-len : len,
-fd : fd } );
-regions[n] = region;
-}
int setup_region(struct mem_region *region, void *entry)
{
void *loc, *start;
...
@@ -528,7 +528,7 @@ unsigned long um_virt_to_phys(void *t, unsigned long addr)
char *current_cmd(void)
{
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
return("(Unknown)");
#else
unsigned long addr = um_virt_to_phys(current, current->mm->arg_start);
...
@@ -178,6 +178,11 @@ void flush_tlb_kernel_vm(void)
flush_tlb_kernel_range(start_vm, end_vm);
}
+void __flush_tlb_one(unsigned long addr)
+{
+flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
...
@@ -4,6 +4,7 @@
 */
#include "linux/config.h"
+#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/notifier.h"
#include "linux/mm.h"
@@ -109,8 +110,6 @@ static int start_kernel_proc(void *unused)
return(0);
}
-extern unsigned long high_physmem;
#ifdef CONFIG_HOST_2G_2G
#define TOP 0x80000000
#else
@@ -160,7 +159,7 @@ void set_cmdline(char *cmd)
snprintf(ptr, (argv1_end - ptr) * sizeof(*ptr), " [%s]", cmd);
memset(argv1_begin + strlen(argv1_begin), '\0',
argv1_end - argv1_begin - strlen(argv1_begin));
}
static char *usage_string =
@@ -263,10 +262,12 @@ unsigned long brk_start;
static struct vm_reserved kernel_vm_reserved;
+#define MIN_VMALLOC (32 * 1024 * 1024)
int linux_main(int argc, char **argv)
{
unsigned long avail;
-unsigned long virtmem_size;
+unsigned long virtmem_size, max_physmem;
unsigned int i, add, err;
void *sp;
@@ -278,7 +279,7 @@ int linux_main(int argc, char **argv)
}
if(have_root == 0) add_arg(saved_command_line, DEFAULT_COMMAND_LINE);
-if(!jail)
+if(!jail || debug)
remap_data(ROUND_DOWN(&_stext), ROUND_UP(&_etext), 1);
remap_data(ROUND_DOWN(&_sdata), ROUND_UP(&_edata), 1);
brk_start = (unsigned long) sbrk(0);
@@ -295,20 +296,20 @@ int linux_main(int argc, char **argv)
argv1_end = &argv[1][strlen(argv[1])];
set_usable_vm(uml_physmem, get_kmem_end());
+highmem = 0;
+max_physmem = get_kmem_end() - uml_physmem - MIN_VMALLOC;
+if(physmem_size > max_physmem){
+highmem = physmem_size - max_physmem;
+physmem_size -= highmem;
+}
high_physmem = uml_physmem + physmem_size;
high_memory = (void *) high_physmem;
-setup_physmem(uml_physmem, uml_reserved, physmem_size);
-/* Kernel vm starts after physical memory and is either the size
- * of physical memory or the remaining space left in the kernel
- * area of the address space, whichever is smaller.
- */
start_vm = VMALLOC_START;
-if(start_vm >= get_kmem_end())
-panic("Physical memory too large to allow any kernel "
-"virtual memory");
+setup_physmem(uml_physmem, uml_reserved, physmem_size);
virtmem_size = physmem_size;
avail = get_kmem_end() - start_vm;
if(physmem_size > avail) virtmem_size = avail;
...
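The linux_main() change above splits the requested physical memory into a low portion that fits inside the kernel's address window (while keeping at least MIN_VMALLOC of vmalloc space) and a highmem remainder. A rough standalone sketch of that split; the addresses and the 768MB request are assumed example values, not anything the kernel computes:

    #include <stdio.h>

    #define MIN_VMALLOC (32 * 1024 * 1024)   /* as added by the diff */

    int main(void)
    {
            /* assumed layout: physmem starts at 0x50000000, kernel area ends at 0x70000000 */
            unsigned long uml_physmem  = 0x50000000UL;
            unsigned long kmem_end     = 0x70000000UL;
            unsigned long physmem_size = 768UL << 20;   /* user asked for 768MB */
            unsigned long highmem      = 0;

            /* at most this much low memory fits while preserving MIN_VMALLOC of vmalloc space */
            unsigned long max_physmem = kmem_end - uml_physmem - MIN_VMALLOC;
            if (physmem_size > max_physmem) {
                    highmem = physmem_size - max_physmem;
                    physmem_size -= highmem;
            }
            /* 512MB window - 32MB vmalloc = 480MB low; the remaining 288MB becomes highmem */
            printf("low %luMB high %luMB\n", physmem_size >> 20, highmem >> 20);
            return 0;
    }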
#ifndef __UM_FIXMAP_H
#define __UM_FIXMAP_H
#include <linux/config.h>
#include <asm/kmap_types.h>
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
* on UP currently we will have no trace of the fixmap mechanism,
* no page table allocations, etc. This might change in the
* future, say framebuffers for the console driver(s) could be
* fix-mapped?
*/
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
__end_of_fixed_addresses
};
extern void __set_fixmap (enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
/*
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
extern unsigned long get_kmem_end(void);
#define FIXADDR_TOP (get_kmem_end() - 0x2000)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message..
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif
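The fixmap header above hands out compile-time virtual addresses downward from FIXADDR_TOP, one page per index. A small sketch of the __fix_to_virt() arithmetic; the top address is an assumed example, since the real header derives it from get_kmem_end():

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define FIXADDR_TOP (0x70000000UL - 0x2000)   /* assumed kmem end, minus the two guard pages */

    /* mirrors __fix_to_virt(): fixmap slots are allocated downward from FIXADDR_TOP */
    static unsigned long fix_to_virt_example(unsigned int idx)
    {
            return FIXADDR_TOP - ((unsigned long) idx << PAGE_SHIFT);
    }

    int main(void)
    {
            /* slot 0 sits at the top; each further index is one page lower */
            printf("slot 0: %#lx\n", fix_to_virt_example(0));
            printf("slot 3: %#lx\n", fix_to_virt_example(3));
            return 0;
    }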
#ifndef __UM_HIGHMEM_H
#define __UM_HIGHMEM_H
+#include "asm/page.h"
+#include "asm/fixmap.h"
#include "asm/arch/highmem.h"
+#undef PKMAP_BASE
+#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#endif
@@ -43,10 +43,11 @@ extern void *region_va(unsigned long phys);
#define __pa(virt) region_pa((void *) (virt))
#define __va(phys) region_va((unsigned long) (phys))
+extern unsigned long page_to_pfn(struct page *page);
+extern struct page *pfn_to_page(unsigned long pfn);
extern struct page *phys_to_page(unsigned long phys);
-#define pfn_to_page(pfn) (phys_to_page(pfn << PAGE_SHIFT))
-#define page_to_pfn(page) (page_to_phys(page) >> PAGE_SHIFT)
#define virt_to_page(v) (phys_to_page(__pa(v)))
extern struct page *page_mem_map(struct page *page);
...
@@ -8,6 +8,7 @@
#define __UM_PGALLOC_H
#include "linux/mm.h"
+#include "asm/fixmap.h"
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
@@ -15,7 +16,7 @@
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
struct page *pte)
{
-set_pmd(pmd, __pmd(_PAGE_TABLE + phys_addr(page_to_phys(pte))));
+set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
extern pgd_t *pgd_alloc(struct mm_struct *);
...
@@ -10,6 +10,7 @@
#include "linux/sched.h"
#include "asm/processor.h"
#include "asm/page.h"
+#include "asm/fixmap.h"
extern pgd_t swapper_pg_dir[1024];
@@ -62,12 +63,16 @@ extern unsigned long *empty_zero_page;
 */
extern unsigned long high_physmem;
-extern unsigned long end_vm;
#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START (((unsigned long) high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END (end_vm)
+#if CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
@@ -183,15 +188,17 @@ static inline void pgd_clear(pgd_t * pgdp) { }
extern struct page *pte_mem_map(pte_t pte);
extern struct page *phys_mem_map(unsigned long phys);
+extern unsigned long phys_to_pfn(unsigned long p);
+extern unsigned long pfn_to_phys(unsigned long pfn);
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
#define mk_phys(a, r) ((a) + (r << REGION_SHIFT))
#define phys_addr(p) ((p) & ~REGION_MASK)
#define phys_page(p) (phys_mem_map(p) + ((phys_addr(p)) >> PAGE_SHIFT))
-#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(x) phys_to_pfn(pte_val(x))
+#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
static inline pte_t pte_mknewprot(pte_t pte)
{
@@ -333,6 +340,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define __pgd_offset(address) pgd_index(address)
/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
@@ -341,6 +349,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define __pmd_offset(address) \
+(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
...
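Taken together, the new constants stack the top of the kernel area downward: fixmap slots under FIXADDR_TOP, the permanent-kmap window under that, and vmalloc ending two guard pages below PKMAP_BASE (or below FIXADDR_START when highmem is off). A sketch of that layout computation; the fixmap count, LAST_PKMAP, and the top address are assumed illustrative values:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PMD_MASK   (~((1UL << 22) - 1))   /* 4MB pmd coverage, as on two-level i386 */

    int main(void)
    {
            unsigned long fixaddr_top = 0x70000000UL - 2 * PAGE_SIZE;  /* assumed get_kmem_end() - 0x2000 */
            unsigned long nr_fixmaps  = 32;     /* illustrative __end_of_fixed_addresses */
            unsigned long last_pkmap  = 1024;   /* illustrative LAST_PKMAP */

            unsigned long fixaddr_start = fixaddr_top - (nr_fixmaps << PAGE_SHIFT);
            unsigned long pkmap_base    = (fixaddr_start - last_pkmap * PAGE_SIZE) & PMD_MASK;
            unsigned long vmalloc_end   = pkmap_base - 2 * PAGE_SIZE;  /* CONFIG_HIGHMEM case */

            printf("fixmap  %#lx..%#lx\n", fixaddr_start, fixaddr_top);
            printf("pkmap   %#lx..%#lx\n", pkmap_base, fixaddr_start);
            printf("vmalloc ends at %#lx\n", vmalloc_end);
            return 0;
    }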
@@ -27,6 +27,7 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_kernel_vm(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void __flush_tlb_one(unsigned long addr);
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
...