Commit d24919a7 authored by Christoph Hellwig

VM: Rework vmalloc code to support mapping of arbitrary pages

The vmalloc operation is split into two pieces: allocate the backing
pages and map them into the kernel page tables for virtually contiguous
access. (Same for vfree.)  A new set of interfaces, vmap & vunmap, does
only the second part and thus allows mapping arbitrary pages into
kernel virtual memory.

The vmalloc.c internals have been completely overhauled to support this,
but the exported interfaces are unchanged.
parent 35aa61ec
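
To make the split concrete, here is a minimal driver-side sketch (hypothetical, not part of this commit); only vmap() and vunmap() come from the interface declared in the reworked include/linux/vmalloc.h further down, the function and variable names are illustrative. The caller allocates and owns the pages; vmap()/vunmap() only manage the virtually contiguous kernel mapping.

/*
 * Hypothetical usage sketch -- names are illustrative, only
 * vmap()/vunmap() are the interfaces added by this commit.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_map(struct page **pages, unsigned int nr_pages)
{
	/* map already-allocated pages into a contiguous kernel virtual range */
	return vmap(pages, nr_pages);
}

static void example_unmap(void *addr)
{
	/* tear down the mapping only; the pages themselves stay allocated */
	vunmap(addr);
}

vfree() remains the counterpart of vmalloc() and also frees the backing pages; vunmap() deliberately does not.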
@@ -159,7 +159,7 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
 	area->phys_addr = phys_addr;
 	addr = area->addr;
 	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
-		vfree(addr);
+		vunmap(addr);
 		return NULL;
 	}
 	return (void *) (offset + (char *)addr);
@@ -215,13 +215,13 @@ void iounmap(void *addr)
 	struct vm_struct *p;
 	if (addr <= high_memory)
 		return;
-	p = remove_kernel_area((void *) (PAGE_MASK & (unsigned long) addr));
+	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
 	if (!p) {
 		printk("__iounmap: bad address %p\n", addr);
 		return;
 	}
-	vmfree_area_pages(VMALLOC_VMADDR(p->addr), p->size);
+	unmap_vm_area(p);
 	if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
...
@@ -5,6 +5,8 @@
  * Based upon code written by Linus Torvalds and others.
  */
 
+#warning "major untested changes to this file --hch (2002/08/05)"
+
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -16,6 +18,7 @@ static struct vm_struct * modvmlist = NULL;
 void module_unmap (void * addr)
 {
 	struct vm_struct **p, *tmp;
+	int i;
 
 	if (!addr)
 		return;
@@ -23,21 +26,38 @@ void module_unmap (void * addr)
 		printk("Trying to unmap module with bad address (%p)\n", addr);
 		return;
 	}
 	for (p = &modvmlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
 			*p = tmp->next;
-			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
-			kfree(tmp);
-			return;
+			goto found;
 		}
 	}
 	printk("Trying to unmap nonexistent module vm area (%p)\n", addr);
+	return;
+
+found:
+	unmap_vm_area(tmp);
+	for (i = 0; i < tmp->nr_pages; i++) {
+		if (unlikely(!tmp->pages[i]))
+			BUG();
+		__free_page(tmp->pages[i]);
+	}
+	kfree(tmp->pages);
+	kfree(tmp);
 }
 void * module_map (unsigned long size)
 {
-	void * addr;
 	struct vm_struct **p, *tmp, *area;
+	struct page **pages;
+	void * addr;
+	unsigned int nr_pages, array_size, i;
 
 	size = PAGE_ALIGN(size);
 	if (!size || size > MODULES_LEN) return NULL;
@@ -55,11 +75,32 @@ void * module_map (unsigned long size)
 	area->size = size + PAGE_SIZE;
 	area->addr = addr;
 	area->next = *p;
+	area->pages = NULL;
+	area->nr_pages = 0;
+	area->phys_addr = 0;
 	*p = area;
 
-	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, GFP_KERNEL, PAGE_KERNEL)) {
-		module_unmap(addr);
-		return NULL;
-	}
-	return addr;
+	nr_pages = (size+PAGE_SIZE) >> PAGE_SHIFT;
+	array_size = (nr_pages * sizeof(struct page *));
+
+	area->nr_pages = nr_pages;
+	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
+	if (!area->pages)
+		return NULL;
+	memset(area->pages, 0, array_size);
+
+	for (i = 0; i < area->nr_pages; i++) {
+		area->pages[i] = alloc_page(gfp_mask);
+		if (unlikely(!area->pages[i]))
+			goto fail;
+	}
+
+	if (map_vm_area(area, prot, &pages))
+		goto fail;
+	return area->addr;
+
+fail:
+	vfree(area->addr);
+	return NULL;
 }
@@ -210,6 +210,9 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 	return 0;
 }
 
+extern long vread(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count);
+
 /*
  * This function reads the *virtual* memory as seen by the kernel.
  */
@@ -273,8 +276,6 @@ static ssize_t read_kmem(struct file *file, char *buf,
 	return virtr + read;
 }
 
-extern long vwrite(char *buf, char *addr, unsigned long count);
-
 /*
  * This function writes to the *virtual* memory as seen by the kernel.
  */
...
-#ifndef __LINUX_VMALLOC_H
-#define __LINUX_VMALLOC_H
+#ifndef _LINUX_VMALLOC_H
+#define _LINUX_VMALLOC_H
 
 #include <linux/spinlock.h>
+#include <asm/pgtable.h>
 
 /* bits in vm_struct->flags */
 #define VM_IOREMAP 0x00000001 /* ioremap() and friends */
 #define VM_ALLOC 0x00000002 /* vmalloc() */
+#define VM_MAP 0x00000004 /* vmap()ed pages */
 
 struct vm_struct {
-	unsigned long flags;
-	void * addr;
+	void *addr;
 	unsigned long size;
+	unsigned long flags;
+	struct page **pages;
+	unsigned int nr_pages;
 	unsigned long phys_addr;
-	struct vm_struct * next;
+	struct vm_struct *next;
 };
 
-extern struct vm_struct * get_vm_area (unsigned long size, unsigned long flags);
-extern void vfree(void * addr);
-extern void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot);
-extern long vread(char *buf, char *addr, unsigned long count);
-extern void vmfree_area_pages(unsigned long address, unsigned long size);
-extern int vmalloc_area_pages(unsigned long address, unsigned long size,
-			      int gfp_mask, pgprot_t prot);
-extern struct vm_struct *remove_kernel_area(void *addr);
-
 /*
- * Various ways to allocate pages.
+ * Highlevel APIs for driver use
  */
+extern void *vmalloc(unsigned long size);
+extern void *vmalloc_32(unsigned long size);
+extern void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot);
+extern void vfree(void *addr);
 
-extern void * vmalloc(unsigned long size);
-extern void * vmalloc_32(unsigned long size);
+extern void *vmap(struct page **pages, unsigned int count);
+extern void vunmap(void *addr);
 
 /*
- * vmlist_lock is a read-write spinlock that protects vmlist
- * Used in mm/vmalloc.c (get_vm_area() and vfree()) and fs/proc/kcore.c.
+ * Lowlevel-APIs (not for driver use!)
  */
-extern rwlock_t vmlist_lock;
+extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+extern struct vm_struct *remove_vm_area(void *addr);
+extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
+			struct page ***pages);
+extern void unmap_vm_area(struct vm_struct *area);
 
-extern struct vm_struct * vmlist;
-#endif
+/*
+ * Internals. Dont't use..
+ */
+extern rwlock_t vmlist_lock;
+extern struct vm_struct *vmlist;
+
+#endif /* _LINUX_VMALLOC_H */
@@ -109,6 +109,8 @@ EXPORT_SYMBOL(vfree);
 EXPORT_SYMBOL(__vmalloc);
 EXPORT_SYMBOL(vmalloc);
 EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmap);
+EXPORT_SYMBOL(vunmap);
 EXPORT_SYMBOL(vmalloc_to_page);
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(remap_page_range);
...
This diff is collapsed.
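
Since the reworked mm/vmalloc.c is collapsed above, here is a rough sketch (an assumption, not the literal new code) of how __vmalloc() now decomposes into the lowlevel steps the commit message describes, mirroring the module_map() hunk shown earlier: reserve a vm area, allocate the backing pages, then wire them into the kernel page tables with map_vm_area().

/* Rough sketch only -- the real __vmalloc() in this commit differs in detail. */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;
	struct page **pages;
	unsigned int i, nr_pages, array_size;

	size = PAGE_ALIGN(size);
	area = get_vm_area(size, VM_ALLOC);	/* step 1: reserve kernel virtual space */
	if (!area)
		return NULL;

	nr_pages = size >> PAGE_SHIFT;
	array_size = nr_pages * sizeof(struct page *);
	area->nr_pages = nr_pages;
	area->pages = pages = kmalloc(array_size, gfp_mask & ~__GFP_HIGHMEM);
	if (!area->pages)
		return NULL;
	memset(area->pages, 0, array_size);

	for (i = 0; i < nr_pages; i++) {	/* step 2: allocate the backing pages */
		area->pages[i] = alloc_page(gfp_mask);
		if (!area->pages[i])
			goto fail;
	}

	if (map_vm_area(area, prot, &pages))	/* step 3: map them virtually contiguously */
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);			/* vfree() unwinds mapping, pages and vm area */
	return NULL;
}

vmap() presumably follows the same sequence minus the allocation loop, with vunmap() and vfree() as the corresponding teardown paths built on remove_vm_area() and unmap_vm_area().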