Commit 630986cd authored by Linus Torvalds

Automerge

parents 7366f03f d24919a7
@@ -159,7 +159,7 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
 	area->phys_addr = phys_addr;
 	addr = area->addr;
 	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
-		vfree(addr);
+		vunmap(addr);
 		return NULL;
 	}
 	return (void *) (offset + (char *)addr);
@@ -215,13 +215,13 @@ void iounmap(void *addr)
 	struct vm_struct *p;
 	if (addr <= high_memory)
 		return;
-	p = remove_kernel_area((void *) (PAGE_MASK & (unsigned long) addr));
+	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
 	if (!p) {
 		printk("__iounmap: bad address %p\n", addr);
 		return;
 	}
-	vmfree_area_pages(VMALLOC_VMADDR(p->addr), p->size);
+	unmap_vm_area(p);
 	if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
...
@@ -5,6 +5,8 @@
  * Based upon code written by Linus Torvalds and others.
  */
+
+#warning "major untested changes to this file --hch (2002/08/05)"
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -16,6 +18,7 @@ static struct vm_struct * modvmlist = NULL;
 void module_unmap (void * addr)
 {
 	struct vm_struct **p, *tmp;
+	int i;
 
 	if (!addr)
 		return;
@@ -23,21 +26,38 @@ void module_unmap (void * addr)
 		printk("Trying to unmap module with bad address (%p)\n", addr);
 		return;
 	}
 
 	for (p = &modvmlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
 			*p = tmp->next;
-			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
-			kfree(tmp);
-			return;
+			goto found;
 		}
 	}
 	printk("Trying to unmap nonexistent module vm area (%p)\n", addr);
+	return;
+
+found:
+	unmap_vm_area(tmp);
+
+	for (i = 0; i < tmp->nr_pages; i++) {
+		if (unlikely(!tmp->pages[i]))
+			BUG();
+		__free_page(tmp->pages[i]);
+	}
+
+	kfree(tmp->pages);
+	kfree(tmp);
 }
 
 void * module_map (unsigned long size)
 {
-	void * addr;
-	struct vm_struct **p, *tmp, *area;
+	struct vm_struct **p, *tmp, *area;
+	struct page **pages;
+	void * addr;
+	unsigned int nr_pages, array_size, i;
 
 	size = PAGE_ALIGN(size);
 	if (!size || size > MODULES_LEN) return NULL;
@@ -55,11 +75,32 @@ void * module_map (unsigned long size)
 	area->size = size + PAGE_SIZE;
 	area->addr = addr;
 	area->next = *p;
+	area->pages = NULL;
+	area->nr_pages = 0;
+	area->phys_addr = 0;
 	*p = area;
 
-	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, GFP_KERNEL, PAGE_KERNEL)) {
-		module_unmap(addr);
-		return NULL;
-	}
-	return addr;
+	nr_pages = (size+PAGE_SIZE) >> PAGE_SHIFT;
+	array_size = (nr_pages * sizeof(struct page *));
+
+	area->nr_pages = nr_pages;
+	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
+	if (!area->pages)
+		return NULL;
+	memset(area->pages, 0, array_size);
+
+	for (i = 0; i < area->nr_pages; i++) {
+		area->pages[i] = alloc_page(gfp_mask);
+		if (unlikely(!area->pages[i]))
+			goto fail;
+	}
+
+	if (map_vm_area(area, prot, &pages))
+		goto fail;
+	return area->addr;
+
+fail:
+	vfree(area->addr);
+	return NULL;
 }
@@ -210,6 +210,9 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 	return 0;
 }
 
+extern long vread(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count);
+
 /*
  * This function reads the *virtual* memory as seen by the kernel.
  */
@@ -273,8 +276,6 @@ static ssize_t read_kmem(struct file *file, char *buf,
 	return virtr + read;
 }
 
-extern long vwrite(char *buf, char *addr, unsigned long count);
-
 /*
  * This function writes to the *virtual* memory as seen by the kernel.
  */
...
-#ifndef __LINUX_VMALLOC_H
-#define __LINUX_VMALLOC_H
+#ifndef _LINUX_VMALLOC_H
+#define _LINUX_VMALLOC_H
 
 #include <linux/spinlock.h>
+#include <asm/pgtable.h>
 
 /* bits in vm_struct->flags */
 #define VM_IOREMAP	0x00000001	/* ioremap() and friends */
 #define VM_ALLOC	0x00000002	/* vmalloc() */
+#define VM_MAP		0x00000004	/* vmap()ed pages */
 
 struct vm_struct {
-	unsigned long flags;
-	void * addr;
+	void *addr;
 	unsigned long size;
+	unsigned long flags;
+	struct page **pages;
+	unsigned int nr_pages;
 	unsigned long phys_addr;
-	struct vm_struct * next;
+	struct vm_struct *next;
 };
 
-extern struct vm_struct * get_vm_area (unsigned long size, unsigned long flags);
-extern void vfree(void * addr);
-extern void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot);
-extern long vread(char *buf, char *addr, unsigned long count);
-extern void vmfree_area_pages(unsigned long address, unsigned long size);
-extern int vmalloc_area_pages(unsigned long address, unsigned long size,
-			      int gfp_mask, pgprot_t prot);
-extern struct vm_struct *remove_kernel_area(void *addr);
-
 /*
- * Various ways to allocate pages.
+ *	Highlevel APIs for driver use
  */
+extern void *vmalloc(unsigned long size);
+extern void *vmalloc_32(unsigned long size);
+extern void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot);
+extern void vfree(void *addr);
 
-extern void * vmalloc(unsigned long size);
-extern void * vmalloc_32(unsigned long size);
+extern void *vmap(struct page **pages, unsigned int count);
+extern void vunmap(void *addr);
 
 /*
- * vmlist_lock is a read-write spinlock that protects vmlist
- * Used in mm/vmalloc.c (get_vm_area() and vfree()) and fs/proc/kcore.c.
+ *	Lowlevel-APIs (not for driver use!)
  */
+extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+extern struct vm_struct *remove_vm_area(void *addr);
+extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
+			struct page ***pages);
+extern void unmap_vm_area(struct vm_struct *area);
 
-extern rwlock_t vmlist_lock;
-extern struct vm_struct * vmlist;
-#endif
+/*
+ *	Internals.  Dont't use..
+ */
+extern rwlock_t vmlist_lock;
+extern struct vm_struct *vmlist;
+
+#endif /* _LINUX_VMALLOC_H */
@@ -109,6 +109,8 @@ EXPORT_SYMBOL(vfree);
 EXPORT_SYMBOL(__vmalloc);
 EXPORT_SYMBOL(vmalloc);
 EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmap);
+EXPORT_SYMBOL(vunmap);
 EXPORT_SYMBOL(vmalloc_to_page);
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(remap_page_range);
...
@@ -4,27 +4,28 @@
  * Copyright (C) 1993  Linus Torvalds
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
+ * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
  */
 
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
-#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
 rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
-struct vm_struct * vmlist;
+struct vm_struct *vmlist;
 
-static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
+static inline void unmap_area_pte(pmd_t *pmd, unsigned long address,
+				  unsigned long size)
 {
-	pte_t * pte;
 	unsigned long end;
+	pte_t *pte;
 
 	if (pmd_none(*pmd))
 		return;
@@ -33,11 +34,13 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
 		pmd_clear(pmd);
 		return;
 	}
+
 	pte = pte_offset_kernel(pmd, address);
 	address &= ~PMD_MASK;
 	end = address + size;
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
+
 	do {
 		pte_t page;
 		page = ptep_get_and_clear(pte);
@@ -45,24 +48,17 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
 		pte++;
 		if (pte_none(page))
 			continue;
-		if (pte_present(page)) {
-			struct page *ptpage;
-			unsigned long pfn = pte_pfn(page);
-			if (!pfn_valid(pfn))
-				continue;
-			ptpage = pfn_to_page(pfn);
-			if (!PageReserved(ptpage))
-				__free_page(ptpage);
+		if (pte_present(page))
 			continue;
-		}
 		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
 	} while (address < end);
 }
 
-static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
+static inline void unmap_area_pmd(pgd_t *dir, unsigned long address,
+				  unsigned long size)
 {
-	pmd_t * pmd;
 	unsigned long end;
+	pmd_t *pmd;
 
 	if (pgd_none(*dir))
 		return;
@@ -71,36 +67,23 @@ static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned lo
 		pgd_clear(dir);
 		return;
 	}
 	pmd = pmd_offset(dir, address);
 	address &= ~PGDIR_MASK;
 	end = address + size;
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
 	do {
-		free_area_pte(pmd, address, end - address);
+		unmap_area_pte(pmd, address, end - address);
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address < end);
 }
 
-void vmfree_area_pages(unsigned long start, unsigned long size)
-{
-	pgd_t * dir;
-	unsigned long address = start;
-	unsigned long end = start + size;
-
-	dir = pgd_offset_k(address);
-	flush_cache_all();
-	do {
-		free_area_pmd(dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_kernel_range(start, end);
-}
-
-static inline int alloc_area_pte (pte_t * pte, unsigned long address,
-			unsigned long size, int gfp_mask, pgprot_t prot)
+static inline int map_area_pte(pte_t *pte, unsigned long address,
+			       unsigned long size, pgprot_t prot,
+			       struct page ***pages)
 {
 	unsigned long end;
@@ -108,23 +91,26 @@ static inline int alloc_area_pte (pte_t * pte, unsigned long address,
 	end = address + size;
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
+
 	do {
-		struct page * page;
-		spin_unlock(&init_mm.page_table_lock);
-		page = alloc_page(gfp_mask);
-		spin_lock(&init_mm.page_table_lock);
+		struct page *page = **pages;
 		if (!pte_none(*pte))
 			printk(KERN_ERR "alloc_area_pte: page already exists\n");
 		if (!page)
 			return -ENOMEM;
+
 		set_pte(pte, mk_pte(page, prot));
 		address += PAGE_SIZE;
 		pte++;
+		(*pages)++;
 	} while (address < end);
 	return 0;
 }
 
-static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
+static inline int map_area_pmd(pmd_t *pmd, unsigned long address,
+			       unsigned long size, pgprot_t prot,
+			       struct page ***pages)
 {
 	unsigned long end;
@@ -132,76 +118,108 @@ static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	end = address + size;
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
+
 	do {
 		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
 		if (!pte)
 			return -ENOMEM;
-		if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
+		if (map_area_pte(pte, address, end - address, prot, pages))
 			return -ENOMEM;
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address < end);
 	return 0;
 }
 
-inline int vmalloc_area_pages (unsigned long address, unsigned long size,
-			       int gfp_mask, pgprot_t prot)
+void unmap_vm_area(struct vm_struct *area)
 {
-	pgd_t * dir;
-	unsigned long end = address + size;
-	int ret;
+	unsigned long address = VMALLOC_VMADDR(area->addr);
+	unsigned long end = (address + area->size);
+	pgd_t *dir;
 
 	dir = pgd_offset_k(address);
-	spin_lock(&init_mm.page_table_lock);
+	flush_cache_all();
 	do {
-		pmd_t *pmd;
-
-		pmd = pmd_alloc(&init_mm, dir, address);
-		ret = -ENOMEM;
-		if (!pmd)
-			break;
-
-		ret = -ENOMEM;
-		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
-			break;
-
+		unmap_area_pmd(dir, address, end - address);
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
-
-		ret = 0;
 	} while (address && (address < end));
+	flush_tlb_kernel_range(VMALLOC_VMADDR(area->addr), end);
+}
+
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+{
+	unsigned long address = VMALLOC_VMADDR(area->addr);
+	unsigned long end = address + (area->size-PAGE_SIZE);
+	pgd_t *dir;
+
+	dir = pgd_offset_k(address);
+	spin_lock(&init_mm.page_table_lock);
+	do {
+		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
+		if (!pmd)
+			return -ENOMEM;
+		if (map_area_pmd(pmd, address, end - address, prot, pages))
+			return -ENOMEM;
+		address = (address + PGDIR_SIZE) & PGDIR_MASK;
+		dir++;
+	} while (address && (address < end));
 
 	spin_unlock(&init_mm.page_table_lock);
 	flush_cache_all();
-	return ret;
+	return 0;
 }
 
-struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
+/**
+ *	get_vm_area  -  reserve a contingous kernel virtual area
+ *
+ *	@size:		size of the area
+ *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
+ *
+ *	Search an area of @size in the kernel virtual mapping area,
+ *	and reserved it for out purposes.  Returns the area descriptor
+ *	on success or %NULL on failure.
+ */
+struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	unsigned long addr;
 	struct vm_struct **p, *tmp, *area;
+	unsigned long addr = VMALLOC_START;
 
-	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
-	if (!area)
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	if (unlikely(!area))
 		return NULL;
 
+	/*
+	 * We always allocate a guard page.
+	 */
 	size += PAGE_SIZE;
-	addr = VMALLOC_START;
+
 	write_lock(&vmlist_lock);
-	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
+	for (p = &vmlist; (tmp = *p) ;p = &tmp->next) {
 		if ((size + addr) < addr)
 			goto out;
-		if (size + addr <= (unsigned long) tmp->addr)
-			break;
-		addr = tmp->size + (unsigned long) tmp->addr;
+		if (size + addr <= (unsigned long)tmp->addr)
+			goto found;
+		addr = tmp->size + (unsigned long)tmp->addr;
 		if (addr > VMALLOC_END-size)
 			goto out;
 	}
-	area->phys_addr = 0;
+
+found:
+	area->next = *p;
+	*p = area;
+
 	area->flags = flags;
 	area->addr = (void *)addr;
 	area->size = size;
-	area->next = *p;
-	*p = area;
+	area->pages = NULL;
+	area->nr_pages = 0;
+	area->phys_addr = 0;
+
 	write_unlock(&vmlist_lock);
 	return area;
 
 out:
@@ -210,87 +228,203 @@ struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
 	return NULL;
 }
 
-struct vm_struct *remove_kernel_area(void *addr)
+/**
+ *	remove_vm_area  -  find and remove a contingous kernel virtual area
+ *
+ *	@addr:		base address
+ *
+ *	Search for the kernel VM area starting at @addr, and remove it.
+ *	This function returns the found VM area, but using it is NOT safe
+ *	on SMP machines.
+ */
+struct vm_struct *remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
 	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if (tmp->addr == addr) {
-			*p = tmp->next;
-			write_unlock(&vmlist_lock);
-			return tmp;
-		}
+	for (p = &vmlist ; (tmp = *p) ;p = &tmp->next) {
+		if (tmp->addr == addr)
+			goto found;
 	}
 	write_unlock(&vmlist_lock);
 	return NULL;
+
+found:
+	*p = tmp->next;
+	write_unlock(&vmlist_lock);
+	return tmp;
 }
 
-void vfree(void * addr)
+void __vunmap(void *addr, int deallocate_pages)
 {
-	struct vm_struct *tmp;
+	struct vm_struct *area;
 
 	if (!addr)
 		return;
-	if ((PAGE_SIZE-1) & (unsigned long) addr) {
+
+	if ((PAGE_SIZE-1) & (unsigned long)addr) {
 		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
 		return;
 	}
-	tmp = remove_kernel_area(addr);
-	if (tmp) {
-		vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
-		kfree(tmp);
+
+	area = remove_vm_area(addr);
+	if (unlikely(!area)) {
+		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
+				addr);
 		return;
 	}
-	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
+
+	unmap_vm_area(area);
+	if (deallocate_pages) {
+		int i;
+
+		for (i = 0; i < area->nr_pages; i++) {
+			if (unlikely(!area->pages[i]))
+				BUG();
+			__free_page(area->pages[i]);
+		}
+
+		kfree(area->pages);
+	}
+
+	kfree(area);
+	return;
 }
 
-/*
- * Allocate any pages
- */
-void * vmalloc (unsigned long size)
+/**
+ *	vfree  -  release memory allocated by vmalloc()
+ *
+ *	@addr:		memory base address
+ *
+ *	Free the virtually continguos memory area starting at @addr, as
+ *	obtained from vmalloc(), vmalloc_32() or __vmalloc().
+ *
+ *	May not be called in interrupt context.
+ */
+void vfree(void *addr)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	__vunmap(addr, 1);
 }
 
-/*
- * Allocate ISA addressable pages for broke crap
- */
-void * vmalloc_dma (unsigned long size)
+/**
+ *	vunmap  -  release virtual mapping obtained by vmap()
+ *
+ *	@addr:		memory base address
+ *
+ *	Free the virtually continguos memory area starting at @addr,
+ *	which was created from the page array passed to vmap().
+ *
+ *	May not be called in interrupt context.
+ */
+void vunmap(void *addr)
 {
-	return __vmalloc(size, GFP_KERNEL|GFP_DMA, PAGE_KERNEL);
+	__vunmap(addr, 0);
 }
 
-/*
- * vmalloc 32bit PA addressable pages - eg for PCI 32bit devices
- */
-void * vmalloc_32(unsigned long size)
+/**
+ *	vmap  -  map an array of pages into virtually continguos space
+ *
+ *	@pages:		array of page pointers
+ *	@count:		number of pages to map
+ *
+ *	Maps @count pages from @pages into continguos kernel virtual
+ *	space.
+ */
+void *vmap(struct page **pages, unsigned int count)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	struct vm_struct *area;
+
+	if (count > num_physpages)
+		return NULL;
+
+	area = get_vm_area((count << PAGE_SHIFT), VM_MAP);
+	if (!area)
+		return NULL;
+	if (map_vm_area(area, PAGE_KERNEL, &pages)) {
+		vunmap(area->addr);
+		return NULL;
+	}
+
+	return area->addr;
 }
 
-void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
+/**
+ *	__vmalloc  -  allocate virtually continguos memory
+ *
+ *	@size:		allocation size
+ *	@gfp_mask:	flags for the page level allocator
+ *	@prot:		protection mask for the allocated pages
+ *
+ *	Allocate enough pages to cover @size from the page level
+ *	allocator with @gfp_mask flags.  Map them into continguos
+ *	kernel virtual space, using a pagetable protection of @prot.
+ */
+void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
 {
-	void * addr;
 	struct vm_struct *area;
+	struct page **pages;
+	unsigned int nr_pages, array_size, i;
 
 	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > num_physpages) {
-		BUG();
+	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;
-	}
+
 	area = get_vm_area(size, VM_ALLOC);
 	if (!area)
 		return NULL;
-	addr = area->addr;
-	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
-		vfree(addr);
-		return NULL;
-	}
-	return addr;
+
+	nr_pages = (size+PAGE_SIZE) >> PAGE_SHIFT;
+	array_size = (nr_pages * sizeof(struct page *));
+
+	area->nr_pages = nr_pages;
+	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
+	if (!area->pages)
+		return NULL;
+	memset(area->pages, 0, array_size);
+
+	for (i = 0; i < area->nr_pages; i++) {
+		area->pages[i] = alloc_page(gfp_mask);
+		if (unlikely(!area->pages[i]))
+			goto fail;
+	}
+
+	if (map_vm_area(area, prot, &pages))
+		goto fail;
+	return area->addr;
+
+fail:
+	vfree(area->addr);
+	return NULL;
+}
+
+/**
+ *	vmalloc  -  allocate virtually continguos memory
+ *
+ *	@size:		allocation size
+ *
+ *	Allocate enough pages to cover @size from the page level
+ *	allocator and map them into continguos kernel virtual space.
+ *
+ *	For tight cotrol over page level allocator and protection flags
+ *	use __vmalloc() instead.
+ */
+void *vmalloc(unsigned long size)
+{
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
+/**
+ *	vmalloc_32  -  allocate virtually continguos memory (32bit addressable)
+ *
+ *	@size:		allocation size
+ *
+ *	Allocate enough 32bit PA addressable pages to cover @size from the
+ *	page level allocator and map them into continguos kernel virtual space.
+ */
+void *vmalloc_32(unsigned long size)
+{
+	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 }
 
 long vread(char *buf, char *addr, unsigned long count)
...
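For orientation, the interface this merge introduces splits into high-level driver calls (vmalloc/vfree, vmap/vunmap, __vmalloc) and low-level helpers (get_vm_area, remove_vm_area, map_vm_area, unmap_vm_area). Below is a minimal, hypothetical usage sketch of the two-argument vmap()/vunmap() pair added above; it is illustrative only and not part of the commit (buf_map, buf_unmap and NR_BUF_PAGES are invented names), and it uses only calls visible in the diff: alloc_page()/__free_page() for the pages, vmap() to map them contiguously, and vunmap() to drop the mapping without freeing the pages.

/*
 * Hypothetical example (not from the commit): map a small array of
 * caller-allocated pages into contiguous kernel virtual space using
 * the vmap()/vunmap() interface introduced by this patch.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define NR_BUF_PAGES	4

static struct page *buf_pages[NR_BUF_PAGES];

static void *buf_map(void)
{
	int i;
	void *addr;

	for (i = 0; i < NR_BUF_PAGES; i++) {
		buf_pages[i] = alloc_page(GFP_KERNEL);
		if (!buf_pages[i])
			goto fail;
	}

	/* vmap() reserves a VM_MAP area and maps the pages into it. */
	addr = vmap(buf_pages, NR_BUF_PAGES);
	if (!addr)
		goto fail;
	return addr;

fail:
	/* Free any pages allocated before the failure. */
	while (--i >= 0)
		__free_page(buf_pages[i]);
	return NULL;
}

static void buf_unmap(void *addr)
{
	int i;

	/*
	 * vunmap() tears down the virtual mapping but, unlike vfree(),
	 * does not free the pages; they stay owned by the caller.
	 */
	vunmap(addr);
	for (i = 0; i < NR_BUF_PAGES; i++)
		__free_page(buf_pages[i]);
}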