Commit 16b38746 authored by Andrew Morton's avatar Andrew Morton Committed by Christoph Hellwig

[PATCH] various small cleanups

- Remove defunct active_list/inactive_list declarations (wli)

- Update an obsolete comment (wli)

- "mm/slab.c contains one leftover from the initial version with
  'unsigned short' bufctl entries.  The attached patch replaces '2'
  with the correct sizeof [which is now 4]" - Manfred Spraul

- BUG checks for vfree/vunmap being called in interrupt context
  (because they take irq-unsafe spinlocks, I guess?) - davej

- Simplify some coding in one_highpage_init() (Christoph Hellwig).
parent 5868caf6
......@@ -215,19 +215,14 @@ void __init permanent_kmaps_init(pgd_t *pgd_base)
/*
 * Register one highmem page with the page allocator at boot, or mark it
 * reserved.
 *
 * The page is handed to the buddy allocator only when pfn is backed by
 * RAM and is not a page that triggers the Pentium Pro pagetable erratum
 * (bad_ppro && page_kills_ppro(pfn)); every other page is marked
 * reserved so it is never handed out.
 *
 * @page:     the struct page describing this pfn
 * @pfn:      page frame number being initialised
 * @bad_ppro: non-zero on CPUs affected by the Pentium Pro erratum
 */
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		set_bit(PG_highmem, &page->flags);
		/* set_page_count() replaces the old open-coded atomic_set() */
		set_page_count(page, 1);
		__free_page(page);
		totalhigh_pages++;
	} else
		SetPageReserved(page);
}
#ifndef CONFIG_DISCONTIGMEM
......
......@@ -19,9 +19,6 @@ extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern void * high_memory;
extern int page_cluster;
/* The inactive_clean lists are per zone. */
extern struct list_head active_list;
extern struct list_head inactive_list;
#include <asm/page.h>
#include <asm/pgtable.h>
......
......@@ -151,8 +151,8 @@ struct zonelist {
* On NUMA machines, each NUMA node would have a pg_data_t to describe
* it's memory layout.
*
* XXX: we need to move the global memory statistics (active_list, ...)
* into the pg_data_t to properly support NUMA.
* Memory statistics and page replacement data structures are maintained on a
* per-zone basis.
*/
struct bootmem_data;
typedef struct pglist_data {
......
......@@ -487,7 +487,7 @@ void __init kmem_cache_sizes_init(void)
/* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) {
offslab_limit = sizes->cs_size-sizeof(slab_t);
offslab_limit /= 2;
offslab_limit /= sizeof(kmem_bufctl_t);
}
sizes->cs_dmacachep = kmem_cache_create(
cache_names[sizes-cache_sizes].name_dma,
......
......@@ -11,6 +11,8 @@
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
......@@ -309,6 +311,7 @@ void __vunmap(void *addr, int deallocate_pages)
*/
/*
 * vfree - release virtually-contiguous memory and free its pages
 * @addr: address returned by a vmalloc-family allocation
 *
 * Delegates to __vunmap() with deallocate_pages == 1, so the backing
 * pages are freed as well as the mapping being torn down.
 *
 * Must not be called from interrupt context -- presumably because
 * __vunmap() takes irq-unsafe spinlocks (so the commit message guesses);
 * TODO confirm against __vunmap()'s locking.
 */
void vfree(void *addr)
{
/* catch in-interrupt misuse early rather than deadlocking later */
BUG_ON(in_interrupt());
__vunmap(addr, 1);
}
......@@ -324,6 +327,7 @@ void vfree(void *addr)
*/
/*
 * vunmap - tear down a virtually-contiguous mapping without freeing pages
 * @addr: address returned by vmap()
 *
 * Delegates to __vunmap() with deallocate_pages == 0: only the mapping is
 * removed, the underlying pages remain owned by the caller.
 *
 * Must not be called from interrupt context -- presumably because
 * __vunmap() takes irq-unsafe spinlocks (per the commit message's own
 * guess); TODO confirm against __vunmap()'s locking.
 */
void vunmap(void *addr)
{
/* catch in-interrupt misuse early rather than deadlocking later */
BUG_ON(in_interrupt());
__vunmap(addr, 0);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment