Commit b983cb51 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] uninline the ia32 highmem functions

With gcc-2.95.3, text size goes from 2,399,190 to 2,390,004 bytes, a saving of
9,186 bytes (roughly 9 kbytes).
parent 301f67bd
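The change itself follows the standard uninlining pattern: the function bodies move out of the header into a new C file, the header keeps only prototypes, and the symbols are exported so modules can still reach them. A minimal sketch of that pattern, using hypothetical frob()/widget names rather than the kernel's own code:

/* Before: widget.h -- every caller inlines its own copy of the body. */
static inline int frob(int x)
{
	return x * x + 1;
}

/* After: widget.h keeps only the prototype... */
int frob(int x);

/* ...and widget.c holds the single out-of-line definition, shared by
 * every caller and exported for modules (needs linux/module.h). */
int frob(int x)
{
	return x * x + 1;
}
EXPORT_SYMBOL(frob);

For a function this small, inlining wins; for bodies the size of kmap_atomic(), one shared copy is cheaper than a copy at each of the many call sites, which is where the ~9 kbyte saving comes from.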
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/highmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
@@ -192,3 +193,11 @@ extern int is_sony_vaio_laptop;
EXPORT_SYMBOL(is_sony_vaio_laptop);
EXPORT_SYMBOL(__PAGE_KERNEL);
#ifdef CONFIG_HIGHMEM
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);
#endif
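These exports matter because, while the functions were inline, modules compiled private copies of the bodies straight from the header; with the bodies now living only in the kernel image, an unexported symbol would leave modules with unresolved references at load time. A hedged sketch of a hypothetical module routine that would need them:

#include <linux/module.h>
#include <linux/highmem.h>

/* Hypothetical example: touch a page that may live in highmem.
 * Loading a module containing this requires EXPORT_SYMBOL(kmap)
 * and EXPORT_SYMBOL(kunmap) now that the functions are out of line. */
static unsigned char peek_first_byte(struct page *page)
{
	unsigned char *vaddr = kmap(page);	/* may sleep */
	unsigned char c = *vaddr;

	kunmap(page);
	return c;
}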
@@ -4,8 +4,10 @@
export-objs := pageattr.o
obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_HIGHMEM) += highmem.o
include $(TOPDIR)/Rules.make
#include <linux/highmem.h>

void *kmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();
	if (page < highmem_start_page)
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#if CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without first remapping it.
	 */
	pte_clear(kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif
	dec_preempt_count();
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
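As the comment in the new file says, kmap_atomic() exists for the few paths that cannot sleep. A typical use, sketched here as a hypothetical helper rather than code from this commit, maps a slot, copies, and unmaps without blocking in between:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy 'len' bytes (len <= PAGE_SIZE) from the
 * start of a possibly-highmem page without sleeping.  KM_USER0 is one
 * of the per-CPU atomic-kmap slots; the map and unmap must name the
 * same slot, and nothing in between may sleep, since kmap_atomic()
 * bumps the preempt count. */
static void copy_from_page_atomic(char *buf, struct page *page, size_t len)
{
	char *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(buf, vaddr, len);
	kunmap_atomic(vaddr, KM_USER0);
}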
@@ -52,88 +52,11 @@ extern void kmap_init(void);
extern void * FASTCALL(kmap_high(struct page *page));
extern void FASTCALL(kunmap_high(struct page *page));

static inline void *kmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();
	if (page < highmem_start_page)
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}

static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#if CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without first remapping it.
	 */
	pte_clear(kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif
	dec_preempt_count();
}

static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_atomic(struct page *page, enum km_type type);
void kunmap_atomic(void *kvaddr, enum km_type type);
struct page *kmap_atomic_to_page(void *ptr);
#endif /* __KERNEL__ */
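One detail worth unpacking is the slot arithmetic both kmap_atomic() variants use: idx = type + KM_TYPE_NR*smp_processor_id() gives each CPU its own block of KM_TYPE_NR fixmap slots, so a (type, cpu) pair always selects a distinct page-sized virtual window and concurrent CPUs never collide. A worked sketch, with made-up constants and a hypothetical helper name:

#include <asm/fixmap.h>
#include <asm/kmap_types.h>

/* Illustrative arithmetic only -- the real values come from
 * asm/kmap_types.h and asm/fixmap.h.  Suppose KM_TYPE_NR were 6
 * and the type were slot 2:
 *   CPU 0: idx = 2 + 6*0 = 2
 *   CPU 3: idx = 2 + 6*3 = 20
 * Each idx then maps to its own fixed virtual address below
 * FIXADDR_TOP, one page per slot. */
static unsigned long kmap_slot_vaddr(enum km_type type, int cpu)
{
	unsigned long idx = type + KM_TYPE_NR * cpu;

	return __fix_to_virt(FIX_KMAP_BEGIN + idx);
}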