Commit 3c1016b5 authored by Thomas Gleixner

mm/highmem: Remove the old kmap_atomic cruft

All users gone.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20201103095858.516281567@linutronix.de
parent d7029e45
include/linux/highmem.h
@@ -86,31 +86,16 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-#ifndef CONFIG_KMAP_LOCAL
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_high(void *kvaddr);
-
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
 	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_atomic_high_prot(page, prot);
-}
-
-static inline void __kunmap_atomic(void *vaddr)
-{
-	kunmap_atomic_high(vaddr);
+	return __kmap_local_page_prot(page, prot);
 }
 
-#else /* !CONFIG_KMAP_LOCAL */
-
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic(struct page *page)
 {
-	preempt_disable();
-	pagefault_disable();
-	return __kmap_local_page_prot(page, prot);
+	return kmap_atomic_prot(page, kmap_prot);
 }
 
 static inline void *kmap_atomic_pfn(unsigned long pfn)
@@ -125,13 +110,6 @@ static inline void __kunmap_atomic(void *addr)
 	kunmap_local_indexed(addr);
 }
 
-#endif /* CONFIG_KMAP_LOCAL */
-
-static inline void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
 extern atomic_long_t _totalhigh_pages;
@@ -212,41 +190,8 @@ static inline void __kunmap_atomic(void *addr)
 #define kmap_flush_unused()	do {} while(0)
 
-#endif /* CONFIG_HIGHMEM */
-
-#if !defined(CONFIG_KMAP_LOCAL)
-#if defined(CONFIG_HIGHMEM)
-
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
-static inline int kmap_atomic_idx_push(void)
-{
-	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-	WARN_ON_ONCE(in_irq() && !irqs_disabled());
-	BUG_ON(idx >= KM_TYPE_NR);
-#endif
-	return idx;
-}
-
-static inline int kmap_atomic_idx(void)
-{
-	return __this_cpu_read(__kmap_atomic_idx) - 1;
-}
-
-static inline void kmap_atomic_idx_pop(void)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
-	BUG_ON(idx < 0);
-#else
-	__this_cpu_dec(__kmap_atomic_idx);
-#endif
-}
-
-#endif
-#endif
-
+#endif /* CONFIG_HIGHMEM */
 
 /*
  * Prevent people trying to call kunmap_atomic() as if it were kunmap()
mm/highmem.c
@@ -31,12 +31,6 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
-#ifndef CONFIG_KMAP_LOCAL
-#ifdef CONFIG_HIGHMEM
-DEFINE_PER_CPU(int, __kmap_atomic_idx);
-#endif
-#endif
-
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
@@ -410,6 +404,7 @@ static inline void kmap_local_idx_pop(void)
 #ifndef arch_kmap_local_post_map
 # define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
 #endif
+
 #ifndef arch_kmap_local_pre_unmap
 # define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
 #endif
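
Not part of the commit, just review context: a minimal sketch of the calling pattern the surviving kmap_atomic()/kunmap_atomic() pair serves, now backed by the generic kmap_local infrastructure. The helper name fill_highpage_example() is invented for illustration; the kernel's own clear_highpage() follows the same pattern.

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch only: fill a (possibly highmem) page with a byte value. */
static void fill_highpage_example(struct page *page, int val)
{
	/* Maps the page; disables pagefaults and preemption. */
	void *kaddr = kmap_atomic(page);

	memset(kaddr, val, PAGE_SIZE);

	/* Unmaps, then re-enables pagefaults and preemption. */
	kunmap_atomic(kaddr);
}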