Commit 1eb0616c authored by Thomas Gleixner

xtensa/mm/highmem: Make generic kmap_atomic() work correctly

The conversion to the generic kmap_atomic() implementation missed the fact
that xtensa's fixmap works bottom up while all other implementations work
top down. There is no real reason why xtensa needs to work that way.

Cure it by:

  - Using the generic fix_to_virt()/virt_to_fix() functions which work top
    down
  - Adjusting the mapping defines
  - Using the generic index calculation for the non cache aliasing case
  - Making the cache colour offset reverse so the effective index is correct

While at it, remove the outdated and misleading comment above the fixmap
enum which originates from the initial copy&pasta of this code from i386.

[ Max: Fixed the off by one in the index calculation ]

Fixes: 629ed3f7 ("xtensa/mm/highmem: Switch to generic kmap atomic")
Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Max Filippov <jcmvbkbc@gmail.com>
Link: https://lore.kernel.org/r/20201116193253.23875-1-jcmvbkbc@gmail.com
parent 2a656cad
...@@ -17,63 +17,22 @@ ...@@ -17,63 +17,22 @@
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/kmap_size.h> #include <asm/kmap_size.h>
#endif
/* /* The map slots for temporary mappings via kmap_atomic/local(). */
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the start of the consistent memory region upwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*/
enum fixed_addresses { enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN, FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN + FIX_KMAP_END = FIX_KMAP_BEGIN +
(KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1, (KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
#endif
__end_of_fixed_addresses __end_of_fixed_addresses
}; };
#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE) #define FIXADDR_END (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) /* Enforce that FIXADDR_START is PMD aligned to handle cache aliasing */
#define FIXADDR_START ((FIXADDR_END - FIXADDR_SIZE) & PMD_MASK)
#define FIXADDR_TOP (FIXADDR_START + FIXADDR_SIZE - PAGE_SIZE)
#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) #include <asm-generic/fixmap.h>
#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
#ifndef __ASSEMBLY__
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/* Check if this memory layout is broken because fixmap overlaps page
* table.
*/
BUILD_BUG_ON(FIXADDR_START <
TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif
#endif /* CONFIG_HIGHMEM */
#endif #endif
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#ifndef _XTENSA_HIGHMEM_H #ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H #define _XTENSA_HIGHMEM_H
#ifdef CONFIG_HIGHMEM
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -58,6 +59,13 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) ...@@ -58,6 +59,13 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{ {
return pkmap_map_wait_arr + color; return pkmap_map_wait_arr + color;
} }
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
#define arch_kmap_local_map_idx kmap_local_map_idx
enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
#endif #endif
extern pte_t *pkmap_page_table; extern pte_t *pkmap_page_table;
...@@ -67,15 +75,10 @@ static inline void flush_cache_kmaps(void) ...@@ -67,15 +75,10 @@ static inline void flush_cache_kmaps(void)
flush_cache_all(); flush_cache_all();
} }
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
#define arch_kmap_local_map_idx kmap_local_map_idx
enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
#define arch_kmap_local_post_unmap(vaddr) \ #define arch_kmap_local_post_unmap(vaddr) \
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE) local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
void kmap_init(void); void kmap_init(void);
#endif /* CONFIG_HIGHMEM */
#endif #endif
...@@ -23,16 +23,16 @@ static void __init kmap_waitqueues_init(void) ...@@ -23,16 +23,16 @@ static void __init kmap_waitqueues_init(void)
for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i) for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
init_waitqueue_head(pkmap_map_wait_arr + i); init_waitqueue_head(pkmap_map_wait_arr + i);
} }
#else
static inline void kmap_waitqueues_init(void)
{
}
#endif
static inline enum fixed_addresses kmap_idx(int type, unsigned long color) static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{ {
return (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS + int idx = (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS;
color;
/*
* The fixmap operates top down, so the color offset needs to be
* reverse as well.
*/
return idx + DCACHE_N_COLORS - 1 - color;
} }
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn) enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
...@@ -45,6 +45,10 @@ enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr) ...@@ -45,6 +45,10 @@ enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
return kmap_idx(type, DCACHE_ALIAS(addr)); return kmap_idx(type, DCACHE_ALIAS(addr));
} }
#else
static inline void kmap_waitqueues_init(void) { }
#endif
void __init kmap_init(void) void __init kmap_init(void)
{ {
/* Check if this memory layout is broken because PKMAP overlaps /* Check if this memory layout is broken because PKMAP overlaps
......
...@@ -147,8 +147,8 @@ void __init mem_init(void) ...@@ -147,8 +147,8 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10, (LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP, FIXADDR_START, FIXADDR_END,
(FIXADDR_TOP - FIXADDR_START) >> 10, (FIXADDR_END - FIXADDR_START) >> 10,
#endif #endif
PAGE_OFFSET, PAGE_OFFSET + PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE, (max_low_pfn - min_low_pfn) * PAGE_SIZE,
......
...@@ -52,7 +52,8 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) ...@@ -52,7 +52,8 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
static void __init fixedrange_init(void) static void __init fixedrange_init(void)
{ {
init_pmd(__fix_to_virt(0), __end_of_fixed_addresses); BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
init_pmd(FIXADDR_START, __end_of_fixed_addresses);
} }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment