Commit 51386120 authored by Sebastian Andrzej Siewior, committed by Linus Torvalds

highmem: don't disable preemption on RT in kmap_atomic()

kmap_atomic() disables preemption and pagefaults for historical reasons.
The conversion to kmap_local(), which only disables migration, cannot be
done wholesale because quite a few call sites need to be updated to
accommodate the changed semantics.
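
As a minimal sketch of that difference (the helpers below are
hypothetical illustrations, not code from the tree): a kmap_atomic()
section must neither sleep nor fault, while a kmap_local_page() section
only has migration disabled:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/*
	 * kmap_atomic(): preemption and page faults are disabled until
	 * kunmap_atomic(), so nothing in between may sleep or fault.
	 */
	static void copy_from_page_atomic(struct page *page, char *dst, size_t len)
	{
		char *src = kmap_atomic(page);

		memcpy(dst, src, len);
		kunmap_atomic(src);
	}

	/*
	 * kmap_local_page(): only migration is disabled; preemption and
	 * page faults stay enabled, so call sites relying on the stricter
	 * kmap_atomic() guarantees must be audited before converting.
	 */
	static void copy_from_page_local(struct page *page, char *dst, size_t len)
	{
		char *src = kmap_local_page(page);

		memcpy(dst, src, len);
		kunmap_local(src);
	}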

On PREEMPT_RT enabled kernels the kmap_atomic() semantics are problematic
due to the implicit disabling of preemption, which makes it impossible to
acquire 'sleeping' spinlocks within the kmap atomic sections.
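
For illustration (the lock and the call site below are made up): on
PREEMPT_RT a spinlock_t is backed by an rtmutex and may sleep, so taking
one with preemption disabled is invalid:

	#include <linux/highmem.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(stats_lock);	/* spinlock_t: may sleep on RT */

	static void update_page_stats(struct page *page)
	{
		char *addr = kmap_atomic(page);

		/*
		 * With kmap_atomic()'s implicit preempt_disable() this
		 * spin_lock() is a bug on PREEMPT_RT because the lock can
		 * sleep. With migrate_disable() instead, sleeping is
		 * permitted and the section stays CPU-local.
		 */
		spin_lock(&stats_lock);
		/* ... update statistics derived from *addr ... */
		spin_unlock(&stats_lock);

		kunmap_atomic(addr);
	}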

PREEMPT_RT has replaced the preempt_disable() with a migrate_disable()
for more than a decade.  It could be argued that this justifies making
the replacement unconditional, but PREEMPT_RT covers only a limited
number of architectures and it disables some functionality, which limits
the coverage further.

Limit the replacement to PREEMPT_RT for now.

Link: https://lkml.kernel.org/r/20210810091116.pocdmaatdcogvdso@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 395519b4
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr)
 
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
+
 	pagefault_disable();
 	return __kmap_local_page_prot(page, prot);
 }
@@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page)
 
 static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
+
 	pagefault_disable();
 	return __kmap_local_pfn_prot(pfn, kmap_prot);
 }
@@ -111,6 +119,9 @@ static inline void __kunmap_atomic(void *addr)
 {
 	kunmap_local_indexed(addr);
 	pagefault_enable();
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_enable();
+	else
+		preempt_enable();
 }
 
@@ -179,6 +190,9 @@ static inline void __kunmap_local(void *addr)
 
 static inline void *kmap_atomic(struct page *page)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
 	pagefault_disable();
 	return page_address(page);
@@ -200,6 +214,9 @@ static inline void __kunmap_atomic(void *addr)
 	kunmap_flush_on_unmap(addr);
 #endif
 	pagefault_enable();
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_enable();
+	else
+		preempt_enable();
 }
 
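For reference, IS_ENABLED(CONFIG_PREEMPT_RT) evaluates to a compile-time
constant, so the untaken branch is compiled out and non-RT kernels keep
exactly the preempt_disable() path they had before. A sketch of the
HIGHMEM variant of kmap_atomic_prot() as it reads with the hunks above
applied:

	static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
	{
		/* RT: stay on this CPU but remain preemptible. */
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			migrate_disable();
		else
			preempt_disable();

		pagefault_disable();
		return __kmap_local_page_prot(page, prot);
	}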