Commit 0b30191b authored Sep 18, 2020 by Michael Ellerman

Merge branch 'topic/irqs-off-activate-mm' into next

Merge Nick's series to add ARCH_WANT_IRQS_OFF_ACTIVATE_MM.

parents d208e13c a665eec0
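
For orientation, a condensed sketch of the pattern this series establishes, distilled from the fs/exec.c hunk below (kernel-context C, not a standalone example; error handling, membarrier_exec_mmap() and the vmacache update are omitted):

	/* Sketch only: condensed from the exec_mmap() change in this merge.
	 * The mm/active_mm switch is done with interrupts disabled, and the
	 * new Kconfig symbol decides whether they stay off across
	 * activate_mm(). */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();	/* legacy behaviour: irqs back on first */
	activate_mm(active_mm, mm);
	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();	/* opted-in arches run activate_mm() with irqs off */

The in-diff comment gives the motivation: preemption must not hit while active_mm and mm are being updated, which could upset lazy-tlb mm refcounting; the new Kconfig help text additionally points architectures that do IPI-based TLB shootdowns at the option.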
Showing 7 changed files with 54 additions and 74 deletions (+54 −74)
arch/Kconfig                              +7   -0
arch/powerpc/Kconfig                      +1   -0
arch/powerpc/include/asm/mmu_context.h    +1   -1
arch/powerpc/include/asm/tlb.h            +0   -13
arch/powerpc/mm/book3s64/radix_tlb.c      +16  -7
arch/sparc/kernel/smp_64.c                +14  -51
fs/exec.c                                 +15  -2
arch/Kconfig

@@ -414,6 +414,13 @@ config MMU_GATHER_NO_GATHER
 	bool
 	depends on MMU_GATHER_TABLE_FREE
 
+config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+	bool
+	help
+	  Temporary select until all architectures can be converted to have
+	  irqs disabled over activate_mm. Architectures that do IPI based TLB
+	  shootdowns should enable this.
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
arch/powerpc/Kconfig

@@ -151,6 +151,7 @@ config PPC
 	select ARCH_USE_QUEUED_RWLOCKS		if PPC_QUEUED_SPINLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS	if PPC_QUEUED_SPINLOCKS
 	select ARCH_WANT_IPC_PARSE_VERSION
+	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
 	select ARCH_WEAK_RELEASE_ACQUIRE
 	select BINFMT_ELF
 	select BUILDTIME_TABLE_SORT
arch/powerpc/include/asm/mmu_context.h

@@ -244,7 +244,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  */
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
-	switch_mm(prev, next, current);
+	switch_mm_irqs_off(prev, next, current);
 }
 
 /* We don't currently use enter_lazy_tlb() for anything */
arch/powerpc/include/asm/tlb.h

@@ -66,19 +66,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
 		return false;
 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
 }
-static inline void mm_reset_thread_local(struct mm_struct *mm)
-{
-	WARN_ON(atomic_read(&mm->context.copros) > 0);
-	/*
-	 * It's possible for mm_access to take a reference on mm_users to
-	 * access the remote mm from another thread, but it's not allowed
-	 * to set mm_cpumask, so mm_users may be > 1 here.
-	 */
-	WARN_ON(current->mm != mm);
-	atomic_set(&mm->context.active_cpus, 1);
-	cpumask_clear(mm_cpumask(mm));
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-}
 #else /* CONFIG_PPC_BOOK3S_64 */
 static inline int mm_is_thread_local(struct mm_struct *mm)
 {
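
The trimming work done by the removed mm_reset_thread_local() does not disappear; it moves, per-CPU, into the IPI handler in the radix_tlb.c hunk that follows. A minimal sketch of that replacement (the two statements are lifted from the hunk below; the comment is an added explanation):

	/* Each CPU that receives the exit-flush IPI now removes itself from
	 * the mask and drops its active_cpus count, instead of the flushing
	 * CPU resetting the whole mask afterwards. */
	atomic_dec(&mm->context.active_cpus);
	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));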
arch/powerpc/mm/book3s64/radix_tlb.c

@@ -645,19 +645,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
 	struct mm_struct *mm = arg;
 	unsigned long pid = mm->context.id;
 
+	/*
+	 * A kthread could have done a mmget_not_zero() after the flushing CPU
+	 * checked mm_is_singlethreaded, and be in the process of
+	 * kthread_use_mm when interrupted here. In that case, current->mm will
+	 * be set to mm, because kthread_use_mm() setting ->mm and switching to
+	 * the mm is done with interrupts off.
+	 */
 	if (current->mm == mm)
-		return; /* Local CPU */
+		goto out_flush;
 
 	if (current->active_mm == mm) {
-		/*
-		 * Must be a kernel thread because sender is single-threaded.
-		 */
-		BUG_ON(current->mm);
+		WARN_ON_ONCE(current->mm != NULL);
+		/* Is a kernel thread and is using mm as the lazy tlb */
 		mmgrab(&init_mm);
-		switch_mm(mm, &init_mm, current);
 		current->active_mm = &init_mm;
+		switch_mm_irqs_off(mm, &init_mm, current);
 		mmdrop(mm);
 	}
+
+	atomic_dec(&mm->context.active_cpus);
+	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
+
+out_flush:
 	_tlbiel_pid(pid, RIC_FLUSH_ALL);
 }

@@ -672,7 +682,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
 	 */
 	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
 				(void *)mm, 1);
-	mm_reset_thread_local(mm);
 }
 
 void radix__flush_tlb_mm(struct mm_struct *mm)
arch/sparc/kernel/smp_64.c

@@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void)
  * are flush_tlb_*() routines, and these run after flush_cache_*()
  * which performs the flushw.
  *
- * The SMP TLB coherency scheme we use works as follows:
- *
- * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
- *    space has (potentially) executed on, this is the heuristic
- *    we use to avoid doing cross calls.
- *
- *    Also, for flushing from kswapd and also for clones, we
- *    use cpu_vm_mask as the list of cpus to make run the TLB.
- *
- * 2) TLB context numbers are shared globally across all processors
- *    in the system, this allows us to play several games to avoid
- *    cross calls.
- *
- *    One invariant is that when a cpu switches to a process, and
- *    that processes tsk->active_mm->cpu_vm_mask does not have the
- *    current cpu's bit set, that tlb context is flushed locally.
- *
- *    If the address space is non-shared (ie. mm->count == 1) we avoid
- *    cross calls when we want to flush the currently running process's
- *    tlb state.  This is done by clearing all cpu bits except the current
- *    processor's in current->mm->cpu_vm_mask and performing the
- *    flush locally only.  This will force any subsequent cpus which run
- *    this task to flush the context from the local tlb if the process
- *    migrates to another cpu (again).
- *
- * 3) For shared address spaces (threads) and swapping we bite the
- *    bullet for most cases and perform the cross call (but only to
- *    the cpus listed in cpu_vm_mask).
- *
- * The performance gain from "optimizing" away the cross call for threads is
- * questionable (in theory the big win for threads is the massive sharing of
- * address space state across processors).
+ * mm->cpu_vm_mask is a bit mask of which cpus an address
+ * space has (potentially) executed on, this is the heuristic
+ * we use to limit cross calls.
  */
 
 /* This currently is only used by the hugetlb arch pre-fault

@@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
 	u32 ctx = CTX_HWBITS(mm->context);
-	int cpu = get_cpu();
-
-	if (atomic_read(&mm->mm_users) == 1) {
-		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
-		goto local_flush_and_out;
-	}
+
+	get_cpu();
 
 	smp_cross_call_masked(&xcall_flush_tlb_mm,
 			      ctx, 0, 0,
 			      mm_cpumask(mm));
 
-local_flush_and_out:
 	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
 
 	put_cpu();

@@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 {
 	u32 ctx = CTX_HWBITS(mm->context);
 	struct tlb_pending_info info;
-	int cpu = get_cpu();
+
+	get_cpu();
 
 	info.ctx = ctx;
 	info.nr = nr;
 	info.vaddrs = vaddrs;
 
-	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
-		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
-	else
-		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
-				       &info, 1);
+	smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+			       &info, 1);
 
 	__flush_tlb_pending(ctx, nr, vaddrs);

@@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
 {
 	unsigned long context = CTX_HWBITS(mm->context);
-	int cpu = get_cpu();
-
-	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
-		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
-	else
-		smp_cross_call_masked(&xcall_flush_tlb_page,
-				      context, vaddr, 0,
-				      mm_cpumask(mm));
+
+	get_cpu();
+
+	smp_cross_call_masked(&xcall_flush_tlb_page,
+			      context, vaddr, 0,
+			      mm_cpumask(mm));
 
 	__flush_tlb_page(context, vaddr);
 	put_cpu();
fs/exec.c

@@ -1130,11 +1130,24 @@ static int exec_mmap(struct mm_struct *mm)
 	}
 	task_lock(tsk);
-	active_mm = tsk->active_mm;
 	membarrier_exec_mmap(mm);
-	tsk->mm = mm;
+
+	local_irq_disable();
+	active_mm = tsk->active_mm;
 	tsk->active_mm = mm;
+	tsk->mm = mm;
+	/*
+	 * This prevents preemption while active_mm is being loaded and
+	 * it and mm are being updated, which could cause problems for
+	 * lazy tlb mm refcounting when these are updated by context
+	 * switches. Not all architectures can handle irqs off over
+	 * activate_mm yet.
+	 */
+	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
+		local_irq_enable();
 	activate_mm(active_mm, mm);
+	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
+		local_irq_enable();
 	tsk->mm->vmacache_seqnum = 0;
 	vmacache_flush(tsk);
 	task_unlock(tsk);
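
For completeness, a minimal sketch of what activate_mm() can look like on an architecture that selects ARCH_WANT_IRQS_OFF_ACTIVATE_MM and is therefore reached with interrupts already disabled from the exec_mmap() path above. This mirrors the powerpc mmu_context.h hunk in this merge and is not a recipe verified for other architectures:

	/* Sketch, mirroring the powerpc change in this merge: with irqs
	 * guaranteed off by exec_mmap(), the irqs-off variant of switch_mm()
	 * can be used directly. */
	static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
	{
		switch_mm_irqs_off(prev, next, current);
	}

Architectures that do not select the option keep the old contract and continue to see activate_mm() with interrupts enabled.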