Kirill Smelkov / linux / Commits

Commit 7ee175f7
Authored Aug 29, 2005 by Tony Luck
Pull mm-context-fix into release branch

Parents: dcf82962, badea125
Showing 2 changed files with 37 additions and 25 deletions (+37 / -25)
include/asm-ia64/mmu.h		+5 / -3
include/asm-ia64/mmu_context.h	+32 / -22
include/asm-ia64/mmu.h

@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number.  We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number.  We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
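
The new nv_mm_context_t is simply a non-volatile flavor of the context type: mm_context_t stays volatile so readers outside spinlock'd sections (activate_mm(), init_new_context()) get proper ordering, while locals that only need a one-time snapshot of mm->context can use the non-volatile type. A minimal user-space sketch of the distinction; struct mm_sketch and use_context are hypothetical names for illustration, not kernel code:

	typedef volatile unsigned long mm_context_t;
	typedef unsigned long nv_mm_context_t;

	struct mm_sketch {
		mm_context_t context;			/* may be updated by another CPU */
	};

	static unsigned long
	use_context (struct mm_sketch *mm)
	{
		nv_mm_context_t context = mm->context;	/* exactly one volatile read */

		/*
		 * Both uses below work on the same snapshot even if mm->context
		 * changes concurrently; working directly on the volatile field
		 * instead would force a fresh memory read at each use, and the
		 * two reads could then observe different values.
		 */
		return context + (context << 8);
	}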
include/asm-ia64/mmu_context.h
@@ -55,34 +55,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
-
-	if (context)
-		return context;
-
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+	nv_mm_context_t context = mm->context;
+
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
 		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
-	return context;
+
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
+	return context;
 }
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.
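
The substance of the fix is in the first hunk above: get_mmu_context() keeps its unlocked fast-path read of mm->context, but both it and delayed_tlb_flush() now re-check their condition after taking ia64_ctx.lock, and the delayed TLB flush runs before a context is returned (hence its removal from activate_mm() in the last hunk), so a freshly allocated context is never used while stale TLB entries for it may survive. A minimal user-space sketch of that check / lock / re-check pattern, with a pthread mutex standing in for the kernel spinlock; all names here are illustrative, not kernel APIs:

	#include <pthread.h>

	static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long next_context = 1;

	struct mm_sketch {
		volatile unsigned long context;	/* 0 means "not assigned yet" */
	};

	static unsigned long
	get_context (struct mm_sketch *mm)
	{
		unsigned long context = mm->context;	/* cheap unlocked read */

		if (context == 0) {
			pthread_mutex_lock(&ctx_lock);
			/*
			 * Re-check under the lock: another thread may have
			 * assigned a context between our unlocked read and
			 * the lock acquisition.
			 */
			context = mm->context;
			if (context == 0)
				mm->context = context = next_context++;
			pthread_mutex_unlock(&ctx_lock);
		}
		return context;
	}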