Commit e1dccf46
Author: David S. Miller
Date:   Jun 10, 2002
Parent: 4d7c1a20

    Get Sparc64 building again, both UP and SMP.

Showing 9 changed files with 36 additions and 26 deletions:
 arch/sparc64/kernel/dtlb_backend.S |  +1  -0
 arch/sparc64/kernel/dtlb_base.S    |  +3  -0
 arch/sparc64/kernel/setup.c        |  +1  -0
 arch/sparc64/kernel/smp.c          |  +1  -0
 arch/sparc64/mm/init.c             |  +6  -7
 include/asm-sparc64/mmu_context.h  |  +2  -0
 include/asm-sparc64/pgalloc.h      |  +8  -8
 include/asm-sparc64/pgtable.h      |  +3  -1
 include/asm-sparc64/tlbflush.h     | +11 -10
arch/sparc64/kernel/dtlb_backend.S
@@ -7,6 +7,7 @@
  */
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #if PAGE_SHIFT == 13
 #define FILL_VALID_SZ_BITS1(r1) \
arch/sparc64/kernel/dtlb_base.S
@@ -6,6 +6,9 @@
  * Copyright (C) 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
  */
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 /* %g1	TLB_SFSR	(%g1 + %g1 == TLB_TAG_ACCESS)
  * %g2	(KERN_HIGHBITS | KERN_LOWBITS)
  * %g3	VPTE base	(0xfffffffe00000000)	Spitfire/Blackbird (44-bit VA space)
arch/sparc64/kernel/setup.c
@@ -40,6 +40,7 @@
 #include <asm/head.h>
 #include <asm/starfire.h>
 #include <asm/hardirq.h>
+#include <asm/mmu_context.h>
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
arch/sparc64/kernel/smp.c
@@ -23,6 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 #include <asm/irq.h>
 #include <asm/page.h>
arch/sparc64/mm/init.c
@@ -30,6 +30,7 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
 #include <asm/dma.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
@@ -89,17 +90,15 @@ void check_pgt_cache(void)
 	if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
 		struct page *page, *page2;
 		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
-			if ((unsigned long)page->pprev_hash == 3) {
+			if ((unsigned long)page->lru.prev == 3) {
 				if (page2)
-					page2->next_hash = page->next_hash;
+					page2->lru.next = page->lru.next;
 				else
-					(struct page *)pgd_quicklist = page->next_hash;
-				page->next_hash = NULL;
-				page->pprev_hash = NULL;
+					(struct page *)pgd_quicklist = page->lru.next;
 				pgd_cache_size -= 2;
 				__free_page(page);
 				if (page2)
-					page = page2->next_hash;
+					page = (struct page *)page2->lru.next;
 				else
 					page = (struct page *)pgd_quicklist;
 				if (pgd_cache_size <= PGT_CACHE_LOW / 4)
@@ -107,7 +106,7 @@ void check_pgt_cache(void)
 				continue;
 			}
 			page2 = page;
-			page = page->next_hash;
+			page = (struct page *)page->lru.next;
 		}
 	}
 #endif
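Note on the check_pgt_cache() hunk above: the pgd quicklist is a singly linked list threaded through struct page, with the link now kept in page->lru.next and a small occupancy tag in page->lru.prev (tag == 3 means both pgd halves of the page are free, so the whole page can be reaped). A standalone userspace sketch of the same loop shape, using made-up fake_page/quicklist/pgd_cache_size names rather than kernel code:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel types: 'link' models page->lru.next and
 * 'tag' models page->lru.prev (3 = both pgd halves of the page free). */
struct fake_page {
	struct fake_page *link;
	unsigned long tag;
};

static struct fake_page *quicklist;
static int pgd_cache_size;

/* Same loop shape as the rewritten check_pgt_cache(): walk the singly
 * linked quicklist with a trailing pointer and free every page whose
 * two cached pgd halves are both unused. */
static void reap_fully_free_pages(void)
{
	struct fake_page *page, *page2;

	for (page2 = NULL, page = quicklist; page;) {
		if (page->tag == 3) {
			if (page2)
				page2->link = page->link;
			else
				quicklist = page->link;
			pgd_cache_size -= 2;	/* two pgds leave the cache */
			free(page);
			page = page2 ? page2->link : quicklist;
			continue;
		}
		page2 = page;
		page = page->link;
	}
}

int main(void)
{
	/* Three pages; the middle one has only one half cached (tag 1). */
	for (int i = 0; i < 3; i++) {
		struct fake_page *p = calloc(1, sizeof(*p));

		p->tag = (i == 1) ? 1 : 3;
		p->link = quicklist;
		quicklist = p;
		pgd_cache_size += (p->tag == 3) ? 2 : 1;
	}
	reap_fully_free_pages();
	printf("one page left: %d, pgd_cache_size: %d\n",
	       quicklist && !quicklist->link, pgd_cache_size);	/* 1, 1 */
	free(quicklist);
	return 0;
}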
include/asm-sparc64/mmu_context.h
@@ -141,6 +141,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	spin_unlock(&mm->page_table_lock);
 }
+extern void __flush_tlb_mm(unsigned long, unsigned long);
 /* Activate a new MM instance for the current task. */
 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
 {
include/asm-sparc64/pgalloc.h
@@ -35,11 +35,11 @@ static __inline__ void free_pgd_fast(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 	preempt_disable();
-	if (!page->pprev_hash) {
-		(unsigned long *)page->next_hash = pgd_quicklist;
+	if (!page->lru.prev) {
+		(unsigned long *)page->lru.next = pgd_quicklist;
 		pgd_quicklist = (unsigned long *)page;
 	}
-	(unsigned long)page->pprev_hash |=
+	(unsigned long)page->lru.prev |=
 		(((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
 	pgd_cache_size++;
 	preempt_enable();
@@ -51,7 +51,7 @@ static __inline__ pgd_t *get_pgd_fast(void)
 	preempt_disable();
 	if ((ret = (struct page *)pgd_quicklist) != NULL) {
-		unsigned long mask = (unsigned long)ret->pprev_hash;
+		unsigned long mask = (unsigned long)ret->lru.prev;
 		unsigned long off = 0;
 		if (mask & 1)
@@ -60,9 +60,9 @@ static __inline__ pgd_t *get_pgd_fast(void)
 			off = PAGE_SIZE / 2;
 			mask &= ~2;
 		}
-		(unsigned long)ret->pprev_hash = mask;
+		(unsigned long)ret->lru.prev = mask;
 		if (!mask)
-			pgd_quicklist = (unsigned long *)ret->next_hash;
+			pgd_quicklist = (unsigned long *)ret->lru.next;
 		ret = (struct page *)(__page_address(ret) + off);
 		pgd_cache_size--;
 		preempt_enable();
@@ -74,10 +74,10 @@ static __inline__ pgd_t *get_pgd_fast(void)
 		if (page) {
 			ret = (struct page *)page_address(page);
 			clear_page(ret);
-			(unsigned long)page->pprev_hash = 2;
+			(unsigned long)page->lru.prev = 2;
 			preempt_disable();
-			(unsigned long *)page->next_hash = pgd_quicklist;
+			(unsigned long *)page->lru.next = pgd_quicklist;
 			pgd_quicklist = (unsigned long *)page;
 			pgd_cache_size++;
 			preempt_enable();
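Note on the pgalloc.h hunks: sparc64 packs two pgd tables into each 8K page. free_pgd_fast() sets tag bit 1 or 2 depending on which half is being returned, and get_pgd_fast() hands halves back out, unlinking the page once both are in use; the commit only renames where the link and tag live (next_hash/pprev_hash -> lru.next/lru.prev). A compilable userspace sketch of the scheme with hypothetical names (fake_page, quicklist); it tags by offset within the page since an ordinary allocation is not page-aligned:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 8192UL	/* sparc64: PAGE_SHIFT == 13 */

/* Stand-in for struct page: 'link' plays the role of page->lru.next,
 * 'tag' of page->lru.prev.  Tag bit 0 set = low half free, bit 1 set =
 * high half free; 3 = the whole page is free. */
struct fake_page {
	struct fake_page *link;
	unsigned long tag;
	unsigned char data[PAGE_SIZE];
};

static struct fake_page *quicklist;
static int pgd_cache_size;

static void free_pgd_fast(void *pgd, struct fake_page *page)
{
	if (!page->tag) {	/* first free half: link the page in */
		page->link = quicklist;
		quicklist = page;
	}
	/* The kernel masks the pgd address itself; we use the offset
	 * within the page because malloc'd memory is not page-aligned. */
	page->tag |= (((uintptr_t)pgd - (uintptr_t)page->data)
		      & (PAGE_SIZE / 2)) ? 2 : 1;
	pgd_cache_size++;
}

static void *get_pgd_fast(void)
{
	struct fake_page *page = quicklist;
	unsigned long off = 0;

	if (!page)
		return NULL;
	if (page->tag & 1) {
		page->tag &= ~1UL;	/* hand out the low half */
	} else {
		off = PAGE_SIZE / 2;	/* only the high half is free */
		page->tag &= ~2UL;
	}
	if (!page->tag)		/* both halves in use: unlink the page */
		quicklist = page->link;
	pgd_cache_size--;
	return page->data + off;
}

int main(void)
{
	struct fake_page *page = calloc(1, sizeof(*page));

	free_pgd_fast(page->data, page);			/* low half */
	free_pgd_fast(page->data + PAGE_SIZE / 2, page);	/* high half */
	assert(page->tag == 3);
	assert(get_pgd_fast() == page->data);
	assert(get_pgd_fast() == page->data + PAGE_SIZE / 2);
	assert(quicklist == NULL && pgd_cache_size == 0);
	printf("two pgds per page; tag bits behave as in pgalloc.h\n");
	free(page);
	return 0;
}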
include/asm-sparc64/pgtable.h
@@ -14,7 +14,6 @@
 #include <asm/spitfire.h>
 #include <asm/asi.h>
-#include <asm/mmu_context.h>
 #include <asm/system.h>
 #include <asm/page.h>
 #include <asm/processor.h>
@@ -63,6 +62,8 @@
 #ifndef __ASSEMBLY__
+#include <linux/sched.h>
 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
@@ -286,6 +287,7 @@ extern pgd_t swapper_pg_dir[1];
 #define mmu_lockarea(vaddr, len)	(vaddr)
 #define mmu_unlockarea(vaddr, len)	do { } while(0)
+struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 /* Make a non-present pseudo-TTE. */
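Note on the pgtable.h hunks: dropping the asm/mmu_context.h include (and picking up linux/sched.h directly) untangles the header ordering this commit reshuffles, and the new struct vm_area_struct; forward declaration keeps the update_mmu_cache() prototype legal without any other header having defined the type. A pointer to an incomplete type is all a prototype needs; a toy standalone example (the two-argument signature is simplified from the kernel's, which also passes a pte_t):

#include <stdio.h>

/* A forward declaration makes struct vm_area_struct an incomplete
 * type, which is enough for a prototype that only takes a pointer. */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long);

/* Toy definitions so the example links and runs on its own. */
struct vm_area_struct {
	unsigned long vm_start;
};

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address)
{
	printf("update_mmu_cache: vm_start=%#lx address=%#lx\n",
	       vma->vm_start, address);
}

int main(void)
{
	struct vm_area_struct vma = { 0x2000 };

	update_mmu_cache(&vma, 0x2004);
	return 0;
}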
include/asm-sparc64/tlbflush.h
@@ -3,6 +3,7 @@
 #include <linux/config.h>
 #include <linux/mm.h>
+#include <asm/mmu_context.h>
 /* TLB flush operations. */
@@ -22,43 +23,43 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 	__flush_tlb_kernel_range(start,end)
 #define flush_tlb_mm(__mm) \
-do { if(CTX_VALID((__mm)->context)) \
+do { if (CTX_VALID((__mm)->context)) \
 	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
-} while(0)
+} while (0)
 #define flush_tlb_range(__vma, start, end) \
-do { if(CTX_VALID((__vma)->vm_mm->context)) { \
+do { if (CTX_VALID((__vma)->vm_mm->context)) { \
 	unsigned long __start = (start)&PAGE_MASK; \
 	unsigned long __end = PAGE_ALIGN(end); \
 	__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
 			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
 			  (__end - __start)); \
 } \
-} while(0)
+} while (0)
 #define flush_tlb_vpte_range(__mm, start, end) \
-do { if(CTX_VALID((__mm)->context)) { \
+do { if (CTX_VALID((__mm)->context)) { \
 	unsigned long __start = (start)&PAGE_MASK; \
 	unsigned long __end = PAGE_ALIGN(end); \
 	__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
 			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
 			  (__end - __start)); \
 } \
-} while(0)
+} while (0)
 #define flush_tlb_page(vma, page) \
 do { struct mm_struct *__mm = (vma)->vm_mm; \
-	if(CTX_VALID(__mm->context)) \
+	if (CTX_VALID(__mm->context)) \
 		__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
 				 SECONDARY_CONTEXT); \
-} while(0)
+} while (0)
 #define flush_tlb_vpte_page(mm, addr) \
 do { struct mm_struct *__mm = (mm); \
-	if(CTX_VALID(__mm->context)) \
+	if (CTX_VALID(__mm->context)) \
 		__flush_tlb_page(CTX_HWBITS(__mm->context), (addr)&PAGE_MASK, \
 				 SECONDARY_CONTEXT); \
-} while(0)
+} while (0)
 #else /* CONFIG_SMP */
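Note on the tlbflush.h hunks: the only functional change is the new asm/mmu_context.h include, presumably because these macros expand CTX_VALID() and CTX_HWBITS(); the remaining +/- pairs just reformat if( and while(0) to if ( and while (0). Every macro here relies on the do { ... } while (0) idiom so that a multi-statement body behaves as a single statement; a minimal illustration with a made-up FLUSH_BOTH() macro:

#include <stdio.h>

/* Without the do { } while (0) wrapper, 'if (x) MACRO(); else ...'
 * would either attach the else to the wrong branch or guard only the
 * first statement of the macro body.  Wrapped, the macro expands to
 * exactly one statement. */
#define FLUSH_BOTH(who) \
do { \
	printf("flush tlb for %s\n", (who)); \
	printf("flush cache for %s\n", (who)); \
} while (0)

int main(void)
{
	int dirty = 1;

	if (dirty)
		FLUSH_BOTH("init");	/* nests like a single statement */
	else
		printf("nothing to flush\n");
	return 0;
}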