Commit ef4ed97d, authored May 13, 2010 by Paul Mundt

    Merge branch 'sh/lmb'

    Conflicts:
    	arch/sh/kernel/setup.c

Parents: c5eb5b37 21823259

Showing 11 changed files with 271 additions and 209 deletions (+271 -209)
arch/sh/include/asm/io_generic.h    +1   -0
arch/sh/include/asm/kexec.h         +8   -0
arch/sh/include/asm/machvec.h       +2   -0
arch/sh/include/asm/mmzone.h        +2   -1
arch/sh/include/asm/page.h          +1   -1
arch/sh/include/asm/setup.h         +1   -0
arch/sh/kernel/machine_kexec.c      +58  -1
arch/sh/kernel/machvec.c            +1   -0
arch/sh/kernel/setup.c              +26  -202
arch/sh/mm/init.c                   +169 -4
arch/sh/mm/pmb.c                    +2   -0
arch/sh/include/asm/io_generic.h

@@ -38,5 +38,6 @@ void IO_CONCAT(__IO_PREFIX,iounmap)(void *addr);
 void __iomem *IO_CONCAT(__IO_PREFIX,ioport_map)(unsigned long addr,
 						unsigned int size);
 void IO_CONCAT(__IO_PREFIX,ioport_unmap)(void __iomem *addr);
+void IO_CONCAT(__IO_PREFIX,mem_init)(void);
 
 #undef __IO_PREFIX
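A side note on what this declaration expands to: IO_CONCAT() pastes the per-machine prefix onto the symbol name (the helper macros live in arch/sh/include/asm/io.h), so with the default `generic` prefix the new line declares generic_mem_init() — exactly the function added to arch/sh/mm/init.c later in this diff. A minimal sketch of the expansion, assuming the standard token-pasting definition:

	#define __IO_PREFIX	generic
	#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
	#define _IO_CONCAT(a,b)	a ## _ ## b

	/* IO_CONCAT(__IO_PREFIX, mem_init)(void); therefore declares: */
	void generic_mem_init(void);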
arch/sh/include/asm/kexec.h

@@ -26,6 +26,10 @@
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_SH
 
+#ifdef CONFIG_KEXEC
+/* arch/sh/kernel/machine_kexec.c */
+void reserve_crashkernel(void);
+
 static inline void crash_setup_regs(struct pt_regs *newregs,
 				    struct pt_regs *oldregs)
 {
@@ -59,4 +63,8 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 		newregs->pc = (unsigned long)current_text_addr();
 	}
 }
+#else
+static inline void reserve_crashkernel(void) { }
+#endif /* CONFIG_KEXEC */
+
 #endif /* __ASM_SH_KEXEC_H */
arch/sh/include/asm/machvec.h

@@ -49,6 +49,8 @@ struct sh_machine_vector {
 	int (*mv_clk_init)(void);
 
 	int (*mv_mode_pins)(void);
+
+	void (*mv_mem_init)(void);
 };
 
 extern struct sh_machine_vector sh_mv;
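Boards supply these hooks through their machine vectors, so the new mv_mem_init member lets a board override how its RAM is registered at boot (sh_mv_setup() falls back to the generic version when it is unset, per the machvec.c change below). An illustrative sketch — the board name and callback are hypothetical:

	/* hypothetical board code, for illustration only */
	static void __init myboard_mem_init(void)
	{
		/* register this board's RAM with LMB */
		lmb_add(__MEMORY_START, __MEMORY_SIZE);
	}

	static struct sh_machine_vector mv_myboard __initmv = {
		.mv_name	= "MyBoard",
		.mv_mem_init	= myboard_mem_init,
	};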
arch/sh/include/asm/mmzone.h

@@ -42,9 +42,10 @@ setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 void __init plat_mem_setup(void);
 
 /* arch/sh/kernel/setup.c */
-void __init setup_bootmem_allocator(unsigned long start_pfn);
 void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 			       unsigned long end_pfn);
+/* arch/sh/mm/init.c */
+void __init allocate_pgdat(unsigned int nid);
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_MMZONE_H */
arch/sh/include/asm/page.h

@@ -49,7 +49,7 @@
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
-extern unsigned long memory_start, memory_end;
+extern unsigned long memory_start, memory_end, memory_limit;
 
 static inline unsigned long
 pages_do_alias(unsigned long addr1, unsigned long addr2)
arch/sh/include/asm/setup.h

@@ -19,6 +19,7 @@
 #define COMMAND_LINE ((char *) (PARAM+0x100))
 
 void sh_mv_setup(void);
+void check_for_initrd(void);
 
 #endif /* __KERNEL__ */
arch/sh/kernel/machine_kexec.c

@@ -8,7 +8,6 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
-
 #include <linux/mm.h>
 #include <linux/kexec.h>
 #include <linux/delay.h>
@@ -16,6 +15,7 @@
 #include <linux/numa.h>
 #include <linux/ftrace.h>
 #include <linux/suspend.h>
+#include <linux/lmb.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -151,3 +151,60 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_CONFIG(X2TLB);
 #endif
 }
+
+void __init reserve_crashkernel(void)
+{
+	unsigned long long crash_size, crash_base;
+	int ret;
+
+	/* this is necessary because of lmb_phys_mem_size() */
+	lmb_analyze();
+
+	ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+			&crash_size, &crash_base);
+	if (ret == 0 && crash_size > 0) {
+		crashk_res.start = crash_base;
+		crashk_res.end = crash_base + crash_size - 1;
+	}
+
+	if (crashk_res.end == crashk_res.start)
+		goto disable;
+
+	crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
+	if (!crashk_res.start) {
+		unsigned long max = lmb_end_of_DRAM() - memory_limit;
+		crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max);
+		if (!crashk_res.start) {
+			pr_err("crashkernel allocation failed\n");
+			goto disable;
+		}
+	} else {
+		ret = lmb_reserve(crashk_res.start, crash_size);
+		if (unlikely(ret < 0)) {
+			pr_err("crashkernel reservation failed - "
+			       "memory is in use\n");
+			goto disable;
+		}
+	}
+
+	crashk_res.end = crashk_res.start + crash_size - 1;
+
+	/*
+	 * Crash kernel trumps memory limit
+	 */
+	if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+		memory_limit = 0;
+		pr_info("Disabled memory limit for crashkernel\n");
+	}
+
+	pr_info("Reserving %ldMB of memory at 0x%08lx "
+		"for crashkernel (System RAM: %ldMB)\n",
+		(unsigned long)(crash_size >> 20),
+		(unsigned long)(crashk_res.start),
+		(unsigned long)(lmb_phys_mem_size() >> 20));
+
+	return;
+
+disable:
+	crashk_res.start = crashk_res.end = 0;
+}
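The function consumes the standard crashkernel= boot parameter through parse_crashkernel(). The two branches of the allocation logic above correspond to the two common command-line spellings (sizes here are arbitrary examples):

	crashkernel=32M@16M	# fixed base: lmb_reserve() the given range
	crashkernel=32M		# no base given: __lmb_alloc_base() picks one

Note also the lmb_analyze() call at the top: as the comment says, it is needed so that lmb_phys_mem_size() returns a meaningful total this early in boot.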
arch/sh/kernel/machvec.c

@@ -131,6 +131,7 @@ void __init sh_mv_setup(void)
 	mv_set(ioport_unmap);
 	mv_set(irq_demux);
 	mv_set(mode_pins);
+	mv_set(mem_init);
 
 	if (!sh_mv.mv_nr_irqs)
 		sh_mv.mv_nr_irqs = NR_IRQS;
arch/sh/kernel/setup.c

@@ -4,7 +4,7 @@
  * This file handles the architecture-dependent parts of initialization
  *
  * Copyright (C) 1999  Niibe Yutaka
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
  */
 #include <linux/screen_info.h>
 #include <linux/ioport.h>
@@ -41,6 +41,7 @@
 #include <asm/clock.h>
 #include <asm/smp.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
 
 /*
  * Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -94,6 +95,7 @@ unsigned long memory_start;
 EXPORT_SYMBOL(memory_start);
 unsigned long memory_end = 0;
 EXPORT_SYMBOL(memory_end);
+unsigned long memory_limit = 0;
 
 static struct resource mem_resources[MAX_NUMNODES];
@@ -101,94 +103,18 @@ int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
 
 static int __init early_parse_mem(char *p)
 {
-	unsigned long size;
+	if (!p)
+		return 1;
 
-	memory_start = (unsigned long)__va(__MEMORY_START);
-	size = memparse(p, &p);
+	memory_limit = PAGE_ALIGN(memparse(p, &p));
 
-	if (size > __MEMORY_SIZE) {
-		printk(KERN_ERR
-			"Using mem= to increase the size of kernel memory "
-			"is not allowed.\n"
-			"  Recompile the kernel with the correct value for "
-			"CONFIG_MEMORY_SIZE.\n");
-		return 0;
-	}
-
-	memory_end = memory_start + size;
+	pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
 
 	return 0;
 }
 early_param("mem", early_parse_mem);
 
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(void)
-{
-	unsigned long curr_pfn, last_pfn, pages;
-
-	/*
-	 * We are rounding up the start address of usable memory:
-	 */
-	curr_pfn = PFN_UP(__MEMORY_START);
-
-	/*
-	 * ... and at the end of the usable range downwards:
-	 */
-	last_pfn = PFN_DOWN(__pa(memory_end));
-
-	if (last_pfn > max_low_pfn)
-		last_pfn = max_low_pfn;
-
-	pages = last_pfn - curr_pfn;
-	free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
-}
-
-#ifdef CONFIG_KEXEC
-static void __init reserve_crashkernel(void)
-{
-	unsigned long long free_mem;
-	unsigned long long crash_size, crash_base;
-	void *vp;
-	int ret;
-
-	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
-
-	ret = parse_crashkernel(boot_command_line, free_mem,
-			&crash_size, &crash_base);
-	if (ret == 0 && crash_size) {
-		if (crash_base <= 0) {
-			vp = alloc_bootmem_nopanic(crash_size);
-			if (!vp) {
-				printk(KERN_INFO "crashkernel allocation "
-				       "failed\n");
-				return;
-			}
-			crash_base = __pa(vp);
-		} else if (reserve_bootmem(crash_base, crash_size,
-					BOOTMEM_EXCLUSIVE) < 0) {
-			printk(KERN_INFO "crashkernel reservation failed - "
-					"memory is in use\n");
-			return;
-		}
-
-		printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
-				"for crashkernel (System RAM: %ldMB)\n",
-				(unsigned long)(crash_size >> 20),
-				(unsigned long)(crash_base >> 20),
-				(unsigned long)(free_mem >> 20));
-
-		crashk_res.start = crash_base;
-		crashk_res.end   = crash_base + crash_size - 1;
-		insert_resource(&iomem_resource, &crashk_res);
-	}
-}
-#else
-static inline void __init reserve_crashkernel(void)
-{}
-#endif
-
-static void __init check_for_initrd(void)
+void __init check_for_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
 	unsigned long start, end;
@@ -235,7 +161,7 @@ static void __init check_for_initrd(void)
 	initrd_start = (unsigned long)__va(__pa(start));
 	initrd_end = initrd_start + INITRD_SIZE;
 
-	reserve_bootmem(__pa(initrd_start), INITRD_SIZE, BOOTMEM_DEFAULT);
+	lmb_reserve(__pa(initrd_start), INITRD_SIZE);
 
 	return;
@@ -265,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 			       unsigned long end_pfn)
 {
 	struct resource *res = &mem_resources[nid];
+	unsigned long start, end;
 
 	WARN_ON(res->name); /* max one active range per node for now */
 
+	start = start_pfn << PAGE_SHIFT;
+	end = end_pfn << PAGE_SHIFT;
+
 	res->name = "System RAM";
-	res->start = start_pfn << PAGE_SHIFT;
-	res->end = (end_pfn << PAGE_SHIFT) - 1;
+	res->start = start;
+	res->end = end - 1;
 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
 	if (request_resource(&iomem_resource, res)) {
 		pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
 		       start_pfn, end_pfn);
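For reference, the resources registered here are what appear under /proc/iomem: one "System RAM" entry per active range, with the kernel section resources nested inside it, along these lines (addresses purely illustrative):

	08000000-0fffffff : System RAM
	  08002000-082fffff : Kernel code
	  08300000-0834ffff : Kernel data
	  08350000-083fffff : Kernel bss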
@@ -287,100 +218,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 	request_resource(res, &data_resource);
 	request_resource(res, &bss_resource);
 
+	/*
+	 * Also make sure that there is a PMB mapping that covers this
+	 * range before we attempt to activate it, to avoid reset by MMU.
+	 * We can hit this path with NUMA or memory hot-add.
+	 */
+	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+			 PAGE_KERNEL);
+
 	add_active_range(nid, start_pfn, end_pfn);
 }
 
-void __init setup_bootmem_allocator(unsigned long free_pfn)
-{
-	unsigned long bootmap_size;
-	unsigned long bootmap_pages, bootmem_paddr;
-	u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
-	int i;
-
-	bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-	bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
-	/*
-	 * Find a proper area for the bootmem bitmap. After this
-	 * bootstrap step all allocations (until the page allocator
-	 * is intact) must be done via bootmem_alloc().
-	 */
-	bootmap_size = init_bootmem_node(NODE_DATA(0),
-					 bootmem_paddr >> PAGE_SHIFT,
-					 min_low_pfn, max_low_pfn);
-
-	/* Add active regions with valid PFNs. */
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
-		__add_active_range(0, start_pfn, end_pfn);
-	}
-
-	/*
-	 * Add all physical memory to the bootmem map and mark each
-	 * area as present.
-	 */
-	register_bootmem_low_pages();
-
-	/* Reserve the sections we're already using. */
-	for (i = 0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i),
-				BOOTMEM_DEFAULT);
-
-	node_set_online(0);
-
-	sparse_memory_present_with_active_regions(0);
-
-	check_for_initrd();
-
-	reserve_crashkernel();
-}
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-static void __init setup_memory(void)
-{
-	unsigned long start_pfn;
-	u64 base = min_low_pfn << PAGE_SHIFT;
-	u64 size = (max_low_pfn << PAGE_SHIFT) - base;
-
-	/*
-	 * Partially used pages are not usable - thus
-	 * we are rounding upwards:
-	 */
-	start_pfn = PFN_UP(__pa(_end));
-
-	lmb_add(base, size);
-
-	/*
-	 * Reserve the kernel text and
-	 * Reserve the bootmem bitmap. We do this in two steps (first step
-	 * was init_bootmem()), because this catches the (definitely buggy)
-	 * case of us accidentally initializing the bootmem allocator with
-	 * an invalid RAM area.
-	 */
-	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
-		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
-	/*
-	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
-	 */
-	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
-	lmb_analyze();
-	lmb_dump_all();
-
-	setup_bootmem_allocator(start_pfn);
-}
-#else
-extern void __init setup_memory(void);
-#endif
-
-void __init __attribute__ ((weak)) plat_early_device_setup(void)
+void __init __weak plat_early_device_setup(void)
 {
 }
@@ -421,10 +270,6 @@ void __init setup_arch(char **cmdline_p)
 	bss_resource.start = virt_to_phys(__bss_start);
 	bss_resource.end = virt_to_phys(_ebss) - 1;
 
-	memory_start = (unsigned long)__va(__MEMORY_START);
-	if (!memory_end)
-		memory_end = memory_start + __MEMORY_SIZE;
-
 #ifdef CONFIG_CMDLINE_OVERWRITE
 	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
 #else
@@ -441,39 +286,18 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
-	uncached_init();
-
 	plat_early_device_setup();
 
-	/* Let earlyprintk output early console messages */
-	early_platform_driver_probe("earlyprintk", 1, 1);
-
 	sh_mv_setup();
 
-	/*
-	 * Find the highest page frame number we have available
-	 */
-	max_pfn = PFN_DOWN(__pa(memory_end));
-
-	/*
-	 * Determine low and high memory ranges:
-	 */
-	max_low_pfn = max_pfn;
-	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
-
-	nodes_clear(node_online_map);
-
-	pmb_init();
-	lmb_init();
-	setup_memory();
-	sparse_init();
+	/* Let earlyprintk output early console messages */
+	early_platform_driver_probe("earlyprintk", 1, 1);
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
 
 	paging_init();
 
-	ioremap_fixed_init();
-
 	/* Perform the machine specific initialisation */
 	if (likely(sh_mv.mv_setup))
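With the old early_parse_mem() body gone, mem= no longer computes memory_end directly; it only records a memory_limit that paging_init() later applies via lmb_enforce_memory_limit() (see the arch/sh/mm/init.c changes below). Example usage on the kernel command line:

	mem=64M		# clamp usable RAM to 64MB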
arch/sh/mm/init.c

@@ -2,7 +2,7 @@
  * linux/arch/sh/mm/init.c
  *
  * Copyright (C) 1999  Niibe Yutaka
- * Copyright (C) 2002 - 2007  Paul Mundt
+ * Copyright (C) 2002 - 2010  Paul Mundt
  *
  * Based on linux/arch/i386/mm/init.c:
  *  Copyright (C) 1995  Linus Torvalds
@@ -16,17 +16,31 @@
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
+#include <linux/lmb.h>
+#include <linux/kexec.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/cache.h>
 #include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
+void __init generic_mem_init(void)
+{
+	lmb_add(__MEMORY_START, __MEMORY_SIZE);
+}
+
+void __init __weak plat_mem_setup(void)
+{
+	/* Nothing to see here, move along. */
+}
+
 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
@@ -152,15 +166,166 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 }
 #endif	/* CONFIG_MMU */
 
-/*
- * paging_init() sets up the page tables
- */
+void __init allocate_pgdat(unsigned int nid)
+{
+	unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	unsigned long phys;
+#endif
+
+	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	phys = __lmb_alloc_base(sizeof(struct pglist_data),
+				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+	/* Retry with all of system memory */
+	if (!phys)
+		phys = __lmb_alloc_base(sizeof(struct pglist_data),
+					SMP_CACHE_BYTES, lmb_end_of_DRAM());
+	if (!phys)
+		panic("Can't allocate pgdat for node %d\n", nid);
+
+	NODE_DATA(nid) = __va(phys);
+	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+	NODE_DATA(nid)->node_start_pfn = start_pfn;
+	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+	unsigned long total_pages, paddr;
+	unsigned long end_pfn;
+	struct pglist_data *p;
+	int i;
+
+	p = NODE_DATA(nid);
+
+	/* Nothing to do.. */
+	if (!p->node_spanned_pages)
+		return;
+
+	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+
+	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+
+	paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+	if (!paddr)
+		panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+	free_bootmem_with_active_regions(nid, end_pfn);
+
+	/*
+	 * XXX Handle initial reservations for the system memory node
+	 * only for the moment, we'll refactor this later for handling
+	 * reservations in other nodes.
+	 */
+	if (nid == 0) {
+		/* Reserve the sections we're already using. */
+		for (i = 0; i < lmb.reserved.cnt; i++)
+			reserve_bootmem(lmb.reserved.region[i].base,
+					lmb_size_bytes(&lmb.reserved, i),
+					BOOTMEM_DEFAULT);
+	}
+
+	sparse_memory_present_with_active_regions(nid);
+}
+
+static void __init do_init_bootmem(void)
+{
+	int i;
+
+	/* Add active regions with valid PFNs. */
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long start_pfn, end_pfn;
+		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		__add_active_range(0, start_pfn, end_pfn);
+	}
+
+	/* All of system RAM sits in node 0 for the non-NUMA case */
+	allocate_pgdat(0);
+	node_set_online(0);
+
+	plat_mem_setup();
+
+	for_each_online_node(i)
+		bootmem_init_one_node(i);
+
+	sparse_init();
+}
+
+static void __init early_reserve_mem(void)
+{
+	unsigned long start_pfn;
+
+	/*
+	 * Partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	start_pfn = PFN_UP(__pa(_end));
+
+	/*
+	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
+	 * this in two steps (first step was init_bootmem()), because
+	 * this catches the (definitely buggy) case of us accidentally
+	 * initializing the bootmem allocator with an invalid RAM area.
+	 */
+	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+	/*
+	 * Handle additional early reservations
+	 */
+	check_for_initrd();
+	reserve_crashkernel();
+}
+
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	unsigned long vaddr, end;
 	int nid;
 
+	lmb_init();
+
+	sh_mv.mv_mem_init();
+
+	early_reserve_mem();
+
+	lmb_enforce_memory_limit(memory_limit);
+	lmb_analyze();
+
+	lmb_dump_all();
+
+	/*
+	 * Determine low and high memory ranges:
+	 */
+	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+	nodes_clear(node_online_map);
+
+	memory_start = (unsigned long)__va(__MEMORY_START);
+	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+
+	uncached_init();
+	pmb_init();
+	do_init_bootmem();
+	ioremap_fixed_init();
+
 	/* We don't need to map the kernel through the TLB, as
 	 * it is permanatly mapped using P1. So clear the
 	 * entire pgd. */
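Taken together, the rewritten paging_init() is the core of this merge: early memory discovery, reservation, and the mem= clamp all flow through the LMB API before bootmem is seeded per node. A condensed view of the new ordering, using only calls visible in the hunk above:

	lmb_init();				/* start with empty region tables */
	sh_mv.mv_mem_init();			/* board registers RAM, e.g. via lmb_add() */
	early_reserve_mem();			/* kernel text, zero page, initrd, crashkernel */
	lmb_enforce_memory_limit(memory_limit);	/* honor mem= */
	lmb_analyze();				/* finalize totals for lmb_phys_mem_size() */
	do_init_bootmem();			/* hand the LMB regions over to bootmem */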
arch/sh/mm/pmb.c

@@ -341,6 +341,8 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 	unsigned long flags, pmb_flags;
 	int i, mapped;
 
+	if (size < SZ_16M)
+		return -EINVAL;
 	if (!pmb_addr_valid(vaddr, size))
 		return -EFAULT;
 	if (pmb_mapping_exists(vaddr, phys, size))
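The SZ_16M check reflects the hardware constraint that 16MB is the smallest mapping size a PMB entry supports, so undersized requests now fail fast with -EINVAL instead of falling through. The new caller added in this merge is __add_active_range() in arch/sh/kernel/setup.c, which bolts in whole memory ranges:

	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
			 PAGE_KERNEL);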