Commit 50be6345 authored by Philipp Hachtmann, committed by Martin Schwidefsky

s390/mm: Convert bootmem to memblock

The original bootmem allocator is replaced by memblock. To cover the
needs of the s390 kdump implementation, the physical memory list
(memblock.physmem) is used.
With this patch the bootmem allocator and its bitmaps are completely
removed from s390.
Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 70210ed9
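
The key idea of the conversion: memblock keeps two lists here. memblock.memory holds the usable memory ranges and may later be trimmed (e.g. by mem= or for kdump), while memblock.physmem, enabled by the new HAVE_MEMBLOCK_PHYS_MAP select, records the full detected physical memory layout that the s390 dump tools still need. Below is a small stand-alone sketch of that dual-registration idea; it is not part of the patch, and all names in it are hypothetical stand-ins for the real memblock structures.

```c
/* Stand-alone model of the patch's dual-list registration (hypothetical
 * types; the real code uses struct memblock_region/memblock_type). */
#include <stdio.h>

struct region { unsigned long base, size; };

static struct region memory[16], physmem[16]; /* models memblock.memory/.physmem */
static int memory_cnt, physmem_cnt;

/* models memblock_physmem_add(): every detected chunk goes on both lists */
static void physmem_add(unsigned long base, unsigned long size)
{
	memory[memory_cnt++]   = (struct region){ base, size };
	physmem[physmem_cnt++] = (struct region){ base, size };
}

int main(void)
{
	physmem_add(0x00000000UL, 0x40000000UL);	/* first 1 GiB chunk */
	physmem_add(0x40000000UL, 0x40000000UL);	/* second 1 GiB chunk */

	/* "mem=512M": only the usable list is trimmed; physmem keeps the
	 * full layout so a dump kernel can still see all of memory. */
	memory_cnt = 1;
	memory[0].size = 0x20000000UL;

	for (int i = 0; i < memory_cnt; i++)
		printf("memory : %#lx-%#lx\n", memory[i].base,
		       memory[i].base + memory[i].size);
	for (int i = 0; i < physmem_cnt; i++)
		printf("physmem: %#lx-%#lx\n", physmem[i].base,
		       physmem[i].base + physmem[i].size);
	return 0;
}
```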
@@ -60,7 +60,6 @@ config PCI_QUIRKS
 config S390
 	def_bool y
-	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -130,6 +129,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select HAVE_MEMBLOCK_PHYS_MAP
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
@@ -139,6 +139,7 @@ config S390
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
+	select NO_BOOTMEM
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select SYSCTL_EXCEPTION_TRACE
......
@@ -9,7 +9,6 @@
 #define PARMAREA		0x10400
-#define MEMORY_CHUNKS		256
 
 #ifndef __ASSEMBLY__
@@ -31,22 +30,11 @@
 #endif /* CONFIG_64BIT */
 #define COMMAND_LINE      ((char *)            (0x10480))
 
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY  1
-
-struct mem_chunk {
-	unsigned long addr;
-	unsigned long size;
-	int type;
-};
-
-extern struct mem_chunk memory_chunk[];
 extern int memory_end_set;
 extern unsigned long memory_end;
+extern unsigned long max_physmem_end;
 
-void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
-void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
-		     unsigned long size);
+extern void detect_memory_memblock(void);
 
 /*
  * Machine features detected in head.S
......
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bootmem.h>
 #include <linux/elf.h>
+#include <linux/memblock.h>
 #include <asm/os_info.h>
 #include <asm/elf.h>
 #include <asm/ipl.h>
@@ -22,6 +23,24 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+static struct memblock_region oldmem_region;
+
+static struct memblock_type oldmem_type = {
+	.cnt = 1,
+	.max = 1,
+	.total_size = 0,
+	.regions = &oldmem_region,
+};
+
+#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
+				     &oldmem_type, p_start,		\
+				     p_end, p_nid);			\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_mem_range(&i, nid, &memblock.physmem,		\
+			      &oldmem_type,				\
+			      p_start, p_end, p_nid))
+
 struct dump_save_areas dump_save_areas;
 
 /*
@@ -263,19 +282,6 @@ static void *kzalloc_panic(int len)
 	return rc;
 }
 
-/*
- * Get memory layout and create hole for oldmem
- */
-static struct mem_chunk *get_memory_layout(void)
-{
-	struct mem_chunk *chunk_array;
-
-	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
-	detect_memory_layout(chunk_array, 0);
-	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
-	return chunk_array;
-}
-
 /*
  * Initialize ELF note
  */
@@ -490,52 +496,33 @@ static int get_cpu_cnt(void)
  */
 static int get_mem_chunk_cnt(void)
 {
-	struct mem_chunk *chunk_array, *mem_chunk;
-	int i, cnt = 0;
+	int cnt = 0;
+	u64 idx;
 
-	chunk_array = get_memory_layout();
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		mem_chunk = &chunk_array[i];
-		if (chunk_array[i].type != CHUNK_READ_WRITE &&
-		    chunk_array[i].type != CHUNK_READ_ONLY)
-			continue;
-		if (mem_chunk->size == 0)
-			continue;
+	for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
 		cnt++;
-	}
-	kfree(chunk_array);
 	return cnt;
 }
 
 /*
  * Initialize ELF loads (new kernel)
  */
-static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
+static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
 {
-	struct mem_chunk *chunk_array, *mem_chunk;
-	int i;
+	phys_addr_t start, end;
+	u64 idx;
 
-	chunk_array = get_memory_layout();
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		mem_chunk = &chunk_array[i];
-		if (mem_chunk->size == 0)
-			continue;
-		if (chunk_array[i].type != CHUNK_READ_WRITE &&
-		    chunk_array[i].type != CHUNK_READ_ONLY)
-			continue;
-		else
-			phdr->p_filesz = mem_chunk->size;
+	for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) {
+		phdr->p_filesz = end - start;
 		phdr->p_type = PT_LOAD;
-		phdr->p_offset = mem_chunk->addr;
-		phdr->p_vaddr = mem_chunk->addr;
-		phdr->p_paddr = mem_chunk->addr;
-		phdr->p_memsz = mem_chunk->size;
+		phdr->p_offset = start;
+		phdr->p_vaddr = start;
+		phdr->p_paddr = start;
+		phdr->p_memsz = end - start;
 		phdr->p_flags = PF_R | PF_W | PF_X;
 		phdr->p_align = PAGE_SIZE;
 		phdr++;
 	}
-	kfree(chunk_array);
-	return i;
 }
 
 /*
@@ -584,6 +571,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 	/* If we cannot get HSA size for zfcpdump return error */
 	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
 		return -ENODEV;
+
+	/* For kdump, exclude previous crashkernel memory */
+	if (OLDMEM_BASE) {
+		oldmem_region.base = OLDMEM_BASE;
+		oldmem_region.size = OLDMEM_SIZE;
+		oldmem_type.total_size = OLDMEM_SIZE;
+	}
+
 	mem_chunk_cnt = get_mem_chunk_cnt();
 
 	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
......
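How the new iteration works: for_each_dump_mem_range() hands two lists to __next_mem_range(), which yields the ranges of the first list (memblock.physmem) that are not covered by the second (oldmem_type, which holds the crashkernel region once OLDMEM_BASE is set). The PT_LOAD headers therefore skip the memory the capture kernel itself occupies; when OLDMEM_BASE is unset, oldmem_region stays zero-sized and nothing is excluded. The sketch below is a self-contained user-space model of that "first list minus second list" walk, with hypothetical names; in the kernel the work happens inside __next_mem_range().

```c
/* Model of what for_each_dump_mem_range() yields: ranges from a
 * "physmem" list minus the ranges in an exclusion list. ranges_minus()
 * is a hypothetical helper, assuming both lists are sorted. */
#include <stdio.h>

struct range { unsigned long start, end; };

static void ranges_minus(const struct range *a, int na,
			 const struct range *b, int nb)
{
	for (int i = 0; i < na; i++) {
		unsigned long pos = a[i].start;

		for (int j = 0; j < nb; j++) {
			if (b[j].end <= pos || b[j].start >= a[i].end)
				continue;	/* no overlap with a[i] */
			if (b[j].start > pos)
				printf("dump %#lx-%#lx\n", pos, b[j].start);
			pos = b[j].end;		/* skip the excluded part */
		}
		if (pos < a[i].end)
			printf("dump %#lx-%#lx\n", pos, a[i].end);
	}
}

int main(void)
{
	struct range physmem[] = { { 0x0, 0x80000000UL } };	   /* 2 GiB */
	struct range oldmem[]  = { { 0x10000000UL, 0x20000000UL } }; /* crashkernel */

	ranges_minus(physmem, 1, oldmem, 1);	/* PT_LOAD segments to dump */
	return 0;
}
```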
@@ -258,13 +258,19 @@ static __init void setup_topology(void)
 static void early_pgm_check_handler(void)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long cr0, cr0_new;
 	unsigned long addr;
 
 	addr = S390_lowcore.program_old_psw.addr;
 	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
 	if (!fixup)
 		disabled_wait(0);
+	/* Disable low address protection before storing into lowcore. */
+	__ctl_store(cr0, 0, 0);
+	cr0_new = cr0 & ~(1UL << 28);
+	__ctl_load(cr0_new, 0, 0);
 	S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
+	__ctl_load(cr0, 0, 0);
 }
 
 static noinline __init void setup_lowcore_early(void)
......
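A note on the bit manipulated above: the handler stores the fixup address back into the lowcore at absolute address 0, an area guarded by low-address protection. In the kernel's 64-bit control-register representation, `1UL << 28` corresponds to CR0 bit 35 in IBM bit numbering (bit 0 is the most significant bit, so 63 - 28 = 35), which is the z/Architecture low-address-protection control. The handler saves CR0, clears that bit for the single store, and then restores the saved value.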
@@ -59,7 +59,6 @@ ENTRY(startup_continue)
 	.long	0			# cr13: home space segment table
 	.long	0xc0000000		# cr14: machine check handling off
 	.long	0			# cr15: linkage stack operations
-.Lmchunk:.long	memory_chunk
 .Lbss_bgn:  .long __bss_start
 .Lbss_end:  .long _end
 .Lparmaddr: .long PARMAREA
......
This diff is collapsed.
@@ -333,7 +333,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = alloc_bootmem(sizeof(struct mask_info));
+		mask->next = alloc_bootmem_align(
+			roundup_pow_of_two(sizeof(struct mask_info)),
+			roundup_pow_of_two(sizeof(struct mask_info)));
 		mask = mask->next;
 	}
 }
......
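Note that the topology change does more than swap allocators: both the size and the alignment passed to alloc_bootmem_align() are rounded up to the next power of two, so every mask_info lands in a naturally aligned block, and a power-of-two-sized block at a matching alignment can never straddle a boundary of its own size. For example, a hypothetical 24-byte struct mask_info would be allocated as a 32-byte block at a 32-byte alignment.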
@@ -6,130 +6,60 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 #include <asm/ipl.h>
 #include <asm/sclp.h>
 #include <asm/setup.h>
 
 #define ADDR2G (1ULL << 31)
 
-static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize)
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY  1
+
+static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
+{
+	memblock_add_range(&memblock.memory, start, size, 0, 0);
+	memblock_add_range(&memblock.physmem, start, size, 0, 0);
+}
+
+void __init detect_memory_memblock(void)
 {
 	unsigned long long memsize, rnmax, rzm;
-	unsigned long addr = 0, size;
-	int i = 0, type;
+	unsigned long addr, size;
+	int type;
 
 	rzm = sclp_get_rzm();
 	rnmax = sclp_get_rnmax();
 	memsize = rzm * rnmax;
 	if (!rzm)
 		rzm = 1ULL << 17;
-	if (sizeof(long) == 4) {
+	if (IS_ENABLED(CONFIG_32BIT)) {
 		rzm = min(ADDR2G, rzm);
-		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
+		memsize = min(ADDR2G, memsize);
 	}
-	if (maxsize)
-		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize;
+	max_physmem_end = memsize;
+	addr = 0;
+	/* keep memblock lists close to the kernel */
+	memblock_set_bottom_up(true);
 	do {
 		size = 0;
 		type = tprot(addr);
 		do {
 			size += rzm;
-			if (memsize && addr + size >= memsize)
+			if (max_physmem_end && addr + size >= max_physmem_end)
 				break;
 		} while (type == tprot(addr + size));
 		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
-			if (memsize && (addr + size > memsize))
-				size = memsize - addr;
-			chunk[i].addr = addr;
-			chunk[i].size = size;
-			chunk[i].type = type;
-			i++;
+			if (max_physmem_end && (addr + size > max_physmem_end))
+				size = max_physmem_end - addr;
+			memblock_physmem_add(addr, size);
 		}
 		addr += size;
-	} while (addr < memsize && i < MEMORY_CHUNKS);
+	} while (addr < max_physmem_end);
+	memblock_set_bottom_up(false);
+	if (!max_physmem_end)
+		max_physmem_end = memblock_end_of_DRAM();
 }
-
-/**
- * detect_memory_layout - fill mem_chunk array with memory layout data
- * @chunk: mem_chunk array to be filled
- * @maxsize: maximum address where memory detection should stop
- *
- * Fills the passed in memory chunk array with the memory layout of the
- * machine. The array must have a size of at least MEMORY_CHUNKS and will
- * be fully initialized afterwards.
- * If the maxsize paramater has a value > 0 memory detection will stop at
- * that address. It is guaranteed that all chunks have an ending address
- * that is smaller than maxsize.
- * If maxsize is 0 all memory will be detected.
- */
-void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
-{
-	unsigned long flags, flags_dat, cr0;
-
-	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
-	/*
-	 * Disable IRQs, DAT and low address protection so tprot does the
-	 * right thing and we don't get scheduled away with low address
-	 * protection disabled.
-	 */
-	local_irq_save(flags);
-	flags_dat = __arch_local_irq_stnsm(0xfb);
-	/*
-	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc
-	 * space. We have disabled DAT and any access to vmalloc area will
-	 * cause an exception.
-	 * If DAT was disabled we are called from early ipl code.
-	 */
-	if (test_bit(5, &flags_dat)) {
-		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk)))
-			goto out;
-	}
-	__ctl_store(cr0, 0, 0);
-	__ctl_clear_bit(0, 28);
-	find_memory_chunks(chunk, maxsize);
-	__ctl_load(cr0, 0, 0);
-out:
-	__arch_local_irq_ssm(flags_dat);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(detect_memory_layout);
-
-/*
- * Create memory hole with given address and size.
- */
-void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
-		     unsigned long size)
-{
-	int i;
-
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &mem_chunk[i];
-
-		if (chunk->size == 0)
-			continue;
-		if (addr > chunk->addr + chunk->size)
-			continue;
-		if (addr + size <= chunk->addr)
-			continue;
-		/* Split */
-		if ((addr > chunk->addr) &&
-		    (addr + size < chunk->addr + chunk->size)) {
-			struct mem_chunk *new = chunk + 1;
-
-			memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
-			new->addr = addr + size;
-			new->size = chunk->addr + chunk->size - new->addr;
-			chunk->size = addr - chunk->addr;
-			continue;
-		} else if ((addr <= chunk->addr) &&
-			   (addr + size >= chunk->addr + chunk->size)) {
-			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
-			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
-		} else if (addr + size < chunk->addr + chunk->size) {
-			chunk->size = chunk->addr + chunk->size - addr - size;
-			chunk->addr = addr + size;
-		} else if (addr > chunk->addr) {
-			chunk->size = addr - chunk->addr;
-		}
-	}
-}
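The rewritten detection keeps the old tprot() probing strategy: memory is scanned in rzm-sized increments (the SCLP storage increment), consecutive increments with the same protection result are merged into one chunk, and only readable chunks are registered; the loop also brackets itself with memblock_set_bottom_up(true/false) so that memblock's own bookkeeping allocations land near the kernel, as the in-line comment says. A self-contained model of the merging loop, with a fake tprot standing in for the real TEST PROTECTION instruction, is sketched below; the layout data is invented for illustration.

```c
/* Model of the tprot()-based chunk merging in detect_memory_memblock().
 * fake_tprot() stands in for the real TEST PROTECTION instruction. */
#include <stdio.h>

#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY  1
#define CHUNK_NONE       (-1)

/* invented per-increment protection results for an 8 MiB machine */
static const int key[8] = { 0, 0, 1, 1, 1, CHUNK_NONE, 0, 0 };
static const unsigned long rzm = 0x100000;	/* 1 MiB storage increment */
static const unsigned long memsize = 8 * 0x100000;

static int fake_tprot(unsigned long addr)
{
	return key[addr / rzm];
}

int main(void)
{
	unsigned long addr = 0, size;
	int type;

	do {
		size = 0;
		type = fake_tprot(addr);
		do {	/* merge increments with the same protection type */
			size += rzm;
			if (addr + size >= memsize)
				break;
		} while (type == fake_tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY)
			printf("chunk %#lx-%#lx type %d\n",
			       addr, addr + size, type);
		addr += size;
	} while (addr < memsize);
	return 0;
}
```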
@@ -10,6 +10,7 @@
 #include <linux/list.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
+#include <linux/memblock.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -66,7 +67,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
 	if (slab_is_available())
 		pte = (pte_t *) page_table_alloc(&init_mm, address);
 	else
-		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
+		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
+					  PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
 	clear_table((unsigned long *) pte, _PAGE_INVALID,
@@ -371,16 +373,14 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
 void __init vmem_map_init(void)
 {
 	unsigned long ro_start, ro_end;
-	unsigned long start, end;
-	int i;
+	struct memblock_region *reg;
+	phys_addr_t start, end;
 
 	ro_start = PFN_ALIGN((unsigned long)&_stext);
 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (!memory_chunk[i].size)
-			continue;
-		start = memory_chunk[i].addr;
-		end = memory_chunk[i].addr + memory_chunk[i].size;
+	for_each_memblock(memory, reg) {
+		start = reg->base;
+		end = reg->base + reg->size - 1;
 		if (start >= ro_end || end <= ro_start)
 			vmem_add_mem(start, end - start, 0);
 		else if (start >= ro_start && end <= ro_end)
@@ -400,23 +400,21 @@ void __init vmem_map_init(void)
 }
 
 /*
- * Convert memory chunk array to a memory segment list so there is a single
- * list that contains both r/w memory and shared memory segments.
+ * Convert memblock.memory to a memory segment list so there is a single
+ * list that contains all memory segments.
  */
 static int __init vmem_convert_memory_chunk(void)
 {
+	struct memblock_region *reg;
 	struct memory_segment *seg;
-	int i;
 
 	mutex_lock(&vmem_mutex);
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (!memory_chunk[i].size)
-			continue;
+	for_each_memblock(memory, reg) {
 		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
 		if (!seg)
 			panic("Out of memory...\n");
-		seg->start = memory_chunk[i].addr;
-		seg->size = memory_chunk[i].size;
+		seg->start = reg->base;
+		seg->size = reg->size;
 		insert_memory_segment(seg);
 	}
 	mutex_unlock(&vmem_mutex);
......
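The for_each_memblock() iterator used in both vmem.c hunks (and in zcore.c below) is a plain linear walk over the region array of the named list. For reference, its assumed shape in kernels of this era is shown below; this is quoted from memory of <linux/memblock.h>, not from the patch itself.

```c
/* Assumed definition from <linux/memblock.h> of this era (for reference;
 * not part of this patch). */
#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)
```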
@@ -17,6 +17,8 @@
 #include <linux/miscdevice.h>
 #include <linux/debugfs.h>
 #include <linux/module.h>
+#include <linux/memblock.h>
+
 #include <asm/asm-offsets.h>
 #include <asm/ipl.h>
 #include <asm/sclp.h>
@@ -411,33 +413,24 @@ static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
 				 size_t count, loff_t *ppos)
 {
 	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
-				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
+				       memblock.memory.cnt * CHUNK_INFO_SIZE);
 }
 
 static int zcore_memmap_open(struct inode *inode, struct file *filp)
 {
-	int i;
+	struct memblock_region *reg;
 	char *buf;
-	struct mem_chunk *chunk_array;
+	int i = 0;
 
-	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
-			      GFP_KERNEL);
-	if (!chunk_array)
-		return -ENOMEM;
-	detect_memory_layout(chunk_array, 0);
-	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
+	buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL);
 	if (!buf) {
-		kfree(chunk_array);
 		return -ENOMEM;
 	}
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
-			(unsigned long long) chunk_array[i].addr,
-			(unsigned long long) chunk_array[i].size);
-		if (chunk_array[i].size == 0)
-			break;
+	for_each_memblock(memory, reg) {
+		sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
+			(unsigned long long) reg->base,
+			(unsigned long long) reg->size);
 	}
-	kfree(chunk_array);
 	filp->private_data = buf;
 	return nonseekable_open(inode, filp);
 }
@@ -593,21 +586,12 @@ static int __init check_sdias(void)
 static int __init get_mem_info(unsigned long *mem, unsigned long *end)
 {
-	int i;
-	struct mem_chunk *chunk_array;
+	struct memblock_region *reg;
 
-	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
-			      GFP_KERNEL);
-	if (!chunk_array)
-		return -ENOMEM;
-	detect_memory_layout(chunk_array, 0);
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (chunk_array[i].size == 0)
-			break;
-		*mem += chunk_array[i].size;
-		*end = max(*end, chunk_array[i].addr + chunk_array[i].size);
+	for_each_memblock(memory, reg) {
+		*mem += reg->size;
+		*end = max_t(unsigned long, *end, reg->base + reg->size);
 	}
-	kfree(chunk_array);
 	return 0;
 }
......