Commit 2ef43ec7 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

kcore: use usual list for kclist

This patchset is for /proc/kcore.  With this,

 - many per-arch hooks are removed.

 - /proc/kcore will know the really valid physical memory areas.

 - /proc/kcore will be aware of memory hotplug.

 - /proc/kcore will be architecture independent, i.e.
   if an arch supports CONFIG_MMU, it can use /proc/kcore
   (provided the arch uses the usual memory layout).

This patch:

/proc/kcore uses its own list handling code.  It's better to use the
generic list code.

No change in logic; this is just a cleanup.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: WANG Cong <xiyou.wangcong@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d899bf7b
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
+#include <linux/list.h>
 
 #define CORE_STR "CORE"
@@ -57,7 +58,7 @@ struct memelfnote
 	void *data;
 };
 
-static struct kcore_list *kclist;
+static LIST_HEAD(kclist_head);
 static DEFINE_RWLOCK(kclist_lock);
 
 void
@@ -67,8 +68,7 @@ kclist_add(struct kcore_list *new, void *addr, size_t size)
 	new->size = size;
 
 	write_lock(&kclist_lock);
-	new->next = kclist;
-	kclist = new;
+	list_add_tail(&new->list, &kclist_head);
 	write_unlock(&kclist_lock);
 }
@@ -80,7 +80,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
 	*nphdr = 1; /* PT_NOTE */
 	size = 0;
 
-	for (m=kclist; m; m=m->next) {
+	list_for_each_entry(m, &kclist_head, list) {
 		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
 		if (try > size)
 			size = try;
@@ -192,7 +192,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
 	nhdr->p_align = 0;
 
 	/* setup ELF PT_LOAD program header for every area */
-	for (m=kclist; m; m=m->next) {
+	list_for_each_entry(m, &kclist_head, list) {
 		phdr = (struct elf_phdr *) bufp;
 		bufp += sizeof(struct elf_phdr);
 		offset += sizeof(struct elf_phdr);
@@ -317,7 +317,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		struct kcore_list *m;
 
 		read_lock(&kclist_lock);
-		for (m=kclist; m; m=m->next) {
+		list_for_each_entry(m, &kclist_head, list) {
 			if (start >= m->addr && start < (m->addr+m->size))
 				break;
 		}
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -79,7 +79,7 @@ struct proc_dir_entry {
 };
 
 struct kcore_list {
-	struct kcore_list *next;
+	struct list_head list;
 	unsigned long addr;
 	size_t size;
 };
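
For readers less familiar with the kernel's intrusive list API, below is a minimal,
self-contained userspace sketch of the pattern this patch adopts.  struct list_head
and the list_add_tail()/list_for_each_entry() helpers here are simplified stand-ins
for the kernel's <linux/list.h> (which additionally provides debug checks, RCU
variants, and more), locking is omitted, and the addresses and sizes in main() are
made-up sample values; kclist_add() and the walk loop mirror the converted code in
the diff above.

/*
 * Minimal userspace sketch of the intrusive-list pattern this patch
 * adopts.  The helpers below are simplified stand-ins for the
 * kernel's <linux/list.h>; only what is needed to mirror kclist_add()
 * and the converted loops is shown.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

/* Insert 'new' just before 'head', i.e. at the tail of the list. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Recover the containing structure from its embedded list_head. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk every entry; 'pos' is a cursor of the containing type. */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, __typeof__(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, __typeof__(*pos), member))

/* Mirrors the patched struct kcore_list: a list_head replaces *next. */
struct kcore_list {
	struct list_head list;
	unsigned long addr;
	size_t size;
};

static LIST_HEAD(kclist_head);

/* Mirrors kclist_add() after the patch (kclist_lock omitted here). */
static void kclist_add(struct kcore_list *new, void *addr, size_t size)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	list_add_tail(&new->list, &kclist_head);
}

int main(void)
{
	/* Made-up sample ranges, just to exercise the list. */
	static struct kcore_list a, b;
	struct kcore_list *m;

	kclist_add(&a, (void *)0x1000, 0x100);
	kclist_add(&b, (void *)0x2000, 0x200);

	/* Mirrors the converted loops in get_kcore_size()/read_kcore(). */
	list_for_each_entry(m, &kclist_head, list)
		printf("addr=%#lx size=%zu\n", m->addr, m->size);

	return 0;
}

Because the list_head is embedded in the object itself, entries need no separate
node allocation, and the same generic helpers (plus their debug-enabled kernel
counterparts) work for any structure, which is the main point of switching away
from the open-coded singly linked list.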