Commit 91cdee4c authored by Miles Bader, committed by Linus Torvalds

[PATCH] Move `ptrinfo' function from mm/slab.c to mm/memory.c

This function doesn't compile on non-MMU systems, so put it in a place
where it won't cause problems (mm/memory.c is only compiled if
CONFIG_MMU is defined).
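
For orientation: because mm/memory.c sits behind CONFIG_MMU, after this move the function simply is not built on MMU-less kernels. Below is a minimal sketch of how a declaration could stay safe to include on both kinds of builds; the no-op stub and its placement are illustrative assumptions, not part of this commit.

/* Sketch only: let callers compile on !CONFIG_MMU kernels.  The real
 * kernel may declare ptrinfo() elsewhere; the stub is illustrative. */
#ifdef CONFIG_MMU
extern void ptrinfo(unsigned long addr);
#else
static inline void ptrinfo(unsigned long addr) { /* no MMU: nothing to dump */ }
#endif

With a guard like this, debug call sites need no #ifdefs of their own; on non-MMU configurations the call collapses to an empty inline.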
parent cbe39894
mm/memory.c
@@ -1701,3 +1701,76 @@ struct page * vmalloc_to_page(void * vmalloc_addr)
}
EXPORT_SYMBOL(vmalloc_to_page);

void ptrinfo(unsigned long addr)
{
        struct page *page;

        printk("Dumping data about address %p.\n", (void*)addr);
        if (!virt_addr_valid((void*)addr)) {
                printk("virt addr invalid.\n");
                return;
        }
        do {
                pgd_t *pgd = pgd_offset_k(addr);
                pmd_t *pmd;
                if (pgd_none(*pgd)) {
                        printk("No pgd.\n");
                        break;
                }
                pmd = pmd_offset(pgd, addr);
                if (pmd_none(*pmd)) {
                        printk("No pmd.\n");
                        break;
                }
#ifdef CONFIG_X86
                if (pmd_large(*pmd)) {
                        printk("Large page.\n");
                        break;
                }
#endif
                printk("normal page, pte_val 0x%llx\n",
                        (unsigned long long)pte_val(*pte_offset_kernel(pmd, addr)));
        } while(0);

        page = virt_to_page((void*)addr);
        printk("struct page at %p, flags %lxh.\n", page, page->flags);
        if (PageSlab(page)) {
                kmem_cache_t *c;
                struct slab *s;
                unsigned long flags;
                int objnr;
                void *objp;

                c = GET_PAGE_CACHE(page);
                printk("belongs to cache %s.\n",c->name);

                spin_lock_irqsave(&c->spinlock, flags);
                s = GET_PAGE_SLAB(page);
                printk("slabp %p with %d inuse objects (from %d).\n",
                        s, s->inuse, c->num);
                check_slabp(c,s);

                objnr = (addr-(unsigned long)s->s_mem)/c->objsize;
                objp = s->s_mem+c->objsize*objnr;
                printk("points into object no %d, starting at %p, len %d.\n",
                        objnr, objp, c->objsize);
                if (objnr >= c->num) {
                        printk("Bad obj number.\n");
                } else {
                        kernel_map_pages(virt_to_page(objp),
                                        c->objsize/PAGE_SIZE, 1);

                        if (c->flags & SLAB_RED_ZONE)
                                printk("redzone: 0x%lx/0x%lx.\n",
                                        *dbg_redzone1(c, objp),
                                        *dbg_redzone2(c, objp));

                        if (c->flags & SLAB_STORE_USER)
                                printk("Last user: %p.\n",
                                        *dbg_userword(c, objp));
                }
                spin_unlock_irqrestore(&c->spinlock, flags);
        }
}
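
For context, a minimal usage sketch of the helper added above, assuming an MMU kernel; the wrapper function, the extern declaration, and the kmalloc'd test buffer are hypothetical and not part of the commit.

#include <linux/kernel.h>
#include <linux/slab.h>

extern void ptrinfo(unsigned long addr);        /* assumed visible to this file */

/* Hypothetical debug helper: dump page-table and slab information
 * for a pointer under suspicion. */
static void dump_suspect_pointer(void)
{
        void *buf = kmalloc(64, GFP_KERNEL);    /* slab-backed allocation */

        if (!buf)
                return;
        /* Prints pgd/pmd/pte state and, because this page is a slab page,
         * the owning cache, object number, and optional redzone/user info. */
        ptrinfo((unsigned long)buf);
        kfree(buf);
}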
mm/slab.c
@@ -2763,76 +2763,3 @@ unsigned int ksize(const void *objp)
return size;
}

void ptrinfo(unsigned long addr)
{
        struct page *page;

        printk("Dumping data about address %p.\n", (void*)addr);
        if (!virt_addr_valid((void*)addr)) {
                printk("virt addr invalid.\n");
                return;
        }
        do {
                pgd_t *pgd = pgd_offset_k(addr);
                pmd_t *pmd;
                if (pgd_none(*pgd)) {
                        printk("No pgd.\n");
                        break;
                }
                pmd = pmd_offset(pgd, addr);
                if (pmd_none(*pmd)) {
                        printk("No pmd.\n");
                        break;
                }
#ifdef CONFIG_X86
                if (pmd_large(*pmd)) {
                        printk("Large page.\n");
                        break;
                }
#endif
                printk("normal page, pte_val 0x%llx\n",
                        (unsigned long long)pte_val(*pte_offset_kernel(pmd, addr)));
        } while(0);

        page = virt_to_page((void*)addr);
        printk("struct page at %p, flags %lxh.\n", page, page->flags);
        if (PageSlab(page)) {
                kmem_cache_t *c;
                struct slab *s;
                unsigned long flags;
                int objnr;
                void *objp;

                c = GET_PAGE_CACHE(page);
                printk("belongs to cache %s.\n",c->name);

                spin_lock_irqsave(&c->spinlock, flags);
                s = GET_PAGE_SLAB(page);
                printk("slabp %p with %d inuse objects (from %d).\n",
                        s, s->inuse, c->num);
                check_slabp(c,s);

                objnr = (addr-(unsigned long)s->s_mem)/c->objsize;
                objp = s->s_mem+c->objsize*objnr;
                printk("points into object no %d, starting at %p, len %d.\n",
                        objnr, objp, c->objsize);
                if (objnr >= c->num) {
                        printk("Bad obj number.\n");
                } else {
                        kernel_map_pages(virt_to_page(objp),
                                        c->objsize/PAGE_SIZE, 1);

                        if (c->flags & SLAB_RED_ZONE)
                                printk("redzone: 0x%lx/0x%lx.\n",
                                        *dbg_redzone1(c, objp),
                                        *dbg_redzone2(c, objp));

                        if (c->flags & SLAB_STORE_USER)
                                printk("Last user: %p.\n",
                                        *dbg_userword(c, objp));
                }
                spin_unlock_irqrestore(&c->spinlock, flags);
        }
}