Commit 72945d86 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Darrick J. Wong

xfs: make mem_to_page available outside of xfs_buf.c

Rename the function to kmem_to_page and move it to kmem.h together
with our kmem_large allocator that may either return kmalloced or
vmalloc pages.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent ce89755c
...@@ -124,4 +124,12 @@ kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags) ...@@ -124,4 +124,12 @@ kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
return kmem_zone_alloc(zone, flags | KM_ZERO); return kmem_zone_alloc(zone, flags | KM_ZERO);
} }
/*
 * Return the struct page backing a kernel virtual address, whether the
 * memory came from the vmalloc area or the linear (kmalloc) mapping.
 */
static inline struct page *
kmem_to_page(void *addr)
{
	if (!is_vmalloc_addr(addr))
		return virt_to_page(addr);
	return vmalloc_to_page(addr);
}
#endif /* __XFS_SUPPORT_KMEM_H__ */ #endif /* __XFS_SUPPORT_KMEM_H__ */
...@@ -934,17 +934,6 @@ xfs_buf_set_empty( ...@@ -934,17 +934,6 @@ xfs_buf_set_empty(
bp->b_maps[0].bm_len = bp->b_length; bp->b_maps[0].bm_len = bp->b_length;
} }
/*
 * Translate a kernel virtual address to its backing struct page,
 * handling both vmalloc and directly mapped (kmalloc) addresses.
 */
static inline struct page *
mem_to_page(
	void		*addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
int int
xfs_buf_associate_memory( xfs_buf_associate_memory(
xfs_buf_t *bp, xfs_buf_t *bp,
...@@ -977,7 +966,7 @@ xfs_buf_associate_memory( ...@@ -977,7 +966,7 @@ xfs_buf_associate_memory(
bp->b_offset = offset; bp->b_offset = offset;
for (i = 0; i < bp->b_page_count; i++) { for (i = 0; i < bp->b_page_count; i++) {
bp->b_pages[i] = mem_to_page((void *)pageaddr); bp->b_pages[i] = kmem_to_page((void *)pageaddr);
pageaddr += PAGE_SIZE; pageaddr += PAGE_SIZE;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment