Commit 1406ec9b authored by Joonsoo Kim, committed by Linus Torvalds

mm, hugetlb: improve, cleanup resv_map parameters

To change the protection method for region tracking to a fine-grained one,
we pass the resv_map, instead of the list_head, to the region manipulation
functions.

This doesn't introduce any functional change, and it is just for
preparing a next step.

[davidlohr@hp.com: update changelog]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9119a41e
...@@ -151,8 +151,9 @@ struct file_region { ...@@ -151,8 +151,9 @@ struct file_region {
long to; long to;
}; };
static long region_add(struct list_head *head, long f, long t) static long region_add(struct resv_map *resv, long f, long t)
{ {
struct list_head *head = &resv->regions;
struct file_region *rg, *nrg, *trg; struct file_region *rg, *nrg, *trg;
/* Locate the region we are either in or before. */ /* Locate the region we are either in or before. */
...@@ -187,8 +188,9 @@ static long region_add(struct list_head *head, long f, long t) ...@@ -187,8 +188,9 @@ static long region_add(struct list_head *head, long f, long t)
return 0; return 0;
} }
static long region_chg(struct list_head *head, long f, long t) static long region_chg(struct resv_map *resv, long f, long t)
{ {
struct list_head *head = &resv->regions;
struct file_region *rg, *nrg; struct file_region *rg, *nrg;
long chg = 0; long chg = 0;
...@@ -236,8 +238,9 @@ static long region_chg(struct list_head *head, long f, long t) ...@@ -236,8 +238,9 @@ static long region_chg(struct list_head *head, long f, long t)
return chg; return chg;
} }
static long region_truncate(struct list_head *head, long end) static long region_truncate(struct resv_map *resv, long end)
{ {
struct list_head *head = &resv->regions;
struct file_region *rg, *trg; struct file_region *rg, *trg;
long chg = 0; long chg = 0;
...@@ -266,8 +269,9 @@ static long region_truncate(struct list_head *head, long end) ...@@ -266,8 +269,9 @@ static long region_truncate(struct list_head *head, long end)
return chg; return chg;
} }
static long region_count(struct list_head *head, long f, long t) static long region_count(struct resv_map *resv, long f, long t)
{ {
struct list_head *head = &resv->regions;
struct file_region *rg; struct file_region *rg;
long chg = 0; long chg = 0;
...@@ -393,7 +397,7 @@ void resv_map_release(struct kref *ref) ...@@ -393,7 +397,7 @@ void resv_map_release(struct kref *ref)
struct resv_map *resv_map = container_of(ref, struct resv_map, refs); struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
/* Clear out any active regions before we release the map. */ /* Clear out any active regions before we release the map. */
region_truncate(&resv_map->regions, 0); region_truncate(resv_map, 0);
kfree(resv_map); kfree(resv_map);
} }
...@@ -1152,7 +1156,7 @@ static long vma_needs_reservation(struct hstate *h, ...@@ -1152,7 +1156,7 @@ static long vma_needs_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr); pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = inode->i_mapping->private_data; struct resv_map *resv = inode->i_mapping->private_data;
return region_chg(&resv->regions, idx, idx + 1); return region_chg(resv, idx, idx + 1);
} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
return 1; return 1;
...@@ -1162,7 +1166,7 @@ static long vma_needs_reservation(struct hstate *h, ...@@ -1162,7 +1166,7 @@ static long vma_needs_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr); pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = vma_resv_map(vma); struct resv_map *resv = vma_resv_map(vma);
err = region_chg(&resv->regions, idx, idx + 1); err = region_chg(resv, idx, idx + 1);
if (err < 0) if (err < 0)
return err; return err;
return 0; return 0;
...@@ -1178,14 +1182,14 @@ static void vma_commit_reservation(struct hstate *h, ...@@ -1178,14 +1182,14 @@ static void vma_commit_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr); pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = inode->i_mapping->private_data; struct resv_map *resv = inode->i_mapping->private_data;
region_add(&resv->regions, idx, idx + 1); region_add(resv, idx, idx + 1);
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr); pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = vma_resv_map(vma); struct resv_map *resv = vma_resv_map(vma);
/* Mark this page used in the map. */ /* Mark this page used in the map. */
region_add(&resv->regions, idx, idx + 1); region_add(resv, idx, idx + 1);
} }
} }
...@@ -2276,7 +2280,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) ...@@ -2276,7 +2280,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
end = vma_hugecache_offset(h, vma, vma->vm_end); end = vma_hugecache_offset(h, vma, vma->vm_end);
reserve = (end - start) - reserve = (end - start) -
region_count(&resv->regions, start, end); region_count(resv, start, end);
resv_map_put(vma); resv_map_put(vma);
...@@ -3178,7 +3182,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -3178,7 +3182,7 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE) { if (!vma || vma->vm_flags & VM_MAYSHARE) {
resv_map = inode->i_mapping->private_data; resv_map = inode->i_mapping->private_data;
chg = region_chg(&resv_map->regions, from, to); chg = region_chg(resv_map, from, to);
} else { } else {
resv_map = resv_map_alloc(); resv_map = resv_map_alloc();
...@@ -3224,7 +3228,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -3224,7 +3228,7 @@ int hugetlb_reserve_pages(struct inode *inode,
* else has to be done for private mappings here * else has to be done for private mappings here
*/ */
if (!vma || vma->vm_flags & VM_MAYSHARE) if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&resv_map->regions, from, to); region_add(resv_map, from, to);
return 0; return 0;
out_err: out_err:
if (vma) if (vma)
...@@ -3240,7 +3244,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) ...@@ -3240,7 +3244,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
struct hugepage_subpool *spool = subpool_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode);
if (resv_map) if (resv_map)
chg = region_truncate(&resv_map->regions, offset); chg = region_truncate(resv_map, offset);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed); inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment