Commit 2e710fbb authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] hugetlbfs - 'recovering' too many blocks on failure

From: Zwane Mwaikambo <zwane@linuxpower.ca>

The code appears to be able to add too many blocks back to
sbinfo->free_blocks in the failure path. We first do;

len = vma->vm_end - vma->vm_start;
sbinfo->free_blocks -= len;

but then later do;
len = (vma->vm_end - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT)

error:
sbinfo->free_blocks += len;
parent b6a040f5
@@ -48,7 +48,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	struct inode *inode = file->f_dentry->d_inode;
 	struct address_space *mapping = inode->i_mapping;
 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(inode->i_sb);
-	loff_t len;
+	loff_t len, vma_len;
 	int ret;
 
 	if (vma->vm_start & ~HPAGE_MASK)
@@ -60,11 +60,11 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
 		return -EINVAL;
 
-	len = (loff_t)(vma->vm_end - vma->vm_start);
+	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 	if (sbinfo->free_blocks >= 0) { /* Check if there is any size limit. */
 		spin_lock(&sbinfo->stat_lock);
-		if ((len >> HPAGE_SHIFT) <= sbinfo->free_blocks) {
-			sbinfo->free_blocks -= (len >> HPAGE_SHIFT);
+		if ((vma_len >> HPAGE_SHIFT) <= sbinfo->free_blocks) {
+			sbinfo->free_blocks -= (vma_len >> HPAGE_SHIFT);
 			spin_unlock(&sbinfo->stat_lock);
 		} else {
 			spin_unlock(&sbinfo->stat_lock);
@@ -78,8 +78,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
 	vma->vm_ops = &hugetlb_vm_ops;
 	ret = hugetlb_prefault(mapping, vma);
-	len = (loff_t)(vma->vm_end - vma->vm_start) +
-		((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 	if (ret == 0 && inode->i_size < len)
 		inode->i_size = len;
 	up(&inode->i_sem);
@@ -89,7 +88,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	 */
 	if ((ret != 0) && (sbinfo->free_blocks >= 0)) {
 		spin_lock(&sbinfo->stat_lock);
-		sbinfo->free_blocks += (len >> HPAGE_SHIFT);
+		sbinfo->free_blocks += (vma_len >> HPAGE_SHIFT);
 		spin_unlock(&sbinfo->stat_lock);
 	}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment