Commit 21c2baef authored by Andrew Morton, committed by Jaroslav Kysela

[PATCH] hugetlb fixes

From Rohit

1) hugetlbfs_zero_setup returns ENOMEM in case the requested size
   cannot easily be handled.

2) Preference is given to LOW_MEM while freeing pages from the
   hugetlbpage free list.
parent c720c50a
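To make point 2 concrete before the diff: the new try_to_free_low() below walks the huge page free list and remembers a low-zone candidate, unlinking it only on the following iteration, so the list cursor is never standing on the node being deleted. Here is a minimal userspace sketch of that deferred-deletion idiom; the node type, is_low flag, and free_low_nodes() name are illustrative stand-ins, not kernel code:

/*
 * Userspace sketch (not from the patch) of the deferred-deletion idiom:
 * a matching entry is only remembered in "map" and is unlinked on the
 * NEXT iteration, after the cursor has already moved past it.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next, *prev;
	int is_low;		/* stand-in for "page is in a low zone" */
};

static void node_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* count is negative (pages still to free), exactly as in the patch. */
static int free_low_nodes(struct node *head, int count)
{
	struct node *p, *map = NULL;

	for (p = head->next; p != head; p = p->next) {
		if (map) {		/* unlink the PREVIOUS match ... */
			node_del(map);
			free(map);
			map = NULL;
			if (++count == 0)
				break;
		}
		if (p->is_low)
			map = p;	/* ... so the cursor stays valid */
	}
	if (map) {			/* match found on the final pass */
		node_del(map);
		free(map);
		count++;
	}
	return count;			/* negative remainder, or 0 */
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	int i;

	for (i = 0; i < 6; i++) {	/* three "low", three "high" nodes */
		struct node *n = malloc(sizeof(*n));
		n->is_low = i % 2;
		n->next = head.next;
		n->prev = &head;
		head.next->prev = n;
		head.next = n;
	}
	printf("left to free: %d\n", free_low_nodes(&head, -2));	/* 0 */
	return 0;
}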
@@ -20,6 +20,8 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+#include <linux/sysctl.h>
+
 static long htlbpagemem;
 int htlbpage_max;
 static long htlbzone_pages;
@@ -555,6 +557,53 @@ int alloc_hugetlb_pages(int key, unsigned long addr, unsigned long len, int prot
 		return alloc_shared_hugetlb_pages(key, addr, len, prot, flag);
 	return alloc_private_hugetlb_pages(key, addr, len, prot, flag);
 }
 
+void update_and_free_page(struct page *page)
+{
+	int j;
+	struct page *map;
+
+	map = page;
+	htlbzone_pages--;
+	for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
+		map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
+				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
+				1 << PG_private | 1 << PG_writeback);
+		set_page_count(map, 0);
+		map++;
+	}
+	set_page_count(page, 1);
+	__free_pages(page, HUGETLB_PAGE_ORDER);
+}
+
+int try_to_free_low(int count)
+{
+	struct list_head *p;
+	struct page *page, *map;
+
+	map = NULL;
+	spin_lock(&htlbpage_lock);
+	list_for_each(p, &htlbpage_freelist) {
+		if (map) {
+			list_del(&map->list);
+			update_and_free_page(map);
+			htlbpagemem--;
+			map = NULL;
+			if (++count == 0)
+				break;
+		}
+		page = list_entry(p, struct page, list);
+		if ((page_zone(page))->name[0] != 'H')	/* look for non-Highmem */
+			map = page;
+	}
+	if (map) {
+		list_del(&map->list);
+		update_and_free_page(map);
+		htlbpagemem--;
+		count++;
+	}
+	spin_unlock(&htlbpage_lock);
+	return count;
+}
+
 int set_hugetlb_mem_size(int count)
 {
@@ -568,6 +617,8 @@ int set_hugetlb_mem_size(int count)
 	else
 		lcount = count - htlbzone_pages;
 
+	if (lcount == 0)
+		return (int)htlbzone_pages;
 	if (lcount > 0) {	/* Increase the mem size. */
 		while (lcount--) {
 			page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
@@ -587,23 +638,14 @@ int set_hugetlb_mem_size(int count)
 		return (int) htlbzone_pages;
 	}
 	/* Shrink the memory size. */
+	lcount = try_to_free_low(lcount);
 	while (lcount++) {
 		page = alloc_hugetlb_page();
 		if (page == NULL)
 			break;
 		spin_lock(&htlbpage_lock);
-		htlbzone_pages--;
+		update_and_free_page(page);
 		spin_unlock(&htlbpage_lock);
-		map = page;
-		for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
-			map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-					1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-					1 << PG_private | 1 << PG_writeback);
-			set_page_count(map, 0);
-			map++;
-		}
-		set_page_count(page, 1);
-		__free_pages(page, HUGETLB_PAGE_ORDER);
 	}
 	return (int) htlbzone_pages;
 }
@@ -659,6 +701,13 @@ int hugetlb_report_meminfo(char *buf)
 			HPAGE_SIZE/1024);
 }
 
+int is_hugepage_mem_enough(size_t size)
+{
+	if (size > (htlbpagemem << HPAGE_SHIFT))
+		return 0;
+	return 1;
+}
+
 static struct page * hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int unused)
 {
 	BUG();
......
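A worked example of the shrink path's sign convention may help here (the numbers are illustrative, not from the patch). Suppose htlbzone_pages = 10 and the sysctl asks for count = 4: lcount = 4 - 10 = -6. try_to_free_low(-6) unlinks low-zone pages one at a time, incrementing its counter toward zero; if only four low pages are on the free list it returns -2, and while (lcount++) then frees the remaining two from any zone via alloc_hugetlb_page() and update_and_free_page(). The new if (lcount == 0) guard is what keeps try_to_free_low(0) from ever running: starting at zero, ++count would never compare equal to zero again, and the loop could drain every low-memory huge page.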
@@ -528,6 +528,8 @@ struct file *hugetlb_zero_setup(size_t size)
 	if (!capable(CAP_IPC_LOCK))
 		return ERR_PTR(-EPERM);
+	if (!is_hugepage_mem_enough(size))
+		return ERR_PTR(-ENOMEM);
 
 	n = atomic_read(&hugetlbfs_counter);
 	atomic_inc(&hugetlbfs_counter);
......
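For point 1, callers see the new failure through the usual ERR_PTR convention. A caller-side sketch, not part of the patch:

	struct file *filp = hugetlb_zero_setup(len);
	if (IS_ERR(filp))
		return PTR_ERR(filp);	/* now -ENOMEM when the huge page
					   pool cannot cover len */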
@@ -20,6 +20,7 @@ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 void huge_page_release(struct page *);
 void hugetlb_release_key(struct hugetlb_key *);
 int hugetlb_report_meminfo(char *);
+int is_hugepage_mem_enough(size_t);
 extern int htlbpage_max;
@@ -35,6 +36,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 #define zap_hugepage_range(vma, start, len)	BUG()
 #define unmap_hugepage_range(vma, start, end)	BUG()
 #define huge_page_release(page)			BUG()
+#define is_hugepage_mem_enough(size)		0
 #define hugetlb_report_meminfo(buf)		0
 
 #endif /* !CONFIG_HUGETLB_PAGE */
......
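The size check itself is plain arithmetic: is_hugepage_mem_enough() fails once size exceeds htlbpagemem << HPAGE_SHIFT. For instance, with 4 MB huge pages (HPAGE_SHIFT = 22 on i386 without PAE, an assumption here) and htlbpagemem = 5, requests up to 20 MB pass, and anything larger now gets ENOMEM at setup time instead of failing later when the pages are actually allocated.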