Commit bd64f049 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] stop using page.list in readahead

The address_space.readpages() function currently takes a list of pages,
strung together via page->list.  Switch it to using page->lru.

This changes the API that the kernel presents to filesystems.
parent 90687aa1
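
For a filesystem's ->readpages() implementation, the practical effect is that the incoming pages are now strung together through page->lru instead of page->list, and must be detached through that same field. Below is a minimal sketch of the consuming pattern, modeled on the read_pages() hunk further down; example_readpages is a hypothetical name, and the LRU pagevec bookkeeping done by the real helpers is omitted.

/*
 * Hypothetical ->readpages() consumer: a simplified sketch of the
 * pattern this patch establishes, not code from the patch itself.
 */
static int example_readpages(struct file *filp, struct address_space *mapping,
                             struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;

        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                /* Pages arrive chained on page->lru... */
                struct page *page = list_entry(pages->prev, struct page, lru);

                /* ...and are unlinked through the same field. */
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping, page->index, GFP_KERNEL))
                        mapping->a_ops->readpage(filp, page);
                else
                        page_cache_release(page); /* page was not consumed */
        }
        return 0;
}
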
@@ -898,9 +898,9 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
                 if(list_empty(pages))
                         break;
-                page = list_entry(pages->prev, struct page, list);
-                list_del(&page->list);
+                page = list_entry(pages->prev, struct page, lru);
+                list_del(&page->lru);
                 if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                         page_cache_release(page);
@@ -962,7 +962,7 @@ cifs_readpages(struct file *file, struct address_space *mapping,
         for(i = 0;i<num_pages;) {
                 if(list_empty(page_list))
                         break;
-                page = list_entry(page_list->prev, struct page, list);
+                page = list_entry(page_list->prev, struct page, lru);
                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 
                 /* for reads over a certain size could initiate async read ahead */
@@ -984,8 +984,9 @@ cifs_readpages(struct file *file, struct address_space *mapping,
                         /* clean up remaing pages off list */
                         while (!list_empty(page_list) && (i < num_pages)) {
-                                page = list_entry(page_list->prev, struct page, list);
-                                list_del(&page->list);
+                                page = list_entry(page_list->prev,
+                                                  struct page, lru);
+                                list_del(&page->lru);
                         }
                         break;
                 } else if (bytes_read > 0) {
@@ -1002,8 +1003,9 @@ cifs_readpages(struct file *file, struct address_space *mapping,
                 cFYI(1,("No bytes read cleaning remaining pages off readahead list"));
                 /* BB turn off caching and do new lookup on file size at server? */
                 while (!list_empty(page_list) && (i < num_pages)) {
-                        page = list_entry(page_list->prev, struct page, list);
-                        list_del(&page->list);
+                        page = list_entry(page_list->prev,
+                                          struct page, lru);
+                        list_del(&page->lru);
                 }
                 break;
...
@@ -329,10 +329,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
         pagevec_init(&lru_pvec, 0);
         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-                struct page *page = list_entry(pages->prev, struct page, list);
+                struct page *page = list_entry(pages->prev, struct page, lru);
 
                 prefetchw(&page->flags);
-                list_del(&page->list);
+                list_del(&page->lru);
                 if (!add_to_page_cache(page, mapping,
                                         page->index, GFP_KERNEL)) {
                         bio = do_mpage_readpage(bio, page,
...
@@ -48,7 +48,7 @@ static inline unsigned long get_min_readahead(struct file_ra_state *ra)
         return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 }
 
-#define list_to_page(head) (list_entry((head)->prev, struct page, list))
+#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
 
 /**
  * read_cache_pages - populate an address space with some pages, and
@@ -72,7 +72,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
         while (!list_empty(pages)) {
                 page = list_to_page(pages);
-                list_del(&page->list);
+                list_del(&page->lru);
                 if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                         page_cache_release(page);
                         continue;
@@ -85,7 +85,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                         struct page *victim;
 
                         victim = list_to_page(pages);
-                        list_del(&victim->list);
+                        list_del(&victim->lru);
                         page_cache_release(victim);
                 }
                 break;
@@ -112,7 +112,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
         pagevec_init(&lru_pvec, 0);
         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                 struct page *page = list_to_page(pages);
-                list_del(&page->list);
+                list_del(&page->lru);
                 if (!add_to_page_cache(page, mapping,
                                         page->index, GFP_KERNEL)) {
                         mapping->a_ops->readpage(filp, page);
@@ -247,7 +247,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                 if (!page)
                         break;
                 page->index = page_offset;
-                list_add(&page->list, &page_pool);
+                list_add(&page->lru, &page_pool);
                 ret++;
         }
         spin_unlock_irq(&mapping->tree_lock);
...