Commit f97f22cb authored by Linus Torvalds

v2.4.13.3 -> v2.4.13.4

  - Mikael Pettersson: fix P4 boot with APIC enabled
  - me: fix device queuing thinko, clean up VM locking
parent ff35c838
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 14
EXTRAVERSION =-pre3
EXTRAVERSION =-pre4
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
@@ -588,6 +588,7 @@ static int __init detect_init_APIC (void)
goto no_apic;
case X86_VENDOR_INTEL:
if (boot_cpu_data.x86 == 6 ||
(boot_cpu_data.x86 == 15 && cpu_has_apic) ||
(boot_cpu_data.x86 == 5 && cpu_has_apic))
break;
goto no_apic;
@@ -446,12 +446,15 @@ static struct request *__get_request_wait(request_queue_t *q, int rw)
DECLARE_WAITQUEUE(wait, current);
generic_unplug_device(q);
add_wait_queue_exclusive(&q->wait_for_request, &wait);
add_wait_queue(&q->wait_for_request, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
if (q->rq[rw].count < batch_requests)
schedule();
} while ((rq = get_request(q,rw)) == NULL);
spin_lock_irq(&io_request_lock);
rq = get_request(q,rw);
spin_unlock_irq(&io_request_lock);
} while (rq == NULL);
remove_wait_queue(&q->wait_for_request, &wait);
current->state = TASK_RUNNING;
return rq;
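The "device queuing thinko" from the changelog is visible above: the old loop pulled a request off the freelist with get_request() without holding io_request_lock, and used an exclusive waiter. Purely as a hedged illustration (plain pthreads, all names invented, not the kernel API), the corrected shape is the classic "recheck and claim under the lock" loop:

```c
/*
 * Userspace analogue only (pthreads, invented names) of the corrected
 * pattern: a wakeup is just a hint, the slot is re-checked and claimed
 * only while the lock protecting the pool is held.
 */
#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_free = PTHREAD_COND_INITIALIZER;
static int free_slots = 8;

void claim_slot(void)
{
	pthread_mutex_lock(&pool_lock);
	while (free_slots == 0)			/* recheck after every wakeup */
		pthread_cond_wait(&pool_free, &pool_lock);
	free_slots--;				/* claim under the lock */
	pthread_mutex_unlock(&pool_lock);
}

void release_slot(void)
{
	pthread_mutex_lock(&pool_lock);
	free_slots++;
	pthread_mutex_unlock(&pool_lock);
	pthread_cond_signal(&pool_free);
}
```

The point mirrored by the patch is that the wakeup only says "look again"; the actual claim has to happen while the lock that protects the free list is held.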
@@ -271,7 +271,7 @@ static struct block_device_operations pcd_bdops = {
release: cdrom_release,
ioctl: cdrom_ioctl,
check_media_change: cdrom_media_changed,
}
};
static struct cdrom_device_ops pcd_dops = {
pcd_open,
@@ -780,31 +780,23 @@ extern unsigned int DRM(poll)(struct file *filp,
/* Mapping support (drm_vm.h) */
#if LINUX_VERSION_CODE < 0x020317
extern unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
extern unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
extern unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
extern unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
#else
/* Return type changed in 2.3.23 */
extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
unsigned long address);
#endif
extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma);
@@ -58,13 +58,11 @@ struct vm_operations_struct DRM(vm_sg_ops) = {
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#endif
{
#if __REALLY_HAVE_AGP
@@ -136,13 +134,11 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= 0x020300
@@ -272,13 +268,11 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#endif
{
drm_file_t *priv = vma->vm_file->private_data;
@@ -309,13 +303,11 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= 0x020300
@@ -569,7 +569,7 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in,
__u32 w;
while (nwords--) {
w = rotate_left(r->input_rotate, *in);
w = rotate_left(r->input_rotate, *in++);
i = r->add_ptr = (r->add_ptr - 1) & wordmask;
/*
* Normally, we add 7 bits of rotation to the pool.
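The random.c change is a plain missing-increment fix: the old statement mixed in *in on every pass without advancing the pointer, so only the first input word was ever folded into the pool. A trivial standalone illustration of the same bug shape (nothing below comes from the entropy code itself):

```c
/*
 * Without the ++ this would add in[0] nwords times instead of
 * consuming each input word in turn -- the shape of the bug fixed above.
 */
#include <stdio.h>

static unsigned sum_words(const unsigned *in, int nwords)
{
	unsigned acc = 0;

	while (nwords--)
		acc += *in++;	/* the fix: advance through the input */
	return acc;
}

int main(void)
{
	unsigned v[3] = { 1, 2, 3 };

	printf("%u\n", sum_words(v, 3));	/* prints 6, not 3 */
	return 0;
}
```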
@@ -770,12 +770,6 @@ static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
if (!PageError(page))
SetPageUptodate(page);
/*
* Run the hooks that have to be done when a page I/O has completed.
*/
if (PageTestandClearDecrAfter(page))
atomic_dec(&nr_async_pages);
UnlockPage(page);
return;
@@ -125,7 +125,7 @@ extern pgprot_t protection_map[16];
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address);
};
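Since the write_access argument is gone from ->nopage, users of this hook need the two-argument form. Purely as a hypothetical sketch against 2.4.14-era interfaces (sample_nopage and sample_lookup_page are invented, and this is not compilable on its own), a method now only looks up and references its backing page; the early-COW copy is handled centrally in do_no_page():

```c
/*
 * Hypothetical sketch only, not part of this patch: a driver ->nopage
 * method after the write_access argument was dropped.
 */
static struct page *sample_nopage(struct vm_area_struct *vma,
				  unsigned long address)
{
	unsigned long pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
				+ vma->vm_pgoff;
	struct page *page;

	page = sample_lookup_page(vma->vm_private_data, pgoff); /* invented helper */
	if (!page)
		return NULL;		/* do_no_page() turns NULL into SIGBUS */
	get_page(page);			/* keep a reference for the mapping */
	return page;
}
```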
/*
@@ -270,7 +270,7 @@ typedef struct page {
#define PG_referenced 2
#define PG_uptodate 3
#define PG_dirty 4
#define PG_decr_after 5
#define PG_unused 5
#define PG_active 6
#define PG_inactive 7
#define PG_slab 8
@@ -325,9 +325,6 @@ static inline void set_page_dirty(struct page * page)
#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
#define PageTestandClearReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
#define PageDecrAfter(page) test_bit(PG_decr_after, &(page)->flags)
#define SetPageDecrAfter(page) set_bit(PG_decr_after, &(page)->flags)
#define PageTestandClearDecrAfter(page) test_and_clear_bit(PG_decr_after, &(page)->flags)
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
#define PageSetSlab(page) set_bit(PG_slab, &(page)->flags)
#define PageClearSlab(page) clear_bit(PG_slab, &(page)->flags)
@@ -415,7 +412,7 @@ extern void show_free_areas_node(pg_data_t *pgdat);
extern void clear_page_tables(struct mm_struct *, unsigned long, int);
extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int no_share);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address);
struct file *shmem_file_setup(char * name, loff_t size);
extern void shmem_lock(struct file * file, int lock);
extern int shmem_zero_setup(struct vm_area_struct *);
@@ -541,7 +538,7 @@ extern void truncate_inode_pages(struct address_space *, loff_t);
/* generic vm_area_ops exported for stackable file systems */
extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long);
/*
* GFP bitmasks..
@@ -491,7 +491,6 @@ EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_inode_buffers);
EXPORT_SYMBOL(fsync_inode_data_buffers);
EXPORT_SYMBOL(clear_inode);
EXPORT_SYMBOL(nr_async_pages);
EXPORT_SYMBOL(___strtok);
EXPORT_SYMBOL(init_special_inode);
EXPORT_SYMBOL(read_ahead);
@@ -49,8 +49,13 @@ struct page **page_hash_table;
spinlock_t pagecache_lock ____cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
/*
* NOTE: to avoid deadlocking you must never acquire the pagecache_lock with
* the pagemap_lru_lock held.
* NOTE: to avoid deadlocking you must never acquire the pagemap_lru_lock
* with the pagecache_lock held.
*
* Ordering:
* swap_lock ->
* pagemap_lru_lock ->
* pagecache_lock
*/
spinlock_t pagemap_lru_lock ____cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
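The new ordering comment is the rule the rest of this patch enforces: invalidate_inode_pages (next hunk) now takes pagemap_lru_lock before pagecache_lock, and the lru_cache_add() calls are moved outside the pagecache_lock critical sections. A minimal userspace sketch of the same discipline, with invented names:

```c
/*
 * Illustration only (pthreads, invented names): the two locks are always
 * nested in the same order, outer (LRU) before inner (page cache), which
 * is what rules out an AB-BA deadlock between two paths.
 */
#include <pthread.h>

static pthread_mutex_t lru_lock   = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */

void scan_both(void)
{
	pthread_mutex_lock(&lru_lock);		/* outer lock first */
	pthread_mutex_lock(&cache_lock);	/* then the inner one */
	/* ... walk data guarded by both locks ... */
	pthread_mutex_unlock(&cache_lock);
	pthread_mutex_unlock(&lru_lock);
}
```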
@@ -164,8 +169,8 @@ void invalidate_inode_pages(struct inode * inode)
head = &inode->i_mapping->clean_pages;
spin_lock(&pagecache_lock);
spin_lock(&pagemap_lru_lock);
spin_lock(&pagecache_lock);
curr = head->next;
while (curr != head) {
@@ -196,8 +201,8 @@ void invalidate_inode_pages(struct inode * inode)
continue;
}
spin_unlock(&pagemap_lru_lock);
spin_unlock(&pagecache_lock);
spin_unlock(&pagemap_lru_lock);
}
static inline void truncate_partial_page(struct page *page, unsigned partial)
@@ -633,8 +638,9 @@ void add_to_page_cache_locked(struct page * page, struct address_space *mapping,
spin_lock(&pagecache_lock);
add_page_to_inode_queue(mapping, page);
add_page_to_hash_queue(page, page_hash(mapping, index));
lru_cache_add(page);
spin_unlock(&pagecache_lock);
lru_cache_add(page);
}
/*
@@ -653,7 +659,6 @@ static inline void __add_to_page_cache(struct page * page,
page->index = offset;
add_page_to_inode_queue(mapping, page);
add_page_to_hash_queue(page, hash);
lru_cache_add(page);
}
void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
@@ -661,6 +666,7 @@ void add_to_page_cache(struct page * page, struct address_space * mapping, unsig
spin_lock(&pagecache_lock);
__add_to_page_cache(page, mapping, offset, page_hash(mapping, offset));
spin_unlock(&pagecache_lock);
lru_cache_add(page);
}
int add_to_page_cache_unique(struct page * page,
@@ -680,6 +686,8 @@ int add_to_page_cache_unique(struct page * page,
}
spin_unlock(&pagecache_lock);
if (!err)
lru_cache_add(page);
return err;
}
@@ -909,7 +917,9 @@ struct page * find_or_create_page(struct address_space *mapping, unsigned long i
newpage = NULL;
}
spin_unlock(&pagecache_lock);
if (unlikely(newpage != NULL))
if (newpage == NULL)
lru_cache_add(page);
else
page_cache_release(newpage);
}
}
@@ -1385,6 +1395,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
page = cached_page;
__add_to_page_cache(page, mapping, index, hash);
spin_unlock(&pagecache_lock);
lru_cache_add(page);
cached_page = NULL;
goto readpage;
@@ -1671,14 +1682,13 @@ static void nopage_sequential_readahead(struct vm_area_struct * vma,
* it in the page cache, and handles the special cases reasonably without
* having a lot of duplicated code.
*/
struct page * filemap_nopage(struct vm_area_struct * area,
unsigned long address, int no_share)
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address)
{
int error;
struct file *file = area->vm_file;
struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
struct inode *inode = mapping->host;
struct page *page, **hash, *old_page;
struct page *page, **hash;
unsigned long size, pgoff, endoff;
pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
@@ -1724,22 +1734,9 @@ struct page * filemap_nopage(struct vm_area_struct * area,
* Found the page and have a reference on it, need to check sharing
* and possibly copy it over to another page..
*/
old_page = page;
mark_page_accessed(page);
if (no_share) {
struct page *new_page = alloc_page(GFP_HIGHUSER);
if (new_page) {
copy_user_highpage(new_page, old_page, address);
flush_page_to_ram(new_page);
} else
new_page = NOPAGE_OOM;
page_cache_release(page);
return new_page;
}
flush_page_to_ram(old_page);
return old_page;
flush_page_to_ram(page);
return page;
no_cached_page:
/*
@@ -906,7 +906,8 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * new_page
* change only once the write actually happens. This avoids a few races,
* and potentially makes it more efficient.
*
* We hold the mm semaphore and the page_table_lock on entry and exit.
* We hold the mm semaphore and the page_table_lock on entry and exit
* with the page_table_lock released.
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table, pte_t pte)
@@ -950,6 +951,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
break;
flush_cache_page(vma, address);
establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
spin_unlock(&mm->page_table_lock);
return 1; /* Minor fault */
}
@@ -978,15 +980,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
/* Free the old page.. */
new_page = old_page;
}
spin_unlock(&mm->page_table_lock);
free_lru_page(new_page);
return 1; /* Minor fault */
bad_wp_page:
spin_unlock(&mm->page_table_lock);
printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
return -1;
no_mem:
free_lru_page(old_page);
spin_lock(&mm->page_table_lock);
return -1;
}
@@ -1091,10 +1094,6 @@ void swapin_readahead(swp_entry_t entry)
*/
num = valid_swaphandles(entry, &offset);
for (i = 0; i < num; offset++, i++) {
/* Don't block on I/O for read-ahead */
if (atomic_read(&nr_async_pages) >=
pager_daemon.swap_cluster << page_cluster)
break;
/* Ok, do the async read-ahead now */
new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset));
if (!new_page)
@@ -1109,7 +1108,8 @@ void swapin_readahead(swp_entry_t entry)
(swapper_space.nrpages*5 > total_swap_pages*4)
/*
* We hold the mm semaphore and the page_table_lock on entry and exit.
* We hold the mm semaphore and the page_table_lock on entry and
* should release the pagetable lock on exit..
*/
static int do_swap_page(struct mm_struct * mm,
struct vm_area_struct * vma, unsigned long address,
@@ -1126,24 +1126,23 @@ static int do_swap_page(struct mm_struct * mm,
swapin_readahead(entry);
page = read_swap_cache_async(entry);
if (!page) {
spin_lock(&mm->page_table_lock);
/*
* Back out if somebody else faulted in this pte while
* we released the page table lock.
*/
return pte_same(*page_table, orig_pte) ? -1 : 1;
int retval;
spin_lock(&mm->page_table_lock);
retval = pte_same(*page_table, orig_pte) ? -1 : 1;
spin_unlock(&mm->page_table_lock);
return retval;
}
/* Had to read the page from swap area: Major fault */
ret = 2;
}
/*
* Freeze the "shared"ness of the page, ie page_count + swap_count.
* Must lock page before transferring our swap count to already
* obtained page count.
*/
lock_page(page);
if (!Page_Uptodate(page))
wait_on_page(page);
/*
* Back out if somebody else faulted in this pte while we
@@ -1153,6 +1152,7 @@ static int do_swap_page(struct mm_struct * mm,
if (!pte_same(*page_table, orig_pte)) {
UnlockPage(page);
page_cache_release(page);
spin_unlock(&mm->page_table_lock);
return 1;
}
@@ -1161,16 +1161,6 @@ static int do_swap_page(struct mm_struct * mm,
pte = mk_pte(page, vma->vm_page_prot);
swap_free(entry);
mark_page_accessed(page);
if (exclusive_swap_page(page)) {
if (write_access || vm_swap_full()) {
pte = pte_mkdirty(pte);
if (vma->vm_flags & VM_WRITE)
pte = pte_mkwrite(pte);
delete_from_swap_cache(page);
}
}
UnlockPage(page);
flush_page_to_ram(page);
flush_icache_page(vma, page);
@@ -1178,6 +1168,7 @@ static int do_swap_page(struct mm_struct * mm,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, pte);
spin_unlock(&mm->page_table_lock);
return ret;
}
@@ -1208,6 +1199,7 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma,
spin_lock(&mm->page_table_lock);
if (!pte_none(*page_table)) {
page_cache_release(page);
spin_unlock(&mm->page_table_lock);
return 1;
}
mm->rss++;
@@ -1220,10 +1212,10 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, entry);
spin_unlock(&mm->page_table_lock);
return 1; /* Minor fault */
no_mem:
spin_lock(&mm->page_table_lock);
return -1;
}
@@ -1237,7 +1229,7 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma,
* do not need to flush old virtual caches or the TLB.
*
* This is called with the MM semaphore held and the page table
* spinlock held.
* spinlock held. Exit with the spinlock released.
*/
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long address, int write_access, pte_t *page_table)
@@ -1249,18 +1241,27 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
return do_anonymous_page(mm, vma, page_table, write_access, address);
spin_unlock(&mm->page_table_lock);
/*
* The third argument is "no_share", which tells the low-level code
* to copy, not share the page even if sharing is possible. It's
* essentially an early COW detection.
*/
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK);
spin_lock(&mm->page_table_lock);
if (new_page == NULL) /* no page was available -- SIGBUS */
return 0;
if (new_page == NOPAGE_OOM)
return -1;
/*
* Should we do an early C-O-W break?
*/
if (write_access && !(vma->vm_flags & VM_SHARED)) {
struct page * page = alloc_page(GFP_HIGHUSER);
if (!page)
return -1;
copy_highpage(page, new_page);
page_cache_release(new_page);
lru_cache_add(page);
new_page = page;
}
spin_lock(&mm->page_table_lock);
/*
* This silly early PAGE_DIRTY setting removes a race
* due to the bad i386 page protection. But it's valid
@@ -1277,20 +1278,19 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
flush_page_to_ram(new_page);
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access) {
if (write_access)
entry = pte_mkwrite(pte_mkdirty(entry));
} else if (page_count(new_page) > 1 &&
!(vma->vm_flags & VM_SHARED))
entry = pte_wrprotect(entry);
set_pte(page_table, entry);
} else {
/* One of our sibling threads was faster, back out. */
page_cache_release(new_page);
free_lru_page(new_page);
spin_unlock(&mm->page_table_lock);
return 1;
}
/* no need to invalidate: a not-present page shouldn't be cached */
update_mmu_cache(vma, address, entry);
spin_unlock(&mm->page_table_lock);
return 2; /* Major fault */
}
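With the no_share argument gone, the early copy-on-write break now lives in do_no_page() itself (see the hunk above): when the first fault on a private mapping is a write, the page returned by ->nopage is copied before it is mapped. A userspace analogue of that idea, with invented names:

```c
/*
 * Illustration only (invented names): on the first write access to a
 * privately mapped object, hand back a private copy instead of the
 * shared buffer, so later writes never touch the shared data.
 */
#include <stdlib.h>
#include <string.h>

#define OBJ_SIZE 4096

void *get_for_access(void *shared, int write_access, int is_private)
{
	if (write_access && is_private) {
		void *copy = malloc(OBJ_SIZE);

		if (!copy)
			return NULL;		/* caller treats as OOM */
		memcpy(copy, shared, OBJ_SIZE);
		return copy;			/* private copy, safe to dirty */
	}
	return shared;				/* read, or shared mapping: no copy */
}
```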
@@ -1311,6 +1311,9 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
* The adding of pages is protected by the MM semaphore (which we hold),
* so we don't need to worry about a page being suddenly been added into
* our VM.
*
* We enter with the pagetable spinlock held, we are supposed to
* release it when done.
*/
static inline int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct * vma, unsigned long address,
@@ -1338,6 +1341,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
}
entry = pte_mkyoung(entry);
establish_pte(vma, address, pte, entry);
spin_unlock(&mm->page_table_lock);
return 1;
}
@@ -1347,7 +1351,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
unsigned long address, int write_access)
{
int ret = -1;
pgd_t *pgd;
pmd_t *pmd;
@@ -1364,10 +1367,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
if (pmd) {
pte_t * pte = pte_alloc(mm, pmd, address);
if (pte)
ret = handle_pte_fault(mm, vma, address, write_access, pte);
return handle_pte_fault(mm, vma, address, write_access, pte);
}
spin_unlock(&mm->page_table_lock);
return ret;
return -1;
}
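The fault path's locking contract after this patch: handle_mm_fault() takes mm->page_table_lock, and handle_pte_fault() together with the do_*_page() helpers drop it before returning, so the slow paths can sleep without holding a spinlock. A userspace analogue of that "callee releases" convention (pthreads, invented names):

```c
/*
 * Userspace analogue only: the caller acquires the lock, the helper is
 * responsible for dropping it on every exit path before it returns.
 */
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with table_lock held; always returns with it released. */
static int handle_entry_locked(int *entry, int write)
{
	if (*entry != 0) {			/* fast path: fix up in place */
		*entry |= write ? 2 : 1;
		pthread_mutex_unlock(&table_lock);
		return 1;			/* "minor fault" */
	}
	pthread_mutex_unlock(&table_lock);	/* slow path may block */
	/* ... bring the data in without the lock held ... */
	return 2;				/* "major fault" */
}

int handle_fault(int *entry, int write)
{
	pthread_mutex_lock(&table_lock);
	return handle_entry_locked(entry, write);	/* lock released inside */
}
```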
/*
@@ -80,8 +80,6 @@ static void __free_pages_ok (struct page *page, unsigned int order)
BUG();
if (PageLocked(page))
BUG();
if (PageDecrAfter(page))
BUG();
if (PageActive(page))
BUG();
if (PageInactive(page))
@@ -274,8 +272,6 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
BUG();
if (PageLocked(page))
BUG();
if (PageDecrAfter(page))
BUG();
if (PageActive(page))
BUG();
if (PageInactive(page))
@@ -43,11 +43,6 @@ static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page)
struct inode *swapf = 0;
int wait = 0;
/* Don't allow too many pending pages in flight.. */
if ((rw == WRITE) && atomic_read(&nr_async_pages) >
pager_daemon.swap_cluster * (1 << page_cluster))
wait = 1;
if (rw == READ) {
ClearPageUptodate(page);
kstat.pswpin++;
@@ -75,10 +70,6 @@ static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page)
} else {
return 0;
}
if (!wait) {
SetPageDecrAfter(page);
atomic_inc(&nr_async_pages);
}
/* block_size == PAGE_SIZE/zones_used */
brw_page(rw, page, dev, zones, block_size);
@@ -449,7 +449,6 @@ static int shmem_writepage(struct page * page)
BUG();
/* Remove it from the page cache */
lru_cache_del(page);
remove_inode_page(page);
page_cache_release(page);
@@ -468,6 +467,7 @@ static int shmem_writepage(struct page * page)
*entry = swap;
info->swapped++;
spin_unlock(&info->lock);
SetPageUptodate(page);
set_page_dirty(page);
UnlockPage(page);
return 0;
@@ -621,7 +621,7 @@ static int shmem_getpage(struct inode * inode, unsigned long idx, struct page **
return error;
}
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int no_share)
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address)
{
struct page * page;
unsigned int idx;
@@ -633,19 +633,7 @@ struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, i
if (shmem_getpage(inode, idx, &page))
return page;
if (no_share) {
struct page *new_page = page_cache_alloc(inode->i_mapping);
if (new_page) {
copy_user_highpage(new_page, page, address);
flush_page_to_ram(new_page);
} else
new_page = NOPAGE_OOM;
page_cache_release(page);
return new_page;
}
flush_page_to_ram (page);
flush_page_to_ram(page);
return(page);
}
@@ -1150,7 +1138,7 @@ static int shmem_symlink(struct inode * dir, struct dentry *dentry, const char *
inode = dentry->d_inode;
info = SHMEM_I(inode);
inode->i_size = len;
inode->i_size = len-1;
if (len <= sizeof(struct shmem_inode_info)) {
/* do it inline */
memcpy(info, symname, len);
@@ -27,10 +27,6 @@
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
/* We track the number of pages currently being asynchronously swapped
out, so that we don't try to swap TOO many pages out at once */
atomic_t nr_async_pages = ATOMIC_INIT(0);
pager_daemon_t pager_daemon = {
512, /* base number for calculating the number of tries */
SWAP_CLUSTER_MAX, /* minimum number of tries */
@@ -17,17 +17,8 @@
#include <asm/pgtable.h>
/*
* We may have stale swap cache pages in memory: notice
* them here and get rid of the unnecessary final write.
*/
static int swap_writepage(struct page *page)
{
if (exclusive_swap_page(page)) {
delete_from_swap_cache(page);
UnlockPage(page);
return 0;
}
rw_swap_page(WRITE, page);
return 0;
}
@@ -82,7 +73,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry)
INC_CACHE_INFO(exist_race);
return -EEXIST;
}
SetPageUptodate(page);
if (!PageLocked(page))
BUG();
if (!PageSwapCache(page))
@@ -119,8 +109,7 @@ void delete_from_swap_cache(struct page *page)
if (!PageLocked(page))
BUG();
if (block_flushpage(page, 0))
lru_cache_del(page);
block_flushpage(page, 0);
entry.val = page->index;
@@ -242,7 +242,7 @@ void free_swap_and_cache(swp_entry_t entry)
page_cache_get(page);
delete_from_swap_cache(page);
UnlockPage(page);
page_cache_release(page);
free_lru_page(page);
}
}
@@ -582,7 +582,7 @@ static int try_to_unuse(unsigned int type)
*/
SetPageDirty(page);
UnlockPage(page);
page_cache_release(page);
free_lru_page(page);
/*
* Make sure that we aren't completely killing
@@ -134,6 +134,7 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
break;
/* Add it to the swap cache and mark it dirty */
if (add_to_swap_cache(page, entry) == 0) {
SetPageUptodate(page);
set_page_dirty(page);
goto set_swap_pte;
}
@@ -459,13 +460,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
}
}
if (unlikely(!spin_trylock(&pagecache_lock))) {
/* we hold the page lock so the page cannot go away from under us */
spin_unlock(&pagemap_lru_lock);
spin_lock(&pagecache_lock);
spin_lock(&pagemap_lru_lock);
}
/*
* this is the non-racy check for busy page.
@@ -520,6 +515,20 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
}
spin_unlock(&pagemap_lru_lock);
if (nr_pages <= 0)
return 0;
/*
* If swapping out isn't appropriate, and
* we still fail, try the other (usually smaller)
* caches instead.
*/
shrink_dcache_memory(priority, gfp_mask);
shrink_icache_memory(priority, gfp_mask);
#ifdef CONFIG_QUOTA
shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
#endif
return nr_pages;
}
@@ -568,17 +577,7 @@ static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask
ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
refill_inactive(ratio);
nr_pages = shrink_cache(nr_pages, classzone, gfp_mask, priority);
if (nr_pages <= 0)
return 0;
shrink_dcache_memory(priority, gfp_mask);
shrink_icache_memory(priority, gfp_mask);
#ifdef CONFIG_QUOTA
shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
#endif
return nr_pages;
return shrink_cache(nr_pages, classzone, gfp_mask, priority);
}
int try_to_free_pages(zone_t *classzone, unsigned int gfp_mask, unsigned int order)