Commit 186b0970 authored by Linus Torvalds

Import 1.3.56

parent 001e3f20
 VERSION = 1
 PATCHLEVEL = 3
-SUBLEVEL = 55
+SUBLEVEL = 56
 ARCH = i386
......
@@ -140,6 +140,7 @@ void copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
     p->tss.usp = usp;
     p->tss.ksp = (unsigned long) childstack;
     p->tss.flags = 1;
+    p->mm->context = 0;
 }
 /*
......
@@ -23,7 +23,6 @@ asmlinkage int sys_waitpid(pid_t pid,unsigned long * stat_addr, int options);
 asmlinkage void ret_from_sys_call(void);
 asmlinkage int do_signal(unsigned long, struct pt_regs *, struct switch_stack *,
     unsigned long, unsigned long);
-asmlinkage void imb(void);
 extern int ptrace_set_bpt (struct task_struct *child);
 extern int ptrace_cancel_bpt (struct task_struct *child);
......
@@ -1099,7 +1099,7 @@ static int bread_page(unsigned long address, kdev_t dev, int b[], int size)
  */
 int generic_readpage(struct inode * inode, struct page * page)
 {
-    unsigned long block;
+    unsigned long block, address;
     int *p, nr[PAGE_SIZE/512];
     int i;
@@ -1113,10 +1113,20 @@ int generic_readpage(struct inode * inode, struct page * page)
         p++;
     } while (i > 0);
-    /* We should make this asynchronous, but this is good enough for now.. */
-    bread_page(page_address(page), inode->i_dev, nr, inode->i_sb->s_blocksize);
+    /*
+     * We should make this asynchronous, but this is good enough for now..
+     */
+    /* IO start */
+    page->count++;
+    address = page_address(page);
+    bread_page(address, inode->i_dev, nr, inode->i_sb->s_blocksize);
+    /* IO ready (this part should be in the "page ready callback" function) */
+    page->uptodate = 1;
+    wake_up(&page->wait);
+    free_page(address);
     return 0;
 }
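
The new sequence above pins the page with an extra reference before the IO starts and drops it once the page is marked up to date; the "IO start" / "IO ready" comments mark where an asynchronous split would eventually go. As a rough sketch (hypothetical code, not part of this commit), the completion half might one day become a callback of this shape:

    /* Hypothetical "page ready" callback, assuming a future asynchronous
     * bread_page() that fires it after the last block has been read;
     * no such callback exists in this kernel version. */
    static void page_ready(struct page * page)
    {
        page->uptodate = 1;                 /* contents are now valid */
        wake_up(&page->wait);               /* wake sleepers waiting on the page */
        free_page(page_address(page));      /* drop the reference taken at "IO start" */
    }
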
......
@@ -27,14 +27,17 @@
 static unsigned long msdos_file_mmap_nopage(
     struct vm_area_struct * area,
     unsigned long address,
-    unsigned long page,
     int error_code)
 {
     struct inode * inode = area->vm_inode;
+    unsigned long page;
     unsigned int clear;
     int pos;
     long gap;    /* distance from eof to pos */
+    page = __get_free_page(GFP_KERNEL);
+    if (!page)
+        return page;
     address &= PAGE_MASK;
     pos = address - area->vm_start + area->vm_offset;
......
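
This is the first of four filesystems converted to the same pattern (fat here, then ncpfs, nfs and smbfs below): ->nopage() no longer receives a pre-allocated page from do_no_page() but allocates its own, returning 0 on failure so the generic fault code can signal the process. Reduced to a minimal sketch (illustrative only; the function name is invented):

    static unsigned long example_nopage(struct vm_area_struct * area,
        unsigned long address, int no_share)
    {
        unsigned long page = __get_free_page(GFP_KERNEL);

        if (!page)
            return 0;    /* do_no_page() maps BAD_PAGE and raises SIGBUS */
        address &= PAGE_MASK;
        /* ... read the file data for this offset into the page ... */
        return page;
    }
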
@@ -757,7 +757,7 @@ asmlinkage int sys_link(const char * oldname, const char * newname)
     char * to;
     struct inode * oldinode;
-    error = namei(oldname, &oldinode);
+    error = lnamei(oldname, &oldinode);
     if (error)
         return error;
     error = getname(newname,&to);
......
@@ -31,15 +31,19 @@ static inline int min(int a, int b)
  */
 static unsigned long
 ncp_file_mmap_nopage(struct vm_area_struct * area,
-    unsigned long address, unsigned long page, int no_share)
+    unsigned long address, int no_share)
 {
     struct inode * inode = area->vm_inode;
+    unsigned long page;
     unsigned int clear;
     unsigned long tmp;
     int bufsize;
     int pos;
     unsigned short fs;
+    page = __get_free_page(GFP_KERNEL);
+    if (!page)
+        return page;
     address &= PAGE_MASK;
     pos = address - area->vm_start + area->vm_offset;
......
@@ -25,12 +25,14 @@
 #include <asm/system.h>
 /*
- * Fill in the supplied page for mmap
+ * Return a page for mmap. We need to start using the page cache,
+ * because otherwise we can't share pages between processes..
  */
 static unsigned long nfs_file_mmap_nopage(struct vm_area_struct * area,
-    unsigned long address, unsigned long page, int no_share)
+    unsigned long address, int no_share)
 {
     struct inode * inode = area->vm_inode;
+    unsigned long page;
     unsigned int clear;
     unsigned long tmp;
     int n;
@@ -38,6 +40,9 @@ static unsigned long nfs_file_mmap_nopage(struct vm_area_struct * area,
     int pos;
     struct nfs_fattr fattr;
+    page = __get_free_page(GFP_KERNEL);
+    if (!page)
+        return page;
     address &= PAGE_MASK;
     pos = address - area->vm_start + area->vm_offset;
......
@@ -25,15 +25,19 @@
  */
 static unsigned long
 smb_file_mmap_nopage(struct vm_area_struct * area,
-    unsigned long address, unsigned long page, int no_share)
+    unsigned long address, int no_share)
 {
     struct inode * inode = area->vm_inode;
+    unsigned long page;
     unsigned int clear;
     unsigned long tmp;
     int n;
     int i;
     int pos;
+    page = __get_free_page(GFP_KERNEL);
+    if (!page)
+        return 0;
     address &= PAGE_MASK;
     pos = address - area->vm_start + area->vm_offset;
......
@@ -7,16 +7,38 @@
  * Copyright (C) 1996, Linus Torvalds
  */
-#include <asm/pgtable.h>
+#include <linux/config.h>
+#include <asm/system.h>
 /*
- * The maximum ASN's the processor supports. On the EV4 this doesn't
- * matter as the pal-code doesn't use the ASNs anyway, on the EV5
- * this is 127.
+ * The maximum ASN's the processor supports. On the EV4 this is 63
+ * but the PAL-code doesn't actually use this information. On the
+ * EV5 this is 127.
+ *
+ * On the EV4, the ASNs are more-or-less useless anyway, as they are
+ * only used as a icache tag, not for TB entries. On the EV5 ASN's
+ * also validate the TB entries, and thus make a lot more sense.
+ *
+ * The EV4 ASN's don't even match the architecture manual, ugh. And
+ * I quote: "If a processor implements address space numbers (ASNs),
+ * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
+ * in use) and the Valid bit set, then entries can also effectively be
+ * made coherent by assigning a new, unused ASN to the currently
+ * running process and not reusing the previous ASN before calling the
+ * appropriate PALcode routine to invalidate the translation buffer
+ * (TB)".
+ *
+ * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
+ * work correctly and can thus not be used (explaining the lack of PAL-code
+ * support).
  */
+#ifdef CONFIG_EV5
 #define MAX_ASN 127
+#else
+#define MAX_ASN 63
+#endif
-#define ASN_VERSION_SHIFT 32
+#define ASN_VERSION_SHIFT 16
 #define ASN_VERSION_MASK ((~0UL) << ASN_VERSION_SHIFT)
 #define ASN_FIRST_VERSION (1UL << ASN_VERSION_SHIFT)
@@ -33,6 +55,7 @@
  */
 extern inline void get_mmu_context(struct task_struct *p)
 {
+#ifdef CONFIG_EV5
     static unsigned long asn_cache = ASN_FIRST_VERSION;
     struct mm_struct * mm = p->mm;
     unsigned long asn = mm->context;
@@ -44,7 +67,7 @@ extern inline void get_mmu_context(struct task_struct *p)
     /* check if it's legal.. */
     if ((asn & ~ASN_VERSION_MASK) > MAX_ASN) {
         /* start a new version, invalidate all old asn's */
-        tbiap();
+        tbiap(); imb();
         asn_cache = (asn_cache & ASN_VERSION_MASK) + ASN_FIRST_VERSION;
         if (!asn_cache)
             asn_cache = ASN_FIRST_VERSION;
@@ -53,6 +76,7 @@ extern inline void get_mmu_context(struct task_struct *p)
         mm->context = asn;            /* full version + asn */
         p->tss.asn = asn & ~ASN_VERSION_MASK;    /* just asn */
     }
+#endif
 }
 #endif
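
Under this scheme mm->context packs a generation ("version") number above ASN_VERSION_SHIFT together with the ASN proper in the low bits, and copy_thread() zeroing p->mm->context in the first hunk forces a fresh allocation on the first context switch. A user-space simulation of the allocator, assuming the EV5 value MAX_ASN = 127 (a sketch for illustration, not kernel code):

    #include <stdio.h>

    #define ASN_VERSION_SHIFT 16
    #define ASN_VERSION_MASK ((~0UL) << ASN_VERSION_SHIFT)
    #define ASN_FIRST_VERSION (1UL << ASN_VERSION_SHIFT)
    #define MAX_ASN 127                             /* EV5 */

    static unsigned long asn_cache = ASN_FIRST_VERSION;

    /* Mirrors get_mmu_context(): returns the new mm->context value
     * for a task whose current context is "asn". */
    static unsigned long new_context(unsigned long asn)
    {
        if ((asn ^ asn_cache) & ASN_VERSION_MASK) {     /* stale version? */
            asn = asn_cache++;
            if ((asn & ~ASN_VERSION_MASK) > MAX_ASN) {
                /* the kernel does tbiap(); imb(); here */
                asn_cache = (asn_cache & ASN_VERSION_MASK) + ASN_FIRST_VERSION;
                if (!asn_cache)
                    asn_cache = ASN_FIRST_VERSION;
                asn = asn_cache++;
            }
        }
        return asn;
    }

    int main(void)
    {
        unsigned long ctx = new_context(0);     /* 0: as set by copy_thread() */
        printf("version %lu, asn %lu\n",
            ctx >> ASN_VERSION_SHIFT, ctx & ~ASN_VERSION_MASK);
        return 0;
    }

Bumping the version after the TB flush retires every previously handed-out ASN in one step: stale tasks fail the version comparison and pick up a new ASN the next time they run.
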
@@ -51,7 +51,7 @@ typedef unsigned long pgprot_t;
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr)    (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-#define PAGE_OFFSET        0xFFFFFC0000000000
+#define PAGE_OFFSET        0xFFFFFC0000000000UL
 #define MAP_NR(addr)        ((((unsigned long) (addr)) - PAGE_OFFSET) >> PAGE_SHIFT)
 #endif /* __KERNEL__ */
......
@@ -9,13 +9,7 @@
  * in <asm/page.h> (currently 8192).
  */
-extern void tbi(long type, ...);
-#define tbisi(x)    tbi(1,(x))
-#define tbisd(x)    tbi(2,(x))
-#define tbis(x)        tbi(3,(x))
-#define tbiap()        tbi(-1)
-#define tbia()        tbi(-2)
+#include <asm/system.h>
 /*
  * Invalidate current user mapping.
......
@@ -59,6 +59,8 @@ extern void alpha_switch_to(unsigned long pctxp);
     alpha_switch_to((unsigned long) &(p)->tss - 0xfffffc0000000000); \
 } while (0)
+extern void imb(void);
 #define mb() \
 __asm__ __volatile__("mb": : :"memory")
@@ -97,6 +99,17 @@ __old_ipl; })
 #define save_flags(flags) do { flags = getipl(); } while (0)
 #define restore_flags(flags) setipl(flags)
+/*
+ * TB routines..
+ */
+extern void tbi(long type, ...);
+#define tbisi(x)    tbi(1,(x))
+#define tbisd(x)    tbi(2,(x))
+#define tbis(x)        tbi(3,(x))
+#define tbiap()        tbi(-1)
+#define tbia()        tbi(-2)
 /*
  * Give prototypes to shut up gcc.
  */
......
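
The TB primitives thus move from <asm/pgtable.h> into <asm/system.h>, next to the new imb() prototype, so a header like mmu_context.h can pick both up with one include. As a hedged gloss on why the two are paired in get_mmu_context() (my reading of the Alpha PALcode interface, not text from this commit):

    tbiap();    /* invalidate all non-ASM, i.e. per-process, TB entries */
    imb();      /* instruction-memory barrier: bring the icache back in sync */
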
@@ -98,8 +98,7 @@ struct vm_operations_struct {
     void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
     int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
     void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
-    unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address,
-        unsigned long page, int write_access);
+    unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
     unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
         unsigned long page);
     int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
......
@@ -39,6 +39,16 @@ static inline unsigned long _page_hashfn(struct inode * inode, unsigned long off
 #define page_hash(inode,offset) page_hash_table[_page_hashfn(inode,offset)]
+static inline int page_age_update(struct page * page, int accessed)
+{
+    unsigned int age = page->age;
+    if (accessed)
+        age |= PAGE_AGE_VALUE << 1;
+    age >>= 1;
+    page->age = age;
+    return age > (PAGE_AGE_VALUE >> 1);
+}
 static inline struct page * find_page(struct inode * inode, unsigned long offset)
 {
     struct page *page;
......
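
page_age_update() is an exponential-decay counter: each call halves the age, and an access first ORs in PAGE_AGE_VALUE << 1 so that a referenced page comes out at or above PAGE_AGE_VALUE. A worked example, assuming for illustration that PAGE_AGE_VALUE is 64 (the real constant is defined in the swap headers, not shown in this diff):

    /* accessed:   age = (age | 128) >> 1  ->  at least 64 > 32: returns 1
     * untouched:  age =  age        >> 1
     *
     * Starting from 64, a single idle pass yields 32, and "32 > 32" is
     * false: one unreferenced scan makes the page a reclaim candidate,
     * while any access immediately rejuvenates it.  shrink_mmap() in the
     * next hunk applies the same decay, just testing the pre-shift value. */
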
@@ -88,7 +88,7 @@ int shrink_mmap(int priority, unsigned long limit)
         if (page->count != 1)
             age |= PAGE_AGE_VALUE << 1;
         page->age = age >> 1;
-        if (age <= PAGE_AGE_VALUE/2) {
+        if (age < PAGE_AGE_VALUE) {
             remove_page_from_hash_queue(page);
             remove_page_from_inode_queue(page);
             free_page(page_address(page));
@@ -147,51 +147,47 @@ void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf,
 }
 /*
- * Find a cached page and wait for it to become up-to-date, return
- * the page address.
- *
- * If no cached page can be found, create one using the supplied
- * new page instead (and return zero to indicate that we used the
- * supplied page in doing so).
+ * Try to read ahead in the file. "page_cache" is a potentially free page
+ * that we could use for the cache (if it is 0 we can try to create one,
+ * this is all overlapped with the IO on the previous page finishing anyway)
  */
-static unsigned long fill_page(struct inode * inode, unsigned long offset, unsigned long newpage)
+static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
 {
     struct page * page;
+    if (!page_cache) {
+        page_cache = __get_free_page(GFP_KERNEL);
+        if (!page_cache)
+            return 0;
+    }
+#ifdef readahead_makes_sense_due_to_asynchronous_reads
+    offset = (offset + PAGE_SIZE) & PAGE_MASK;
     page = find_page(inode, offset);
     if (page) {
-        if (!page->uptodate)
-            sleep_on(&page->wait);
-        return page_address(page);
+        page->count--;
+        return page_cache;
     }
-    page = mem_map + MAP_NR(newpage);
+    /*
+     * Ok, add the new page to the hash-queues...
+     */
+    page = mem_map + MAP_NR(page_cache);
     page->count++;
     page->uptodate = 0;
     page->error = 0;
     page->offset = offset;
     add_page_to_inode_queue(inode, page);
     add_page_to_hash_queue(inode, page);
-    inode->i_op->readpage(inode, page);
-    page->uptodate = 1;
-    wake_up(&page->wait);
-    return 0;
-}
-/*
- * Try to read ahead in the file. "page_cache" is a potentially free page
- * that we could use for the cache (if it is 0 we can try to create one,
- * this is all overlapped with the IO on the previous page finishing anyway)
- */
-static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
-{
-    if (!page_cache)
-        page_cache = __get_free_page(GFP_KERNEL);
-    offset = (offset + PAGE_SIZE) & PAGE_MASK;
-    /*
-     * read-ahead is not implemented yet, but this is
-     * where we should start..
+    /*
+     * And start IO on it..
+     * (this should be asynchronous, but currently isn't)
     */
+    inode->i_op->readpage(inode, page);
+    free_page(page_cache);
+    return 0;
+#else
     return page_cache;
+#endif
 }
 /*
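
The page_cache argument lets a caller recycle one spare page across consecutive calls instead of going to the allocator each time; whatever is left over at the end must be freed by the caller. Sketch of the intended calling convention (illustrative; the surrounding read loop is invented):

    unsigned long page_cache = 0;

    while (bytes_left) {        /* hypothetical read loop */
        /* ... wait for the current page, copy it to user space ... */
        page_cache = try_to_read_ahead(inode, offset, page_cache);
        offset += PAGE_SIZE;
    }
    if (page_cache)
        free_page(page_cache);  /* return the unused spare */
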
@@ -293,43 +289,65 @@ int generic_file_read(struct inode * inode, struct file * filp, char * buf, int
     return read;
 }
+/*
+ * Find a cached page and wait for it to become up-to-date, return
+ * the page address.
+ */
+static inline unsigned long fill_page(struct inode * inode, unsigned long offset)
+{
+    struct page * page;
+    unsigned long new_page;
+    page = find_page(inode, offset);
+    if (page)
+        goto found_page;
+    new_page = __get_free_page(GFP_KERNEL);
+    page = find_page(inode, offset);
+    if (page) {
+        if (new_page)
+            free_page(new_page);
+        goto found_page;
+    }
+    if (!new_page)
+        return 0;
+    page = mem_map + MAP_NR(new_page);
+    new_page = 0;
+    page->count++;
+    page->uptodate = 0;
+    page->error = 0;
+    page->offset = offset;
+    add_page_to_inode_queue(inode, page);
+    add_page_to_hash_queue(inode, page);
+    inode->i_op->readpage(inode, page);
+found_page:
+    if (!page->uptodate)
+        sleep_on(&page->wait);
+    return page_address(page);
+}
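
fill_page() calls find_page() twice on purpose: __get_free_page(GFP_KERNEL) may sleep, and another process can insert the very same page into the cache in the meantime, so the lookup is repeated after the allocation and the spare page is dropped if it lost the race. The pattern in miniature (illustrative; add_to_cache() is a hypothetical stand-in for the hash/inode-queue insertion above):

    page = find_page(inode, offset);                /* fast path: already cached */
    if (!page) {
        new_page = __get_free_page(GFP_KERNEL);     /* may sleep! */
        page = find_page(inode, offset);            /* did someone beat us to it? */
        if (!page && new_page)
            page = add_to_cache(inode, offset, new_page);
    }
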
 /*
  * Semantics for shared and private memory areas are different past the end
  * of the file. A shared mapping past the last page of the file is an error
  * and results in a SIGBUS, while a private mapping just maps in a zero page.
  */
-static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
-    unsigned long page, int no_share)
+static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address, int no_share)
 {
     unsigned long offset;
     struct inode * inode = area->vm_inode;
-    unsigned long new_page;
+    unsigned long page;
     offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
     if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm) {
         send_sig(SIGBUS, current, 1);
         return 0;
     }
-    new_page = fill_page(inode, offset, page);
-    if (new_page) {
-        if (no_share) {
-            memcpy((void *) page, (void *) new_page, PAGE_SIZE);
-            free_page(new_page);
-            return page;
-        }
+    page = fill_page(inode, offset);
+    if (page && no_share) {
+        unsigned long new_page = __get_free_page(GFP_KERNEL);
+        if (new_page)
+            memcpy((void *) new_page, (void *) page, PAGE_SIZE);
         free_page(page);
+        return new_page;
     }
-    if (no_share) {
-        new_page = __get_free_page(GFP_USER);
-        if (!new_page) {
-            oom(current);
-            new_page = pte_page(BAD_PAGE);
-        }
-        memcpy((void *) new_page, (void *) page, PAGE_SIZE);
-        free_page(page);
-        page = new_page;
-    }
     return page;
 }
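
Because fill_page() now hands back the page-cache page itself, a private (no_share) mapping must not receive it directly: a later write would scribble on file data shared with every other mapper. Hence the copy into a fresh page and the immediate release of the cache reference, roughly:

    /* no_share fault on a private mapping (sketch):
     *
     *   page cache:    [ cache page ]   <- shared by all mappers
     *                        | memcpy()
     *   faulting task: [ private copy ] <- mapped into the process
     */

Note that the old oom()/BAD_PAGE fallback disappears here: a failed allocation now simply returns 0, and do_no_page() below turns that into SIGBUS.
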
......
@@ -905,21 +905,19 @@ void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
         get_empty_page(tsk, vma, page_table);
         return;
     }
-    page = __get_free_page(GFP_KERNEL);
-    if (!page) {
-        oom(tsk);
-        put_page(page_table, BAD_PAGE);
-        return;
-    }
     ++tsk->maj_flt;
     ++vma->vm_mm->rss;
     /*
-     * The fourth argument is "no_share", which tells the low-level code
+     * The third argument is "no_share", which tells the low-level code
      * to copy, not share the page even if sharing is possible. It's
      * essentially an early COW detection
     */
-    page = vma->vm_ops->nopage(vma, address, page,
-        write_access && !(vma->vm_flags & VM_SHARED));
+    page = vma->vm_ops->nopage(vma, address, write_access && !(vma->vm_flags & VM_SHARED));
+    if (!page) {
+        send_sig(SIGBUS, current, 1);
+        put_page(page_table, BAD_PAGE);
+        return;
+    }
     /*
      * This silly early PAGE_DIRTY setting removes a race
      * due to the bad i386 page protection. But it's valid
......
@@ -22,6 +22,7 @@
 #include <linux/swap.h>
 #include <linux/fs.h>
 #include <linux/swapctl.h>
+#include <linux/pagemap.h>
 #include <asm/dma.h>
 #include <asm/system.h> /* for cli()/sti() */
@@ -420,17 +421,19 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
     pte_t pte;
     unsigned long entry;
     unsigned long page;
+    struct page * page_map;
     pte = *page_table;
     if (!pte_present(pte))
         return 0;
     page = pte_page(pte);
-    if (page >= high_memory)
+    if (MAP_NR(page) >= MAP_NR(high_memory))
         return 0;
     if (page >= limit)
         return 0;
-    if (mem_map[MAP_NR(page)].reserved)
+    page_map = mem_map + MAP_NR(page);
+    if (page_map->reserved)
         return 0;
     /* Deal with page aging. Pages age from being unused; they
      * rejuvinate on being accessed. Only swap old pages (age==0
@@ -438,11 +441,10 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
     if ((pte_dirty(pte) && delete_from_swap_cache(page))
         || pte_young(pte)) {
         set_pte(page_table, pte_mkold(pte));
-        touch_page(page);
+        page_age_update(page_map, 1);
         return 0;
     }
-    age_page(page);
-    if (age_of(page))
+    if (page_age_update(page_map, pte_young(pte)))
         return 0;
     if (pte_dirty(pte)) {
         if (vma->vm_ops && vma->vm_ops->swapout) {
@@ -451,7 +453,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
             if (vma->vm_ops->swapout(vma, address - vma->vm_start + vma->vm_offset, page_table))
                 kill_proc(pid, SIGBUS, 1);
         } else {
-            if (mem_map[MAP_NR(page)].count != 1)
+            if (page_map->count != 1)
                 return 0;
             if (!(entry = get_swap_page()))
                 return 0;
@@ -465,7 +467,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
         return 1;    /* we slept: the process may not exist any more */
     }
     if ((entry = find_in_swap_cache(page))) {
-        if (mem_map[MAP_NR(page)].count != 1) {
+        if (page_map->count != 1) {
             set_pte(page_table, pte_mkdirty(pte));
             printk("Aiee.. duplicated cached swap-cache entry\n");
             return 0;
......