Commit fb796d31 authored by Linus Torvalds

Merge http://linux-sound.bkbits.net/linux-sound

into home.transmeta.com:/home/torvalds/v2.5/linux
parents d9c28b28 3fddff46
......@@ -6,6 +6,7 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include "cpu.h"
......@@ -13,6 +14,11 @@ static int disable_x86_serial_nr __initdata = 1;
static int disable_P4_HT __initdata = 0;
extern int trap_init_f00f_bug(void);
#ifdef INTEL_MOVSL
struct movsl_mask movsl_mask; /* alignment at which movsl is preferred for
bulk memory copies */
#endif
/*
* Early probe support logic for ppro memory erratum #50
*
......@@ -348,6 +354,25 @@ static void __init init_intel(struct cpuinfo_x86 *c)
/* Work around errata */
Intel_errata_workarounds(c);
#ifdef INTEL_MOVSL
/*
* Set up the preferred alignment for movsl bulk memory moves
*/
switch (c->x86) {
case 4: /* 486: untested */
break;
case 5: /* Old Pentia: untested */
break;
case 6: /* PII/PIII only like movsl with 8-byte alignment */
movsl_mask.mask = 7;
break;
case 15: /* P4 is OK down to 8-byte alignment */
movsl_mask.mask = 7;
break;
}
#endif
}
......
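The INTEL_MOVSL block above publishes a per-CPU-family alignment mask that the bulk usercopy code can consult before choosing a rep;movsl copy. A minimal sketch of such a check, assuming a helper along these lines lives in the i386 usercopy code (the name and call site are not part of this diff):

/*
 * Sketch: movsl is only worth using when both pointers satisfy the
 * alignment recorded in movsl_mask (mask == 7 means 8-byte alignment).
 */
static inline int movsl_is_ok(const void *to, const void *from, unsigned long n)
{
#ifdef INTEL_MOVSL
	if (n >= 64 && (((unsigned long)to | (unsigned long)from) & movsl_mask.mask))
		return 0;	/* misaligned: prefer a byte/word copy loop */
#endif
	return 1;		/* small or aligned copy: movsl is fine */
}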
......@@ -21,8 +21,13 @@ struct dmi_header
u16 handle;
};
#undef DMI_DEBUG
#ifdef DMI_DEBUG
#define dmi_printk(x) printk x
#else
#define dmi_printk(x)
//#define dmi_printk(x) printk x
#endif
static char * __init dmi_string(struct dmi_header *dm, u8 s)
{
......@@ -832,7 +837,9 @@ static __init void dmi_check_blacklist(void)
static void __init dmi_decode(struct dmi_header *dm)
{
#ifdef DMI_DEBUG
u8 *data = (u8 *)dm;
#endif
switch(dm->type)
{
......
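The dmi_printk() wrapper uses the double-parenthesis idiom so that a whole printk argument list can be passed through one macro parameter and discarded entirely in the non-debug build. An illustrative call (the message text is made up; dm->type is taken from the switch above):

	/* expands to printk(KERN_DEBUG "DMI structure type %d\n", dm->type)
	 * when DMI_DEBUG is defined, and to nothing at all otherwise */
	dmi_printk((KERN_DEBUG "DMI structure type %d\n", dm->type));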
......@@ -116,8 +116,10 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__generic_copy_from_user);
EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(strnlen_user);
EXPORT_SYMBOL(pci_alloc_consistent);
......
......@@ -328,20 +328,21 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
irq_desc_t *desc = irq_desc + irq;
struct irqaction * action;
unsigned int status;
long esp;
irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
__asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
extern void show_stack(unsigned long *);
printk("do_IRQ: stack overflow: %ld\n",
esp - sizeof(struct task_struct));
__asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
show_stack((void *)esp);
{
long esp;
__asm__ __volatile__("andl %%esp,%0" :
"=r" (esp) : "0" (8191));
if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
printk("do_IRQ: stack overflow: %ld\n",
esp - sizeof(struct task_struct));
dump_stack();
}
}
#endif
kstat.irqs[cpu][irq]++;
......
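The overflow test relies on the i386 convention that the task_struct and its kernel stack share one 8 KB, 8 KB-aligned region, with the task_struct at the bottom and %esp growing down toward it, so esp & 8191 is the stack pointer's offset within that region. A small sketch of the arithmetic, under that assumption:

#define STACK_REGION_SIZE 8192	/* 8KB task_struct + kernel stack on i386 */

/* bytes of stack still unused before %esp would run into the task_struct */
static inline long stack_space_left(unsigned long esp, unsigned long ts_size)
{
	return (long)(esp & (STACK_REGION_SIZE - 1)) - (long)ts_size;
}

	/* the check in do_IRQ() fires when this drops below 1024 bytes */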
......@@ -19,10 +19,12 @@ void kunmap(struct page *page)
}
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps.
*
* However, when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
void *kmap_atomic(struct page *page, enum km_type type)
{
......
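The calling pattern the new comment has in mind is a short, non-sleeping critical section: map, touch the page, unmap, all on the same CPU. A minimal sketch, assuming a struct page *page is already in hand (the memset is only an illustrative operation):

	char *kaddr;

	kaddr = kmap_atomic(page, KM_USER0);	/* per-CPU slot, no global lock */
	memset(kaddr, 0, PAGE_SIZE);		/* must not sleep between map and unmap */
	kunmap_atomic(kaddr, KM_USER0);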
......@@ -118,7 +118,6 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
while ((entry = next) != hash_list) {
next = entry->next;
prefetch(next);
drq = list_entry_hash(entry);
......@@ -193,8 +192,6 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
while ((entry = entry->prev) != sort_list) {
__rq = list_entry_rq(entry);
prefetch(entry->prev);
BUG_ON(__rq->flags & REQ_STARTED);
if (!(__rq->flags & REQ_CMD))
......@@ -302,8 +299,6 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
struct list_head *nxt = rq->queuelist.next;
int this_rq_cost;
prefetch(nxt);
/*
* take it off the sort and fifo list, move
* to dispatch queue
......
......@@ -37,6 +37,13 @@
#include <asm/uaccess.h>
/* Command group 3 is reserved and should never be used. */
const unsigned char scsi_command_size[8] =
{
6, 10, 10, 12,
16, 12, 10, 10
};
#define BLK_DEFAULT_TIMEOUT (60 * HZ)
int blk_do_rq(request_queue_t *q, struct block_device *bdev, struct request *rq)
......@@ -468,3 +475,4 @@ int scsi_cmd_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long ar
}
EXPORT_SYMBOL(scsi_cmd_ioctl);
EXPORT_SYMBOL(scsi_command_size);
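scsi_command_size[] is indexed by the SCSI command group, i.e. the top three bits of the opcode, which is why the reserved group 3 still needs a placeholder entry. The COMMAND_SIZE() macro in the SCSI headers amounts to this lookup:

#define COMMAND_SIZE(opcode)	scsi_command_size[((opcode) >> 5) & 7]

	/* e.g. READ_10 (opcode 0x28) is in group 1, so COMMAND_SIZE(0x28) == 10 */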
......@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
#include <linux/tqueue.h>
#else
......
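The compile-time branch works because KERNEL_VERSION() packs the three version components into one comparable integer, and including <linux/version.h> is what makes LINUX_VERSION_CODE visible here:

/* from <linux/version.h> */
#define KERNEL_VERSION(a,b,c)	(((a) << 16) + ((b) << 8) + (c))

	/* 2.5.41 -> (2 << 16) + (5 << 8) + 41 == 0x020529 */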
......@@ -1803,11 +1803,11 @@ static void vortex_tx_timeout(struct net_device *dev)
dev->name, inb(ioaddr + TxStatus),
inw(ioaddr + EL3_STATUS));
EL3WINDOW(4);
printk(KERN_ERR " diagnostics: net %04x media %04x dma %08lx fifo %04x\n",
inw(ioaddr + Wn4_NetDiag),
inw(ioaddr + Wn4_Media),
inl(ioaddr + PktStatus),
inw(ioaddr + Wn4_FIFODiag));
printk(KERN_ERR " diagnostics: net %04x media %04x dma %08x fifo %04x\n",
(unsigned)inw(ioaddr + Wn4_NetDiag),
(unsigned)inw(ioaddr + Wn4_Media),
(unsigned)inl(ioaddr + PktStatus),
(unsigned)inw(ioaddr + Wn4_FIFODiag));
/* Slight code bloat to be user friendly. */
if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
......@@ -2643,8 +2643,8 @@ dump_tx_ring(struct net_device *dev)
vp->full_bus_master_tx,
vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
printk(KERN_ERR " Transmit list %8.8lx vs. %p.\n",
inl(ioaddr + DownListPtr),
printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
(unsigned)inl(ioaddr + DownListPtr),
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
issue_and_wait(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
......
......@@ -123,12 +123,6 @@ struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = {
*/
unsigned long scsi_pid;
Scsi_Cmnd *last_cmnd;
/* Command group 3 is reserved and should never be used. */
const unsigned char scsi_command_size[8] =
{
6, 10, 10, 12,
16, 12, 10, 10
};
static unsigned long serial_number;
struct softscsi_data {
......
......@@ -751,15 +751,9 @@ static int scsi_init_io(Scsi_Cmnd *SCpnt)
int count, gfp_mask;
/*
* non-sg block request. FIXME: check bouncing for isa hosts!
* if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
*/
if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
/*
* FIXME: isa bouncing
*/
if (SCpnt->host->unchecked_isa_dma)
goto fail;
SCpnt->request_bufflen = req->data_len;
SCpnt->request_buffer = req->data;
req->buffer = req->data;
......@@ -816,7 +810,6 @@ static int scsi_init_io(Scsi_Cmnd *SCpnt)
/*
* kill it. there should be no leftover blocks in this request
*/
fail:
SCpnt = scsi_end_request(SCpnt, 0, req->nr_sectors);
BUG_ON(SCpnt);
out:
......
......@@ -39,7 +39,6 @@ EXPORT_SYMBOL(scsi_partsize);
EXPORT_SYMBOL(scsi_bios_ptable);
EXPORT_SYMBOL(scsi_allocate_device);
EXPORT_SYMBOL(scsi_do_cmd);
EXPORT_SYMBOL(scsi_command_size);
EXPORT_SYMBOL(scsi_ioctl);
EXPORT_SYMBOL(print_command);
EXPORT_SYMBOL(print_sense);
......
......@@ -608,7 +608,7 @@ void kick_iocb(struct kiocb *iocb)
}
if (!kiocbTryKick(iocb)) {
long flags;
unsigned long flags;
spin_lock_irqsave(&ctx->ctx_lock, flags);
list_add_tail(&iocb->ki_run_list, &ctx->run_list);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
......
......@@ -120,7 +120,7 @@ blkdev_direct_IO(int rw, struct file *file, const struct iovec *iov,
{
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return generic_direct_IO(rw, inode, iov, offset,
return generic_direct_IO(rw, inode, inode->i_bdev, iov, offset,
nr_segs, blkdev_get_blocks);
}
......@@ -308,6 +308,7 @@ struct block_device *bdget(dev_t dev)
new_bdev->bd_dev = dev;
new_bdev->bd_contains = NULL;
new_bdev->bd_inode = inode;
new_bdev->bd_block_size = (1 << inode->i_blkbits);
new_bdev->bd_part_count = 0;
new_bdev->bd_invalidated = 0;
inode->i_mode = S_IFBLK;
......
......@@ -624,7 +624,7 @@ ext2_direct_IO(int rw, struct file *file, const struct iovec *iov,
{
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return generic_direct_IO(rw, inode, iov,
return generic_direct_IO(rw, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, ext2_get_blocks);
}
......
......@@ -1431,7 +1431,7 @@ static int ext3_direct_IO(int rw, struct file *file,
}
}
ret = generic_direct_IO(rw, inode, iov, offset,
ret = generic_direct_IO(rw, inode, inode->i_sb->s_bdev, iov, offset,
nr_segs, ext3_direct_io_get_blocks);
out_stop:
......
......@@ -986,6 +986,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto out_fail;
blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
if (!blocksize) {
printk(KERN_ERR "EXT3-fs: unable to set blocksize\n");
goto out_fail;
}
/*
* The ext3 superblock will not be buffer aligned for other than 1kB
......
......@@ -226,7 +226,7 @@ __writeback_single_inode(struct inode *inode, int sync,
* The inodes to be written are parked on sb->s_io. They are moved back onto
* sb->s_dirty as they are selected for writing. This way, none can be missed
* on the writer throttling path, and we get decent balancing between many
* throlttled threads: we don't want them all piling up on __wait_on_inode.
* throttled threads: we don't want them all piling up on __wait_on_inode.
*/
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
......
......@@ -315,7 +315,7 @@ static int jfs_direct_IO(int rw, struct file *file, const struct iovec *iov,
{
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return generic_direct_IO(rw, inode, iov,
return generic_direct_IO(rw, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, jfs_get_blocks);
}
......
......@@ -260,7 +260,8 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
int simple_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
......@@ -277,3 +278,48 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode
}
return 0;
}
int simple_readpage(struct file *file, struct page *page)
{
void *kaddr;
if (PageUptodate(page))
goto out;
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr, 0, PAGE_CACHE_SIZE);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
SetPageUptodate(page);
out:
unlock_page(page);
return 0;
}
int simple_prepare_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
if (!PageUptodate(page)) {
if (to - from != PAGE_CACHE_SIZE) {
void *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr, 0, from);
memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
}
SetPageUptodate(page);
}
return 0;
}
int simple_commit_write(struct file *file, struct page *page,
unsigned offset, unsigned to)
{
struct inode *inode = page->mapping->host;
loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
if (pos > inode->i_size)
inode->i_size = pos;
set_page_dirty(page);
return 0;
}
......@@ -643,7 +643,7 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
goto cleanup_file;
}
f->f_ra.ra_pages = inode->i_mapping->backing_dev_info->ra_pages;
file_ra_state_init(&f->f_ra, inode->i_mapping);
f->f_dentry = dentry;
f->f_vfsmnt = mnt;
f->f_pos = 0;
......
......@@ -39,6 +39,7 @@
#include <linux/seq_file.h>
#include <linux/times.h>
#include <linux/profile.h>
#include <linux/blkdev.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
......@@ -404,10 +405,14 @@ static int kstat_read_proc(char *page, char **start, off_t off,
len += sprintf(page + len,
"\nctxt %lu\n"
"btime %lu\n"
"processes %lu\n",
"processes %lu\n"
"procs_running %lu\n"
"procs_blocked %u\n",
nr_context_switches(),
xtime.tv_sec - jif / HZ,
total_forks);
total_forks,
nr_running(),
atomic_read(&nr_iowait_tasks));
return proc_calc_metrics(page, start, off, count, eof, len);
}
......
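With the two extra format entries, the tail of /proc/stat gains procs_running and procs_blocked lines after processes; roughly (the numbers below are illustrative only):

	ctxt 2374985
	btime 1035246000
	processes 10634
	procs_running 2
	procs_blocked 0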
......@@ -47,48 +47,6 @@ static struct backing_dev_info ramfs_backing_dev_info = {
.memory_backed = 1, /* Does not contribute to dirty memory */
};
/*
* Read a page. Again trivial. If it didn't already exist
* in the page cache, it is zero-filled.
*/
static int ramfs_readpage(struct file *file, struct page * page)
{
if (!PageUptodate(page)) {
char *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr, 0, PAGE_CACHE_SIZE);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
SetPageUptodate(page);
}
unlock_page(page);
return 0;
}
static int ramfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
if (!PageUptodate(page)) {
char *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr, 0, PAGE_CACHE_SIZE);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
SetPageUptodate(page);
}
set_page_dirty(page);
return 0;
}
static int ramfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
struct inode *inode = page->mapping->host;
loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
if (pos > inode->i_size)
inode->i_size = pos;
return 0;
}
struct inode *ramfs_get_inode(struct super_block *sb, int mode, int dev)
{
struct inode * inode = new_inode(sb);
......@@ -175,10 +133,10 @@ static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char *
}
static struct address_space_operations ramfs_aops = {
.readpage = ramfs_readpage,
.readpage = simple_readpage,
.writepage = fail_writepage,
.prepare_write = ramfs_prepare_write,
.commit_write = ramfs_commit_write
.prepare_write = simple_prepare_write,
.commit_write = simple_commit_write
};
static struct file_operations ramfs_file_operations = {
......
......@@ -607,8 +607,8 @@ linvfs_direct_IO(
{
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return generic_direct_IO(rw, inode, iov, offset, nr_segs,
linvfs_get_blocks_direct);
return generic_direct_IO(rw, inode, NULL,
iov, offset, nr_segs, linvfs_get_blocks_direct);
}
......
......@@ -258,7 +258,7 @@ static inline void clear_in_cr4 (unsigned long mask)
#define TASK_UNMAPPED_32 0x40000000
#define TASK_UNMAPPED_64 (TASK_SIZE/3)
#define TASK_UNMAPPED_BASE \
(test_thread_flags(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
/*
* Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
......
......@@ -22,30 +22,35 @@ struct request_list {
wait_queue_head_t wait;
};
/*
* try to put the fields that are referenced together in the same cacheline
*/
struct request {
struct list_head queuelist; /* looking for ->queue? you must _not_
* access it directly, use
* blkdev_dequeue_request! */
int ref_count;
void *elevator_private;
unsigned long flags; /* see REQ_ bits below */
unsigned char cmd[16];
kdev_t rq_dev;
sector_t sector;
unsigned long nr_sectors;
unsigned int current_nr_sectors;
unsigned long flags; /* see REQ_ bits below */
void *elevator_private;
int rq_status; /* should split this into a few status bits */
kdev_t rq_dev;
struct gendisk *rq_disk;
int errors;
sector_t sector;
unsigned long start_time;
unsigned long nr_sectors;
sector_t hard_sector; /* the hard_* are block layer
* internals, no driver should
* touch them
*/
unsigned long hard_nr_sectors;
unsigned int hard_cur_sectors;
struct bio *bio;
struct bio *biotail;
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
......@@ -59,13 +64,21 @@ struct request {
*/
unsigned short nr_hw_segments;
unsigned int current_nr_sectors;
unsigned int hard_cur_sectors;
int tag;
void *special;
char *buffer;
/* For packet commands */
int ref_count;
request_queue_t *q;
struct request_list *rl;
struct completion *waiting;
void *special;
/*
* when request is used as a packet command carrier
*/
unsigned char cmd[16];
unsigned int data_len;
void *data;
......@@ -73,10 +86,6 @@ struct request {
void *sense;
unsigned int timeout;
struct completion *waiting;
struct bio *bio, *biotail;
request_queue_t *q;
struct request_list *rl;
};
/*
......
......@@ -1250,10 +1250,12 @@ ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov,
extern ssize_t generic_file_sendfile(struct file *, struct file *, loff_t *, size_t);
extern void do_generic_mapping_read(struct address_space *, struct file_ra_state *, struct file *,
loff_t *, read_descriptor_t *, read_actor_t);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern ssize_t generic_file_direct_IO(int rw, struct file *file,
const struct iovec *iov, loff_t offset, unsigned long nr_segs);
extern int generic_direct_IO(int rw, struct inode *inode, const struct iovec
*iov, loff_t offset, unsigned long nr_segs, get_blocks_t *get_blocks);
extern int generic_direct_IO(int rw, struct inode *inode, struct block_device *bdev,
const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_blocks_t *get_blocks);
extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos);
ssize_t generic_file_writev(struct file *filp, const struct iovec *iov,
......@@ -1311,6 +1313,12 @@ extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
extern int simple_sync_file(struct file *, struct dentry *, int);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
extern int simple_prepare_write(struct file *file, struct page *page,
unsigned offset, unsigned to);
extern int simple_commit_write(struct file *file, struct page *page,
unsigned offset, unsigned to);
extern struct dentry *simple_lookup(struct inode *, struct dentry *);
extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
extern struct file_operations simple_dir_operations;
......
......@@ -863,6 +863,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/*
* Check for pending SIGKILL! The new thread should not be allowed
* to slip out of an OOM kill. (or normal SIGKILL.)
*/
if (sigismember(&current->pending.signal, SIGKILL)) {
write_unlock_irq(&tasklist_lock);
goto bad_fork_cleanup_namespace;
}
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & CLONE_PARENT)
......
......@@ -228,6 +228,7 @@ EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_file_read);
EXPORT_SYMBOL(generic_file_sendfile);
EXPORT_SYMBOL(do_generic_mapping_read);
EXPORT_SYMBOL(file_ra_state_init);
EXPORT_SYMBOL(generic_file_write);
EXPORT_SYMBOL(generic_file_write_nolock);
EXPORT_SYMBOL(generic_file_mmap);
......@@ -306,6 +307,9 @@ EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_rmdir);
EXPORT_SYMBOL(simple_rename);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_prepare_write);
EXPORT_SYMBOL(simple_commit_write);
EXPORT_SYMBOL(simple_empty);
EXPORT_SYMBOL(fd_install);
EXPORT_SYMBOL(put_unused_fd);
......
......@@ -282,17 +282,9 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
break;
nr_found = __lookup(root, results + ret, cur_index,
max_items - ret, &next_index, max_index);
if (nr_found == 0) {
if (!(cur_index & RADIX_TREE_MAP_MASK))
break;
/*
* It could be that there simply were no items to the
* right of `cur_index' in the leaf node. So we still
* need to search for additional nodes to the right of
* this one.
*/
}
ret += nr_found;
if (next_index == max_index)
break;
cur_index = next_index;
}
out:
......
......@@ -61,6 +61,10 @@
* ->mapping->page_lock
* ->inode_lock
* ->sb_lock (fs/fs-writeback.c)
* ->page_table_lock
* ->swap_device_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->page_lock (try_to_unmap_one)
*/
/*
......
......@@ -399,6 +399,10 @@ static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
return 0;
}
/*
* The caller must hold down_write(current->mm->mmap_sem).
*/
unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, unsigned long pgoff)
......
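The new comment makes the locking contract explicit: callers are expected to take mmap_sem for writing around the call, as do_mmap() itself does. A minimal sketch of a conforming caller, assuming file, len and pgoff are already set up and ignoring error handling:

	unsigned long addr;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file, 0, len, PROT_READ, MAP_PRIVATE, pgoff);
	up_write(&current->mm->mmap_sem);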
......@@ -175,9 +175,13 @@ static void oom_kill(void)
if (p == NULL)
panic("Out of memory and no killable processes...\n");
/* kill all processes that share the ->mm (i.e. all threads) */
oom_kill_task(p);
/*
* kill all processes that share the ->mm (i.e. all threads),
* but are in a different thread group
*/
do_each_thread(g, q)
if (q->mm == p->mm)
if (q->mm == p->mm && q->tgid != p->tgid)
oom_kill_task(q);
while_each_thread(g, q);
......
......@@ -59,6 +59,14 @@ static inline int bad_range(struct zone *zone, struct page *page)
return 0;
}
static void bad_page(const char *function, struct page *page)
{
printk("Bad page state at %s\n", function);
printk("flags:0x%08lx mapping:%p mapped:%d count:%d\n",
page->flags, page->mapping,
page_mapped(page), page_count(page));
}
/*
* Freeing function for a buddy system allocator.
*
......@@ -91,16 +99,19 @@ void __free_pages_ok (struct page *page, unsigned int order)
mod_page_state(pgfree, 1<<order);
BUG_ON(PageLRU(page));
BUG_ON(PagePrivate(page));
BUG_ON(page->mapping != NULL);
BUG_ON(PageLocked(page));
BUG_ON(PageActive(page));
BUG_ON(PageWriteback(page));
BUG_ON(page->pte.direct != 0);
if ( page_mapped(page) ||
page->mapping != NULL ||
page_count(page) != 0 ||
(page->flags & (
1 << PG_lru |
1 << PG_private |
1 << PG_locked |
1 << PG_active |
1 << PG_writeback )))
bad_page(__FUNCTION__, page);
if (PageDirty(page))
ClearPageDirty(page);
BUG_ON(page_count(page) != 0);
if (unlikely(current->flags & PF_FREE_PAGES)) {
if (!current->nr_local_pages && !in_interrupt()) {
......@@ -181,14 +192,17 @@ expand(struct zone *zone, struct page *page,
*/
static inline void prep_new_page(struct page *page)
{
BUG_ON(page->mapping);
BUG_ON(PagePrivate(page));
BUG_ON(PageLocked(page));
BUG_ON(PageLRU(page));
BUG_ON(PageActive(page));
BUG_ON(PageDirty(page));
BUG_ON(PageWriteback(page));
BUG_ON(page->pte.direct != 0);
if ( page->mapping ||
page_mapped(page) ||
(page->flags & (
1 << PG_private |
1 << PG_locked |
1 << PG_lru |
1 << PG_active |
1 << PG_dirty |
1 << PG_writeback )))
bad_page(__FUNCTION__, page);
page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
1 << PG_referenced | 1 << PG_arch_1 |
1 << PG_checked);
......
......@@ -19,6 +19,16 @@ struct backing_dev_info default_backing_dev_info = {
.state = 0,
};
/*
* Initialise a struct file's readahead state
*/
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
memset(ra, 0, sizeof(*ra));
ra->ra_pages = mapping->backing_dev_info->ra_pages;
}
/*
* Return max readahead size for this inode in number-of-pages.
*/
......
......@@ -53,7 +53,34 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
clear_page_dirty(page);
ClearPageUptodate(page);
remove_from_page_cache(page);
page_cache_release(page);
page_cache_release(page); /* pagecache ref */
}
/*
* This is for invalidate_inode_pages(). That function can be called at
* any time, and is not supposed to throw away dirty pages. But pages can
* be marked dirty at any time too. So we re-check the dirtiness inside
* ->page_lock. That provides exclusion against the __set_page_dirty
* functions.
*/
static void
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
if (page->mapping != mapping)
return;
if (PagePrivate(page) && !try_to_release_page(page, 0))
return;
write_lock(&mapping->page_lock);
if (PageDirty(page)) {
write_unlock(&mapping->page_lock);
} else {
__remove_from_page_cache(page);
write_unlock(&mapping->page_lock);
ClearPageUptodate(page);
page_cache_release(page); /* pagecache ref */
}
}
/**
......@@ -172,11 +199,9 @@ void invalidate_inode_pages(struct address_space *mapping)
next++;
if (PageDirty(page) || PageWriteback(page))
goto unlock;
if (PagePrivate(page) && !try_to_release_page(page, 0))
goto unlock;
if (page_mapped(page))
goto unlock;
truncate_complete_page(mapping, page);
invalidate_complete_page(mapping, page);
unlock:
unlock_page(page);
}
......@@ -213,7 +238,7 @@ void invalidate_inode_pages2(struct address_space *mapping)
if (page_mapped(page))
clear_page_dirty(page);
else
truncate_complete_page(mapping, page);
invalidate_complete_page(mapping, page);
}
unlock_page(page);
}
......
......@@ -31,6 +31,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/topology.h>
#include <asm/div64.h>
#include <linux/swapops.h>
......@@ -85,7 +86,7 @@ struct shrinker {
shrinker_t shrinker;
struct list_head list;
int seeks; /* seeks to recreate an obj */
int nr; /* objs pending delete */
long nr; /* objs pending delete */
};
static LIST_HEAD(shrinker_list);
......@@ -121,7 +122,7 @@ void remove_shrinker(struct shrinker *shrinker)
kfree(shrinker);
}
#define SHRINK_BATCH 32
#define SHRINK_BATCH 128
/*
* Call the shrink functions to age shrinkable caches
*
......@@ -134,29 +135,27 @@ void remove_shrinker(struct shrinker *shrinker)
* slab to avoid swapping.
*
* FIXME: do not do for zone highmem
*
* We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
*/
static int shrink_slab(int scanned, unsigned int gfp_mask)
static int shrink_slab(long scanned, unsigned int gfp_mask)
{
struct list_head *lh;
int pages;
struct shrinker *shrinker;
long pages;
if (down_trylock(&shrinker_sem))
return 0;
pages = nr_used_zone_pages();
list_for_each(lh, &shrinker_list) {
struct shrinker *shrinker;
int entries;
unsigned long delta;
shrinker = list_entry(lh, struct shrinker, list);
entries = (*shrinker->shrinker)(0, gfp_mask);
if (!entries)
continue;
delta = scanned * shrinker->seeks * entries;
shrinker->nr += delta / (pages + 1);
list_for_each_entry(shrinker, &shrinker_list, list) {
long long delta;
delta = scanned * shrinker->seeks;
delta *= (*shrinker->shrinker)(0, gfp_mask);
do_div(delta, pages + 1);
shrinker->nr += delta;
if (shrinker->nr > SHRINK_BATCH) {
int nr = shrinker->nr;
long nr = shrinker->nr;
shrinker->nr = 0;
(*shrinker->shrinker)(nr, gfp_mask);
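The move to a long long delta reduced with do_div() matters because the old code computed scanned * seeks * entries in 32-bit arithmetic. A quick worked example: with 200000 pages scanned, seeks == 2 and a cache reporting 300000 entries,

	200000 * 2 * 300000 = 1.2e11  >  2^31 - 1 (about 2.1e9)

so the 32-bit product silently wrapped before the division by pages + 1; accumulating into a 64-bit delta keeps the ratio exact.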
......@@ -824,7 +823,7 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
int i;
for (priority = DEF_PRIORITY; priority; priority--) {
int success = 1;
int all_zones_ok = 1;
for (i = 0; i < pgdat->nr_zones; i++) {
struct zone *zone = pgdat->node_zones + i;
......@@ -832,20 +831,24 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
int max_scan;
int to_reclaim;
to_reclaim = zone->pages_high - zone->free_pages;
if (nr_pages && to_free > 0)
if (nr_pages && to_free > 0) { /* Software suspend */
to_reclaim = min(to_free, SWAP_CLUSTER_MAX*8);
if (to_reclaim <= 0)
continue;
success = 0;
} else { /* Zone balancing */
to_reclaim = zone->pages_high-zone->free_pages;
if (to_reclaim <= 0)
continue;
}
all_zones_ok = 0;
max_scan = zone->nr_inactive >> priority;
if (max_scan < to_reclaim * 2)
max_scan = to_reclaim * 2;
if (max_scan < SWAP_CLUSTER_MAX)
max_scan = SWAP_CLUSTER_MAX;
to_free -= shrink_zone(zone, max_scan, GFP_KSWAPD,
to_reclaim, &nr_mapped, ps, priority);
shrink_slab(max_scan + nr_mapped, GFP_KSWAPD);
}
if (success)
if (all_zones_ok)
break;
blk_congestion_wait(WRITE, HZ/4);
}
......