Commit a9eb9eb7 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/net-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents e9fae7f4 e4cf8264
......@@ -1095,25 +1095,6 @@ config PCI_MMCONFIG
select ACPI_BOOT
default y
config PCI_USE_VECTOR
bool "Vector-based interrupt indexing (MSI)"
depends on X86_LOCAL_APIC && X86_IO_APIC
default n
help
This replaces the current existing IRQ-based index interrupt scheme
with the vector-base index scheme. The advantages of vector base
over IRQ base are listed below:
1) Support MSI implementation.
2) Support future IOxAPIC hotplug
Note that this allows the device drivers to enable MSI, Message
Signaled Interrupt, on all MSI capable device functions detected.
Message Signal Interrupt enables an MSI-capable hardware device to
send an inbound Memory Write on its PCI bus instead of asserting
IRQ signal on device IRQ pin.
If you don't know what to do here, say N.
source "drivers/pci/Kconfig"
config ISA
......
......@@ -206,10 +206,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
struct page *page;
page = pte_page(*(pte_t *)pmd);
if (page) {
if (page)
page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
get_page(page);
}
return page;
}
#endif
......
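The follow_huge_pmd() change above drops the extra brace block and keeps only the subpage arithmetic: page += (address & ~HPAGE_MASK) >> PAGE_SHIFT selects the struct page of the 4 KB subpage within the huge page. A minimal sketch of that index calculation, assuming i386 non-PAE values (HPAGE_SHIFT = 22, PAGE_SHIFT = 12); the helper name is made up:

/* Sketch: subpage index inside a huge page (assumed i386 non-PAE values). */
#define PAGE_SHIFT  12
#define HPAGE_SHIFT 22
#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))

static unsigned long subpage_index(unsigned long address)
{
        /* byte offset inside the 4 MB huge page, in 4 KB page units */
        return (address & ~HPAGE_MASK) >> PAGE_SHIFT;   /* 0..1023 */
}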
......@@ -361,16 +361,6 @@ config PCI
information about which PCI hardware does work under Linux and which
doesn't.
config PCI_USE_VECTOR
bool
default y if IA64
help
This enables MSI, Message Signaled Interrupt, on specific
MSI capable device functions detected upon requests from the
device drivers. Message Signal Interrupt enables an MSI-capable
hardware device to send an inbound Memory Write on its PCI bus
instead of asserting IRQ signal on device IRQ pin.
config PCI_DOMAINS
bool
default PCI
......
......@@ -114,8 +114,8 @@ copy_siginfo_from_user32 (siginfo_t *to, siginfo_t32 *from)
err |= __get_user(to->si_band, &from->si_band);
err |= __get_user(to->si_fd, &from->si_fd);
break;
case __SI_RT: /* This is not generated by the kernel as of now. */
case __SI_MESGQ:
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
err |= __get_user(to->si_int, &from->si_int);
......
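The switch in copy_siginfo_from_user32() runs over from->si_code >> 16, so case labels built from the unshifted __SI_RT and __SI_MESGQ class tags could never match; the hunk shifts the labels to agree with the switch expression. A sketch of why, using the tag values from the 2.6 asm-generic/siginfo.h (treat them as illustrative):

/* Sketch: class tags live in the top 16 bits of si_code. */
#define __SI_RT    (5 << 16)
#define __SI_MESGQ (6 << 16)

static int is_rt_or_mesgq(int si_code)
{
        switch (si_code >> 16) {        /* small class tag, 0..6 */
        case __SI_RT >> 16:             /* 5 -- now reachable */
        case __SI_MESGQ >> 16:          /* 6 -- now reachable */
                return 1;
        default:
                /* "case __SI_RT:" (0x50000) can never equal a value
                 * that has already been shifted down 16 bits */
                return 0;
        }
}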
......@@ -170,7 +170,6 @@ struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma,
ptep = huge_pte_offset(mm, addr);
page = pte_page(*ptep);
page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
get_page(page);
return page;
}
int pmd_huge(pmd_t pmd)
......
......@@ -358,8 +358,8 @@ static int copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from)
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_RT: /* This is not generated by the kernel as of now. */
case __SI_MESGQ:
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
......
......@@ -360,10 +360,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
BUG_ON(! pmd_hugepage(*pmd));
page = hugepte_page(*(hugepte_t *)pmd);
if (page) {
if (page)
page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
get_page(page);
}
return page;
}
......@@ -609,15 +607,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
}
static inline unsigned long computeHugeHptePP(unsigned int hugepte)
{
unsigned long flags = 0x2;
if (! (hugepte & _HUGEPAGE_RW))
flags |= 0x1;
return flags;
}
int hash_huge_page(struct mm_struct *mm, unsigned long access,
unsigned long ea, unsigned long vsid, int local)
{
......@@ -671,7 +660,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
old_pte = *ptep;
new_pte = old_pte;
hpteflags = computeHugeHptePP(hugepte_val(new_pte));
hpteflags = 0x2 | (! (hugepte_val(new_pte) & _HUGEPAGE_RW));
/* Check if pte already has an hpte (case 2) */
if (unlikely(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE)) {
......@@ -747,7 +736,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
hugepte_t pte, int local)
{
unsigned long vsid, vpn, va, hash, secondary, slot;
unsigned long vsid, vpn, va, hash, slot;
BUG_ON(hugepte_bad(pte));
BUG_ON(!in_hugepage_area(context, ea));
......@@ -757,8 +746,7 @@ static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> LARGE_PAGE_SHIFT;
hash = hpt_hash(vpn, 1);
secondary = !!(hugepte_val(pte) & _HUGEPAGE_SECONDARY);
if (secondary)
if (hugepte_val(pte) & _HUGEPAGE_SECONDARY)
hash = ~hash;
slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
slot += (hugepte_val(pte) & _HUGEPAGE_GROUP_IX) >> 5;
......
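Two simplifications in the ppc64 hugepage code above: the one-shot computeHugeHptePP() helper is folded into a single expression, and the secondary temporary is replaced by testing the flag directly. The protection value is 0x2 when the huge PTE is writable and 0x3 when read-only, because !(x & flag) evaluates to exactly 0 or 1. A sketch of the folded computation; the _HUGEPAGE_RW value here is a placeholder, not the real ppc64 definition:

/* Sketch: folded HPTE protection bits. */
#define _HUGEPAGE_RW 0x0004     /* placeholder bit; see the ppc64 headers */

static unsigned long huge_hpte_pp(unsigned long hugepte)
{
        /* 0x2 = read/write, 0x3 = read-only:
         * !(hugepte & _HUGEPAGE_RW) is 1 exactly when RW is clear */
        return 0x2 | !(hugepte & _HUGEPAGE_RW);
}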
......@@ -74,8 +74,8 @@ int copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (from->si_code >> 16) {
case __SI_RT: /* This is not generated by the kernel as of now. */
case __SI_MESGQ:
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_int, &to->si_int);
/* fallthrough */
case __SI_KILL >> 16:
......
......@@ -129,8 +129,8 @@ int copy_siginfo_to_user32(siginfo_t32 __user *to, siginfo_t *from)
err |= __put_user(from->si_trapno, &to->si_trapno);
err |= __put_user((long)from->si_addr, &to->si_addr);
break;
case __SI_RT: /* This is not generated by the kernel as of now. */
case __SI_MESGQ:
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
......
......@@ -338,26 +338,6 @@ config PCI_MMCONFIG
depends on PCI
select ACPI_BOOT
# the drivers/pci/msi.c code needs to be fixed first before enabling
config PCI_USE_VECTOR
bool "Vector-based interrupt indexing"
depends on X86_LOCAL_APIC && NOTWORKING
default n
help
This replaces the current existing IRQ-based index interrupt scheme
with the vector-base index scheme. The advantages of vector base
over IRQ base are listed below:
1) Support MSI implementation.
2) Support future IOxAPIC hotplug
Note that this enables MSI, Message Signaled Interrupt, on all
MSI capable device functions detected if users also install the
MSI patch. Message Signal Interrupt enables an MSI-capable
hardware device to send an inbound Memory Write on its PCI bus
instead of asserting IRQ signal on device IRQ pin.
If you don't know what to do here, say N.
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
......
......@@ -85,8 +85,8 @@ int ia32_copy_siginfo_to_user(siginfo_t32 __user *to, siginfo_t *from)
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user((u32)(u64)from->si_ptr, &to->si_ptr);
break;
case __SI_RT: /* This is not generated by the kernel as of now. */
case __SI_MESGQ:
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
......
......@@ -168,8 +168,11 @@ static int print_unex=1;
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#define FDPATCHES
#include <linux/fdreg.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
/*
* 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support
......@@ -179,7 +182,6 @@ static int print_unex=1;
#include <linux/fd.h>
#define FLOPPY98_MOTOR_MASK 0x08
#define FDPATCHES
#include <linux/hdreg.h>
#define FD98_STATUS (0 + FD_IOPORT )
#define FD98_DATA (2 + FD_IOPORT )
......@@ -250,9 +252,10 @@ static int use_virtual_dma;
*/
static spinlock_t floppy_lock = SPIN_LOCK_UNLOCKED;
static struct completion device_release;
static unsigned short virtual_dma_port=0x3f0;
void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
irqreturn_t floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
static int set_mode(char mask, char data);
static void register_devfs_entries (int drive) __init;
......@@ -987,9 +990,9 @@ static void empty(void)
static DECLARE_WORK(floppy_work, NULL, NULL);
static void schedule_bh( void (*handler)(void*) )
static void schedule_bh(void (*handler) (void))
{
PREPARE_WORK(&floppy_work, handler, NULL);
PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
schedule_work(&floppy_work);
}
......@@ -1627,7 +1630,7 @@ static void print_result(char *message, int inr)
}
/* interrupt handler. Note that this can be called externally on the Sparc */
void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
irqreturn_t floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
void (*handler)(void) = do_floppy;
int do_print;
......@@ -1648,7 +1651,7 @@ void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
printk("floppy interrupt on bizarre fdc %d\n",fdc);
printk("handler=%p\n", handler);
is_alive("bizarre fdc");
return;
return IRQ_NONE;
}
FDCS->reset = 0;
......@@ -1661,7 +1664,7 @@ void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
* activity.
*/
do_print = !handler && !initialising;
do_print = !handler && print_unex && !initialising;
inr = result();
if (inr && do_print)
......@@ -1701,13 +1704,16 @@ void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
} while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2);
}
if (handler) {
schedule_bh( (void *)(void *) handler);
schedule_bh(handler);
} else {
#if 0
FDCS->reset = 1;
#endif
}
is_alive("normal interrupt end");
/* FIXME! Was it really for us? */
return IRQ_HANDLED;
}
static void recalibrate_floppy(void)
......@@ -4231,11 +4237,16 @@ static int __init floppy_setup(char *str)
static int have_no_fdc= -ENODEV;
static void floppy_device_release(struct device *dev)
{
complete(&device_release);
}
static struct platform_device floppy_device = {
.name = "floppy",
.id = 0,
.dev = {
.name = "Floppy Drive",
.release = floppy_device_release,
},
};
......@@ -4267,10 +4278,8 @@ int __init floppy_init(void)
}
devfs_mk_dir (NULL, "floppy", NULL);
if (register_blkdev(FLOPPY_MAJOR,"fd")) {
err = -EBUSY;
if ((err = register_blkdev(FLOPPY_MAJOR,"fd")))
goto out;
}
for (i=0; i<N_DRIVE; i++) {
disks[i]->major = FLOPPY_MAJOR;
......@@ -4288,7 +4297,7 @@ int __init floppy_init(void)
else
floppy_sizes[i] = MAX_DISK_SIZE << 1;
floppy_queue = blk_init_queue(do_fd_request, &floppy_lock)
floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!floppy_queue)
goto out_queue;
......@@ -4628,10 +4637,14 @@ void cleanup_module(void)
{
int drive;
init_completion(&device_release);
platform_device_unregister(&floppy_device);
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR, "fd");
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
if ((allowed_drive_mask & (1 << drive)) &&
fdc_state[FDC(drive)].version != FDC_NONE) {
del_gendisk(disks[drive]);
......@@ -4641,9 +4654,17 @@ void cleanup_module(void)
}
devfs_remove("floppy");
del_timer_sync(&fd_timeout);
del_timer_sync(&fd_timer);
blk_cleanup_queue(floppy_queue);
if (usage_count)
floppy_release_irq_and_dma();
/* eject disk, if any */
fd_eject(0);
wait_for_completion(&device_release);
}
MODULE_PARM(floppy,"s");
......
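Three themes in the floppy.c hunks: floppy_interrupt() is converted to the 2.6 irqreturn_t convention (IRQ_HANDLED/IRQ_NONE instead of void), schedule_bh() gets a properly typed handler, and module unload now waits for the platform device's release callback through a completion, so the module cannot be freed while floppy_device_release() is still outstanding. A minimal sketch of that unload pattern under 2.6-era driver-model headers; all names here are hypothetical:

/* Sketch: block module exit until the device release callback has run. */
#include <linux/completion.h>
#include <linux/device.h>

static DECLARE_COMPLETION(release_done);

static void demo_release(struct device *dev)
{
        complete(&release_done);        /* called when the last ref drops */
}

static void demo_exit(struct platform_device *pdev)
{
        platform_device_unregister(pdev);
        wait_for_completion(&release_done);     /* now safe to unload */
}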
......@@ -2429,7 +2429,7 @@ EXPORT_SYMBOL(generic_make_request);
* interfaces, @bio must be presetup and ready for I/O.
*
*/
int submit_bio(int rw, struct bio *bio)
void submit_bio(int rw, struct bio *bio)
{
int count = bio_sectors(bio);
......@@ -2451,7 +2451,6 @@ int submit_bio(int rw, struct bio *bio)
}
generic_make_request(bio);
return 1;
}
EXPORT_SYMBOL(submit_bio);
......
......@@ -51,6 +51,7 @@
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#ifdef CONFIG_HIGH_RES_TIMERS
#include <linux/hrtime.h>
# if defined(schedule_next_int)
......
......@@ -800,7 +800,7 @@ static void retrieve_status(struct dm_table *table,
struct dm_target *ti = dm_table_get_target(table, i);
remaining = len - (outptr - outbuf);
if (remaining < sizeof(struct dm_target_spec)) {
if (remaining <= sizeof(struct dm_target_spec)) {
param->flags |= DM_BUFFER_FULL_FLAG;
break;
}
......@@ -815,6 +815,10 @@ static void retrieve_status(struct dm_table *table,
outptr += sizeof(struct dm_target_spec);
remaining = len - (outptr - outbuf);
if (remaining <= 0) {
param->flags |= DM_BUFFER_FULL_FLAG;
break;
}
/* Get the status/table string from the target driver */
if (ti->type->status) {
......@@ -828,7 +832,7 @@ static void retrieve_status(struct dm_table *table,
outptr += strlen(outptr) + 1;
used = param->data_start + (outptr - outbuf);
align_ptr(outptr);
outptr = align_ptr(outptr);
spec->next = outptr - outbuf;
}
......
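Two fixes in retrieve_status(): the remaining-space test becomes <=, so a dm_target_spec can no longer exactly fill the buffer and leave no room for its status string, and the result of align_ptr() is assigned back, since the helper returns the aligned pointer rather than adjusting it in place. The dm helper is roughly this shape (a sketch; the in-tree definition may differ in detail):

/* Sketch: round a pointer up to the next 8-byte boundary. */
static inline void *align_ptr(void *ptr)
{
        return (void *)(((unsigned long)ptr + 7) & ~7UL);
}
/* usage: outptr = align_ptr(outptr); -- calling it and discarding the
 * return value, as the old code did, aligns nothing */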
......@@ -187,24 +187,24 @@ static int stripe_status(struct dm_target *ti,
status_type_t type, char *result, unsigned int maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
int offset;
unsigned int sz = 0;
unsigned int i;
char buffer[32];
#define EMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
switch (type) {
case STATUSTYPE_INFO:
result[0] = '\0';
break;
case STATUSTYPE_TABLE:
offset = scnprintf(result, maxlen, "%d " SECTOR_FORMAT,
sc->stripes, sc->chunk_mask + 1);
EMIT("%d " SECTOR_FORMAT, sc->stripes, sc->chunk_mask + 1);
for (i = 0; i < sc->stripes; i++) {
format_dev_t(buffer, sc->stripe[i].dev->bdev->bd_dev);
offset +=
scnprintf(result + offset, maxlen - offset,
" %s " SECTOR_FORMAT, buffer,
sc->stripe[i].physical_start);
EMIT(" %s " SECTOR_FORMAT, buffer,
sc->stripe[i].physical_start);
}
break;
}
......
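stripe_status() above swaps open-coded scnprintf() offset bookkeeping for an EMIT() macro that accumulates into sz and emits nothing once maxlen is reached, so a long table can never run past the caller's buffer. A small userspace sketch of the same pattern (snprintf standing in for the kernel's scnprintf, which returns the count actually written):

#include <stdio.h>

/* Sketch: bounded append-style formatting in the style of dm's EMIT(). */
#define EMIT(x...) (sz += ((sz >= maxlen) ? \
                    0 : snprintf(result + sz, maxlen - sz, x)))

static int demo(char *result, unsigned int maxlen)
{
        unsigned int sz = 0;

        EMIT("%d stripes:", 2);
        EMIT(" %s %llu", "8:16", 0ULL);   /* becomes a no-op when full */
        return sz;
}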
......@@ -663,12 +663,14 @@ int dm_table_add_target(struct dm_table *t, const char *type,
if (!len) {
tgt->error = "zero-length target";
DMERR(": %s\n", tgt->error);
return -EINVAL;
}
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
tgt->error = "unknown target type";
DMERR(": %s\n", tgt->error);
return -EINVAL;
}
......@@ -705,7 +707,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return 0;
bad:
printk(KERN_ERR DM_NAME ": %s\n", tgt->error);
DMERR(": %s\n", tgt->error);
dm_put_target_type(tgt->type);
return r;
}
......
......@@ -294,6 +294,9 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
if (bio->bi_size)
return 1;
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
if (endio) {
r = endio(tio->ti, bio, error, &tio->info);
if (r < 0)
......@@ -745,7 +748,7 @@ static void event_callback(void *context)
down_write(&md->lock);
md->event_nr++;
wake_up_interruptible(&md->eventq);
wake_up(&md->eventq);
up_write(&md->lock);
}
......@@ -922,7 +925,7 @@ int dm_suspend(struct mapped_device *md)
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (!atomic_read(&md->pending))
if (!atomic_read(&md->pending) || signal_pending(current))
break;
io_schedule();
......@@ -931,6 +934,14 @@ int dm_suspend(struct mapped_device *md)
down_write(&md->lock);
remove_wait_queue(&md->wait, &wait);
/* were we interrupted ? */
if (atomic_read(&md->pending)) {
clear_bit(DMF_BLOCK_IO, &md->flags);
up_write(&md->lock);
return -EINTR;
}
set_bit(DMF_SUSPENDED, &md->flags);
map = dm_get_table(md);
......
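dm_suspend() becomes signal-aware: the drain loop also exits on signal_pending(current), and if I/O is still outstanding at that point the suspend is rolled back with -EINTR instead of marking the device DMF_SUSPENDED. Condensed from the hunks above (md, DMF_BLOCK_IO and io_schedule() as in drivers/md/dm.c), the shape of the wait is:

/* Sketch: interruptible wait for pending I/O to drain. */
while (1) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (!atomic_read(&md->pending) || signal_pending(current))
                break;
        io_schedule();
}
set_current_state(TASK_RUNNING);

if (atomic_read(&md->pending)) {        /* a signal won the race */
        clear_bit(DMF_BLOCK_IO, &md->flags);
        return -EINTR;                  /* undo, caller may retry */
}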
#
# PCI configuration
#
config PCI_USE_VECTOR
bool "Vector-based interrupt indexing (MSI)"
depends on (X86_LOCAL_APIC && X86_IO_APIC && !X86_64) || IA64
default n
help
This replaces the existing IRQ-based interrupt indexing scheme
with a vector-based indexing scheme. The advantages of vector-based
over IRQ-based indexing are:
1) Support for the MSI implementation.
2) Support for future IOxAPIC hotplug.
Note that this allows device drivers to enable MSI, Message
Signaled Interrupts, on all MSI-capable device functions detected.
A Message Signaled Interrupt lets an MSI-capable hardware device
send an inbound Memory Write on its PCI bus instead of asserting
an IRQ signal on its device IRQ pin.
If you don't know what to do here, say N.
config PCI_LEGACY_PROC
bool "Legacy /proc/pci interface"
depends on PCI
......
......@@ -33,6 +33,7 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/major.h>
......@@ -78,7 +79,7 @@ static const u_int exponent[] = {
/* Parameters that can be set with 'insmod' */
#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(cis_width, 0); /* 16-bit CIS? */
......
......@@ -94,7 +94,7 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("Linux Kernel Card Services\noptions:" OPTIONS);
MODULE_LICENSE("Dual MPL/GPL");
#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(setup_delay, 10); /* centiseconds */
INT_MODULE_PARM(resume_delay, 20); /* centiseconds */
......
......@@ -104,7 +104,8 @@ static int extra_sockets = 0;
static int ignore = -1;
/* Bit map or list of interrupts to choose from */
static u_int irq_mask = 0xffff;
static int irq_list[16] = { -1 };
static int irq_list[16];
static int irq_list_count;
/* The card status change interrupt -- 0 means autoselect */
static int cs_irq = 0;
......@@ -130,27 +131,27 @@ static int async_clock = -1;
static int cable_mode = -1;
static int wakeup = 0;
MODULE_PARM(i365_base, "i");
MODULE_PARM(ignore, "i");
MODULE_PARM(extra_sockets, "i");
MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-16i");
MODULE_PARM(cs_irq, "i");
MODULE_PARM(async_clock, "i");
MODULE_PARM(cable_mode, "i");
MODULE_PARM(wakeup, "i");
MODULE_PARM(do_scan, "i");
MODULE_PARM(poll_interval, "i");
MODULE_PARM(cycle_time, "i");
MODULE_PARM(has_dma, "i");
MODULE_PARM(has_led, "i");
MODULE_PARM(has_ring, "i");
MODULE_PARM(dynamic_mode, "i");
MODULE_PARM(freq_bypass, "i");
MODULE_PARM(setup_time, "i");
MODULE_PARM(cmd_time, "i");
MODULE_PARM(recov_time, "i");
module_param(i365_base, int, 0444);
module_param(ignore, int, 0444);
module_param(extra_sockets, int, 0444);
module_param(irq_mask, int, 0444);
module_param_array(irq_list, int, irq_list_count, 0444);
module_param(cs_irq, int, 0444);
module_param(async_clock, int, 0444);
module_param(cable_mode, int, 0444);
module_param(wakeup, int, 0444);
module_param(do_scan, int, 0444);
module_param(poll_interval, int, 0444);
module_param(cycle_time, int, 0444);
module_param(has_dma, int, 0444);
module_param(has_led, int, 0444);
module_param(has_ring, int, 0444);
module_param(dynamic_mode, int, 0444);
module_param(freq_bypass, int, 0444);
module_param(setup_time, int, 0444);
module_param(cmd_time, int, 0444);
module_param(recov_time, int, 0444);
/*====================================================================*/
......@@ -705,10 +706,10 @@ static void __init add_pcic(int ns, int type)
printk(", %d socket%s\n", ns, ((ns > 1) ? "s" : ""));
/* Set host options, build basic interrupt mask */
if (irq_list[0] == -1)
if (irq_list_count == 0)
mask = irq_mask;
else
for (i = mask = 0; i < 16; i++)
for (i = mask = 0; i < irq_list_count; i++)
mask |= (1<<irq_list[i]);
mask &= I365_MASK & set_bridge_opts(base, ns);
/* Scan for ISA interrupts */
......
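The i82365 hunks are part of the tree-wide MODULE_PARM to module_param conversion. The new interface is typed, and module_param_array() records how many elements were actually supplied, which is why the irq_list[16] = { -1 } sentinel gives way to an explicit irq_list_count. A minimal sketch (the 2.6-era macro takes the count variable by name):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int irq_list[16];
static int irq_list_count;      /* number of elements the user passed */

module_param_array(irq_list, int, irq_list_count, 0444);

static int use_default_mask(void)
{
        /* old code tested irq_list[0] == -1 for "no list given" */
        return irq_list_count == 0;
}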
......@@ -33,6 +33,7 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
......@@ -56,7 +57,7 @@
/* Parameters that can be set with 'insmod' */
#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(probe_mem, 1); /* memory probe? */
#ifdef CONFIG_PCMCIA_PROBE
......
......@@ -60,7 +60,6 @@
static int pc_debug;
module_param(pc_debug, int, 0644);
MODULE_PARM(pc_debug, "i");
static const char *version =
"tcic.c 1.111 2000/02/15 04:13:12 (David Hinds)";
......@@ -91,7 +90,8 @@ static int do_scan = 1;
/* Bit map of interrupts to choose from */
static u_int irq_mask = 0xffff;
static int irq_list[16] = { -1 };
static int irq_list[16];
static int irq_list_count;
/* The card status change interrupt -- 0 means autoselect */
static int cs_irq;
......@@ -105,15 +105,15 @@ static int poll_quick = HZ/20;
/* CCLK external clock time, in nanoseconds. 70 ns = 14.31818 MHz */
static int cycle_time = 70;
MODULE_PARM(tcic_base, "i");
MODULE_PARM(ignore, "i");
MODULE_PARM(do_scan, "i");
MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-16i");
MODULE_PARM(cs_irq, "i");
MODULE_PARM(poll_interval, "i");
MODULE_PARM(poll_quick, "i");
MODULE_PARM(cycle_time, "i");
module_param(tcic_base, int, 0444);
module_param(ignore, int, 0444);
module_param(do_scan, int, 0444);
module_param(irq_mask, int, 0444);
module_param_array(irq_list, int, irq_list_count, 0444);
module_param(cs_irq, int, 0444);
module_param(poll_interval, int, 0444);
module_param(poll_quick, int, 0444);
module_param(cycle_time, int, 0444);
/*====================================================================*/
......@@ -481,10 +481,10 @@ static int __init init_tcic(void)
/* Build interrupt mask */
printk(", %d sockets\n" KERN_INFO " irq list (", sockets);
if (irq_list[0] == -1)
if (irq_list_count == 0)
mask = irq_mask;
else
for (i = mask = 0; i < 16; i++)
for (i = mask = 0; i < irq_list_count; i++)
mask |= (1<<irq_list[i]);
/* irq 14, 11, 10, 7, 6, 5, 4, 3 */
......
......@@ -21,6 +21,7 @@
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/init.h>
......@@ -117,11 +118,11 @@ static struct old_serial_port old_serial_port[] = {
#define UART_NR (ARRAY_SIZE(old_serial_port) + CONFIG_SERIAL_8250_NR_UARTS)
#if defined(CONFIG_SERIAL_8250_RSA) && defined(MODULE)
#ifdef CONFIG_SERIAL_8250_RSA
#define PORT_RSA_MAX 4
static int probe_rsa[PORT_RSA_MAX];
static int force_rsa[PORT_RSA_MAX];
static unsigned long probe_rsa[PORT_RSA_MAX];
static unsigned int probe_rsa_count;
#endif /* CONFIG_SERIAL_8250_RSA */
struct uart_8250_port {
......@@ -678,21 +679,16 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
break;
}
#if defined(CONFIG_SERIAL_8250_RSA) && defined(MODULE)
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Only probe for RSA ports if we got the region.
*/
if (up->port.type == PORT_16550A && probeflags & PROBE_RSA) {
int i;
for (i = 0 ; i < PORT_RSA_MAX ; ++i) {
if (!probe_rsa[i] && !force_rsa[i])
break;
if (((probe_rsa[i] != up->port.iobase) ||
check_region(up->port.iobase + UART_RSA_BASE, 16)) &&
(force_rsa[i] != up->port.iobase))
continue;
if (__enable_rsa(up)) {
for (i = 0 ; i < probe_rsa_count; ++i) {
if (probe_rsa[i] == up->port.iobase &&
__enable_rsa(up)) {
up->port.type = PORT_RSA;
break;
}
......@@ -2215,14 +2211,12 @@ EXPORT_SYMBOL(serial8250_resume_port);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250/16x50 serial driver $Revision: 1.90 $");
MODULE_PARM(share_irqs, "i");
module_param(share_irqs, uint, 0644);
MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
" (unsafe)");
#if defined(CONFIG_SERIAL_8250_RSA) && defined(MODULE)
MODULE_PARM(probe_rsa, "1-" __MODULE_STRING(PORT_RSA_MAX) "i");
#ifdef CONFIG_SERIAL_8250_RSA
module_param_array(probe_rsa, ulong, probe_rsa_count, 0444);
MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
MODULE_PARM(force_rsa, "1-" __MODULE_STRING(PORT_RSA_MAX) "i");
MODULE_PARM_DESC(force_rsa, "Force I/O ports for RSA");
#endif
MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
......@@ -32,6 +32,7 @@
======================================================================*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
......@@ -71,17 +72,18 @@ static char *version = "serial_cs.c 1.134 2002/05/04 05:48:53 (David Hinds)";
/* Bit map of interrupts to choose from */
static u_int irq_mask = 0xdeb8;
static int irq_list[4] = { -1 };
static int irq_list[4];
static unsigned int irq_list_count;
/* Enable the speaker? */
static int do_sound = 1;
/* Skip strict UART tests? */
static int buggy_uart;
MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-4i");
MODULE_PARM(do_sound, "i");
MODULE_PARM(buggy_uart, "i");
module_param(irq_mask, uint, 0444);
module_param_array(irq_list, int, irq_list_count, 0444);
module_param(do_sound, int, 0444);
module_param(buggy_uart, int, 0444);
/*====================================================================*/
......@@ -221,10 +223,10 @@ static dev_link_t *serial_attach(void)
link->io.NumPorts1 = 8;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
if (irq_list[0] == -1)
if (irq_list_count == 0)
link->irq.IRQInfo2 = irq_mask;
else
for (i = 0; i < 4; i++)
for (i = 0; i < irq_list_count; i++)
link->irq.IRQInfo2 |= 1 << irq_list[i];
link->conf.Attributes = CONF_ENABLE_IRQ;
if (do_sound) {
......
......@@ -1998,11 +1998,13 @@ static int __init aty128_probe(struct pci_dev *pdev, const struct pci_device_id
static void __devexit aty128_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct aty128fb_par *par = info->par;
struct aty128fb_par *par;
if (!info)
return;
par = info->par;
unregister_framebuffer(info);
#ifdef CONFIG_MTRR
if (par->mtrr.vram_valid)
......
......@@ -51,25 +51,6 @@ static struct bh_wait_queue_head {
wait_queue_head_t wqh;
} ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
/*
* Debug/devel support stuff
*/
void __buffer_error(char *file, int line)
{
static int enough;
if (enough > 10)
return;
enough++;
printk("buffer layer error at %s:%d\n", file, line);
#ifndef CONFIG_KALLSYMS
printk("Pass this trace through ksymoops for reporting\n");
#endif
dump_stack();
}
EXPORT_SYMBOL(__buffer_error);
inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
......@@ -99,17 +80,6 @@ EXPORT_SYMBOL(wake_up_buffer);
void fastcall unlock_buffer(struct buffer_head *bh)
{
/*
* unlock_buffer against a zero-count bh is a bug, if the page
* is not locked. Because then nothing protects the buffer's
* waitqueue, which is used here. (Well. Other locked buffers
* against the page will pin it. But complain anyway).
*/
if (atomic_read(&bh->b_count) == 0 &&
!PageLocked(bh->b_page) &&
!PageWriteback(bh->b_page))
buffer_error();
clear_buffer_locked(bh);
smp_mb__after_clear_bit();
wake_up_buffer(bh);
......@@ -125,10 +95,6 @@ void __wait_on_buffer(struct buffer_head * bh)
wait_queue_head_t *wqh = bh_waitq_head(bh);
DEFINE_WAIT(wait);
if (atomic_read(&bh->b_count) == 0 &&
(!bh->b_page || !PageLocked(bh->b_page)))
buffer_error();
do {
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
if (buffer_locked(bh)) {
......@@ -146,8 +112,6 @@ void __wait_on_buffer(struct buffer_head * bh)
static void
__set_page_buffers(struct page *page, struct buffer_head *head)
{
if (page_has_buffers(page))
buffer_error();
page_cache_get(page);
SetPagePrivate(page);
page->private = (unsigned long)head;
......@@ -433,10 +397,12 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
}
bh = bh->b_this_page;
} while (bh != head);
buffer_error();
printk("block=%llu, b_blocknr=%llu\n",
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
(unsigned long long)block, (unsigned long long)bh->b_blocknr);
printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
out_unlock:
spin_unlock(&bd_mapping->private_lock);
page_cache_release(page);
......@@ -847,10 +813,7 @@ int __set_page_dirty_buffers(struct page *page)
struct buffer_head *bh = head;
do {
if (buffer_uptodate(bh))
set_buffer_dirty(bh);
else
buffer_error();
set_buffer_dirty(bh);
bh = bh->b_this_page;
} while (bh != head);
}
......@@ -1151,7 +1114,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
return page;
failed:
buffer_error();
BUG();
unlock_page(page);
page_cache_release(page);
return NULL;
......@@ -1247,8 +1210,6 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
*/
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
if (!buffer_uptodate(bh))
buffer_error();
if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
__set_page_dirty_nobuffers(bh->b_page);
}
......@@ -1267,7 +1228,7 @@ void __brelse(struct buffer_head * buf)
return;
}
printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
buffer_error(); /* For the stack backtrace */
WARN_ON(1);
}
/*
......@@ -1294,8 +1255,6 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
unlock_buffer(bh);
return bh;
} else {
if (buffer_dirty(bh))
buffer_error();
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ, bh);
......@@ -1686,10 +1645,6 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
old_bh = __find_get_block_slow(bdev, block, 0);
if (old_bh) {
#if 0 /* This happens. Later. */
if (buffer_dirty(old_bh))
buffer_error();
#endif
clear_buffer_dirty(old_bh);
wait_on_buffer(old_bh);
clear_buffer_req(old_bh);
......@@ -1737,8 +1692,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
if (!PageUptodate(page))
buffer_error();
create_empty_buffers(page, 1 << inode->i_blkbits,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
......@@ -1767,9 +1720,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
* mapped buffers outside i_size will occur, because
* this page can be outside i_size when there is a
* truncate in progress.
*
* if (buffer_mapped(bh))
* buffer_error();
*/
/*
* The buffer was zeroed by block_write_full_page()
......@@ -1777,8 +1727,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
if (buffer_new(bh))
buffer_error();
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
......@@ -1811,8 +1759,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
continue;
}
if (test_clear_buffer_dirty(bh)) {
if (!buffer_uptodate(bh))
buffer_error();
mark_buffer_async_write(bh);
} else {
unlock_buffer(bh);
......@@ -1942,8 +1888,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
if (!buffer_mapped(bh))
buffer_error();
set_buffer_uptodate(bh);
continue;
}
......@@ -2001,8 +1945,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
void *kaddr;
clear_buffer_new(bh);
if (buffer_uptodate(bh))
buffer_error();
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr+block_start, 0, bh->b_size);
kunmap_atomic(kaddr, KM_USER0);
......@@ -2068,8 +2010,6 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
if (!PageLocked(page))
PAGE_BUG(page);
if (PageUptodate(page))
buffer_error();
blocksize = 1 << inode->i_blkbits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
......@@ -2684,7 +2624,7 @@ static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
return 0;
}
int submit_bh(int rw, struct buffer_head * bh)
void submit_bh(int rw, struct buffer_head * bh)
{
struct bio *bio;
......@@ -2692,13 +2632,6 @@ int submit_bh(int rw, struct buffer_head * bh)
BUG_ON(!buffer_mapped(bh));
BUG_ON(!bh->b_end_io);
if ((rw == READ || rw == READA) && buffer_uptodate(bh))
buffer_error();
if (rw == WRITE && !buffer_uptodate(bh))
buffer_error();
if (rw == READ && buffer_dirty(bh))
buffer_error();
/* Only clear out a write error when rewriting */
if (test_set_buffer_req(bh) && rw == WRITE)
clear_buffer_write_io_error(bh);
......@@ -2722,7 +2655,7 @@ int submit_bh(int rw, struct buffer_head * bh)
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
return submit_bio(rw, bio);
submit_bio(rw, bio);
}
/**
......@@ -2797,21 +2730,6 @@ void sync_dirty_buffer(struct buffer_head *bh)
}
}
/*
* Sanity checks for try_to_free_buffers.
*/
static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
{
if (!buffer_uptodate(bh) && !buffer_req(bh)) {
if (PageUptodate(page) && page->mapping
&& buffer_mapped(bh) /* discard_buffer */
&& S_ISBLK(page->mapping->host->i_mode))
{
buffer_error();
}
}
}
/*
* try_to_free_buffers() checks if all the buffers on this particular page
* are unused, and releases them if so.
......@@ -2847,7 +2765,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
bh = head;
do {
check_ttfb_buffer(page, bh);
if (buffer_write_io_error(bh))
set_bit(AS_EIO, &page->mapping->flags);
if (buffer_busy(bh))
......@@ -2857,9 +2774,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
bh = bh->b_this_page;
} while (bh != head);
if (!was_uptodate && PageUptodate(page) && !PageError(page))
buffer_error();
do {
struct buffer_head *next = bh->b_this_page;
......
......@@ -609,7 +609,9 @@ static inline int de_thread(struct task_struct *tsk)
newsig->group_stop_count = 0;
newsig->curr_target = NULL;
init_sigpending(&newsig->shared_pending);
INIT_LIST_HEAD(&newsig->posix_timers);
newsig->tty = oldsig->tty;
newsig->pgrp = oldsig->pgrp;
newsig->session = oldsig->session;
newsig->leader = oldsig->leader;
......
......@@ -1358,8 +1358,6 @@ static int ext3_ordered_writepage(struct page *page,
}
if (!page_has_buffers(page)) {
if (!PageUptodate(page))
buffer_error();
create_empty_buffers(page, inode->i_sb->s_blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
......
......@@ -485,8 +485,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
break;
block_in_file++;
}
if (page_block == 0)
buffer_error();
BUG_ON(page_block == 0);
first_unmapped = page_block;
......
......@@ -65,11 +65,32 @@ fh_dup2(struct svc_fh *dst, struct svc_fh *src)
*dst = *src;
}
static int
do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
int accmode, status;
if (open->op_truncate &&
!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
accmode = MAY_NOP;
if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
accmode = MAY_READ;
if (open->op_share_deny & NFS4_SHARE_ACCESS_WRITE)
accmode |= (MAY_WRITE | MAY_TRUNC);
accmode |= MAY_OWNER_OVERRIDE;
status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
return status;
}
static int
do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct svc_fh resfh;
int accmode, status;
int status;
fh_init(&resfh, NFS4_FHSIZE);
open->op_truncate = 0;
......@@ -92,6 +113,8 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
if (!status) {
set_change_info(&open->op_cinfo, current_fh);
/* set reply cache */
fh_dup2(current_fh, &resfh);
/* XXXJBF: keep a saved svc_fh struct instead?? */
open->op_stateowner->so_replay.rp_openfh_len =
......@@ -100,30 +123,66 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
&resfh.fh_handle.fh_base,
resfh.fh_handle.fh_size);
accmode = MAY_NOP;
if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
accmode = MAY_READ;
if (open->op_share_deny & NFS4_SHARE_ACCESS_WRITE)
accmode |= (MAY_WRITE | MAY_TRUNC);
accmode |= MAY_OWNER_OVERRIDE;
status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
status = do_open_permission(rqstp, current_fh, open);
}
fh_put(&resfh);
return status;
}
static int
do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
int status;
dprintk("NFSD: do_open_fhandle\n");
/* we don't know the target directory, and therefore can not
* set the change info
*/
memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
/* set replay cache */
open->op_stateowner->so_replay.rp_openfh_len = current_fh->fh_handle.fh_size;
memcpy(open->op_stateowner->so_replay.rp_openfh,
&current_fh->fh_handle.fh_base,
current_fh->fh_handle.fh_size);
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
!open->op_iattr.ia_size;
status = do_open_permission(rqstp, current_fh, open);
return status;
}
/*
* nfs4_unlock_state() called in encode
*/
static inline int
nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
int status;
dprintk("NFSD: nfsd4_open filename %.*s\n",
(int)open->op_fname.len, open->op_fname.data);
dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n",
(int)open->op_fname.len, open->op_fname.data,
open->op_stateowner);
if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
if (nfs4_in_no_grace() &&
open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_no_grace;
/* This check required by spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
open->op_stateowner = NULL;
nfs4_lock_state();
/* check seqid for replay. set nfs4_owner */
status = nfsd4_process_open1(open);
if (status == NFSERR_REPLAY_ME) {
......@@ -141,16 +200,30 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
}
if (status)
return status;
if (open->op_claim_type == NFS4_OPEN_CLAIM_NULL) {
/*
* This block of code will (1) set CURRENT_FH to the file being opened,
* creating it if necessary, (2) set open->op_cinfo,
* (3) set open->op_truncate if the file is to be truncated
* after opening, (4) do permission checking.
*/
status = do_open_lookup(rqstp, current_fh, open);
if (status)
return status;
status = do_open_lookup(rqstp, current_fh, open);
if (status)
return status;
} else if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) {
/*
* The CURRENT_FH is already set to the file being opened. This
* block of code will (1) set open->op_cinfo, (2) set
* open->op_truncate if the file is to be truncated after opening,
* (3) do permission checking.
*/
status = do_open_fhandle(rqstp, current_fh, open);
if (status)
return status;
} else {
printk("NFSD: unsupported OPEN claim type\n");
return nfserr_inval;
}
/*
* nfsd4_process_open2() does the actual opening of the file. If
* successful, it (1) truncates the file if open->op_truncate was
......@@ -187,9 +260,14 @@ nfsd4_putfh(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_putf
static inline int
nfsd4_putrootfh(struct svc_rqst *rqstp, struct svc_fh *current_fh)
{
int status;
fh_put(current_fh);
return exp_pseudoroot(rqstp->rq_client, current_fh,
status = exp_pseudoroot(rqstp->rq_client, current_fh,
&rqstp->rq_chandle);
if (!status)
status = nfsd_setuser(rqstp, current_fh->fh_export);
return status;
}
static inline int
......@@ -402,6 +480,8 @@ nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read
int status;
/* no need to check permission - this will be done in nfsd_read() */
if (nfs4_in_grace())
return nfserr_grace;
if (read->rd_offset >= OFFSET_MAX)
return nfserr_inval;
......@@ -448,6 +528,9 @@ nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read
static inline int
nfsd4_readdir(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_readdir *readdir)
{
u64 cookie = readdir->rd_cookie;
static const nfs4_verifier zeroverf;
/* no need to check permission - this will be done in nfsd_readdir() */
if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
......@@ -456,7 +539,8 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_re
readdir->rd_bmval[0] &= NFSD_SUPPORTED_ATTRS_WORD0;
readdir->rd_bmval[1] &= NFSD_SUPPORTED_ATTRS_WORD1;
if (readdir->rd_cookie > ~(u32)0)
if ((cookie > ~(u32)0) || (cookie == 1) || (cookie == 2) ||
(cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
return nfserr_bad_cookie;
readdir->rd_rqstp = rqstp;
......@@ -521,10 +605,13 @@ static inline int
nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_setattr *setattr)
{
struct nfs4_stateid *stp;
int status = nfserr_nofilehandle;
int status = nfs_ok;
if (nfs4_in_grace())
return nfserr_grace;
if (!current_fh->fh_dentry)
goto out;
return nfserr_nofilehandle;
status = nfs_ok;
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
......@@ -563,6 +650,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
u32 *p;
int status = nfs_ok;
if (nfs4_in_grace())
return nfserr_grace;
/* no need to check permission - this will be done in nfsd_write() */
if (write->wr_offset >= OFFSET_MAX)
......@@ -757,7 +847,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
break;
case OP_CLOSE:
op->status = nfsd4_close(rqstp, &current_fh, &op->u.close);
op->replay = &op->u.close.cl_stateowner->so_replay;
if (op->u.close.cl_stateowner)
op->replay =
&op->u.close.cl_stateowner->so_replay;
break;
case OP_COMMIT:
op->status = nfsd4_commit(rqstp, &current_fh, &op->u.commit);
......@@ -776,13 +868,18 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
break;
case OP_LOCK:
op->status = nfsd4_lock(rqstp, &current_fh, &op->u.lock);
op->replay = &op->u.lock.lk_stateowner->so_replay;
if (op->u.lock.lk_stateowner)
op->replay =
&op->u.lock.lk_stateowner->so_replay;
break;
case OP_LOCKT:
op->status = nfsd4_lockt(rqstp, &current_fh, &op->u.lockt);
break;
case OP_LOCKU:
op->status = nfsd4_locku(rqstp, &current_fh, &op->u.locku);
if (op->u.locku.lu_stateowner)
op->replay =
&op->u.locku.lu_stateowner->so_replay;
break;
case OP_LOOKUP:
op->status = nfsd4_lookup(rqstp, &current_fh, &op->u.lookup);
......@@ -797,15 +894,21 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
break;
case OP_OPEN:
op->status = nfsd4_open(rqstp, &current_fh, &op->u.open);
op->replay = &op->u.open.op_stateowner->so_replay;
if (op->u.open.op_stateowner)
op->replay =
&op->u.open.op_stateowner->so_replay;
break;
case OP_OPEN_CONFIRM:
op->status = nfsd4_open_confirm(rqstp, &current_fh, &op->u.open_confirm);
op->replay = &op->u.open_confirm.oc_stateowner->so_replay;
if (op->u.open_confirm.oc_stateowner)
op->replay =
&op->u.open_confirm.oc_stateowner->so_replay;
break;
case OP_OPEN_DOWNGRADE:
op->status = nfsd4_open_downgrade(rqstp, &current_fh, &op->u.open_downgrade);
op->replay = &op->u.open_downgrade.od_stateowner->so_replay;
if (op->u.open_downgrade.od_stateowner)
op->replay =
&op->u.open_downgrade.od_stateowner->so_replay;
break;
case OP_PUTFH:
op->status = nfsd4_putfh(rqstp, &current_fh, &op->u.putfh);
......
......@@ -484,11 +484,14 @@ nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access
DECODE_TAIL;
}
#define NFS4_STATE_NOT_LOCKED ((void *)-1)
static int
nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
{
DECODE_HEAD;
close->cl_stateowner = NFS4_STATE_NOT_LOCKED;
READ_BUF(4 + sizeof(stateid_t));
READ32(close->cl_seqid);
READ32(close->cl_stateid.si_generation);
......@@ -579,6 +582,7 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
{
DECODE_HEAD;
lock->lk_stateowner = NFS4_STATE_NOT_LOCKED;
/*
* type, reclaim(boolean), offset, length, new_lock_owner(boolean)
*/
......@@ -636,6 +640,7 @@ nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
{
DECODE_HEAD;
locku->lu_stateowner = NFS4_STATE_NOT_LOCKED;
READ_BUF(24 + sizeof(stateid_t));
READ32(locku->lu_type);
if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT))
......@@ -671,6 +676,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0;
open->op_stateowner = NFS4_STATE_NOT_LOCKED;
/* seqid, share_access, share_deny, clientid, ownerlen */
READ_BUF(16 + sizeof(clientid_t));
......@@ -746,6 +752,7 @@ nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_con
{
DECODE_HEAD;
open_conf->oc_stateowner = NFS4_STATE_NOT_LOCKED;
READ_BUF(4 + sizeof(stateid_t));
READ32(open_conf->oc_req_stateid.si_generation);
COPYMEM(&open_conf->oc_req_stateid.si_opaque, sizeof(stateid_opaque_t));
......@@ -759,6 +766,7 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
{
DECODE_HEAD;
open_down->od_stateowner = NFS4_STATE_NOT_LOCKED;
READ_BUF(4 + sizeof(stateid_t));
READ32(open_down->od_stateid.si_generation);
COPYMEM(&open_down->od_stateid.si_opaque, sizeof(stateid_opaque_t));
......@@ -1259,7 +1267,8 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
*/
#define ENCODE_SEQID_OP_TAIL(stateowner) do { \
if (seqid_mutating_err(nfserr) && stateowner) { \
if (seqid_mutating_err(nfserr) && stateowner \
&& (stateowner != NFS4_STATE_NOT_LOCKED)) { \
if (stateowner->so_confirmed) \
stateowner->so_seqid++; \
stateowner->so_replay.rp_status = nfserr; \
......@@ -1267,7 +1276,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
(((char *)(resp)->p - (char *)save)); \
memcpy(stateowner->so_replay.rp_buf, save, \
stateowner->so_replay.rp_buflen); \
} } while(0)
} \
if (stateowner != NFS4_STATE_NOT_LOCKED) \
nfs4_unlock_state(); \
} while (0);
static u32 nfs4_ftypes[16] = {
......@@ -1917,7 +1929,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_open
ENCODE_SEQID_OP_HEAD;
if (nfserr)
return;
goto out;
RESERVE_SPACE(36 + sizeof(stateid_t));
WRITE32(open->op_stateid.si_generation);
......@@ -1972,7 +1984,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_open
BUG();
}
/* XXX save filehandle here */
out:
ENCODE_SEQID_OP_TAIL(open->op_stateowner);
}
......@@ -2179,6 +2191,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_re
readdir->common.err == nfserr_toosmall &&
readdir->buffer == page)
nfserr = nfserr_toosmall;
if (nfserr == nfserr_symlink)
nfserr = nfserr_notdir;
if (nfserr)
goto err_no_verf;
......@@ -2295,14 +2309,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
RESERVE_SPACE(8);
WRITE32(op->opnum);
if ((op->opnum != OP_SETATTR) && (op->opnum != OP_LOCK) && (op->opnum != OP_LOCKT) && (op->opnum != OP_SETCLIENTID) && (op->status)) {
*p++ = op->status;
ADJUST_ARGS();
return;
} else {
statp = p++; /* to be backfilled at the end */
ADJUST_ARGS();
}
statp = p++; /* to be backfilled at the end */
ADJUST_ARGS();
switch (op->opnum) {
case OP_ACCESS:
......@@ -2406,6 +2414,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
*
* XDR note: do not encode rp->rp_buflen: the buffer contains the
* previously sent already encoded operation.
*
* called with nfs4_lock_state() held
*/
void
nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
......@@ -2423,6 +2433,7 @@ nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
RESERVE_SPACE(rp->rp_buflen);
WRITEMEM(rp->rp_buf, rp->rp_buflen);
ADJUST_ARGS();
nfs4_unlock_state();
}
/*
......
......@@ -1340,8 +1340,6 @@ static int ntfs_prepare_nonresident_write(struct page *page,
void *kaddr;
clear_buffer_new(bh);
if (buffer_uptodate(bh))
buffer_error();
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + block_start, 0, bh->b_size);
kunmap_atomic(kaddr, KM_USER0);
......
......@@ -1925,7 +1925,6 @@ static int map_block_for_writepage(struct inode *inode,
th.t_trans_id = 0;
if (!buffer_uptodate(bh_result)) {
buffer_error();
return -EIO;
}
......@@ -2057,8 +2056,6 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control
* in the BH_Uptodate is just a sanity check.
*/
if (!page_has_buffers(page)) {
if (!PageUptodate(page))
buffer_error();
create_empty_buffers(page, inode->i_sb->s_blocksize,
(1 << BH_Dirty) | (1 << BH_Uptodate));
}
......@@ -2120,8 +2117,6 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control
}
}
if (test_clear_buffer_dirty(bh)) {
if (!buffer_uptodate(bh))
buffer_error();
mark_buffer_async_write(bh);
} else {
unlock_buffer(bh);
......
......@@ -86,6 +86,7 @@ static struct workqueue_struct *commit_wq;
/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY 2
#define LIST_COMMIT_PENDING 4 /* someone will commit this list */
/* flags for do_journal_end */
#define FLUSH_ALL 1 /* flush commit and real blocks */
......@@ -2308,6 +2309,7 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = commit_max_age;
} else {
SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
SB_JOURNAL_DEFAULT_MAX_COMMIT_AGE(p_s_sb) = SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb);
SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = JOURNAL_MAX_TRANS_AGE;
}
......@@ -2462,8 +2464,20 @@ void reiserfs_wait_on_write_block(struct super_block *s) {
}
static void queue_log_writer(struct super_block *s) {
wait_queue_t wait;
set_bit(WRITERS_QUEUED, &SB_JOURNAL(s)->j_state);
sleep_on(&SB_JOURNAL(s)->j_join_wait);
/*
* we don't want to use wait_event here because
* we only want to wait once.
*/
init_waitqueue_entry(&wait, current);
add_wait_queue(&SB_JOURNAL(s)->j_join_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
if (test_bit(WRITERS_QUEUED, &SB_JOURNAL(s)->j_state))
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&SB_JOURNAL(s)->j_join_wait, &wait);
}
static void wake_queued_writers(struct super_block *s) {
......@@ -2476,7 +2490,9 @@ static void let_transaction_grow(struct super_block *sb,
{
unsigned long bcount = SB_JOURNAL(sb)->j_bcount;
while(1) {
yield();
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
SB_JOURNAL(sb)->j_current_jl->j_state |= LIST_COMMIT_PENDING;
while ((atomic_read(&SB_JOURNAL(sb)->j_wcount) > 0 ||
atomic_read(&SB_JOURNAL(sb)->j_jlock)) &&
SB_JOURNAL(sb)->j_trans_id == trans_id) {
......@@ -2909,9 +2925,15 @@ static void flush_async_commits(void *p) {
flush_commit_list(p_s_sb, jl, 1);
}
unlock_kernel();
atomic_inc(&SB_JOURNAL(p_s_sb)->j_async_throttle);
filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
atomic_dec(&SB_JOURNAL(p_s_sb)->j_async_throttle);
/*
* this is a little racy, but there's no harm in missing
* the filemap_fdatawrite
*/
if (!atomic_read(&SB_JOURNAL(p_s_sb)->j_async_throttle)) {
atomic_inc(&SB_JOURNAL(p_s_sb)->j_async_throttle);
filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
atomic_dec(&SB_JOURNAL(p_s_sb)->j_async_throttle);
}
}
/*
......@@ -3000,7 +3022,8 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, struct supe
jl = SB_JOURNAL(p_s_sb)->j_current_jl;
trans_id = jl->j_trans_id;
if (wait_on_commit)
jl->j_state |= LIST_COMMIT_PENDING;
atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
if (flush) {
SB_JOURNAL(p_s_sb)->j_next_full_flush = 1 ;
......@@ -3522,8 +3545,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
if (flush) {
flush_commit_list(p_s_sb, jl, 1) ;
flush_journal_list(p_s_sb, jl, 1) ;
} else
queue_work(commit_wq, &SB_JOURNAL(p_s_sb)->j_work);
} else if (!(jl->j_state & LIST_COMMIT_PENDING))
queue_delayed_work(commit_wq, &SB_JOURNAL(p_s_sb)->j_work, HZ/10);
/* if the next transaction has any chance of wrapping, flush
......
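The journal.c changes batch background commits: flush_async_commits() only throttles the filemap write when nobody else is already doing it, and do_journal_end() queues the async commit with a ~100 ms delay (HZ/10), skipping it entirely when the list is already LIST_COMMIT_PENDING. A sketch of the 2.6-era delayed-work calls involved, with hypothetical names:

#include <linux/param.h>        /* HZ */
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_commit(void *data)
{
        /* flush async commits here */
}
static DECLARE_WORK(demo_work, demo_commit, NULL);

static void kick_background_commit(int commit_pending)
{
        if (!commit_pending)            /* cf. LIST_COMMIT_PENDING */
                queue_delayed_work(demo_wq, &demo_work, HZ / 10);
}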
......@@ -709,13 +709,11 @@ static int reiserfs_parse_options (struct super_block * s, char * options, /* st
char *p = 0;
int val = simple_strtoul (arg, &p, 0);
/* commit=NNN (time in seconds) */
if ( *p != '\0' || val == 0) {
if ( *p != '\0' || val < 0) {
printk ("reiserfs_parse_options: bad value %s\n", arg);
return 0;
}
if ( val > 0 ) {
*commit_max_age = val;
}
*commit_max_age = val;
}
if ( c == 'w' ) {
......@@ -821,8 +819,14 @@ static int reiserfs_remount (struct super_block * s, int * mount_flags, char * a
REISERFS_SB(s)->s_mount_opt = (REISERFS_SB(s)->s_mount_opt & ~safe_mask) | (mount_options & safe_mask);
if(commit_max_age != 0) {
SB_JOURNAL_MAX_COMMIT_AGE(s) = commit_max_age;
SB_JOURNAL_MAX_TRANS_AGE(s) = commit_max_age;
SB_JOURNAL_MAX_COMMIT_AGE(s) = commit_max_age;
SB_JOURNAL_MAX_TRANS_AGE(s) = commit_max_age;
}
else
{
/* 0 means restore defaults. */
SB_JOURNAL_MAX_COMMIT_AGE(s) = SB_JOURNAL_DEFAULT_MAX_COMMIT_AGE(s);
SB_JOURNAL_MAX_TRANS_AGE(s) = JOURNAL_MAX_TRANS_AGE;
}
if(blocks) {
......
......@@ -15,13 +15,62 @@
#include <asm/arch/vmalloc.h>
/*
* We pull a couple of tricks here:
* 1. We wrap the PMD into the PGD.
* 2. We lie about the size of the PTE and PGD.
* Even though we have 256 PTE entries and 4096 PGD entries, we tell
* Linux that we actually have 512 PTE entries and 2048 PGD entries.
* Each "Linux" PGD entry is made up of two hardware PGD entries, and
* each PTE table is actually two hardware PTE tables.
* Hardware-wise, we have a two level page table structure, where the first
* level has 4096 entries, and the second level has 256 entries. Each entry
* is one 32-bit word. Most of the bits in the second level entry are used
* by hardware, and there aren't any "accessed" and "dirty" bits.
*
* Linux on the other hand has a three level page table structure, which can
* be wrapped to fit a two level page table structure easily - using the PGD
* and PTE only. However, Linux also expects one "PTE" table per page, and
* at least a "dirty" bit.
*
* Therefore, we tweak the implementation slightly - we tell Linux that we
* have 2048 entries in the first level, each of which is 8 bytes (iow, two
* hardware pointers to the second level.) The second level contains two
* hardware PTE tables arranged contiguously, followed by Linux versions
* which contain the state information Linux needs. We, therefore, end up
* with 512 entries in the "PTE" level.
*
* This leads to the page tables having the following layout:
*
* pgd pte
* | |
* +--------+ +0
* | |-----> +------------+ +0
* +- - - - + +4 | h/w pt 0 |
* | |-----> +------------+ +1024
* +--------+ +8 | h/w pt 1 |
* | | +------------+ +2048
* +- - - - + | Linux pt 0 |
* | | +------------+ +3072
* +--------+ | Linux pt 1 |
* | | +------------+ +4096
*
* See L_PTE_xxx below for definitions of bits in the "Linux pt", and
* PTE_xxx for definitions of bits appearing in the "h/w pt".
*
* PMD_xxx definitions refer to bits in the first level page table.
*
* The "dirty" bit is emulated by only granting hardware write permission
* iff the page is marked "writable" and "dirty" in the Linux PTE. This
* means that a write to a clean page will cause a permission fault, and
* the Linux MM layer will mark the page dirty via handle_pte_fault().
* For the hardware to notice the permission change, the TLB entry must
* be flushed, and ptep_establish() does that for us.
*
* The "accessed" or "young" bit is emulated by a similar method; we only
* allow accesses to the page if the "young" bit is set. Accesses to the
* page will cause a fault, and handle_pte_fault() will set the young bit
* for us as long as the page is marked present in the corresponding Linux
* PTE entry. Again, ptep_establish() will ensure that the TLB is up to
* date.
*
* However, when the "young" bit is cleared, we deny access to the page
* by clearing the hardware PTE. Currently Linux does not flush the TLB
for us in this case, which means the TLB will retain the translation
* until either the TLB entry is evicted under pressure, or a context
* switch which changes the user space mapping occurs.
*/
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 1
......
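The numbers in the new comment are worth checking: each Linux PGD entry is 8 bytes (two hardware pointers), each "PTE page" holds two 256-entry hardware tables plus two Linux shadow tables, and everything lands exactly on a 4 KB page. A quick userspace check of the arithmetic, assuming 4 KB pages and 4-byte entries:

#include <assert.h>

#define PAGE_SIZE    4096u
#define PTRS_PER_PTE 512        /* Linux view: two h/w tables of 256 */
#define PTRS_PER_PGD 2048       /* Linux view: 8-byte entries        */

int main(void)
{
        /* one Linux pgd entry maps 512 * 4 KB = 2 MB */
        assert(PTRS_PER_PTE * PAGE_SIZE == 2u << 20);
        /* 2048 entries * 2 MB span the full 4 GB address space */
        assert((unsigned long long)PTRS_PER_PGD * (2u << 20) == 1ULL << 32);
        /* two h/w tables + two Linux tables fill one page exactly */
        assert(2 * 256 * 4 + 2 * 256 * 4 == PAGE_SIZE);
        return 0;
}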
......@@ -57,7 +57,8 @@ static inline unsigned long ptep_to_address(pte_t * ptep)
{
struct page * page = kmap_atomic_to_page(ptep);
unsigned long low_bits;
low_bits = ((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE;
low_bits = ((unsigned long)ptep & (PTRS_PER_PTE*sizeof(pte_t) - 1))
* (PAGE_SIZE/sizeof(pte_t));
return page->index + low_bits;
}
......
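The ptep_to_address() fix follows from the layout above: a PTE table no longer fills a whole page, so masking with ~PAGE_MASK also picked up bits that only say where in the page the ptep sits. Only the low PTRS_PER_PTE * sizeof(pte_t) bytes identify the entry, and multiplying that byte offset by PAGE_SIZE / sizeof(pte_t) is the same as converting it to an entry index and scaling by PAGE_SIZE. A sketch of the corrected arithmetic (assumed ARM values):

/* Sketch: recover the address offset a pte entry maps. */
typedef unsigned int pte_t;             /* 4-byte entries */
#define PAGE_SIZE    4096UL
#define PTRS_PER_PTE 512

static unsigned long pte_low_bits(unsigned long ptep)
{
        /* entry offset within the table area: 0..2047, not 0..4095 */
        unsigned long off = ptep & (PTRS_PER_PTE * sizeof(pte_t) - 1);
        /* off * (4096/4) == (off / 4) * 4096: one page per entry */
        return off * (PAGE_SIZE / sizeof(pte_t));
}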
......@@ -175,7 +175,7 @@ typedef struct siginfo32 {
#undef SI_MESGQ
#define SI_ASYNCIO -2 /* sent by AIO completion */
#define SI_TIMER __SI_CODE(__SI_TIMER,-3) /* sent by timer expiration */
#define SI_MESGQ -4 /* sent by real time mesq state change */
#define SI_MESGQ __SI_CODE(__SI_MESGQ,-4) /* sent by real time mesq state change */
#ifdef __KERNEL__
......
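With __SI_CODE(T,N) defined as ((T) | ((N) & 0xffff)), a user-visible SI_* code carries its class tag in the top 16 bits. SI_TIMER already used __SI_CODE; this hunk gives SI_MESGQ the same form so the si_code >> 16 switches fixed earlier in this merge can actually classify it. A value-level sketch (tag value as in the 2.6 asm-generic header, shown for illustration):

#define __SI_MESGQ (6 << 16)
#define __SI_CODE(T, N) ((T) | ((N) & 0xffff))
#define SI_MESGQ __SI_CODE(__SI_MESGQ, -4)

/* old: SI_MESGQ was plain -4, so SI_MESGQ >> 16 == -1 (no class tag)   */
/* new: SI_MESGQ == 0x6fffc, so SI_MESGQ >> 16 == 6 == __SI_MESGQ >> 16 */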
......@@ -61,13 +61,6 @@ struct buffer_head {
struct list_head b_assoc_buffers; /* associated with another mapping */
};
/*
* Debug
*/
void __buffer_error(char *file, int line);
#define buffer_error() __buffer_error(__FILE__, __LINE__)
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
......@@ -177,7 +170,7 @@ void free_buffer_head(struct buffer_head * bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void ll_rw_block(int, int, struct buffer_head * bh[]);
void sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
void submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
......
......@@ -123,6 +123,19 @@ COMPATIBLE_IOCTL(STOP_ARRAY)
COMPATIBLE_IOCTL(STOP_ARRAY_RO)
COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
/* DM */
COMPATIBLE_IOCTL(DM_VERSION_32)
COMPATIBLE_IOCTL(DM_LIST_DEVICES_32)
COMPATIBLE_IOCTL(DM_DEV_CREATE_32)
COMPATIBLE_IOCTL(DM_DEV_REMOVE_32)
COMPATIBLE_IOCTL(DM_DEV_RENAME_32)
COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32)
COMPATIBLE_IOCTL(DM_DEV_STATUS_32)
COMPATIBLE_IOCTL(DM_DEV_WAIT_32)
COMPATIBLE_IOCTL(DM_TABLE_LOAD_32)
COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32)
COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
COMPATIBLE_IOCTL(DM_VERSION)
COMPATIBLE_IOCTL(DM_LIST_DEVICES)
COMPATIBLE_IOCTL(DM_DEV_CREATE)
......
......@@ -129,8 +129,14 @@ struct dm_target_spec {
int32_t status; /* used when reading from kernel only */
/*
* Offset in bytes (from the start of this struct) to
* next target_spec.
* Location of the next dm_target_spec.
* - When specifying targets on a DM_TABLE_LOAD command, this value is
* the number of bytes from the start of the "current" dm_target_spec
* to the start of the "next" dm_target_spec.
* - When retrieving targets on a DM_TABLE_STATUS command, this value
* is the number of bytes from the start of the first dm_target_spec
* (that follows the dm_ioctl struct) to the start of the "next"
* dm_target_spec.
*/
uint32_t next;
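A minimal sketch of walking a DM_TABLE_STATUS reply under the second convention, where every next is measured from the first dm_target_spec rather than from the current one (the dm_ioctl fields data_start and target_count locate and count the specs; error handling omitted):

#include <stdint.h>
#include <linux/dm-ioctl.h>

/* Sketch: 'first' points just past the dm_ioctl header; each 'next'
 * is an offset from 'first', not from the current spec. */
static void walk_status(struct dm_ioctl *io)
{
	char *first = (char *)io + io->data_start;
	struct dm_target_spec *spec = (struct dm_target_spec *)first;
	uint32_t i;

	for (i = 0; i < io->target_count; i++) {
		/* inspect spec->target_type and the status string here */
		spec = (struct dm_target_spec *)(first + spec->next);
	}
}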
......@@ -200,6 +206,34 @@ enum {
DM_LIST_VERSIONS_CMD,
};
/*
* The dm_ioctl struct passed into the ioctl is just the header
* on a larger chunk of memory. On x86-64 and other
* architectures the dm_ioctl struct will be padded to an 8 byte
* boundary, so the size will differ, which would change the
* ioctl code - yes, I really messed up. This hack forces these
* architectures to have the correct ioctl code.
*/
#ifdef CONFIG_COMPAT
typedef char ioctl_struct[308];
#define DM_VERSION_32 _IOWR(DM_IOCTL, DM_VERSION_CMD, ioctl_struct)
#define DM_REMOVE_ALL_32 _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, ioctl_struct)
#define DM_LIST_DEVICES_32 _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, ioctl_struct)
#define DM_DEV_CREATE_32 _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, ioctl_struct)
#define DM_DEV_REMOVE_32 _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, ioctl_struct)
#define DM_DEV_RENAME_32 _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, ioctl_struct)
#define DM_DEV_SUSPEND_32 _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, ioctl_struct)
#define DM_DEV_STATUS_32 _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, ioctl_struct)
#define DM_DEV_WAIT_32 _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, ioctl_struct)
#define DM_TABLE_LOAD_32 _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, ioctl_struct)
#define DM_TABLE_CLEAR_32 _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, ioctl_struct)
#define DM_TABLE_DEPS_32 _IOWR(DM_IOCTL, DM_TABLE_DEPS_CMD, ioctl_struct)
#define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct)
#define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct)
#endif
#define DM_IOCTL 0xfd
#define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl)
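The size dependence that makes this hack necessary comes from _IOWR() itself: the generic ioctl encoding folds sizeof(type) into the command word. A sketch of that layout, with shift values assumed from the generic <asm/ioctl.h>:

/* Assumed layout: dir:2 | size:14 | type:8 | nr:8. Any change in
 * sizeof(arg) changes the command number, which is why the _32
 * variants above pin the size to the 308-byte 32-bit layout. */
#define _IOC_SKETCH(dir, type, nr, size) \
	(((dir) << 30) | ((size) << 16) | ((type) << 8) | (nr))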
......
......@@ -751,7 +751,6 @@ struct super_block {
char s_id[32]; /* Informational name */
struct kobject kobj; /* anchor for sysfs */
void *s_fs_info; /* Filesystem private info */
/*
......@@ -1359,7 +1358,7 @@ extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list);
extern void file_kill(struct file *f);
struct bio;
extern int submit_bio(int, struct bio *);
extern void submit_bio(int, struct bio *);
extern int bdev_read_only(struct block_device *);
extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
......
......@@ -526,9 +526,8 @@ extern void si_meminfo_node(struct sysinfo *val, int nid);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
struct rb_node **, struct rb_node *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct *,
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, unsigned long pgoff);
extern void vma_relink_file(struct vm_area_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
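The extra level of indirection in copy_vma() exists because vma_merge() may absorb the source vma into the new one; callers must reload it through the pointer afterwards, as the move_vma() hunk later in this diff does. A minimal caller sketch:

/* Sketch: pass &vma so copy_vma() can repoint it when the source
 * vma is merged into new_vma; use only the reloaded 'vma' after. */
new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
if (!new_vma)
	return -ENOMEM;
/* 'vma' may now refer to new_vma itself */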
......
......@@ -196,6 +196,9 @@ void nfsd_lockd_shutdown(void);
#define nfserr_openmode __constant_htonl(NFSERR_OPENMODE)
#define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD)
#define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL)
#define nfserr_grace __constant_htonl(NFSERR_GRACE)
#define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE)
#define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD)
/* error codes for internal use */
/* if a request fails due to kmalloc failure, it gets dropped.
......
......@@ -132,6 +132,9 @@ struct nfs4_replay {
* release a stateowner.
* so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when
* close is called to reap associated byte-range locks
* so_close_lru: (open) stateowner is placed on this list instead of being
* reaped (when so_perfilestate is empty), to hold the last close replay;
* it is reaped by the laundromat thread after the lease period (see the
* sketch after this hunk).
*/
struct nfs4_stateowner {
struct list_head so_idhash; /* hash by so_id */
......@@ -139,6 +142,8 @@ struct nfs4_stateowner {
struct list_head so_perclient; /* nfs4_client->cl_perclient */
struct list_head so_perfilestate; /* list: nfs4_stateid */
struct list_head so_perlockowner; /* nfs4_stateid->st_perlockowner */
struct list_head so_close_lru; /* tail queue */
time_t so_time; /* time of placement on so_close_lru */
int so_is_open_owner; /* 1=openowner,0=lockowner */
u32 so_id;
struct nfs4_client * so_client;
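A rough sketch of the reaping that so_close_lru and so_time imply; close_lru, lease_time, and release_stateowner() are assumed names, not necessarily the ones used in nfs4state.c:

/* Sketch: so_close_lru is kept in placement order (a tail queue), so
 * the laundromat can stop at the first entry younger than the lease. */
struct nfs4_stateowner *so, *tmp;
time_t cutoff = get_seconds() - lease_time;	/* assumed variable */

list_for_each_entry_safe(so, tmp, &close_lru, so_close_lru) {
	if (so->so_time > cutoff)
		break;
	release_stateowner(so);			/* assumed helper */
}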
......@@ -194,6 +199,7 @@ struct nfs4_stateid {
#define OPEN_STATE 0x00000004
#define LOCK_STATE 0x00000008
#define RDWR_STATE 0x00000010
#define CLOSE_STATE 0x00000020
#define seqid_mutating_err(err) \
(((err) != nfserr_stale_clientid) && \
......@@ -209,4 +215,6 @@ extern int nfs4_share_conflict(struct svc_fh *current_fh,
unsigned int deny_type);
extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);
extern int nfs4_in_no_grace(void);
#endif /* NFSD4_STATE_H */
......@@ -208,6 +208,7 @@ struct reiserfs_journal {
unsigned int s_journal_trans_max ; /* max number of blocks in a transaction. */
unsigned int s_journal_max_batch ; /* max number of blocks to batch into a trans */
unsigned int s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
unsigned int s_journal_default_max_commit_age ; /* the default for the max commit age */
unsigned int s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
struct reiserfs_journal_cnode *j_cnode_free_list ;
......@@ -481,6 +482,7 @@ int reiserfs_resize(struct super_block *, unsigned long) ;
#define SB_JOURNAL_TRANS_MAX(s) (SB_JOURNAL(s)->s_journal_trans_max)
#define SB_JOURNAL_MAX_BATCH(s) (SB_JOURNAL(s)->s_journal_max_batch)
#define SB_JOURNAL_MAX_COMMIT_AGE(s) (SB_JOURNAL(s)->s_journal_max_commit_age)
#define SB_JOURNAL_DEFAULT_MAX_COMMIT_AGE(s) (SB_JOURNAL(s)->s_journal_default_max_commit_age)
#define SB_JOURNAL_MAX_TRANS_AGE(s) (SB_JOURNAL(s)->s_journal_max_trans_age)
/* A safe version of the "bdevname", which returns the "s_id" field of
......
......@@ -153,7 +153,7 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = MQUEUE_MAGIC;
sb->s_op = &mqueue_super_ops;
inode = mqueue_get_inode(sb, S_IFDIR | S_IRWXUGO);
inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO);
if (!inode)
return -ENOMEM;
......@@ -685,10 +685,6 @@ asmlinkage long sys_mq_unlink(const char __user *u_name)
goto out_err;
}
if (permission(dentry->d_inode, MAY_WRITE, NULL)) {
err = -EACCES;
goto out_err;
}
inode = dentry->d_inode;
if (inode)
atomic_inc(&inode->i_count);
......
......@@ -2052,7 +2052,6 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
case __SI_MESGQ: /* But this is */
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
err |= __put_user(from->si_ptr, &to->si_ptr);
break;
default: /* this is just in case for now ... */
......
......@@ -1498,9 +1498,11 @@ void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
* Copy the vma structure to a new location in the same mm,
* prior to moving page table entries, to effect an mremap move.
*/
struct vm_area_struct *copy_vma(struct vm_area_struct *vma,
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unsigned long addr, unsigned long len, unsigned long pgoff)
{
struct vm_area_struct *vma = *vmap;
unsigned long vma_start = vma->vm_start;
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma, *prev;
struct rb_node **rb_link, *rb_parent;
......@@ -1508,7 +1510,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct *vma,
find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
new_vma = vma_merge(mm, prev, rb_parent, addr, addr + len,
vma->vm_flags, vma->vm_file, pgoff);
if (!new_vma) {
if (new_vma) {
/*
* Source vma may have been merged into new_vma
*/
if (vma_start >= new_vma->vm_start &&
vma_start < new_vma->vm_end)
*vmap = new_vma;
} else {
new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (new_vma) {
*new_vma = *vma;
......@@ -1525,24 +1534,3 @@ struct vm_area_struct *copy_vma(struct vm_area_struct *vma,
}
return new_vma;
}
/*
* Position vma after prev in shared file list:
* for mremap move error recovery racing against vmtruncate.
*/
void vma_relink_file(struct vm_area_struct *vma, struct vm_area_struct *prev)
{
struct mm_struct *mm = vma->vm_mm;
struct address_space *mapping;
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
if (mapping) {
down(&mapping->i_shared_sem);
spin_lock(&mm->page_table_lock);
list_move(&vma->shared, &prev->shared);
spin_unlock(&mm->page_table_lock);
up(&mapping->i_shared_sem);
}
}
}
......@@ -169,6 +169,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
unsigned long new_len, unsigned long new_addr)
{
struct mm_struct *mm = vma->vm_mm;
struct address_space *mapping = NULL;
struct vm_area_struct *new_vma;
unsigned long vm_flags = vma->vm_flags;
unsigned long new_pgoff;
......@@ -184,30 +185,35 @@ static unsigned long move_vma(struct vm_area_struct *vma,
return -ENOMEM;
new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
new_vma = copy_vma(vma, new_addr, new_len, new_pgoff);
new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
if (!new_vma)
return -ENOMEM;
if (vma->vm_file) {
/*
* Subtle point from Rajesh Venkatasubramanian: before
* moving file-based ptes, we must lock vmtruncate out,
* since it might clean the dst vma before the src vma,
* and we propagate stale pages into the dst afterward.
*/
mapping = vma->vm_file->f_mapping;
down(&mapping->i_shared_sem);
}
moved_len = move_page_tables(vma, new_addr, old_addr, old_len);
if (moved_len < old_len) {
/*
* On error, move entries back from new area to old,
* which will succeed since page tables still there,
* and then proceed to unmap new area instead of old.
*
* Subtle point from Rajesh Venkatasubramanian: before
* moving file-based ptes, move new_vma before old vma
* in the i_mmap or i_mmap_shared list, so when racing
* against vmtruncate we cannot propagate pages to be
* truncated back from new_vma into just cleaned old.
*/
vma_relink_file(vma, new_vma);
move_page_tables(new_vma, old_addr, new_addr, moved_len);
vma = new_vma;
old_len = new_len;
old_addr = new_addr;
new_addr = -ENOMEM;
}
if (mapping)
up(&mapping->i_shared_sem);
/* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT) {
......
......@@ -895,6 +895,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
svc_putu32(resv, rpc_success);
goto complete;
case RPC_GSS_PROC_DATA:
*authp = rpc_autherr_badcred;
rqstp->rq_client =
find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
if (rqstp->rq_client == NULL)
......