Commit 2bb1ff4c authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents d27dca13 5650a43b
......@@ -273,6 +273,13 @@ config MVIAC3_2
endchoice
config X86_GENERIC
bool "Generic x86 support"
help
Include some tuning for non-selected x86 CPUs too,
when it has moderate overhead. This is intended for generic
distribution kernels.
#
# Define implied options from the CPU selection here
#
......@@ -288,10 +295,10 @@ config X86_XADD
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || X86_GENERIC
default "4" if MELAN || M486 || M386
default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2
default "6" if MK7 || MK8
default "7" if MPENTIUM4
config RWSEM_GENERIC_SPINLOCK
bool
......@@ -363,16 +370,6 @@ config X86_OOSTORE
depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6
default y
config X86_PREFETCH
bool
depends on MPENTIUMIII || MPENTIUM4 || MVIAC3_2
default y
config X86_SSE2
bool
depends on MK8 || MPENTIUM4
default y
config HUGETLB_PAGE
bool "Huge TLB Page Support"
help
......@@ -413,6 +410,18 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
int "Maximum number of CPUs (2-32)"
depends on SMP
default "32"
help
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 32 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
config PREEMPT
bool "Preemptible Kernel"
help
......@@ -465,18 +474,6 @@ config X86_IO_APIC
depends on !SMP && X86_UP_IOAPIC
default y
config NR_CPUS
int "Maximum number of CPUs (2-32)"
depends on SMP
default "32"
help
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 32 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
config X86_TSC
bool
depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2) && !X86_NUMAQ
......@@ -989,6 +986,11 @@ config X86_LOCAL_APIC
depends on (X86_VISWS || SMP) && !X86_VOYAGER
default y
config X86_IO_APIC
bool
depends on SMP && !(X86_VISWS || X86_VOYAGER)
default y
config PCI
bool "PCI support" if !X86_VISWS
depends on !X86_VOYAGER
......@@ -1004,11 +1006,6 @@ config PCI
information about which PCI hardware does work under Linux and which
doesn't.
config X86_IO_APIC
bool
depends on SMP && !(X86_VISWS || X86_VOYAGER)
default y
choice
prompt "PCI access mode"
depends on PCI && !X86_VISWS
......@@ -1048,18 +1045,6 @@ config PCI_DIRECT
depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
default y
config SCx200
tristate "NatSemi SCx200 support"
depends on !X86_VOYAGER
help
This provides basic support for the National Semiconductor SCx200
processor. Right now this is just a driver for the GPIO pins.
If you don't know what to do here, say N.
This support is also available as a module. If compiled as a
module, it will be called scx200.
source "drivers/pci/Kconfig"
config ISA
......@@ -1105,6 +1090,18 @@ config MCA
source "drivers/mca/Kconfig"
config SCx200
tristate "NatSemi SCx200 support"
depends on !X86_VOYAGER
help
This provides basic support for the National Semiconductor SCx200
processor. Right now this is just a driver for the GPIO pins.
If you don't know what to do here, say N.
This support is also available as a module. If compiled as a
module, it will be called scx200.
config HOTPLUG
bool "Support for hot-pluggable devices"
---help---
......
......@@ -1205,7 +1205,17 @@ static int suspend(int vetoable)
spin_lock(&i8253_lock);
get_time_diff();
/*
* Irq spinlock must be dropped around set_system_power_state.
* We'll undo any timer changes due to interrupts below.
*/
spin_unlock(&i8253_lock);
write_sequnlock_irq(&xtime_lock);
err = set_system_power_state(APM_STATE_SUSPEND);
write_seqlock_irq(&xtime_lock);
spin_lock(&i8253_lock);
reinit_timer();
set_time();
ignore_normal_resume = 1;
......
......@@ -178,6 +178,15 @@ static void __init init_amd(struct cpuinfo_x86 *c)
break;
}
switch (c->x86) {
case 15:
set_bit(X86_FEATURE_K8, c->x86_capability);
break;
case 6:
set_bit(X86_FEATURE_K7, c->x86_capability);
break;
}
display_cacheinfo(c);
}
......
......@@ -353,6 +353,11 @@ static void __init init_intel(struct cpuinfo_x86 *c)
break;
}
#endif
if (c->x86 == 15)
set_bit(X86_FEATURE_P4, c->x86_capability);
if (c->x86 == 6)
set_bit(X86_FEATURE_P3, c->x86_capability);
}
......
......@@ -795,41 +795,91 @@ static void __init register_memory(unsigned long max_low_pfn)
pci_mem_start = low_mem_size;
}
/* Use inline assembly to define this because the nops are defined
as inline assembly strings in the include files and we cannot
get them easily into strings. */
asm("intelnops: "
GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
GENERIC_NOP7 GENERIC_NOP8);
asm("k8nops: "
K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
K8_NOP7 K8_NOP8);
asm("k7nops: "
K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
K7_NOP7 K7_NOP8);
extern unsigned char intelnops[], k8nops[], k7nops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
NULL,
intelnops,
intelnops + 1,
intelnops + 1 + 2,
intelnops + 1 + 2 + 3,
intelnops + 1 + 2 + 3 + 4,
intelnops + 1 + 2 + 3 + 4 + 5,
intelnops + 1 + 2 + 3 + 4 + 5 + 6,
intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
NULL,
k8nops,
k8nops + 1,
k8nops + 1 + 2,
k8nops + 1 + 2 + 3,
k8nops + 1 + 2 + 3 + 4,
k8nops + 1 + 2 + 3 + 4 + 5,
k8nops + 1 + 2 + 3 + 4 + 5 + 6,
k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
NULL,
k7nops,
k7nops + 1,
k7nops + 1 + 2,
k7nops + 1 + 2 + 3,
k7nops + 1 + 2 + 3 + 4,
k7nops + 1 + 2 + 3 + 4 + 5,
k7nops + 1 + 2 + 3 + 4 + 5 + 6,
k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
static struct nop {
int cpuid;
unsigned char **noptable;
} noptypes[] = {
{ X86_FEATURE_K8, k8_nops },
{ X86_FEATURE_K7, k7_nops },
{ -1, 0 }
};
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
self-modifying code. This implies that asymmetric systems where
APs have fewer capabilities than the boot processor are not handled.
In this case, boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
struct alt_instr *a;
int diff, i, k;
for (a = start; a < (struct alt_instr *)end;
a = (void *)ALIGN((unsigned long)(a + 1) + a->instrlen, 4)) {
unsigned char **noptable = intel_nops;
for (i = 0; noptypes[i].cpuid >= 0; i++) {
if (boot_cpu_has(noptypes[i].cpuid)) {
noptable = noptypes[i].noptable;
break;
}
}
for (a = start; (void *)a < end; a++) {
if (!boot_cpu_has(a->cpuid))
continue;
BUG_ON(a->replacementlen > a->instrlen);
memcpy(a->instr, a->replacement, a->replacementlen);
diff = a->instrlen - a->replacementlen;
/* Pad the rest with nops */
for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
static const char *nops[] = {
0,
"\x90",
#if CONFIG_MK7 || CONFIG_MK8
"\x66\x90",
"\x66\x66\x90",
"\x66\x66\x66\x90",
#else
"\x89\xf6",
"\x8d\x76\x00",
"\x8d\x74\x26\x00",
#endif
};
k = min_t(int, diff, ARRAY_SIZE(nops));
memcpy(a->instr + i, nops[k], k);
k = diff;
if (k > ASM_NOP_MAX)
k = ASM_NOP_MAX;
memcpy(a->instr + i, noptable[k], k);
}
}
}
......
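The patching step above is plain byte surgery. Below is a standalone sketch (illustrative only, not kernel code; the site contents, replacement bytes and padding sequence are invented) of what apply_alternatives() does for one entry: copy the replacement over the original instruction site, then pad the leftover bytes from a CPU-specific nop table such as intel_nops[]/k8_nops[]/k7_nops[] above.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* invented 8-byte patch site, pre-filled with one-byte nops (0x90) */
	unsigned char site[8] = { 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90 };
	/* invented replacement: prefetchnta (%ecx), 3 bytes */
	unsigned char repl[3] = { 0x0f, 0x18, 0x01 };
	int instrlen = 8, replacementlen = 3, i;
	/* padding for the 5 leftover bytes, like a generic 5-byte nop sequence */
	unsigned char nop5[5] = { 0x90, 0x8d, 0x74, 0x26, 0x00 };

	memcpy(site, repl, replacementlen);                             /* new instruction */
	memcpy(site + replacementlen, nop5, instrlen - replacementlen); /* pad with nops */

	for (i = 0; i < instrlen; i++)
		printf("%02x ", site[i]);
	printf("\n");
	return 0;
}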
......@@ -67,11 +67,11 @@ static void mark_offset_cyclone(void)
/* lost tick compensation */
delta = last_cyclone_low - delta;
delta /=(CYCLONE_TIMER_FREQ/1000000);
delta /= (CYCLONE_TIMER_FREQ/1000000);
delta += delay_at_last_interrupt;
lost = delta/(1000000/HZ);
delay = delta%(1000000/HZ);
if(lost >= 2)
if (lost >= 2)
jiffies += lost-1;
/* update the monotonic base value */
......@@ -83,10 +83,12 @@ static void mark_offset_cyclone(void)
count = ((LATCH-1) - count) * TICK_SIZE;
delay_at_last_interrupt = (count + LATCH/2) / LATCH;
/* catch corner case where tick rollover
* occured between cyclone and pit reads
/* catch corner case where tick rollover occurred
* between cyclone and pit reads (as noted when
* usec delta is > 90% # of usecs/tick)
*/
if(abs(delay - delay_at_last_interrupt) > 900)
if (abs(delay - delay_at_last_interrupt) > (900000/HZ))
jiffies++;
}
......
......@@ -178,7 +178,7 @@ static void mark_offset_tsc(void)
delta += delay_at_last_interrupt;
lost = delta/(1000000/HZ);
delay = delta%(1000000/HZ);
if(lost >= 2)
if (lost >= 2)
jiffies += lost-1;
/* update the monotonic base value */
......@@ -190,10 +190,11 @@ static void mark_offset_tsc(void)
count = ((LATCH-1) - count) * TICK_SIZE;
delay_at_last_interrupt = (count + LATCH/2) / LATCH;
/* catch corner case where tick rollover
* occured between tsc and pit reads
/* catch corner case where tick rollover occurred
* between tsc and pit reads (as noted when
* usec delta is > 90% # of usecs/tick)
*/
if(abs(delay - delay_at_last_interrupt) > 900)
if (abs(delay - delay_at_last_interrupt) > (900000/HZ))
jiffies++;
}
......
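The new threshold in both the cyclone and TSC routines above is just the comment's arithmetic made HZ-independent: one tick is 1000000/HZ microseconds, and the rollover check should fire once the delta exceeds roughly 90% of that. A tiny standalone computation (the HZ value here is only an example):

#include <stdio.h>

int main(void)
{
	int hz = 100;                          /* example value; HZ varies by config */
	int usecs_per_tick = 1000000 / hz;     /* 10000 us per tick at HZ=100 */
	int threshold = 900000 / hz;           /* 9000 us, i.e. 90% of a tick */

	printf("tick=%dus threshold=%dus\n", usecs_per_tick, threshold);
	/* the old hard-coded 900 equals 90% of a tick only when HZ is 1000 */
	return 0;
}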
......@@ -85,6 +85,7 @@ SECTIONS
__alt_instructions = .;
.altinstructions : { *(.altinstructions) }
__alt_instructions_end = .;
.altinstr_replacement : { *(.altinstr_replacement) }
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
......
......@@ -2364,7 +2364,6 @@ typedef struct DAC960_Controller
unsigned short MaxBlocksPerCommand;
unsigned short ControllerScatterGatherLimit;
unsigned short DriverScatterGatherLimit;
unsigned int ControllerUsageCount;
u64 BounceBufferLimit;
unsigned int CombinedStatusBufferLength;
unsigned int InitialStatusLength;
......@@ -2397,7 +2396,6 @@ typedef struct DAC960_Controller
DAC960_Command_T InitialCommand;
DAC960_Command_T *Commands[DAC960_MaxDriverQueueDepth];
PROC_DirectoryEntry_T *ControllerProcEntry;
unsigned int LogicalDriveUsageCount[DAC960_MaxLogicalDrives];
boolean LogicalDriveInitiallyAccessible[DAC960_MaxLogicalDrives];
void (*QueueCommand)(DAC960_Command_T *Command);
boolean (*ReadControllerConfiguration)(struct DAC960_Controller *);
......@@ -4242,9 +4240,6 @@ static irqreturn_t DAC960_P_InterruptHandler(int, void *, Registers_T *);
static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
static void DAC960_MonitoringTimerFunction(unsigned long);
static int DAC960_Open(Inode_T *, File_T *);
static int DAC960_Release(Inode_T *, File_T *);
static int DAC960_IOCTL(Inode_T *, File_T *, unsigned int, unsigned long);
static int DAC960_UserIOCTL(Inode_T *, File_T *, unsigned int, unsigned long);
static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
DAC960_Controller_T *, ...);
......
......@@ -57,7 +57,9 @@
#include "xd.h"
static void __init do_xd_setup (int *integers);
#ifdef MODULE
static int xd[5] = { -1,-1,-1,-1, };
#endif
#define XD_DONT_USE_DMA 0 /* Initial value; may be overridden using
"nodma" module option */
......@@ -148,16 +150,18 @@ static struct request_queue xd_queue;
static int __init xd_init(void)
{
u_char i,controller;
u_char count = 0;
unsigned int address;
int err;
#ifdef MODULE
for (i = 4; i > 0; i--)
if (((xd[i] = xd[i-1]) >= 0) && !count)
count = i;
if ((xd[0] = count))
do_xd_setup(xd);
{
u_char count = 0;
for (i = 4; i > 0; i--)
if (((xd[i] = xd[i-1]) >= 0) && !count)
count = i;
if ((xd[0] = count))
do_xd_setup(xd);
}
#endif
init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
......
......@@ -502,8 +502,6 @@ static struct device_attribute * const mwave_dev_attrs[] = {
&dev_attr_uart_irq,
&dev_attr_uart_io,
};
static int nr_registered_attrs;
static int device_registered;
/*
* mwave_init is called on module load
......@@ -518,13 +516,13 @@ static void mwave_exit(void)
PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");
for (i = 0; i < nr_registered_attrs; i++)
for (i = 0; i < pDrvData->nr_registered_attrs; i++)
device_remove_file(&mwave_device, mwave_dev_attrs[i]);
nr_registered_attrs = 0;
pDrvData->nr_registered_attrs = 0;
if (device_registered) {
if (pDrvData->device_registered) {
device_unregister(&mwave_device);
device_registered = 0;
pDrvData->device_registered = FALSE;
}
if ( pDrvData->sLine >= 0 ) {
......@@ -650,7 +648,7 @@ static int __init mwave_init(void)
if (device_register(&mwave_device))
goto cleanup_error;
device_registered = 1;
pDrvData->device_registered = TRUE;
for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
PRINTK_ERROR(KERN_ERR_MWAVE
......@@ -659,7 +657,7 @@ static int __init mwave_init(void)
mwave_dev_attrs[i]->attr.name);
goto cleanup_error;
}
nr_registered_attrs++;
pDrvData->nr_registered_attrs++;
}
/* SUCCESS! */
......
......@@ -140,6 +140,8 @@ typedef struct _MWAVE_DEVICE_DATA {
MWAVE_IPC IPCs[16];
BOOLEAN bMwaveDevRegistered;
short sLine;
int nr_registered_attrs;
int device_registered;
} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
......
......@@ -132,9 +132,10 @@ static void pcf_isa_waitforpin(void) {
}
static void pcf_isa_handler(int this_irq, void *dev_id, struct pt_regs *regs) {
static irqreturn_t pcf_isa_handler(int this_irq, void *dev_id, struct pt_regs *regs) {
pcf_pending = 1;
wake_up_interruptible(&pcf_wait);
return IRQ_HANDLED;
}
......
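The pcf_isa_handler change above is the first of many conversions in this merge to interrupt handlers that return irqreturn_t; the bttv and IrDA hunks below follow the same shape. A minimal sketch of that shape in kernel context (struct example_dev, device_has_pending_irq() and example_do_work() are hypothetical names used only for illustration):

#include <linux/interrupt.h>

/* hypothetical device state and helpers, for illustration only */
struct example_dev { int pending; };
static int device_has_pending_irq(struct example_dev *dev) { return dev->pending; }
static void example_do_work(struct example_dev *dev) { dev->pending = 0; }

static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_dev *dev = dev_id;

	if (!dev || !device_has_pending_irq(dev))
		return IRQ_NONE;       /* not ours; lets shared-IRQ handling work */

	example_do_work(dev);
	return IRQ_HANDLED;            /* interrupt was serviced */
}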
comment "Micro Channel Architecture Bus support"
depends on MCA
config MCA_LEGACY
bool "Legacy MCA API Support"
depends on MCA
......
......@@ -1279,7 +1279,7 @@ static int bttv_prepare_buffer(struct bttv *btv, struct bttv_buffer *buf,
}
static int
buffer_setup(struct file *file, int *count, int *size)
buffer_setup(struct file *file, unsigned int *count, unsigned int *size)
{
struct bttv_fh *fh = file->private_data;
......@@ -3156,22 +3156,23 @@ bttv_irq_switch_fields(struct bttv *btv)
spin_unlock(&btv->s_lock);
}
static void bttv_irq(int irq, void *dev_id, struct pt_regs * regs)
static irqreturn_t bttv_irq(int irq, void *dev_id, struct pt_regs * regs)
{
u32 stat,astat;
u32 dstat;
int count;
struct bttv *btv;
int handled = 0;
btv=(struct bttv *)dev_id;
count=0;
while (1)
{
while (1) {
/* get/clear interrupt status bits */
stat=btread(BT848_INT_STAT);
astat=stat&btread(BT848_INT_MASK);
if (!astat)
return;
break;
handled = 1;
btwrite(stat,BT848_INT_STAT);
/* get device status bits */
......@@ -3231,6 +3232,7 @@ static void bttv_irq(int irq, void *dev_id, struct pt_regs * regs)
"bttv%d: IRQ lockup, cleared int mask\n", btv->nr);
}
}
return IRQ_RETVAL(handled);
}
......
......@@ -95,7 +95,6 @@ static int ali_ircc_net_close(struct net_device *dev);
static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int ali_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
static void ali_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void ali_ircc_suspend(struct ali_ircc_cb *self);
static void ali_ircc_wakeup(struct ali_ircc_cb *self);
static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
......@@ -632,7 +631,8 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
* An interrupt from the chip has arrived. Time to do some work
*
*/
static void ali_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct ali_ircc_cb *self;
......@@ -641,7 +641,7 @@ static void ali_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (!dev) {
WARNING("%s: irq %d for unknown device.\n", driver_name, irq);
return;
return IRQ_NONE;
}
self = (struct ali_ircc_cb *) dev->priv;
......@@ -656,7 +656,8 @@ static void ali_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
spin_unlock(&self->lock);
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
return IRQ_HANDLED;
}
/*
* Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self, regs)
......
......@@ -745,20 +745,20 @@ STATIC int toshoboe_invalid_dev(int irq)
return 1;
}
STATIC void
STATIC irqreturn_t
toshoboe_probeinterrupt (int irq, void *dev_id, struct pt_regs *regs)
{
struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
__u8 irqstat;
if (self == NULL && toshoboe_invalid_dev(irq))
return;
return IRQ_NONE;
irqstat = INB (OBOE_ISR);
/* was it us */
if (!(irqstat & OBOE_INT_MASK))
return;
return IRQ_NONE;
/* Ack all the interrupts */
OUTB (irqstat, OBOE_ISR);
......@@ -791,6 +791,7 @@ toshoboe_probeinterrupt (int irq, void *dev_id, struct pt_regs *regs)
if (irqstat & OBOE_INT_SIP) {
self->int_sip++;
PROBE_DEBUG("I"); }
return IRQ_HANDLED;
}
STATIC int
......
......@@ -131,7 +131,6 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev);
static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
static void nsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
static int nsc_ircc_read_dongle_id (int iobase);
static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
......@@ -1781,7 +1780,8 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
* An interrupt from the chip has arrived. Time to do some work
*
*/
static void nsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct nsc_ircc_cb *self;
......@@ -1790,7 +1790,7 @@ static void nsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (!dev) {
WARNING("%s: irq %d for unknown device.\n", driver_name, irq);
return;
return IRQ_NONE;
}
self = (struct nsc_ircc_cb *) dev->priv;
......@@ -1818,6 +1818,7 @@ static void nsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
outb(bsr, iobase+BSR); /* Restore bank register */
spin_unlock(&self->lock);
return IRQ_HANDLED;
}
/*
......
......@@ -992,9 +992,9 @@ static irqreturn_t ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
irport = (struct irport_cb *) dev->priv;
ASSERT(irport != NULL, return;);
ASSERT(irport != NULL, return IRQ_NONE;);
self = (struct ircc_cb *) irport->priv;
ASSERT(self != NULL, return;);
ASSERT(self != NULL, return IRQ_NONE;);
/* Check if we should use the SIR interrupt handler */
if (self->io->speed < 576000) {
......
......@@ -349,7 +349,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
}
/*interrupt handler */
static void
static irqreturn_t
toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
......@@ -360,7 +360,7 @@ toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
printk (KERN_WARNING "%s: irq %d for unknown device.\n",
driver_name, irq);
return;
return IRQ_NONE;
}
IRDA_DEBUG (4, "%s()\n", __FUNCTION__ );
......@@ -369,7 +369,7 @@ toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
/* woz it us */
if (!(irqstat & 0xf8))
return;
return IRQ_NONE;
outb_p (irqstat, OBOE_ISR); /* Acknowledge it */
......@@ -456,8 +456,7 @@ toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
self->stats.rx_errors++;
}
return IRQ_HANDLED;
}
static int
......
......@@ -1570,7 +1570,8 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
/********************************************************/
static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
static irqreturn_t vlsi_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct net_device *ndev = dev_instance;
vlsi_irda_dev_t *idev = ndev->priv;
......@@ -1579,6 +1580,7 @@ static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
int boguscount = 32;
unsigned got_act;
unsigned long flags;
int handled = 0;
got_act = 0;
iobase = ndev->base_addr;
......@@ -1591,7 +1593,7 @@ static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
break;
handled = 1;
if (irintr&IRINTR_RPKTINT)
vlsi_rx_interrupt(ndev);
......@@ -1610,7 +1612,7 @@ static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
if (boguscount <= 0)
printk(KERN_WARNING "%s: too much work in interrupt!\n", __FUNCTION__);
return IRQ_RETVAL(handled);
}
/********************************************************/
......
......@@ -97,7 +97,6 @@ static int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev);
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static void w83977af_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int w83977af_is_receiving(struct w83977af_ir *self);
static int w83977af_net_init(struct net_device *dev);
......@@ -1118,7 +1117,8 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
* An interrupt from the chip has arrived. Time to do some work
*
*/
static void w83977af_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t w83977af_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct w83977af_ir *self;
......@@ -1128,7 +1128,7 @@ static void w83977af_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (!dev) {
printk(KERN_WARNING "%s: irq %d for unknown device.\n",
driver_name, irq);
return;
return IRQ_NONE;
}
self = (struct w83977af_ir *) dev->priv;
......@@ -1153,7 +1153,7 @@ static void w83977af_interrupt(int irq, void *dev_id, struct pt_regs *regs)
outb(icr, iobase+ICR); /* Restore (new) interrupts */
outb(set, iobase+SSR); /* Restore bank register */
return IRQ_HANDLED;
}
/*
......
......@@ -255,8 +255,8 @@ config HDLC
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read Documentation/modules.txt. The module
will be called hdlc.o.
say M here and read <file:Documentation/modules.txt>. The module
will be called hdlc.
If unsure, say N here.
......@@ -363,7 +363,7 @@ config N2
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/modules.txt>. The module
will be called n2.o.
will be called n2.
If unsure, say N here.
......@@ -378,7 +378,7 @@ config C101
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/modules.txt>. The module
will be called c101.o.
will be called c101.
If unsure, say N here.
......
......@@ -157,14 +157,9 @@ static int c101_open(struct net_device *dev)
port_t *port = hdlc_to_port(hdlc);
int result;
if (!try_module_get(THIS_MODULE))
return -EFAULT; /* rmmod in progress */
result = hdlc_open(hdlc);
if (result) {
if (result)
return result;
module_put(THIS_MODULE);
}
writeb(1, port->win0base + C101_DTR);
sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
......@@ -183,7 +178,6 @@ static int c101_close(struct net_device *dev)
writeb(0, port->win0base + C101_DTR);
sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
hdlc_close(hdlc);
module_put(THIS_MODULE);
return 0;
}
......@@ -319,6 +313,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
dev = hdlc_to_dev(&card->hdlc);
spin_lock_init(&card->lock);
SET_MODULE_OWNER(dev);
dev->irq = irq;
dev->mem_start = winbase;
dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
......
......@@ -218,15 +218,9 @@ static int n2_open(struct net_device *dev)
u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1:TX422_PORT0);
int result;
if (!try_module_get(THIS_MODULE))
return -EFAULT; /* rmmod in progress */
result = hdlc_open(hdlc);
if (result) {
if (result)
return result;
module_put(THIS_MODULE);
}
mcr &= port->phy_node ? ~DTR_PORT1 : ~DTR_PORT0; /* set DTR ON */
outb(mcr, io + N2_MCR);
......@@ -251,7 +245,6 @@ static int n2_close(struct net_device *dev)
mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
outb(mcr, io + N2_MCR);
hdlc_close(hdlc);
module_put(THIS_MODULE);
return 0;
}
......@@ -451,6 +444,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
port->log_node = 1;
spin_lock_init(&port->lock);
SET_MODULE_OWNER(dev);
dev->irq = irq;
dev->mem_start = winbase;
dev->mem_end = winbase + USE_WINDOWSIZE-1;
......
......@@ -135,7 +135,7 @@ config JBD
# CONFIG_JBD could be its own option (even modular), but until there are
# other users than ext3, we will simply make it be the same as CONFIG_EXT3_FS
# dep_tristate ' Journal Block Device support (JBD for ext3)' CONFIG_JBD $CONFIG_EXT3_FS
bool
tristate
default EXT3_FS
help
This is a generic journaling layer for block devices. It is
......
......@@ -689,6 +689,15 @@ static ssize_t blkdev_file_write(struct file *file, const char *buf,
return generic_file_write_nolock(file, &local_iov, 1, ppos);
}
static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char *buf,
size_t count, loff_t pos)
{
struct iovec local_iov = { .iov_base = (void *)buf, .iov_len = count };
return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
}
struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
......@@ -705,6 +714,8 @@ struct file_operations def_blk_fops = {
.llseek = block_llseek,
.read = generic_file_read,
.write = blkdev_file_write,
.aio_read = generic_file_aio_read,
.aio_write = blkdev_file_aio_write,
.mmap = generic_file_mmap,
.fsync = block_fsync,
.ioctl = blkdev_ioctl,
......
......@@ -2019,7 +2019,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
struct inode *inode = page->mapping->host;
sector_t iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
unsigned int blocksize, blocks;
unsigned int blocksize;
int nr, i;
int fully_mapped = 1;
......@@ -2032,7 +2032,6 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
blocks = PAGE_CACHE_SIZE >> inode->i_blkbits;
iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;
bh = head;
......
......@@ -978,12 +978,6 @@ static void ext3_orphan_cleanup (struct super_block * sb,
return;
}
if (s_flags & MS_RDONLY) {
printk(KERN_INFO "EXT3-fs: %s: orphan cleanup on readonly fs\n",
sb->s_id);
sb->s_flags &= ~MS_RDONLY;
}
if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
......@@ -993,6 +987,12 @@ static void ext3_orphan_cleanup (struct super_block * sb,
return;
}
if (s_flags & MS_RDONLY) {
printk(KERN_INFO "EXT3-fs: %s: orphan cleanup on readonly fs\n",
sb->s_id);
sb->s_flags &= ~MS_RDONLY;
}
while (es->s_last_orphan) {
struct inode *inode;
......
......@@ -32,17 +32,7 @@ static rwlock_t file_systems_lock = RW_LOCK_UNLOCKED;
/* WARNING: This can be used only if we _already_ own a reference */
void get_filesystem(struct file_system_type *fs)
{
if (!try_module_get(fs->owner)) {
#ifdef CONFIG_MODULE_UNLOAD
unsigned int cpu = get_cpu();
local_inc(&fs->owner->ref[cpu].count);
put_cpu();
#else
/* Getting filesystem while it's starting up? We're
already supposed to have a reference. */
BUG();
#endif
}
__module_get(fs->owner);
}
void put_filesystem(struct file_system_type *fs)
......
......@@ -99,25 +99,32 @@ char *disk_name(struct gendisk *hd, int part, char *buf)
#ifdef CONFIG_DEVFS_FS
if (hd->devfs_name[0] != '\0') {
if (part)
sprintf(buf, "%s/part%d", hd->devfs_name, part);
snprintf(buf, BDEVNAME_SIZE, "%s/part%d",
hd->devfs_name, part);
else if (hd->minors != 1)
sprintf(buf, "%s/disc", hd->devfs_name);
snprintf(buf, BDEVNAME_SIZE, "%s/disc", hd->devfs_name);
else
sprintf(buf, "%s", hd->devfs_name);
snprintf(buf, BDEVNAME_SIZE, "%s", hd->devfs_name);
return buf;
}
#endif
if (!part)
sprintf(buf, "%s", hd->disk_name);
snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
sprintf(buf, "%sp%d", hd->disk_name, part);
snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, part);
else
sprintf(buf, "%s%d", hd->disk_name, part);
snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, part);
return buf;
}
const char *bdevname(struct block_device *bdev, char *buf)
{
int part = MINOR(bdev->bd_dev) - bdev->bd_disk->first_minor;
return disk_name(bdev->bd_disk, part, buf);
}
static struct parsed_partitions *
check_partition(struct gendisk *hd, struct block_device *bdev)
{
......@@ -417,7 +424,7 @@ void del_gendisk(struct gendisk *disk)
struct dev_name {
struct list_head list;
dev_t dev;
char namebuf[64];
char namebuf[BDEVNAME_SIZE];
char *name;
};
......
......@@ -8,7 +8,7 @@
enum { MAX_PART = 256 };
struct parsed_partitions {
char name[40];
char name[BDEVNAME_SIZE];
struct {
sector_t from;
sector_t size;
......
......@@ -136,11 +136,11 @@ proc_file_read(struct file * file, char * buf, size_t nbytes, loff_t *ppos)
"proc_file_read: Apparent buffer overflow!\n");
n = PAGE_SIZE;
}
if (n > count)
n = count;
n -= *ppos;
if (n <= 0)
break;
if (n > count)
n = count;
start = page + *ppos;
} else if (start < page) {
if (n > PAGE_SIZE) {
......
......@@ -63,6 +63,11 @@
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
......
......@@ -15,6 +15,7 @@
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>
......@@ -495,32 +496,93 @@ static inline void rep_nop(void)
#define cpu_relax() rep_nop()
/* Prefetch instructions for Pentium III and AMD Athlon */
#ifdef CONFIG_X86_PREFETCH
/* generic versions from gas */
#define GENERIC_NOP1 ".byte 0x90\n"
#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4
/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8 K7_NOP7 ASM_NOP1
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif CONFIG_MK7
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif
#define ASM_NOP_MAX 8
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
because they are microcoded there and very slow. */
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *x)
{
__asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
alternative_input(ASM_NOP3,
"prefetchnta (%1)",
X86_FEATURE_XMM,
"r" (x));
}
#elif defined CONFIG_X86_USE_3DNOW
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
extern inline void prefetch(const void *x)
{
__asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
}
/* 3dnow! prefetch to get an exclusive cache line. Useful for
spinlocks to avoid one state transition in the cache coherency protocol. */
extern inline void prefetchw(const void *x)
{
__asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
alternative_input(ASM_NOP3,
"prefetchw (%1)",
X86_FEATURE_3DNOW,
"r" (x));
}
#define spin_lock_prefetch(x) prefetchw(x)
#endif
#endif /* __ASM_I386_PROCESSOR_H */
......@@ -277,13 +277,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
#endif
#ifdef __KERNEL__
struct alt_instr {
u8 *instr; /* original instruction */
u8 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
u8 replacement[0]; /* new instruction */
__u8 *instr; /* original instruction */
__u8 *replacement;
__u8 cpuid; /* cpuid bit set for replacement */
__u8 instrlen; /* length of original instruction */
__u8 replacementlen; /* length of new instruction, <= instrlen */
__u8 pad;
};
#endif
/*
* Alternative instructions for different CPU types or capabilities.
......@@ -302,12 +305,39 @@ struct alt_instr {
".section .altinstructions,\"a\"\n" \
" .align 4\n" \
" .long 661b\n" /* label */ \
" .long 663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature) : "memory")
/*
* Alternative inline assembly with input.
*
* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* Best is to use constraints that are fixed size (like (%1) ... "r")
* If you use variable-sized constraints like "m" or "g" in the
* replacement, make sure to pad to the worst-case length.
*/
#define alternative_input(oldinstr, newinstr, feature, input) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 4\n" \
" .long 661b\n" /* label */ \
" .long 663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature), input)
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
......
......@@ -1068,10 +1068,7 @@ extern int chrdev_open(struct inode *, struct file *);
/* fs/block_dev.c */
#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
extern const char *__bdevname(dev_t, char *buffer);
extern inline const char *bdevname(struct block_device *bdev, char *buffer)
{
return __bdevname(bdev->bd_dev, buffer);
}
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern struct block_device *open_bdev_excl(const char *, int, int, void *);
extern void close_bdev_excl(struct block_device *, int);
......
......@@ -31,6 +31,7 @@
#define __GFP_REPEAT 0x400 /* Retry the allocation. Might fail */
#define __GFP_NOFAIL 0x800 /* Retry for ever. Cannot fail */
#define __GFP_NORETRY 0x1000 /* Do not retry. Might fail */
#define __GFP_NO_GROW 0x2000 /* Slab internal usage */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
......
......@@ -20,10 +20,7 @@
#include <asm/module.h>
/* Not Yet Implemented */
#define MODULE_AUTHOR(name)
#define MODULE_DESCRIPTION(desc)
#define MODULE_SUPPORTED_DEVICE(name)
#define MODULE_PARM_DESC(var,desc)
#define print_modules()
/* v850 toolchain uses a `_' prefix for all user symbols */
......@@ -58,12 +55,11 @@ search_extable(const struct exception_table_entry *first,
unsigned long value);
#ifdef MODULE
#define ___module_cat(a,b) a ## b
#define ___module_cat(a,b) __mod_ ## a ## b
#define __module_cat(a,b) ___module_cat(a,b)
/* For userspace: you can also call me... */
#define MODULE_ALIAS(alias) \
static const char __module_cat(__alias_,__LINE__)[] \
__attribute__((section(".modinfo"),unused)) = "alias=" alias
#define __MODULE_INFO(tag, name, info) \
static const char __module_cat(name,__LINE__)[] \
__attribute__((section(".modinfo"),unused)) = __stringify(tag) "=" info
#define MODULE_GENERIC_TABLE(gtype,name) \
extern const struct gtype##_id __mod_##gtype##_table \
......@@ -71,6 +67,19 @@ extern const struct gtype##_id __mod_##gtype##_table \
#define THIS_MODULE (&__this_module)
#else /* !MODULE */
#define MODULE_GENERIC_TABLE(gtype,name)
#define __MODULE_INFO(tag, name, info)
#define THIS_MODULE ((struct module *)0)
#endif
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
/* For userspace: you can also call me... */
#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
/*
* The following license idents are currently accepted as indicating free
* software modules
......@@ -97,17 +106,18 @@ extern const struct gtype##_id __mod_##gtype##_table \
* 2. So the community can ignore bug reports including proprietary modules
* 3. So vendors can do likewise based on their own policies
*/
#define MODULE_LICENSE(license) \
static const char __module_license[] \
__attribute__((section(".init.license"), unused)) = license
#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
#else /* !MODULE */
/* Author, ideally of form NAME <EMAIL>[, NAME <EMAIL>]*[ and NAME <EMAIL>] */
#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
/* What your module does. */
#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
#define MODULE_ALIAS(alias)
#define MODULE_GENERIC_TABLE(gtype,name)
#define THIS_MODULE ((struct module *)0)
#define MODULE_LICENSE(license)
#endif
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
#define MODULE_PARM_DESC(_parm, desc) \
__MODULE_INFO(parm, _parm, #_parm ":" desc)
#define MODULE_DEVICE_TABLE(type,name) \
MODULE_GENERIC_TABLE(type##_device,name)
......@@ -255,6 +265,7 @@ struct module *module_text_address(unsigned long addr);
#ifdef CONFIG_MODULE_UNLOAD
unsigned int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);
......@@ -265,6 +276,17 @@ void symbol_put_addr(void *addr);
#define local_dec(x) atomic_dec(x)
#endif
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
BUG_ON(module_refcount(module) == 0);
local_inc(&module->ref[get_cpu()].count);
put_cpu();
}
}
static inline int try_module_get(struct module *module)
{
int ret = 1;
......@@ -300,6 +322,9 @@ static inline int try_module_get(struct module *module)
static inline void module_put(struct module *module)
{
}
static inline void __module_get(struct module *module)
{
}
#define symbol_put(x) do { } while(0)
#define symbol_put_addr(p) do { } while(0)
......@@ -357,6 +382,10 @@ static inline struct module *module_text_address(unsigned long addr)
#define symbol_put(x) do { } while(0)
#define symbol_put_addr(x) do { } while(0)
static inline void __module_get(struct module *module)
{
}
static inline int try_module_get(struct module *module)
{
return 1;
......
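The module.h rework above replaces the ad-hoc per-tag sections (.init.license, __vermagic) with generic NUL-separated "tag=value" strings in a single .modinfo section, which kernel/module.c below walks with next_string()/get_modinfo() and which scripts/mod/modpost.c now feeds via MODULE_INFO(vermagic, ...). A standalone sketch of that layout and lookup (userspace C; the tag values are made up):

#include <stdio.h>
#include <string.h>

/* what a module's .modinfo section conceptually contains */
static const char modinfo[] =
	"license=GPL\0"
	"author=Jane Hacker <jane@example.com>\0"
	"vermagic=2.5.75 PENTIUM4 gcc-3.2\0";

static const char *find_tag(const char *sec, unsigned long size, const char *tag)
{
	unsigned long taglen = strlen(tag);
	const char *p = sec;

	while (p < sec + size) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;   /* value starts after '=' */
		p += strlen(p) + 1;              /* hop to the next string */
	}
	return NULL;
}

int main(void)
{
	printf("license: %s\n", find_tag(modinfo, sizeof(modinfo), "license"));
	printf("vermagic: %s\n", find_tag(modinfo, sizeof(modinfo), "vermagic"));
	return 0;
}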
......@@ -7,7 +7,7 @@
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/threads.h>
#ifdef CONFIG_SMP
......
......@@ -22,8 +22,11 @@ typedef struct kmem_cache_s kmem_cache_t;
#define SLAB_KERNEL GFP_KERNEL
#define SLAB_DMA GFP_DMA
#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|__GFP_NORETRY)
#define SLAB_NO_GROW 0x00001000UL /* don't grow a cache */
#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT|\
__GFP_NOFAIL|__GFP_NORETRY)
#define SLAB_NO_GROW __GFP_NO_GROW /* don't grow a cache */
/* flags to pass to kmem_cache_create().
* The first 3 are only valid when the allocator as been build
......
......@@ -73,7 +73,7 @@ struct irport_cb {
/* For piggyback drivers */
void *priv;
void (*change_speed)(void *priv, __u32 speed);
void (*interrupt)(int irq, void *dev_id, struct pt_regs *regs);
int (*interrupt)(int irq, void *dev_id, struct pt_regs *regs);
};
struct irport_cb *irport_open(int i, unsigned int iobase, unsigned int irq);
......
......@@ -43,7 +43,9 @@ static kmem_cache_t *task_struct_cachep;
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_semundo(struct task_struct *tsk);
/* The idle threads do not count.. */
/* The idle threads do not count..
* Protected by write_lock_irq(&tasklist_lock)
*/
int nr_threads;
int max_threads;
......@@ -792,9 +794,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
atomic_inc(&p->user->processes);
/*
* Counter increases are protected by
* the kernel lock so nr_threads can't
* increase under us (but it may decrease).
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
......
......@@ -515,6 +515,7 @@ EXPORT_SYMBOL(vsprintf);
EXPORT_SYMBOL(vsnprintf);
EXPORT_SYMBOL(vsscanf);
EXPORT_SYMBOL(__bdevname);
EXPORT_SYMBOL(bdevname);
EXPORT_SYMBOL(cdevname);
EXPORT_SYMBOL(simple_strtoull);
EXPORT_SYMBOL(simple_strtoul);
......
......@@ -431,7 +431,7 @@ static inline void restart_refcounts(void)
}
#endif
static unsigned int module_refcount(struct module *mod)
unsigned int module_refcount(struct module *mod)
{
unsigned int i, total = 0;
......@@ -439,6 +439,7 @@ static unsigned int module_refcount(struct module *mod)
total += atomic_read(&mod->ref[i].count);
return total;
}
EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);
......@@ -939,12 +940,12 @@ EXPORT_SYMBOL_GPL(__symbol_get);
/* Change all symbols so that sh_value encodes the pointer directly. */
static int simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
unsigned int strindex,
const char *strtab,
unsigned int versindex,
struct module *mod)
{
Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
const char *strtab = (char *)sechdrs[strindex].sh_addr;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
int ret = 0;
......@@ -1063,13 +1064,9 @@ static inline int license_is_gpl_compatible(const char *license)
|| strcmp(license, "Dual MPL/GPL") == 0);
}
static void set_license(struct module *mod, Elf_Shdr *sechdrs, int licenseidx)
static void set_license(struct module *mod, const char *license)
{
char *license;
if (licenseidx)
license = (char *)sechdrs[licenseidx].sh_addr;
else
if (!license)
license = "unspecified";
mod->license_gplok = license_is_gpl_compatible(license);
......@@ -1080,6 +1077,40 @@ static void set_license(struct module *mod, Elf_Shdr *sechdrs, int licenseidx)
}
}
/* Parse tag=value strings from .modinfo section */
static char *next_string(char *string, unsigned long *secsize)
{
/* Skip non-zero chars */
while (string[0]) {
string++;
if ((*secsize)-- <= 1)
return NULL;
}
/* Skip any zero padding. */
while (!string[0]) {
string++;
if ((*secsize)-- <= 1)
return NULL;
}
return string;
}
static char *get_modinfo(Elf_Shdr *sechdrs,
unsigned int info,
const char *tag)
{
char *p;
unsigned int taglen = strlen(tag);
unsigned long size = sechdrs[info].sh_size;
for (p = (char *)sechdrs[info].sh_addr; p; p = next_string(p, &size)) {
if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
return p + taglen + 1;
}
return NULL;
}
/* Allocate and load the module: note that size of section 0 is always
zero, and we rely on this for optional sections. */
static struct module *load_module(void __user *umod,
......@@ -1088,9 +1119,9 @@ static struct module *load_module(void __user *umod,
{
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
char *secstrings, *args;
unsigned int i, symindex, exportindex, strindex, setupindex, exindex,
modindex, obsparmindex, licenseindex, gplindex, vmagindex,
char *secstrings, *args, *modmagic, *strtab = NULL;
unsigned int i, symindex = 0, strindex = 0, setupindex, exindex,
exportindex, modindex, obsparmindex, infoindex, gplindex,
crcindex, gplcrcindex, versindex;
long arglen;
struct module *mod;
......@@ -1124,6 +1155,7 @@ static struct module *load_module(void __user *umod,
/* Convenience variables */
sechdrs = (void *)hdr + hdr->e_shoff;
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
sechdrs[0].sh_addr = 0;
/* And these should exist, but gcc whinges if we don't init them */
symindex = strindex = 0;
......@@ -1137,6 +1169,7 @@ static struct module *load_module(void __user *umod,
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
strindex = sechdrs[i].sh_link;
strtab = (char *)hdr + sechdrs[strindex].sh_offset;
}
#ifndef CONFIG_MODULE_UNLOAD
/* Don't load .exit sections */
......@@ -1145,12 +1178,6 @@ static struct module *load_module(void __user *umod,
#endif
}
#ifdef CONFIG_KALLSYMS
/* Keep symbol and string tables for decoding later. */
sechdrs[symindex].sh_flags |= SHF_ALLOC;
sechdrs[strindex].sh_flags |= SHF_ALLOC;
#endif
modindex = find_sec(hdr, sechdrs, secstrings,
".gnu.linkonce.this_module");
if (!modindex) {
......@@ -1168,9 +1195,16 @@ static struct module *load_module(void __user *umod,
setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
licenseindex = find_sec(hdr, sechdrs, secstrings, ".init.license");
vmagindex = find_sec(hdr, sechdrs, secstrings, "__vermagic");
versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
/* Don't keep modinfo section */
sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
#ifdef CONFIG_KALLSYMS
/* Keep symbol and string tables for decoding later. */
sechdrs[symindex].sh_flags |= SHF_ALLOC;
sechdrs[strindex].sh_flags |= SHF_ALLOC;
#endif
/* Check module struct version now, before we try to use module. */
if (!check_modstruct_version(sechdrs, versindex, mod)) {
......@@ -1178,14 +1212,15 @@ static struct module *load_module(void __user *umod,
goto free_hdr;
}
modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
/* This is allowed: modprobe --force will invalidate it. */
if (!vmagindex) {
if (!modmagic) {
tainted |= TAINT_FORCED_MODULE;
printk(KERN_WARNING "%s: no version magic, tainting kernel.\n",
mod->name);
} else if (!same_magic((char *)sechdrs[vmagindex].sh_addr, vermagic)) {
} else if (!same_magic(modmagic, vermagic)) {
printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
mod->name, (char*)sechdrs[vmagindex].sh_addr, vermagic);
mod->name, modmagic, vermagic);
err = -ENOEXEC;
goto free_hdr;
}
......@@ -1265,11 +1300,11 @@ static struct module *load_module(void __user *umod,
/* Now we've moved module, initialize linked lists, etc. */
module_unload_init(mod);
/* Set up license info based on contents of section */
set_license(mod, sechdrs, licenseindex);
/* Set up license info based on the info section */
set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
/* Fix up syms, so that st_value is a pointer to location. */
err = simplify_symbols(sechdrs, symindex, strindex, versindex, mod);
err = simplify_symbols(sechdrs, symindex, strtab, versindex, mod);
if (err < 0)
goto cleanup;
......@@ -1300,8 +1335,7 @@ static struct module *load_module(void __user *umod,
for (i = 1; i < hdr->e_shnum; i++) {
const char *strtab = (char *)sechdrs[strindex].sh_addr;
if (sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(sechdrs, strtab, symindex, i,
mod);
err = apply_relocate(sechdrs, strtab, symindex, i,mod);
else if (sechdrs[i].sh_type == SHT_RELA)
err = apply_relocate_add(sechdrs, strtab, symindex, i,
mod);
......
#include <linux/percpu_counter.h>
#include <linux/sched.h>
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
......
......@@ -208,6 +208,11 @@ static void oom_kill(void)
*/
void out_of_memory(void)
{
/*
* oom_lock protects out_of_memory()'s static variables.
* It's a global lock; this is not performance-critical.
*/
static spinlock_t oom_lock = SPIN_LOCK_UNLOCKED;
static unsigned long first, last, count, lastkill;
unsigned long now, since;
......@@ -217,6 +222,7 @@ void out_of_memory(void)
if (nr_swap_pages > 0)
return;
spin_lock(&oom_lock);
now = jiffies;
since = now - last;
last = now;
......@@ -235,14 +241,14 @@ void out_of_memory(void)
*/
since = now - first;
if (since < HZ)
return;
goto out_unlock;
/*
* If we have gotten only a few failures,
* we're not really oom.
*/
if (++count < 10)
return;
goto out_unlock;
/*
* If we just killed a process, wait a while
......@@ -251,15 +257,27 @@ void out_of_memory(void)
*/
since = now - lastkill;
if (since < HZ*5)
return;
goto out_unlock;
/*
* Ok, really out of memory. Kill something.
*/
lastkill = now;
/* oom_kill() sleeps */
spin_unlock(&oom_lock);
oom_kill();
spin_lock(&oom_lock);
reset:
first = now;
/*
* We dropped the lock above, so check to be sure the variable
* first only ever increases to prevent false OOM's.
*/
if (time_after(now, first))
first = now;
count = 0;
out_unlock:
spin_unlock(&oom_lock);
}
......@@ -557,6 +557,7 @@ static void
refill_inactive_zone(struct zone *zone, const int nr_pages_in,
struct page_state *ps, int priority)
{
int pgmoved;
int pgdeactivate = 0;
int nr_pages = nr_pages_in;
LIST_HEAD(l_hold); /* The pages which were snipped off */
......@@ -570,6 +571,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
long swap_tendency;
lru_add_drain();
pgmoved = 0;
spin_lock_irq(&zone->lru_lock);
while (nr_pages && !list_empty(&zone->active_list)) {
page = list_entry(zone->active_list.prev, struct page, lru);
......@@ -584,9 +586,11 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
} else {
page_cache_get(page);
list_add(&page->lru, &l_hold);
pgmoved++;
}
nr_pages--;
}
zone->nr_active -= pgmoved;
spin_unlock_irq(&zone->lru_lock);
/*
......@@ -646,10 +650,10 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
continue;
}
list_add(&page->lru, &l_inactive);
pgdeactivate++;
}
pagevec_init(&pvec, 1);
pgmoved = 0;
spin_lock_irq(&zone->lru_lock);
while (!list_empty(&l_inactive)) {
page = list_entry(l_inactive.prev, struct page, lru);
......@@ -659,19 +663,27 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
if (!TestClearPageActive(page))
BUG();
list_move(&page->lru, &zone->inactive_list);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
zone->nr_inactive += pgmoved;
spin_unlock_irq(&zone->lru_lock);
pgdeactivate += pgmoved;
pgmoved = 0;
if (buffer_heads_over_limit)
pagevec_strip(&pvec);
__pagevec_release(&pvec);
spin_lock_irq(&zone->lru_lock);
}
}
zone->nr_inactive += pgmoved;
pgdeactivate += pgmoved;
if (buffer_heads_over_limit) {
spin_unlock_irq(&zone->lru_lock);
pagevec_strip(&pvec);
spin_lock_irq(&zone->lru_lock);
}
pgmoved = 0;
while (!list_empty(&l_active)) {
page = list_entry(l_active.prev, struct page, lru);
prefetchw_prev_lru_page(page, &l_active, flags);
......@@ -679,14 +691,16 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
BUG();
BUG_ON(!PageActive(page));
list_move(&page->lru, &zone->active_list);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
zone->nr_active += pgmoved;
pgmoved = 0;
spin_unlock_irq(&zone->lru_lock);
__pagevec_release(&pvec);
spin_lock_irq(&zone->lru_lock);
}
}
zone->nr_active -= pgdeactivate;
zone->nr_inactive += pgdeactivate;
zone->nr_active += pgmoved;
spin_unlock_irq(&zone->lru_lock);
pagevec_release(&pvec);
......
......@@ -404,9 +404,7 @@ add_header(struct buffer *b)
buf_printf(b, "#include <linux/vermagic.h>\n");
buf_printf(b, "#include <linux/compiler.h>\n");
buf_printf(b, "\n");
buf_printf(b, "const char vermagic[]\n");
buf_printf(b, "__attribute__((section(\"__vermagic\"))) =\n");
buf_printf(b, "VERMAGIC_STRING;\n");
buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
}
/* Record CRCs for unresolved symbols */
......