Commit 5ec5104a authored by Linus Torvalds's avatar Linus Torvalds

Merge bk://linuxusb.bkbits.net/linus-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents ead584e3 e78df3d0
...@@ -552,8 +552,6 @@ MODULE_AUTHOR("Stephen Rothwell"); ...@@ -552,8 +552,6 @@ MODULE_AUTHOR("Stephen Rothwell");
MODULE_DESCRIPTION("Advanced Power Management"); MODULE_DESCRIPTION("Advanced Power Management");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
EXPORT_NO_SYMBOLS;
#ifndef MODULE #ifndef MODULE
static int __init apm_setup(char *str) static int __init apm_setup(char *str)
{ {
......
...@@ -69,6 +69,7 @@ extern void __udivmoddi4(void); ...@@ -69,6 +69,7 @@ extern void __udivmoddi4(void);
extern void __udivsi3(void); extern void __udivsi3(void);
extern void __umodsi3(void); extern void __umodsi3(void);
extern void abort(void); extern void abort(void);
extern void do_div64(void);
extern void ret_from_exception(void); extern void ret_from_exception(void);
extern void fpundefinstr(void); extern void fpundefinstr(void);
...@@ -234,6 +235,7 @@ EXPORT_SYMBOL_NOVERS(__umoddi3); ...@@ -234,6 +235,7 @@ EXPORT_SYMBOL_NOVERS(__umoddi3);
EXPORT_SYMBOL_NOVERS(__udivmoddi4); EXPORT_SYMBOL_NOVERS(__udivmoddi4);
EXPORT_SYMBOL_NOVERS(__udivsi3); EXPORT_SYMBOL_NOVERS(__udivsi3);
EXPORT_SYMBOL_NOVERS(__umodsi3); EXPORT_SYMBOL_NOVERS(__umodsi3);
EXPORT_SYMBOL_NOVERS(do_div64);
/* bitops */ /* bitops */
EXPORT_SYMBOL(_set_bit_le); EXPORT_SYMBOL(_set_bit_le);
......
...@@ -997,11 +997,11 @@ ENTRY(fp_enter) ...@@ -997,11 +997,11 @@ ENTRY(fp_enter)
* previous and next are guaranteed not to be the same. * previous and next are guaranteed not to be the same.
*/ */
ENTRY(__switch_to) ENTRY(__switch_to)
add ip, r0, #TI_CPU_SAVE add ip, r1, #TI_CPU_SAVE
ldr r2, [r1, #TI_CPU_DOMAIN]! ldr r3, [r2, #TI_CPU_DOMAIN]!
stmia ip, {r4 - sl, fp, sp, lr} @ Store most regs on stack stmia ip, {r4 - sl, fp, sp, lr} @ Store most regs on stack
mcr p15, 0, r2, c3, c0 @ Set domain register mcr p15, 0, r3, c3, c0, 0 @ Set domain register
ldmib r1, {r4 - sl, fp, sp, pc} @ Load all regs saved previously ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
__INIT __INIT
/* /*
......
...@@ -75,7 +75,6 @@ no_work_pending: ...@@ -75,7 +75,6 @@ no_work_pending:
* This is how we return from a fork. * This is how we return from a fork.
*/ */
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
ldr r0, [r0, #TI_TASK]
bl schedule_tail bl schedule_tail
get_thread_info tsk get_thread_info tsk
ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
......
...@@ -13,7 +13,7 @@ obj-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ ...@@ -13,7 +13,7 @@ obj-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
strnlen_user.o strchr.o strrchr.o testchangebit.o \ strnlen_user.o strchr.o strrchr.o testchangebit.o \
testclearbit.o testsetbit.o uaccess.o getuser.o \ testclearbit.o testsetbit.o uaccess.o getuser.o \
putuser.o ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ putuser.o ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ucmpdi2.o udivdi3.o lib1funcs.o ucmpdi2.o udivdi3.o lib1funcs.o div64.o
obj-m := obj-m :=
obj-n := obj-n :=
......
#include <linux/linkage.h>

@ do_div64 - 64-bit shift-and-subtract division.
@
@ Per the .req aliases below, the dividend arrives in r0/r1 (low/high)
@ and the divisor in r2/r3 (dh/dl); the quotient is accumulated in
@ r0/r1 (ql/qh) and the low word of the remainder is returned in r2.
@ NOTE(review): the exact caller contract is inferred from the .req
@ comments and the final "mov r2, nl" -- confirm against the asm/div64.h
@ users before relying on it.
@
@ Algorithm: left-justify the divisor against the dividend, then run
@ one trial-subtraction step per shift position, shifting the result
@ of each comparison into the quotient via the carry flag.

ql .req r0 @ quotient low
qh .req r1 @ quotient high
dl .req r3 @ divisor low
dh .req r2 @ divisor high
nl .req r4 @ dividend low
nh .req r5 @ dividend high

ENTRY(do_div64)
	stmfd	sp!, {r4, r5, lr}
	mov	nl, r0
	movs	nh, r1			@ if high bits are zero
	movne	lr, #33
	moveq	lr, #1			@ only divide low bits
	moveq	nh, r0

	@ Left-justify the divisor: shift dh up until its top bit is set
	@ or it exceeds the dividend high part, counting iterations in lr.
1:	cmp	nh, dh
	bls	2f
	add	lr, lr, #1
	movs	dh, dh, lsl #1		@ left justify divisor
	bpl	1b

	@ For a 32-bit dividend, move the justified divisor into the
	@ low-part position and clear the high part.
2:	movs	nh, r1
	moveq	dl, dh
	moveq	dh, #0
	movne	dl, #0
	mov	ql, #0
	mov	qh, #0

	@ Main loop: one bit of quotient per iteration; the borrow from
	@ the trial subtraction becomes the quotient bit (C flag).
3:	subs	ip, nl, dl		@ trial subtraction
	sbcs	ip, nh, dh
	movcs	nh, ip			@ only update if successful
	subcs	nl, nl, dl		@ (repeat the subtraction)
	adcs	ql, ql, ql		@ C=1 if successful, shift into
	adc	qh, qh, qh		@ quotient
	movs	dh, dh, lsr #1		@ shift base high part right
	mov	dl, dl, rrx		@ shift base low part right
	subs	lr, lr, #1
	bne	3b

	mov	r2, nl			@ return remainder low word in r2
	ldmfd	sp!, {r4, r5, pc}
...@@ -25,6 +25,9 @@ ...@@ -25,6 +25,9 @@
#define MAX_SLOTS 21 #define MAX_SLOTS 21
#define PCICMD_ABORT ((PCI_STATUS_REC_MASTER_ABORT| \
PCI_STATUS_REC_TARGET_ABORT)<<16)
#define PCICMD_ERROR_BITS ((PCI_STATUS_DETECTED_PARITY | \ #define PCICMD_ERROR_BITS ((PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_REC_MASTER_ABORT | \ PCI_STATUS_REC_MASTER_ABORT | \
PCI_STATUS_REC_TARGET_ABORT | \ PCI_STATUS_REC_TARGET_ABORT | \
...@@ -84,6 +87,12 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, ...@@ -84,6 +87,12 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
*value = v; *value = v;
v = *CSR_PCICMD;
if (v & PCICMD_ABORT) {
*CSR_PCICMD = v & (0xffff|PCICMD_ABORT);
return -1;
}
return PCIBIOS_SUCCESSFUL; return PCIBIOS_SUCCESSFUL;
} }
...@@ -92,6 +101,7 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, ...@@ -92,6 +101,7 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value) int size, u32 value)
{ {
unsigned long addr = dc21285_base_address(bus, devfn); unsigned long addr = dc21285_base_address(bus, devfn);
u32 v;
if (addr) if (addr)
switch (size) { switch (size) {
...@@ -109,6 +119,12 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, ...@@ -109,6 +119,12 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
break; break;
} }
v = *CSR_PCICMD;
if (v & PCICMD_ABORT) {
*CSR_PCICMD = v & (0xffff|PCICMD_ABORT);
return -1;
}
return PCIBIOS_SUCCESSFUL; return PCIBIOS_SUCCESSFUL;
} }
...@@ -148,16 +164,16 @@ static void dc21285_abort_irq(int irq, void *dev_id, struct pt_regs *regs) ...@@ -148,16 +164,16 @@ static void dc21285_abort_irq(int irq, void *dev_id, struct pt_regs *regs)
cmd = cmd & 0xffff; cmd = cmd & 0xffff;
if (status & PCI_STATUS_REC_MASTER_ABORT) { if (status & PCI_STATUS_REC_MASTER_ABORT) {
printk(KERN_DEBUG "PCI: master abort: "); printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n",
pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT, 1); instruction_pointer(regs));
printk("\n");
cmd |= PCI_STATUS_REC_MASTER_ABORT << 16; cmd |= PCI_STATUS_REC_MASTER_ABORT << 16;
} }
if (status & PCI_STATUS_REC_TARGET_ABORT) { if (status & PCI_STATUS_REC_TARGET_ABORT) {
printk(KERN_DEBUG "PCI: target abort: "); printk(KERN_DEBUG "PCI: target abort: ");
pcibios_report_status(PCI_STATUS_SIG_TARGET_ABORT, 1); pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT, 1);
printk("\n"); printk("\n");
cmd |= PCI_STATUS_REC_TARGET_ABORT << 16; cmd |= PCI_STATUS_REC_TARGET_ABORT << 16;
...@@ -289,6 +305,38 @@ void __init dc21285_preinit(void) ...@@ -289,6 +305,38 @@ void __init dc21285_preinit(void)
"%s mode\n", *CSR_CLASSREV & 0xff, cfn_mode ? "%s mode\n", *CSR_CLASSREV & 0xff, cfn_mode ?
"central function" : "addin"); "central function" : "addin");
if (footbridge_cfn_mode()) {
/*
* Clear any existing errors - we aren't
* interested in historical data...
*/
*CSR_SA110_CNTL = (*CSR_SA110_CNTL & 0xffffde07) |
SA110_CNTL_RXSERR;
*CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS;
}
init_timer(&serr_timer);
init_timer(&perr_timer);
serr_timer.data = IRQ_PCI_SERR;
serr_timer.function = dc21285_enable_error;
perr_timer.data = IRQ_PCI_PERR;
perr_timer.function = dc21285_enable_error;
/*
* We don't care if these fail.
*/
request_irq(IRQ_PCI_SERR, dc21285_serr_irq, SA_INTERRUPT,
"PCI system error", &serr_timer);
request_irq(IRQ_PCI_PERR, dc21285_parity_irq, SA_INTERRUPT,
"PCI parity error", &perr_timer);
request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, SA_INTERRUPT,
"PCI abort", NULL);
request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, SA_INTERRUPT,
"Discard timer", NULL);
request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, SA_INTERRUPT,
"PCI data parity", NULL);
if (cfn_mode) { if (cfn_mode) {
static struct resource csrio; static struct resource csrio;
...@@ -324,35 +372,5 @@ void __init dc21285_preinit(void) ...@@ -324,35 +372,5 @@ void __init dc21285_preinit(void)
void __init dc21285_postinit(void) void __init dc21285_postinit(void)
{ {
if (footbridge_cfn_mode()) {
/*
* Clear any existing errors - we aren't
* interested in historical data...
*/
*CSR_SA110_CNTL = (*CSR_SA110_CNTL & 0xffffde07) |
SA110_CNTL_RXSERR;
*CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS;
}
/*
* Initialise PCI error IRQ after we've finished probing
*/
request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, SA_INTERRUPT, "PCI abort", NULL);
request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, SA_INTERRUPT, "Discard timer", NULL);
request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, SA_INTERRUPT, "PCI data parity", NULL);
init_timer(&serr_timer);
init_timer(&perr_timer);
serr_timer.data = IRQ_PCI_SERR;
serr_timer.function = dc21285_enable_error;
perr_timer.data = IRQ_PCI_PERR;
perr_timer.function = dc21285_enable_error;
request_irq(IRQ_PCI_SERR, dc21285_serr_irq, SA_INTERRUPT,
"PCI system error", &serr_timer);
request_irq(IRQ_PCI_PERR, dc21285_parity_irq, SA_INTERRUPT,
"PCI parity error", &perr_timer);
register_isa_ports(DC21285_PCI_MEM, DC21285_PCI_IO, 0); register_isa_ports(DC21285_PCI_MEM, DC21285_PCI_IO, 0);
} }
...@@ -117,6 +117,8 @@ static int __init assabet_init(void) ...@@ -117,6 +117,8 @@ static int __init assabet_init(void)
PGSR = 0; PGSR = 0;
PCFR = 0; PCFR = 0;
PSDR = 0; PSDR = 0;
PPDR |= PPC_TXD3 | PPC_TXD1;
PPSR |= PPC_TXD3 | PPC_TXD1;
sa1100fb_lcd_power = assabet_lcd_power; sa1100fb_lcd_power = assabet_lcd_power;
sa1100fb_backlight_power = assabet_backlight_power; sa1100fb_backlight_power = assabet_backlight_power;
......
...@@ -234,6 +234,8 @@ static int sa1110_target(struct cpufreq_policy *policy, ...@@ -234,6 +234,8 @@ static int sa1110_target(struct cpufreq_policy *policy,
(sa11x0_ppcr_to_freq(ppcr-1) >= policy->min)) (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
ppcr--; ppcr--;
break; break;
default:
return -EINVAL;
} }
freqs.old = sa11x0_getspeed(); freqs.old = sa11x0_getspeed();
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
# To add an entry into this database, please see Documentation/arm/README, # To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk # or contact rmk@arm.linux.org.uk
# #
# Last update: Wed Mar 5 22:11:59 2003 # Last update: Tue Mar 25 16:34:29 2003
# #
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
# #
...@@ -305,3 +305,11 @@ meg03 ARCH_MEG03 MEG03 293 ...@@ -305,3 +305,11 @@ meg03 ARCH_MEG03 MEG03 293
pxa_whitechapel ARCH_PXA_WHITECHAPEL PXA_WHITECHAPEL 294 pxa_whitechapel ARCH_PXA_WHITECHAPEL PXA_WHITECHAPEL 294
nwsc ARCH_NWSC NWSC 295 nwsc ARCH_NWSC NWSC 295
nwlarm ARCH_NWLARM NWLARM 296 nwlarm ARCH_NWLARM NWLARM 296
ixp425_mguard ARCH_IXP425_MGUARD IXP425_MGUARD 297
pxa_netdcu4 ARCH_PXA_NETDCU4 PXA_NETDCU4 298
ixdp2401 ARCH_IXDP2401 IXDP2401 299
ixdp2801 ARCH_IXDP2801 IXDP2801 300
zodiac ARCH_ZODIAC ZODIAC 301
armmodul ARCH_ARMMODUL ARMMODUL 302
ketop SA1100_KETOP KETOP 303
av7200 ARCH_AV7200 AV7200 304
...@@ -54,13 +54,16 @@ NET_Y := $(patsubst %/, %/built-in.o, $(net-y)) ...@@ -54,13 +54,16 @@ NET_Y := $(patsubst %/, %/built-in.o, $(net-y))
LIBS_Y := $(patsubst %/, %/lib.a, $(libs-y)) LIBS_Y := $(patsubst %/, %/lib.a, $(libs-y))
export INIT_Y CORE_Y DRIVERS_Y NET_Y LIBS_Y HEAD_Y export INIT_Y CORE_Y DRIVERS_Y NET_Y LIBS_Y HEAD_Y
makeboot =$(Q)$(MAKE) -f scripts/Makefile.build obj=arch/$(ARCH)/boot $(1) # Default target
all: image
boot := arch/sparc/boot
image tftpboot.img: vmlinux image tftpboot.img: vmlinux
$(call makeboot,arch/sparc/boot/$@) $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean: archclean:
$(Q)$(MAKE) -f scripts/Makefile.clean obj=arch/$(ARCH)/boot $(Q)$(MAKE) $(clean)=$(boot)
prepare: include/asm-$(ARCH)/asm_offsets.h prepare: include/asm-$(ARCH)/asm_offsets.h
...@@ -72,3 +75,9 @@ include/asm-$(ARCH)/asm_offsets.h: arch/$(ARCH)/kernel/asm-offsets.s ...@@ -72,3 +75,9 @@ include/asm-$(ARCH)/asm_offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h \ CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h \
arch/$(ARCH)/kernel/asm-offsets.s arch/$(ARCH)/kernel/asm-offsets.s
# Don't use tabs in echo arguments.
define archhelp
echo '* image - kernel image ($(boot)/image)'
echo ' tftpboot.img - image prepared for tftp'
endef
...@@ -32,7 +32,3 @@ $(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE ...@@ -32,7 +32,3 @@ $(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
$(obj)/btfix.s: $(obj)/btfixupprep vmlinux FORCE $(obj)/btfix.s: $(obj)/btfixupprep vmlinux FORCE
$(call if_changed,btfix) $(call if_changed,btfix)
archhelp:
@echo '* image - kernel image ($(obj)/image)'
@echo ' tftpboot.img - image prepared for tftp'
...@@ -139,8 +139,8 @@ config CPU_FREQ ...@@ -139,8 +139,8 @@ config CPU_FREQ
bool "CPU Frequency scaling" bool "CPU Frequency scaling"
help help
Clock scaling allows you to change the clock speed of CPUs on the Clock scaling allows you to change the clock speed of CPUs on the
fly. Currently there is only a sparc64 driver for UltraSPARC-III fly. Currently there are only sparc64 drivers for UltraSPARC-III
processors. and UltraSPARC-IIe processors.
For details, take a look at linux/Documentation/cpufreq. For details, take a look at linux/Documentation/cpufreq.
...@@ -166,6 +166,16 @@ config US3_FREQ ...@@ -166,6 +166,16 @@ config US3_FREQ
If in doubt, say N. If in doubt, say N.
config US2E_FREQ
tristate "UltraSPARC-IIe CPU Frequency driver"
depends on CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
For details, take a look at linux/Documentation/cpufreq.
If in doubt, say N.
source "drivers/cpufreq/Kconfig" source "drivers/cpufreq/Kconfig"
# Identify this as a Sparc64 build # Identify this as a Sparc64 build
......
...@@ -71,10 +71,13 @@ libs-y += arch/sparc64/prom/ arch/sparc64/lib/ ...@@ -71,10 +71,13 @@ libs-y += arch/sparc64/prom/ arch/sparc64/lib/
# FIXME: is drivers- right? # FIXME: is drivers- right?
drivers-$(CONFIG_OPROFILE) += arch/sparc64/oprofile/ drivers-$(CONFIG_OPROFILE) += arch/sparc64/oprofile/
makeboot = $(Q)$(MAKE) -f scripts/Makefile.build obj=arch/$(ARCH)/boot $(1) boot := arch/sparc64/boot
image tftpboot.img vmlinux.aout: vmlinux image tftpboot.img vmlinux.aout: vmlinux
$(call makeboot,arch/sparc64/boot/$@) $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
define archhelp define archhelp
echo '* vmlinux - Standard sparc64 kernel' echo '* vmlinux - Standard sparc64 kernel'
......
...@@ -8,7 +8,7 @@ ROOT_IMG := /usr/src/root.img ...@@ -8,7 +8,7 @@ ROOT_IMG := /usr/src/root.img
ELFTOAOUT := elftoaout ELFTOAOUT := elftoaout
host-progs := piggyback host-progs := piggyback
targets := tftpboot.img vmlinux.aout targets := image tftpboot.img vmlinux.aout
quiet_cmd_elftoaout = ELT2AOUT $@ quiet_cmd_elftoaout = ELT2AOUT $@
cmd_elftoaout = $(ELFTOAOUT) vmlinux -o $@ cmd_elftoaout = $(ELFTOAOUT) vmlinux -o $@
......
...@@ -21,6 +21,7 @@ obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o ...@@ -21,6 +21,7 @@ obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o
obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
ifdef CONFIG_SUNOS_EMUL ifdef CONFIG_SUNOS_EMUL
obj-y += sys_sunos32.o sunos_ioctl32.o obj-y += sys_sunos32.o sunos_ioctl32.o
......
...@@ -152,7 +152,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) ...@@ -152,7 +152,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
#ifdef CONFIG_BINFMT_ELF32_MODULE #ifdef CONFIG_BINFMT_ELF32_MODULE
#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE #define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
#endif #endif
#define ELF_FLAGS_INIT set_thread_flag(TIF_32BIT)
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra"); MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek"); MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/spitfire.h> #include <asm/spitfire.h>
#include <asm/timer.h>
/* Used to synchronize acceses to NatSemi SUPER I/O chip configure /* Used to synchronize acceses to NatSemi SUPER I/O chip configure
* operations in asm/ns87303.h * operations in asm/ns87303.h
...@@ -88,7 +89,6 @@ void __init device_scan(void) ...@@ -88,7 +89,6 @@ void __init device_scan(void)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
{ {
extern unsigned long up_clock_tick;
up_clock_tick = prom_getintdefault(prom_node_cpu, up_clock_tick = prom_getintdefault(prom_node_cpu,
"clock-frequency", "clock-frequency",
0); 0);
......
...@@ -44,6 +44,7 @@ ...@@ -44,6 +44,7 @@
#include <asm/starfire.h> #include <asm/starfire.h>
#include <asm/hardirq.h> #include <asm/hardirq.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/timer.h>
#ifdef CONFIG_IP_PNP #ifdef CONFIG_IP_PNP
#include <net/ipconfig.h> #include <net/ipconfig.h>
......
...@@ -55,6 +55,7 @@ ...@@ -55,6 +55,7 @@
#endif #endif
#include <asm/a.out.h> #include <asm/a.out.h>
#include <asm/ns87303.h> #include <asm/ns87303.h>
#include <asm/timer.h>
struct poll { struct poll {
int fd; int fd;
...@@ -159,11 +160,7 @@ EXPORT_SYMBOL(_do_write_unlock); ...@@ -159,11 +160,7 @@ EXPORT_SYMBOL(_do_write_unlock);
EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function);
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* Uniprocessor clock frequency */ EXPORT_SYMBOL(sparc64_get_clock_tick);
#ifndef CONFIG_SMP
extern unsigned long up_clock_tick;
EXPORT_SYMBOL(up_clock_tick);
#endif
/* semaphores */ /* semaphores */
EXPORT_SYMBOL(down); EXPORT_SYMBOL(down);
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
#include <linux/profile.h> #include <linux/profile.h>
#include <linux/bcd.h> #include <linux/bcd.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/mostek.h> #include <asm/mostek.h>
...@@ -988,6 +990,73 @@ static unsigned long sparc64_init_timers(void (*cfunc)(int, void *, struct pt_re ...@@ -988,6 +990,73 @@ static unsigned long sparc64_init_timers(void (*cfunc)(int, void *, struct pt_re
return clock; return clock;
} }
/* Per-cpu baseline values captured at the first cpufreq transition, so
 * udelay_val and clock_tick can be rescaled from a fixed reference on
 * every subsequent transition instead of accumulating rounding error.
 */
struct freq_table {
	unsigned long udelay_val_ref;	/* udelay_val measured at ref_freq */
	unsigned long clock_tick_ref;	/* clock_tick measured at ref_freq */
	unsigned int ref_freq;		/* frequency the *_ref values belong to */
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 };
/* Return the clock tick rate for @cpu.  If cpufreq has already recorded
 * a reference value (a frequency transition has occurred), report that
 * baseline; otherwise fall back to the per-cpu value (SMP) or the
 * global up_clock_tick (UP).
 */
unsigned long sparc64_get_clock_tick(unsigned int cpu)
{
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (ft->clock_tick_ref)
		return ft->clock_tick_ref;
#ifdef CONFIG_SMP
	return cpu_data[cpu].clock_tick;
#else
	return up_clock_tick;
#endif
}
#ifdef CONFIG_CPU_FREQ

/* cpufreq transition hook: rescale this cpu's udelay_val and clock_tick
 * from the reference values captured at the first transition.  The
 * rescale is applied on PRECHANGE when speeding up and on POSTCHANGE
 * when slowing down, so during the transition window the derived values
 * always correspond to the faster of the two rates.
 */
static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				    void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned int cpu = freq->cpu;
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

#ifdef CONFIG_SMP
	/* First transition seen on this cpu: record the baseline. */
	if (!ft->ref_freq) {
		ft->ref_freq = freq->old;
		ft->udelay_val_ref = cpu_data[cpu].udelay_val;
		ft->clock_tick_ref = cpu_data[cpu].clock_tick;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		cpu_data[cpu].udelay_val =
			cpufreq_scale(ft->udelay_val_ref,
				      ft->ref_freq,
				      freq->new);
		cpu_data[cpu].clock_tick =
			cpufreq_scale(ft->clock_tick_ref,
				      ft->ref_freq,
				      freq->new);
	}
#else
	/* In the non-SMP case, kernel/cpufreq.c takes care of adjusting
	 * loops_per_jiffy.
	 */
	if (!ft->ref_freq) {
		ft->ref_freq = freq->old;
		ft->clock_tick_ref = up_clock_tick;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new))
		up_clock_tick = cpufreq_scale(ft->clock_tick_ref, ft->ref_freq, freq->new);
#endif

	return 0;
}

static struct notifier_block sparc64_cpufreq_notifier_block = {
	.notifier_call = sparc64_cpufreq_notifier
};

#endif
/* The quotient formula is taken from the IA64 port. */ /* The quotient formula is taken from the IA64 port. */
void __init time_init(void) void __init time_init(void)
{ {
...@@ -996,6 +1065,11 @@ void __init time_init(void) ...@@ -996,6 +1065,11 @@ void __init time_init(void)
timer_ticks_per_usec_quotient = timer_ticks_per_usec_quotient =
(((1000000UL << 30) + (((1000000UL << 30) +
(clock / 2)) / clock); (clock / 2)) / clock);
#ifdef CONFIG_CPU_FREQ
cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
#endif
} }
static __inline__ unsigned long do_gettimeoffset(void) static __inline__ unsigned long do_gettimeoffset(void)
......
/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
*
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
*
* Many thanks to Dominik Brodowski for fixing up the cpufreq
* infrastructure in order to make this driver easier to implement.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/asi.h>
#include <asm/timer.h>
/* Set once the driver is registered; also used as an "is loaded" flag. */
static struct cpufreq_driver *cpufreq_us2e_driver;

struct us2e_freq_percpu_info {
	struct cpufreq_frequency_table table[6];	/* 5 dividers + end marker */
};

/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;

/* Physical addresses of the HBIRD memory controller and E*Star mode
 * registers, accessed with ASI_PHYS_BYPASS_EC_E loads/stores.
 */
#define HBIRD_MEM_CNTL0_ADDR	0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR	0x1fe0000f080UL

/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8.  These are controlled
 * in the ESTAR mode control register.
 */
#define ESTAR_MODE_DIV_1	0x0000000000000000UL
#define ESTAR_MODE_DIV_2	0x0000000000000001UL
#define ESTAR_MODE_DIV_4	0x0000000000000003UL
#define ESTAR_MODE_DIV_6	0x0000000000000002UL
#define ESTAR_MODE_DIV_8	0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK	0x0000000000000007UL

/* Memory control register 0 fields used below. */
#define MCTRL0_SREFRESH_ENAB	0x0000000000010000UL	/* DRAM self-refresh */
#define MCTRL0_REFR_COUNT_MASK	0x0000000000007f00UL	/* refresh counter */
#define MCTRL0_REFR_COUNT_SHIFT	8
#define MCTRL0_REFR_INTERVAL	7800	/* assumed nanoseconds -- TODO confirm */
#define MCTRL0_REFR_CLKS_P_CNT	64	/* clocks per refresh count tick */
/* Read a 64-bit hardware register at physical address @addr using the
 * ASI_PHYS_BYPASS_EC_E address space identifier (physical bypass).
 */
static unsigned long read_hbreg(unsigned long addr)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
	return ret;
}
/* Write @val to the hardware register at physical address @addr via
 * ASI_PHYS_BYPASS_EC_E, ordered by a #Sync membar.  Writes to the
 * E*Star mode register additionally wait for the PLL to relock.
 */
static void write_hbreg(unsigned long addr, unsigned long val)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
	if (addr == HBIRD_ESTAR_MODE_ADDR) {
		/* Need to wait 16 clock cycles for the PLL to lock. */
		udelay(1);
	}
}
/* Set or clear the DRAM self-refresh enable bit in memory control
 * register 0.  The trailing read-back presumably ensures the write has
 * reached the controller before the caller proceeds -- confirm.
 */
static void self_refresh_ctl(int enable)
{
	unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (enable)
		mctrl |= MCTRL0_SREFRESH_ENAB;
	else
		mctrl &= ~MCTRL0_SREFRESH_ENAB;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
}
/* Reprogram the refresh count field of memory control register 0 for
 * the new CPU clock divisor, keeping the DRAM refresh interval constant
 * as the clock the counter runs from changes:
 *
 *   refr_count = clock_tick * REFR_INTERVAL /
 *                (REFR_CLKS_P_CNT * divisor * 10^9)
 *
 * (units assumed: clock_tick in Hz, REFR_INTERVAL in nanoseconds --
 * TODO confirm against the UltraSPARC-IIe documentation).
 *
 * When slowing the cpu down with self-refresh disabled, spin until both
 * the old and the new refresh counts have had time to expire before
 * returning (see the in-line comment below).
 */
static void frob_mem_refresh(int cpu_slowing_down,
			     unsigned long clock_tick,
			     unsigned long old_divisor, unsigned long divisor)
{
	unsigned long old_refr_count, refr_count, mctrl;

	refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);

	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
		>> MCTRL0_REFR_COUNT_SHIFT;

	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
		unsigned long usecs;

		/* We have to wait for both refresh counts (old
		 * and new) to go to zero.
		 */
		usecs = (MCTRL0_REFR_CLKS_P_CNT *
			 (refr_count + old_refr_count) *
			 1000000UL *
			 old_divisor) / clock_tick;
		udelay(usecs + 1UL);
	}
}
/* Move the CPU clock from @old_divisor to @divisor by programming
 * @new_bits into the E*Star mode register, keeping memory refresh
 * settings consistent across the change.  Transitions between divisor 1
 * and divisors greater than 2 are not done directly: they pass through
 * divisor 2, implemented by recursing on this function.  The whole
 * sequence runs with local interrupts disabled.
 */
static void us2e_transition(unsigned long estar, unsigned long new_bits,
			    unsigned long clock_tick,
			    unsigned long old_divisor, unsigned long divisor)
{
	unsigned long flags;

	local_irq_save(flags);

	estar &= ~ESTAR_MODE_DIV_MASK;

	/* This is based upon the state transition diagram in the IIe manual. */
	if (old_divisor == 2 && divisor == 1) {
		self_refresh_ctl(0);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
	} else if (old_divisor == 1 && divisor == 2) {
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		self_refresh_ctl(1);
	} else if (old_divisor == 1 && divisor > 2) {
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				1, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor > 2 && divisor == 1) {
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				old_divisor, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor < divisor) {
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
	} else if (old_divisor > divisor) {
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
	} else {
		BUG();
	}

	local_irq_restore(flags);
}
/* Map a frequency-table index (0-4) to the corresponding E*Star mode
 * divider bits.  Any other index is a driver bug.
 */
static unsigned long index_to_estar_mode(unsigned int index)
{
	static const unsigned long estar_mode_tab[] = {
		ESTAR_MODE_DIV_1,
		ESTAR_MODE_DIV_2,
		ESTAR_MODE_DIV_4,
		ESTAR_MODE_DIV_6,
		ESTAR_MODE_DIV_8,
	};

	if (index > 4)
		BUG();
	return estar_mode_tab[index];
}
/* Map a frequency-table index (0-4) to the actual clock divisor value.
 * Any other index is a driver bug.
 */
static unsigned long index_to_divisor(unsigned int index)
{
	static const unsigned long divisor_tab[] = { 1, 2, 4, 6, 8 };

	if (index > 4)
		BUG();
	return divisor_tab[index];
}
/* Decode the divider currently programmed into the E*Star mode
 * register.  An unrecognized bit pattern is a bug.
 */
static unsigned long estar_to_divisor(unsigned long estar)
{
	switch (estar & ESTAR_MODE_DIV_MASK) {
	case ESTAR_MODE_DIV_1:
		return 1;
	case ESTAR_MODE_DIV_2:
		return 2;
	case ESTAR_MODE_DIV_4:
		return 4;
	case ESTAR_MODE_DIV_6:
		return 6;
	case ESTAR_MODE_DIV_8:
		return 8;
	default:
		BUG();
	}
	return 0;	/* not reached */
}
/* Switch @cpu to the clock divider selected by frequency-table @index.
 *
 * The calling thread is temporarily pinned to @cpu so the E*Star
 * register accesses hit the right processor; the previous cpus_allowed
 * mask is restored before returning.  cpufreq PRECHANGE/POSTCHANGE
 * notifications bracket the actual hardware transition.
 */
static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
	unsigned long new_bits, new_freq, cpus_allowed;
	unsigned long clock_tick, divisor, old_divisor, estar;
	struct cpufreq_freqs freqs;

	if (!cpu_online(cpu))
		return;

	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, (1UL << cpu));

	new_freq = clock_tick = sparc64_get_clock_tick(cpu);
	new_bits = index_to_estar_mode(index);
	divisor = index_to_divisor(index);
	new_freq /= divisor;

	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
	old_divisor = estar_to_divisor(estar);

	freqs.old = clock_tick / old_divisor;
	freqs.new = new_freq;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	set_cpus_allowed(current, cpus_allowed);
}
/* cpufreq ->target hook: pick the table entry matching @target_freq
 * under @relation and program its divider.  Returns 0 or -EINVAL when
 * no table entry satisfies the request.
 */
static int us2e_freq_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	struct cpufreq_frequency_table *tbl =
		us2e_freq_table[policy->cpu].table;
	unsigned int idx = 0;

	if (cpufreq_frequency_table_target(policy, tbl, target_freq,
					   relation, &idx) != 0)
		return -EINVAL;

	us2e_set_cpu_divider_index(policy->cpu, idx);
	return 0;
}
/* cpufreq ->verify hook: clamp the policy limits against this cpu's
 * supported divider table.
 */
static int us2e_freq_verify(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *tbl =
		us2e_freq_table[policy->cpu].table;

	return cpufreq_frequency_table_verify(policy, tbl);
}
/* Populate this cpu's frequency table with the five supported divisors
 * (1, 2, 4, 6, 8) of the full clock rate, terminate the table, and hand
 * it to the cpufreq core.
 *
 * Bug fix: the original code wrote entries 3-5 into table[2]/table[3]
 * (copy-paste error), leaving indices wrong, divisor-6/8 entries lost,
 * and the CPUFREQ_TABLE_END terminator in the wrong slot of the
 * 6-entry table.
 *
 * Returns 0 or the error from cpufreq_frequency_table_cpuinfo().
 */
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
	struct cpufreq_frequency_table *table =
		&us2e_freq_table[cpu].table[0];

	table[0].index = 0;
	table[0].frequency = clock_tick / 1;
	table[1].index = 1;
	table[1].frequency = clock_tick / 2;
	table[2].index = 2;
	table[2].frequency = clock_tick / 4;
	table[3].index = 3;
	table[3].frequency = clock_tick / 6;
	table[4].index = 4;
	table[4].frequency = clock_tick / 8;
	table[5].index = 5;
	table[5].frequency = CPUFREQ_TABLE_END;

	policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	policy->cpuinfo.transition_latency = 0;
	policy->cur = clock_tick;

	return cpufreq_frequency_table_cpuinfo(policy, table);
}
/* cpufreq ->exit hook: drop the cpu back to full speed (divider index
 * 0) on teardown, but only if the driver actually registered.
 */
static int __exit us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
	if (cpufreq_us2e_driver != NULL)
		us2e_set_cpu_divider_index(policy->cpu, 0);

	return 0;
}
/* Probe for an UltraSPARC-IIe (manufacturer 0x17, implementation 0x13
 * read from %ver) and, if found, allocate and register the cpufreq
 * driver plus the per-cpu frequency tables.
 *
 * Fix: the original terminated "driver->owner = THIS_MODULE" with a
 * stray comma operator instead of a semicolon; harmless but a typo.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, the error from
 * cpufreq_register_driver() on registration failure, or -ENODEV when
 * the CPU is not an UltraSPARC-IIe.  On any error all allocations are
 * released via the unified err_out path.
 */
static int __init us2e_freq_init(void)
{
	unsigned long manuf, impl, ver;
	int ret;

	__asm__("rdpr %%ver, %0" : "=r" (ver));
	manuf = ((ver >> 48) & 0xffff);
	impl  = ((ver >> 32) & 0xffff);

	if (manuf == 0x17 && impl == 0x13) {
		struct cpufreq_driver *driver;

		ret = -ENOMEM;
		driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
		if (!driver)
			goto err_out;
		memset(driver, 0, sizeof(*driver));

		us2e_freq_table = kmalloc(
			(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
			GFP_KERNEL);
		if (!us2e_freq_table)
			goto err_out;
		memset(us2e_freq_table, 0,
		       (NR_CPUS * sizeof(struct us2e_freq_percpu_info)));

		driver->verify = us2e_freq_verify;
		driver->target = us2e_freq_target;
		driver->init = us2e_freq_cpu_init;
		driver->exit = us2e_freq_cpu_exit;
		driver->owner = THIS_MODULE;
		strcpy(driver->name, "UltraSPARC-IIe");

		cpufreq_us2e_driver = driver;
		ret = cpufreq_register_driver(driver);
		if (ret)
			goto err_out;

		return 0;

err_out:
		/* Safe whichever step failed: driver may be NULL, and
		 * us2e_freq_table is NULL until its kmalloc succeeds.
		 */
		if (driver) {
			kfree(driver);
			cpufreq_us2e_driver = NULL;
		}
		if (us2e_freq_table) {
			kfree(us2e_freq_table);
			us2e_freq_table = NULL;
		}
		return ret;
	}

	return -ENODEV;
}
/* Module unload: unregister the driver (if it registered) and release
 * the driver struct and per-cpu tables.
 */
static void __exit us2e_freq_exit(void)
{
	struct cpufreq_driver *drv = cpufreq_us2e_driver;

	if (!drv)
		return;

	cpufreq_unregister_driver(drv);
	kfree(drv);
	cpufreq_us2e_driver = NULL;
	kfree(us2e_freq_table);
	us2e_freq_table = NULL;
}
/* Module metadata and entry points. */
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
MODULE_LICENSE("GPL");

module_init(us2e_freq_init);
module_exit(us2e_freq_exit);
...@@ -16,14 +16,12 @@ ...@@ -16,14 +16,12 @@
#include <linux/init.h> #include <linux/init.h>
#include <asm/head.h> #include <asm/head.h>
#include <asm/timer.h>
static struct cpufreq_driver *cpufreq_us3_driver; static struct cpufreq_driver *cpufreq_us3_driver;
struct us3_freq_percpu_info { struct us3_freq_percpu_info {
struct cpufreq_frequency_table table[4]; struct cpufreq_frequency_table table[4];
unsigned long udelay_val_ref;
unsigned long clock_tick_ref;
unsigned int ref_freq;
}; };
/* Indexed by cpu number. */ /* Indexed by cpu number. */
...@@ -56,71 +54,9 @@ static void write_safari_cfg(unsigned long val) ...@@ -56,71 +54,9 @@ static void write_safari_cfg(unsigned long val)
: "memory"); : "memory");
} }
#ifndef CONFIG_SMP
extern unsigned long up_clock_tick;
unsigned long clock_tick_ref;
unsigned int ref_freq;
#endif
static __inline__ unsigned long get_clock_tick(unsigned int cpu)
{
#ifdef CONFIG_SMP
if (us3_freq_table[cpu].clock_tick_ref)
return us3_freq_table[cpu].clock_tick_ref;
return cpu_data[cpu].clock_tick;
#else
if (clock_tick_ref)
return clock_tick_ref;
return up_clock_tick;
#endif
}
/* cpufreq transition notifier: rescale per-cpu timing values.
 *
 * @val:  CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE
 * @data: struct cpufreq_freqs * describing the old/new frequencies
 *
 * On the first transition the current udelay_val/clock_tick are latched
 * as references together with the frequency they correspond to; later
 * transitions scale from those references to freq->new.  Scaling is done
 * before an increase and after a decrease (presumably so the scaled
 * values are never too small for the currently running speed — same
 * convention as the generic loops_per_jiffy adjustment; confirm against
 * kernel/cpufreq.c).  Always returns 0 (NOTIFY_DONE).
 */
static int us3_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
#ifdef CONFIG_SMP
unsigned int cpu = freq->cpu;
/* First transition for this cpu: capture reference values. */
if (!us3_freq_table[cpu].ref_freq) {
us3_freq_table[cpu].ref_freq = freq->old;
us3_freq_table[cpu].udelay_val_ref = cpu_data[cpu].udelay_val;
us3_freq_table[cpu].clock_tick_ref = cpu_data[cpu].clock_tick;
}
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
cpu_data[cpu].udelay_val =
cpufreq_scale(us3_freq_table[cpu].udelay_val_ref,
us3_freq_table[cpu].ref_freq,
freq->new);
cpu_data[cpu].clock_tick =
cpufreq_scale(us3_freq_table[cpu].clock_tick_ref,
us3_freq_table[cpu].ref_freq,
freq->new);
}
#else
/* In the non-SMP case, kernel/cpufreq.c takes care of adjusting
 * loops_per_jiffy.
 */
if (!ref_freq) {
ref_freq = freq->old;
clock_tick_ref = up_clock_tick;
}
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new))
up_clock_tick = cpufreq_scale(clock_tick_ref, ref_freq, freq->new);
#endif
return 0;
}
/* Registered with CPUFREQ_TRANSITION_NOTIFIER to rescale timing state. */
static struct notifier_block us3_cpufreq_notifier_block = {
.notifier_call = us3_cpufreq_notifier
};
static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
{ {
unsigned long clock_tick = get_clock_tick(cpu); unsigned long clock_tick = sparc64_get_clock_tick(cpu);
unsigned long ret; unsigned long ret;
switch (safari_cfg & SAFARI_CFG_DIV_MASK) { switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
...@@ -151,7 +87,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) ...@@ -151,7 +87,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
cpus_allowed = current->cpus_allowed; cpus_allowed = current->cpus_allowed;
set_cpus_allowed(current, (1UL << cpu)); set_cpus_allowed(current, (1UL << cpu));
new_freq = get_clock_tick(cpu); new_freq = sparc64_get_clock_tick(cpu);
switch (index) { switch (index) {
case 0: case 0:
new_bits = SAFARI_CFG_DIV_1; new_bits = SAFARI_CFG_DIV_1;
...@@ -186,7 +122,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) ...@@ -186,7 +122,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
set_cpus_allowed(current, cpus_allowed); set_cpus_allowed(current, cpus_allowed);
} }
static int us3freq_target(struct cpufreq_policy *policy, static int us3_freq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int target_freq,
unsigned int relation) unsigned int relation)
{ {
...@@ -204,16 +140,16 @@ static int us3freq_target(struct cpufreq_policy *policy, ...@@ -204,16 +140,16 @@ static int us3freq_target(struct cpufreq_policy *policy,
return 0; return 0;
} }
static int us3freq_verify(struct cpufreq_policy *policy) static int us3_freq_verify(struct cpufreq_policy *policy)
{ {
return cpufreq_frequency_table_verify(policy, return cpufreq_frequency_table_verify(policy,
&us3_freq_table[policy->cpu].table[0]); &us3_freq_table[policy->cpu].table[0]);
} }
static int __init us3freq_cpu_init(struct cpufreq_policy *policy) static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{ {
unsigned int cpu = policy->cpu; unsigned int cpu = policy->cpu;
unsigned long clock_tick = get_clock_tick(cpu); unsigned long clock_tick = sparc64_get_clock_tick(cpu);
struct cpufreq_frequency_table *table = struct cpufreq_frequency_table *table =
&us3_freq_table[cpu].table[0]; &us3_freq_table[cpu].table[0];
...@@ -233,7 +169,7 @@ static int __init us3freq_cpu_init(struct cpufreq_policy *policy) ...@@ -233,7 +169,7 @@ static int __init us3freq_cpu_init(struct cpufreq_policy *policy)
return cpufreq_frequency_table_cpuinfo(policy, table); return cpufreq_frequency_table_cpuinfo(policy, table);
} }
static int __exit us3freq_cpu_exit(struct cpufreq_policy *policy) static int __exit us3_freq_cpu_exit(struct cpufreq_policy *policy)
{ {
if (cpufreq_us3_driver) if (cpufreq_us3_driver)
us3_set_cpu_divider_index(policy->cpu, 0); us3_set_cpu_divider_index(policy->cpu, 0);
...@@ -241,7 +177,7 @@ static int __exit us3freq_cpu_exit(struct cpufreq_policy *policy) ...@@ -241,7 +177,7 @@ static int __exit us3freq_cpu_exit(struct cpufreq_policy *policy)
return 0; return 0;
} }
static int __init us3freq_init(void) static int __init us3_freq_init(void)
{ {
unsigned long manuf, impl, ver; unsigned long manuf, impl, ver;
int ret; int ret;
...@@ -254,9 +190,6 @@ static int __init us3freq_init(void) ...@@ -254,9 +190,6 @@ static int __init us3freq_init(void)
(impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) { (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) {
struct cpufreq_driver *driver; struct cpufreq_driver *driver;
cpufreq_register_notifier(&us3_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
ret = -ENOMEM; ret = -ENOMEM;
driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
if (!driver) if (!driver)
...@@ -272,10 +205,10 @@ static int __init us3freq_init(void) ...@@ -272,10 +205,10 @@ static int __init us3freq_init(void)
memset(us3_freq_table, 0, memset(us3_freq_table, 0,
(NR_CPUS * sizeof(struct us3_freq_percpu_info))); (NR_CPUS * sizeof(struct us3_freq_percpu_info)));
driver->verify = us3freq_verify; driver->verify = us3_freq_verify;
driver->target = us3freq_target; driver->target = us3_freq_target;
driver->init = us3freq_cpu_init; driver->init = us3_freq_cpu_init;
driver->exit = us3freq_cpu_exit; driver->exit = us3_freq_cpu_exit;
driver->owner = THIS_MODULE, driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-III"); strcpy(driver->name, "UltraSPARC-III");
...@@ -295,20 +228,16 @@ static int __init us3freq_init(void) ...@@ -295,20 +228,16 @@ static int __init us3freq_init(void)
kfree(us3_freq_table); kfree(us3_freq_table);
us3_freq_table = NULL; us3_freq_table = NULL;
} }
cpufreq_unregister_notifier(&us3_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
return ret; return ret;
} }
return -ENODEV; return -ENODEV;
} }
static void __exit us3freq_exit(void) static void __exit us3_freq_exit(void)
{ {
if (cpufreq_us3_driver) { if (cpufreq_us3_driver) {
cpufreq_unregister_driver(cpufreq_us3_driver); cpufreq_unregister_driver(cpufreq_us3_driver);
cpufreq_unregister_notifier(&us3_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
kfree(cpufreq_us3_driver); kfree(cpufreq_us3_driver);
cpufreq_us3_driver = NULL; cpufreq_us3_driver = NULL;
...@@ -321,5 +250,5 @@ MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); ...@@ -321,5 +250,5 @@ MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III"); MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
module_init(us3freq_init); module_init(us3_freq_init);
module_exit(us3freq_exit); module_exit(us3_freq_exit);
...@@ -11,4 +11,4 @@ obj-y := PeeCeeI.o blockops.o debuglocks.o strlen.o strncmp.o \ ...@@ -11,4 +11,4 @@ obj-y := PeeCeeI.o blockops.o debuglocks.o strlen.o strncmp.o \
VIScopy.o VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \ VIScopy.o VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \
VIScsumcopyusr.o VISsave.o atomic.o rwlock.o bitops.o \ VIScsumcopyusr.o VISsave.o atomic.o rwlock.o bitops.o \
dec_and_lock.o U3memcpy.o U3copy_from_user.o U3copy_to_user.o \ dec_and_lock.o U3memcpy.o U3copy_from_user.o U3copy_to_user.o \
U3copy_in_user.o mcount.o ipcsum.o U3copy_in_user.o mcount.o ipcsum.o rwsem.o
/* rwsem.c: Don't inline expand these suckers all over the place.
*
* Written by David S. Miller (davem@redhat.com), 2001.
* Derived from asm-i386/rwsem.h
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/module.h>
extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *));
/* __down_read - acquire @sem for reading.
 *
 * Fast path: an lduw/cas retry loop atomically increments the 32-bit
 * count.  If the incremented count is negative (writer active or
 * queued), branch to the out-of-line path at 3:, which opens a new
 * register window, preserves the g1-g3 globals around the C call, and
 * calls rwsem_down_read_failed(sem).  The membar orders the acquire
 * before the critical section's memory accesses.
 */
void __down_read(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __down_read\n"
"1:\tlduw [%0], %%g5\n\t"
"add %%g5, 1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" add %%g7, 1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g1, %%l1\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l1, %%g1\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __down_read"
: : "r" (sem), "i" (rwsem_down_read_failed)
: "g5", "g7", "memory", "cc");
}
EXPORT_SYMBOL(__down_read);
/* __down_read_trylock - try to acquire @sem for reading without sleeping.
 *
 * Returns 1 on success, 0 if the incremented count would be negative
 * (writer active or queued).  Single cas attempt per loaded value; the
 * loop only retries when the cas loses a race.
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__(
"! beginning __down_read_trylock\n"
"1:\tlduw [%1], %%g5\n\t"
"add %%g5, 1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 2f\n\t"
" mov 0, %0\n\t"
"cas [%1], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" mov 1, %0\n\t"
"membar #StoreLoad | #StoreStore\n"
"2:\n\t"
"! ending __down_read_trylock"
: "=&r" (result)
: "r" (sem)
: "g5", "g7", "memory", "cc");
return result;
}
EXPORT_SYMBOL(__down_read_trylock);
/* __down_write - acquire @sem for writing.
 *
 * Atomically adds RWSEM_ACTIVE_WRITE_BIAS to the count with a cas loop.
 * If the previous count was non-zero (any reader or writer active), take
 * the out-of-line path at 3:, which saves g2/g3 in a fresh register
 * window and calls rwsem_down_write_failed(sem).
 */
void __down_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __down_write\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%0], %%g5\n\t"
"add %%g5, %%g1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" cmp %%g7, 0\n\t"
"bne,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __down_write"
: : "r" (sem), "i" (rwsem_down_write_failed),
"i" (RWSEM_ACTIVE_WRITE_BIAS)
: "g1", "g5", "g7", "memory", "cc");
}
EXPORT_SYMBOL(__down_write);
/* __down_write_trylock - try to acquire @sem for writing without sleeping.
 *
 * Succeeds (returns 1) only when the count is observed as 0 and the cas
 * installing RWSEM_ACTIVE_WRITE_BIAS wins; returns 0 if the semaphore
 * is held at all.
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__(
"! beginning __down_write_trylock\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%1], %%g5\n\t"
"cmp %%g5, 0\n\t"
"bne,pn %%icc, 2f\n\t"
" mov 0, %0\n\t"
"add %%g5, %%g1, %%g7\n\t"
"cas [%1], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" mov 1, %0\n\t"
"membar #StoreLoad | #StoreStore\n"
"2:\n\t"
"! ending __down_write_trylock"
: "=&r" (result)
: "r" (sem), "i" (RWSEM_ACTIVE_WRITE_BIAS)
: "g1", "g5", "g7", "memory", "cc");
return result;
}
EXPORT_SYMBOL(__down_write_trylock);
/* __up_read - release a read hold on @sem.
 *
 * Atomically decrements the count.  If the pre-decrement value was
 * negative (waiters queued), the out-of-line path masks the new count
 * with RWSEM_ACTIVE_MASK and, when no active holders remain, calls
 * rwsem_wake(sem) to wake a waiter.
 */
void __up_read(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __up_read\n\t"
"1:\tlduw [%0], %%g5\n\t"
"sub %%g5, 1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tsethi %%hi(%2), %%g1\n\t"
"sub %%g7, 1, %%g7\n\t"
"or %%g1, %%lo(%2), %%g1\n\t"
"andcc %%g7, %%g1, %%g0\n\t"
"bne,pn %%icc, 2b\n\t"
" mov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __up_read"
: : "r" (sem), "i" (rwsem_wake),
"i" (RWSEM_ACTIVE_MASK)
: "g1", "g5", "g7", "memory", "cc");
}
EXPORT_SYMBOL(__up_read);
/* __up_write - release the write hold on @sem.
 *
 * Atomically subtracts RWSEM_ACTIVE_WRITE_BIAS.  If the resulting count
 * is negative (waiters queued), the out-of-line path calls
 * rwsem_wake(sem).
 */
void __up_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __up_write\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%0], %%g5\n\t"
"sub %%g5, %%g1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" sub %%g7, %%g1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __up_write"
: : "r" (sem), "i" (rwsem_wake),
"i" (RWSEM_ACTIVE_WRITE_BIAS)
: "g1", "g5", "g7", "memory", "cc");
}
EXPORT_SYMBOL(__up_write);
/* __downgrade_write - convert a write hold on @sem into a read hold.
 *
 * Atomically subtracts RWSEM_WAITING_BIAS from the count.  If the
 * resulting count is negative (waiters queued), the out-of-line path
 * calls rwsem_downgrade_wake(sem) to wake queued readers.
 *
 * Fix: the trailing assembler comment previously said
 * "! ending __up_write" (copy-paste from __up_write).  It is only a
 * comment in the emitted assembly, but it mislabelled this routine in
 * .s output; corrected to name __downgrade_write.
 */
void __downgrade_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __downgrade_write\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%0], %%g5\n\t"
"sub %%g5, %%g1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" sub %%g7, %%g1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __downgrade_write"
: : "r" (sem), "i" (rwsem_downgrade_wake),
"i" (RWSEM_WAITING_BIAS)
: "g1", "g5", "g7", "memory", "cc");
}
EXPORT_SYMBOL(__downgrade_write);
...@@ -2893,7 +2893,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { ...@@ -2893,7 +2893,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
struct tx_buf_desc *buf_desc_ptr; struct tx_buf_desc *buf_desc_ptr;
int desc; int desc;
int comp_code; int comp_code;
int total_len, pad, last; int total_len;
struct cpcs_trailer *trailer; struct cpcs_trailer *trailer;
struct ia_vcc *iavcc; struct ia_vcc *iavcc;
...@@ -2975,9 +2975,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { ...@@ -2975,9 +2975,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
/* Figure out the exact length of the packet and padding required to /* Figure out the exact length of the packet and padding required to
make it aligned on a 48 byte boundary. */ make it aligned on a 48 byte boundary. */
total_len = skb->len + sizeof(struct cpcs_trailer); total_len = skb->len + sizeof(struct cpcs_trailer);
last = total_len - (total_len/48)*48; total_len = ((total_len + 47) / 48) * 48;
pad = 48 - last;
total_len = pad + total_len;
IF_TX(printk("ia packet len:%d padding:%d\n", total_len, pad);) IF_TX(printk("ia packet len:%d padding:%d\n", total_len, pad);)
/* Put the packet in a tx buffer */ /* Put the packet in a tx buffer */
......
...@@ -199,12 +199,12 @@ static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_pa ...@@ -199,12 +199,12 @@ static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_pa
printk("Using default partitions for %s\n",BOARD_NAME); printk("Using default partitions for %s\n",BOARD_NAME);
npartitions=1; npartitions=1;
parts = kmalloc(npartitions*sizeof(*parts)+strlen(name), GFP_KERNEL); parts = kmalloc(npartitions*sizeof(*parts)+strlen(name)+1, GFP_KERNEL);
memzero(parts,npartitions*sizeof(*parts)+strlen(name));
if (!parts) { if (!parts) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
memzero(parts,npartitions*sizeof(*parts)+strlen(name));
i=0; i=0;
names = (char *)&parts[npartitions]; names = (char *)&parts[npartitions];
parts[i].name = names; parts[i].name = names;
...@@ -218,10 +218,11 @@ static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_pa ...@@ -218,10 +218,11 @@ static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_pa
parts[i].size = FLASH_SIZE-0x00180000; parts[i].size = FLASH_SIZE-0x00180000;
parts[i].offset = 0x00180000; parts[i].offset = 0x00180000;
#endif #endif
ret = npartitions;
out: out:
*pparts = parts; *pparts = parts;
return npartitions; return ret;
} }
......
...@@ -501,10 +501,11 @@ static struct console serial21285_console = ...@@ -501,10 +501,11 @@ static struct console serial21285_console =
.index = -1, .index = -1,
}; };
static void __init rs285_console_init(void) static int __init rs285_console_init(void)
{ {
serial21285_setup_ports(); serial21285_setup_ports();
register_console(&serial21285_console); register_console(&serial21285_console);
return 0;
} }
console_initcall(rs285_console_init); console_initcall(rs285_console_init);
......
...@@ -31,7 +31,7 @@ obj-$(CONFIG_FB_CT65550) += chipsfb.o cfbfillrect.o cfbcopyarea.o cfbim ...@@ -31,7 +31,7 @@ obj-$(CONFIG_FB_CT65550) += chipsfb.o cfbfillrect.o cfbcopyarea.o cfbim
obj-$(CONFIG_FB_ANAKIN) += anakinfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_ANAKIN) += anakinfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_CYBER) += cyberfb.o obj-$(CONFIG_FB_CYBER) += cyberfb.o
obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_SGIVW) += sgivwfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_SGIVW) += sgivwfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_3DFX) += tdfxfb.o cfbimgblt.o obj-$(CONFIG_FB_3DFX) += tdfxfb.o cfbimgblt.o
obj-$(CONFIG_FB_MAC) += macfb.o macmodes.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_MAC) += macfb.o macmodes.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
......
This diff is collapsed.
...@@ -1111,13 +1111,11 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) ...@@ -1111,13 +1111,11 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
#elif defined(__mips__) #elif defined(__mips__)
pgprot_val(vma->vm_page_prot) &= ~_CACHE_MASK; pgprot_val(vma->vm_page_prot) &= ~_CACHE_MASK;
pgprot_val(vma->vm_page_prot) |= _CACHE_UNCACHED; pgprot_val(vma->vm_page_prot) |= _CACHE_UNCACHED;
#elif defined(__arm__)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#elif defined(__sh__) #elif defined(__sh__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_CACHABLE; pgprot_val(vma->vm_page_prot) &= ~_PAGE_CACHABLE;
#elif defined(__hppa__) #elif defined(__hppa__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#elif defined(__ia64__) #elif defined(__ia64__) || defined(__arm__)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#else #else
#warning What do we have to do here?? #warning What do we have to do here??
......
...@@ -4,9 +4,13 @@ ...@@ -4,9 +4,13 @@
/* We're not 64-bit, but... */ /* We're not 64-bit, but... */
#define do_div(n,base) \ #define do_div(n,base) \
({ \ ({ \
int __res; \ register int __res asm("r2") = base; \
__res = ((unsigned long)n) % (unsigned int)base; \ register unsigned long long __n asm("r0") = n; \
n = ((unsigned long)n) / (unsigned int)base; \ asm("bl do_div64" \
: "=r" (__n), "=r" (__res) \
: "0" (__n), "1" (__res) \
: "r3", "ip", "lr", "cc"); \
n = __n; \
__res; \ __res; \
}) })
......
...@@ -103,6 +103,7 @@ ...@@ -103,6 +103,7 @@
* entries are stored 1024 bytes below. * entries are stored 1024 bytes below.
*/ */
#define L_PTE_PRESENT (1 << 0) #define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1) #define L_PTE_YOUNG (1 << 1)
#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */ #define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */ #define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
...@@ -173,6 +174,7 @@ static inline pte_t *pmd_page_kernel(pmd_t pmd) ...@@ -173,6 +174,7 @@ static inline pte_t *pmd_page_kernel(pmd_t pmd)
#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) #define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define PTE_BIT_FUNC(fn,op) \ #define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
...@@ -196,6 +198,11 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); ...@@ -196,6 +198,11 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
#define pgtable_cache_init() do { } while (0) #define pgtable_cache_init() do { } while (0)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
#define PTE_FILE_MAX_BITS 30
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __ASM_PROC_PGTABLE_H */ #endif /* __ASM_PROC_PGTABLE_H */
...@@ -65,11 +65,12 @@ extern int cpu_architecture(void); ...@@ -65,11 +65,12 @@ extern int cpu_architecture(void);
* The `mb' is to tell GCC not to cache `current' across this call. * The `mb' is to tell GCC not to cache `current' across this call.
*/ */
struct thread_info; struct thread_info;
extern struct thread_info *__switch_to(struct thread_info *, struct thread_info *); struct task_struct;
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
#define switch_to(prev,next,last) \ #define switch_to(prev,next,last) \
do { \ do { \
__switch_to(prev->thread_info,next->thread_info); \ last = __switch_to(prev,prev->thread_info,next->thread_info); \
mb(); \ mb(); \
} while (0) } while (0)
......
...@@ -18,11 +18,6 @@ ...@@ -18,11 +18,6 @@
struct rwsem_waiter; struct rwsem_waiter;
extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *));
struct rw_semaphore { struct rw_semaphore {
signed int count; signed int count;
#define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_UNLOCKED_VALUE 0x00000000
...@@ -48,222 +43,13 @@ static __inline__ void init_rwsem(struct rw_semaphore *sem) ...@@ -48,222 +43,13 @@ static __inline__ void init_rwsem(struct rw_semaphore *sem)
INIT_LIST_HEAD(&sem->wait_list); INIT_LIST_HEAD(&sem->wait_list);
} }
static __inline__ void __down_read(struct rw_semaphore *sem) extern void __down_read(struct rw_semaphore *sem);
{ extern int __down_read_trylock(struct rw_semaphore *sem);
__asm__ __volatile__( extern void __down_write(struct rw_semaphore *sem);
"! beginning __down_read\n" extern int __down_write_trylock(struct rw_semaphore *sem);
"1:\tlduw [%0], %%g5\n\t" extern void __up_read(struct rw_semaphore *sem);
"add %%g5, 1, %%g7\n\t" extern void __up_write(struct rw_semaphore *sem);
"cas [%0], %%g5, %%g7\n\t" extern void __downgrade_write(struct rw_semaphore *sem);
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" add %%g7, 1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g1, %%l1\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l1, %%g1\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __down_read"
: : "r" (sem), "i" (rwsem_down_read_failed)
: "g5", "g7", "memory", "cc");
}
static __inline__ int __down_read_trylock(struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__(
"! beginning __down_read_trylock\n"
"1:\tlduw [%1], %%g5\n\t"
"add %%g5, 1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 2f\n\t"
" mov 0, %0\n\t"
"cas [%1], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" mov 1, %0\n\t"
"membar #StoreLoad | #StoreStore\n"
"2:\n\t"
"! ending __down_read_trylock"
: "=&r" (result)
: "r" (sem)
: "g5", "g7", "memory", "cc");
return result;
}
static __inline__ void __down_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __down_write\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%0], %%g5\n\t"
"add %%g5, %%g1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" cmp %%g7, 0\n\t"
"bne,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __down_write"
: : "r" (sem), "i" (rwsem_down_write_failed),
"i" (RWSEM_ACTIVE_WRITE_BIAS)
: "g1", "g5", "g7", "memory", "cc");
}
static __inline__ int __down_write_trylock(struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__(
"! beginning __down_write_trylock\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%1], %%g5\n\t"
"cmp %%g5, 0\n\t"
"bne,pn %%icc, 2f\n\t"
" mov 0, %0\n\t"
"add %%g5, %%g1, %%g7\n\t"
"cas [%1], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" mov 1, %0\n\t"
"membar #StoreLoad | #StoreStore\n"
"2:\n\t"
"! ending __down_write_trylock"
: "=&r" (result)
: "r" (sem), "i" (RWSEM_ACTIVE_WRITE_BIAS)
: "g1", "g5", "g7", "memory", "cc");
return result;
}
static __inline__ void __up_read(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __up_read\n\t"
"1:\tlduw [%0], %%g5\n\t"
"sub %%g5, 1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tsethi %%hi(%2), %%g1\n\t"
"sub %%g7, 1, %%g7\n\t"
"or %%g1, %%lo(%2), %%g1\n\t"
"andcc %%g7, %%g1, %%g0\n\t"
"bne,pn %%icc, 2b\n\t"
" mov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __up_read"
: : "r" (sem), "i" (rwsem_wake),
"i" (RWSEM_ACTIVE_MASK)
: "g1", "g5", "g7", "memory", "cc");
}
static __inline__ void __up_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __up_write\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%0], %%g5\n\t"
"sub %%g5, %%g1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" sub %%g7, %%g1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __up_write"
: : "r" (sem), "i" (rwsem_wake),
"i" (RWSEM_ACTIVE_WRITE_BIAS)
: "g1", "g5", "g7", "memory", "cc");
}
static __inline__ void __downgrade_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __up_write\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
"1:\tlduw [%0], %%g5\n\t"
"sub %%g5, %%g1, %%g7\n\t"
"cas [%0], %%g5, %%g7\n\t"
"cmp %%g5, %%g7\n\t"
"bne,pn %%icc, 1b\n\t"
" sub %%g7, %%g1, %%g7\n\t"
"cmp %%g7, 0\n\t"
"bl,pn %%icc, 3f\n\t"
" membar #StoreLoad | #StoreStore\n"
"2:\n\t"
".subsection 2\n"
"3:\tmov %0, %%g5\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
"call %1\n\t"
" mov %%g5, %%o0\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __up_write"
: : "r" (sem), "i" (rwsem_downgrade_wake),
"i" (RWSEM_WAITING_BIAS)
: "g1", "g5", "g7", "memory", "cc");
}
static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem) static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{ {
......
...@@ -66,4 +66,10 @@ extern unsigned long timer_tick_offset; ...@@ -66,4 +66,10 @@ extern unsigned long timer_tick_offset;
extern void timer_tick_interrupt(struct pt_regs *); extern void timer_tick_interrupt(struct pt_regs *);
#endif #endif
#ifndef CONFIG_SMP
extern unsigned long up_clock_tick;
#endif
extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
#endif /* _SPARC64_TIMER_H */ #endif /* _SPARC64_TIMER_H */
...@@ -231,15 +231,8 @@ struct sk_buff { ...@@ -231,15 +231,8 @@ struct sk_buff {
pkt_type, pkt_type,
ip_summed; ip_summed;
__u32 priority; __u32 priority;
atomic_t users;
unsigned short protocol, unsigned short protocol,
security; security;
unsigned int truesize;
unsigned char *head,
*data,
*tail,
*end;
void (*destructor)(struct sk_buff *skb); void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER #ifdef CONFIG_NETFILTER
...@@ -261,6 +254,14 @@ struct sk_buff { ...@@ -261,6 +254,14 @@ struct sk_buff {
#ifdef CONFIG_NET_SCHED #ifdef CONFIG_NET_SCHED
__u32 tc_index; /* traffic control index */ __u32 tc_index; /* traffic control index */
#endif #endif
/* These elements must be at the end, see alloc_skb() for details. */
unsigned int truesize;
atomic_t users;
unsigned char *head,
*data,
*tail,
*end;
}; };
#define SK_WMEM_MAX 65535 #define SK_WMEM_MAX 65535
......
...@@ -42,7 +42,6 @@ struct xfrm_selector ...@@ -42,7 +42,6 @@ struct xfrm_selector
__u8 proto; __u8 proto;
int ifindex; int ifindex;
uid_t user; uid_t user;
void *owner;
}; };
#define XFRM_INF (~(u64)0) #define XFRM_INF (~(u64)0)
...@@ -164,8 +163,8 @@ struct xfrm_usersa_id { ...@@ -164,8 +163,8 @@ struct xfrm_usersa_id {
struct xfrm_userspi_info { struct xfrm_userspi_info {
struct xfrm_usersa_info info; struct xfrm_usersa_info info;
u32 min; __u32 min;
u32 max; __u32 max;
}; };
struct xfrm_userpolicy_info { struct xfrm_userpolicy_info {
......
...@@ -17,9 +17,6 @@ struct ah_data ...@@ -17,9 +17,6 @@ struct ah_data
struct crypto_tfm *tfm; struct crypto_tfm *tfm;
}; };
extern void skb_ah_walk(const struct sk_buff *skb,
struct crypto_tfm *tfm, icv_update_fn_t icv_update);
static inline void static inline void
ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data) ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
{ {
...@@ -27,7 +24,7 @@ ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data) ...@@ -27,7 +24,7 @@ ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
memset(auth_data, 0, ahp->icv_trunc_len); memset(auth_data, 0, ahp->icv_trunc_len);
crypto_hmac_init(tfm, ahp->key, &ahp->key_len); crypto_hmac_init(tfm, ahp->key, &ahp->key_len);
skb_ah_walk(skb, tfm, crypto_hmac_update); skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv); crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv);
memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len); memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len);
} }
......
...@@ -248,11 +248,6 @@ extern void dst_init(void); ...@@ -248,11 +248,6 @@ extern void dst_init(void);
struct flowi; struct flowi;
extern int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, extern int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
struct sock *sk, int flags); struct sock *sk, int flags);
extern int xfrm6_lookup(struct dst_entry **dst_p, struct flowi *fl,
struct sock *sk, int flags);
extern void xfrm_init(void);
extern void xfrm6_init(void);
#endif #endif
#endif /* _NET_DST_H */ #endif /* _NET_DST_H */
...@@ -33,8 +33,6 @@ struct esp_data ...@@ -33,8 +33,6 @@ struct esp_data
} auth; } auth;
}; };
extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
int offset, int len, icv_update_fn_t icv_update);
extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len); extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
......
...@@ -781,4 +781,7 @@ extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name); ...@@ -781,4 +781,7 @@ extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name);
struct crypto_tfm; struct crypto_tfm;
typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int); typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int);
extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
int offset, int len, icv_update_fn_t icv_update);
#endif /* _NET_XFRM_H */ #endif /* _NET_XFRM_H */
...@@ -195,46 +195,14 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask) ...@@ -195,46 +195,14 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
if (!data) if (!data)
goto nodata; goto nodata;
/* XXX: does not include slab overhead */ memset(skb, 0, offsetof(struct sk_buff, truesize));
skb->next = skb->prev = NULL;
skb->list = NULL;
skb->sk = NULL;
skb->stamp.tv_sec = 0; /* No idea about time */
skb->dev = NULL;
skb->dst = NULL;
skb->sp = NULL;
memset(skb->cb, 0, sizeof(skb->cb));
/* Set up other state */
skb->len = 0;
skb->data_len = 0;
skb->csum = 0;
skb->local_df = 0;
skb->cloned = 0;
skb->pkt_type = PACKET_HOST; /* Default type */
skb->ip_summed = 0;
skb->priority = 0;
atomic_set(&skb->users, 1);
skb->security = 0; /* By default packets are insecure */
skb->truesize = size + sizeof(struct sk_buff); skb->truesize = size + sizeof(struct sk_buff);
atomic_set(&skb->users, 1);
/* Load the data pointers. */ skb->head = data;
skb->head = skb->data = skb->tail = data; skb->data = data;
skb->tail = data;
skb->end = data + size; skb->end = data + size;
skb->destructor = NULL;
#ifdef CONFIG_NETFILTER
skb->nfmark = skb->nfcache = 0;
skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
skb->nf_debug = 0;
#endif
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
skb->nf_bridge = NULL;
#endif
#endif
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
#endif
atomic_set(&(skb_shinfo(skb)->dataref), 1); atomic_set(&(skb_shinfo(skb)->dataref), 1);
skb_shinfo(skb)->nr_frags = 0; skb_shinfo(skb)->nr_frags = 0;
skb_shinfo(skb)->tso_size = 0; skb_shinfo(skb)->tso_size = 0;
...@@ -367,10 +335,10 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) ...@@ -367,10 +335,10 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
C(nh); C(nh);
C(mac); C(mac);
C(dst); C(dst);
dst_clone(n->dst); dst_clone(skb->dst);
C(sp); C(sp);
#ifdef CONFIG_INET #ifdef CONFIG_INET
secpath_get(n->sp); secpath_get(skb->sp);
#endif #endif
memcpy(n->cb, skb->cb, sizeof(skb->cb)); memcpy(n->cb, skb->cb, sizeof(skb->cb));
C(len); C(len);
...@@ -381,24 +349,20 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) ...@@ -381,24 +349,20 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
C(pkt_type); C(pkt_type);
C(ip_summed); C(ip_summed);
C(priority); C(priority);
atomic_set(&n->users, 1);
C(protocol); C(protocol);
C(security); C(security);
C(truesize);
C(head);
C(data);
C(tail);
C(end);
n->destructor = NULL; n->destructor = NULL;
#ifdef CONFIG_NETFILTER #ifdef CONFIG_NETFILTER
C(nfmark); C(nfmark);
C(nfcache); C(nfcache);
C(nfct); C(nfct);
nf_conntrack_get(skb->nfct);
#ifdef CONFIG_NETFILTER_DEBUG #ifdef CONFIG_NETFILTER_DEBUG
C(nf_debug); C(nf_debug);
#endif #endif
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
C(nf_bridge); C(nf_bridge);
nf_bridge_get(skb->nf_bridge);
#endif #endif
#endif /*CONFIG_NETFILTER*/ #endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_HIPPI) #if defined(CONFIG_HIPPI)
...@@ -407,15 +371,16 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) ...@@ -407,15 +371,16 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
#ifdef CONFIG_NET_SCHED #ifdef CONFIG_NET_SCHED
C(tc_index); C(tc_index);
#endif #endif
C(truesize);
atomic_set(&n->users, 1);
C(head);
C(data);
C(tail);
C(end);
atomic_inc(&(skb_shinfo(skb)->dataref)); atomic_inc(&(skb_shinfo(skb)->dataref));
skb->cloned = 1; skb->cloned = 1;
#ifdef CONFIG_NETFILTER
nf_conntrack_get(skb->nfct);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
nf_bridge_get(skb->nf_bridge);
#endif
#endif
return n; return n;
} }
...@@ -439,7 +404,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ...@@ -439,7 +404,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->nh.raw = old->nh.raw + offset; new->nh.raw = old->nh.raw + offset;
new->mac.raw = old->mac.raw + offset; new->mac.raw = old->mac.raw + offset;
memcpy(new->cb, old->cb, sizeof(old->cb)); memcpy(new->cb, old->cb, sizeof(old->cb));
atomic_set(&new->users, 1);
new->local_df = old->local_df; new->local_df = old->local_df;
new->pkt_type = old->pkt_type; new->pkt_type = old->pkt_type;
new->stamp = old->stamp; new->stamp = old->stamp;
...@@ -449,18 +413,19 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ...@@ -449,18 +413,19 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->nfmark = old->nfmark; new->nfmark = old->nfmark;
new->nfcache = old->nfcache; new->nfcache = old->nfcache;
new->nfct = old->nfct; new->nfct = old->nfct;
nf_conntrack_get(new->nfct); nf_conntrack_get(old->nfct);
#ifdef CONFIG_NETFILTER_DEBUG #ifdef CONFIG_NETFILTER_DEBUG
new->nf_debug = old->nf_debug; new->nf_debug = old->nf_debug;
#endif #endif
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
new->nf_bridge = old->nf_bridge; new->nf_bridge = old->nf_bridge;
nf_bridge_get(new->nf_bridge); nf_bridge_get(old->nf_bridge);
#endif #endif
#endif #endif
#ifdef CONFIG_NET_SCHED #ifdef CONFIG_NET_SCHED
new->tc_index = old->tc_index; new->tc_index = old->tc_index;
#endif #endif
atomic_set(&new->users, 1);
} }
/** /**
......
...@@ -1065,6 +1065,62 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, ...@@ -1065,6 +1065,62 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
return ret; return ret;
} }
static int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct file* filp, void *buffer,
size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val)
rt_cache_flush(0);
return ret;
}
static int ipv4_doint_and_flush_strategy(ctl_table *table, int *name, int nlen,
void *oldval, size_t *oldlenp,
void *newval, size_t newlen,
void **context)
{
int *valp = table->data;
int new;
if (!newval || !newlen)
return 0;
if (newlen != sizeof(int))
return -EINVAL;
if (get_user(new, (int *)newval))
return -EFAULT;
if (new == *valp)
return 0;
if (oldval && oldlenp) {
size_t len;
if (get_user(len, oldlenp))
return -EFAULT;
if (len) {
if (len > table->maxlen)
len = table->maxlen;
if (copy_to_user(oldval, valp, len))
return -EFAULT;
if (put_user(len, oldlenp))
return -EFAULT;
}
}
*valp = new;
rt_cache_flush(0);
return 1;
}
static struct devinet_sysctl_table { static struct devinet_sysctl_table {
struct ctl_table_header *sysctl_header; struct ctl_table_header *sysctl_header;
ctl_table devinet_vars[17]; ctl_table devinet_vars[17];
...@@ -1192,7 +1248,8 @@ static struct devinet_sysctl_table { ...@@ -1192,7 +1248,8 @@ static struct devinet_sysctl_table {
.data = &ipv4_devconf.no_xfrm, .data = &ipv4_devconf.no_xfrm,
.maxlen = sizeof(int), .maxlen = sizeof(int),
.mode = 0644, .mode = 0644,
.proc_handler =&proc_dointvec, .proc_handler = &ipv4_doint_and_flush,
.strategy = &ipv4_doint_and_flush_strategy,
}, },
{ {
.ctl_name = NET_IPV4_CONF_NOPOLICY, .ctl_name = NET_IPV4_CONF_NOPOLICY,
...@@ -1200,7 +1257,8 @@ static struct devinet_sysctl_table { ...@@ -1200,7 +1257,8 @@ static struct devinet_sysctl_table {
.data = &ipv4_devconf.no_policy, .data = &ipv4_devconf.no_policy,
.maxlen = sizeof(int), .maxlen = sizeof(int),
.mode = 0644, .mode = 0644,
.proc_handler =&proc_dointvec, .proc_handler = &ipv4_doint_and_flush,
.strategy = &ipv4_doint_and_flush_strategy,
}, },
}, },
.devinet_dev = { .devinet_dev = {
......
...@@ -77,14 +77,39 @@ static int ipv4_sysctl_forward_strategy(ctl_table *table, int *name, int nlen, ...@@ -77,14 +77,39 @@ static int ipv4_sysctl_forward_strategy(ctl_table *table, int *name, int nlen,
void *newval, size_t newlen, void *newval, size_t newlen,
void **context) void **context)
{ {
int *valp = table->data;
int new; int new;
if (!newval || !newlen)
return 0;
if (newlen != sizeof(int)) if (newlen != sizeof(int))
return -EINVAL; return -EINVAL;
if (get_user(new,(int *)newval))
if (get_user(new, (int *)newval))
return -EFAULT; return -EFAULT;
if (new != ipv4_devconf.forwarding)
if (new == *valp)
return 0;
if (oldval && oldlenp) {
size_t len;
if (get_user(len, oldlenp))
return -EFAULT;
if (len) {
if (len > table->maxlen)
len = table->maxlen;
if (copy_to_user(oldval, valp, len))
return -EFAULT;
if (put_user(len, oldlenp))
return -EFAULT;
}
}
inet_forward_change(); inet_forward_change();
return 0; /* caller does change again and handles handles oldval */ return 1;
} }
ctl_table ipv4_table[] = { ctl_table ipv4_table[] = {
......
...@@ -3363,7 +3363,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -3363,7 +3363,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
/* /*
* Header prediction. * Header prediction.
* The code losely follows the one in the famous * The code loosely follows the one in the famous
* "30 instruction TCP receive" Van Jacobson mail. * "30 instruction TCP receive" Van Jacobson mail.
* *
* Van's trick is to deposit buffers into socket queue * Van's trick is to deposit buffers into socket queue
......
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
EXPORT_SYMBOL(ipv6_addr_type); EXPORT_SYMBOL(ipv6_addr_type);
EXPORT_SYMBOL(icmpv6_send); EXPORT_SYMBOL(icmpv6_send);
EXPORT_SYMBOL(icmpv6_statistics);
EXPORT_SYMBOL(icmpv6_err_convert);
EXPORT_SYMBOL(ndisc_mc_map); EXPORT_SYMBOL(ndisc_mc_map);
EXPORT_SYMBOL(register_inet6addr_notifier); EXPORT_SYMBOL(register_inet6addr_notifier);
EXPORT_SYMBOL(unregister_inet6addr_notifier); EXPORT_SYMBOL(unregister_inet6addr_notifier);
......
...@@ -356,13 +356,10 @@ EXPORT_SYMBOL_GPL(xfrm_calg_get_byid); ...@@ -356,13 +356,10 @@ EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname); EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname); EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname); EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) EXPORT_SYMBOL_GPL(skb_icv_walk);
EXPORT_SYMBOL_GPL(skb_ah_walk);
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
EXPORT_SYMBOL_GPL(skb_cow_data); EXPORT_SYMBOL_GPL(skb_cow_data);
EXPORT_SYMBOL_GPL(pskb_put); EXPORT_SYMBOL_GPL(pskb_put);
EXPORT_SYMBOL_GPL(skb_icv_walk);
EXPORT_SYMBOL_GPL(skb_to_sgvec); EXPORT_SYMBOL_GPL(skb_to_sgvec);
#endif #endif
......
...@@ -56,7 +56,7 @@ ...@@ -56,7 +56,7 @@
CSZ presents a more precise but less flexible and less efficient CSZ presents a more precise but less flexible and less efficient
approach. As I understand it, the main idea is to create approach. As I understand it, the main idea is to create
WFQ flows for each guaranteed service and to allocate WFQ flows for each guaranteed service and to allocate
the rest of bandwith to dummy flow-0. Flow-0 comprises the rest of bandwidth to dummy flow-0. Flow-0 comprises
the predictive services and the best effort traffic; the predictive services and the best effort traffic;
it is handled by a priority scheduler with the highest it is handled by a priority scheduler with the highest
priority band allocated for predictive services, and the rest --- priority band allocated for predictive services, and the rest ---
......
...@@ -347,17 +347,17 @@ static struct dentry_operations sockfs_dentry_operations = { ...@@ -347,17 +347,17 @@ static struct dentry_operations sockfs_dentry_operations = {
/* /*
* Obtains the first available file descriptor and sets it up for use. * Obtains the first available file descriptor and sets it up for use.
* *
* This functions creates file structure and maps it to fd space * This function creates file structure and maps it to fd space
* of current process. On success it returns file descriptor * of current process. On success it returns file descriptor
* and file struct implicitly stored in sock->file. * and file struct implicitly stored in sock->file.
* Note that another thread may close file descriptor before we return * Note that another thread may close file descriptor before we return
* from this function. We use the fact that now we do not refer * from this function. We use the fact that now we do not refer
* to socket after mapping. If one day we will need it, this * to socket after mapping. If one day we will need it, this
* function will inincrement ref. count on file by 1. * function will increment ref. count on file by 1.
* *
* In any case returned fd MAY BE not valid! * In any case returned fd MAY BE not valid!
* This race condition is inavoidable * This race condition is unavoidable
* with shared fd spaces, we cannot solve is inside kernel, * with shared fd spaces, we cannot solve it inside kernel,
* but we take care of internal coherence yet. * but we take care of internal coherence yet.
*/ */
......
...@@ -440,83 +440,6 @@ int xfrm_count_enc_supported(void) ...@@ -440,83 +440,6 @@ int xfrm_count_enc_supported(void)
return n; return n;
} }
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
void skb_ah_walk(const struct sk_buff *skb,
struct crypto_tfm *tfm, icv_update_fn_t icv_update)
{
int offset = 0;
int len = skb->len;
int start = skb->len - skb->data_len;
int i, copy = start - offset;
struct scatterlist sg;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
sg.page = virt_to_page(skb->data + offset);
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
icv_update(tfm, &sg, 1);
if ((len -= copy) == 0)
return;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg.page = frag->page;
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
icv_update(tfm, &sg, 1);
if (!(len -= copy))
return;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_ah_walk(list, tfm, icv_update);
if ((len -= copy) == 0)
return;
offset += copy;
}
start = end;
}
}
if (len)
BUG();
}
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
/* Move to common area: it is shared with AH. */ /* Move to common area: it is shared with AH. */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
...@@ -591,6 +514,7 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, ...@@ -591,6 +514,7 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
BUG(); BUG();
} }
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
/* Looking generic it is not used in another places. */ /* Looking generic it is not used in another places. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment