Commit fca86803 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/jgarzik/net-drivers-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents ef8b1c5e 3a025a17
@@ -546,7 +546,7 @@ config CPU_FREQ_SA1110
 	if (CPU_FREQ_SA1100 || CPU_FREQ_SA1110)

 config CPU_FREQ_GOV_USERSPACE
-	bool
+	tristate
 	depends on CPU_FREQ
 	default y
@@ -1027,9 +1027,6 @@ source "drivers/scsi/Kconfig"
 endmenu

-#if [ "$CONFIG_ARCH_CLPS711X" = "y" ]; then
-#   source drivers/ssi/Config.in
-#fi
-
 source "drivers/ieee1394/Kconfig"

 source "drivers/message/i2o/Kconfig"
...
@@ -19,10 +19,6 @@ OBJS		+= ll_char_wr.o font.o
 CFLAGS_misc.o := -DPARAMS_PHYS=$(PARAMS_PHYS)
 endif

-ifeq ($(CONFIG_ARCH_NETWINDER),y)
-OBJS		+= head-netwinder.o
-endif
-
 ifeq ($(CONFIG_ARCH_SHARK),y)
 OBJS		+= head-shark.o ofw-shark.o
 endif
...
@@ -6,6 +6,7 @@
  * Bits taken from various places.
  */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
@@ -61,7 +62,7 @@ void pcibios_report_status(u_int status_mask, int warn)
 * Bug 3 is responsible for the sound DMA grinding to a halt.  We now
 * live with bug 2.
 */
-static void __init pci_fixup_83c553(struct pci_dev *dev)
+static void __devinit pci_fixup_83c553(struct pci_dev *dev)
 {
 	/*
	 * Set memory region to start at address 0, and enable IO
@@ -112,7 +113,7 @@ static void __init pci_fixup_83c553(struct pci_dev *dev)
 	outb(0x08, 0x4d1);
 }

-static void __init pci_fixup_unassign(struct pci_dev *dev)
+static void __devinit pci_fixup_unassign(struct pci_dev *dev)
 {
 	dev->resource[0].end -= dev->resource[0].start;
 	dev->resource[0].start = 0;
@@ -123,7 +124,7 @@ static void __init pci_fixup_unassign(struct pci_dev *dev)
 * if it is the host bridge by marking it as such.  These resources are of
 * no consequence to the PCI layer (they are handled elsewhere).
 */
-static void __init pci_fixup_dec21285(struct pci_dev *dev)
+static void __devinit pci_fixup_dec21285(struct pci_dev *dev)
 {
 	int i;
@@ -141,7 +142,7 @@ static void __init pci_fixup_dec21285(struct pci_dev *dev)
 /*
 * PCI IDE controllers use non-standard I/O port decoding, respect it.
 */
-static void __init pci_fixup_ide_bases(struct pci_dev *dev)
+static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
 {
 	struct resource *r;
 	int i;
@@ -161,7 +162,7 @@ static void __init pci_fixup_ide_bases(struct pci_dev *dev)
 /*
 * Put the DEC21142 to sleep
 */
-static void __init pci_fixup_dec21142(struct pci_dev *dev)
+static void __devinit pci_fixup_dec21142(struct pci_dev *dev)
 {
 	pci_write_config_dword(dev, 0x40, 0x80000000);
 }
@@ -182,7 +183,7 @@ static void __init pci_fixup_dec21142(struct pci_dev *dev)
 * functional.  However, The CY82C693U _does not work_ in bus
 * master mode without locking the PCI bus solid.
 */
-static void __init pci_fixup_cy82c693(struct pci_dev *dev)
+static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
 {
 	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
 		u32 base0, base1;
@@ -511,6 +512,8 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
 			panic("PCI: unable to scan bus!");

 		busnr = sys->bus->subordinate + 1;
+
+		list_add(&sys->node, &hw->buses);
 	} else {
 		kfree(sys);
 		if (ret < 0)
@@ -521,17 +524,36 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
 void __init pci_common_init(struct hw_pci *hw)
 {
+	struct pci_sys_data *sys;
+
+	INIT_LIST_HEAD(&hw->buses);
+
 	if (hw->preinit)
 		hw->preinit();
 	pcibios_init_hw(hw);
 	if (hw->postinit)
 		hw->postinit();

-	/*
-	 * Assign any unassigned resources.
-	 */
-	pci_assign_unassigned_resources();
 	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
+
+	list_for_each_entry(sys, &hw->buses, node) {
+		struct pci_bus *bus = sys->bus;
+
+		/*
+		 * Size the bridge windows.
+		 */
+		pci_bus_size_bridges(bus);
+
+		/*
+		 * Assign resources.
+		 */
+		pci_bus_assign_resources(bus);
+
+		/*
+		 * Tell drivers about devices found.
+		 */
+		pci_bus_add_devices(bus);
+	}
 }

 char * __init pcibios_setup(char *str)
...
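For orientation, here is a hedged sketch (not part of this commit) of how an ARM platform is expected to hook into the reworked pci_common_init(): the platform fills in a struct hw_pci, and the common code above then walks hw->buses to size bridge windows, assign resources and register the devices on each scanned bus. The foo_* names are placeholders, not real kernel symbols.

#include <linux/init.h>
#include <linux/pci.h>
#include <asm/mach/pci.h>

/* Platform-provided hooks (hypothetical): claim controller windows and
 * scan the root bus, typically via pci_scan_bus(). */
extern int foo_pci_setup(int nr, struct pci_sys_data *sys);
extern struct pci_bus *foo_pci_scan(int nr, struct pci_sys_data *sys);
extern int foo_map_irq(struct pci_dev *dev, u8 slot, u8 pin);

static struct hw_pci foo_pci __initdata = {
	.nr_controllers	= 1,		/* one host bridge */
	.setup		= foo_pci_setup,
	.scan		= foo_pci_scan,
	.map_irq	= foo_map_irq,
};

static int __init foo_pci_init(void)
{
	pci_common_init(&foo_pci);	/* sizes, assigns, adds devices per bus */
	return 0;
}
subsys_initcall(foo_pci_init);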
@@ -562,7 +562,8 @@ ecard_dump_irq_state(ecard_t *ec)

 static void ecard_check_lockup(struct irqdesc *desc)
 {
-	static int last, lockup;
+	static unsigned long last;
+	static int lockup;
 	ecard_t *ec;

 	/*
...
@@ -89,7 +89,7 @@ static inline void do_profile(struct pt_regs *regs)
 	}
 }

-static long next_rtc_update;
+static unsigned long next_rtc_update;

 /*
 * If we have an externally synchronized linux clock, then update
...
@@ -326,7 +326,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
 	dump_mem(KERN_CRIT "Vectors: ", vectors, vectors + 0x40);
 	dump_mem(KERN_CRIT "Stubs: ", vectors + 0x200, vectors + 0x4b8);

-	die("Oops", regs, 0);
+	die("Oops - bad mode", regs, 0);
 	local_irq_disable();
 	panic("bad mode");
 }
@@ -354,7 +354,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
 		       (thumb_mode(regs) ? 2 : 4);

 	force_sig_info(SIGILL, &info, current);
-	die_if_kernel("Oops", regs, n);
+	die_if_kernel("Oops - bad syscall", regs, n);
 	return regs->ARM_r0;
 }
@@ -472,7 +472,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		       (thumb_mode(regs) ? 2 : 4);

 	force_sig_info(SIGILL, &info, current);
-	die_if_kernel("Oops", regs, no);
+	die_if_kernel("Oops - bad syscall(2)", regs, no);
 	return 0;
 }
...
@@ -8,19 +8,22 @@
 */
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
 #include <linux/init.h>

+#include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/irq.h>
 #include <asm/hardware.h>
 #include <asm/hardware/iomd.h>
 #include <asm/io.h>
-#include <asm/page.h>
-#include <asm/proc/domain.h>
-#include <asm/setup.h>
+#include <asm/irq.h>
 #include <asm/mach-types.h>

-static void cl7500_mask_irq_ack_a(unsigned int irq)
+static void cl7500_ack_irq_a(unsigned int irq)
 {
 	unsigned int val, mask;
@@ -48,6 +51,12 @@ static void cl7500_unmask_irq_a(unsigned int irq)
 	iomd_writeb(val | mask, IOMD_IRQMASKA);
 }

+static struct irqchip clps7500_a_chip = {
+	.ack	= cl7500_ack_irq_a,
+	.mask	= cl7500_mask_irq_a,
+	.unmask	= cl7500_unmask_irq_a,
+};
+
 static void cl7500_mask_irq_b(unsigned int irq)
 {
 	unsigned int val, mask;
@@ -66,6 +75,12 @@ static void cl7500_unmask_irq_b(unsigned int irq)
 	iomd_writeb(val | mask, IOMD_IRQMASKB);
 }

+static struct irqchip clps7500_b_chip = {
+	.ack	= cl7500_mask_irq_b,
+	.mask	= cl7500_mask_irq_b,
+	.unmask	= cl7500_unmask_irq_b,
+};
+
 static void cl7500_mask_irq_c(unsigned int irq)
 {
 	unsigned int val, mask;
@@ -84,6 +99,11 @@ static void cl7500_unmask_irq_c(unsigned int irq)
 	iomd_writeb(val | mask, IOMD_IRQMASKC);
 }

+static struct irqchip clps7500_c_chip = {
+	.ack	= cl7500_mask_irq_c,
+	.mask	= cl7500_mask_irq_c,
+	.unmask	= cl7500_unmask_irq_c,
+};
+
 static void cl7500_mask_irq_d(unsigned int irq)
 {
@@ -103,6 +123,12 @@ static void cl7500_unmask_irq_d(unsigned int irq)
 	iomd_writeb(val | mask, IOMD_IRQMASKD);
 }

+static struct irqchip clps7500_d_chip = {
+	.ack	= cl7500_mask_irq_d,
+	.mask	= cl7500_mask_irq_d,
+	.unmask	= cl7500_unmask_irq_d,
+};
+
 static void cl7500_mask_irq_dma(unsigned int irq)
 {
 	unsigned int val, mask;
@@ -121,6 +147,12 @@ static void cl7500_unmask_irq_dma(unsigned int irq)
 	iomd_writeb(val | mask, IOMD_DMAMASK);
 }

+static struct irqchip clps7500_dma_chip = {
+	.ack	= cl7500_mask_irq_dma,
+	.mask	= cl7500_mask_irq_dma,
+	.unmask	= cl7500_unmask_irq_dma,
+};
+
 static void cl7500_mask_irq_fiq(unsigned int irq)
 {
 	unsigned int val, mask;
@@ -139,6 +171,22 @@ static void cl7500_unmask_irq_fiq(unsigned int irq)
 	iomd_writeb(val | mask, IOMD_FIQMASK);
 }

+static struct irqchip clps7500_fiq_chip = {
+	.ack	= cl7500_mask_irq_fiq,
+	.mask	= cl7500_mask_irq_fiq,
+	.unmask	= cl7500_unmask_irq_fiq,
+};
+
+static void cl7500_no_action(unsigned int irq)
+{
+}
+
+static struct irqchip clps7500_no_chip = {
+	.ack	= cl7500_no_action,
+	.mask	= cl7500_no_action,
+	.unmask	= cl7500_no_action,
+};
+
 static void no_action(int cpl, void *dev_id, struct pt_regs *regs)
 {
 }
@@ -147,7 +195,7 @@ static struct irqaction irq_isa = { no_action, 0, 0, "isa", NULL, NULL };

 static void __init clps7500_init_irq(void)
 {
-	int irq;
+	unsigned int irq, flags;

 	iomd_writeb(0, IOMD_IRQMASKA);
 	iomd_writeb(0, IOMD_IRQMASKB);
@@ -155,64 +203,58 @@ static void __init clps7500_init_irq(void)
 	iomd_writeb(0, IOMD_DMAMASK);

 	for (irq = 0; irq < NR_IRQS; irq++) {
+		flags = IRQF_VALID;
+
+		if (irq <= 6 || (irq >= 9 && irq <= 15) ||
+		    (irq >= 48 && irq <= 55))
+			flags |= IRQF_PROBE;
+
 		switch (irq) {
-		case 0 ... 6:
-			irq_desc[irq].probe_ok = 1;
-		case 7:
-			irq_desc[irq].valid = 1;
-			irq_desc[irq].mask_ack = cl7500_mask_irq_ack_a;
-			irq_desc[irq].mask = cl7500_mask_irq_a;
-			irq_desc[irq].unmask = cl7500_unmask_irq_a;
+		case 0 ... 7:
+			set_irq_chip(irq, &clps7500_a_chip);
+			set_irq_handler(irq, do_level_IRQ);
+			set_irq_flags(irq, flags);
 			break;

-		case 9 ... 15:
-			irq_desc[irq].probe_ok = 1;
-		case 8:
-			irq_desc[irq].valid = 1;
-			irq_desc[irq].mask_ack = cl7500_mask_irq_b;
-			irq_desc[irq].mask = cl7500_mask_irq_b;
-			irq_desc[irq].unmask = cl7500_unmask_irq_b;
+		case 8 ... 15:
+			set_irq_chip(irq, &clps7500_b_chip);
+			set_irq_handler(irq, do_level_IRQ);
+			set_irq_flags(irq, flags);
 			break;

 		case 16 ... 22:
-			irq_desc[irq].valid = 1;
-			irq_desc[irq].mask_ack = cl7500_mask_irq_dma;
-			irq_desc[irq].mask = cl7500_mask_irq_dma;
-			irq_desc[irq].unmask = cl7500_unmask_irq_dma;
+			set_irq_chip(irq, &clps7500_dma_chip);
+			set_irq_handler(irq, do_level_IRQ);
+			set_irq_flags(irq, flags);
 			break;

 		case 24 ... 31:
-			irq_desc[irq].valid = 1;
-			irq_desc[irq].mask_ack = cl7500_mask_irq_c;
-			irq_desc[irq].mask = cl7500_mask_irq_c;
-			irq_desc[irq].unmask = cl7500_unmask_irq_c;
+			set_irq_chip(irq, &clps7500_c_chip);
+			set_irq_handler(irq, do_level_IRQ);
+			set_irq_flags(irq, flags);
 			break;

 		case 40 ... 47:
-			irq_desc[irq].valid = 1;
-			irq_desc[irq].mask_ack = cl7500_mask_irq_d;
-			irq_desc[irq].mask = cl7500_mask_irq_d;
-			irq_desc[irq].unmask = cl7500_unmask_irq_d;
+			set_irq_chip(irq, &clps7500_d_chip);
+			set_irq_handler(irq, do_level_IRQ);
+			set_irq_flags(irq, flags);
 			break;

 		case 48 ... 55:
-			irq_desc[irq].valid = 1;
-			irq_desc[irq].probe_ok = 1;
-			irq_desc[irq].mask_ack = no_action;
-			irq_desc[irq].mask = no_action;
-			irq_desc[irq].unmask = no_action;
+			set_irq_chip(irq, &clps7500_no_chip);
+			set_irq_handler(irq, do_level_IRQ);
+			set_irq_flags(irq, flags);
 			break;

 		case 64 ... 72:
-			irq_desc[irq].valid = 1;
-			irq_desc[irq].mask_ack = cl7500_mask_irq_fiq;
-			irq_desc[irq].mask = cl7500_mask_irq_fiq;
-			irq_desc[irq].unmask = cl7500_unmask_irq_fiq;
+			set_irq_chip(irq, &clps7500_fiq_chip);
+			set_irq_handler(irq, do_level_IRQ);
+			set_irq_flags(irq, flags);
 			break;
 		}
 	}

-	setup_arm_irq(IRQ_ISA, &irq_isa);
+	setup_irq(IRQ_ISA, &irq_isa);
 }

 static struct map_desc cl7500_io_desc[] __initdata = {
...
@@ -39,7 +39,8 @@ static void ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 * @data: 16-bit, MSB justified data to write.
 *
 * Wait for a free entry in the SSP transmit FIFO, and write a data
- * word to the SSP port.
+ * word to the SSP port.  Wait for the SSP port to start sending
+ * the data.
 *
 * The caller is expected to perform the necessary locking.
 *
@@ -54,6 +55,9 @@ int ssp_write_word(u16 data)

 	Ser4SSDR = data;

+	while (!(Ser4SSSR & SSSR_BSY))
+		cpu_relax();
+
 	return 0;
 }
...
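A rough usage sketch of these SSP helpers (their prototypes appear in the new ssp.h further down this diff). With the change above, ssp_write_word() now also spins until the port reports busy, so a caller can wait for the reply word straight away; the foo_ name and error handling are illustrative, not taken from the driver.

#include <linux/types.h>
#include <linux/errno.h>

int ssp_write_word(u16 data);
int ssp_read_word(void);

/* Exchange one 16-bit word over the SA-1100 SSP (caller holds any lock). */
static int foo_ssp_exchange(u16 out)
{
	int ret;

	ret = ssp_write_word(out);	/* waits for FIFO space, then for BSY */
	if (ret < 0)
		return ret;

	return ssp_read_word();		/* word clocked in during the transfer */
}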
@@ -120,13 +120,28 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
 	return pci_scan_bus(busnum, pci_root_ops, NULL);
 }

+extern u8 pci_cache_line_size;
+
 static int __init pcibios_init(void)
 {
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
 	if (!pci_root_ops) {
 		printk("PCI: System does not support PCI\n");
 		return 0;
 	}

+	/*
+	 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
+	 * and P4. It's also good for 386/486s (which actually have 16)
+	 * as quite a few PCI devices do not support smaller values.
+	 */
+	pci_cache_line_size = 32 >> 2;
+	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
+		pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
+	else if (c->x86 > 6)
+		pci_cache_line_size = 128 >> 2;	/* P4 */
+
 	pcibios_resource_survey();

 #ifdef CONFIG_PCI_BIOS
...
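As a reminder of the encoding used above: the PCI_CACHE_LINE_SIZE register holds the line size in 32-bit dwords, so the kernel stores bytes >> 2 and a readback is multiplied by 4. A trivial, stand-alone check of the arithmetic:

#include <assert.h>

int main(void)
{
	assert((32 >> 2) == 8);		/* default: 32-byte line -> 8 dwords */
	assert((64 >> 2) == 16);	/* AMD K7/K8 */
	assert((128 >> 2) == 32);	/* Intel P4 */
	assert((8 << 2) == 32);		/* reading back: dwords * 4 = bytes */
	return 0;
}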
@@ -2521,19 +2521,19 @@ void acornscsi_intr(int irq, void *dev_id, struct pt_regs *regs)
 */
 int acornscsi_queuecmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
 {
-    AS_Host *host = (AS_Host *)SCpnt->host->hostdata;
+    AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;

     if (!done) {
	/* there should be some way of rejecting errors like this without panicing... */
	panic("scsi%d: queuecommand called with NULL done function [cmd=%p]",
-	      SCpnt->host->host_no, SCpnt);
+	      host->host->host_no, SCpnt);
	return -EINVAL;
     }

 #if (DEBUG & DEBUG_NO_WRITE)
     if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
	printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
-	       SCpnt->host->host_no, '0' + SCpnt->device->id);
+	       host->host->host_no, '0' + SCpnt->device->id);
	SCpnt->result = DID_NO_CONNECT << 16;
	done(SCpnt);
	return 0;
@@ -2695,7 +2695,7 @@ acornscsi_do_abort(AS_Host *host, Scsi_Cmnd *SCpnt)
 */
 int acornscsi_abort(Scsi_Cmnd *SCpnt)
 {
-    AS_Host *host = (AS_Host *) SCpnt->host->hostdata;
+    AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata;
     int result;

     host->stats.aborts += 1;
@@ -2782,7 +2782,7 @@ int acornscsi_abort(Scsi_Cmnd *SCpnt)
 */
 int acornscsi_reset(Scsi_Cmnd *SCpnt, unsigned int reset_flags)
 {
-    AS_Host *host = (AS_Host *)SCpnt->host->hostdata;
+    AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
     Scsi_Cmnd *SCptr;

     host->stats.resets += 1;
...
@@ -68,8 +68,7 @@ static inline void put_next_SCp_byte(Scsi_Pointer *SCp, unsigned char c)
 static inline void init_SCp(Scsi_Cmnd *SCpnt)
 {
-	SCpnt->SCp.Message = 0;
-	SCpnt->SCp.Status = 0;
+	memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));

 	if (SCpnt->use_sg) {
 		unsigned long len = 0;
@@ -97,8 +96,6 @@ static inline void init_SCp(Scsi_Cmnd *SCpnt)
 		SCpnt->request_bufflen = len;
 #endif
 	} else {
-		SCpnt->SCp.buffer = NULL;
-		SCpnt->SCp.buffers_residual = 0;
 		SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
 		SCpnt->SCp.this_residual = SCpnt->request_bufflen;
 	}
...
@@ -354,6 +354,7 @@ static int erase_block(int nBlock)
 {
 	volatile unsigned int c1;
 	volatile unsigned char *pWritePtr;
+	unsigned long timeout;
 	int temp, temp1;

 	/*
@@ -406,9 +407,9 @@ static int erase_block(int nBlock)
 	/*
	 * wait while erasing in process (up to 10 sec)
	 */
-	temp = jiffies + 10 * HZ;
+	timeout = jiffies + 10 * HZ;
 	c1 = 0;
-	while (!(c1 & 0x80) && time_before(jiffies, temp)) {
+	while (!(c1 & 0x80) && time_before(jiffies, timeout)) {
 		flash_wait(HZ / 100);
 		/*
		 * read any address
@@ -466,8 +467,8 @@ static int write_block(unsigned long p, const char *buf, int count)
 	unsigned char *pWritePtr;
 	unsigned int uAddress;
 	unsigned int offset;
-	unsigned int timeout;
-	unsigned int timeout1;
+	unsigned long timeout;
+	unsigned long timeout1;

 	/*
	 * red LED == write
...
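The switch from int to unsigned long above matters because jiffies deadlines rely on unsigned wraparound arithmetic, and time_before() expects both sides in jiffies' own type; on 64-bit platforms an int would also truncate the value. A minimal sketch of the polling pattern the driver uses (the ready() callback and the helper name are placeholders):

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <asm/processor.h>

/* Poll a hardware ready predicate for up to 10 seconds. */
static int foo_wait_ready(int (*ready)(void))
{
	unsigned long timeout = jiffies + 10 * HZ;	/* deadline in jiffies */

	while (!ready()) {
		if (!time_before(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();		/* busy-wait; a real driver may sleep */
	}
	return 0;
}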
@@ -584,6 +584,9 @@ pci_set_master(struct pci_dev *dev)
 }

 #ifndef HAVE_ARCH_PCI_MWI
+/* This can be overridden by arch code. */
+u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;
+
 /**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
@@ -597,32 +600,29 @@ pci_set_master(struct pci_dev *dev)
 static int
 pci_generic_prep_mwi(struct pci_dev *dev)
 {
-	int rc = 0;
-	u8 cache_size;
+	u8 cacheline_size;

-	/*
-	 * Looks like this is necessary to deal with on all architectures,
-	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
-	 * Ie. having two NICs in the machine, one will have the cache
-	 * line set at boot time, the other will not.
-	 */
-	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cache_size);
-	cache_size <<= 2;
-	if (cache_size != SMP_CACHE_BYTES) {
-		printk(KERN_WARNING "PCI: %s PCI cache line size set "
-		       "incorrectly (%i bytes) by BIOS/FW, ",
-		       dev->slot_name, cache_size);
-		if (cache_size > SMP_CACHE_BYTES) {
-			printk("expecting %i\n", SMP_CACHE_BYTES);
-			rc = -EINVAL;
-		} else {
-			printk("correcting to %i\n", SMP_CACHE_BYTES);
-			pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
-					      SMP_CACHE_BYTES >> 2);
-		}
-	}
+	if (!pci_cache_line_size)
+		return -EINVAL;		/* The system doesn't support MWI. */

-	return rc;
+	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
+	   equal to or multiple of the right value. */
+	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
+	if (cacheline_size >= pci_cache_line_size &&
+	    (cacheline_size % pci_cache_line_size) == 0)
+		return 0;
+
+	/* Write the correct value. */
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
+	/* Read it back. */
+	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
+	if (cacheline_size == pci_cache_line_size)
+		return 0;
+
+	printk(KERN_WARNING "PCI: cache line size of %d is not supported "
+	       "by device %s\n", pci_cache_line_size << 2, dev->slot_name);
+	return -EINVAL;
 }
 #endif /* !HAVE_ARCH_PCI_MWI */
...
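For reference, the consumer of this helper is pci_set_mwi(), which (absent an arch override of HAVE_ARCH_PCI_MWI) calls pci_generic_prep_mwi() and now fails cleanly when no usable cache line size can be programmed. A hedged sketch of the driver-side calling pattern; the foo_ names are placeholders:

#include <linux/pci.h>

static int foo_setup_dma(struct pci_dev *pdev)
{
	pci_set_master(pdev);

	/* Memory-Write-Invalidate is an optimisation only: if the device or
	 * the programmed cache line size cannot support it, carry on without. */
	if (pci_set_mwi(pdev))
		printk(KERN_INFO "foo: no MWI for device %s, continuing without\n",
		       pdev->slot_name);

	return 0;
}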
@@ -36,6 +36,13 @@

 #define ROUND_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

+/*
+ * FIXME: IO should be max 256 bytes.  However, since we may
+ * have a P2P bridge below a cardbus bridge, we need 4K.
+ */
+#define CARDBUS_IO_SIZE		(4096)
+#define CARDBUS_MEM_SIZE	(32*1024*1024)
+
 static int __devinit
 pbus_assign_resources_sorted(struct pci_bus *bus)
 {
@@ -67,12 +74,67 @@ pbus_assign_resources_sorted(struct pci_bus *bus)
 	return found_vga;
 }

+static void __devinit
+pci_setup_cardbus(struct pci_bus *bus)
+{
+	struct pci_dev *bridge = bus->self;
+	struct pci_bus_region region;
+
+	printk("PCI: Bus %d, cardbus bridge: %s\n",
+		bus->number, bridge->slot_name);
+
+	pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
+	if (bus->resource[0]->flags & IORESOURCE_IO) {
+		/*
+		 * The IO resource is allocated a range twice as large as it
+		 * would normally need.  This allows us to set both IO regs.
+		 */
+		printk("  IO window: %08lx-%08lx\n",
+			region.start, region.end);
+		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
+					region.start);
+		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
+					region.end);
+	}
+
+	pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
+	if (bus->resource[1]->flags & IORESOURCE_IO) {
+		printk("  IO window: %08lx-%08lx\n",
+			region.start, region.end);
+		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
+					region.start);
+		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
+					region.end);
+	}
+
+	pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
+	if (bus->resource[2]->flags & IORESOURCE_MEM) {
+		printk("  PREFETCH window: %08lx-%08lx\n",
+			region.start, region.end);
+		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
+					region.start);
+		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
+					region.end);
+	}
+
+	pcibios_resource_to_bus(bridge, &region, bus->resource[3]);
+	if (bus->resource[3]->flags & IORESOURCE_MEM) {
+		printk("  MEM window: %08lx-%08lx\n",
+			region.start, region.end);
+		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
+					region.start);
+		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
+					region.end);
+	}
+}
+
 /* Initialize bridges with base/limit values we have collected.
    PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
    requires that if there is no I/O ports or memory behind the
    bridge, corresponding range must be turned off by writing base
    value greater than limit to the bridge's base/limit registers. */
-static void __devinit pci_setup_bridge(struct pci_bus *bus)
+static void __devinit
+pci_setup_bridge(struct pci_bus *bus)
 {
 	struct pci_dev *bridge = bus->self;
 	struct pci_bus_region region;
@@ -154,9 +216,6 @@ pci_bridge_check_ranges(struct pci_bus *bus)
 	struct pci_dev *bridge = bus->self;
 	struct resource *b_res;

-	if (!bridge || (bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
-		return;
-
 	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
 	b_res[1].flags |= IORESOURCE_MEM;
@@ -184,6 +243,26 @@ pci_bridge_check_ranges(struct pci_bus *bus)
 		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
 }

+/* Helper function for sizing routines: find first available
+   bus resource of a given type.  Note: we intentionally skip
+   the bus resources which have already been assigned (that is,
+   have non-NULL parent resource). */
+static struct resource * __devinit
+find_free_bus_resource(struct pci_bus *bus, unsigned long type)
+{
+	int i;
+	struct resource *r;
+	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+				  IORESOURCE_PREFETCH;
+
+	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+		r = bus->resource[i];
+		if (r && (r->flags & type_mask) == type && !r->parent)
+			return r;
+	}
+	return NULL;
+}
+
 /* Sizing the IO windows of the PCI-PCI bridge is trivial,
    since these windows have 4K granularity and the IO ranges
    of non-bridge PCI devices are limited to 256 bytes.
@@ -192,10 +271,10 @@ static void __devinit
 pbus_size_io(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
-	struct resource *b_res = bus->resource[0];
+	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
 	unsigned long size = 0, size1 = 0;

-	if (!(b_res->flags & IORESOURCE_IO))
+	if (!b_res)
 		return;

 	list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -215,9 +294,6 @@ pbus_size_io(struct pci_bus *bus)
 			else
 				size1 += r_size;
 		}
-
-		/* ??? Reserve some resources for CardBus. */
-		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS)
-			size1 += 4*1024;
 	}

 	/* To be fixed in 2.5: we should have sort of HAVE_ISA
	   flag in the struct pci_bus. */
@@ -236,15 +312,17 @@ pbus_size_io(struct pci_bus *bus)

 /* Calculate the size of the bus and minimal alignment which
    guarantees that all child resources fit in this size. */
-static void __devinit
+static int __devinit
 pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type)
 {
 	struct pci_dev *dev;
 	unsigned long min_align, align, size;
 	unsigned long aligns[12];	/* Alignments from 1Mb to 2Gb */
 	int order, max_order;
-	struct resource *b_res = (type & IORESOURCE_PREFETCH) ?
-				 bus->resource[2] : bus->resource[1];
+	struct resource *b_res = find_free_bus_resource(bus, type);
+
+	if (!b_res)
+		return 0;

 	memset(aligns, 0, sizeof(aligns));
 	max_order = 0;
@@ -280,11 +358,6 @@ pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type)
 			if (order > max_order)
 				max_order = order;
 		}
-
-		/* ??? Reserve some resources for CardBus. */
-		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
-			size += 1UL << 24;		/* 16 Mb */
-			aligns[24 - 20] += 1UL << 24;
-		}
 	}

 	align = 0;
@@ -301,38 +374,111 @@ pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type)
 	size = ROUND_UP(size, min_align);
 	if (!size) {
 		b_res->flags = 0;
-		return;
+		return 1;
 	}
 	b_res->start = min_align;
 	b_res->end = size + min_align - 1;
+	return 1;
+}
+
+static void __devinit
+pci_bus_size_cardbus(struct pci_bus *bus)
+{
+	struct pci_dev *bridge = bus->self;
+	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+	u16 ctrl;
+
+	/*
+	 * Reserve some resources for CardBus.  We reserve
+	 * a fixed amount of bus space for CardBus bridges.
+	 */
+	b_res[0].start = CARDBUS_IO_SIZE;
+	b_res[0].end = b_res[0].start + CARDBUS_IO_SIZE - 1;
+	b_res[0].flags |= IORESOURCE_IO;
+
+	b_res[1].start = CARDBUS_IO_SIZE;
+	b_res[1].end = b_res[1].start + CARDBUS_IO_SIZE - 1;
+	b_res[1].flags |= IORESOURCE_IO;
+
+	/*
+	 * Check whether prefetchable memory is supported
+	 * by this bridge.
+	 */
+	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
+		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
+		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
+		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+	}
+
+	/*
+	 * If we have prefetchable memory support, allocate
+	 * two regions.  Otherwise, allocate one region of
+	 * twice the size.
+	 */
+	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
+		b_res[2].start = CARDBUS_MEM_SIZE;
+		b_res[2].end = b_res[2].start + CARDBUS_MEM_SIZE - 1;
+		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+
+		b_res[3].start = CARDBUS_MEM_SIZE;
+		b_res[3].end = b_res[3].start + CARDBUS_MEM_SIZE - 1;
+		b_res[3].flags |= IORESOURCE_MEM;
+	} else {
+		b_res[3].start = CARDBUS_MEM_SIZE * 2;
+		b_res[3].end = b_res[3].start + CARDBUS_MEM_SIZE * 2 - 1;
+		b_res[3].flags |= IORESOURCE_MEM;
+	}
+}

 void __devinit
 pci_bus_size_bridges(struct pci_bus *bus)
 {
-	struct pci_bus *b;
-	unsigned long mask, type;
+	struct pci_dev *dev;
+	unsigned long mask, prefmask;

-	list_for_each_entry(b, &bus->children, node) {
-		pci_bus_size_bridges(b);
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		struct pci_bus *b = dev->subordinate;
+		if (!b)
+			continue;
+
+		switch (dev->class >> 8) {
+		case PCI_CLASS_BRIDGE_CARDBUS:
+			pci_bus_size_cardbus(b);
+			break;
+
+		case PCI_CLASS_BRIDGE_PCI:
+		default:
+			pci_bus_size_bridges(b);
+			break;
+		}
 	}

 	/* The root bus? */
 	if (!bus->self)
 		return;

-	pci_bridge_check_ranges(bus);
-
-	pbus_size_io(bus);
-
-	mask = type = IORESOURCE_MEM;
-	/* If the bridge supports prefetchable range, size it separately. */
-	if (bus->resource[2] &&
-	    bus->resource[2]->flags & IORESOURCE_PREFETCH) {
-		pbus_size_mem(bus, IORESOURCE_PREFETCH, IORESOURCE_PREFETCH);
-		mask |= IORESOURCE_PREFETCH;	/* Size non-prefetch only. */
+	switch (bus->self->class >> 8) {
+	case PCI_CLASS_BRIDGE_CARDBUS:
+		/* don't size cardbuses yet. */
+		break;
+
+	case PCI_CLASS_BRIDGE_PCI:
+		pci_bridge_check_ranges(bus);
+	default:
+		pbus_size_io(bus);
+
+		/* If the bridge supports prefetchable range, size it
+		   separately.  If it doesn't, or its prefetchable window
+		   has already been allocated by arch code, try
+		   non-prefetchable range for both types of PCI memory
+		   resources. */
+		mask = IORESOURCE_MEM;
+		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+		if (pbus_size_mem(bus, prefmask, prefmask))
+			mask = prefmask; /* Success, size non-prefetch only. */
+		pbus_size_mem(bus, mask, IORESOURCE_MEM);
+		break;
 	}
-	pbus_size_mem(bus, mask, type);
 }
 EXPORT_SYMBOL(pci_bus_size_bridges);
@@ -351,9 +497,24 @@ pci_bus_assign_resources(struct pci_bus *bus)
 	}

 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		b = dev->subordinate;
-		if (b) {
-			pci_bus_assign_resources(b);
-			pci_setup_bridge(b);
+		if (!b)
+			continue;
+
+		pci_bus_assign_resources(b);
+
+		switch (dev->class >> 8) {
+		case PCI_CLASS_BRIDGE_PCI:
+			pci_setup_bridge(b);
+			break;
+
+		case PCI_CLASS_BRIDGE_CARDBUS:
+			pci_setup_cardbus(b);
+			break;
+
+		default:
+			printk(KERN_INFO "PCI: not setting up bridge %s "
+			       "for bus %d\n", dev->slot_name, b->number);
+			break;
 		}
 	}
 }
...
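A small stand-alone illustration of the window-type matching that find_free_bus_resource() and the prefmask fallback above rely on: the candidate window's flags are masked down to the IO/MEM/PREFETCH bits and must match the requested type exactly, so a plain memory window never satisfies a prefetchable request. The flag values are copied from linux/ioport.h of this era:

#include <stdio.h>

#define IORESOURCE_IO		0x00000100
#define IORESOURCE_MEM		0x00000200
#define IORESOURCE_PREFETCH	0x00001000

static int window_matches(unsigned long flags, unsigned long type)
{
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	return (flags & type_mask) == type;
}

int main(void)
{
	unsigned long prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* A prefetchable bridge window satisfies the prefetchable request. */
	printf("%d\n", window_matches(IORESOURCE_MEM | IORESOURCE_PREFETCH, prefmask));
	/* A plain MEM window does not, so pbus_size_mem() returns 0 and
	 * pci_bus_size_bridges() retries with the non-prefetchable mask. */
	printf("%d\n", window_matches(IORESOURCE_MEM, prefmask));
	return 0;
}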
@@ -59,9 +59,7 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
 		reg = dev->rom_base_reg;
 	} else {
 		/* Hmm, non-standard resource. */
-		printk("PCI: trying to set non-standard region %s/%d\n",
-		       dev->slot_name, resno);
-		return;
+		BUG();
 	}

 	pci_write_config_dword(dev, reg, new);
@@ -141,7 +139,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 	if (ret) {
 		printk(KERN_ERR "PCI: Failed to allocate resource %d(%lx-%lx) for %s\n",
 		       resno, res->start, res->end, dev->slot_name);
-	} else {
+	} else if (resno < PCI_BRIDGE_RESOURCES) {
 		pci_update_resource(dev, res, resno);
 	}
...
@@ -239,4 +239,15 @@ DECLARE_IO(int,l,"")
 /* the following macro is deprecated */
 #define ioaddr(port)			__ioaddr((port))

+#define insb(p,d,l)			__raw_readsb(__ioaddr(p),d,l)
+#define insw(p,d,l)			__raw_readsw(__ioaddr(p),d,l)
+
+#define outsb(p,d,l)			__raw_writesb(__ioaddr(p),d,l)
+#define outsw(p,d,l)			__raw_writesw(__ioaddr(p),d,l)
+
+/*
+ * 1:1 mapping for ioremapped regions.
+ */
+#define __mem_pci(x)			(x)
+
 #endif
@@ -49,7 +49,6 @@ static unsigned long pxa_gettimeoffset (void)

 static void pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	long flags;
 	int next_match;

 	do_profile(regs);
@@ -63,11 +62,9 @@ static void pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	do {
 		do_leds();
 		do_set_rtc();
-		local_irq_save( flags );
 		do_timer(regs);
 		OSSR = OSSR_M0;  /* Clear match on timer 0 */
 		next_match = (OSMR0 += LATCH);
-		local_irq_restore( flags );
 	} while( (signed long)(next_match - OSCR) <= 0 );
 }
...
@@ -4,18 +4,12 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

-#define L1_CACHE_BYTES		32
-#define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define SMP_CACHE_BYTES		L1_CACHE_BYTES
+#define L1_CACHE_SHIFT		5
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

-#ifdef MODULE
-#define __cacheline_aligned	__attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned					\
-	__attribute__((__aligned__(L1_CACHE_BYTES),		\
-		       __section__(".data.cacheline_aligned")))
-#endif
-
-#define L1_CACHE_SHIFT_MAX	5	/* largest L1 which this arch supports */
+/*
+ * largest L1 which this arch supports
+ */
+#define L1_CACHE_SHIFT_MAX	5

 #endif
 /*
- * linux/arch/arm/boot/compressed/head-netwinder.S
+ *  ssp.h
  *
- * Copyright (C) 2000-2002 Russell King
+ *  Copyright (C) 2003 Russell King, All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-		.section ".start", "ax"
-
-		mov	r7, #5
-		mov	r8, #0
+#ifndef SSP_H
+#define SSP_H
+
+struct ssp_state {
+	unsigned int	cr0;
+	unsigned int	cr1;
+};
+
+int ssp_write_word(u16 data);
+int ssp_read_word(void);
+void ssp_flush(void);
+void ssp_enable(void);
+void ssp_disable(void);
+void ssp_save_state(struct ssp_state *ssp);
+void ssp_restore_state(struct ssp_state *ssp);
+
+int ssp_init(void);
+void ssp_exit(void);
+
+#endif
@@ -17,6 +17,7 @@
 #ifndef __ASSEMBLY__

 struct tag;
+struct meminfo;

 struct machine_desc {
 	/*
...
@@ -40,7 +40,6 @@ struct dma_struct {
 	unsigned int	dma_base;	/* Controller base address */
 	int		dma_irq;	/* Controller IRQ */
-	int		state;		/* Controller state */
 	struct scatterlist cur_sg;	/* Current controller buffer */

 	struct dma_ops	*d_ops;
...
@@ -12,6 +12,7 @@ struct pci_sys_data;
 struct pci_bus;

 struct hw_pci {
+	struct list_head buses;
 	int		nr_controllers;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
@@ -25,6 +26,7 @@ struct hw_pci {
 * Per-controller structure
 */
 struct pci_sys_data {
+	struct list_head node;
 	int		busnr;		/* primary bus number			*/
 	unsigned long	mem_offset;	/* bus->cpu memory mapping offset	*/
 	unsigned long	io_offset;	/* bus->cpu IO mapping offset		*/
...
@@ -63,6 +63,7 @@ extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 #define local_irq_save(x)					\
 	({							\
 		unsigned long temp;				\
+		(void) (&temp == &x);				\
 	__asm__ __volatile__(					\
 	"mrs	%0, cpsr		@ local_irq_save\n"	\
 "	orr	%1, %0, #128\n"					\
...
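The added (void) (&temp == &x) line is a compile-time type check: the pointer comparison is only warning-free when x has the same type as temp (unsigned long), so callers that pass an int flags variable to local_irq_save() get a diagnostic instead of silently truncating the saved PSR. A small user-space sketch of the same idiom, with a hypothetical foo_save():

/* Build with -Wall: the second call warns about distinct pointer types. */
#define foo_save(x)						\
	do {							\
		unsigned long temp = 0x80000000UL;		\
		(void) (&temp == &(x));	/* x must be unsigned long */ \
		(x) = temp;					\
	} while (0)

int main(void)
{
	unsigned long good;
	int bad;

	foo_save(good);		/* clean */
	foo_save(bad);		/* warning: comparison of distinct pointer types */
	return (int)good + bad;
}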