Commit f912c58a authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5_ppc64_work

into samba.org:/scratch/anton/for-alan
parents cdbab299 141a86bf
......@@ -37,6 +37,10 @@ config COMPAT
bool
default y
config FRAME_POINTER
bool
default y
source "init/Kconfig"
......
......@@ -141,11 +141,12 @@ chrp_setup_arch(void)
fwnmi_init();
#ifndef CONFIG_PPC_ISERIES
/* Find and initialize PCI host bridges */
/* iSeries needs to be done much later. */
#ifndef CONFIG_PPC_ISERIES
find_and_init_phbs();
#endif
eeh_init();
find_and_init_phbs();
#endif
/* Find the Open PIC if present */
root = find_path_device("/");
......
......@@ -393,7 +393,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
mm = &init_mm;
vsid = get_kernel_vsid(ea);
break;
case IO_UNMAPPED_REGION_ID:
case EEH_REGION_ID:
/*
* Should only be hit if there is an access to MMIO space
* which is protected by EEH.
......
......@@ -39,7 +39,6 @@
#include <asm/ppcdebug.h>
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/eeh.h>
#include "xics.h"
#include "open_pic.h"
......@@ -50,8 +49,6 @@
*******************************************************************/
struct pci_controller *alloc_phb(struct device_node *dev, char *model,
unsigned int addr_size_words) ;
static int rtas_fake_read(struct device_node *dn, int offset, int nbytes,
unsigned long *returnval);
/* RTAS tokens */
static int read_pci_config;
......@@ -74,8 +71,6 @@ static int rtas_read_config(struct device_node *dn, int where, int size, u32 *va
buid = dn->phb->buid;
if (buid) {
ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval, addr, buid >> 32, buid & 0xffffffff, size);
if (ret < 0|| (returnval == 0xffffffff))
ret = rtas_fake_read(dn, where, size, &returnval);
} else {
ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
}
......@@ -142,48 +137,6 @@ struct pci_ops rtas_pci_ops = {
rtas_pci_write_config
};
/*
* Handle the case where rtas refuses to do a pci config read.
* This currently only happens with some PHBs in which case we totally fake
* out the values (and call it a speedwagon -- something we could look up
* in the device tree).
*/
static int
rtas_fake_read(struct device_node *dn, int offset, int nbytes, unsigned long *returnval)
{
char *device_type = (char *)get_property(dn, "device_type", 0);
u32 *class_code = (u32 *)get_property(dn, "class-code", 0);
*returnval = ~0; /* float by default */
/* udbg_printf("rtas_fake_read dn=%p, offset=0x%02x, nbytes=%d, device_type=%s\n", dn, offset, nbytes, device_type ? device_type : "<none>"); */
if (device_type && strcmp(device_type, "pci") != 0)
return -3; /* Not a phb or bridge */
/* NOTE: class_code != NULL => EADS pci bridge. Else a PHB */
if (nbytes == 1) {
if (offset == PCI_HEADER_TYPE)
*returnval = 0x80; /* multifunction */
else if (offset == PCI_INTERRUPT_PIN || offset == PCI_INTERRUPT_LINE)
*returnval = 0;
} else if (nbytes == 2) {
if (offset == PCI_SUBSYSTEM_VENDOR_ID || offset == PCI_SUBSYSTEM_ID)
*returnval = 0;
else if (offset == PCI_COMMAND)
*returnval = PCI_COMMAND_PARITY|PCI_COMMAND_MASTER|PCI_COMMAND_MEMORY;
} else if (nbytes == 4) {
if (offset == PCI_VENDOR_ID)
*returnval = 0x1014 | ((class_code ? 0x8b : 0x102) << 16); /* a phb */
else if (offset == PCI_REVISION_ID)
*returnval = (class_code ? PCI_CLASS_BRIDGE_PCI : PCI_CLASS_BRIDGE_HOST) << 16; /* revs are zero */
else if ((offset >= PCI_BASE_ADDRESS_0 && offset <= PCI_BASE_ADDRESS_5) || offset == PCI_ROM_ADDRESS)
*returnval = 0;
}
/* printk("fake: %s nbytes=%d, offset=%lx ret=%lx\n", class_code ? "EADS" : "PHB", nbytes, offset, *returnval); */
return 0;
}
/******************************************************************
* pci_read_irq_line
*
......@@ -250,8 +203,6 @@ find_and_init_phbs(void)
ibm_read_pci_config = rtas_token("ibm,read-pci-config");
ibm_write_pci_config = rtas_token("ibm,write-pci-config");
eeh_init();
if (naca->interrupt_controller == IC_OPEN_PIC) {
opprop = (unsigned int *)get_property(find_path_device("/"),
"platform-open-pic", NULL);
......@@ -350,24 +301,16 @@ find_and_init_phbs(void)
res = &phb->io_resource;
res->name = Pci_Node->full_name;
res->flags = IORESOURCE_IO;
if (is_eeh_implemented()) {
if (!isa_io_base && has_isa) {
/* map a page for ISA ports. Not EEH protected. */
isa_io_base = (unsigned long)__ioremap(phb->io_base_phys, PAGE_SIZE, _PAGE_NO_CACHE);
}
res->start = phb->io_base_virt = eeh_token(index, 0, 0, 0);
res->end = eeh_token(index, 0xff, 0xff, 0xffffffff);
} else {
phb->io_base_virt = ioremap(phb->io_base_phys, range.size);
if (!pci_io_base) {
pci_io_base = (unsigned long)phb->io_base_virt;
if (has_isa)
isa_io_base = pci_io_base;
}
res->start = ((((unsigned long) range.child_addr.a_mid) << 32) | (range.child_addr.a_lo));
res->start += (unsigned long)phb->io_base_virt;
res->end = res->start + range.size - 1;
phb->io_base_virt = __ioremap(phb->io_base_phys, range.size, _PAGE_NO_CACHE);
printk("back\n");
if (!pci_io_base) {
pci_io_base = (unsigned long)phb->io_base_virt;
if (has_isa)
isa_io_base = pci_io_base;
}
res->start = ((((unsigned long) range.child_addr.a_mid) << 32) | (range.child_addr.a_lo));
res->start += (unsigned long)phb->io_base_virt - pci_io_base;
res->end = res->start + range.size - 1;
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
......@@ -391,13 +334,8 @@ find_and_init_phbs(void)
++memno;
res->name = Pci_Node->full_name;
res->flags = IORESOURCE_MEM;
if (is_eeh_implemented()) {
res->start = eeh_token(index, 0, 0, 0);
res->end = eeh_token(index, 0xff, 0xff, 0xffffffff);
} else {
res->start = range.parent_addr;
res->end = range.parent_addr + range.size - 1;
}
res->start = range.parent_addr;
res->end = range.parent_addr + range.size - 1;
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
......@@ -574,7 +512,6 @@ fixup_resources(struct pci_dev *dev)
int i;
struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
struct device_node *dn;
unsigned long eeh_disable_bit;
/* Add IBM loc code (slot) as a prefix to the device names for service */
dn = pci_device_to_OF_node(dev);
......@@ -591,20 +528,6 @@ fixup_resources(struct pci_dev *dev)
}
}
if (is_eeh_implemented()) {
if (is_eeh_configured(dev)) {
eeh_disable_bit = 0;
if (eeh_set_option(dev, EEH_ENABLE) != 0) {
printk("PCI: failed to enable EEH for %s %s\n", dev->slot_name, dev->dev.name);
eeh_disable_bit = EEH_TOKEN_DISABLED;
}
} else {
/* Assume device is by default EEH_DISABLE'd */
printk("PCI: eeh NOT configured for %s %s\n", dev->slot_name, dev->dev.name);
eeh_disable_bit = EEH_TOKEN_DISABLED;
}
}
PPCDBG(PPCDBG_PHBINIT, "fixup_resources:\n");
PPCDBG(PPCDBG_PHBINIT, "\tphb = 0x%016LX\n", phb);
PPCDBG(PPCDBG_PHBINIT, "\tphb->pci_io_offset = 0x%016LX\n", phb->pci_io_offset);
......@@ -633,19 +556,9 @@ fixup_resources(struct pci_dev *dev)
}
if (dev->resource[i].flags & IORESOURCE_IO) {
if (is_eeh_implemented()) {
unsigned int busno = dev->bus ? dev->bus->number : 0;
unsigned long size = dev->resource[i].end - dev->resource[i].start;
unsigned long addr = (unsigned long)__ioremap(dev->resource[i].start + phb->io_base_phys, size, _PAGE_NO_CACHE);
if (!addr)
panic("fixup_resources: ioremap failed!\n");
dev->resource[i].start = eeh_token(phb->global_number, busno, dev->devfn, addr) | eeh_disable_bit;
dev->resource[i].end = dev->resource[i].start + size;
} else {
unsigned long offset = (unsigned long)phb->io_base_virt;
dev->resource[i].start += offset;
dev->resource[i].end += offset;
}
unsigned long offset = (unsigned long)phb->io_base_virt - pci_io_base;
dev->resource[i].start += offset;
dev->resource[i].end += offset;
PPCDBG(PPCDBG_PHBINIT, "\t\t-> now [%lx .. %lx]\n",
dev->resource[i].start, dev->resource[i].end);
} else if (dev->resource[i].flags & IORESOURCE_MEM) {
......@@ -653,18 +566,8 @@ fixup_resources(struct pci_dev *dev)
/* Bogus. Probably an unused bridge. */
dev->resource[i].end = 0;
} else {
if (is_eeh_implemented()) {
unsigned int busno = dev->bus ? dev->bus->number : 0;
unsigned long size = dev->resource[i].end - dev->resource[i].start;
unsigned long addr = (unsigned long)__ioremap(dev->resource[i].start + phb->pci_mem_offset, size, _PAGE_NO_CACHE);
if (!addr)
panic("fixup_resources: ioremap failed!\n");
dev->resource[i].start = eeh_token(phb->global_number, busno, dev->devfn, addr) | eeh_disable_bit;
dev->resource[i].end = dev->resource[i].start + size;
} else {
dev->resource[i].start += phb->pci_mem_offset;
dev->resource[i].end += phb->pci_mem_offset;
}
dev->resource[i].start += phb->pci_mem_offset;
dev->resource[i].end += phb->pci_mem_offset;
}
PPCDBG(PPCDBG_PHBINIT, "\t\t-> now [%lx..%lx]\n",
dev->resource[i].start, dev->resource[i].end);
......
......@@ -31,7 +31,6 @@
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/machdep.h>
#include <asm/eeh.h>
#include "pci.h"
......@@ -121,6 +120,43 @@ static void fixup_windbond_82c105(struct pci_dev* dev)
}
/* Given an mmio phys address, find a pci device that implements
* this address. This is of course expensive, but only used
* for device initialization or error paths.
* For io BARs it is assumed the pci_io_base has already been added
* into addr.
*
* Bridges are ignored although they could be used to optimize the search.
*/
struct pci_dev *pci_find_dev_by_addr(unsigned long addr)
{
struct pci_dev *dev;
int i;
unsigned long ioaddr;
ioaddr = (addr > _IO_BASE) ? addr - _IO_BASE : 0;
pci_for_each_dev(dev) {
if ((dev->class >> 8) == PCI_BASE_CLASS_BRIDGE)
continue;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
unsigned long start = pci_resource_start(dev,i);
unsigned long end = pci_resource_end(dev,i);
unsigned int flags = pci_resource_flags(dev,i);
if (start == 0 || ~start == 0 ||
end == 0 || ~end == 0)
continue;
if ((flags & IORESOURCE_IO) &&
(ioaddr >= start && ioaddr <= end))
return dev;
else if ((flags & IORESOURCE_MEM) &&
(addr >= start && addr <= end))
return dev;
}
}
return NULL;
}
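As a rough usage sketch (not part of this commit): an EEH error path that only has the failing address in hand can combine eeh_token_to_phys() with pci_find_dev_by_addr() to name the offending device. The helper report_eeh_failure() below is hypothetical.
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/eeh.h>
/* Hypothetical caller, for illustration only: map a failing EEH token back to
 * the physical address and then to the pci_dev that owns that BAR.
 */
static void report_eeh_failure(unsigned long token)
{
	unsigned long addr = eeh_token_to_phys(token);
	struct pci_dev *dev = pci_find_dev_by_addr(addr);
	if (dev)
		printk(KERN_ERR "EEH: MMIO failure on %s %s\n",
		       dev->slot_name, dev->dev.name);
	else
		printk(KERN_ERR "EEH: MMIO failure at %lx (no device found)\n", addr);
}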
void __devinit pcibios_fixup_pbus_ranges(struct pci_bus *pbus,
struct pbus_set_ranges_data *pranges)
{
......@@ -486,21 +522,15 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
/* Transparent resource -- don't try to "fix" it. */
continue;
}
if (is_eeh_implemented()) {
if (res->flags & (IORESOURCE_IO|IORESOURCE_MEM)) {
res->start = eeh_token(phb->global_number, bus->number, 0, 0);
res->end = eeh_token(phb->global_number, bus->number, 0xff, 0xffffffff);
}
} else {
if (res->flags & IORESOURCE_IO) {
res->start += (unsigned long)phb->io_base_virt;
res->end += (unsigned long)phb->io_base_virt;
} else if (phb->pci_mem_offset
&& (res->flags & IORESOURCE_MEM)) {
if (res->start < phb->pci_mem_offset) {
res->start += phb->pci_mem_offset;
res->end += phb->pci_mem_offset;
}
if (res->flags & IORESOURCE_IO) {
unsigned long offset = (unsigned long)phb->io_base_virt - pci_io_base;
res->start += offset;
res->end += offset;
} else if (phb->pci_mem_offset
&& (res->flags & IORESOURCE_MEM)) {
if (res->start < phb->pci_mem_offset) {
res->start += phb->pci_mem_offset;
res->end += phb->pci_mem_offset;
}
}
}
......
......@@ -56,6 +56,7 @@ typedef void *(*traverse_func)(struct device_node *me, void *data);
void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data);
void *traverse_all_pci_devices(traverse_func pre);
struct pci_dev *pci_find_dev_by_addr(unsigned long addr);
void pci_devs_phb_init(void);
void pci_fix_bus_sysdata(void);
struct device_node *fetch_dev_dn(struct pci_dev *dev);
......
......@@ -237,29 +237,6 @@ is_devfn_sub_node(struct device_node *dn, void *data)
return (devfn == (dn->devfn & 0xf8) && busno == dn->busno) ? dn : NULL;
}
/* Given an existing EADs (pci bridge) device node create a fake one
* that will simulate function zero. Make it a sibling of other_eads.
*/
static struct device_node *
create_eads_node(struct device_node *other_eads)
{
struct device_node *eads = (struct device_node *)kmalloc(sizeof(struct device_node), GFP_KERNEL);
if (!eads) return NULL; /* huh? */
*eads = *other_eads;
eads->devfn &= ~7; /* make it function zero */
eads->tce_table = NULL;
/*
* NOTE: share properties. We could copy but for now this should
* suffice. The full_name is also incorrect...but seems harmless.
*/
eads->child = NULL;
eads->next = NULL;
other_eads->allnext = eads;
other_eads->sibling = eads;
return eads;
}
/* This is the "slow" path for looking up a device_node from a
* pci_dev. It will hunt for the device under its parent's
* phb and then update sysdata for a future fastpath.
......@@ -285,43 +262,6 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev)
if (dn) {
dev->sysdata = dn;
/* ToDo: call some device init hook here */
} else {
/* Now it is very possible that we can't find the device
* because it is not the zero'th device of a multifunction
* device and we don't have permission to read the zero'th
* device. If this is the case, Linux would ordinarily skip
* all the other functions.
*/
if ((searchval & 0x7) == 0) {
struct device_node *thisdevdn;
/* Ok, we are looking for fn == 0. Let's check for other functions. */
thisdevdn = (struct device_node *)traverse_pci_devices(phb_dn, is_devfn_sub_node, NULL, (void *)searchval);
if (thisdevdn) {
/* Ah ha! There does exist a sub function.
* Now this isn't an exact match for
* searchval, but in order to get Linux to
* believe the sub functions exist we will
* need to manufacture a fake device_node for
* this zero'th function. To keep this
* simple for now we only handle pci bridges
* and we just hand back the found node which
* isn't correct, but Linux won't care.
*/
char *device_type = (char *)get_property(thisdevdn, "device_type", 0);
if (device_type && strcmp(device_type, "pci") == 0) {
return create_eads_node(thisdevdn);
}
}
}
/* ToDo: device not found...probe for it anyway with a fake dn?
struct device_node fake_dn;
memset(&fake_dn, 0, sizeof(fake_dn));
fake_dn.phb = phb;
fake_dn.busno = dev->bus->number;
fake_dn.devfn = dev->devfn;
... now do ppc_md.pcibios_read_config_dword(&fake_dn.....)
... if ok, alloc a real device_node and dn = real_dn;
*/
}
return dn;
}
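A hedged sketch of the fast path this comment alludes to (the wrapper name example_pci_device_to_OF_node is made up; the real pci_device_to_OF_node is not shown in this excerpt): return the cached dev->sysdata once fetch_dev_dn() has filled it in, otherwise take the slow lookup above.
/* Sketch only: fast path via the sysdata cache, slow path via fetch_dev_dn(). */
static inline struct device_node *example_pci_device_to_OF_node(struct pci_dev *dev)
{
	if (dev->sysdata)
		return (struct device_node *)dev->sysdata;
	return fetch_dev_dn(dev);	/* hunts under the parent phb, caches result */
}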
......
......@@ -91,6 +91,7 @@ unsigned int default_distrib_server = 0;
/* RTAS service tokens */
int ibm_get_xive;
int ibm_set_xive;
int ibm_int_on;
int ibm_int_off;
struct xics_interrupt_node {
......@@ -162,6 +163,14 @@ xics_enable_irq(
irq, call_status, status);
return;
}
/* Now unmask the interrupt (often a no-op) */
call_status = rtas_call(ibm_int_on, 1, 1, (unsigned long*)&status,
irq);
if( call_status != 0 ) {
printk("xics_disable_irq on: irq=%x: rtas_call failed, retn=%lx\n",
irq, call_status);
return;
}
}
void
......@@ -302,6 +311,7 @@ xics_init_IRQ( void )
ibm_get_xive = rtas_token("ibm,get-xive");
ibm_set_xive = rtas_token("ibm,set-xive");
ibm_int_on = rtas_token("ibm,int-on");
ibm_int_off = rtas_token("ibm,int-off");
np = find_type_devices("PowerPC-External-Interrupt-Presentation");
......
......@@ -132,12 +132,10 @@ ioremap(unsigned long addr, unsigned long size)
#ifdef CONFIG_PPC_ISERIES
return (void*)addr;
#else
if(mem_init_done && (addr >> 60UL)) {
if (IS_EEH_TOKEN_DISABLED(addr))
return (void *)IO_TOKEN_TO_ADDR(addr);
return (void*)addr; /* already mapped address or EEH token. */
}
return __ioremap(addr, size, _PAGE_NO_CACHE);
void *ret = __ioremap(addr, size, _PAGE_NO_CACHE);
if(mem_init_done)
return eeh_ioremap(addr, ret); /* may remap the addr */
return ret;
#endif
}
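For drivers the calling pattern does not change: ioremap() is still given the BAR's physical address, but the cookie it returns may now be an EEH token rather than a mapped virtual address, so it must only be dereferenced through the read*/write* macros. A hypothetical fragment (register offsets made up) to illustrate:
#include <linux/pci.h>
#include <asm/io.h>
/* Illustrative fragment, not from this commit: the value returned by ioremap()
 * may be an EEH token, so all accesses go through readl()/writel().
 */
static int example_setup_mmio(struct pci_dev *pdev)
{
	unsigned long start = pci_resource_start(pdev, 0);
	unsigned long len   = pci_resource_end(pdev, 0) - start + 1;
	void *regs = ioremap(start, len);	/* possibly an EEH token */
	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x10);		/* hypothetical enable register */
	(void)readl(regs + 0x14);	/* an all-ones read would trip the EEH check */
	return 0;
}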
......
......@@ -27,25 +27,26 @@
struct pci_dev;
#define IO_UNMAPPED_REGION_ID 0xaUL
#define IO_TOKEN_TO_ADDR(token) ((((unsigned long)(token)) & 0xFFFFFFFF) | (0xEUL << 60))
/* Flag bits encoded in the 3 unused function bits of devfn */
#define EEH_TOKEN_DISABLED (1UL << 34UL) /* eeh is disabled for this token */
#define IS_EEH_TOKEN_DISABLED(token) ((unsigned long)(token) & EEH_TOKEN_DISABLED)
/* I/O addresses are converted to EEH "tokens" such that a driver will cause
* a bad page fault if the address is used directly (i.e. these addresses are
* never actually mapped). Translation between IO <-> EEH region is 1 to 1.
*/
#define IO_TOKEN_TO_ADDR(token) (((unsigned long)(token) & ~(0xfUL << REGION_SHIFT)) | \
(IO_REGION_ID << REGION_SHIFT))
#define IO_ADDR_TO_TOKEN(addr) (((unsigned long)(addr) & ~(0xfUL << REGION_SHIFT)) | \
(EEH_REGION_ID << REGION_SHIFT))
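To make the 1-to-1 translation concrete, here is a small standalone sketch of the same arithmetic (constants taken from the page.h hunk later in this commit; REGION_SHIFT is assumed to be 60, matching the 0xE/0xA region nibbles, and unsigned long is assumed 64-bit as on ppc64):
#include <stdio.h>
/* Standalone illustration of the EEH token <-> IO address swap: only the top
 * nibble (the region ID) changes, the low 60 bits are preserved.
 */
#define REGION_SHIFT	60
#define IO_REGION_ID	0xEUL	/* mapped I/O region         */
#define EEH_REGION_ID	0xAUL	/* unmapped EEH token region */
#define IO_TOKEN_TO_ADDR(token)	(((unsigned long)(token) & ~(0xfUL << REGION_SHIFT)) | \
				 (IO_REGION_ID << REGION_SHIFT))
#define IO_ADDR_TO_TOKEN(addr)	(((unsigned long)(addr) & ~(0xfUL << REGION_SHIFT)) | \
				 (EEH_REGION_ID << REGION_SHIFT))
int main(void)
{
	unsigned long vaddr = 0xE000000012340000UL;	/* an ioremap'ed I/O address */
	unsigned long token = IO_ADDR_TO_TOKEN(vaddr);	/* 0xA000000012340000        */
	printf("addr %lx -> token %lx -> addr %lx\n",
	       vaddr, token, IO_TOKEN_TO_ADDR(token));
	return 0;
}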
#define EEH_STATE_OVERRIDE 1 /* IOA does not require eeh traps */
#define EEH_STATE_FAILURE 16 /* */
/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED (1<<0)
#define EEH_MODE_NOCHECK (1<<1)
/* This is for profiling only */
extern unsigned long eeh_total_mmio_ffs;
extern int eeh_implemented;
void eeh_init(void);
static inline int is_eeh_implemented(void) { return eeh_implemented; }
int eeh_get_state(unsigned long ea);
unsigned long eeh_check_failure(void *token, unsigned long val);
void *eeh_ioremap(unsigned long addr, void *vaddr);
#define EEH_DISABLE 0
#define EEH_ENABLE 1
......@@ -58,15 +59,11 @@ int eeh_set_option(struct pci_dev *dev, int options);
*/
int is_eeh_configured(struct pci_dev *dev);
/* Generate an EEH token.
* The high nibble of the offset is cleared, otherwise bounds checking is performed.
* Use IO_TOKEN_TO_ADDR(token) to translate this token back to a mapped virtual addr.
* Do NOT do this to perform IO -- use the read/write macros!
/* Translate a (possible) eeh token to a physical addr.
* If "token" is not an eeh token it is simply returned under
* the assumption that it is already a physical addr.
*/
unsigned long eeh_token(unsigned long phb,
unsigned long bus,
unsigned long devfn,
unsigned long offset);
unsigned long eeh_token_to_phys(unsigned long token);
extern void *memcpy(void *, const void *, unsigned long);
extern void *memset(void *,int, unsigned long);
......@@ -77,15 +74,16 @@ extern void *memset(void *,int, unsigned long);
* If EEH is off for a device and it is a memory BAR, ioremap will
* map it to the IOREGION. In this case addr == vaddr and since these
* should be in registers we compare them first. Next we check for
* all ones which is perhaps fastest as ~val. Finally we weed out
* EEH disabled IO BARs.
* ff's which indicates a (very) possible failure.
*
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
/* #define EEH_POSSIBLE_ERROR(addr, vaddr, val) ((vaddr) != (addr) && ~(val) == 0 && !IS_EEH_TOKEN_DISABLED(addr)) */
/* #define EEH_POSSIBLE_IO_ERROR(val) (~(val) == 0) */
/* #define EEH_POSSIBLE_ERROR(addr, vaddr, val) ((vaddr) != (addr) && EEH_POSSIBLE_IO_ERROR(val) */
/* This version is rearranged to collect some profiling data */
#define EEH_POSSIBLE_ERROR(addr, vaddr, val) (~(val) == 0 && (++eeh_total_mmio_ffs, (vaddr) != (addr) && !IS_EEH_TOKEN_DISABLED(addr)))
#define EEH_POSSIBLE_IO_ERROR(val) (~(val) == 0 && ++eeh_total_mmio_ffs)
#define EEH_POSSIBLE_ERROR(addr, vaddr, val) (EEH_POSSIBLE_IO_ERROR(val) && (vaddr) != (addr))
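The readb()/readw()/readl() wrappers wired up in the io.h hunk below rely on this check; their bodies are not part of this excerpt, but a minimal sketch of their shape, reusing IO_TOKEN_TO_ADDR(), in_8() and the eeh_check_failure() declaration above, would be:
/* Sketch only (not lines from this commit): perform the MMIO access, and only
 * if the result is all-ones on what still looks like an EEH token, drop into
 * the out-of-line eeh_check_failure() slow path.
 */
static inline u8 eeh_readb(void *addr)
{
	volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
	u8 val = in_8(vaddr);
	if (EEH_POSSIBLE_ERROR(addr, vaddr, val))
		return eeh_check_failure(addr, val);
	return val;
}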
/*
* MMIO read/write operations with EEH support.
......@@ -149,38 +147,56 @@ static inline void eeh_memcpy_toio(void *dest, void *src, unsigned long n) {
memcpy(vdest, src, n);
}
static inline void eeh_insb(volatile u8 *addr, void *buf, int n) {
volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
_insb(vaddr, buf, n);
/* ToDo: look for ff's in buf[n] */
/* The I/O macros must handle ISA ports as well as PCI I/O bars.
* ISA does not implement EEH and ISA may not exist in the system.
* For PCI we check for EEH failures.
*/
#define _IO_IS_ISA(port) ((port) < 0x10000)
#define _IO_HAS_ISA_BUS (isa_io_base != 0)
static inline u8 eeh_inb(unsigned long port) {
u8 val;
if (_IO_IS_ISA(port) && !_IO_HAS_ISA_BUS)
return ~0;
val = in_8((u8 *)(port+pci_io_base));
if (!_IO_IS_ISA(port) && EEH_POSSIBLE_IO_ERROR(val))
return eeh_check_failure((void*)(port+pci_io_base), val);
return val;
}
static inline void eeh_outsb(volatile u8 *addr, const void *buf, int n) {
volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
_outsb(vaddr, buf, n);
static inline void eeh_outb(u8 val, unsigned long port) {
if (!_IO_IS_ISA(port) || _IO_HAS_ISA_BUS)
return out_8((u8 *)(port+pci_io_base), val);
}
static inline void eeh_insw_ns(volatile u16 *addr, void *buf, int n) {
volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
_insw_ns(vaddr, buf, n);
/* ToDo: look for ffff's in buf[n] */
static inline u16 eeh_inw(unsigned long port) {
u16 val;
if (_IO_IS_ISA(port) && !_IO_HAS_ISA_BUS)
return ~0;
val = in_le16((u16 *)(port+pci_io_base));
if (!_IO_IS_ISA(port) && EEH_POSSIBLE_IO_ERROR(val))
return eeh_check_failure((void*)(port+pci_io_base), val);
return val;
}
static inline void eeh_outsw_ns(volatile u16 *addr, const void *buf, int n) {
volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
_outsw_ns(vaddr, buf, n);
static inline void eeh_outw(u16 val, unsigned long port) {
if (!_IO_IS_ISA(port) || _IO_HAS_ISA_BUS)
return out_le16((u16 *)(port+pci_io_base), val);
}
static inline void eeh_insl_ns(volatile u32 *addr, void *buf, int n) {
volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
_insl_ns(vaddr, buf, n);
/* ToDo: look for ffffffff's in buf[n] */
static inline u32 eeh_inl(unsigned long port) {
u32 val;
if (_IO_IS_ISA(port) && !_IO_HAS_ISA_BUS)
return ~0;
val = in_le32((u32 *)(port+pci_io_base));
if (!_IO_IS_ISA(port) && EEH_POSSIBLE_IO_ERROR(val))
return eeh_check_failure((void*)(port+pci_io_base), val);
return val;
}
static inline void eeh_outsl_ns(volatile u32 *addr, const void *buf, int n) {
volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
_outsl_ns(vaddr, buf, n);
static inline void eeh_outl(u32 val, unsigned long port) {
if (!_IO_IS_ISA(port) || _IO_HAS_ISA_BUS)
return out_le32((u32 *)(port+pci_io_base), val);
}
#endif /* _EEH_H */
......@@ -50,36 +50,45 @@ extern int have_print;
#define outw(data,addr) writew(data,((unsigned long)(addr)))
#define outl(data,addr) writel(data,((unsigned long)(addr)))
#else
#define IS_MAPPED_VADDR(port) ((unsigned long)(port) >> 60UL)
#define readb(addr) eeh_readb((void*)(addr))
#define readw(addr) eeh_readw((void*)(addr))
#define readl(addr) eeh_readl((void*)(addr))
#define writeb(data, addr) eeh_writeb((data), ((void*)(addr)))
#define writew(data, addr) eeh_writew((data), ((void*)(addr)))
#define writel(data, addr) eeh_writel((data), ((void*)(addr)))
#define memset_io(a,b,c) eeh_memset((void *)(a),(b),(c))
#define memset_io(a,b,c) eeh_memset_io((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) eeh_memcpy_toio((void *)(a),(b),(c))
#define inb(port) _inb((unsigned long)port)
#define outb(val, port) _outb(val, (unsigned long)port)
#define inw(port) _inw((unsigned long)port)
#define outw(val, port) _outw(val, (unsigned long)port)
#define inl(port) _inl((unsigned long)port)
#define outl(val, port) _outl(val, (unsigned long)port)
#define inb(port) eeh_inb((unsigned long)port)
#define outb(val, port) eeh_outb(val, (unsigned long)port)
#define inw(port) eeh_inw((unsigned long)port)
#define outw(val, port) eeh_outw(val, (unsigned long)port)
#define inl(port) eeh_inl((unsigned long)port)
#define outl(val, port) eeh_outl(val, (unsigned long)port)
/*
* The insw/outsw/insl/outsl macros don't do byte-swapping.
* They are only used in practice for transferring buffers which
* are arrays of bytes, and byte-swapping is not appropriate in
* that case. - paulus */
#define insb(port, buf, ns) eeh_insb((u8 *)(port), (buf), (ns))
#define outsb(port, buf, ns) eeh_outsb((u8 *)(port), (buf), (ns))
#define insw(port, buf, ns) eeh_insw_ns((u16 *)(port), (buf), (ns))
#define outsw(port, buf, ns) eeh_outsw_ns((u16 *)(port), (buf), (ns))
#define insl(port, buf, nl) eeh_insl_ns((u32 *)(port), (buf), (nl))
#define outsl(port, buf, nl) eeh_outsl_ns((u32 *)(port), (buf), (nl))
#define insb(port, buf, ns) _insb((u8 *)((port)+pci_io_base), (buf), (ns))
#define outsb(port, buf, ns) _outsb((u8 *)((port)+pci_io_base), (buf), (ns))
#define insw(port, buf, ns) _insw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define outsw(port, buf, ns) _outsw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define insl(port, buf, nl) _insl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#define outsl(port, buf, nl) _outsl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#endif
extern void _insb(volatile u8 *port, void *buf, int ns);
extern void _outsb(volatile u8 *port, const void *buf, int ns);
extern void _insw(volatile u16 *port, void *buf, int ns);
extern void _outsw(volatile u16 *port, const void *buf, int ns);
extern void _insl(volatile u32 *port, void *buf, int nl);
extern void _outsl(volatile u32 *port, const void *buf, int nl);
extern void _insw_ns(volatile u16 *port, void *buf, int ns);
extern void _outsw_ns(volatile u16 *port, const void *buf, int ns);
extern void _insl_ns(volatile u32 *port, void *buf, int nl);
extern void _outsl_ns(volatile u32 *port, const void *buf, int nl);
/*
* output pause versions need a delay at least for the
......@@ -92,27 +101,15 @@ extern int have_print;
#define inl_p(port) inl(port)
#define outl_p(val, port) (udelay(1), outl((val), (port)))
extern void _insb(volatile u8 *port, void *buf, int ns);
extern void _outsb(volatile u8 *port, const void *buf, int ns);
extern void _insw(volatile u16 *port, void *buf, int ns);
extern void _outsw(volatile u16 *port, const void *buf, int ns);
extern void _insl(volatile u32 *port, void *buf, int nl);
extern void _outsl(volatile u32 *port, const void *buf, int nl);
extern void _insw_ns(volatile u16 *port, void *buf, int ns);
extern void _outsw_ns(volatile u16 *port, const void *buf, int ns);
extern void _insl_ns(volatile u32 *port, void *buf, int nl);
extern void _outsl_ns(volatile u32 *port, const void *buf, int nl);
/*
* The *_ns versions below don't do byte-swapping.
* Neither do the standard versions now, these are just here
* for older code.
*/
#define insw_ns(port, buf, ns) insw(port, buf, ns)
#define outsw_ns(port, buf, ns) outsw(port, buf, ns)
#define insl_ns(port, buf, nl) insl(port, buf, nl)
#define outsl_ns(port, buf, nl) outsl(port, buf, nl)
#define insw_ns(port, buf, ns) _insw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define outsw_ns(port, buf, ns) _outsw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define insl_ns(port, buf, nl) _insl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#define outsl_ns(port, buf, nl) _outsl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#define IO_SPACE_LIMIT ~(0UL)
......@@ -249,49 +246,6 @@ static inline void out_be32(volatile unsigned *addr, int val)
#ifndef CONFIG_PPC_ISERIES
#include <asm/eeh.h>
static inline u8 _inb(unsigned long port) {
if (IS_MAPPED_VADDR(port))
return readb((void *)port);
else if (_IO_BASE)
return in_8((u8 *)((port)+_IO_BASE));
else
return 0xff;
}
static inline void _outb(u8 val, unsigned long port) {
if (IS_MAPPED_VADDR(port))
return writeb(val, (void *)port);
else if (_IO_BASE)
out_8((u8 *)((port)+_IO_BASE), val);
}
static inline u16 _inw(unsigned long port) {
if (IS_MAPPED_VADDR(port))
return readw((void *)port);
else if (_IO_BASE)
return in_le16((u16 *)((port)+_IO_BASE));
else
return 0xffff;
}
static inline void _outw(u16 val, unsigned long port) {
if (IS_MAPPED_VADDR(port))
return writew(val, (void *)port);
else if (_IO_BASE)
out_le16((u16 *)((port)+_IO_BASE), val);
}
static inline u32 _inl(unsigned long port) {
if (IS_MAPPED_VADDR(port))
return readl((void *)port);
else if (_IO_BASE)
return in_le32((u32 *)((port)+_IO_BASE));
else
return 0xffffffff;
}
static inline void _outl(u32 val, unsigned long port) {
if (IS_MAPPED_VADDR(port))
return writel(val, (void *)port);
else if (_IO_BASE)
out_le32((u32 *)((port)+_IO_BASE), val);
}
#endif
#ifdef __KERNEL__
......
......@@ -168,8 +168,10 @@ static inline int get_order(unsigned long size)
#define KERNELBASE PAGE_OFFSET
#define VMALLOCBASE 0xD000000000000000
#define IOREGIONBASE 0xE000000000000000
#define EEHREGIONBASE 0xA000000000000000
#define IO_REGION_ID (IOREGIONBASE>>REGION_SHIFT)
#define EEH_REGION_ID (EEHREGIONBASE>>REGION_SHIFT)
#define VMALLOC_REGION_ID (VMALLOCBASE>>REGION_SHIFT)
#define KERNEL_REGION_ID (KERNELBASE>>REGION_SHIFT)
#define USER_REGION_ID (0UL)
......
......@@ -125,12 +125,18 @@ struct device_node {
int n_intrs;
struct interrupt_info *intrs;
char *full_name;
/* PCI stuff probably doesn't belong here */
int busno; /* for pci devices */
int bussubno; /* for pci devices */
int devfn; /* for pci devices */
#define DN_STATUS_BIST_FAILED (1<<0)
int status; /* Current device status (non-zero is bad) */
int eeh_mode; /* See eeh.h for possible EEH_MODEs */
int eeh_config_addr;
struct pci_controller *phb; /* for pci devices */
struct TceTable *tce_table; /* for phb's or bridges */
#define DN_STATUS_BIST_FAILED (1<<0)
struct property *properties;
struct device_node *parent;
struct device_node *child;
......